text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #include "analog.h" #include "clock.h" #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <zephyr/drivers/interrupt_controller/riscv_plic.h> /* Driver dts compatibility: telink,b91_uart */ #define DT_DRV_COMPAT telink_b91_uart /* Get UART instance */ #define GET_UART(dev) ((volatile struct uart_b91_t *) \ ((const struct uart_b91_config *)dev->config)->uart_addr) /* UART TX buffer count max value */ #define UART_TX_BUF_CNT ((uint8_t)8u) /* UART TX/RX data registers size */ #define UART_DATA_SIZE ((uint8_t)4u) /* Parity type */ #define UART_PARITY_NONE ((uint8_t)0u) #define UART_PARITY_EVEN ((uint8_t)1u) #define UART_PARITY_ODD ((uint8_t)2u) /* Stop bits length */ #define UART_STOP_BIT_1 ((uint8_t)0u) #define UART_STOP_BIT_1P5 BIT(4) #define UART_STOP_BIT_2 BIT(5) /* TX RX reset bits */ #define UART_RX_RESET_BIT BIT(6) #define UART_TX_RESET_BIT BIT(7) /* B91 UART registers structure */ struct uart_b91_t { uint8_t data_buf[UART_DATA_SIZE]; uint16_t clk_div; uint8_t ctrl0; uint8_t ctrl1; uint8_t ctrl2; uint8_t ctrl3; uint16_t rxtimeout; uint8_t bufcnt; uint8_t status; uint8_t txrx_status; uint8_t state; }; /* B91 UART data structure */ struct uart_b91_data { uint8_t tx_byte_index; uint8_t rx_byte_index; struct uart_config cfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; /* B91 UART config structure */ struct uart_b91_config { const struct pinctrl_dev_config *pcfg; uint32_t uart_addr; uint32_t baud_rate; void (*pirq_connect)(void); }; /* rxtimeout register enums */ enum { UART_ERR_IRQ_MASK = BIT(15), }; /* ctrl0 register enums */ enum { UART_RX_IRQ_MASK = BIT(6), UART_TX_IRQ_MASK = BIT(7), }; /* ctrl3 register enums */ enum { FLD_UART_RX_IRQ_TRIQ_LEV_OFFSET = 0, FLD_UART_TX_IRQ_TRIQ_LEV_OFFSET = 4, }; /* bufcnt register enums */ enum { FLD_UART_RX_BUF_CNT_OFFSET = 0, FLD_UART_TX_BUF_CNT_OFFSET = 4, }; /* status register 
enums */ enum { UART_IRQ_STATUS = BIT(3), UART_RX_ERR_STATUS = BIT(7), }; /* Get tx fifo count */ static inline uint8_t uart_b91_get_tx_bufcnt(volatile struct uart_b91_t *uart) { return (uart->bufcnt & FLD_UART_TX_BUF_CNT) >> FLD_UART_TX_BUF_CNT_OFFSET; } /* Get rx fifo count */ static inline uint8_t uart_b91_get_rx_bufcnt(volatile struct uart_b91_t *uart) { return (uart->bufcnt & FLD_UART_RX_BUF_CNT) >> FLD_UART_RX_BUF_CNT_OFFSET; } /* Check for prime */ static uint8_t uart_b91_is_prime(uint32_t n) { uint32_t i = 5; if (n <= 3) { return 1; } else if ((n % 2 == 0) || (n % 3 == 0)) { return 0; } for (i = 5; i * i < n; i += 6) { if ((n % i == 0) || (n % (i + 2)) == 0) { return 0; } } return 1; } /* Calculate the best bit width */ static void uart_b91_cal_div_and_bwpc(uint32_t baudrate, uint32_t pclk, uint16_t *divider, uint8_t *bwpc) { uint8_t i = 0, j = 0; uint32_t primeInt = 0; uint8_t primeDec = 0; uint32_t D_intdec[13], D_int[13]; uint8_t D_dec[13]; primeInt = pclk / baudrate; primeDec = 10 * pclk / baudrate - 10 * primeInt; if (uart_b91_is_prime(primeInt)) { primeInt += 1; } else if (primeDec > 5) { primeInt += 1; if (uart_b91_is_prime(primeInt)) { primeInt -= 1; } } for (i = 3; i <= 15; i++) { D_intdec[i - 3] = (10 * primeInt) / (i + 1); D_dec[i - 3] = D_intdec[i - 3] - 10 * (D_intdec[i - 3] / 10); D_int[i - 3] = D_intdec[i - 3] / 10; } /* find the max and min one decimation point */ uint8_t position_min = 0, position_max = 0; uint32_t min = 0xffffffff, max = 0x00; for (j = 0; j < 13; j++) { if ((D_dec[j] <= min) && (D_int[j] != 0x01)) { min = D_dec[j]; position_min = j; } if (D_dec[j] >= max) { max = D_dec[j]; position_max = j; } } if ((D_dec[position_min] < 5) && (D_dec[position_max] >= 5)) { if (D_dec[position_min] < (10 - D_dec[position_max])) { *bwpc = position_min + 3; *divider = D_int[position_min] - 1; } else { *bwpc = position_max + 3; *divider = D_int[position_max]; } } else if ((D_dec[position_min] < 5) && (D_dec[position_max] < 5)) { *bwpc = 
position_min + 3; *divider = D_int[position_min] - 1; } else { *bwpc = position_max + 3; *divider = D_int[position_max]; } } /* Initializes the UART instance */ static void uart_b91_init(volatile struct uart_b91_t *uart, uint16_t divider, uint8_t bwpc, uint8_t parity, uint8_t stop_bit) { /* config clock */ divider = divider | FLD_UART_CLK_DIV_EN; uart->ctrl0 = bwpc; uart->clk_div = divider; /* config parity */ if (parity) { /* enable parity function */ uart->ctrl1 |= FLD_UART_PARITY_ENABLE; if (parity == UART_PARITY_EVEN) { /* enable even parity */ uart->ctrl1 &= (~FLD_UART_PARITY_POLARITY); } else if (parity == UART_PARITY_ODD) { /* enable odd parity */ uart->ctrl1 |= FLD_UART_PARITY_POLARITY; } } else { uart->ctrl1 &= (~FLD_UART_PARITY_ENABLE); /* disable parity function */ } /* stop bit config */ uart->ctrl1 &= (~FLD_UART_STOP_SEL); uart->ctrl1 |= stop_bit; } /* API implementation: irq handler */ static void uart_b91_irq_handler(const struct device *dev) { #ifndef CONFIG_UART_INTERRUPT_DRIVEN ARG_UNUSED(dev); #else struct uart_b91_data *data = dev->data; if (data->callback != NULL) { data->callback(dev, data->cb_data); } #endif } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE /* API implementation: configure */ static int uart_b91_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_b91_data *data = dev->data; uint16_t divider; uint8_t bwpc; uint8_t parity; uint8_t stop_bits; volatile struct uart_b91_t *uart = GET_UART(dev); /* check parity */ if (cfg->parity == UART_CFG_PARITY_NONE) { parity = UART_PARITY_NONE; } else if (cfg->parity == UART_CFG_PARITY_ODD) { parity = UART_PARITY_ODD; } else if (cfg->parity == UART_CFG_PARITY_EVEN) { parity = UART_PARITY_EVEN; } else { return -ENOTSUP; } /* check stop bits */ if (cfg->stop_bits == UART_CFG_STOP_BITS_1) { stop_bits = UART_STOP_BIT_1; } else if (cfg->stop_bits == UART_CFG_STOP_BITS_1_5) { stop_bits = UART_STOP_BIT_1P5; } else if (cfg->stop_bits == UART_CFG_STOP_BITS_2) { stop_bits = 
UART_STOP_BIT_2; } else { return -ENOTSUP; } /* check flow control */ if (cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { return -ENOTSUP; } /* UART configure */ uart_b91_cal_div_and_bwpc(cfg->baudrate, sys_clk.pclk * 1000 * 1000, &divider, &bwpc); uart_b91_init(uart, divider, bwpc, parity, stop_bits); /* save configuration */ data->cfg = *cfg; return 0; } /* API implementation: config_get */ static int uart_b91_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_b91_data *data = dev->data; *cfg = data->cfg; return 0; } #endif /* API implementation: driver initialization */ static int uart_b91_driver_init(const struct device *dev) { int status = 0; uint16_t divider = 0u; uint8_t bwpc = 0u; volatile struct uart_b91_t *uart = GET_UART(dev); const struct uart_b91_config *cfg = dev->config; struct uart_b91_data *data = dev->data; /* Reset Tx, Rx status before usage */ uart->status |= UART_RX_RESET_BIT | UART_TX_RESET_BIT; data->rx_byte_index = 0; data->tx_byte_index = 0; /* configure pins */ status = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (status < 0) { return status; } uart_b91_cal_div_and_bwpc(cfg->baud_rate, sys_clk.pclk * 1000 * 1000, &divider, &bwpc); uart_b91_init(uart, divider, bwpc, UART_PARITY_NONE, UART_STOP_BIT_1); #ifdef CONFIG_UART_INTERRUPT_DRIVEN cfg->pirq_connect(); #endif return 0; } /* API implementation: poll_out */ static void uart_b91_poll_out(const struct device *dev, uint8_t c) { volatile struct uart_b91_t *uart = GET_UART(dev); struct uart_b91_data *data = dev->data; while (uart_b91_get_tx_bufcnt(uart) >= UART_TX_BUF_CNT) { }; uart->data_buf[data->tx_byte_index] = c; data->tx_byte_index = (data->tx_byte_index + 1) % ARRAY_SIZE(uart->data_buf); } /* API implementation: poll_in */ static int uart_b91_poll_in(const struct device *dev, unsigned char *c) { volatile struct uart_b91_t *uart = GET_UART(dev); struct uart_b91_data *data = dev->data; if (uart_b91_get_rx_bufcnt(uart) == 0) { return -1; } *c = 
uart->data_buf[data->rx_byte_index]; data->rx_byte_index = (data->rx_byte_index + 1) % ARRAY_SIZE(uart->data_buf); return 0; } /* API implementation: err_check */ static int uart_b91_err_check(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); return ((uart->status & UART_RX_ERR_STATUS) != 0) ? 1 : 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* API implementation: fifo_fill */ static int uart_b91_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { int i = 0; volatile struct uart_b91_t *uart = GET_UART(dev); if (size > UART_DATA_SIZE) { size = UART_DATA_SIZE; } for (i = 0; i < size; i++) { if (uart_b91_get_rx_bufcnt(uart) != 0) { break; } uart_b91_poll_out(dev, tx_data[i]); } return i; } /* API implementation: fifo_read */ static int uart_b91_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { int rx_count; volatile struct uart_b91_t *uart = GET_UART(dev); for (rx_count = 0; rx_count < size; rx_count++) { if (uart_b91_get_rx_bufcnt(uart) == 0) { break; } uart_b91_poll_in(dev, &rx_data[rx_count]); } return rx_count; } /* API implementation: irq_tx_enable */ static void uart_b91_irq_tx_enable(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); uart->ctrl3 = (uart->ctrl3 & (~FLD_UART_TX_IRQ_TRIQ_LEV)) | BIT(FLD_UART_TX_IRQ_TRIQ_LEV_OFFSET); uart->ctrl0 |= UART_TX_IRQ_MASK; } /* API implementation: irq_tx_disable */ static void uart_b91_irq_tx_disable(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); uart->ctrl0 &= ~UART_TX_IRQ_MASK; } /* API implementation: irq_tx_ready */ static int uart_b91_irq_tx_ready(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); return ((uart_b91_get_tx_bufcnt(uart) < UART_TX_BUF_CNT) && ((uart->ctrl0 & UART_TX_IRQ_MASK) != 0)) ? 
1 : 0; } /* API implementation: irq_tx_complete */ static int uart_b91_irq_tx_complete(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); return (uart_b91_get_tx_bufcnt(uart) == 0) ? 1 : 0; } /* API implementation: irq_rx_enable */ static void uart_b91_irq_rx_enable(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); uart->ctrl3 = (uart->ctrl3 & (~FLD_UART_RX_IRQ_TRIQ_LEV)) | BIT(FLD_UART_RX_IRQ_TRIQ_LEV_OFFSET); uart->ctrl0 |= UART_RX_IRQ_MASK; } /* API implementation: irq_rx_disable */ static void uart_b91_irq_rx_disable(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); uart->ctrl0 &= ~UART_RX_IRQ_MASK; } /* API implementation: irq_rx_ready */ static int uart_b91_irq_rx_ready(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); return (uart_b91_get_rx_bufcnt(uart) > 0) ? 1 : 0; } /* API implementation: irq_err_enable */ static void uart_b91_irq_err_enable(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); uart->rxtimeout |= UART_ERR_IRQ_MASK; } /* API implementation: irq_err_disable*/ static void uart_b91_irq_err_disable(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); uart->rxtimeout &= ~UART_ERR_IRQ_MASK; } /* API implementation: irq_is_pending */ static int uart_b91_irq_is_pending(const struct device *dev) { volatile struct uart_b91_t *uart = GET_UART(dev); return ((uart->status & UART_IRQ_STATUS) != 0) ? 
1 : 0; } /* API implementation: irq_update */ static int uart_b91_irq_update(const struct device *dev) { ARG_UNUSED(dev); /* nothing to be done */ return 1; } /* API implementation: irq_callback_set */ static void uart_b91_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_b91_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_b91_driver_api = { .poll_in = uart_b91_poll_in, .poll_out = uart_b91_poll_out, .err_check = uart_b91_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_b91_configure, .config_get = uart_b91_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_b91_fifo_fill, .fifo_read = uart_b91_fifo_read, .irq_tx_enable = uart_b91_irq_tx_enable, .irq_tx_disable = uart_b91_irq_tx_disable, .irq_tx_ready = uart_b91_irq_tx_ready, .irq_tx_complete = uart_b91_irq_tx_complete, .irq_rx_enable = uart_b91_irq_rx_enable, .irq_rx_disable = uart_b91_irq_rx_disable, .irq_rx_ready = uart_b91_irq_rx_ready, .irq_err_enable = uart_b91_irq_err_enable, .irq_err_disable = uart_b91_irq_err_disable, .irq_is_pending = uart_b91_irq_is_pending, .irq_update = uart_b91_irq_update, .irq_callback_set = uart_b91_irq_callback_set, #endif }; #define UART_B91_INIT(n) \ \ static void uart_b91_irq_connect_##n(void); \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static const struct uart_b91_config uart_b91_cfg_##n = \ { \ .uart_addr = DT_INST_REG_ADDR(n), \ .baud_rate = DT_INST_PROP(n, current_speed), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .pirq_connect = uart_b91_irq_connect_##n \ }; \ \ static struct uart_b91_data uart_b91_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, uart_b91_driver_init, \ NULL, \ &uart_b91_data_##n, \ &uart_b91_cfg_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ (void *)&uart_b91_driver_api); \ \ static void uart_b91_irq_connect_##n(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), 
DT_INST_IRQ(n, priority), \ uart_b91_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ \ riscv_plic_irq_enable(DT_INST_IRQN(n)); \ riscv_plic_set_priority(DT_INST_IRQN(n), DT_INST_IRQ(n, priority)); \ } DT_INST_FOREACH_STATUS_OKAY(UART_B91_INIT) ```
/content/code_sandbox/drivers/serial/uart_b91.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,317
```c /* * * * This driver implements input/output API for Xen domain through the * Xen consoleio interface. This should be used only for Zephyr as initial * domain (Dom0). For unprivileged domains regular ring buffer HVC driver * should be used (uart_hvc_xen.c), this console will not be available. */ #include <zephyr/arch/arm64/hypercall.h> #include <zephyr/xen/public/xen.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #define DT_DRV_COMPAT xen_hvc_consoleio static int xen_consoleio_poll_in(const struct device *dev, unsigned char *c) { int ret = 0; char temp; ret = HYPERVISOR_console_io(CONSOLEIO_read, sizeof(temp), &temp); if (!ret) { /* Char was not received */ return -1; } *c = temp; return 0; } static void xen_consoleio_poll_out(const struct device *dev, unsigned char c) { (void) HYPERVISOR_console_io(CONSOLEIO_write, sizeof(c), &c); } static const struct uart_driver_api xen_consoleio_hvc_api = { .poll_in = xen_consoleio_poll_in, .poll_out = xen_consoleio_poll_out, }; static int xen_consoleio_init(const struct device *dev) { /* Nothing to do, but still needed for device API */ return 0; } DEVICE_DT_INST_DEFINE(0, xen_consoleio_init, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_XEN_HVC_INIT_PRIORITY, &xen_consoleio_hvc_api); ```
/content/code_sandbox/drivers/serial/uart_hvc_xen_consoleio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
368
```c /** * @brief "Bottom" of native tty uart driver * */ #include "uart_native_tty_bottom.h" #include <errno.h> #include <stdio.h> #include <string.h> #include <fcntl.h> #include <poll.h> #include <termios.h> #include <unistd.h> #include <nsi_tracing.h> #define WARN(...) nsi_print_warning(__VA_ARGS__) #define ERROR(...) nsi_print_error_and_exit(__VA_ARGS__) #define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0])) struct baudrate_termios_pair { int baudrate; speed_t termios_baudrate; }; /** * @brief Lookup table for mapping the baud rate to the macro understood by termios. */ static const struct baudrate_termios_pair baudrate_lut[] = { {1200, B1200}, {1800, B1800}, {2400, B2400}, {4800, B4800}, {9600, B9600}, {19200, B19200}, {38400, B38400}, {57600, B57600}, {115200, B115200}, {230400, B230400}, {460800, B460800}, {500000, B500000}, {576000, B576000}, {921600, B921600}, {1000000, B1000000}, {1152000, B1152000}, {1500000, B1500000}, {2000000, B2000000}, {2500000, B2500000}, {3000000, B3000000}, {3500000, B3500000}, {4000000, B4000000}, }; /** * @brief Set given termios to defaults appropriate for communicating with serial port devices. * * @param ter */ static inline void native_tty_termios_defaults_set(struct termios *ter) { /* Set terminal in "serial" mode: * - Not canonical (no line input) * - No signal generation from Ctr+{C|Z..} * - No echoing */ ter->c_lflag &= ~(ICANON | ISIG | ECHO); /* No special interpretation of output bytes. * No conversion of newline to carriage return/line feed. */ ter->c_oflag &= ~(OPOST | ONLCR); /* No software flow control. */ ter->c_iflag &= ~(IXON | IXOFF | IXANY); /* No blocking, return immediately with what is available. */ ter->c_cc[VMIN] = 0; ter->c_cc[VTIME] = 0; /* No special handling of bytes on receive. 
*/ ter->c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL); /* - Enable reading data and ignore control lines */ ter->c_cflag |= CREAD | CLOCAL; } /** * @brief Set the baud rate speed in the termios structure * * @param ter * @param baudrate */ static inline void native_tty_baud_speed_set(struct termios *ter, int baudrate) { for (int i = 0; i < ARRAY_SIZE(baudrate_lut); i++) { if (baudrate_lut[i].baudrate == baudrate) { cfsetospeed(ter, baudrate_lut[i].termios_baudrate); cfsetispeed(ter, baudrate_lut[i].termios_baudrate); return; } } ERROR("Could not set baudrate, as %d is not supported.\n", baudrate); } /** * @brief Set parity setting in the termios structure * * @param ter * @param parity */ static inline void native_tty_baud_parity_set(struct termios *ter, enum native_tty_bottom_parity parity) { switch (parity) { case NTB_PARITY_NONE: ter->c_cflag &= ~PARENB; break; case NTB_PARITY_ODD: ter->c_cflag |= PARENB; ter->c_cflag |= PARODD; break; case NTB_PARITY_EVEN: ter->c_cflag |= PARENB; ter->c_cflag &= ~PARODD; break; default: /* Parity options mark and space are not supported on this driver. */ ERROR("Could not set parity.\n"); } } /** * @brief Set the number of stop bits in the termios structure * * @param ter * @param stop_bits * */ static inline void native_tty_stop_bits_set(struct termios *ter, enum native_tty_bottom_stop_bits stop_bits) { switch (stop_bits) { case NTB_STOP_BITS_1: ter->c_cflag &= ~CSTOPB; break; case NTB_STOP_BITS_2: ter->c_cflag |= CSTOPB; break; default: /* Anything else is not supported in termios. 
*/ ERROR("Could not set number of data bits.\n"); } } /** * @brief Set the number of data bits in the termios structure * * @param ter * @param data_bits * */ static inline void native_tty_data_bits_set(struct termios *ter, enum native_tty_bottom_data_bits data_bits) { unsigned int data_bits_to_set = CS5; switch (data_bits) { case NTB_DATA_BITS_5: data_bits_to_set = CS5; break; case NTB_DATA_BITS_6: data_bits_to_set = CS6; break; case NTB_DATA_BITS_7: data_bits_to_set = CS7; break; case NTB_DATA_BITS_8: data_bits_to_set = CS8; break; default: /* Anything else is not supported in termios */ ERROR("Could not set number of data bits.\n"); } /* Clear all bits that set the data size */ ter->c_cflag &= ~CSIZE; ter->c_cflag |= data_bits_to_set; } int native_tty_poll_bottom(int fd) { struct pollfd pfd = { .fd = fd, .events = POLLIN }; return poll(&pfd, 1, 0); } int native_tty_open_tty_bottom(const char *pathname) { int fd = open(pathname, O_RDWR | O_NOCTTY); if (fd < 0) { ERROR("Failed to open serial port %s, errno: %i\n", pathname, errno); } return fd; } int native_tty_configure_bottom(int fd, struct native_tty_bottom_cfg *cfg) { int rc, err; /* Structure used to control properties of a serial port */ struct termios ter; /* Read current terminal driver settings */ rc = tcgetattr(fd, &ter); if (rc) { WARN("Could not read terminal driver settings\n"); return rc; } native_tty_termios_defaults_set(&ter); native_tty_baud_speed_set(&ter, cfg->baudrate); native_tty_baud_parity_set(&ter, cfg->parity); native_tty_stop_bits_set(&ter, cfg->stop_bits); native_tty_data_bits_set(&ter, cfg->data_bits); cfg->flow_ctrl = NTB_FLOW_CTRL_NONE; rc = tcsetattr(fd, TCSANOW, &ter); if (rc) { err = errno; WARN("Could not set serial port settings, reason: %s\n", strerror(err)); return err; } /* tcsetattr returns success if ANY of the requested changes were successfully carried out, * not if ALL were. So we need to read back the settings and check if they are equal to the * requested ones. 
*/ struct termios read_ter; rc = tcgetattr(fd, &read_ter); if (rc) { err = errno; WARN("Could not read serial port settings, reason: %s\n", strerror(err)); return err; } if (ter.c_cflag != read_ter.c_cflag || ter.c_iflag != read_ter.c_iflag || ter.c_oflag != read_ter.c_oflag || ter.c_lflag != read_ter.c_lflag || ter.c_line != read_ter.c_line || ter.c_ispeed != read_ter.c_ispeed || ter.c_ospeed != read_ter.c_ospeed || 0 != memcmp(ter.c_cc, read_ter.c_cc, NCCS)) { WARN("Read serial port settings do not match set ones.\n"); return -1; } /* Flush both input and output */ rc = tcflush(fd, TCIOFLUSH); if (rc) { WARN("Could not flush serial port\n"); return rc; } return 0; } ```
/content/code_sandbox/drivers/serial/uart_native_tty_bottom.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,943
```c /* * * Based on uart_mcux_lpuart.c, which is: * */ #define DT_DRV_COMPAT xlnx_xps_uartlite_1_00_a #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <zephyr/sys/sys_io.h> /* AXI UART Lite v2 registers offsets (See Xilinx PG142 for details) */ #define RX_FIFO_OFFSET 0x00 #define TX_FIFO_OFFSET 0x04 #define STAT_REG_OFFSET 0x08 #define CTRL_REG_OFFSET 0x0c /* STAT_REG bit definitions */ #define STAT_REG_RX_FIFO_VALID_DATA BIT(0) #define STAT_REG_RX_FIFO_FULL BIT(1) #define STAT_REG_TX_FIFO_EMPTY BIT(2) #define STAT_REG_TX_FIFO_FULL BIT(3) #define STAT_REG_INTR_ENABLED BIT(4) #define STAT_REG_OVERRUN_ERROR BIT(5) #define STAT_REG_FRAME_ERROR BIT(6) #define STAT_REG_PARITY_ERROR BIT(7) /* STAT_REG bit masks */ #define STAT_REG_ERROR_MASK GENMASK(7, 5) /* CTRL_REG bit definitions */ #define CTRL_REG_RST_TX_FIFO BIT(0) #define CTRL_REG_RST_RX_FIFO BIT(1) #define CTRL_REG_ENABLE_INTR BIT(4) struct xlnx_uartlite_config { mm_reg_t base; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; struct xlnx_uartlite_data { uint32_t errors; /* spinlocks for RX and TX FIFO preventing a bus error */ struct k_spinlock rx_lock; struct k_spinlock tx_lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN const struct device *dev; struct k_timer timer; uart_irq_callback_user_data_t callback; void *callback_data; volatile uint8_t tx_irq_enabled : 1; volatile uint8_t rx_irq_enabled : 1; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static inline uint32_t xlnx_uartlite_read_status(const struct device *dev) { const struct xlnx_uartlite_config *config = dev->config; struct xlnx_uartlite_data *data = dev->data; uint32_t status; /* Cache errors as they are cleared by reading the STAT_REG */ status = sys_read32(config->base + STAT_REG_OFFSET); data->errors &= (status & STAT_REG_ERROR_MASK); /* Return current status and previously cached errors */ return status 
| data->errors; } static inline void xlnx_uartlite_clear_status(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; /* Clear cached errors */ data->errors = 0; } static inline unsigned char xlnx_uartlite_read_rx_fifo(const struct device *dev) { const struct xlnx_uartlite_config *config = dev->config; return (sys_read32(config->base + RX_FIFO_OFFSET) & BIT_MASK(8)); } static inline void xlnx_uartlite_write_tx_fifo(const struct device *dev, unsigned char c) { const struct xlnx_uartlite_config *config = dev->config; sys_write32((uint32_t)c, config->base + TX_FIFO_OFFSET); } static int xlnx_uartlite_poll_in(const struct device *dev, unsigned char *c) { uint32_t status; k_spinlock_key_t key; struct xlnx_uartlite_data *data = dev->data; int ret = -1; key = k_spin_lock(&data->rx_lock); status = xlnx_uartlite_read_status(dev); if ((status & STAT_REG_RX_FIFO_VALID_DATA) != 0) { *c = xlnx_uartlite_read_rx_fifo(dev); ret = 0; } k_spin_unlock(&data->rx_lock, key); return ret; } static void xlnx_uartlite_poll_out(const struct device *dev, unsigned char c) { uint32_t status; k_spinlock_key_t key; struct xlnx_uartlite_data *data = dev->data; bool done = false; while (!done) { key = k_spin_lock(&data->tx_lock); status = xlnx_uartlite_read_status(dev); if ((status & STAT_REG_TX_FIFO_FULL) == 0) { xlnx_uartlite_write_tx_fifo(dev, c); done = true; } k_spin_unlock(&data->tx_lock, key); } } static int xlnx_uartlite_err_check(const struct device *dev) { uint32_t status = xlnx_uartlite_read_status(dev); int err = 0; if (status & STAT_REG_OVERRUN_ERROR) { err |= UART_ERROR_OVERRUN; } if (status & STAT_REG_PARITY_ERROR) { err |= UART_ERROR_PARITY; } if (status & STAT_REG_FRAME_ERROR) { err |= UART_ERROR_FRAMING; } xlnx_uartlite_clear_status(dev); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static inline void xlnx_uartlite_irq_enable(const struct device *dev) { const struct xlnx_uartlite_config *config = dev->config; sys_write32(CTRL_REG_ENABLE_INTR, config->base 
+ CTRL_REG_OFFSET); } static inline void xlnx_uartlite_irq_cond_disable(const struct device *dev) { const struct xlnx_uartlite_config *config = dev->config; struct xlnx_uartlite_data *data = dev->data; /* TX and RX IRQs are shared. Only disable if both are disabled. */ if (!data->tx_irq_enabled && !data->rx_irq_enabled) { sys_write32(0, config->base + CTRL_REG_OFFSET); } } static int xlnx_uartlite_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { uint32_t status; k_spinlock_key_t key; struct xlnx_uartlite_data *data = dev->data; int count = 0U; while (len - count > 0) { key = k_spin_lock(&data->tx_lock); status = xlnx_uartlite_read_status(dev); if ((status & STAT_REG_TX_FIFO_FULL) == 0U) { xlnx_uartlite_write_tx_fifo(dev, tx_data[count++]); } k_spin_unlock(&data->tx_lock, key); } return count; } static int xlnx_uartlite_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { uint32_t status; k_spinlock_key_t key; struct xlnx_uartlite_data *data = dev->data; int count = 0U; while ((len - count) > 0) { key = k_spin_lock(&data->rx_lock); status = xlnx_uartlite_read_status(dev); if ((status & STAT_REG_RX_FIFO_VALID_DATA) != 0) { rx_data[count++] = xlnx_uartlite_read_rx_fifo(dev); } k_spin_unlock(&data->rx_lock, key); if (!(status & STAT_REG_RX_FIFO_VALID_DATA)) { break; } } return count; } static void xlnx_uartlite_tx_soft_isr(struct k_timer *timer) { struct xlnx_uartlite_data *data = CONTAINER_OF(timer, struct xlnx_uartlite_data, timer); if (data->callback) { data->callback(data->dev, data->callback_data); } } static void xlnx_uartlite_irq_tx_enable(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; uint32_t status; data->tx_irq_enabled = true; status = xlnx_uartlite_read_status(dev); xlnx_uartlite_irq_enable(dev); if ((status & STAT_REG_TX_FIFO_EMPTY) && data->callback) { /* * TX_FIFO_EMPTY event already generated an edge * interrupt. 
Generate a soft interrupt and have it call the * callback function in timer isr context. */ k_timer_start(&data->timer, K_NO_WAIT, K_NO_WAIT); } } static void xlnx_uartlite_irq_tx_disable(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; data->tx_irq_enabled = false; xlnx_uartlite_irq_cond_disable(dev); } static int xlnx_uartlite_irq_tx_ready(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; uint32_t status = xlnx_uartlite_read_status(dev); return (((status & STAT_REG_TX_FIFO_FULL) == 0U) && data->tx_irq_enabled); } static int xlnx_uartlite_irq_tx_complete(const struct device *dev) { uint32_t status = xlnx_uartlite_read_status(dev); return (status & STAT_REG_TX_FIFO_EMPTY); } static void xlnx_uartlite_irq_rx_enable(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; data->rx_irq_enabled = true; /* RX_FIFO_VALID_DATA generates a level interrupt */ xlnx_uartlite_irq_enable(dev); } static void xlnx_uartlite_irq_rx_disable(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; data->rx_irq_enabled = false; xlnx_uartlite_irq_cond_disable(dev); } static int xlnx_uartlite_irq_rx_ready(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; uint32_t status = xlnx_uartlite_read_status(dev); return ((status & STAT_REG_RX_FIFO_VALID_DATA) && data->rx_irq_enabled); } static int xlnx_uartlite_irq_is_pending(const struct device *dev) { return (xlnx_uartlite_irq_tx_ready(dev) || xlnx_uartlite_irq_rx_ready(dev)); } static int xlnx_uartlite_irq_update(const struct device *dev) { return 1; } static void xlnx_uartlite_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct xlnx_uartlite_data *data = dev->data; data->callback = cb; data->callback_data = user_data; } static __unused void xlnx_uartlite_isr(const struct device *dev) { struct xlnx_uartlite_data *data = dev->data; if (data->callback) { data->callback(dev, data->callback_data); 
} } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int xlnx_uartlite_init(const struct device *dev) { const struct xlnx_uartlite_config *config = dev->config; #ifdef CONFIG_UART_INTERRUPT_DRIVEN struct xlnx_uartlite_data *data = dev->data; data->dev = dev; k_timer_init(&data->timer, &xlnx_uartlite_tx_soft_isr, NULL); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ /* Reset FIFOs and disable interrupts */ sys_write32(CTRL_REG_RST_RX_FIFO | CTRL_REG_RST_TX_FIFO, config->base + CTRL_REG_OFFSET); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } static const struct uart_driver_api xlnx_uartlite_driver_api = { .poll_in = xlnx_uartlite_poll_in, .poll_out = xlnx_uartlite_poll_out, .err_check = xlnx_uartlite_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = xlnx_uartlite_fifo_fill, .fifo_read = xlnx_uartlite_fifo_read, .irq_tx_enable = xlnx_uartlite_irq_tx_enable, .irq_tx_disable = xlnx_uartlite_irq_tx_disable, .irq_tx_ready = xlnx_uartlite_irq_tx_ready, .irq_tx_complete = xlnx_uartlite_irq_tx_complete, .irq_rx_enable = xlnx_uartlite_irq_rx_enable, .irq_rx_disable = xlnx_uartlite_irq_rx_disable, .irq_rx_ready = xlnx_uartlite_irq_rx_ready, .irq_is_pending = xlnx_uartlite_irq_is_pending, .irq_update = xlnx_uartlite_irq_update, .irq_callback_set = xlnx_uartlite_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define XLNX_UARTLITE_IRQ_INIT(n, i) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, i, irq), \ DT_INST_IRQ_BY_IDX(n, i, priority), \ xlnx_uartlite_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq)); \ } while (false) #define XLNX_UARTLITE_CONFIG_FUNC(n) \ static void xlnx_uartlite_config_func_##n(const struct device *dev) \ { \ /* IRQ line not always present on all instances */ \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 0), \ (XLNX_UARTLITE_IRQ_INIT(n, 0);)) \ } #define XLNX_UARTLITE_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = 
xlnx_uartlite_config_func_##n #define XLNX_UARTLITE_INIT_CFG(n) \ XLNX_UARTLITE_DECLARE_CFG(n, XLNX_UARTLITE_IRQ_CFG_FUNC_INIT(n)) #else #define XLNX_UARTLITE_CONFIG_FUNC(n) #define XLNX_UARTLITE_IRQ_CFG_FUNC_INIT #define XLNX_UARTLITE_INIT_CFG(n) \ XLNX_UARTLITE_DECLARE_CFG(n, XLNX_UARTLITE_IRQ_CFG_FUNC_INIT) #endif #define XLNX_UARTLITE_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct xlnx_uartlite_config xlnx_uartlite_##n##_config = { \ .base = DT_INST_REG_ADDR(n), \ IRQ_FUNC_INIT \ } #define XLNX_UARTLITE_INIT(n) \ static struct xlnx_uartlite_data xlnx_uartlite_##n##_data; \ \ static const struct xlnx_uartlite_config xlnx_uartlite_##n##_config;\ \ DEVICE_DT_INST_DEFINE(n, \ &xlnx_uartlite_init, \ NULL, \ &xlnx_uartlite_##n##_data, \ &xlnx_uartlite_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &xlnx_uartlite_driver_api); \ \ XLNX_UARTLITE_CONFIG_FUNC(n) \ \ XLNX_UARTLITE_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(XLNX_UARTLITE_INIT) ```
/content/code_sandbox/drivers/serial/uart_xlnx_uartlite.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,232
```unknown # Gecko SDK UART config UART_GECKO bool "Gecko UART/USART driver" default y depends on DT_HAS_SILABS_GECKO_UART_ENABLED || DT_HAS_SILABS_GECKO_USART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select SOC_GECKO_USART select PINCTRL if SOC_FAMILY_SILABS_S1 help Enable the Gecko uart driver. ```
/content/code_sandbox/drivers/serial/Kconfig.gecko
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
87
```c /* * */ #define DT_DRV_COMPAT gaisler_apbuart #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> #include <zephyr/sys/time_units.h> #include <errno.h> /* APBUART registers * * Offset | Name | Description * ------ | ------ | ---------------------------------------- * 0x0000 | data | UART data register * 0x0004 | status | UART status register * 0x0008 | ctrl | UART control register * 0x000c | scaler | UART scaler register * 0x0010 | debug | UART FIFO debug register */ struct apbuart_regs { /** @brief UART data register * * Bit | Name | Description * ------ | ------ | ---------------------------------------- * 7-0 | data | Holding register or FIFO */ uint32_t data; /* 0x0000 */ /** @brief UART status register * * Bit | Name | Description * ------ | ------ | ---------------------------------------- * 31-26 | RCNT | Receiver FIFO count * 25-20 | TCNT | Transmitter FIFO count * 10 | RF | Receiver FIFO full * 9 | TF | Transmitter FIFO full * 8 | RH | Receiver FIFO half-full * 7 | TH | Transmitter FIFO half-full * 6 | FE | Framing error * 5 | PE | Parity error * 4 | OV | Overrun * 3 | BR | Break received * 2 | TE | Transmitter FIFO empty * 1 | TS | Transmitter shift register empty * 0 | DR | Data ready */ uint32_t status; /* 0x0004 */ /** @brief UART control register * * Bit | Name | Description * ------ | ------ | ---------------------------------------- * 31 | FA | FIFOs available * 14 | SI | Transmitter shift register empty interrupt enable * 13 | DI | Delayed interrupt enable * 12 | BI | Break interrupt enable * 11 | DB | FIFO debug mode enable * 10 | RF | Receiver FIFO interrupt enable * 9 | TF | Transmitter FIFO interrupt enable * 8 | EC | External clock * 7 | LB | Loop back * 6 | FL | Flow control * 5 | PE | Parity enable * 4 | PS | Parity select * 3 | TI | Transmitter interrupt enable * 2 | RI | Receiver interrupt enable * 1 | TE | Transmitter enable * 0 | RE | Receiver enable */ uint32_t ctrl; /* 0x0008 */ /** @brief UART scaler register * * Bit | Name 
| Description * ------ | ------ | ---------------------------------------- * 11-0 | RELOAD | Scaler reload value */ uint32_t scaler; /* 0x000c */ /** @brief UART FIFO debug register * * Bit | Name | Description * ------ | ------ | ---------------------------------------- * 7-0 | data | Holding register or FIFO */ uint32_t debug; /* 0x0010 */ }; /* APBUART register bits. */ /* Control register */ #define APBUART_CTRL_FA (1 << 31) #define APBUART_CTRL_DB (1 << 11) #define APBUART_CTRL_RF (1 << 10) #define APBUART_CTRL_TF (1 << 9) #define APBUART_CTRL_LB (1 << 7) #define APBUART_CTRL_FL (1 << 6) #define APBUART_CTRL_PE (1 << 5) #define APBUART_CTRL_PS (1 << 4) #define APBUART_CTRL_TI (1 << 3) #define APBUART_CTRL_RI (1 << 2) #define APBUART_CTRL_TE (1 << 1) #define APBUART_CTRL_RE (1 << 0) /* Status register */ #define APBUART_STATUS_RF (1 << 10) #define APBUART_STATUS_TF (1 << 9) #define APBUART_STATUS_RH (1 << 8) #define APBUART_STATUS_TH (1 << 7) #define APBUART_STATUS_FE (1 << 6) #define APBUART_STATUS_PE (1 << 5) #define APBUART_STATUS_OV (1 << 4) #define APBUART_STATUS_BR (1 << 3) #define APBUART_STATUS_TE (1 << 2) #define APBUART_STATUS_TS (1 << 1) #define APBUART_STATUS_DR (1 << 0) /* For APBUART implemented without FIFO */ #define APBUART_STATUS_HOLD_REGISTER_EMPTY (1 << 2) struct apbuart_dev_cfg { struct apbuart_regs *regs; int interrupt; }; struct apbuart_dev_data { int usefifo; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; void *cb_data; #endif }; /* * This routine waits for the TX holding register or TX FIFO to be ready and * then it writes a character to the data register. */ static void apbuart_poll_out(const struct device *dev, unsigned char x) { const struct apbuart_dev_cfg *config = dev->config; struct apbuart_dev_data *data = dev->data; volatile struct apbuart_regs *regs = (void *) config->regs; if (data->usefifo) { /* Transmitter FIFO full flag is available. 
*/ while (regs->status & APBUART_STATUS_TF) { ; } } else { /* * Transmitter "hold register empty" AKA "FIFO empty" flag is * available. */ while (!(regs->status & APBUART_STATUS_HOLD_REGISTER_EMPTY)) { ; } } regs->data = x & 0xff; } static int apbuart_poll_in(const struct device *dev, unsigned char *c) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; if ((regs->status & APBUART_STATUS_DR) == 0) { return -1; } *c = regs->data & 0xff; return 0; } static int apbuart_err_check(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; const uint32_t status = regs->status; int err = 0; if (status & APBUART_STATUS_FE) { err |= UART_ERROR_FRAMING; } if (status & APBUART_STATUS_PE) { err |= UART_ERROR_PARITY; } if (status & APBUART_STATUS_OV) { err |= UART_ERROR_OVERRUN; } if (status & APBUART_STATUS_BR) { err |= UART_BREAK; } return err; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int get_baud(volatile struct apbuart_regs *const regs) { unsigned int core_clk_hz; unsigned int scaler; scaler = regs->scaler; core_clk_hz = sys_clock_hw_cycles_per_sec(); /* Calculate baud rate from generator "scaler" number */ return core_clk_hz / ((scaler + 1) * 8); } static void set_baud(volatile struct apbuart_regs *const regs, uint32_t baud) { unsigned int core_clk_hz; unsigned int scaler; if (baud == 0) { return; } core_clk_hz = sys_clock_hw_cycles_per_sec(); /* Calculate Baud rate generator "scaler" number */ scaler = (core_clk_hz / (baud * 8)) - 1; /* Set new baud rate by setting scaler */ regs->scaler = scaler; } static int apbuart_configure(const struct device *dev, const struct uart_config *cfg) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; uint32_t ctrl = 0; uint32_t newctrl = 0; switch (cfg->parity) { case UART_CFG_PARITY_NONE: break; case UART_CFG_PARITY_EVEN: 
newctrl |= APBUART_CTRL_PE; break; case UART_CFG_PARITY_ODD: newctrl |= APBUART_CTRL_PE | APBUART_CTRL_PS; break; default: return -ENOTSUP; } if (cfg->stop_bits != UART_CFG_STOP_BITS_1) { return -ENOTSUP; } if (cfg->data_bits != UART_CFG_DATA_BITS_8) { return -ENOTSUP; } switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: break; case UART_CFG_FLOW_CTRL_RTS_CTS: newctrl |= APBUART_CTRL_FL; break; default: return -ENOTSUP; } set_baud(regs, cfg->baudrate); ctrl = regs->ctrl; ctrl &= ~(APBUART_CTRL_PE | APBUART_CTRL_PS | APBUART_CTRL_FL); regs->ctrl = ctrl | newctrl; return 0; } static int apbuart_config_get(const struct device *dev, struct uart_config *cfg) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; const uint32_t ctrl = regs->ctrl; cfg->parity = UART_CFG_PARITY_NONE; if (ctrl & APBUART_CTRL_PE) { if (ctrl & APBUART_CTRL_PS) { cfg->parity = UART_CFG_PARITY_ODD; } else { cfg->parity = UART_CFG_PARITY_EVEN; } } cfg->flow_ctrl = UART_CFG_FLOW_CTRL_NONE; if (ctrl & APBUART_CTRL_FL) { cfg->flow_ctrl = UART_CFG_FLOW_CTRL_RTS_CTS; } cfg->baudrate = get_baud(regs); cfg->data_bits = UART_CFG_DATA_BITS_8; cfg->stop_bits = UART_CFG_STOP_BITS_1; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void apbuart_isr(const struct device *dev); static int apbuart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct apbuart_dev_cfg *config = dev->config; struct apbuart_dev_data *data = dev->data; volatile struct apbuart_regs *regs = (void *) config->regs; int i; if (data->usefifo) { /* Transmitter FIFO full flag is available. 
*/ for ( i = 0; (i < size) && !(regs->status & APBUART_STATUS_TF); i++ ) { regs->data = tx_data[i]; } return i; } for (i = 0; (i < size) && (regs->status & APBUART_STATUS_TE); i++) { regs->data = tx_data[i]; } return i; } static int apbuart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; int i; for (i = 0; (i < size) && (regs->status & APBUART_STATUS_DR); i++) { rx_data[i] = regs->data & 0xff; } return i; } static void apbuart_irq_tx_enable(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; struct apbuart_dev_data *data = dev->data; volatile struct apbuart_regs *regs = (void *) config->regs; unsigned int key; if (data->usefifo) { /* Enable the FIFO level interrupt */ regs->ctrl |= APBUART_CTRL_TF; return; } regs->ctrl |= APBUART_CTRL_TI; /* * The "TI" interrupt is an edge interrupt. It fires each time the TX * holding register (or FIFO if implemented) moves from non-empty to * empty. * * When the APBUART is implemented _without_ FIFO, the TI interrupt is * the only TX interrupt we have. When the APBUART is implemented * _with_ FIFO, the TI will fire on each TX byte. */ regs->ctrl |= APBUART_CTRL_TI; /* Fire the first "TI" edge interrupt to get things going. 
*/ key = irq_lock(); apbuart_isr(dev); irq_unlock(key); } static void apbuart_irq_tx_disable(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; regs->ctrl &= ~(APBUART_CTRL_TF | APBUART_CTRL_TI); } static int apbuart_irq_tx_ready(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; struct apbuart_dev_data *data = dev->data; volatile struct apbuart_regs *regs = (void *) config->regs; if (data->usefifo) { return !(regs->status & APBUART_STATUS_TF); } return !!(regs->status & APBUART_STATUS_TE); } static int apbuart_irq_tx_complete(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; return !!(regs->status & APBUART_STATUS_TS); } static void apbuart_irq_rx_enable(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; regs->ctrl |= APBUART_CTRL_RI; } static void apbuart_irq_rx_disable(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; regs->ctrl &= ~APBUART_CTRL_RI; } static int apbuart_irq_rx_ready(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; volatile struct apbuart_regs *regs = (void *) config->regs; return !!(regs->status & APBUART_STATUS_DR); } static int apbuart_irq_is_pending(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; struct apbuart_dev_data *data = dev->data; volatile struct apbuart_regs *regs = (void *) config->regs; uint32_t status = regs->status; uint32_t ctrl = regs->ctrl; if ((ctrl & APBUART_CTRL_RI) && (status & APBUART_STATUS_DR)) { return 1; } if (data->usefifo) { /* TH is the TX FIFO half-empty flag */ if (status & APBUART_STATUS_TH) { return 1; } } else { if ((ctrl & APBUART_CTRL_TI) && (status & 
APBUART_STATUS_TE)) { return 1; } } return 0; } static int apbuart_irq_update(const struct device *dev) { return 1; } static void apbuart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct apbuart_dev_data *const dev_data = dev->data; dev_data->cb = cb; dev_data->cb_data = cb_data; } static void apbuart_isr(const struct device *dev) { struct apbuart_dev_data *const dev_data = dev->data; if (dev_data->cb) { dev_data->cb(dev, dev_data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int apbuart_init(const struct device *dev) { const struct apbuart_dev_cfg *config = dev->config; struct apbuart_dev_data *data = dev->data; volatile struct apbuart_regs *regs = (void *) config->regs; const uint32_t APBUART_DEBUG_MASK = APBUART_CTRL_DB | APBUART_CTRL_FL; uint32_t dm; uint32_t ctrl; ctrl = regs->ctrl; data->usefifo = !!(ctrl & APBUART_CTRL_FA); /* NOTE: CTRL_FL has reset value 0. CTRL_DB has no reset value. */ dm = ctrl & APBUART_DEBUG_MASK; if (dm == APBUART_DEBUG_MASK) { /* Debug mode enabled so assume APBUART already initialized. 
*/ ; } else { regs->ctrl = APBUART_CTRL_TE | APBUART_CTRL_RE; } regs->status = 0; #ifdef CONFIG_UART_INTERRUPT_DRIVEN irq_connect_dynamic(config->interrupt, 0, (void (*)(const void *))apbuart_isr, dev, 0); irq_enable(config->interrupt); #endif return 0; } /* Driver API defined in uart.h */ static const struct uart_driver_api apbuart_driver_api = { .poll_in = apbuart_poll_in, .poll_out = apbuart_poll_out, .err_check = apbuart_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = apbuart_configure, .config_get = apbuart_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = apbuart_fifo_fill, .fifo_read = apbuart_fifo_read, .irq_tx_enable = apbuart_irq_tx_enable, .irq_tx_disable = apbuart_irq_tx_disable, .irq_tx_ready = apbuart_irq_tx_ready, .irq_rx_enable = apbuart_irq_rx_enable, .irq_rx_disable = apbuart_irq_rx_disable, .irq_tx_complete = apbuart_irq_tx_complete, .irq_rx_ready = apbuart_irq_rx_ready, .irq_is_pending = apbuart_irq_is_pending, .irq_update = apbuart_irq_update, .irq_callback_set = apbuart_irq_callback_set, #endif }; #define APBUART_INIT(index) \ static const struct apbuart_dev_cfg apbuart##index##_config = { \ .regs = (struct apbuart_regs *) \ DT_INST_REG_ADDR(index), \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \ (.interrupt = DT_INST_IRQN(index),)) \ }; \ \ static struct apbuart_dev_data apbuart##index##_data = { \ .usefifo = 0, \ }; \ \ DEVICE_DT_INST_DEFINE(index, \ &apbuart_init, \ NULL, \ &apbuart##index##_data, \ &apbuart##index##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &apbuart_driver_api); DT_INST_FOREACH_STATUS_OKAY(APBUART_INIT) ```
/content/code_sandbox/drivers/serial/uart_apbuart.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,379
```c /* * */ #define DT_DRV_COMPAT silabs_gecko_leuart #include <errno.h> #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> #include <em_leuart.h> #include <em_gpio.h> #include <em_cmu.h> #include <soc.h> #define LEUART_PREFIX cmuClock_LEUART #define CLOCK_ID_PRFX2(prefix, suffix) prefix##suffix #define CLOCK_ID_PRFX(prefix, suffix) CLOCK_ID_PRFX2(prefix, suffix) #define CLOCK_LEUART(id) CLOCK_ID_PRFX(LEUART_PREFIX, id) #define DEV_BASE(dev) \ ((LEUART_TypeDef *) \ ((const struct leuart_gecko_config * const)(dev)->config)->base) struct leuart_gecko_config { LEUART_TypeDef *base; CMU_Clock_TypeDef clock; uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif struct soc_gpio_pin pin_rx; struct soc_gpio_pin pin_tx; #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION uint8_t loc_rx; uint8_t loc_tx; #else uint8_t loc; #endif }; struct leuart_gecko_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; static int leuart_gecko_poll_in(const struct device *dev, unsigned char *c) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t flags = LEUART_StatusGet(base); if (flags & LEUART_STATUS_RXDATAV) { *c = LEUART_Rx(base); return 0; } return -1; } static void leuart_gecko_poll_out(const struct device *dev, unsigned char c) { LEUART_TypeDef *base = DEV_BASE(dev); /* LEUART_Tx function already waits for the transmit buffer being empty * and waits for the bus to be free to transmit. 
*/ LEUART_Tx(base, c); } static int leuart_gecko_err_check(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t flags = LEUART_IntGet(base); int err = 0; if (flags & LEUART_IF_RXOF) { err |= UART_ERROR_OVERRUN; } if (flags & LEUART_IF_PERR) { err |= UART_ERROR_PARITY; } if (flags & LEUART_IF_FERR) { err |= UART_ERROR_FRAMING; } LEUART_IntClear(base, LEUART_IF_RXOF | LEUART_IF_PERR | LEUART_IF_FERR); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int leuart_gecko_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { LEUART_TypeDef *base = DEV_BASE(dev); uint8_t num_tx = 0U; while ((len - num_tx > 0) && (base->STATUS & LEUART_STATUS_TXBL)) { base->TXDATA = (uint32_t)tx_data[num_tx++]; } return num_tx; } static int leuart_gecko_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { LEUART_TypeDef *base = DEV_BASE(dev); uint8_t num_rx = 0U; while ((len - num_rx > 0) && (base->STATUS & LEUART_STATUS_RXDATAV)) { rx_data[num_rx++] = (uint8_t)base->RXDATA; } return num_rx; } static void leuart_gecko_irq_tx_enable(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t mask = LEUART_IEN_TXBL | LEUART_IEN_TXC; LEUART_IntEnable(base, mask); } static void leuart_gecko_irq_tx_disable(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t mask = LEUART_IEN_TXBL | LEUART_IEN_TXC; LEUART_IntDisable(base, mask); } static int leuart_gecko_irq_tx_complete(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t flags = LEUART_IntGet(base); return (flags & LEUART_IF_TXC) != 0U; } static int leuart_gecko_irq_tx_ready(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t flags = LEUART_IntGet(base); return (flags & LEUART_IF_TXBL) != 0U; } static void leuart_gecko_irq_rx_enable(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t mask = LEUART_IEN_RXDATAV; LEUART_IntEnable(base, mask); } static void 
leuart_gecko_irq_rx_disable(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t mask = LEUART_IEN_RXDATAV; LEUART_IntDisable(base, mask); } static int leuart_gecko_irq_rx_full(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t flags = LEUART_IntGet(base); return (flags & LEUART_IF_RXDATAV) != 0U; } static int leuart_gecko_irq_rx_ready(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); uint32_t mask = LEUART_IEN_RXDATAV; return (base->IEN & mask) && leuart_gecko_irq_rx_full(dev); } static void leuart_gecko_irq_err_enable(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); LEUART_IntEnable(base, LEUART_IF_RXOF | LEUART_IF_PERR | LEUART_IF_FERR); } static void leuart_gecko_irq_err_disable(const struct device *dev) { LEUART_TypeDef *base = DEV_BASE(dev); LEUART_IntDisable(base, LEUART_IF_RXOF | LEUART_IF_PERR | LEUART_IF_FERR); } static int leuart_gecko_irq_is_pending(const struct device *dev) { return leuart_gecko_irq_tx_ready(dev) || leuart_gecko_irq_rx_ready(dev); } static int leuart_gecko_irq_update(const struct device *dev) { return 1; } static void leuart_gecko_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct leuart_gecko_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void leuart_gecko_isr(const struct device *dev) { struct leuart_gecko_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static void leuart_gecko_init_pins(const struct device *dev) { const struct leuart_gecko_config *config = dev->config; LEUART_TypeDef *base = DEV_BASE(dev); GPIO_PinModeSet(config->pin_rx.port, config->pin_rx.pin, config->pin_rx.mode, config->pin_rx.out); GPIO_PinModeSet(config->pin_tx.port, config->pin_tx.pin, config->pin_tx.mode, config->pin_tx.out); #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION base->ROUTEPEN = LEUART_ROUTEPEN_RXPEN | 
LEUART_ROUTEPEN_TXPEN; base->ROUTELOC0 = (config->loc_tx << _LEUART_ROUTELOC0_TXLOC_SHIFT) | (config->loc_rx << _LEUART_ROUTELOC0_RXLOC_SHIFT); #else base->ROUTE = LEUART_ROUTE_RXPEN | LEUART_ROUTE_TXPEN | (config->loc << 8); #endif } static int leuart_gecko_init(const struct device *dev) { const struct leuart_gecko_config *config = dev->config; LEUART_TypeDef *base = DEV_BASE(dev); LEUART_Init_TypeDef leuartInit = LEUART_INIT_DEFAULT; /* The peripheral and gpio clock are already enabled from soc and gpio * driver */ leuartInit.baudrate = config->baud_rate; /* Enable CORE LE clock in order to access LE modules */ CMU_ClockEnable(cmuClock_CORELE, true); /* Select LFXO for LEUARTs (and wait for it to stabilize) */ CMU_ClockSelectSet(cmuClock_LFB, cmuSelect_LFXO); /* Enable LEUART clock */ CMU_ClockEnable(config->clock, true); /* Init LEUART */ LEUART_Init(base, &leuartInit); /* Initialize LEUART pins */ leuart_gecko_init_pins(dev); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } static const struct uart_driver_api leuart_gecko_driver_api = { .poll_in = leuart_gecko_poll_in, .poll_out = leuart_gecko_poll_out, .err_check = leuart_gecko_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = leuart_gecko_fifo_fill, .fifo_read = leuart_gecko_fifo_read, .irq_tx_enable = leuart_gecko_irq_tx_enable, .irq_tx_disable = leuart_gecko_irq_tx_disable, .irq_tx_complete = leuart_gecko_irq_tx_complete, .irq_tx_ready = leuart_gecko_irq_tx_ready, .irq_rx_enable = leuart_gecko_irq_rx_enable, .irq_rx_disable = leuart_gecko_irq_rx_disable, .irq_rx_ready = leuart_gecko_irq_rx_ready, .irq_err_enable = leuart_gecko_irq_err_enable, .irq_err_disable = leuart_gecko_irq_err_disable, .irq_is_pending = leuart_gecko_irq_is_pending, .irq_update = leuart_gecko_irq_update, .irq_callback_set = leuart_gecko_irq_callback_set, #endif }; #if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) #define PIN_LEUART_0_RXD {DT_INST_PROP_BY_IDX(0, location_rx, 1), \ 
DT_INST_PROP_BY_IDX(0, location_rx, 2), gpioModeInput, 1} #define PIN_LEUART_0_TXD {DT_INST_PROP_BY_IDX(0, location_tx, 1), \ DT_INST_PROP_BY_IDX(0, location_tx, 2), gpioModePushPull, 1} #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void leuart_gecko_config_func_0(const struct device *dev); #endif static const struct leuart_gecko_config leuart_gecko_0_config = { .base = (LEUART_TypeDef *)DT_INST_REG_ADDR(0), .clock = CLOCK_LEUART(DT_INST_PROP(0, peripheral_id)), .baud_rate = DT_INST_PROP(0, current_speed), .pin_rx = PIN_LEUART_0_RXD, .pin_tx = PIN_LEUART_0_TXD, #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION .loc_rx = DT_INST_PROP_BY_IDX(0, location_rx, 0), .loc_tx = DT_INST_PROP_BY_IDX(0, location_tx, 0), #else #if DT_INST_PROP_BY_IDX(0, location_rx, 0) \ != DT_INST_PROP_BY_IDX(0, location_tx, 0) #error LEUART_0 DTS location-* properties must have identical value #endif .loc = DT_INST_PROP_BY_IDX(0, location_rx, 0), #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = leuart_gecko_config_func_0, #endif }; static struct leuart_gecko_data leuart_gecko_0_data; DEVICE_DT_INST_DEFINE(0, &leuart_gecko_init, NULL, &leuart_gecko_0_data, &leuart_gecko_0_config, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &leuart_gecko_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void leuart_gecko_config_func_0(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), leuart_gecko_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */ #if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) #define PIN_LEUART_1_RXD {DT_INST_PROP_BY_IDX(1, location_rx, 1), \ DT_INST_PROP_BY_IDX(1, location_rx, 2), gpioModeInput, 1} #define PIN_LEUART_1_TXD {DT_INST_PROP_BY_IDX(1, location_tx, 1), \ DT_INST_PROP_BY_IDX(1, location_tx, 2), gpioModePushPull, 1} #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void leuart_gecko_config_func_1(const struct device *dev); #endif static const struct leuart_gecko_config 
leuart_gecko_1_config = { .base = (LEUART_TypeDef *)DT_INST_REG_ADDR(1), .clock = CLOCK_LEUART(DT_INST_PROP(1, peripheral_id)), .baud_rate = DT_INST_PROP(1, current_speed), .pin_rx = PIN_LEUART_1_RXD, .pin_tx = PIN_LEUART_1_TXD, #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION .loc_rx = DT_INST_PROP_BY_IDX(1, location_rx, 0), .loc_tx = DT_INST_PROP_BY_IDX(1, location_tx, 0), #else #if DT_INST_PROP_BY_IDX(1, location_rx, 0) \ != DT_INST_PROP_BY_IDX(1, location_tx, 0) #error LEUART_1 DTS location-* properties must have identical value #endif .loc = DT_INST_PROP_BY_IDX(1, location_rx, 0), #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = leuart_gecko_config_func_1, #endif }; static struct leuart_gecko_data leuart_gecko_1_data; DEVICE_DT_INST_DEFINE(1, &leuart_gecko_init, NULL, &leuart_gecko_1_data, &leuart_gecko_1_config, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &leuart_gecko_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void leuart_gecko_config_func_1(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(1), DT_INST_IRQ(1, priority), leuart_gecko_isr, DEVICE_DT_INST_GET(1), 0); irq_enable(DT_INST_IRQN(1)); } #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) */ ```
/content/code_sandbox/drivers/serial/leuart_gecko.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,225
```unknown # TI CC13xx / CC26xx UART configuration options config UART_CC13XX_CC26XX bool "TI SimpleLink CC13xx / CC26xx UART driver" default y depends on DT_HAS_TI_CC13XX_CC26XX_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable the TI SimpleLink CC13xx / CC26xx UART driver. ```
/content/code_sandbox/drivers/serial/Kconfig.cc13xx_cc26xx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
84
```unknown # Mi-V UART configuration option config UART_MIV bool "Mi-V serial driver" default y depends on DT_HAS_MICROCHIP_COREUART_ENABLED select SERIAL_HAS_DRIVER help This option enables the Mi-V serial driver. ```
/content/code_sandbox/drivers/serial/Kconfig.miv
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
52
```unknown # STM32 UART configuration config UART_STM32 bool "STM32 MCU serial driver" default y depends on DT_HAS_ST_STM32_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT # the ASYNC implementation requires a DMA controller select SERIAL_SUPPORT_ASYNC \ if DT_HAS_ST_STM32_DMA_V1_ENABLED || \ DT_HAS_ST_STM32_DMA_V2_ENABLED || \ DT_HAS_ST_STM32_DMA_V2BIS_ENABLED || \ DT_HAS_ST_STM32U5_DMA_ENABLED select DMA if UART_ASYNC_API select RESET help This option enables the UART driver for STM32 family of processors. Say y if you wish to use serial port on STM32 MCU. if UART_STM32 config UART_STM32U5_ERRATA_DMAT bool default y depends on SOC_STM32U575XX || SOC_STM32U585XX || \ SOC_STM32H562XX || SOC_STM32H563XX || SOC_STM32H573XX help Handles erratum "USART does not generate DMA requests after setting/clearing DMAT bit". Seen in Errata Sheet 0499 2.19.2 and 2.20.1 for stm32u57x/u58x, Errata Sheet 0565 2.14.1 and 2.15.1 for stm32h56x/h57x endif ```
/content/code_sandbox/drivers/serial/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
317
```c /* * */ #define DT_DRV_COMPAT ite_it8xxx2_uart #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <soc.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_ite_it8xxx2, CONFIG_UART_LOG_LEVEL); #if defined(CONFIG_PM_DEVICE) && defined(CONFIG_UART_CONSOLE_INPUT_EXPIRED) static struct uart_it8xxx2_data *uart_console_data; #endif struct uart_it8xxx2_config { uint8_t port; /* GPIO cells */ struct gpio_dt_spec gpio_wui; /* UART handle */ const struct device *uart_dev; /* UART alternate configuration */ const struct pinctrl_dev_config *pcfg; }; struct uart_it8xxx2_data { #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED struct k_work_delayable rx_refresh_timeout_work; #endif }; enum uart_port_num { UART1 = 1, UART2, }; #ifdef CONFIG_PM_DEVICE void uart1_wui_isr(const struct device *gpio, struct gpio_callback *cb, uint32_t pins) { /* Disable interrupts on UART1 RX pin to avoid repeated interrupts. */ (void)gpio_pin_interrupt_configure(gpio, (find_msb_set(pins) - 1), GPIO_INT_DISABLE); /* Refresh console expired time if got UART Rx wake-up event */ #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); /* * The pm state of it8xxx2 chip only supports standby, so here we * can directly set the constraint for standby. */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); k_work_reschedule(&uart_console_data->rx_refresh_timeout_work, delay); #endif } void uart2_wui_isr(const struct device *gpio, struct gpio_callback *cb, uint32_t pins) { /* Disable interrupts on UART2 RX pin to avoid repeated interrupts. 
*/ (void)gpio_pin_interrupt_configure(gpio, (find_msb_set(pins) - 1), GPIO_INT_DISABLE); /* Refresh console expired time if got UART Rx wake-up event */ #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); /* * The pm state of it8xxx2 chip only supports standby, so here we * can directly set the constraint for standby. */ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); k_work_reschedule(&uart_console_data->rx_refresh_timeout_work, delay); #endif } static inline int uart_it8xxx2_pm_action(const struct device *dev, enum pm_device_action action) { const struct uart_it8xxx2_config *const config = dev->config; int ret = 0; switch (action) { /* Next device power state is in active. */ case PM_DEVICE_ACTION_RESUME: /* Nothing to do. */ break; /* Next device power state is deep doze mode */ case PM_DEVICE_ACTION_SUSPEND: /* Enable UART WUI */ ret = gpio_pin_interrupt_configure_dt(&config->gpio_wui, GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_LOW); if (ret < 0) { LOG_ERR("Failed to configure UART%d WUI (ret %d)", config->port, ret); return ret; } break; default: return -ENOTSUP; } return 0; } #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED static void uart_it8xxx2_rx_refresh_timeout(struct k_work *work) { ARG_UNUSED(work); pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } #endif #endif /* CONFIG_PM_DEVICE */ static int uart_it8xxx2_init(const struct device *dev) { const struct uart_it8xxx2_config *const config = dev->config; int status; /* Set the pin to UART alternate function. */ status = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (status < 0) { LOG_ERR("Failed to configure UART pins"); return status; } #ifdef CONFIG_PM_DEVICE const struct device *uart_console_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console)); int ret = 0; /* * If the UART is used as a console device, we need to configure * UART Rx interrupt as wakeup source and initialize a delayable * work for console expired time. 
*/ if (config->uart_dev == uart_console_dev) { #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED uart_console_data = dev->data; k_work_init_delayable(&uart_console_data->rx_refresh_timeout_work, uart_it8xxx2_rx_refresh_timeout); #endif /* * When the system enters deep doze, all clocks are gated only the * 32.768k clock is active. We need to wakeup EC by configuring * UART Rx interrupt as a wakeup source. When the interrupt of UART * Rx falling, EC will be woken. */ if (config->port == UART1) { static struct gpio_callback uart1_wui_cb; gpio_init_callback(&uart1_wui_cb, uart1_wui_isr, BIT(config->gpio_wui.pin)); ret = gpio_add_callback(config->gpio_wui.port, &uart1_wui_cb); } else if (config->port == UART2) { static struct gpio_callback uart2_wui_cb; gpio_init_callback(&uart2_wui_cb, uart2_wui_isr, BIT(config->gpio_wui.pin)); ret = gpio_add_callback(config->gpio_wui.port, &uart2_wui_cb); } if (ret < 0) { LOG_ERR("Failed to add UART%d callback (err %d)", config->port, ret); return ret; } } #endif /* CONFIG_PM_DEVICE */ return 0; } #define UART_ITE_IT8XXX2_INIT(inst) \ PINCTRL_DT_INST_DEFINE(inst); \ static const struct uart_it8xxx2_config uart_it8xxx2_cfg_##inst = { \ .port = DT_INST_PROP(inst, port_num), \ .gpio_wui = GPIO_DT_SPEC_INST_GET(inst, gpios), \ .uart_dev = DEVICE_DT_GET(DT_INST_PHANDLE(inst, uart_dev)), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst), \ }; \ \ static struct uart_it8xxx2_data uart_it8xxx2_data_##inst; \ \ PM_DEVICE_DT_INST_DEFINE(inst, uart_it8xxx2_pm_action); \ DEVICE_DT_INST_DEFINE(inst, uart_it8xxx2_init, \ PM_DEVICE_DT_INST_GET(inst), \ &uart_it8xxx2_data_##inst, \ &uart_it8xxx2_cfg_##inst, \ PRE_KERNEL_1, \ CONFIG_UART_ITE_IT8XXX2_INIT_PRIORITY, \ NULL); DT_INST_FOREACH_STATUS_OKAY(UART_ITE_IT8XXX2_INIT) ```
/content/code_sandbox/drivers/serial/uart_ite_it8xxx2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,587
```c /* * */ #define DT_DRV_COMPAT nxp_lpc11u6x_uart #include <cmsis_core.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #include "uart_lpc11u6x.h" #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart0), okay) static int lpc11u6x_uart0_poll_in(const struct device *dev, unsigned char *c) { const struct lpc11u6x_uart0_config *cfg = dev->config; if (!(cfg->uart0->lsr & LPC11U6X_UART0_LSR_RDR)) { return -1; } *c = cfg->uart0->rbr; return 0; } static void lpc11u6x_uart0_poll_out(const struct device *dev, unsigned char c) { const struct lpc11u6x_uart0_config *cfg = dev->config; while (!(cfg->uart0->lsr & LPC11U6X_UART0_LSR_THRE)) { } cfg->uart0->thr = c; } static int lpc11u6x_uart0_err_check(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; uint32_t lsr; int ret = 0; lsr = cfg->uart0->lsr; if (lsr & LPC11U6X_UART0_LSR_OE) { ret |= UART_ERROR_OVERRUN; } if (lsr & LPC11U6X_UART0_LSR_PE) { ret |= UART_ERROR_PARITY; } if (lsr & LPC11U6X_UART0_LSR_FE) { ret |= UART_ERROR_FRAMING; } if (lsr & LPC11U6X_UART0_LSR_BI) { ret |= UART_BREAK; } return ret; } static void lpc11u6x_uart0_write_divisor(struct lpc11u6x_uart0_regs *uart0, uint32_t div) { /* Enable access to dll & dlm registers */ uart0->lcr |= LPC11U6X_UART0_LCR_DLAB; uart0->dll = div & 0xFF; uart0->dlm = (div >> 8) & 0xFF; uart0->lcr &= ~LPC11U6X_UART0_LCR_DLAB; } static void lpc11u6x_uart0_write_fdr(struct lpc11u6x_uart0_regs *uart0, uint32_t div, uint32_t mul) { uart0->fdr = (div & 0xF) | ((mul & 0xF) << 4); } static void lpc11u6x_uart0_config_baudrate(const struct device *clk_drv, const struct lpc11u6x_uart0_config *cfg, uint32_t baudrate) { uint32_t div = 1, mul, dl; uint32_t pclk; /* Compute values for fractional baud rate generator. 
We need to have * a clock that is as close as possible to a multiple of * LPC11U6X_UART0_CLK so that we can have every baudrate that is * a multiple of 9600 */ clock_control_get_rate(clk_drv, (clock_control_subsys_t) cfg->clkid, &pclk); mul = pclk / (pclk % LPC11U6X_UART0_CLK); dl = pclk / (16 * baudrate + 16 * baudrate / mul); /* Configure clock divisor and fractional baudrate generator */ lpc11u6x_uart0_write_divisor(cfg->uart0, dl); lpc11u6x_uart0_write_fdr(cfg->uart0, div, mul); } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int lpc11u6x_uart0_configure(const struct device *dev, const struct uart_config *cfg) { const struct lpc11u6x_uart0_config *dev_cfg = dev->config; struct lpc11u6x_uart0_data *data = dev->data; uint32_t flags = 0; /* Check that the baudrate is a multiple of 9600 */ if (cfg->baudrate % 9600) { return -ENOTSUP; } switch (cfg->parity) { case UART_CFG_PARITY_NONE: break; case UART_CFG_PARITY_ODD: flags |= LPC11U6X_UART0_LCR_PARTIY_ENABLE | LPC11U6X_UART0_LCR_PARTIY_ODD; break; case UART_CFG_PARITY_EVEN: flags |= LPC11U6X_UART0_LCR_PARTIY_ENABLE | LPC11U6X_UART0_LCR_PARTIY_EVEN; break; case UART_CFG_PARITY_MARK: __fallthrough; case UART_CFG_PARITY_SPACE: return -ENOTSUP; default: return -EINVAL; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_0_5: return -ENOTSUP; case UART_CFG_STOP_BITS_1: flags |= LPC11U6X_UART0_LCR_STOP_1BIT; break; case UART_CFG_STOP_BITS_1_5: return -ENOTSUP; case UART_CFG_STOP_BITS_2: flags |= LPC11U6X_UART0_LCR_STOP_2BIT; break; default: return -EINVAL; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: flags |= LPC11U6X_UART0_LCR_WLS_5BITS; break; case UART_CFG_DATA_BITS_6: flags |= LPC11U6X_UART0_LCR_WLS_6BITS; break; case UART_CFG_DATA_BITS_7: flags |= LPC11U6X_UART0_LCR_WLS_7BITS; break; case UART_CFG_DATA_BITS_8: flags |= LPC11U6X_UART0_LCR_WLS_8BITS; break; case UART_CFG_DATA_BITS_9: return -ENOTSUP; default: return -EINVAL; } if (cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { return -ENOTSUP; } 
lpc11u6x_uart0_config_baudrate(dev_cfg->clock_dev, dev_cfg, cfg->baudrate); dev_cfg->uart0->lcr = flags; data->baudrate = cfg->baudrate; data->stop_bits = cfg->stop_bits; data->data_bits = cfg->data_bits; data->flow_ctrl = cfg->flow_ctrl; data->parity = cfg->parity; return 0; } static int lpc11u6x_uart0_config_get(const struct device *dev, struct uart_config *cfg) { struct lpc11u6x_uart0_data *data = dev->data; cfg->baudrate = data->baudrate; cfg->parity = data->parity; cfg->stop_bits = data->stop_bits; cfg->data_bits = data->data_bits; cfg->flow_ctrl = data->flow_ctrl; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int lpc11u6x_uart0_fifo_fill(const struct device *dev, const uint8_t *data, const int size) { const struct lpc11u6x_uart0_config *cfg = dev->config; int nr_sent = 0; while (nr_sent < size && (cfg->uart0->lsr & LPC11U6X_UART0_LSR_THRE)) { cfg->uart0->thr = data[nr_sent++]; } return nr_sent; } static int lpc11u6x_uart0_fifo_read(const struct device *dev, uint8_t *data, const int size) { const struct lpc11u6x_uart0_config *cfg = dev->config; int nr_rx = 0; while (nr_rx < size && (cfg->uart0->lsr & LPC11U6X_UART0_LSR_RDR)) { data[nr_rx++] = cfg->uart0->rbr; } return nr_rx; } static void lpc11u6x_uart0_irq_tx_enable(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; cfg->uart0->ier = (cfg->uart0->ier & LPC11U6X_UART0_IER_MASK) | LPC11U6X_UART0_IER_THREINTEN; /* Due to hardware limitations, first TX interrupt is not triggered when * enabling it in the IER register. We have to trigger it. 
*/ NVIC_SetPendingIRQ(DT_INST_IRQN(0)); } static void lpc11u6x_uart0_irq_tx_disable(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; cfg->uart0->ier = (cfg->uart0->ier & LPC11U6X_UART0_IER_MASK) & ~LPC11U6X_UART0_IER_THREINTEN; } static int lpc11u6x_uart0_irq_tx_complete(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; return (cfg->uart0->lsr & LPC11U6X_UART0_LSR_TEMT) != 0; } static int lpc11u6x_uart0_irq_tx_ready(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; return (cfg->uart0->lsr & LPC11U6X_UART0_LSR_THRE) && (cfg->uart0->ier & LPC11U6X_UART0_IER_THREINTEN); } static void lpc11u6x_uart0_irq_rx_enable(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; cfg->uart0->ier = (cfg->uart0->ier & LPC11U6X_UART0_IER_MASK) | LPC11U6X_UART0_IER_RBRINTEN; } static void lpc11u6x_uart0_irq_rx_disable(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; cfg->uart0->ier = (cfg->uart0->ier & LPC11U6X_UART0_IER_MASK) & ~LPC11U6X_UART0_IER_RBRINTEN; } static int lpc11u6x_uart0_irq_rx_ready(const struct device *dev) { struct lpc11u6x_uart0_data *data = dev->data; return (LPC11U6X_UART0_IIR_INTID(data->cached_iir) == LPC11U6X_UART0_IIR_INTID_RDA) || (LPC11U6X_UART0_IIR_INTID(data->cached_iir) == LPC11U6X_UART0_IIR_INTID_CTI); } static void lpc11u6x_uart0_irq_err_enable(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; cfg->uart0->ier = (cfg->uart0->ier & LPC11U6X_UART0_IER_MASK) | LPC11U6X_UART0_IER_RLSINTEN; } static void lpc11u6x_uart0_irq_err_disable(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; cfg->uart0->ier = (cfg->uart0->ier & LPC11U6X_UART0_IER_MASK) & ~LPC11U6X_UART0_IER_RLSINTEN; } static int lpc11u6x_uart0_irq_is_pending(const struct device *dev) { struct lpc11u6x_uart0_data *data = dev->data; return !(data->cached_iir & 
LPC11U6X_UART0_IIR_STATUS); } static int lpc11u6x_uart0_irq_update(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; struct lpc11u6x_uart0_data *data = dev->data; data->cached_iir = cfg->uart0->iir; return 1; } static void lpc11u6x_uart0_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct lpc11u6x_uart0_data *data = dev->data; data->cb = cb; data->cb_data = user_data; } static void lpc11u6x_uart0_isr(const struct device *dev) { struct lpc11u6x_uart0_data *data = dev->data; if (data->cb) { data->cb(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int lpc11u6x_uart0_init(const struct device *dev) { const struct lpc11u6x_uart0_config *cfg = dev->config; struct lpc11u6x_uart0_data *data = dev->data; int err; /* Apply default pin control state to select RX and TX pins */ err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } if (!device_is_ready(cfg->clock_dev)) { return -ENODEV; } clock_control_on(cfg->clock_dev, (clock_control_subsys_t) cfg->clkid); /* Configure baudrate, parity and stop bits */ lpc11u6x_uart0_config_baudrate(cfg->clock_dev, cfg, cfg->baudrate); cfg->uart0->lcr |= LPC11U6X_UART0_LCR_WLS_8BITS; /* 8N1 */ data->baudrate = cfg->baudrate; data->parity = UART_CFG_PARITY_NONE; data->stop_bits = UART_CFG_STOP_BITS_1; data->data_bits = UART_CFG_DATA_BITS_8; data->flow_ctrl = UART_CFG_FLOW_CTRL_NONE; /* Configure FIFO */ cfg->uart0->fcr = LPC11U6X_UART0_FCR_FIFO_EN; #ifdef CONFIG_UART_INTERRUPT_DRIVEN cfg->irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void lpc11u6x_uart0_isr_config(const struct device *dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ PINCTRL_DT_DEFINE(DT_NODELABEL(uart0)); BUILD_ASSERT(DT_PROP(DT_NODELABEL(uart0), rx_invert) == 0, "rx-invert not supported for UART0"); BUILD_ASSERT(DT_PROP(DT_NODELABEL(uart0), tx_invert) == 0, 
"tx-invert not supported for UART0"); static const struct lpc11u6x_uart0_config uart0_config = { .uart0 = (struct lpc11u6x_uart0_regs *) DT_REG_ADDR(DT_NODELABEL(uart0)), .clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_NODELABEL(uart0))), .pincfg = PINCTRL_DT_DEV_CONFIG_GET(DT_NODELABEL(uart0)), .clkid = DT_PHA_BY_IDX(DT_NODELABEL(uart0), clocks, 0, clkid), .baudrate = DT_PROP(DT_NODELABEL(uart0), current_speed), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = lpc11u6x_uart0_isr_config, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static const struct uart_driver_api uart0_api = { .poll_in = lpc11u6x_uart0_poll_in, .poll_out = lpc11u6x_uart0_poll_out, .err_check = lpc11u6x_uart0_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = lpc11u6x_uart0_configure, .config_get = lpc11u6x_uart0_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = lpc11u6x_uart0_fifo_fill, .fifo_read = lpc11u6x_uart0_fifo_read, .irq_tx_enable = lpc11u6x_uart0_irq_tx_enable, .irq_tx_disable = lpc11u6x_uart0_irq_tx_disable, .irq_tx_ready = lpc11u6x_uart0_irq_tx_ready, .irq_tx_complete = lpc11u6x_uart0_irq_tx_complete, .irq_rx_enable = lpc11u6x_uart0_irq_rx_enable, .irq_rx_disable = lpc11u6x_uart0_irq_rx_disable, .irq_rx_ready = lpc11u6x_uart0_irq_rx_ready, .irq_err_enable = lpc11u6x_uart0_irq_err_enable, .irq_err_disable = lpc11u6x_uart0_irq_err_disable, .irq_is_pending = lpc11u6x_uart0_irq_is_pending, .irq_update = lpc11u6x_uart0_irq_update, .irq_callback_set = lpc11u6x_uart0_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static struct lpc11u6x_uart0_data uart0_data; DEVICE_DT_DEFINE(DT_NODELABEL(uart0), &lpc11u6x_uart0_init, NULL, &uart0_data, &uart0_config, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart0_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void lpc11u6x_uart0_isr_config(const struct device *dev) { IRQ_CONNECT(DT_IRQN(DT_NODELABEL(uart0)), DT_IRQ(DT_NODELABEL(uart0), priority), lpc11u6x_uart0_isr, DEVICE_DT_GET(DT_NODELABEL(uart0)), 
0); irq_enable(DT_IRQN(DT_NODELABEL(uart0))); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart0), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) static int lpc11u6x_uartx_poll_in(const struct device *dev, unsigned char *c) { const struct lpc11u6x_uartx_config *cfg = dev->config; if (!(cfg->base->stat & LPC11U6X_UARTX_STAT_RXRDY)) { return -1; } *c = cfg->base->rx_dat; return 0; } static void lpc11u6x_uartx_poll_out(const struct device *dev, unsigned char c) { const struct lpc11u6x_uartx_config *cfg = dev->config; while (!(cfg->base->stat & LPC11U6X_UARTX_STAT_TXRDY)) { } cfg->base->tx_dat = c; } static int lpc11u6x_uartx_err_check(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; int ret = 0; if (cfg->base->stat & LPC11U6X_UARTX_STAT_OVERRUNINT) { ret |= UART_ERROR_OVERRUN; } if (cfg->base->stat & LPC11U6X_UARTX_STAT_FRAMERRINT) { ret |= UART_ERROR_FRAMING; } if (cfg->base->stat & LPC11U6X_UARTX_STAT_PARITYERRINT) { ret |= UART_ERROR_PARITY; } return ret; } static void lpc11u6x_uartx_config_baud(const struct lpc11u6x_uartx_config *cfg, uint32_t baudrate) { uint32_t clk_rate; uint32_t div; const struct device *clk_drv = cfg->clock_dev; clock_control_get_rate(clk_drv, (clock_control_subsys_t) cfg->clkid, &clk_rate); div = clk_rate / (16 * baudrate); if (div != 0) { div -= 1; } cfg->base->brg = div & LPC11U6X_UARTX_BRG_MASK; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int lpc11u6x_uartx_configure(const struct device *dev, const struct uart_config *cfg) { const struct lpc11u6x_uartx_config *dev_cfg = dev->config; struct lpc11u6x_uartx_data *data = dev->data; uint32_t flags = 0; /* We only support baudrates that are multiple of 9600 */ if (cfg->baudrate % 9600) { return -ENOTSUP; } switch (cfg->parity) { case 
UART_CFG_PARITY_NONE: flags |= LPC11U6X_UARTX_CFG_PARITY_NONE; break; case UART_CFG_PARITY_ODD: flags |= LPC11U6X_UARTX_CFG_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: flags |= LPC11U6X_UARTX_CFG_PARITY_EVEN; break; case UART_CFG_PARITY_MARK: __fallthrough; case UART_CFG_PARITY_SPACE: return -ENOTSUP; default: return -EINVAL; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_0_5: return -ENOTSUP; case UART_CFG_STOP_BITS_1: flags |= LPC11U6X_UARTX_CFG_STOP_1BIT; break; case UART_CFG_STOP_BITS_1_5: return -ENOTSUP; case UART_CFG_STOP_BITS_2: flags |= LPC11U6X_UARTX_CFG_STOP_2BIT; break; default: return -EINVAL; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: __fallthrough; case UART_CFG_DATA_BITS_6: return -ENOTSUP; case UART_CFG_DATA_BITS_7: flags |= LPC11U6X_UARTX_CFG_DATALEN_7BIT; break; case UART_CFG_DATA_BITS_8: flags |= LPC11U6X_UARTX_CFG_DATALEN_8BIT; break; case UART_CFG_DATA_BITS_9: flags |= LPC11U6X_UARTX_CFG_DATALEN_9BIT; break; default: return -EINVAL; } if (cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { return -ENOTSUP; } if (dev_cfg->rx_invert) { flags |= LPC11U6X_UARTX_CFG_RXPOL(1); } if (dev_cfg->tx_invert) { flags |= LPC11U6X_UARTX_CFG_TXPOL(1); } /* Disable UART */ dev_cfg->base->cfg = 0; /* Update baudrate */ lpc11u6x_uartx_config_baud(dev_cfg, cfg->baudrate); /* Set parity, data bits, stop bits and re-enable UART interface */ dev_cfg->base->cfg = flags | LPC11U6X_UARTX_CFG_ENABLE; data->baudrate = cfg->baudrate; data->parity = cfg->parity; data->stop_bits = cfg->stop_bits; data->data_bits = cfg->data_bits; data->flow_ctrl = cfg->flow_ctrl; return 0; } static int lpc11u6x_uartx_config_get(const struct device *dev, struct uart_config *cfg) { const struct lpc11u6x_uartx_data *data = dev->data; cfg->baudrate = data->baudrate; cfg->parity = data->parity; cfg->stop_bits = data->stop_bits; cfg->data_bits = data->data_bits; cfg->flow_ctrl = data->flow_ctrl; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef 
CONFIG_UART_INTERRUPT_DRIVEN static int lpc11u6x_uartx_fifo_fill(const struct device *dev, const uint8_t *data, int size) { const struct lpc11u6x_uartx_config *cfg = dev->config; int tx_size = 0; while (tx_size < size && (cfg->base->stat & LPC11U6X_UARTX_STAT_TXRDY)) { cfg->base->tx_dat = data[tx_size++]; } return tx_size; } static int lpc11u6x_uartx_fifo_read(const struct device *dev, uint8_t *data, int size) { const struct lpc11u6x_uartx_config *cfg = dev->config; int rx_size = 0; while (rx_size < size && (cfg->base->stat & LPC11U6X_UARTX_STAT_RXRDY)) { data[rx_size++] = cfg->base->rx_dat; } return rx_size; } static void lpc11u6x_uartx_irq_tx_enable(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; cfg->base->int_en_set = (cfg->base->int_en_set & LPC11U6X_UARTX_INT_EN_SET_MASK) | LPC11U6X_UARTX_INT_EN_SET_TXRDYEN; } static void lpc11u6x_uartx_irq_tx_disable(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; cfg->base->int_en_clr = LPC11U6X_UARTX_INT_EN_CLR_TXRDYCLR; } static int lpc11u6x_uartx_irq_tx_ready(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; return (cfg->base->stat & LPC11U6X_UARTX_STAT_TXRDY) && (cfg->base->int_en_set & LPC11U6X_UARTX_INT_EN_SET_TXRDYEN); } static int lpc11u6x_uartx_irq_tx_complete(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; return (cfg->base->stat & LPC11U6X_UARTX_STAT_TXIDLE) != 0; } static void lpc11u6x_uartx_irq_rx_enable(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; cfg->base->int_en_set = (cfg->base->int_en_set & LPC11U6X_UARTX_INT_EN_SET_MASK) | LPC11U6X_UARTX_INT_EN_SET_RXRDYEN; } static void lpc11u6x_uartx_irq_rx_disable(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; cfg->base->int_en_clr = LPC11U6X_UARTX_INT_EN_CLR_RXRDYCLR; } static int lpc11u6x_uartx_irq_rx_ready(const struct device *dev) { const struct 
lpc11u6x_uartx_config *cfg = dev->config; return (cfg->base->stat & LPC11U6X_UARTX_STAT_RXRDY) && (cfg->base->int_en_set & LPC11U6X_UARTX_INT_EN_SET_RXRDYEN); } static void lpc11u6x_uartx_irq_err_enable(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; cfg->base->int_en_set = (cfg->base->int_en_set & LPC11U6X_UARTX_INT_EN_SET_MASK) | (LPC11U6X_UARTX_INT_EN_SET_RXRDYEN | LPC11U6X_UARTX_INT_EN_SET_FRAMERREN | LPC11U6X_UARTX_INT_EN_SET_PARITYERREN); } static void lpc11u6x_uartx_irq_err_disable(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; cfg->base->int_en_clr = LPC11U6X_UARTX_INT_EN_CLR_OVERRUNCLR | LPC11U6X_UARTX_INT_EN_CLR_FRAMERRCLR | LPC11U6X_UARTX_INT_EN_CLR_PARITYERRCLR; } static int lpc11u6x_uartx_irq_is_pending(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; if ((cfg->base->stat & LPC11U6X_UARTX_STAT_RXRDY) && (cfg->base->int_stat & LPC11U6X_UARTX_INT_STAT_RXRDY)) { return 1; } if ((cfg->base->stat & LPC11U6X_UARTX_STAT_TXRDY) && cfg->base->int_stat & LPC11U6X_UARTX_INT_STAT_TXRDY) { return 1; } if (cfg->base->stat & (LPC11U6X_UARTX_STAT_OVERRUNINT | LPC11U6X_UARTX_STAT_FRAMERRINT | LPC11U6X_UARTX_STAT_PARITYERRINT)) { return 1; } return 0; } static int lpc11u6x_uartx_irq_update(const struct device *dev) { return 1; } static void lpc11u6x_uartx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct lpc11u6x_uartx_data *data = dev->data; data->cb = cb; data->cb_data = user_data; } static void lpc11u6x_uartx_isr(const struct device *dev) { struct lpc11u6x_uartx_data *data = dev->data; if (data->cb) { data->cb(dev, data->cb_data); } } static void lpc11u6x_uartx_shared_isr(const void *arg) { struct lpc11u6x_uartx_shared_irq *shared_irq = (struct lpc11u6x_uartx_shared_irq *)arg; int i; for (i = 0; i < ARRAY_SIZE(shared_irq->devices); i++) { if (shared_irq->devices[i]) { lpc11u6x_uartx_isr(shared_irq->devices[i]); 
} } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int lpc11u6x_uartx_init(const struct device *dev) { const struct lpc11u6x_uartx_config *cfg = dev->config; struct lpc11u6x_uartx_data *data = dev->data; int err; /* Apply default pin control state to select RX and TX pins */ err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } clock_control_on(cfg->clock_dev, (clock_control_subsys_t) cfg->clkid); /* Configure baudrate, parity and stop bits */ lpc11u6x_uartx_config_baud(cfg, cfg->baudrate); cfg->base->cfg = LPC11U6X_UARTX_CFG_DATALEN_8BIT; /* 8N1 */ data->baudrate = cfg->baudrate; data->parity = UART_CFG_PARITY_NONE; data->stop_bits = UART_CFG_STOP_BITS_1; data->data_bits = UART_CFG_DATA_BITS_8; data->flow_ctrl = UART_CFG_FLOW_CTRL_NONE; if (cfg->rx_invert) { cfg->base->cfg |= LPC11U6X_UARTX_CFG_RXPOL(1); } if (cfg->tx_invert) { cfg->base->cfg |= LPC11U6X_UARTX_CFG_TXPOL(1); } /* Enable UART */ cfg->base->cfg = (cfg->base->cfg & LPC11U6X_UARTX_CFG_MASK) | LPC11U6X_UARTX_CFG_ENABLE; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) lpc11u6x_uartx_isr_config_1(dev); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) || * DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) lpc11u6x_uartx_isr_config_2(dev); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) || * DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } static const struct uart_driver_api uartx_api = { .poll_in = lpc11u6x_uartx_poll_in, .poll_out = lpc11u6x_uartx_poll_out, .err_check = lpc11u6x_uartx_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = lpc11u6x_uartx_configure, .config_get = lpc11u6x_uartx_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = lpc11u6x_uartx_fifo_fill, .fifo_read = 
lpc11u6x_uartx_fifo_read, .irq_tx_enable = lpc11u6x_uartx_irq_tx_enable, .irq_tx_disable = lpc11u6x_uartx_irq_tx_disable, .irq_tx_ready = lpc11u6x_uartx_irq_tx_ready, .irq_tx_complete = lpc11u6x_uartx_irq_tx_complete, .irq_rx_enable = lpc11u6x_uartx_irq_rx_enable, .irq_rx_disable = lpc11u6x_uartx_irq_rx_disable, .irq_rx_ready = lpc11u6x_uartx_irq_rx_ready, .irq_err_enable = lpc11u6x_uartx_irq_err_enable, .irq_err_disable = lpc11u6x_uartx_irq_err_disable, .irq_is_pending = lpc11u6x_uartx_irq_is_pending, .irq_update = lpc11u6x_uartx_irq_update, .irq_callback_set = lpc11u6x_uartx_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define LPC11U6X_UARTX_INIT(idx) \ PINCTRL_DT_DEFINE(DT_NODELABEL(uart##idx)); \ \ static const struct lpc11u6x_uartx_config uart_cfg_##idx = { \ .base = (struct lpc11u6x_uartx_regs *) \ DT_REG_ADDR(DT_NODELABEL(uart##idx)), \ .clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_NODELABEL(uart##idx))), \ .clkid = DT_PHA_BY_IDX(DT_NODELABEL(uart##idx), clocks, 0, clkid), \ .pincfg = PINCTRL_DT_DEV_CONFIG_GET(DT_NODELABEL(uart##idx)), \ .baudrate = DT_PROP(DT_NODELABEL(uart##idx), current_speed), \ .rx_invert = DT_PROP(DT_NODELABEL(uart##idx), rx_invert), \ .tx_invert = DT_PROP(DT_NODELABEL(uart##idx), tx_invert), \ }; \ \ static struct lpc11u6x_uartx_data uart_data_##idx; \ \ DEVICE_DT_DEFINE(DT_NODELABEL(uart##idx), \ &lpc11u6x_uartx_init, NULL, \ &uart_data_##idx, &uart_cfg_##idx, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uartx_api) #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) LPC11U6X_UARTX_INIT(1); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) LPC11U6X_UARTX_INIT(2); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) LPC11U6X_UARTX_INIT(3); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) LPC11U6X_UARTX_INIT(4); #endif /* 
DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) */ #if CONFIG_UART_INTERRUPT_DRIVEN && \ (DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay)) struct lpc11u6x_uartx_shared_irq lpc11u6x_uartx_shared_irq_info_1 = { .devices = { #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) DEVICE_DT_GET(DT_NODELABEL(uart1)), #else NULL, #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) DEVICE_DT_GET(DT_NODELABEL(uart4)), #else NULL, #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay) */ }, }; static void lpc11u6x_uartx_isr_config_1(const struct device *dev) { #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) IRQ_CONNECT(DT_IRQN(DT_NODELABEL(uart1)), DT_IRQ(DT_NODELABEL(uart1), priority), lpc11u6x_uartx_shared_isr, &lpc11u6x_uartx_shared_irq_info_1, 0); irq_enable(DT_IRQN(DT_NODELABEL(uart1))); #else IRQ_CONNECT(DT_IRQN(DT_NODELABEL(uart4)), DT_IRQ(DT_NODELABEL(uart4), priority), lpc11u6x_uartx_shared_isr, &lpc11u6x_uartx_shared_irq_info_1, 0); irq_enable(DT_IRQN(DT_NODELABEL(uart4))); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) */ } #endif /* CONFIG_UART_INTERRUPT_DRIVEN && * (DT_NODE_HAS_STATUS(DT_NODELABEL(uart1), okay) || * DT_NODE_HAS_STATUS(DT_NODELABEL(uart4), okay)) */ #if CONFIG_UART_INTERRUPT_DRIVEN && \ (DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) || \ DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay)) struct lpc11u6x_uartx_shared_irq lpc11u6x_uartx_shared_irq_info_2 = { .devices = { #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) DEVICE_DT_GET(DT_NODELABEL(uart2)), #else NULL, #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) */ #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) DEVICE_DT_GET(DT_NODELABEL(uart3)), #else NULL, #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay) */ }, }; static void lpc11u6x_uartx_isr_config_2(const struct device *dev) { #if DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) IRQ_CONNECT(DT_IRQN(DT_NODELABEL(uart2)), 
DT_IRQ(DT_NODELABEL(uart2), priority), lpc11u6x_uartx_shared_isr, &lpc11u6x_uartx_shared_irq_info_2, 0); irq_enable(DT_IRQN(DT_NODELABEL(uart2))); #else IRQ_CONNECT(DT_IRQN(DT_NODELABEL(uart3)), DT_IRQ(DT_NODELABEL(uart3), priority), lpc11u6x_uartx_shared_isr, &lpc11u6x_uartx_shared_irq_info_2, 0); irq_enable(DT_IRQN(DT_NODELABEL(uart3))); #endif /* DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) */ } #endif /* CONFIG_UART_INTERRUPT_DRIVEN && * (DT_NODE_HAS_STATUS(DT_NODELABEL(uart2), okay) || * DT_NODE_HAS_STATUS(DT_NODELABEL(uart3), okay)) */ #endif /* DT_NODE_EXISTS(DT_NODELABEL(uart1) || * DT_NODE_EXISTS(DT_NODELABEL(uart2) || * DT_NODE_EXISTS(DT_NODELABEL(uart3) || * DT_NODE_EXISTS(DT_NODELABEL(uart4) */ ```
/content/code_sandbox/drivers/serial/uart_lpc11u6x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,983
```c /* * */ #define DT_DRV_COMPAT brcm_bcm2711_aux_uart /** * @brief BCM2711 Miniuart Serial Driver * */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <stdbool.h> #include <zephyr/sys/__assert.h> #include <zephyr/init.h> #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> #define BCM2711_MU_IO 0x00 #define BCM2711_MU_IER 0x04 #define BCM2711_MU_IIR 0x08 #define BCM2711_MU_LCR 0x0c #define BCM2711_MU_MCR 0x10 #define BCM2711_MU_LSR 0x14 #define BCM2711_MU_MSR 0x18 #define BCM2711_MU_SCRATCH 0x1c #define BCM2711_MU_CNTL 0x20 #define BCM2711_MU_STAT 0x24 #define BCM2711_MU_BAUD 0x28 #define BCM2711_MU_IER_TX_INTERRUPT BIT(1) #define BCM2711_MU_IER_RX_INTERRUPT BIT(0) #define BCM2711_MU_IIR_RX_INTERRUPT BIT(2) #define BCM2711_MU_IIR_TX_INTERRUPT BIT(1) #define BCM2711_MU_IIR_FLUSH 0xc6 #define BCM2711_MU_LCR_7BIT 0x02 #define BCM2711_MU_LCR_8BIT 0x03 #define BCM2711_MU_LSR_TX_IDLE BIT(6) #define BCM2711_MU_LSR_TX_EMPTY BIT(5) #define BCM2711_MU_LSR_RX_OVERRUN BIT(1) #define BCM2711_MU_LSR_RX_READY BIT(0) #define BCM2711_MU_CNTL_RX_ENABLE BIT(0) #define BCM2711_MU_CNTL_TX_ENABLE BIT(1) struct bcm2711_uart_config { DEVICE_MMIO_ROM; /* Must be first */ uint32_t baud_rate; uint32_t clocks; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif }; struct bcm2711_uart_data { DEVICE_MMIO_RAM; /* Must be first */ mem_addr_t uart_addr; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; static bool bcm2711_mu_lowlevel_can_getc(mem_addr_t base) { return sys_read32(base + BCM2711_MU_LSR) & BCM2711_MU_LSR_RX_READY; } static bool bcm2711_mu_lowlevel_can_putc(mem_addr_t base) { return sys_read32(base + BCM2711_MU_LSR) & BCM2711_MU_LSR_TX_EMPTY; } static void bcm2711_mu_lowlevel_putc(mem_addr_t base, uint8_t ch) { /* Wait until there is data in the FIFO */ while (!bcm2711_mu_lowlevel_can_putc(base)) { ; } /* Send the character */ sys_write32(ch, base + BCM2711_MU_IO); } 
static void bcm2711_mu_lowlevel_init(mem_addr_t base, bool skip_baudrate_config, uint32_t baudrate, uint32_t input_clock) { uint32_t divider; /* Wait until there is data in the FIFO */ while (!bcm2711_mu_lowlevel_can_putc(base)) { ; } /* Disable port */ sys_write32(0x0, base + BCM2711_MU_CNTL); /* Disable interrupts */ sys_write32(0x0, base + BCM2711_MU_IER); /* Setup 8bit data width and baudrate */ sys_write32(BCM2711_MU_LCR_8BIT, base + BCM2711_MU_LCR); if (!skip_baudrate_config) { divider = (input_clock / (baudrate * 8)); sys_write32(divider - 1, base + BCM2711_MU_BAUD); } /* Enable RX & TX port */ sys_write32(BCM2711_MU_CNTL_RX_ENABLE | BCM2711_MU_CNTL_TX_ENABLE, base + BCM2711_MU_CNTL); } /** * @brief Initialize UART channel * * This routine is called to reset the chip in a quiescent state. * It is assumed that this function is called only once per UART. * * @param dev UART device struct * * @return 0 */ static int uart_bcm2711_init(const struct device *dev) { const struct bcm2711_uart_config *uart_cfg = dev->config; struct bcm2711_uart_data *uart_data = dev->data; DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); uart_data->uart_addr = DEVICE_MMIO_GET(dev); bcm2711_mu_lowlevel_init(uart_data->uart_addr, 1, uart_cfg->baud_rate, uart_cfg->clocks); #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_cfg->irq_config_func(dev); #endif return 0; } static void uart_bcm2711_poll_out(const struct device *dev, unsigned char c) { struct bcm2711_uart_data *uart_data = dev->data; bcm2711_mu_lowlevel_putc(uart_data->uart_addr, c); } static int uart_bcm2711_poll_in(const struct device *dev, unsigned char *c) { struct bcm2711_uart_data *uart_data = dev->data; while (!bcm2711_mu_lowlevel_can_getc(uart_data->uart_addr)) { ; } return sys_read32(uart_data->uart_addr + BCM2711_MU_IO) & 0xFF; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_bcm2711_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { int num_tx = 0U; struct bcm2711_uart_data *uart_data = dev->data; while ((size 
- num_tx) > 0) { /* Send a character */ bcm2711_mu_lowlevel_putc(uart_data->uart_addr, tx_data[num_tx]); num_tx++; } return num_tx; } static int uart_bcm2711_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { int num_rx = 0U; struct bcm2711_uart_data *uart_data = dev->data; while ((size - num_rx) > 0 && bcm2711_mu_lowlevel_can_getc(uart_data->uart_addr)) { /* Receive a character */ rx_data[num_rx++] = sys_read32(uart_data->uart_addr + BCM2711_MU_IO) & 0xFF; } return num_rx; } static void uart_bcm2711_irq_tx_enable(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; sys_write32(BCM2711_MU_IER_TX_INTERRUPT, uart_data->uart_addr + BCM2711_MU_IER); } static void uart_bcm2711_irq_tx_disable(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; sys_write32((uint32_t)(~BCM2711_MU_IER_TX_INTERRUPT), uart_data->uart_addr + BCM2711_MU_IER); } static int uart_bcm2711_irq_tx_ready(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; return bcm2711_mu_lowlevel_can_putc(uart_data->uart_addr); } static void uart_bcm2711_irq_rx_enable(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; sys_write32(BCM2711_MU_IER_RX_INTERRUPT, uart_data->uart_addr + BCM2711_MU_IER); } static void uart_bcm2711_irq_rx_disable(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; sys_write32((uint32_t)(~BCM2711_MU_IER_RX_INTERRUPT), uart_data->uart_addr + BCM2711_MU_IER); } static int uart_bcm2711_irq_rx_ready(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; return bcm2711_mu_lowlevel_can_getc(uart_data->uart_addr); } static int uart_bcm2711_irq_is_pending(const struct device *dev) { struct bcm2711_uart_data *uart_data = dev->data; return bcm2711_mu_lowlevel_can_getc(uart_data->uart_addr) || bcm2711_mu_lowlevel_can_putc(uart_data->uart_addr); } static int uart_bcm2711_irq_update(const struct device *dev) { return 1; } static void 
uart_bcm2711_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct bcm2711_uart_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * Note: imx UART Tx interrupts when ready to send; Rx interrupts when char * received. * * @param arg Argument to ISR. */ void uart_isr(const struct device *dev) { struct bcm2711_uart_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_bcm2711_driver_api = { .poll_in = uart_bcm2711_poll_in, .poll_out = uart_bcm2711_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_bcm2711_fifo_fill, .fifo_read = uart_bcm2711_fifo_read, .irq_tx_enable = uart_bcm2711_irq_tx_enable, .irq_tx_disable = uart_bcm2711_irq_tx_disable, .irq_tx_ready = uart_bcm2711_irq_tx_ready, .irq_rx_enable = uart_bcm2711_irq_rx_enable, .irq_rx_disable = uart_bcm2711_irq_rx_disable, .irq_rx_ready = uart_bcm2711_irq_rx_ready, .irq_is_pending = uart_bcm2711_irq_is_pending, .irq_update = uart_bcm2711_irq_update, .irq_callback_set = uart_bcm2711_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define UART_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct bcm2711_uart_config bcm2711_uart_##n##_config = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), .baud_rate = DT_INST_PROP(n, current_speed), \ .clocks = DT_INST_PROP(n, clock_frequency), IRQ_FUNC_INIT} #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_CONFIG_FUNC(n) \ static void irq_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), uart_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_IRQ_CFG_FUNC_INIT(n) .irq_config_func = irq_config_func_##n #define UART_INIT_CFG(n) UART_DECLARE_CFG(n, UART_IRQ_CFG_FUNC_INIT(n)) #else #define 
UART_CONFIG_FUNC(n) #define UART_IRQ_CFG_FUNC_INIT #define UART_INIT_CFG(n) UART_DECLARE_CFG(n, UART_IRQ_CFG_FUNC_INIT) #endif #define UART_INIT(n) \ static struct bcm2711_uart_data bcm2711_uart_##n##_data; \ \ static const struct bcm2711_uart_config bcm2711_uart_##n##_config; \ \ DEVICE_DT_INST_DEFINE(n, &uart_bcm2711_init, NULL, &bcm2711_uart_##n##_data, \ &bcm2711_uart_##n##_config, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, &uart_bcm2711_driver_api); \ \ UART_CONFIG_FUNC(n) \ \ UART_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_bcm2711.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,687
```c /* stellarisUartDrv.c - Stellaris UART driver */ #define DT_DRV_COMPAT ti_stellaris_uart /* * */ /** * @brief Driver for Stellaris UART * * Driver for Stellaris UART found namely on TI LM3S6965 board. It is similar to * an 16550 in functionality, but is not register-compatible. * It is also register-compatible with the UART found on TI CC2650 SoC, * so it can be used for boards using it, like the TI SensorTag. * * There is only support for poll-mode, so it can only be used with the printk * and STDOUT_CONSOLE APIs. */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/sys/__assert.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/drivers/uart.h> #include <zephyr/linker/sections.h> #include <zephyr/irq.h> /* definitions */ /* Stellaris UART module */ struct _uart { uint32_t dr; union { uint32_t _sr; uint32_t _cr; } u1; uint8_t _res1[0x010]; uint32_t fr; uint8_t _res2[0x04]; uint32_t ilpr; uint32_t ibrd; uint32_t fbrd; uint32_t lcrh; uint32_t ctl; uint32_t ifls; uint32_t im; uint32_t ris; uint32_t mis; uint32_t icr; uint8_t _res3[0xf8c]; uint32_t peripd_id4; uint32_t peripd_id5; uint32_t peripd_id6; uint32_t peripd_id7; uint32_t peripd_id0; uint32_t peripd_id1; uint32_t peripd_id2; uint32_t peripd_id3; uint32_t p_cell_id0; uint32_t p_cell_id1; uint32_t p_cell_id2; uint32_t p_cell_id3; }; struct uart_stellaris_config { volatile struct _uart *uart; uint32_t sys_clk_freq; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif }; /* Device data structure */ struct uart_stellaris_dev_data_t { uint32_t baud_rate; /* Baud rate */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; /**< Callback function pointer */ void *cb_data; /**< Callback function arg */ #endif }; /* bits */ #define UARTFR_BUSY 0x00000008 #define UARTFR_RXFE 0x00000010 #define UARTFR_TXFF 0x00000020 #define UARTFR_RXFF 0x00000040 #define UARTFR_TXFE 0x00000080 #define UARTLCRH_FEN 0x00000010 #define UARTLCRH_WLEN 
0x00000060 #define UARTCTL_UARTEN 0x00000001 #define UARTCTL_LBE 0x00000800 #define UARTCTL_TXEN 0x00000100 #define UARTCTL_RXEN 0x00000200 #define UARTTIM_RXIM 0x00000010 #define UARTTIM_TXIM 0x00000020 #define UARTTIM_RTIM 0x00000040 #define UARTTIM_FEIM 0x00000080 #define UARTTIM_PEIM 0x00000100 #define UARTTIM_BEIM 0x00000200 #define UARTTIM_OEIM 0x00000400 #define UARTMIS_RXMIS 0x00000010 #define UARTMIS_TXMIS 0x00000020 static const struct uart_driver_api uart_stellaris_driver_api; /** * @brief Set the baud rate * * This routine set the given baud rate for the UART. * * @param dev UART device struct * @param baudrate Baud rate * @param sys_clk_freq_hz System clock frequency in Hz */ static void baudrate_set(const struct device *dev, uint32_t baudrate, uint32_t sys_clk_freq_hz) { const struct uart_stellaris_config *config = dev->config; uint32_t brdi, brdf, div, rem; /* upon reset, the system clock uses the internal OSC @ 12MHz */ div = (baudrate * 16U); rem = sys_clk_freq_hz % div; /* * floating part of baud rate (LM3S6965 p.433), equivalent to * [float part of (SYSCLK / div)] * 64 + 0.5 */ brdf = ((((rem * 64U) << 1) / div) + 1) >> 1; /* integer part of baud rate (LM3S6965 p.433) */ brdi = sys_clk_freq_hz / div; /* * those registers are 32-bit, but the reserved bits should be * preserved */ config->uart->ibrd = (uint16_t)(brdi & 0xffff); /* 16 bits */ config->uart->fbrd = (uint8_t)(brdf & 0x3f); /* 6 bits */ } /** * @brief Enable the UART * * This routine enables the given UART. * * @param dev UART device struct */ static inline void enable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->ctl |= UARTCTL_UARTEN; } /** * @brief Disable the UART * * This routine disables the given UART. 
* * @param dev UART device struct */ static inline void disable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->ctl &= ~UARTCTL_UARTEN; /* ensure transmissions are complete */ while (config->uart->fr & UARTFR_BUSY) { } /* flush the FIFOs by disabling them */ config->uart->lcrh &= ~UARTLCRH_FEN; } /* * no stick parity * 8-bit frame * FIFOs disabled * one stop bit * parity disabled * send break off */ #define LINE_CONTROL_DEFAULTS UARTLCRH_WLEN /** * @brief Set the default UART line controls * * This routine sets the given UART's line controls to their default settings. * * @param dev UART device struct */ static inline void line_control_defaults_set(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->lcrh = LINE_CONTROL_DEFAULTS; } /** * @brief Initialize UART channel * * This routine is called to reset the chip in a quiescent state. * It is assumed that this function is called only once per UART. * * @param dev UART device struct * * @return 0 */ static int uart_stellaris_init(const struct device *dev) { struct uart_stellaris_dev_data_t *data = dev->data; const struct uart_stellaris_config *config = dev->config; disable(dev); baudrate_set(dev, data->baud_rate, config->sys_clk_freq); line_control_defaults_set(dev); enable(dev); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } /** * @brief Get the UART transmit ready status * * This routine returns the given UART's transmit ready status. * * @param dev UART device struct * * @return 0 if ready to transmit, 1 otherwise */ static int poll_tx_ready(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; return (config->uart->fr & UARTFR_TXFE); } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. 
*/ static int uart_stellaris_poll_in(const struct device *dev, unsigned char *c) { const struct uart_stellaris_config *config = dev->config; if (config->uart->fr & UARTFR_RXFE) { return (-1); } /* got a character */ *c = (unsigned char)config->uart->dr; return 0; } /** * @brief Output a character in polled mode. * * Checks if the transmitter is empty. If empty, a character is written to * the data register. * * @param dev UART device struct * @param c Character to send */ static void uart_stellaris_poll_out(const struct device *dev, unsigned char c) { const struct uart_stellaris_config *config = dev->config; while (!poll_tx_ready(dev)) { } /* send a character */ config->uart->dr = (uint32_t)c; } #if CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Fill FIFO with data * * @param dev UART device struct * @param tx_data Data to transmit * @param len Number of bytes to send * * @return Number of bytes sent */ static int uart_stellaris_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct uart_stellaris_config *config = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && ((config->uart->fr & UARTFR_TXFF) == 0U)) { config->uart->dr = (uint32_t)tx_data[num_tx++]; } return (int)num_tx; } /** * @brief Read data from FIFO * * @param dev UART device struct * @param rx_data Pointer to data container * @param size Container size * * @return Number of bytes read */ static int uart_stellaris_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_stellaris_config *config = dev->config; uint8_t num_rx = 0U; while ((size - num_rx > 0) && ((config->uart->fr & UARTFR_RXFE) == 0U)) { rx_data[num_rx++] = (uint8_t)config->uart->dr; } return num_rx; } /** * @brief Enable TX interrupt * * @param dev UART device struct */ static void uart_stellaris_irq_tx_enable(const struct device *dev) { static uint8_t first_time = 1U; /* used to allow the first transmission */ uint32_t saved_ctl; /* saved UARTCTL (control) register */ 
uint32_t saved_ibrd; /* saved UARTIBRD (integer baud rate) register */ uint32_t saved_fbrd; /* saved UARTFBRD (fractional baud rate) register */ const struct uart_stellaris_config *config = dev->config; if (first_time) { /* * The Tx interrupt will not be set when transmission is first * enabled. * A character has to be transmitted before Tx interrupts will * work, * so send one via loopback mode. */ first_time = 0U; /* save current control and baud rate settings */ saved_ctl = config->uart->ctl; saved_ibrd = config->uart->ibrd; saved_fbrd = config->uart->fbrd; /* send a character with default settings via loopback */ disable(dev); config->uart->fbrd = 0U; config->uart->ibrd = 1U; config->uart->lcrh = 0U; config->uart->ctl = (UARTCTL_UARTEN | UARTCTL_TXEN | UARTCTL_LBE); config->uart->dr = 0U; while (config->uart->fr & UARTFR_BUSY) { } /* restore control and baud rate settings */ disable(dev); config->uart->ibrd = saved_ibrd; config->uart->fbrd = saved_fbrd; line_control_defaults_set(dev); config->uart->ctl = saved_ctl; } config->uart->im |= UARTTIM_TXIM; } /** * @brief Disable TX interrupt in IER * * @param dev UART device struct */ static void uart_stellaris_irq_tx_disable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->im &= ~UARTTIM_TXIM; } /** * @brief Check if Tx IRQ has been raised * * @param dev UART device struct * * @return 1 if a Tx IRQ is pending, 0 otherwise */ static int uart_stellaris_irq_tx_ready(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; return ((config->uart->mis & UARTMIS_TXMIS) == UARTMIS_TXMIS); } /** * @brief Enable RX interrupt in IER * * @param dev UART device struct */ static void uart_stellaris_irq_rx_enable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->im |= UARTTIM_RXIM; } /** * @brief Disable RX interrupt in IER * * @param dev UART device struct */ static void 
uart_stellaris_irq_rx_disable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->im &= ~UARTTIM_RXIM; } /** * @brief Check if Rx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_stellaris_irq_rx_ready(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; return ((config->uart->mis & UARTMIS_RXMIS) == UARTMIS_RXMIS); } /** * @brief Enable error interrupts * * @param dev UART device struct */ static void uart_stellaris_irq_err_enable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->im |= (UARTTIM_RTIM | UARTTIM_FEIM | UARTTIM_PEIM | UARTTIM_BEIM | UARTTIM_OEIM); } /** * @brief Disable error interrupts * * @param dev UART device struct */ static void uart_stellaris_irq_err_disable(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; config->uart->im &= ~(UARTTIM_RTIM | UARTTIM_FEIM | UARTTIM_PEIM | UARTTIM_BEIM | UARTTIM_OEIM); } /** * @brief Check if Tx or Rx IRQ is pending * * @param dev UART device struct * * @return 1 if a Tx or Rx IRQ is pending, 0 otherwise */ static int uart_stellaris_irq_is_pending(const struct device *dev) { const struct uart_stellaris_config *config = dev->config; /* Look only at Tx and Rx data interrupt flags */ return ((config->uart->mis & (UARTMIS_RXMIS | UARTMIS_TXMIS)) ? 1 : 0); } /** * @brief Update IRQ status * * @param dev UART device struct * * @return Always 1 */ static int uart_stellaris_irq_update(const struct device *dev) { return 1; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device struct * @param cb Callback function pointer. 
*/ static void uart_stellaris_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_stellaris_dev_data_t * const dev_data = dev->data; dev_data->cb = cb; dev_data->cb_data = cb_data; } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. */ static void uart_stellaris_isr(const struct device *dev) { struct uart_stellaris_dev_data_t * const dev_data = dev->data; if (dev_data->cb) { dev_data->cb(dev, dev_data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_stellaris_driver_api = { .poll_in = uart_stellaris_poll_in, .poll_out = uart_stellaris_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_stellaris_fifo_fill, .fifo_read = uart_stellaris_fifo_read, .irq_tx_enable = uart_stellaris_irq_tx_enable, .irq_tx_disable = uart_stellaris_irq_tx_disable, .irq_tx_ready = uart_stellaris_irq_tx_ready, .irq_rx_enable = uart_stellaris_irq_rx_enable, .irq_rx_disable = uart_stellaris_irq_rx_disable, .irq_rx_ready = uart_stellaris_irq_rx_ready, .irq_err_enable = uart_stellaris_irq_err_enable, .irq_err_disable = uart_stellaris_irq_err_disable, .irq_is_pending = uart_stellaris_irq_is_pending, .irq_update = uart_stellaris_irq_update, .irq_callback_set = uart_stellaris_irq_callback_set, #endif }; #ifdef CONFIG_UART_STELLARIS_PORT_0 #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void irq_config_func_0(const struct device *port); #endif static const struct uart_stellaris_config uart_stellaris_dev_cfg_0 = { .uart = (volatile struct _uart *)DT_INST_REG_ADDR(0), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(0, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = irq_config_func_0, #endif }; static struct uart_stellaris_dev_data_t uart_stellaris_dev_data_0 = { .baud_rate = DT_INST_PROP(0, current_speed), }; DEVICE_DT_INST_DEFINE(0, uart_stellaris_init, NULL, &uart_stellaris_dev_data_0, 
&uart_stellaris_dev_cfg_0, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_stellaris_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void irq_config_func_0(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), uart_stellaris_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } #endif #endif /* CONFIG_UART_STELLARIS_PORT_0 */ #ifdef CONFIG_UART_STELLARIS_PORT_1 #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void irq_config_func_1(const struct device *port); #endif static struct uart_stellaris_config uart_stellaris_dev_cfg_1 = { .uart = (volatile struct _uart *)DT_INST_REG_ADDR(1), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(1, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = irq_config_func_1, #endif }; static struct uart_stellaris_dev_data_t uart_stellaris_dev_data_1 = { .baud_rate = DT_INST_PROP(1, current_speed), }; DEVICE_DT_INST_DEFINE(1, uart_stellaris_init, NULL, &uart_stellaris_dev_data_1, &uart_stellaris_dev_cfg_1, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_stellaris_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void irq_config_func_1(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(1), DT_INST_IRQ(1, priority), uart_stellaris_isr, DEVICE_DT_INST_GET(1), 0); irq_enable(DT_INST_IRQN(1)); } #endif #endif /* CONFIG_UART_STELLARIS_PORT_1 */ #ifdef CONFIG_UART_STELLARIS_PORT_2 #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void irq_config_func_2(const struct device *port); #endif static const struct uart_stellaris_config uart_stellaris_dev_cfg_2 = { .uart = (volatile struct _uart *)DT_INST_REG_ADDR(2), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(2, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = irq_config_func_2, #endif }; static struct uart_stellaris_dev_data_t uart_stellaris_dev_data_2 = { .baud_rate = DT_INST_PROP(2, current_speed), }; DEVICE_DT_INST_DEFINE(2, uart_stellaris_init, NULL, &uart_stellaris_dev_data_2, &uart_stellaris_dev_cfg_2, 
PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_stellaris_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void irq_config_func_2(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(2), DT_INST_IRQ(2, priority), uart_stellaris_isr, DEVICE_DT_INST_GET(2), 0); irq_enable(DT_INST_IRQN(2)); } #endif #endif /* CONFIG_UART_STELLARIS_PORT_2 */ ```
/content/code_sandbox/drivers/serial/uart_stellaris.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,622
```c /* * * Author: Saravanan Sekar <saravanan@linumiz.com> */ #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <NuMicro.h> #include <string.h> #define DT_DRV_COMPAT nuvoton_numicro_uart struct uart_numicro_config { UART_T *uart; uint32_t id_rst; uint32_t id_clk; const struct pinctrl_dev_config *pincfg; }; struct uart_numicro_data { const struct device *clock; struct uart_config ucfg; }; static int uart_numicro_poll_in(const struct device *dev, unsigned char *c) { const struct uart_numicro_config *config = dev->config; if ((config->uart->FIFOSTS & UART_FIFOSTS_RXEMPTY_Msk) != 0) { return -1; } *c = (uint8_t)config->uart->DAT; return 0; } static void uart_numicro_poll_out(const struct device *dev, unsigned char c) { const struct uart_numicro_config *config = dev->config; UART_Write(config->uart, &c, 1); } static int uart_numicro_err_check(const struct device *dev) { return 0; } static inline int32_t uart_numicro_convert_stopbit(enum uart_config_stop_bits sb) { switch (sb) { case UART_CFG_STOP_BITS_1: return UART_STOP_BIT_1; case UART_CFG_STOP_BITS_1_5: return UART_STOP_BIT_1_5; case UART_CFG_STOP_BITS_2: return UART_STOP_BIT_2; default: return -ENOTSUP; } }; static inline int32_t uart_numicro_convert_datalen(enum uart_config_data_bits db) { switch (db) { case UART_CFG_DATA_BITS_5: return UART_WORD_LEN_5; case UART_CFG_DATA_BITS_6: return UART_WORD_LEN_6; case UART_CFG_DATA_BITS_7: return UART_WORD_LEN_7; case UART_CFG_DATA_BITS_8: return UART_WORD_LEN_8; default: return -ENOTSUP; } } static inline uint32_t uart_numicro_convert_parity(enum uart_config_parity parity) { switch (parity) { case UART_CFG_PARITY_ODD: return UART_PARITY_ODD; case UART_CFG_PARITY_EVEN: return UART_PARITY_EVEN; case UART_CFG_PARITY_MARK: return UART_PARITY_MARK; case UART_CFG_PARITY_SPACE: return UART_PARITY_SPACE; case UART_CFG_PARITY_NONE: default: return UART_PARITY_NONE; } } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_numicro_configure(const 
struct device *dev, const struct uart_config *cfg) { const struct uart_numicro_config *config = dev->config; struct uart_numicro_data *ddata = dev->data; int32_t databits, stopbits; uint32_t parity; databits = uart_numicro_convert_datalen(cfg->data_bits); if (databits < 0) { return databits; } stopbits = uart_numicro_convert_stopbit(cfg->stop_bits); if (stopbits < 0) { return stopbits; } if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE) { UART_DisableFlowCtrl(config->uart); } else if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) { UART_EnableFlowCtrl(config->uart); } else { return -ENOTSUP; } parity = uart_numicro_convert_parity(cfg->parity); UART_SetLineConfig(config->uart, cfg->baudrate, databits, parity, stopbits); memcpy(&ddata->ucfg, cfg, sizeof(*cfg)); return 0; } static int uart_numicro_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_numicro_data *ddata = dev->data; memcpy(cfg, &ddata->ucfg, sizeof(*cfg)); return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_numicro_init(const struct device *dev) { const struct uart_numicro_config *config = dev->config; struct uart_numicro_data *ddata = dev->data; int err; SYS_ResetModule(config->id_rst); SYS_UnlockReg(); /* Enable UART module clock */ CLK_EnableModuleClock(config->id_clk); /* Select UART0 clock source is PLL */ CLK_SetModuleClock(config->id_clk, CLK_CLKSEL1_UART0SEL_PLL, CLK_CLKDIV0_UART0(0)); SYS_LockReg(); err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { return err; } UART_Open(config->uart, ddata->ucfg.baudrate); return 0; } static const struct uart_driver_api uart_numicro_driver_api = { .poll_in = uart_numicro_poll_in, .poll_out = uart_numicro_poll_out, .err_check = uart_numicro_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_numicro_configure, .config_get = uart_numicro_config_get, #endif }; #define NUMICRO_INIT(index) \ PINCTRL_DT_INST_DEFINE(index); \ \ static const struct uart_numicro_config 
uart_numicro_cfg_##index = { \ .uart = (UART_T *)DT_INST_REG_ADDR(index), \ .id_rst = UART##index##_RST, \ .id_clk = UART##index##_MODULE, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index), \ }; \ \ static struct uart_numicro_data uart_numicro_data_##index = { \ .ucfg = { \ .baudrate = DT_INST_PROP(index, current_speed), \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(index, \ uart_numicro_init, \ NULL, \ &uart_numicro_data_##index, \ &uart_numicro_cfg_##index, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_numicro_driver_api); DT_INST_FOREACH_STATUS_OKAY(NUMICRO_INIT) ```
/content/code_sandbox/drivers/serial/uart_numicro.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,373
```unknown # Gecko SDK LEUART config LEUART_GECKO bool "Gecko leuart driver" default y depends on DT_HAS_SILABS_GECKO_LEUART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select SOC_GECKO_LEUART help Enable the Gecko leuart driver. ```
/content/code_sandbox/drivers/serial/Kconfig.leuart_gecko
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```c /* * */ #define DT_DRV_COMPAT quicklogic_usbserialport_s3b #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <eoss3_dev.h> #include "uart_ql_usbserialport_s3b.h" /* * code is a modified version of usbserial driver from path_to_url * freertos_gateware/src/eoss3_hal_fpga_usbserial.c * freertos_gateware/inc/eoss3_hal_fpga_usbserial.h */ volatile struct fpga_usbserial_regs *usbserial_regs = (struct fpga_usbserial_regs *)(FPGA_PERIPH_BASE); static uint32_t usbserial_tx_fifo_status(void) { return usbserial_regs->m2u_fifo_flags; } static bool usbserial_tx_fifo_full(void) { return usbserial_tx_fifo_status() == USBSERIAL_TX_FIFO_FULL; } static uint32_t usbserial_rx_fifo_status(void) { return usbserial_regs->u2m_fifo_flags; } static bool usbserial_rx_fifo_empty(void) { return usbserial_rx_fifo_status() == USBSERIAL_RX_FIFO_EMPTY; } /** * @brief Output a character in polled mode. * * Writes data to tx register. Waits for space if transmitter is full. * * @param dev UART device struct * @param c Character to send */ static void uart_usbserial_poll_out(const struct device *dev, unsigned char c) { /* Wait for room in Tx FIFO */ while (usbserial_tx_fifo_full()) { ; } usbserial_regs->wdata = c; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_usbserial_poll_in(const struct device *dev, unsigned char *c) { if (usbserial_rx_fifo_empty()) { return -1; } *c = usbserial_regs->rdata; return 0; } static const struct uart_driver_api uart_usbserial_driver_api = { .poll_in = uart_usbserial_poll_in, .poll_out = uart_usbserial_poll_out, }; DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, NULL, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, (void *)&uart_usbserial_driver_api); ```
/content/code_sandbox/drivers/serial/uart_ql_usbserialport_s3b.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
505
```c /* * */ #include "uart_rzt2m.h" #include "zephyr/spinlock.h" #include "zephyr/sys/printk.h" #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #include <stdint.h> #include <zephyr/logging/log.h> #include <soc.h> #define DT_DRV_COMPAT renesas_rzt2m_uart LOG_MODULE_REGISTER(uart_renesas_rzt2m, CONFIG_UART_LOG_LEVEL); struct rzt2m_device_config { mm_reg_t base; const struct pinctrl_dev_config *pin_config; uart_irq_config_func_t irq_config_func; }; struct rzt2m_device_data { struct uart_config uart_cfg; struct k_spinlock lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *callback_data; #endif }; static int rzt2m_poll_in(const struct device *dev, unsigned char *c) { if (!dev || !dev->config || !dev->data) { return -ENODEV; } const struct rzt2m_device_config *config = dev->config; struct rzt2m_device_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); if (FRSR_R(*FRSR(config->base)) == 0) { k_spin_unlock(&data->lock, key); return -1; } *c = *RDR(config->base) & RDR_MASK_RDAT; *CFCLR(config->base) |= CFCLR_MASK_RDRFC; if (FRSR_R(*FRSR(config->base)) == 0) { *FFCLR(config->base) |= FFCLR_MASK_DRC; } k_spin_unlock(&data->lock, key); return 0; } static void rzt2m_poll_out(const struct device *dev, unsigned char c) { if (!dev || !dev->config || !dev->data) { return; } const struct rzt2m_device_config *config = dev->config; struct rzt2m_device_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); int fifo_count = FTSR_T(*FTSR(config->base)); while (fifo_count == MAX_FIFO_DEPTH) { fifo_count = FTSR_T(*FTSR(config->base)); } *TDR(config->base) = c; /* Clear `Transmit data empty flag`. 
*/ *CFCLR(config->base) |= CFCLR_MASK_TDREC; k_spin_unlock(&data->lock, key); } static int rzt2m_err_check(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; uint32_t status = *CSR(config->base); uint32_t retval = 0; if (status & CSR_MASK_ORER) { retval |= UART_ERROR_OVERRUN; } if (status & CSR_MASK_FER) { retval |= UART_ERROR_FRAMING; } if (status & CSR_MASK_PER) { retval |= UART_ERROR_PARITY; } return retval; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_rzt2m_irq_tx_ready(const struct device *dev); static int rzt2m_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { struct rzt2m_device_data *data = dev->data; const struct rzt2m_device_config *config = dev->config; int num_tx = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); while ((size - num_tx > 0) && uart_rzt2m_irq_tx_ready(dev)) { *TDR(config->base) = (uint8_t)tx_data[num_tx++]; } k_spin_unlock(&data->lock, key); return num_tx; } static int rzt2m_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct rzt2m_device_data *data = dev->data; const struct rzt2m_device_config *config = dev->config; int num_rx = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); while (num_rx < size && (FRSR_R(*FRSR(config->base)))) { rx_data[num_rx++] = *RDR(config->base); } *CFCLR(config->base) = CFCLR_MASK_RDRFC; *FFCLR(config->base) = FFCLR_MASK_DRC; k_spin_unlock(&data->lock, key); return num_rx; } static void uart_rzt2m_irq_rx_enable(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; *CCR0(config->base) |= CCR0_MASK_RIE | CCR0_MASK_RE; } static void uart_rzt2m_irq_rx_disable(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; *CCR0(config->base) &= ~CCR0_MASK_RIE; } static void uart_rzt2m_irq_tx_enable(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; /* These bits must be set simultaneously. 
*/ *CCR0(config->base) |= CCR0_MASK_TE | CCR0_MASK_TIE | CCR0_MASK_TEIE; } static void uart_rzt2m_irq_tx_disable(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; *CCR0(config->base) &= ~(CCR0_MASK_TIE | CCR0_MASK_TEIE); } static int uart_rzt2m_irq_tx_ready(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; if (FTSR_T(*FTSR(config->base)) == MAX_FIFO_DEPTH || ((*CCR0(config->base) & CCR0_MASK_TIE) == 0)) { return 0; } return 1; } static int uart_rzt2m_irq_rx_ready(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; if (FRSR_R(*FRSR(config->base))) { return 1; } return 0; } static int uart_rzt2m_irq_is_pending(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; if ((*CSR(config->base) & (CSR_MASK_RDRF)) || (*FRSR(config->base) & FRSR_MASK_DR)) { return 1; } return 0; } static void uart_rzt2m_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct rzt2m_device_data *data = dev->data; data->callback = cb; data->callback_data = cb_data; } static int uart_rzt2m_irq_update(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; *CFCLR(config->base) = CFCLR_MASK_RDRFC; *FFCLR(config->base) = FFCLR_MASK_DRC; return 1; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api rzt2m_uart_api = { .poll_in = rzt2m_poll_in, .poll_out = rzt2m_poll_out, .err_check = rzt2m_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = rzt2m_fifo_fill, .fifo_read = rzt2m_fifo_read, .irq_rx_enable = uart_rzt2m_irq_rx_enable, .irq_rx_disable = uart_rzt2m_irq_rx_disable, .irq_tx_enable = uart_rzt2m_irq_tx_enable, .irq_tx_disable = uart_rzt2m_irq_tx_disable, .irq_tx_ready = uart_rzt2m_irq_tx_ready, .irq_rx_ready = uart_rzt2m_irq_rx_ready, .irq_is_pending = uart_rzt2m_irq_is_pending, .irq_callback_set = uart_rzt2m_irq_callback_set, .irq_update = uart_rzt2m_irq_update, 
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static int rzt2m_module_start(const struct device *dev) { if (!dev || !dev->config || !dev->data) { return -ENODEV; } const struct rzt2m_device_config *config = dev->config; struct rzt2m_device_data *data = dev->data; int interface_id = BASE_TO_IFACE_ID(config->base); unsigned int irqkey = irq_lock(); volatile uint32_t dummy; k_spinlock_key_t key = k_spin_lock(&data->lock); if (interface_id < 5) { /* Dummy-read at least one time as stated in 8.3.1 of the User's Manual: Hardware */ *MSTPCRA &= ~(MSTPCRA_MASK_SCIx(interface_id)); dummy = *MSTPCRA; } else { LOG_ERR("SCI modules in the secure domain on RZT2M are not supported."); return -ENOTSUP; } /* Dummy-read at least five times as stated in 8.3.1 of the User's Manual: Hardware */ dummy = *RDR(config->base); dummy = *RDR(config->base); dummy = *RDR(config->base); dummy = *RDR(config->base); dummy = *RDR(config->base); k_spin_unlock(&data->lock, key); irq_unlock(irqkey); return 0; } static int rzt2m_uart_init(const struct device *dev) { const struct rzt2m_device_config *config = dev->config; struct rzt2m_device_data *data = dev->data; uint32_t baud_setting = 0; uint32_t baud_settings[] = {CCR2_BAUD_SETTING_9600, CCR2_BAUD_SETTING_115200}; rzt2m_unlock_prcrs(PRCRS_GPIO); rzt2m_unlock_prcrn(PRCRN_PRC1 | PRCRN_PRC2); /* The module needs to be started * to allow any operation on the registers of Serial Communications Interface. */ int ret = rzt2m_module_start(dev); if (ret) { return ret; } /* Disable transmitter, receiver, interrupts. 
*/ *CCR0(config->base) = CCR0_DEFAULT_VALUE; while (*CCR0(config->base) & (CCR0_MASK_RE | CCR0_MASK_TE)) { } *CCR1(config->base) = CCR1_DEFAULT_VALUE; *CCR2(config->base) = CCR2_DEFAULT_VALUE; *CCR3(config->base) = CCR3_DEFAULT_VALUE; *CCR4(config->base) = CCR4_DEFAULT_VALUE; /* Configure pinmuxes */ ret = pinctrl_apply_state(config->pin_config, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } *CFCLR(config->base) = CFCLR_ALL_FLAG_CLEAR; *FFCLR(config->base) = FFCLR_MASK_DRC; /* Use FIFO mode. */ *CCR3(config->base) |= (CCR3_MASK_FM); switch (data->uart_cfg.stop_bits) { case UART_CFG_STOP_BITS_1: /* Default value, already set. */ break; case UART_CFG_STOP_BITS_2: *CCR3(config->base) |= CCR3_MASK_STP; break; default: LOG_ERR("Selected bit stop length is not supported: %u.", data->uart_cfg.stop_bits); return -ENOTSUP; } switch (data->uart_cfg.data_bits) { case UART_CFG_DATA_BITS_7: *CCR3(config->base) |= CCR3_CHR_7BIT; break; case UART_CFG_DATA_BITS_8: *CCR3(config->base) |= CCR3_CHR_8BIT; break; default: LOG_ERR("Selected number of data bits is not supported: %u.", data->uart_cfg.data_bits); return -ENOTSUP; } if (data->uart_cfg.baudrate > ARRAY_SIZE(baud_settings)) { LOG_ERR("Selected baudrate variant is not supported: %u.", data->uart_cfg.baudrate); return -ENOTSUP; } baud_setting = baud_settings[data->uart_cfg.baudrate]; *CCR2(config->base) &= ~(CCR2_MASK_BAUD_SETTING); *CCR2(config->base) |= (baud_setting & CCR2_MASK_BAUD_SETTING); *CCR1(config->base) |= (CCR1_MASK_NFEN | CCR1_MASK_SPB2DT | CCR1_MASK_SPB2IO); switch (data->uart_cfg.parity) { case UART_CFG_PARITY_NONE: /* Default value, already set. */ break; case UART_CFG_PARITY_EVEN: *CCR1(config->base) |= CCR1_MASK_PE; break; case UART_CFG_PARITY_ODD: *CCR1(config->base) |= (CCR1_MASK_PE | CCR1_MASK_PM); break; default: LOG_ERR("Unsupported parity: %u", data->uart_cfg.parity); } /* Specify trigger thresholds and clear FIFOs. 
*/
	*FCR(config->base) = FCR_MASK_TFRST | FCR_MASK_RFRST | FCR_TTRG_15 | FCR_RTRG_15;

	/* Enable the clock. */
	*CCR3(config->base) &= ~CCR3_MASK_CKE;
	*CCR3(config->base) |= CCR3_CKE_ENABLE;

	/* Clear status flags. */
	*CFCLR(config->base) = CFCLR_ALL_FLAG_CLEAR;
	*FFCLR(config->base) = FFCLR_MASK_DRC;

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	config->irq_config_func(dev);
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

	/* Start transmitter and receiver. */
	*CCR0(config->base) |= (CCR0_MASK_TE | CCR0_MASK_RE);
	/* Busy-wait until the hardware reports RX/TX actually enabled. */
	while (!(*CCR0(config->base) & CCR0_MASK_RE)) {
	}
	while (!(*CCR0(config->base) & CCR0_MASK_TE)) {
	}

	/* Re-lock the protection registers opened at the top of init. */
	rzt2m_lock_prcrs(PRCRS_GPIO);
	rzt2m_lock_prcrn(PRCRN_PRC1 | PRCRN_PRC2);

	return 0;
}

/* Shared ISR for all named IRQ lines: forwards to the user callback (if
 * registered), then acknowledges the RDRF and FIFO data-ready flags.
 */
static void uart_rzt2m_isr(const struct device *dev)
{
	const struct rzt2m_device_config *config = dev->config;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	struct rzt2m_device_data *data = dev->data;

	if (data->callback) {
		data->callback(dev, data->callback_data);
	}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
	*CFCLR(config->base) = CFCLR_MASK_RDRFC;
	*FFCLR(config->base) = FFCLR_MASK_DRC;
}

/* Connect and enable one named devicetree IRQ for instance n. */
#define UART_RZT2M_IRQ_CONNECT(n, irq_name)                                                        \
	do {                                                                                       \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, irq_name, irq),                                 \
			    DT_INST_IRQ_BY_NAME(n, irq_name, priority), uart_rzt2m_isr,            \
			    DEVICE_DT_INST_GET(n), DT_INST_IRQ_BY_NAME(n, irq_name, flags));       \
		irq_enable(DT_INST_IRQ_BY_NAME(n, irq_name, irq));                                 \
	} while (false)

/* Per-instance IRQ configuration hook: wires up all four SCI interrupt lines. */
#define UART_RZT2M_CONFIG_FUNC(n)                                                                  \
	static void uart##n##_rzt2m_irq_config(const struct device *port)                          \
	{                                                                                          \
		UART_RZT2M_IRQ_CONNECT(n, rx_err);                                                 \
		UART_RZT2M_IRQ_CONNECT(n, rx);                                                     \
		UART_RZT2M_IRQ_CONNECT(n, tx);                                                     \
		UART_RZT2M_IRQ_CONNECT(n, tx_end);                                                 \
	}

/* Per-instance device definition; note .baudrate stores the DT enum INDEX,
 * not a bits-per-second value (macro continues past this view).
 */
#define UART_RZT2M_INIT(n)                                                                         \
	PINCTRL_DT_INST_DEFINE(n);                                                                 \
	static struct rzt2m_device_data rzt2m_uart_##n##data = {                                   \
		.uart_cfg =                                                                        \
			{                                                                          \
				.baudrate = DT_INST_ENUM_IDX(n, current_speed),                    \
				.parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE),   \
				.stop_bits =                                                       \
					DT_INST_ENUM_IDX_OR(n, stop_bits, UART_CFG_STOP_BITS_1),  \
				.data_bits =                                                       \
					DT_INST_ENUM_IDX_OR(n, data_bits,
UART_CFG_DATA_BITS_8),                             \
			},                                                                         \
	};                                                                                         \
	UART_RZT2M_CONFIG_FUNC(n);                                                                 \
	static const struct rzt2m_device_config rzt2m_uart_##n##_config = {                        \
		.base = DT_INST_REG_ADDR(n),                                                       \
		.irq_config_func = uart##n##_rzt2m_irq_config,                                     \
		.pin_config = PINCTRL_DT_INST_DEV_CONFIG_GET(n)};                                  \
	DEVICE_DT_INST_DEFINE(n, rzt2m_uart_init, NULL, &rzt2m_uart_##n##data,                     \
			      &rzt2m_uart_##n##_config, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \
			      &rzt2m_uart_api);

/* Instantiate the driver for every enabled devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(UART_RZT2M_INIT)
```
/content/code_sandbox/drivers/serial/uart_rzt2m.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,819
```c /* * */ #define DT_DRV_COMPAT nxp_kinetis_uart #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #include <fsl_uart.h> #include <soc.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/pinctrl.h> struct uart_mcux_config { UART_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif const struct pinctrl_dev_config *pincfg; }; struct uart_mcux_data { struct uart_config uart_cfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; static int uart_mcux_configure(const struct device *dev, const struct uart_config *cfg) { const struct uart_mcux_config *config = dev->config; struct uart_mcux_data *data = dev->data; uart_config_t uart_config; uint32_t clock_freq; status_t retval; if (!device_is_ready(config->clock_dev)) { return -ENODEV; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } UART_GetDefaultConfig(&uart_config); uart_config.enableTx = true; uart_config.enableRx = true; uart_config.baudRate_Bps = cfg->baudrate; switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: #if defined(FSL_FEATURE_UART_HAS_STOP_BIT_CONFIG_SUPPORT) && \ FSL_FEATURE_UART_HAS_STOP_BIT_CONFIG_SUPPORT uart_config.stopBitCount = kUART_OneStopBit; break; case UART_CFG_STOP_BITS_2: uart_config.stopBitCount = kUART_TwoStopBit; #endif break; default: return -ENOTSUP; } #if defined(FSL_FEATURE_UART_HAS_MODEM_SUPPORT) && FSL_FEATURE_UART_HAS_MODEM_SUPPORT switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: uart_config.enableRxRTS = false; uart_config.enableTxCTS = false; break; case UART_CFG_FLOW_CTRL_RTS_CTS: uart_config.enableRxRTS = true; uart_config.enableTxCTS = true; break; default: return -ENOTSUP; } #endif switch (cfg->parity) { case UART_CFG_PARITY_NONE: 
uart_config.parityMode = kUART_ParityDisabled; break; case UART_CFG_PARITY_EVEN: uart_config.parityMode = kUART_ParityEven; break; case UART_CFG_PARITY_ODD: uart_config.parityMode = kUART_ParityOdd; break; default: return -ENOTSUP; } retval = UART_Init(config->base, &uart_config, clock_freq); if (retval != kStatus_Success) { return -EINVAL; } data->uart_cfg = *cfg; return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_mcux_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_mcux_data *data = dev->data; *cfg = data->uart_cfg; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_mcux_poll_in(const struct device *dev, unsigned char *c) { const struct uart_mcux_config *config = dev->config; uint32_t flags = UART_GetStatusFlags(config->base); int ret = -1; if (flags & kUART_RxDataRegFullFlag) { *c = UART_ReadByte(config->base); ret = 0; } return ret; } static void uart_mcux_poll_out(const struct device *dev, unsigned char c) { const struct uart_mcux_config *config = dev->config; while (!(UART_GetStatusFlags(config->base) & kUART_TxDataRegEmptyFlag)) { } UART_WriteByte(config->base, c); } static int uart_mcux_err_check(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t flags = UART_GetStatusFlags(config->base); int err = 0; if (flags & kUART_RxOverrunFlag) { err |= UART_ERROR_OVERRUN; } if (flags & kUART_ParityErrorFlag) { err |= UART_ERROR_PARITY; } if (flags & kUART_FramingErrorFlag) { err |= UART_ERROR_FRAMING; } UART_ClearStatusFlags(config->base, kUART_RxOverrunFlag | kUART_ParityErrorFlag | kUART_FramingErrorFlag); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_mcux_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct uart_mcux_config *config = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && (UART_GetStatusFlags(config->base) & kUART_TxDataRegEmptyFlag)) { UART_WriteByte(config->base, 
tx_data[num_tx++]); } return num_tx; } static int uart_mcux_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { const struct uart_mcux_config *config = dev->config; uint8_t num_rx = 0U; while ((len - num_rx > 0) && (UART_GetStatusFlags(config->base) & kUART_RxDataRegFullFlag)) { rx_data[num_rx++] = UART_ReadByte(config->base); } return num_rx; } static void uart_mcux_irq_tx_enable(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_TxDataRegEmptyInterruptEnable; pm_device_busy_set(dev); UART_EnableInterrupts(config->base, mask); } static void uart_mcux_irq_tx_disable(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_TxDataRegEmptyInterruptEnable; pm_device_busy_clear(dev); UART_DisableInterrupts(config->base, mask); } static int uart_mcux_irq_tx_complete(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t flags = UART_GetStatusFlags(config->base); return (flags & kUART_TransmissionCompleteFlag) != 0U; } static int uart_mcux_irq_tx_ready(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_TxDataRegEmptyInterruptEnable; uint32_t flags = UART_GetStatusFlags(config->base); return (UART_GetEnabledInterrupts(config->base) & mask) && (flags & kUART_TxDataRegEmptyFlag); } static void uart_mcux_irq_rx_enable(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_RxDataRegFullInterruptEnable; UART_EnableInterrupts(config->base, mask); } static void uart_mcux_irq_rx_disable(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_RxDataRegFullInterruptEnable; UART_DisableInterrupts(config->base, mask); } static int uart_mcux_irq_rx_full(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t flags = UART_GetStatusFlags(config->base); return 
(flags & kUART_RxDataRegFullFlag) != 0U; } static int uart_mcux_irq_rx_pending(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_RxDataRegFullInterruptEnable; return (UART_GetEnabledInterrupts(config->base) & mask) && uart_mcux_irq_rx_full(dev); } static void uart_mcux_irq_err_enable(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_NoiseErrorInterruptEnable | kUART_FramingErrorInterruptEnable | kUART_ParityErrorInterruptEnable; UART_EnableInterrupts(config->base, mask); } static void uart_mcux_irq_err_disable(const struct device *dev) { const struct uart_mcux_config *config = dev->config; uint32_t mask = kUART_NoiseErrorInterruptEnable | kUART_FramingErrorInterruptEnable | kUART_ParityErrorInterruptEnable; UART_DisableInterrupts(config->base, mask); } static int uart_mcux_irq_is_pending(const struct device *dev) { return uart_mcux_irq_tx_ready(dev) || uart_mcux_irq_rx_pending(dev); } static int uart_mcux_irq_update(const struct device *dev) { return 1; } static void uart_mcux_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_mcux_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void uart_mcux_isr(const struct device *dev) { struct uart_mcux_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int uart_mcux_init(const struct device *dev) { const struct uart_mcux_config *config = dev->config; struct uart_mcux_data *data = dev->data; int err; err = uart_mcux_configure(dev, &data->uart_cfg); if (err != 0) { return err; } err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } #ifdef CONFIG_PM_DEVICE static int uart_mcux_pm_action(const struct device *dev, enum pm_device_action 
action) { const struct uart_mcux_config *config = dev->config; switch (action) { case PM_DEVICE_ACTION_RESUME: clock_control_on(config->clock_dev, config->clock_subsys); break; case PM_DEVICE_ACTION_SUSPEND: clock_control_off(config->clock_dev, config->clock_subsys); break; case PM_DEVICE_ACTION_TURN_OFF: return 0; case PM_DEVICE_ACTION_TURN_ON: return 0; default: return -ENOTSUP; } return 0; } #endif /*CONFIG_PM_DEVICE*/ static const struct uart_driver_api uart_mcux_driver_api = { .poll_in = uart_mcux_poll_in, .poll_out = uart_mcux_poll_out, .err_check = uart_mcux_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_mcux_configure, .config_get = uart_mcux_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_mcux_fifo_fill, .fifo_read = uart_mcux_fifo_read, .irq_tx_enable = uart_mcux_irq_tx_enable, .irq_tx_disable = uart_mcux_irq_tx_disable, .irq_tx_complete = uart_mcux_irq_tx_complete, .irq_tx_ready = uart_mcux_irq_tx_ready, .irq_rx_enable = uart_mcux_irq_rx_enable, .irq_rx_disable = uart_mcux_irq_rx_disable, .irq_rx_ready = uart_mcux_irq_rx_full, .irq_err_enable = uart_mcux_irq_err_enable, .irq_err_disable = uart_mcux_irq_err_disable, .irq_is_pending = uart_mcux_irq_is_pending, .irq_update = uart_mcux_irq_update, .irq_callback_set = uart_mcux_irq_callback_set, #endif }; #define UART_MCUX_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct uart_mcux_config uart_mcux_##n##_config = { \ .base = (UART_Type *)DT_INST_REG_ADDR(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),\ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ IRQ_FUNC_INIT \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_MCUX_CONFIG_FUNC(n) \ static void uart_mcux_config_func_##n(const struct device *dev) \ { \ UART_MCUX_IRQ(n, status); \ UART_MCUX_IRQ(n, error); \ } #define UART_MCUX_IRQ_INIT(n, name) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq), \ DT_INST_IRQ_BY_NAME(n, 
name, priority), \ uart_mcux_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQ_BY_NAME(n, name, irq)); \ } while (false) #define UART_MCUX_IRQ(n, name) \ COND_CODE_1(DT_INST_IRQ_HAS_NAME(n, name), \ (UART_MCUX_IRQ_INIT(n, name)), ()) #define UART_MCUX_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = uart_mcux_config_func_##n #define UART_MCUX_INIT_CFG(n) \ UART_MCUX_DECLARE_CFG(n, UART_MCUX_IRQ_CFG_FUNC_INIT(n)) #else #define UART_MCUX_CONFIG_FUNC(n) #define UART_MCUX_IRQ_CFG_FUNC_INIT #define UART_MCUX_INIT_CFG(n) \ UART_MCUX_DECLARE_CFG(n, UART_MCUX_IRQ_CFG_FUNC_INIT) #endif #define UART_MCUX_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ \ static struct uart_mcux_data uart_mcux_##n##_data = { \ .uart_cfg = { \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .flow_ctrl = DT_INST_PROP(n, hw_flow_control) ? \ UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE,\ }, \ }; \ \ static const struct uart_mcux_config uart_mcux_##n##_config; \ PM_DEVICE_DT_INST_DEFINE(n, uart_mcux_pm_action);\ \ DEVICE_DT_INST_DEFINE(n, \ uart_mcux_init, \ PM_DEVICE_DT_INST_GET(n), \ &uart_mcux_##n##_data, \ &uart_mcux_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_mcux_driver_api); \ \ UART_MCUX_CONFIG_FUNC(n) \ \ UART_MCUX_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_INIT) ```
/content/code_sandbox/drivers/serial/uart_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,337
```unknown
# Kconfig options for the NXP S32 LINFlexD UART driver.
config UART_NXP_S32_LINFLEXD
	bool "LINFlexD UART driver for NXP S32 family processors"
	default y
	depends on DT_HAS_NXP_S32_LINFLEXD_ENABLED
	select SERIAL_HAS_DRIVER
	select SERIAL_SUPPORT_INTERRUPT
	select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT
	help
	  Enable the LINFlexD UART driver for NXP S32 family processors.

if UART_NXP_S32_LINFLEXD

# Blocking-mode timeouts, in microseconds.
config UART_NXP_S32_POLL_OUT_TIMEOUT
	int "The maximum duration to transfer a byte data in blocking mode"
	default 10000
	help
	  Maximum duration in micro-seconds to transfer a byte data in blocking mode.

config UART_NXP_S32_POLL_IN_TIMEOUT
	int "The maximum duration to receive a byte data in blocking mode"
	default 10000
	help
	  Maximum duration in micro-seconds to receive a byte data in blocking mode.

endif # UART_NXP_S32_LINFLEXD
```
/content/code_sandbox/drivers/serial/Kconfig.nxp_s32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
204
```c /* * */ #define DT_DRV_COMPAT renesas_smartbond_uart #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/pm/device_runtime.h> #include <zephyr/kernel.h> #include <zephyr/spinlock.h> #include <zephyr/sys/byteorder.h> #include <DA1469xAB.h> #include <zephyr/irq.h> #include <da1469x_pd.h> #define IIR_NO_INTR 1 #define IIR_THR_EMPTY 2 #define IIR_RX_DATA 4 #define IIR_LINE_STATUS 5 #define IIR_BUSY 7 #define IIR_TIMEOUT 12 #define STOP_BITS_1 0 #define STOP_BITS_2 1 #define DATA_BITS_5 0 #define DATA_BITS_6 1 #define DATA_BITS_7 2 #define DATA_BITS_8 3 #define RX_FIFO_TRIG_1_CHAR 0 #define RX_FIFO_TRIG_1_4_FULL 1 #define RX_FIFO_TRIG_1_2_FULL 2 #define RX_FIFO_TRIG_MINUS_2_CHARS 3 #define TX_FIFO_TRIG_EMPTY 0 #define TX_FIFO_TRIG_2_CHARS 1 #define TX_FIFO_TRIG_1_4_FULL 2 #define TX_FIFO_TRIG_1_2_FULL 3 #define BAUDRATE_CFG_DLH(cfg) (((cfg) >> 16) & 0xff) #define BAUDRATE_CFG_DLL(cfg) (((cfg) >> 8) & 0xff) #define BAUDRATE_CFG_DLF(cfg) ((cfg) & 0xff) struct uart_smartbond_baudrate_cfg { uint32_t baudrate; /* DLH=cfg[23:16] DLL=cfg[15:8] DLF=cfg[7:0] */ uint32_t cfg; }; static const struct uart_smartbond_baudrate_cfg uart_smartbond_baudrate_table[] = { { 2000000, 0x00000100 }, { 1000000, 0x00000200 }, { 921600, 0x00000203 }, { 500000, 0x00000400 }, { 230400, 0x0000080b }, { 115200, 0x00001106 }, { 57600, 0x0000220c }, { 38400, 0x00003401 }, { 28800, 0x00004507 }, { 19200, 0x00006803 }, { 14400, 0x00008a0e }, { 9600, 0x0000d005 }, { 4800, 0x0001a00b }, }; struct uart_smartbond_cfg { UART2_Type *regs; int periph_clock_config; const struct pinctrl_dev_config *pcfg; bool hw_flow_control_supported; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif #if CONFIG_PM_DEVICE int rx_wake_timeout; struct gpio_dt_spec rx_wake_gpio; struct gpio_dt_spec dtr_gpio; #endif }; struct 
uart_smartbond_runtime_cfg { uint32_t baudrate_cfg; uint32_t lcr_reg_val; uint8_t mcr_reg_val; uint8_t ier_reg_val; }; struct uart_smartbond_data { struct uart_config current_config; struct uart_smartbond_runtime_cfg runtime_cfg; struct k_spinlock lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; uint32_t flags; uint8_t rx_enabled; uint8_t tx_enabled; #if CONFIG_PM_DEVICE struct gpio_callback dtr_wake_cb; const struct device *dev; struct gpio_callback rx_wake_cb; int rx_wake_timeout; struct k_work_delayable rx_timeout_work; #endif #endif }; #ifdef CONFIG_PM_DEVICE static inline void uart_smartbond_pm_prevent_system_sleep(void) { pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } static inline void uart_smartbond_pm_allow_system_sleep(void) { pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } static void uart_smartbond_pm_policy_state_lock_get(const struct device *dev) { #ifdef CONFIG_PM_DEVICE_RUNTIME pm_device_runtime_get(dev); #else ARG_UNUSED(dev); uart_smartbond_pm_prevent_system_sleep(); #endif } static void uart_smartbond_pm_policy_state_lock_put(const struct device *dev) { #ifdef CONFIG_PM_DEVICE_RUNTIME pm_device_runtime_put(dev); #else ARG_UNUSED(dev); uart_smartbond_pm_allow_system_sleep(); #endif } static void uart_smartbond_rx_refresh_timeout(struct k_work *work) { struct uart_smartbond_data *data = CONTAINER_OF(work, struct uart_smartbond_data, rx_timeout_work.work); uart_smartbond_pm_policy_state_lock_put(data->dev); } #endif static int uart_smartbond_poll_in(const struct device *dev, unsigned char *p_char) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); pm_device_runtime_get(dev); if ((config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_RFNE_Msk) == 0) { pm_device_runtime_put(dev); k_spin_unlock(&data->lock, key); return -1; } *p_char = config->regs->UART2_RBR_THR_DLL_REG; 
pm_device_runtime_put(dev); k_spin_unlock(&data->lock, key); return 0; } static void uart_smartbond_poll_out(const struct device *dev, unsigned char out_char) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); pm_device_runtime_get(dev); while (!(config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_TFNF_Msk)) { /* Wait until FIFO has free space */ } config->regs->UART2_RBR_THR_DLL_REG = out_char; pm_device_runtime_put(dev); k_spin_unlock(&data->lock, key); } static void apply_runtime_config(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key; key = k_spin_lock(&data->lock); CRG_COM->SET_CLK_COM_REG = config->periph_clock_config; config->regs->UART2_MCR_REG = data->runtime_cfg.mcr_reg_val; config->regs->UART2_SRR_REG = UART2_UART2_SRR_REG_UART_UR_Msk | UART2_UART2_SRR_REG_UART_RFR_Msk | UART2_UART2_SRR_REG_UART_XFR_Msk; /* Configure baudrate */ config->regs->UART2_LCR_REG |= UART2_UART2_LCR_REG_UART_DLAB_Msk; config->regs->UART2_IER_DLH_REG = BAUDRATE_CFG_DLH(data->runtime_cfg.baudrate_cfg); config->regs->UART2_RBR_THR_DLL_REG = BAUDRATE_CFG_DLL(data->runtime_cfg.baudrate_cfg); config->regs->UART2_DLF_REG = BAUDRATE_CFG_DLF(data->runtime_cfg.baudrate_cfg); config->regs->UART2_LCR_REG &= ~UART2_UART2_LCR_REG_UART_DLAB_Msk; /* Configure frame */ config->regs->UART2_LCR_REG = data->runtime_cfg.lcr_reg_val; /* Enable hardware FIFO */ config->regs->UART2_SFE_REG = UART2_UART2_SFE_REG_UART_SHADOW_FIFO_ENABLE_Msk; config->regs->UART2_SRT_REG = RX_FIFO_TRIG_1_CHAR; config->regs->UART2_STET_REG = TX_FIFO_TRIG_1_2_FULL; config->regs->UART2_IER_DLH_REG = data->runtime_cfg.ier_reg_val; k_spin_unlock(&data->lock, key); } static int uart_smartbond_configure(const struct device *dev, const struct uart_config *cfg) { const struct uart_smartbond_cfg *config = dev->config; struct 
uart_smartbond_data *data = dev->data; uint32_t baudrate_cfg = 0; uint32_t lcr_reg_val; int err; int i; if ((cfg->parity != UART_CFG_PARITY_NONE && cfg->parity != UART_CFG_PARITY_ODD && cfg->parity != UART_CFG_PARITY_EVEN) || (cfg->stop_bits != UART_CFG_STOP_BITS_1 && cfg->stop_bits != UART_CFG_STOP_BITS_2) || (cfg->data_bits != UART_CFG_DATA_BITS_5 && cfg->data_bits != UART_CFG_DATA_BITS_6 && cfg->data_bits != UART_CFG_DATA_BITS_7 && cfg->data_bits != UART_CFG_DATA_BITS_8) || (cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE && cfg->flow_ctrl != UART_CFG_FLOW_CTRL_RTS_CTS)) { return -ENOTSUP; } /* Flow control is not supported on UART */ if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS && !config->hw_flow_control_supported) { return -ENOTSUP; } /* Lookup configuration for baudrate */ for (i = 0; i < ARRAY_SIZE(uart_smartbond_baudrate_table); i++) { if (uart_smartbond_baudrate_table[i].baudrate == cfg->baudrate) { baudrate_cfg = uart_smartbond_baudrate_table[i].cfg; break; } } if (baudrate_cfg == 0) { return -ENOTSUP; } /* Calculate frame configuration register value */ lcr_reg_val = 0; switch (cfg->parity) { case UART_CFG_PARITY_NONE: break; case UART_CFG_PARITY_EVEN: lcr_reg_val |= UART2_UART2_LCR_REG_UART_EPS_Msk; /* no break */ case UART_CFG_PARITY_ODD: lcr_reg_val |= UART2_UART2_LCR_REG_UART_PEN_Msk; break; } if (cfg->stop_bits == UART_CFG_STOP_BITS_2) { lcr_reg_val |= STOP_BITS_2 << UART2_UART2_LCR_REG_UART_STOP_Pos; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_6: lcr_reg_val |= DATA_BITS_6 << UART2_UART2_LCR_REG_UART_DLS_Pos; break; case UART_CFG_DATA_BITS_7: lcr_reg_val |= DATA_BITS_7 << UART2_UART2_LCR_REG_UART_DLS_Pos; break; case UART_CFG_DATA_BITS_8: lcr_reg_val |= DATA_BITS_8 << UART2_UART2_LCR_REG_UART_DLS_Pos; break; } data->runtime_cfg.baudrate_cfg = baudrate_cfg; data->runtime_cfg.lcr_reg_val = lcr_reg_val; data->runtime_cfg.mcr_reg_val = cfg->flow_ctrl ? 
UART2_UART2_MCR_REG_UART_AFCE_Msk : 0; pm_device_runtime_get(dev); apply_runtime_config(dev); pm_device_runtime_put(dev); data->current_config = *cfg; err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_smartbond_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_smartbond_data *data = dev->data; *cfg = data->current_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #if CONFIG_PM_DEVICE static void uart_smartbond_wake_handler(const struct device *gpio, struct gpio_callback *cb, uint32_t pins) { struct uart_smartbond_data *data = CONTAINER_OF(cb, struct uart_smartbond_data, rx_wake_cb); /* Disable interrupts on UART RX pin to avoid repeated interrupts. */ (void)gpio_pin_interrupt_configure(gpio, (find_msb_set(pins) - 1), GPIO_INT_DISABLE); /* Refresh console expired time */ if (data->rx_wake_timeout) { uart_smartbond_pm_policy_state_lock_get(data->dev); k_work_reschedule(&data->rx_timeout_work, K_MSEC(data->rx_wake_timeout)); } } static void uart_smartbond_dtr_handler(const struct device *gpio, struct gpio_callback *cb, uint32_t pins) { struct uart_smartbond_data *data = CONTAINER_OF(cb, struct uart_smartbond_data, dtr_wake_cb); int pin = find_lsb_set(pins) - 1; if (gpio_pin_get(gpio, pin) == 1) { uart_smartbond_pm_policy_state_lock_put(data->dev); } else { uart_smartbond_pm_policy_state_lock_get(data->dev); } } #endif static int uart_smartbond_init(const struct device *dev) { struct uart_smartbond_data *data = dev->data; int ret = 0; #ifdef CONFIG_PM_DEVICE_RUNTIME /* Make sure device state is marked as suspended */ pm_device_init_suspended(dev); ret = pm_device_runtime_enable(dev); #else da1469x_pd_acquire(MCU_PD_DOMAIN_COM); #endif #ifdef CONFIG_PM_DEVICE int rx_wake_timeout; const struct uart_smartbond_cfg *config = dev->config; const struct device *uart_console_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console)); 
data->dev = dev; /* All uarts can have wake time specified in device tree to keep * device awake after receiving data */ rx_wake_timeout = config->rx_wake_timeout; if (dev == uart_console_dev) { #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED /* For device configured as console wake time is taken from * Kconfig same way it is configured for other platforms */ rx_wake_timeout = CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT; #endif } /* If DTR pin is configured, use it for power management */ if (config->dtr_gpio.port != NULL) { gpio_init_callback(&data->dtr_wake_cb, uart_smartbond_dtr_handler, BIT(config->dtr_gpio.pin)); ret = gpio_add_callback(config->dtr_gpio.port, &data->dtr_wake_cb); if (ret == 0) { ret = gpio_pin_interrupt_configure_dt(&config->dtr_gpio, GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_BOTH); /* Check if DTR is already active (low), if so lock power state */ if (gpio_pin_get(config->dtr_gpio.port, config->dtr_gpio.pin) == 0) { uart_smartbond_pm_policy_state_lock_get(dev); } } } if (rx_wake_timeout > 0 && config->rx_wake_gpio.port != NULL) { k_work_init_delayable(&data->rx_timeout_work, uart_smartbond_rx_refresh_timeout); gpio_init_callback(&data->rx_wake_cb, uart_smartbond_wake_handler, BIT(config->rx_wake_gpio.pin)); ret = gpio_add_callback(config->rx_wake_gpio.port, &data->rx_wake_cb); if (ret == 0) { data->rx_wake_timeout = rx_wake_timeout; } } #endif ret = uart_smartbond_configure(dev, &data->current_config); #ifndef CONFIG_PM_DEVICE_RUNTIME if (ret < 0) { da1469x_pd_release(MCU_PD_DOMAIN_COM); } #endif return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static inline void irq_tx_enable(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; config->regs->UART2_IER_DLH_REG |= UART2_UART2_IER_DLH_REG_PTIME_DLH7_Msk | UART2_UART2_IER_DLH_REG_ETBEI_DLH1_Msk; data->runtime_cfg.ier_reg_val = config->regs->UART2_IER_DLH_REG; } static inline void irq_tx_disable(const struct device *dev) { const struct 
uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; config->regs->UART2_IER_DLH_REG &= ~(UART2_UART2_IER_DLH_REG_PTIME_DLH7_Msk | UART2_UART2_IER_DLH_REG_ETBEI_DLH1_Msk); data->runtime_cfg.ier_reg_val = config->regs->UART2_IER_DLH_REG; } static inline void irq_rx_enable(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; config->regs->UART2_IER_DLH_REG |= UART2_UART2_IER_DLH_REG_ERBFI_DLH0_Msk; data->runtime_cfg.ier_reg_val = config->regs->UART2_IER_DLH_REG; } static inline void irq_rx_disable(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; config->regs->UART2_IER_DLH_REG &= ~UART2_UART2_IER_DLH_REG_ERBFI_DLH0_Msk; data->runtime_cfg.ier_reg_val = config->regs->UART2_IER_DLH_REG; } static int uart_smartbond_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; int num_tx = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); pm_device_runtime_get(dev); while ((len - num_tx > 0) && (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_TFNF_Msk)) { config->regs->UART2_RBR_THR_DLL_REG = tx_data[num_tx++]; } if (data->tx_enabled) { irq_tx_enable(dev); } pm_device_runtime_put(dev); k_spin_unlock(&data->lock, key); return num_tx; } static int uart_smartbond_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; int num_rx = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); pm_device_runtime_get(dev); while ((size - num_rx > 0) && (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_RFNE_Msk)) { rx_data[num_rx++] = config->regs->UART2_RBR_THR_DLL_REG; } if (data->rx_enabled) { irq_rx_enable(dev); } #ifdef CONFIG_PM_DEVICE if 
(data->rx_wake_timeout) { k_work_reschedule(&data->rx_timeout_work, K_MSEC(data->rx_wake_timeout)); } #endif pm_device_runtime_put(dev); k_spin_unlock(&data->lock, key); return num_rx; } static void uart_smartbond_irq_tx_enable(const struct device *dev) { struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); data->tx_enabled = 1; irq_tx_enable(dev); k_spin_unlock(&data->lock, key); } static void uart_smartbond_irq_tx_disable(const struct device *dev) { struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); irq_tx_disable(dev); data->tx_enabled = 0; k_spin_unlock(&data->lock, key); } static int uart_smartbond_irq_tx_ready(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; bool ret = (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_TFNF_Msk) != 0; return ret; } static void uart_smartbond_irq_rx_enable(const struct device *dev) { struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); data->rx_enabled = 1; irq_rx_enable(dev); k_spin_unlock(&data->lock, key); } static void uart_smartbond_irq_rx_disable(const struct device *dev) { struct uart_smartbond_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); irq_rx_disable(dev); data->rx_enabled = 0; k_spin_unlock(&data->lock, key); } static int uart_smartbond_irq_tx_complete(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; bool ret = (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_TFE_Msk) != 0; return ret; } static int uart_smartbond_irq_rx_ready(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; bool ret = (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_RFNE_Msk) != 0; return ret; } static void uart_smartbond_irq_err_enable(const struct device *dev) { k_panic(); } static void uart_smartbond_irq_err_disable(const struct device *dev) { k_panic(); } static int 
uart_smartbond_irq_is_pending(const struct device *dev) { k_panic(); return 0; } static int uart_smartbond_irq_update(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; bool no_intr = false; while (!no_intr) { switch (config->regs->UART2_IIR_FCR_REG & 0x0F) { case IIR_NO_INTR: no_intr = true; break; case IIR_THR_EMPTY: irq_tx_disable(dev); break; case IIR_RX_DATA: irq_rx_disable(dev); break; case IIR_LINE_STATUS: case IIR_TIMEOUT: /* ignore */ break; case IIR_BUSY: /* busy detect */ /* fall-through */ default: k_panic(); break; } } return 1; } static void uart_smartbond_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_smartbond_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void uart_smartbond_isr(const struct device *dev) { struct uart_smartbond_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_PM_DEVICE static int uart_disable(const struct device *dev) { const struct uart_smartbond_cfg *config = dev->config; struct uart_smartbond_data *data = dev->data; /* Store IER register in case UART will go to sleep */ data->runtime_cfg.ier_reg_val = config->regs->UART2_IER_DLH_REG; if (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_RFNE_Msk) { return -EBUSY; } while (!(config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_TFE_Msk) || (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_BUSY_Msk)) { /* Wait until FIFO is empty and UART finished tx */ if (config->regs->UART2_USR_REG & UART2_UART2_USR_REG_UART_RFNE_Msk) { return -EBUSY; } } CRG_COM->RESET_CLK_COM_REG = config->periph_clock_config; da1469x_pd_release(MCU_PD_DOMAIN_COM); return 0; } static int uart_smartbond_pm_action(const struct device *dev, enum pm_device_action action) { const struct uart_smartbond_cfg *config; int ret = 0; switch (action) { case PM_DEVICE_ACTION_RESUME: #ifdef 
CONFIG_PM_DEVICE_RUNTIME uart_smartbond_pm_prevent_system_sleep(); #endif da1469x_pd_acquire(MCU_PD_DOMAIN_COM); apply_runtime_config(dev); break; case PM_DEVICE_ACTION_SUSPEND: config = dev->config; ret = uart_disable(dev); if (ret == 0 && config->rx_wake_gpio.port != NULL) { ret = gpio_pin_interrupt_configure_dt(&config->rx_wake_gpio, GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_LOW); } #ifdef CONFIG_PM_DEVICE_RUNTIME uart_smartbond_pm_allow_system_sleep(); #endif break; default: ret = -ENOTSUP; } return ret; } #endif /* CONFIG_PM_DEVICE */ static const struct uart_driver_api uart_smartbond_driver_api = { .poll_in = uart_smartbond_poll_in, .poll_out = uart_smartbond_poll_out, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_smartbond_configure, .config_get = uart_smartbond_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_smartbond_fifo_fill, .fifo_read = uart_smartbond_fifo_read, .irq_tx_enable = uart_smartbond_irq_tx_enable, .irq_tx_disable = uart_smartbond_irq_tx_disable, .irq_tx_ready = uart_smartbond_irq_tx_ready, .irq_rx_enable = uart_smartbond_irq_rx_enable, .irq_rx_disable = uart_smartbond_irq_rx_disable, .irq_tx_complete = uart_smartbond_irq_tx_complete, .irq_rx_ready = uart_smartbond_irq_rx_ready, .irq_err_enable = uart_smartbond_irq_err_enable, .irq_err_disable = uart_smartbond_irq_err_disable, .irq_is_pending = uart_smartbond_irq_is_pending, .irq_update = uart_smartbond_irq_update, .irq_callback_set = uart_smartbond_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_SMARTBOND_CONFIGURE(id) \ do { \ IRQ_CONNECT(DT_INST_IRQN(id), \ DT_INST_IRQ(id, priority), \ uart_smartbond_isr, \ DEVICE_DT_INST_GET(id), 0); \ \ irq_enable(DT_INST_IRQN(id)); \ } while (0) #else #define UART_SMARTBOND_CONFIGURE(id) #endif #ifdef CONFIG_PM_DEVICE #define UART_PM_WAKE_RX_TIMEOUT(n) \ .rx_wake_timeout = (DT_INST_PROP_OR(n, rx_wake_timeout, 0)), #define UART_PM_WAKE_RX_PIN(n) \ .rx_wake_gpio 
= GPIO_DT_SPEC_INST_GET_OR(n, rx_wake_gpios, {0}), #define UART_PM_WAKE_DTR_PIN(n) \ .dtr_gpio = GPIO_DT_SPEC_INST_GET_OR(n, dtr_gpios, {0}), #else #define UART_PM_WAKE_RX_PIN(n) /* Not used */ #define UART_PM_WAKE_RX_TIMEOUT(n) /* Not used */ #define UART_PM_WAKE_DTR_PIN(n) /* Not used */ #endif #define UART_SMARTBOND_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static const struct uart_smartbond_cfg uart_smartbond_##id##_cfg = { \ .regs = (UART2_Type *)DT_INST_REG_ADDR(id), \ .periph_clock_config = DT_INST_PROP(id, periph_clock_config), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ .hw_flow_control_supported = DT_INST_PROP(id, hw_flow_control_supported), \ UART_PM_WAKE_RX_TIMEOUT(id) \ UART_PM_WAKE_RX_PIN(id) \ UART_PM_WAKE_DTR_PIN(id) \ }; \ static struct uart_smartbond_data uart_smartbond_##id##_data = { \ .current_config = { \ .baudrate = DT_INST_PROP(id, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, \ }, \ }; \ static int uart_smartbond_##id##_init(const struct device *dev) \ { \ UART_SMARTBOND_CONFIGURE(id); \ return uart_smartbond_init(dev); \ } \ PM_DEVICE_DT_INST_DEFINE(id, uart_smartbond_pm_action); \ DEVICE_DT_INST_DEFINE(id, \ uart_smartbond_##id##_init, \ PM_DEVICE_DT_INST_GET(id), \ &uart_smartbond_##id##_data, \ &uart_smartbond_##id##_cfg, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_smartbond_driver_api); \ DT_INST_FOREACH_STATUS_OKAY(UART_SMARTBOND_DEVICE) ```
/content/code_sandbox/drivers/serial/uart_smartbond.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,461
```unknown # MAX32 UART configuration config UART_MAX32 bool "MAX32 MCU serial driver" default y depends on DT_HAS_ADI_MAX32_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select PINCTRL help This option enables the UART driver for MAX32 family of processors. Say y if you wish to use serial port on MAX32 MCU. ```
/content/code_sandbox/drivers/serial/Kconfig.max32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
82
```c /* * */ #define DT_DRV_COMPAT renesas_rcar_scif #include <errno.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/renesas_cpg_mssr.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <zephyr/spinlock.h> struct uart_rcar_cfg { DEVICE_MMIO_ROM; /* Must be first */ const struct device *clock_dev; struct rcar_cpg_clk mod_clk; struct rcar_cpg_clk bus_clk; const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif bool is_hscif; }; struct uart_rcar_data { DEVICE_MMIO_RAM; /* Must be first */ struct uart_config current_config; uint32_t clk_rate; struct k_spinlock lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; /* Registers */ #define SCSMR 0x00 /* Serial Mode Register */ #define SCBRR 0x04 /* Bit Rate Register */ #define SCSCR 0x08 /* Serial Control Register */ #define SCFTDR 0x0c /* Transmit FIFO Data Register */ #define SCFSR 0x10 /* Serial Status Register */ #define SCFRDR 0x14 /* Receive FIFO Data Register */ #define SCFCR 0x18 /* FIFO Control Register */ #define SCFDR 0x1c /* FIFO Data Count Register */ #define SCSPTR 0x20 /* Serial Port Register */ #define SCLSR 0x24 /* Line Status Register */ #define DL 0x30 /* Frequency Division Register */ #define CKS 0x34 /* Clock Select Register */ #define HSSRR 0x40 /* Sampling Rate Register */ /* SCSMR (Serial Mode Register) */ #define SCSMR_C_A BIT(7) /* Communication Mode */ #define SCSMR_CHR BIT(6) /* 7-bit Character Length */ #define SCSMR_PE BIT(5) /* Parity Enable */ #define SCSMR_O_E BIT(4) /* Odd Parity */ #define SCSMR_STOP BIT(3) /* Stop Bit Length */ #define SCSMR_CKS1 BIT(1) /* Clock Select 1 */ #define SCSMR_CKS0 BIT(0) /* Clock Select 0 */ /* SCSCR (Serial Control Register) */ #define SCSCR_TEIE BIT(11) /* Transmit End Interrupt 
Enable */ #define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */ #define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */ #define SCSCR_TE BIT(5) /* Transmit Enable */ #define SCSCR_RE BIT(4) /* Receive Enable */ #define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable */ #define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable */ #define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */ #define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */ /* SCFCR (FIFO Control Register) */ #define SCFCR_RTRG1 BIT(7) /* Receive FIFO Data Count Trigger 1 */ #define SCFCR_RTRG0 BIT(6) /* Receive FIFO Data Count Trigger 0 */ #define SCFCR_TTRG1 BIT(5) /* Transmit FIFO Data Count Trigger 1 */ #define SCFCR_TTRG0 BIT(4) /* Transmit FIFO Data Count Trigger 0 */ #define SCFCR_MCE BIT(3) /* Modem Control Enable */ #define SCFCR_TFRST BIT(2) /* Transmit FIFO Data Register Reset */ #define SCFCR_RFRST BIT(1) /* Receive FIFO Data Register Reset */ #define SCFCR_LOOP BIT(0) /* Loopback Test */ /* SCFSR (Serial Status Register) */ #define SCFSR_PER3 BIT(15) /* Parity Error Count 3 */ #define SCFSR_PER2 BIT(14) /* Parity Error Count 2 */ #define SCFSR_PER1 BIT(13) /* Parity Error Count 1 */ #define SCFSR_PER0 BIT(12) /* Parity Error Count 0 */ #define SCFSR_FER3 BIT(11) /* Framing Error Count 3 */ #define SCFSR_FER2 BIT(10) /* Framing Error Count 2 */ #define SCFSR_FER_1 BIT(9) /* Framing Error Count 1 */ #define SCFSR_FER0 BIT(8) /* Framing Error Count 0 */ #define SCFSR_ER BIT(7) /* Receive Error */ #define SCFSR_TEND BIT(6) /* Transmission ended */ #define SCFSR_TDFE BIT(5) /* Transmit FIFO Data Empty */ #define SCFSR_BRK BIT(4) /* Break Detect */ #define SCFSR_FER BIT(3) /* Framing Error */ #define SCFSR_PER BIT(2) /* Parity Error */ #define SCFSR_RDF BIT(1) /* Receive FIFO Data Full */ #define SCFSR_DR BIT(0) /* Receive Data Ready */ /* SCLSR (Line Status Register) on (H)SCIF */ #define SCLSR_TO BIT(2) /* Timeout */ #define SCLSR_ORER BIT(0) /* Overrun Error */ /* HSSRR (Sampling Rate Register) */ #define 
HSSRR_SRE BIT(15) /* Sampling Rate Register Enable */ #define HSSRR_SRCYC_DEF_VAL 0x7 /* Sampling rate default value */ static uint8_t uart_rcar_read_8(const struct device *dev, uint32_t offs) { return sys_read8(DEVICE_MMIO_GET(dev) + offs); } static void uart_rcar_write_8(const struct device *dev, uint32_t offs, uint8_t value) { sys_write8(value, DEVICE_MMIO_GET(dev) + offs); } static uint16_t uart_rcar_read_16(const struct device *dev, uint32_t offs) { return sys_read16(DEVICE_MMIO_GET(dev) + offs); } static void uart_rcar_write_16(const struct device *dev, uint32_t offs, uint16_t value) { sys_write16(value, DEVICE_MMIO_GET(dev) + offs); } static void uart_rcar_set_baudrate(const struct device *dev, uint32_t baud_rate) { struct uart_rcar_data *data = dev->data; const struct uart_rcar_cfg *cfg = dev->config; uint8_t reg_val; if (cfg->is_hscif) { reg_val = data->clk_rate / (2 * (HSSRR_SRCYC_DEF_VAL + 1) * baud_rate) - 1; } else { reg_val = ((data->clk_rate + 16 * baud_rate) / (32 * baud_rate) - 1); } uart_rcar_write_8(dev, SCBRR, reg_val); } static int uart_rcar_poll_in(const struct device *dev, unsigned char *p_char) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; int ret = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); /* Receive FIFO empty */ if (!((uart_rcar_read_16(dev, SCFSR)) & SCFSR_RDF)) { ret = -1; goto unlock; } *p_char = uart_rcar_read_8(dev, SCFRDR); reg_val = uart_rcar_read_16(dev, SCFSR); reg_val &= ~SCFSR_RDF; uart_rcar_write_16(dev, SCFSR, reg_val); unlock: k_spin_unlock(&data->lock, key); return ret; } static void uart_rcar_poll_out(const struct device *dev, unsigned char out_char) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); /* Wait for empty space in transmit FIFO */ while (!(uart_rcar_read_16(dev, SCFSR) & SCFSR_TDFE)) { } uart_rcar_write_8(dev, SCFTDR, out_char); reg_val = uart_rcar_read_16(dev, SCFSR); reg_val &= ~(SCFSR_TDFE | SCFSR_TEND); 
uart_rcar_write_16(dev, SCFSR, reg_val); k_spin_unlock(&data->lock, key); } static int uart_rcar_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_rcar_data *data = dev->data; const struct uart_rcar_cfg *cfg_drv = dev->config; uint16_t reg_val; k_spinlock_key_t key; if (cfg->parity != UART_CFG_PARITY_NONE || cfg->stop_bits != UART_CFG_STOP_BITS_1 || cfg->data_bits != UART_CFG_DATA_BITS_8 || cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { return -ENOTSUP; } key = k_spin_lock(&data->lock); /* Disable Transmit and Receive */ reg_val = uart_rcar_read_16(dev, SCSCR); reg_val &= ~(SCSCR_TE | SCSCR_RE); uart_rcar_write_16(dev, SCSCR, reg_val); /* Emptying Transmit and Receive FIFO */ reg_val = uart_rcar_read_16(dev, SCFCR); reg_val |= (SCFCR_TFRST | SCFCR_RFRST); uart_rcar_write_16(dev, SCFCR, reg_val); /* Resetting Errors Registers */ reg_val = uart_rcar_read_16(dev, SCFSR); reg_val &= ~(SCFSR_ER | SCFSR_DR | SCFSR_BRK | SCFSR_RDF); uart_rcar_write_16(dev, SCFSR, reg_val); reg_val = uart_rcar_read_16(dev, SCLSR); reg_val &= ~(SCLSR_TO | SCLSR_ORER); uart_rcar_write_16(dev, SCLSR, reg_val); /* Select internal clock */ reg_val = uart_rcar_read_16(dev, SCSCR); reg_val &= ~(SCSCR_CKE1 | SCSCR_CKE0); uart_rcar_write_16(dev, SCSCR, reg_val); /* Serial Configuration (8N1) & Clock divider selection */ reg_val = uart_rcar_read_16(dev, SCSMR); reg_val &= ~(SCSMR_C_A | SCSMR_CHR | SCSMR_PE | SCSMR_O_E | SCSMR_STOP | SCSMR_CKS1 | SCSMR_CKS0); uart_rcar_write_16(dev, SCSMR, reg_val); if (cfg_drv->is_hscif) { /* TODO: calculate the optimal sampling and bit rates based on error rate */ uart_rcar_write_16(dev, HSSRR, HSSRR_SRE | HSSRR_SRCYC_DEF_VAL); } /* Set baudrate */ uart_rcar_set_baudrate(dev, cfg->baudrate); /* FIFOs data count trigger configuration */ reg_val = uart_rcar_read_16(dev, SCFCR); reg_val &= ~(SCFCR_RTRG1 | SCFCR_RTRG0 | SCFCR_TTRG1 | SCFCR_TTRG0 | SCFCR_MCE | SCFCR_TFRST | SCFCR_RFRST); uart_rcar_write_16(dev, SCFCR, reg_val); /* Enable 
Transmit & Receive + disable Interrupts */ reg_val = uart_rcar_read_16(dev, SCSCR); reg_val |= (SCSCR_TE | SCSCR_RE); reg_val &= ~(SCSCR_TIE | SCSCR_RIE | SCSCR_TEIE | SCSCR_REIE | SCSCR_TOIE); uart_rcar_write_16(dev, SCSCR, reg_val); data->current_config = *cfg; k_spin_unlock(&data->lock, key); return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_rcar_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_rcar_data *data = dev->data; *cfg = data->current_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_rcar_init(const struct device *dev) { const struct uart_rcar_cfg *config = dev->config; struct uart_rcar_data *data = dev->data; int ret; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } if (!device_is_ready(config->clock_dev)) { return -ENODEV; } ret = clock_control_on(config->clock_dev, (clock_control_subsys_t)&config->mod_clk); if (ret < 0) { return ret; } ret = clock_control_get_rate(config->clock_dev, (clock_control_subsys_t)&config->bus_clk, &data->clk_rate); if (ret < 0) { return ret; } DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); ret = uart_rcar_configure(dev, &data->current_config); if (ret != 0) { return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static bool uart_rcar_irq_is_enabled(const struct device *dev, uint32_t irq) { return !!(uart_rcar_read_16(dev, SCSCR) & irq); } static int uart_rcar_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { struct uart_rcar_data *data = dev->data; int num_tx = 0; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); while (((len - num_tx) > 0) && (uart_rcar_read_16(dev, SCFSR) & SCFSR_TDFE)) { /* Send current byte */ uart_rcar_write_8(dev, SCFTDR, tx_data[num_tx]); reg_val = uart_rcar_read_16(dev, SCFSR); reg_val &= ~(SCFSR_TDFE | 
SCFSR_TEND); uart_rcar_write_16(dev, SCFSR, reg_val); num_tx++; } k_spin_unlock(&data->lock, key); return num_tx; } static int uart_rcar_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct uart_rcar_data *data = dev->data; int num_rx = 0; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); while (((size - num_rx) > 0) && (uart_rcar_read_16(dev, SCFSR) & SCFSR_RDF)) { /* Receive current byte */ rx_data[num_rx++] = uart_rcar_read_8(dev, SCFRDR); reg_val = uart_rcar_read_16(dev, SCFSR); reg_val &= ~(SCFSR_RDF); uart_rcar_write_16(dev, SCFSR, reg_val); } k_spin_unlock(&data->lock, key); return num_rx; } static void uart_rcar_irq_tx_enable(const struct device *dev) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); reg_val = uart_rcar_read_16(dev, SCSCR); reg_val |= (SCSCR_TIE); uart_rcar_write_16(dev, SCSCR, reg_val); k_spin_unlock(&data->lock, key); } static void uart_rcar_irq_tx_disable(const struct device *dev) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); reg_val = uart_rcar_read_16(dev, SCSCR); reg_val &= ~(SCSCR_TIE); uart_rcar_write_16(dev, SCSCR, reg_val); k_spin_unlock(&data->lock, key); } static int uart_rcar_irq_tx_ready(const struct device *dev) { return !!(uart_rcar_read_16(dev, SCFSR) & SCFSR_TDFE); } static void uart_rcar_irq_rx_enable(const struct device *dev) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); reg_val = uart_rcar_read_16(dev, SCSCR); reg_val |= (SCSCR_RIE); uart_rcar_write_16(dev, SCSCR, reg_val); k_spin_unlock(&data->lock, key); } static void uart_rcar_irq_rx_disable(const struct device *dev) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); reg_val = uart_rcar_read_16(dev, SCSCR); reg_val &= ~(SCSCR_RIE); uart_rcar_write_16(dev, SCSCR, reg_val); 
k_spin_unlock(&data->lock, key); } static int uart_rcar_irq_rx_ready(const struct device *dev) { return !!(uart_rcar_read_16(dev, SCFSR) & SCFSR_RDF); } static void uart_rcar_irq_err_enable(const struct device *dev) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); reg_val = uart_rcar_read_16(dev, SCSCR); reg_val |= (SCSCR_REIE); uart_rcar_write_16(dev, SCSCR, reg_val); k_spin_unlock(&data->lock, key); } static void uart_rcar_irq_err_disable(const struct device *dev) { struct uart_rcar_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); reg_val = uart_rcar_read_16(dev, SCSCR); reg_val &= ~(SCSCR_REIE); uart_rcar_write_16(dev, SCSCR, reg_val); k_spin_unlock(&data->lock, key); } static int uart_rcar_irq_is_pending(const struct device *dev) { return (uart_rcar_irq_rx_ready(dev) && uart_rcar_irq_is_enabled(dev, SCSCR_RIE)) || (uart_rcar_irq_tx_ready(dev) && uart_rcar_irq_is_enabled(dev, SCSCR_TIE)); } static int uart_rcar_irq_update(const struct device *dev) { return 1; } static void uart_rcar_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_rcar_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. 
*/ void uart_rcar_isr(const struct device *dev) { struct uart_rcar_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_rcar_driver_api = { .poll_in = uart_rcar_poll_in, .poll_out = uart_rcar_poll_out, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_rcar_configure, .config_get = uart_rcar_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_rcar_fifo_fill, .fifo_read = uart_rcar_fifo_read, .irq_tx_enable = uart_rcar_irq_tx_enable, .irq_tx_disable = uart_rcar_irq_tx_disable, .irq_tx_ready = uart_rcar_irq_tx_ready, .irq_rx_enable = uart_rcar_irq_rx_enable, .irq_rx_disable = uart_rcar_irq_rx_disable, .irq_rx_ready = uart_rcar_irq_rx_ready, .irq_err_enable = uart_rcar_irq_err_enable, .irq_err_disable = uart_rcar_irq_err_disable, .irq_is_pending = uart_rcar_irq_is_pending, .irq_update = uart_rcar_irq_update, .irq_callback_set = uart_rcar_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; /* Device Instantiation */ #define UART_RCAR_DECLARE_CFG(n, IRQ_FUNC_INIT, compat) \ PINCTRL_DT_INST_DEFINE(n); \ static const struct uart_rcar_cfg uart_rcar_cfg_##compat##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .mod_clk.module = DT_INST_CLOCKS_CELL_BY_IDX(n, 0, module), \ .mod_clk.domain = DT_INST_CLOCKS_CELL_BY_IDX(n, 0, domain), \ .bus_clk.module = DT_INST_CLOCKS_CELL_BY_IDX(n, 1, module), \ .bus_clk.domain = DT_INST_CLOCKS_CELL_BY_IDX(n, 1, domain), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .is_hscif = DT_INST_NODE_HAS_COMPAT(n, renesas_rcar_hscif), \ IRQ_FUNC_INIT \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_RCAR_CONFIG_FUNC(n, compat) \ static void irq_config_func_##compat##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ uart_rcar_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } 
#define UART_RCAR_IRQ_CFG_FUNC_INIT(n, compat) \ .irq_config_func = irq_config_func_##compat##n #define UART_RCAR_INIT_CFG(n, compat) \ UART_RCAR_DECLARE_CFG(n, UART_RCAR_IRQ_CFG_FUNC_INIT(n, compat), compat) #else #define UART_RCAR_CONFIG_FUNC(n, compat) #define UART_RCAR_IRQ_CFG_FUNC_INIT #define UART_RCAR_INIT_CFG(n, compat) \ UART_RCAR_DECLARE_CFG(n, UART_RCAR_IRQ_CFG_FUNC_INIT, compat) #endif #define UART_RCAR_INIT(n, compat) \ static struct uart_rcar_data uart_rcar_data_##compat##n = { \ .current_config = { \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, \ }, \ }; \ \ static const struct uart_rcar_cfg uart_rcar_cfg_##compat##n; \ \ DEVICE_DT_INST_DEFINE(n, \ uart_rcar_init, \ NULL, \ &uart_rcar_data_##compat##n, \ &uart_rcar_cfg_##compat##n, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_rcar_driver_api); \ \ UART_RCAR_CONFIG_FUNC(n, compat) \ \ UART_RCAR_INIT_CFG(n, compat); DT_INST_FOREACH_STATUS_OKAY_VARGS(UART_RCAR_INIT, DT_DRV_COMPAT) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT renesas_rcar_hscif DT_INST_FOREACH_STATUS_OKAY_VARGS(UART_RCAR_INIT, DT_DRV_COMPAT) ```
/content/code_sandbox/drivers/serial/uart_rcar.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,340
```unknown config UART_CDNS bool "Serial driver for Cadence UART IP6528" default y depends on DT_HAS_CDNS_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the serial driver for Cadence UART IP6528. ```
/content/code_sandbox/drivers/serial/Kconfig.cdns
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
58
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/sys/ring_buffer.h> #include <zephyr/sys/atomic.h> #include <zephyr/bluetooth/services/nus.h> #define DT_DRV_COMPAT zephyr_nus_uart #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_nus, CONFIG_UART_LOG_LEVEL); K_THREAD_STACK_DEFINE(nus_work_queue_stack, CONFIG_UART_BT_WORKQUEUE_STACK_SIZE); static struct k_work_q nus_work_queue; struct uart_bt_data { struct { struct bt_nus_inst *inst; struct bt_nus_cb cb; atomic_t enabled; } bt; struct { struct ring_buf *rx_ringbuf; struct ring_buf *tx_ringbuf; struct k_work cb_work; struct k_work_delayable tx_work; bool rx_irq_ena; bool tx_irq_ena; struct { const struct device *dev; uart_irq_callback_user_data_t cb; void *cb_data; } callback; } uart; }; static void bt_notif_enabled(bool enabled, void *ctx) { __ASSERT_NO_MSG(ctx); const struct device *dev = (const struct device *)ctx; struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; (void)atomic_set(&dev_data->bt.enabled, enabled ? 1 : 0); LOG_DBG("%s() - %s", __func__, enabled ? "enabled" : "disabled"); if (!ring_buf_is_empty(dev_data->uart.tx_ringbuf)) { k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT); } } static void bt_received(struct bt_conn *conn, const void *data, uint16_t len, void *ctx) { __ASSERT_NO_MSG(conn); __ASSERT_NO_MSG(ctx); __ASSERT_NO_MSG(data); __ASSERT_NO_MSG(len > 0); const struct device *dev = (const struct device *)ctx; struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; struct ring_buf *ringbuf = dev_data->uart.rx_ringbuf; uint32_t put_len; LOG_DBG("%s() - len: %d, rx_ringbuf space %d", __func__, len, ring_buf_space_get(ringbuf)); LOG_HEXDUMP_DBG(data, len, "data"); put_len = ring_buf_put(ringbuf, (const uint8_t *)data, len); if (put_len < len) { LOG_ERR("RX Ring buffer full. 
received: %d, added to queue: %d", len, put_len); } k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work); } static void cb_work_handler(struct k_work *work) { struct uart_bt_data *dev_data = CONTAINER_OF(work, struct uart_bt_data, uart.cb_work); if (dev_data->uart.callback.cb) { dev_data->uart.callback.cb( dev_data->uart.callback.dev, dev_data->uart.callback.cb_data); } } static void tx_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_bt_data *dev_data = CONTAINER_OF(dwork, struct uart_bt_data, uart.tx_work); uint8_t *data = NULL; size_t len; int err; __ASSERT_NO_MSG(dev_data); do { /** Using Minimum MTU at this point to guarantee all connected * peers will receive the data, without keeping track of MTU * size per-connection. This has the trade-off of limiting * throughput but allows multi-connection support. */ len = ring_buf_get_claim(dev_data->uart.tx_ringbuf, &data, 20); if (len > 0) { err = bt_nus_inst_send(NULL, dev_data->bt.inst, data, len); if (err) { LOG_ERR("Failed to send data over BT: %d", err); } } ring_buf_get_finish(dev_data->uart.tx_ringbuf, len); } while (len > 0 && !err); if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) { k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work); } } static int uart_bt_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; size_t wrote; wrote = ring_buf_put(dev_data->uart.tx_ringbuf, tx_data, len); if (wrote < len) { LOG_WRN("Ring buffer full, drop %zd bytes", len - wrote); } if (atomic_get(&dev_data->bt.enabled)) { k_work_reschedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_NO_WAIT); } return wrote; } static int uart_bt_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; return ring_buf_get(dev_data->uart.rx_ringbuf, 
rx_data, size); } static int uart_bt_poll_in(const struct device *dev, unsigned char *c) { int err = uart_bt_fifo_read(dev, c, 1); return err == 1 ? 0 : -1; } static void uart_bt_poll_out(const struct device *dev, unsigned char c) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; struct ring_buf *ringbuf = dev_data->uart.tx_ringbuf; /** Right now we're discarding data if ring-buf is full. */ while (!ring_buf_put(ringbuf, &c, 1)) { if (k_is_in_isr() || !atomic_get(&dev_data->bt.enabled)) { LOG_INF("Ring buffer full, discard %c", c); break; } k_sleep(K_MSEC(1)); } /** Don't flush the data until notifications are enabled. */ if (atomic_get(&dev_data->bt.enabled)) { /** Delay will allow buffering some characters before transmitting * data, so more than one byte is transmitted (e.g: when poll_out is * called inside a for-loop). */ k_work_schedule_for_queue(&nus_work_queue, &dev_data->uart.tx_work, K_MSEC(1)); } } static int uart_bt_irq_tx_ready(const struct device *dev) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; if ((ring_buf_space_get(dev_data->uart.tx_ringbuf) > 0) && dev_data->uart.tx_irq_ena) { return 1; } return 0; } static void uart_bt_irq_tx_enable(const struct device *dev) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; dev_data->uart.tx_irq_ena = true; if (uart_bt_irq_tx_ready(dev)) { k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work); } } static void uart_bt_irq_tx_disable(const struct device *dev) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; dev_data->uart.tx_irq_ena = false; } static int uart_bt_irq_rx_ready(const struct device *dev) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; if (!ring_buf_is_empty(dev_data->uart.rx_ringbuf) && dev_data->uart.rx_irq_ena) { return 1; } return 0; } static void uart_bt_irq_rx_enable(const struct device *dev) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; dev_data->uart.rx_irq_ena = 
true; k_work_submit_to_queue(&nus_work_queue, &dev_data->uart.cb_work); } static void uart_bt_irq_rx_disable(const struct device *dev) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; dev_data->uart.rx_irq_ena = false; } static int uart_bt_irq_is_pending(const struct device *dev) { return uart_bt_irq_rx_ready(dev); } static int uart_bt_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void uart_bt_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; dev_data->uart.callback.cb = cb; dev_data->uart.callback.cb_data = cb_data; } static const struct uart_driver_api uart_bt_driver_api = { .poll_in = uart_bt_poll_in, .poll_out = uart_bt_poll_out, .fifo_fill = uart_bt_fifo_fill, .fifo_read = uart_bt_fifo_read, .irq_tx_enable = uart_bt_irq_tx_enable, .irq_tx_disable = uart_bt_irq_tx_disable, .irq_tx_ready = uart_bt_irq_tx_ready, .irq_rx_enable = uart_bt_irq_rx_enable, .irq_rx_disable = uart_bt_irq_rx_disable, .irq_rx_ready = uart_bt_irq_rx_ready, .irq_is_pending = uart_bt_irq_is_pending, .irq_update = uart_bt_irq_update, .irq_callback_set = uart_bt_irq_callback_set, }; static int uart_bt_workqueue_init(void) { k_work_queue_init(&nus_work_queue); k_work_queue_start(&nus_work_queue, nus_work_queue_stack, K_THREAD_STACK_SIZEOF(nus_work_queue_stack), CONFIG_UART_BT_WORKQUEUE_PRIORITY, NULL); return 0; } /** The work-queue is shared across all instances, hence we initialize it separatedly */ SYS_INIT(uart_bt_workqueue_init, POST_KERNEL, CONFIG_SERIAL_INIT_PRIORITY); static int uart_bt_init(const struct device *dev) { int err; struct uart_bt_data *dev_data = (struct uart_bt_data *)dev->data; /** As a way to backtrace the device handle from uart_bt_data. * Used in cb_work_handler. 
*/ dev_data->uart.callback.dev = dev; k_work_init_delayable(&dev_data->uart.tx_work, tx_work_handler); k_work_init(&dev_data->uart.cb_work, cb_work_handler); err = bt_nus_inst_cb_register(dev_data->bt.inst, &dev_data->bt.cb, (void *)dev); if (err) { return err; } return 0; } #define UART_BT_RX_FIFO_SIZE(inst) (DT_INST_PROP(inst, rx_fifo_size)) #define UART_BT_TX_FIFO_SIZE(inst) (DT_INST_PROP(inst, tx_fifo_size)) #define UART_BT_INIT(n) \ \ BT_NUS_INST_DEFINE(bt_nus_inst_##n); \ \ RING_BUF_DECLARE(bt_nus_rx_rb_##n, UART_BT_RX_FIFO_SIZE(n)); \ RING_BUF_DECLARE(bt_nus_tx_rb_##n, UART_BT_TX_FIFO_SIZE(n)); \ \ static struct uart_bt_data uart_bt_data_##n = { \ .bt = { \ .inst = &bt_nus_inst_##n, \ .enabled = ATOMIC_INIT(0), \ .cb = { \ .notif_enabled = bt_notif_enabled, \ .received = bt_received, \ }, \ }, \ .uart = { \ .rx_ringbuf = &bt_nus_rx_rb_##n, \ .tx_ringbuf = &bt_nus_tx_rb_##n, \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(n, uart_bt_init, NULL, &uart_bt_data_##n, \ NULL, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_bt_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_BT_INIT) ```
/content/code_sandbox/drivers/serial/uart_bt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,627
```unknown config UART_ESP32 bool "ESP32 UART driver" default y depends on DT_HAS_ESPRESSIF_ESP32_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select SERIAL_SUPPORT_ASYNC if (SOC_SERIES_ESP32C3 || SOC_SERIES_ESP32C6 || SOC_SERIES_ESP32S3) select GPIO_ESP32 help Enable the ESP32 UART. config SERIAL_ESP32_USB bool "ESP32 built-in USB serial driver" default y depends on DT_HAS_ESPRESSIF_ESP32_USB_SERIAL_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable the built-in USB serial interface present in some Espressif MCUs like ESP32-Cx. This driver uses the peripheral called USB Serial/JTAG Controller (USB_SERIAL_JTAG), which acts as a CDC-ACM interface towards the USB host. The USB stack is built into the chip and accessed by the firmware through a simplified API similar to a "normal" UART peripheral. config UART_ESP32_TX_FIFO_THRESH hex "ESP32 UART TX FIFO Threshold" depends on UART_ESP32 default 0x1 range 1 127 help Configure the TX FIFO threshold for ESP32 UART driver. config UART_ESP32_RX_FIFO_THRESH hex "ESP32 UART RX FIFO Threshold" depends on UART_ESP32 default 0x16 range 1 127 help Configure the RX FIFO threshold for ESP32 UART driver. ```
/content/code_sandbox/drivers/serial/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
332
```c /* * */ #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/misc/pio_rpi_pico/pio_rpi_pico.h> #include <hardware/pio.h> #include <hardware/clocks.h> #define DT_DRV_COMPAT raspberrypi_pico_uart_pio #define CYCLES_PER_BIT 8 #define SIDESET_BIT_COUNT 2 struct pio_uart_config { const struct device *piodev; const struct pinctrl_dev_config *pcfg; const uint32_t tx_pin; const uint32_t rx_pin; uint32_t baudrate; }; struct pio_uart_data { size_t tx_sm; size_t rx_sm; }; RPI_PICO_PIO_DEFINE_PROGRAM(uart_tx, 0, 3, /* .wrap_target */ 0x9fa0, /* 0: pull block side 1 [7] */ 0xf727, /* 1: set x, 7 side 0 [7] */ 0x6001, /* 2: out pins, 1 */ 0x0642, /* 3: jmp x--, 2 [6] */ /* .wrap */ ); RPI_PICO_PIO_DEFINE_PROGRAM(uart_rx, 0, 8, /* .wrap_target */ 0x2020, /* 0: wait 0 pin, 0 */ 0xea27, /* 1: set x, 7 [10] */ 0x4001, /* 2: in pins, 1 */ 0x0642, /* 3: jmp x--, 2 [6] */ 0x00c8, /* 4: jmp pin, 8 */ 0xc014, /* 5: irq nowait 4 rel */ 0x20a0, /* 6: wait 1 pin, 0 */ 0x0000, /* 7: jmp 0 */ 0x8020, /* 8: push block */ /* .wrap */ ); static int pio_uart_tx_init(PIO pio, uint32_t sm, uint32_t tx_pin, float div) { uint32_t offset; pio_sm_config sm_config; if (!pio_can_add_program(pio, RPI_PICO_PIO_GET_PROGRAM(uart_tx))) { return -EBUSY; } offset = pio_add_program(pio, RPI_PICO_PIO_GET_PROGRAM(uart_tx)); sm_config = pio_get_default_sm_config(); sm_config_set_sideset(&sm_config, SIDESET_BIT_COUNT, true, false); sm_config_set_out_shift(&sm_config, true, false, 0); sm_config_set_out_pins(&sm_config, tx_pin, 1); sm_config_set_sideset_pins(&sm_config, tx_pin); sm_config_set_fifo_join(&sm_config, PIO_FIFO_JOIN_TX); sm_config_set_clkdiv(&sm_config, div); sm_config_set_wrap(&sm_config, offset + RPI_PICO_PIO_GET_WRAP_TARGET(uart_tx), offset + RPI_PICO_PIO_GET_WRAP(uart_tx)); pio_sm_set_pins_with_mask(pio, sm, BIT(tx_pin), BIT(tx_pin)); pio_sm_set_pindirs_with_mask(pio, sm, BIT(tx_pin), BIT(tx_pin)); pio_sm_init(pio, sm, offset, &sm_config); 
pio_sm_set_enabled(pio, sm, true); return 0; } static int pio_uart_rx_init(PIO pio, uint32_t sm, uint32_t rx_pin, float div) { pio_sm_config sm_config; uint32_t offset; if (!pio_can_add_program(pio, RPI_PICO_PIO_GET_PROGRAM(uart_rx))) { return -EBUSY; } offset = pio_add_program(pio, RPI_PICO_PIO_GET_PROGRAM(uart_rx)); sm_config = pio_get_default_sm_config(); pio_sm_set_consecutive_pindirs(pio, sm, rx_pin, 1, false); sm_config_set_in_pins(&sm_config, rx_pin); sm_config_set_jmp_pin(&sm_config, rx_pin); sm_config_set_in_shift(&sm_config, true, false, 0); sm_config_set_fifo_join(&sm_config, PIO_FIFO_JOIN_RX); sm_config_set_clkdiv(&sm_config, div); sm_config_set_wrap(&sm_config, offset + RPI_PICO_PIO_GET_WRAP_TARGET(uart_rx), offset + RPI_PICO_PIO_GET_WRAP(uart_rx)); pio_sm_init(pio, sm, offset, &sm_config); pio_sm_set_enabled(pio, sm, true); return 0; } static int pio_uart_poll_in(const struct device *dev, unsigned char *c) { const struct pio_uart_config *config = dev->config; PIO pio = pio_rpi_pico_get_pio(config->piodev); struct pio_uart_data *data = dev->data; io_rw_8 *uart_rx_fifo_msb; /* * The rx FIFO is 4 bytes wide, add 3 to get the most significant * byte. 
*/ uart_rx_fifo_msb = (io_rw_8 *)&pio->rxf[data->rx_sm] + 3; if (pio_sm_is_rx_fifo_empty(pio, data->rx_sm)) { return -1; } /* Accessing the FIFO pops the read word from it */ *c = (char)*uart_rx_fifo_msb; return 0; } static void pio_uart_poll_out(const struct device *dev, unsigned char c) { const struct pio_uart_config *config = dev->config; struct pio_uart_data *data = dev->data; pio_sm_put_blocking(pio_rpi_pico_get_pio(config->piodev), data->tx_sm, (uint32_t)c); } static int pio_uart_init(const struct device *dev) { const struct pio_uart_config *config = dev->config; struct pio_uart_data *data = dev->data; float sm_clock_div; size_t tx_sm; size_t rx_sm; int retval; PIO pio; pio = pio_rpi_pico_get_pio(config->piodev); sm_clock_div = (float)clock_get_hz(clk_sys) / (CYCLES_PER_BIT * config->baudrate); retval = pio_rpi_pico_allocate_sm(config->piodev, &tx_sm); retval |= pio_rpi_pico_allocate_sm(config->piodev, &rx_sm); if (retval < 0) { return retval; } data->tx_sm = tx_sm; data->rx_sm = rx_sm; retval = pio_uart_tx_init(pio, tx_sm, config->tx_pin, sm_clock_div); if (retval < 0) { return retval; } retval = pio_uart_rx_init(pio, rx_sm, config->rx_pin, sm_clock_div); if (retval < 0) { return retval; } return pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); } static const struct uart_driver_api pio_uart_driver_api = { .poll_in = pio_uart_poll_in, .poll_out = pio_uart_poll_out, }; #define PIO_UART_INIT(idx) \ PINCTRL_DT_INST_DEFINE(idx); \ static const struct pio_uart_config pio_uart##idx##_config = { \ .piodev = DEVICE_DT_GET(DT_INST_PARENT(idx)), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \ .tx_pin = DT_INST_RPI_PICO_PIO_PIN_BY_NAME(idx, default, 0, tx_pins, 0), \ .rx_pin = DT_INST_RPI_PICO_PIO_PIN_BY_NAME(idx, default, 0, rx_pins, 0), \ .baudrate = DT_INST_PROP(idx, current_speed), \ }; \ static struct pio_uart_data pio_uart##idx##_data; \ \ DEVICE_DT_INST_DEFINE(idx, pio_uart_init, NULL, &pio_uart##idx##_data, \ &pio_uart##idx##_config, POST_KERNEL, \ 
CONFIG_SERIAL_INIT_PRIORITY, \ &pio_uart_driver_api); DT_INST_FOREACH_STATUS_OKAY(PIO_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_rpi_pico_pio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,855
```c /* * */ #define DT_DRV_COMPAT ti_cc13xx_cc26xx_uart #include <zephyr/device.h> #include <errno.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/atomic.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <driverlib/prcm.h> #include <driverlib/uart.h> #include <ti/drivers/Power.h> #include <ti/drivers/power/PowerCC26X2.h> #include <zephyr/irq.h> struct uart_cc13xx_cc26xx_config { uint32_t reg; uint32_t sys_clk_freq; }; enum uart_cc13xx_cc26xx_pm_locks { UART_CC13XX_CC26XX_PM_LOCK_TX, UART_CC13XX_CC26XX_PM_LOCK_RX, UART_CC13XX_CC26XX_PM_LOCK_COUNT, }; struct uart_cc13xx_cc26xx_data { struct uart_config uart_config; const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *user_data; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_PM Power_NotifyObj postNotify; ATOMIC_DEFINE(pm_lock, UART_CC13XX_CC26XX_PM_LOCK_COUNT); #endif }; static int uart_cc13xx_cc26xx_poll_in(const struct device *dev, unsigned char *c) { const struct uart_cc13xx_cc26xx_config *config = dev->config; if (!UARTCharsAvail(config->reg)) { return -1; } *c = UARTCharGetNonBlocking(config->reg); return 0; } static void uart_cc13xx_cc26xx_poll_out(const struct device *dev, unsigned char c) { const struct uart_cc13xx_cc26xx_config *config = dev->config; UARTCharPut(config->reg, c); /* * Need to wait for character to be transmitted to ensure cpu does not * enter standby when uart is busy */ while (UARTBusy(config->reg) == true) { } } static int uart_cc13xx_cc26xx_err_check(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; uint32_t flags = UARTRxErrorGet(config->reg); int error = (flags & UART_RXERROR_FRAMING ? UART_ERROR_FRAMING : 0) | (flags & UART_RXERROR_PARITY ? UART_ERROR_PARITY : 0) | (flags & UART_RXERROR_BREAK ? UART_BREAK : 0) | (flags & UART_RXERROR_OVERRUN ? 
UART_ERROR_OVERRUN : 0); UARTRxErrorClear(config->reg); return error; } static int uart_cc13xx_cc26xx_configure(const struct device *dev, const struct uart_config *cfg) { const struct uart_cc13xx_cc26xx_config *config = dev->config; struct uart_cc13xx_cc26xx_data *data = dev->data; uint32_t line_ctrl = 0; bool flow_ctrl; switch (cfg->parity) { case UART_CFG_PARITY_NONE: line_ctrl |= UART_CONFIG_PAR_NONE; break; case UART_CFG_PARITY_ODD: line_ctrl |= UART_CONFIG_PAR_ODD; break; case UART_CFG_PARITY_EVEN: line_ctrl |= UART_CONFIG_PAR_EVEN; break; case UART_CFG_PARITY_MARK: line_ctrl |= UART_CONFIG_PAR_ONE; break; case UART_CFG_PARITY_SPACE: line_ctrl |= UART_CONFIG_PAR_ZERO; break; default: return -EINVAL; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: line_ctrl |= UART_CONFIG_STOP_ONE; break; case UART_CFG_STOP_BITS_2: line_ctrl |= UART_CONFIG_STOP_TWO; break; case UART_CFG_STOP_BITS_0_5: case UART_CFG_STOP_BITS_1_5: return -ENOTSUP; default: return -EINVAL; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: line_ctrl |= UART_CONFIG_WLEN_5; break; case UART_CFG_DATA_BITS_6: line_ctrl |= UART_CONFIG_WLEN_6; break; case UART_CFG_DATA_BITS_7: line_ctrl |= UART_CONFIG_WLEN_7; break; case UART_CFG_DATA_BITS_8: line_ctrl |= UART_CONFIG_WLEN_8; break; default: return -EINVAL; } switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: flow_ctrl = false; break; case UART_CFG_FLOW_CTRL_RTS_CTS: flow_ctrl = true; break; case UART_CFG_FLOW_CTRL_DTR_DSR: return -ENOTSUP; default: return -EINVAL; } /* Disables UART before setting control registers */ UARTConfigSetExpClk(config->reg, config->sys_clk_freq, cfg->baudrate, line_ctrl); /* Clear all UART interrupts */ UARTIntClear(config->reg, UART_INT_OE | UART_INT_BE | UART_INT_PE | UART_INT_FE | UART_INT_RT | UART_INT_TX | UART_INT_RX | UART_INT_CTS); if (flow_ctrl) { UARTHwFlowControlEnable(config->reg); } else { UARTHwFlowControlDisable(config->reg); } /* Re-enable UART */ UARTEnable(config->reg); /* Disabled FIFOs act 
as 1-byte-deep holding registers (character mode) */ UARTFIFODisable(config->reg); data->uart_config = *cfg; return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_cc13xx_cc26xx_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_cc13xx_cc26xx_data *data = dev->data; *cfg = data->uart_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_cc13xx_cc26xx_fifo_fill(const struct device *dev, const uint8_t *buf, int len) { const struct uart_cc13xx_cc26xx_config *config = dev->config; int n = 0; while (n < len) { if (!UARTCharPutNonBlocking(config->reg, buf[n])) { break; } n++; } return n; } static int uart_cc13xx_cc26xx_fifo_read(const struct device *dev, uint8_t *buf, const int len) { const struct uart_cc13xx_cc26xx_config *config = dev->config; int c, n; n = 0; while (n < len) { c = UARTCharGetNonBlocking(config->reg); if (c == -1) { break; } buf[n++] = c; } return n; } static void uart_cc13xx_cc26xx_irq_tx_enable(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; #ifdef CONFIG_PM struct uart_cc13xx_cc26xx_data *data = dev->data; if (!atomic_test_and_set_bit(data->pm_lock, UART_CC13XX_CC26XX_PM_LOCK_TX)) { /* * When tx irq is enabled, it is implicit that we are expecting * to transmit using the uart, hence we should no longer go * into standby. * * Instead of using pm_device_busy_set(), which currently does * not impact the PM policy, we specifically disable the * standby mode instead, since it is the power state that * would interfere with a transfer. 
*/ pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } #endif UARTIntEnable(config->reg, UART_INT_TX); } static void uart_cc13xx_cc26xx_irq_tx_disable(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; UARTIntDisable(config->reg, UART_INT_TX); #ifdef CONFIG_PM struct uart_cc13xx_cc26xx_data *data = dev->data; if (atomic_test_and_clear_bit(data->pm_lock, UART_CC13XX_CC26XX_PM_LOCK_TX)) { pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } #endif } static int uart_cc13xx_cc26xx_irq_tx_ready(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; return UARTSpaceAvail(config->reg) ? 1 : 0; } static void uart_cc13xx_cc26xx_irq_rx_enable(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; #ifdef CONFIG_PM struct uart_cc13xx_cc26xx_data *data = dev->data; /* * When rx is enabled, it is implicit that we are expecting * to receive from the uart, hence we can no longer go into * standby. */ if (!atomic_test_and_set_bit(data->pm_lock, UART_CC13XX_CC26XX_PM_LOCK_RX)) { pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } #endif UARTIntEnable(config->reg, UART_INT_RX); } static void uart_cc13xx_cc26xx_irq_rx_disable(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; #ifdef CONFIG_PM struct uart_cc13xx_cc26xx_data *data = dev->data; if (atomic_test_and_clear_bit(data->pm_lock, UART_CC13XX_CC26XX_PM_LOCK_RX)) { pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES); } #endif UARTIntDisable(config->reg, UART_INT_RX); } static int uart_cc13xx_cc26xx_irq_tx_complete(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; return UARTBusy(config->reg) ? 0 : 1; } static int uart_cc13xx_cc26xx_irq_rx_ready(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; return UARTCharsAvail(config->reg) ? 
1 : 0; } static void uart_cc13xx_cc26xx_irq_err_enable(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; return UARTIntEnable(config->reg, UART_INT_OE | UART_INT_BE | UART_INT_PE | UART_INT_FE); } static void uart_cc13xx_cc26xx_irq_err_disable(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; return UARTIntDisable(config->reg, UART_INT_OE | UART_INT_BE | UART_INT_PE | UART_INT_FE); } static int uart_cc13xx_cc26xx_irq_is_pending(const struct device *dev) { const struct uart_cc13xx_cc26xx_config *config = dev->config; uint32_t status = UARTIntStatus(config->reg, true); return status & (UART_INT_TX | UART_INT_RX) ? 1 : 0; } static int uart_cc13xx_cc26xx_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void uart_cc13xx_cc26xx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct uart_cc13xx_cc26xx_data *data = dev->data; data->callback = cb; data->user_data = user_data; } static void uart_cc13xx_cc26xx_isr(const struct device *dev) { struct uart_cc13xx_cc26xx_data *data = dev->data; if (data->callback) { data->callback(dev, data->user_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_PM /* * ======== postNotifyFxn ======== * Called by Power module when waking up the CPU from Standby, to support * the case when PM is set but PM_DEVICE is * not. The uart needs to be reconfigured afterwards unless Zephyr's device * PM turned it off, in which case it'd be responsible for turning it back * on and reconfiguring it. 
*/ static int postNotifyFxn(unsigned int eventType, uintptr_t eventArg, uintptr_t clientArg) { const struct device *dev = (const struct device *)clientArg; const struct uart_cc13xx_cc26xx_config *config = dev->config; struct uart_cc13xx_cc26xx_data *data = dev->data; int ret = Power_NOTIFYDONE; int16_t res_id; /* Reconfigure the hardware if returning from standby */ if (eventType == PowerCC26XX_AWAKE_STANDBY) { if (config->reg == DT_INST_REG_ADDR(0)) { res_id = PowerCC26XX_PERIPH_UART0; } else { /* DT_INST_REG_ADDR(1) */ res_id = PowerCC26X2_PERIPH_UART1; } if (Power_getDependencyCount(res_id) != 0) { /* * Reconfigure and enable UART only if not * actively powered down */ if (uart_cc13xx_cc26xx_configure(dev, &data->uart_config) != 0) { ret = Power_NOTIFYERROR; } } } return (ret); } #endif #ifdef CONFIG_PM_DEVICE static int uart_cc13xx_cc26xx_pm_action(const struct device *dev, enum pm_device_action action) { const struct uart_cc13xx_cc26xx_config *config = dev->config; struct uart_cc13xx_cc26xx_data *data = dev->data; int ret = 0; switch (action) { case PM_DEVICE_ACTION_RESUME: if (config->reg == DT_INST_REG_ADDR(0)) { Power_setDependency(PowerCC26XX_PERIPH_UART0); } else { Power_setDependency(PowerCC26X2_PERIPH_UART1); } /* Configure and enable UART */ ret = uart_cc13xx_cc26xx_configure(dev, &data->uart_config); break; case PM_DEVICE_ACTION_SUSPEND: UARTDisable(config->reg); /* * Release power dependency - i.e. potentially power * down serial domain. 
*/ if (config->reg == DT_INST_REG_ADDR(0)) { Power_releaseDependency(PowerCC26XX_PERIPH_UART0); } else { Power_releaseDependency(PowerCC26X2_PERIPH_UART1); } break; default: return -ENOTSUP; } return ret; } #endif /* CONFIG_PM_DEVICE */ static const struct uart_driver_api uart_cc13xx_cc26xx_driver_api = { .poll_in = uart_cc13xx_cc26xx_poll_in, .poll_out = uart_cc13xx_cc26xx_poll_out, .err_check = uart_cc13xx_cc26xx_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_cc13xx_cc26xx_configure, .config_get = uart_cc13xx_cc26xx_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_cc13xx_cc26xx_fifo_fill, .fifo_read = uart_cc13xx_cc26xx_fifo_read, .irq_tx_enable = uart_cc13xx_cc26xx_irq_tx_enable, .irq_tx_disable = uart_cc13xx_cc26xx_irq_tx_disable, .irq_tx_ready = uart_cc13xx_cc26xx_irq_tx_ready, .irq_rx_enable = uart_cc13xx_cc26xx_irq_rx_enable, .irq_rx_disable = uart_cc13xx_cc26xx_irq_rx_disable, .irq_tx_complete = uart_cc13xx_cc26xx_irq_tx_complete, .irq_rx_ready = uart_cc13xx_cc26xx_irq_rx_ready, .irq_err_enable = uart_cc13xx_cc26xx_irq_err_enable, .irq_err_disable = uart_cc13xx_cc26xx_irq_err_disable, .irq_is_pending = uart_cc13xx_cc26xx_irq_is_pending, .irq_update = uart_cc13xx_cc26xx_irq_update, .irq_callback_set = uart_cc13xx_cc26xx_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_PM #define UART_CC13XX_CC26XX_POWER_UART(n) \ do { \ struct uart_cc13xx_cc26xx_data *dev_data = dev->data; \ \ atomic_clear_bit(dev_data->pm_lock, UART_CC13XX_CC26XX_PM_LOCK_RX); \ atomic_clear_bit(dev_data->pm_lock, UART_CC13XX_CC26XX_PM_LOCK_TX); \ \ /* Set Power dependencies */ \ if (DT_INST_REG_ADDR(n) == 0x40001000) { \ Power_setDependency(PowerCC26XX_PERIPH_UART0); \ } else { \ Power_setDependency(PowerCC26X2_PERIPH_UART1); \ } \ \ /* Register notification function */ \ Power_registerNotify(&dev_data->postNotify, \ PowerCC26XX_AWAKE_STANDBY, \ postNotifyFxn, (uintptr_t)dev); \ } while (false) #else #define 
UART_CC13XX_CC26XX_POWER_UART(n) \ do { \ uint32_t domain, periph; \ \ /* Enable UART power domain */ \ if (DT_INST_REG_ADDR(n) == 0x40001000) { \ domain = PRCM_DOMAIN_SERIAL; \ periph = PRCM_PERIPH_UART0; \ } else { \ domain = PRCM_DOMAIN_PERIPH; \ periph = PRCM_PERIPH_UART1; \ } \ PRCMPowerDomainOn(domain); \ \ /* Enable UART peripherals */ \ PRCMPeripheralRunEnable(periph); \ PRCMPeripheralSleepEnable(periph); \ \ /* Load PRCM settings */ \ PRCMLoadSet(); \ while (!PRCMLoadGet()) { \ continue; \ } \ \ /* UART should not be accessed until power domain is on. */ \ while (PRCMPowerDomainsAllOn(domain) != \ PRCM_DOMAIN_POWER_ON) { \ continue; \ } \ } while (false) #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_CC13XX_CC26XX_IRQ_CFG(n) \ do { \ const struct uart_cc13xx_cc26xx_config *config = \ dev->config; \ \ UARTIntClear(config->reg, UART_INT_RX); \ \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ uart_cc13xx_cc26xx_isr, \ DEVICE_DT_INST_GET(n), \ 0); \ irq_enable(DT_INST_IRQN(n)); \ /* Causes an initial TX ready INT when TX INT enabled */\ UARTCharPutNonBlocking(config->reg, '\0'); \ } while (false) #define UART_CC13XX_CC26XX_INT_FIELDS \ .callback = NULL, \ .user_data = NULL, #else #define UART_CC13XX_CC26XX_IRQ_CFG(n) #define UART_CC13XX_CC26XX_INT_FIELDS #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #define UART_CC13XX_CC26XX_DEVICE_DEFINE(n) \ PM_DEVICE_DT_INST_DEFINE(n, uart_cc13xx_cc26xx_pm_action); \ \ DEVICE_DT_INST_DEFINE(n, \ uart_cc13xx_cc26xx_init_##n, \ PM_DEVICE_DT_INST_GET(n), \ &uart_cc13xx_cc26xx_data_##n, &uart_cc13xx_cc26xx_config_##n,\ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_cc13xx_cc26xx_driver_api) #define UART_CC13XX_CC26XX_INIT_FUNC(n) \ static int uart_cc13xx_cc26xx_init_##n(const struct device *dev) \ { \ struct uart_cc13xx_cc26xx_data *data = dev->data; \ int ret; \ \ UART_CC13XX_CC26XX_POWER_UART(n); \ \ ret = pinctrl_apply_state(data->pcfg, PINCTRL_STATE_DEFAULT); \ if (ret < 0) { \ return ret; \ } \ \ /* 
Configure and enable UART */ \ ret = uart_cc13xx_cc26xx_configure(dev, &data->uart_config);\ \ /* Enable interrupts */ \ UART_CC13XX_CC26XX_IRQ_CFG(n); \ \ return ret; \ } #define UART_CC13XX_CC26XX_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ UART_CC13XX_CC26XX_INIT_FUNC(n); \ \ static const struct uart_cc13xx_cc26xx_config \ uart_cc13xx_cc26xx_config_##n = { \ .reg = DT_INST_REG_ADDR(n), \ .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(n, clocks, \ clock_frequency) \ }; \ \ static struct uart_cc13xx_cc26xx_data \ uart_cc13xx_cc26xx_data_##n = { \ .uart_config = { \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, \ }, \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ UART_CC13XX_CC26XX_INT_FIELDS \ }; \ \ UART_CC13XX_CC26XX_DEVICE_DEFINE(n); DT_INST_FOREACH_STATUS_OKAY(UART_CC13XX_CC26XX_INIT) ```
/content/code_sandbox/drivers/serial/uart_cc13xx_cc26xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,992
```unknown # MCUXpresso SDK IUART config UART_MCUX_IUART bool "MCUX IUART driver" default y depends on DT_HAS_NXP_IMX_IUART_ENABLED depends on CLOCK_CONTROL select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable the MCUX IUART driver. ```
/content/code_sandbox/drivers/serial/Kconfig.mcux_iuart
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```c /** * @brief UART Driver for interacting with host serial ports * * @note Driver can open and send characters to the host serial ports (such as /dev/ttyUSB0 or * /dev/ttyACM0). Only polling Uart API is implemented. Driver can be configured via devicetree, * command line options or at runtime. * * To learn more see Native TTY section at: * path_to_url * or * ${ZEPHYR_BASE}/boards/posix/native_sim/doc/index.rst * */ #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/kernel.h> #include <nsi_tracing.h> #include "cmdline.h" #include "posix_native_task.h" #include "uart_native_tty_bottom.h" #include "nsi_host_trampolines.h" #define WARN(...) nsi_print_warning(__VA_ARGS__) #define ERROR(...) nsi_print_error_and_exit(__VA_ARGS__) #define DT_DRV_COMPAT zephyr_native_tty_uart struct native_tty_data { /* File descriptor used for the tty device. */ int fd; /* Absolute path to the tty device. */ char *serial_port; /* Baudrate set from the command line. If UINT32_MAX, it was not set. */ int cmd_baudrate; /* Serial port set from the command line. If NULL, it was not set. */ char *cmd_serial_port; #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* Emulated tx irq is enabled. */ bool tx_irq_enabled; /* Emulated rx irq is enabled. */ bool rx_irq_enabled; /* IRQ callback */ uart_irq_callback_user_data_t callback; /* IRQ callback data */ void *cb_data; #endif }; struct native_tty_config { struct uart_config uart_config; }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static struct k_thread rx_thread; static K_KERNEL_STACK_DEFINE(rx_stack, CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE); #define NATIVE_TTY_INIT_LEVEL POST_KERNEL #else #define NATIVE_TTY_INIT_LEVEL PRE_KERNEL_1 #endif /** * @brief Convert from uart_config to native_tty_bottom_cfg eqvivalent struct * * @param bottom_cfg * @param cfg * * @return 0 on success, negative errno otherwise. 
*/ static int native_tty_conv_to_bottom_cfg(struct native_tty_bottom_cfg *bottom_cfg, const struct uart_config *cfg) { bottom_cfg->baudrate = cfg->baudrate; switch (cfg->parity) { case UART_CFG_PARITY_NONE: bottom_cfg->parity = NTB_PARITY_NONE; break; case UART_CFG_PARITY_ODD: bottom_cfg->parity = NTB_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: bottom_cfg->parity = NTB_PARITY_EVEN; break; default: return -ENOTSUP; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: bottom_cfg->stop_bits = NTB_STOP_BITS_1; break; case UART_CFG_STOP_BITS_2: bottom_cfg->stop_bits = NTB_STOP_BITS_2; break; default: return -ENOTSUP; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: bottom_cfg->data_bits = NTB_DATA_BITS_5; break; case UART_CFG_DATA_BITS_6: bottom_cfg->data_bits = NTB_DATA_BITS_6; break; case UART_CFG_DATA_BITS_7: bottom_cfg->data_bits = NTB_DATA_BITS_7; break; case UART_CFG_DATA_BITS_8: bottom_cfg->data_bits = NTB_DATA_BITS_8; break; default: return -ENOTSUP; } if (cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { WARN("Could not set flow control, any kind of hw flow control is not supported.\n"); return -ENOTSUP; } bottom_cfg->flow_ctrl = NTB_FLOW_CTRL_NONE; return 0; } /* * @brief Output a character towards the serial port * * @param dev UART device structure. * @param out_char Character to send. */ static void native_tty_uart_poll_out(const struct device *dev, unsigned char out_char) { struct native_tty_data *data = dev->data; int ret = nsi_host_write(data->fd, &out_char, 1); if (ret == -1) { ERROR("Could not write to %s\n", data->serial_port); } } /** * @brief Poll the device for input. * * @param dev UART device structure. * @param p_char Pointer to a character. * * @retval 0 If a character arrived. * @retval -1 If no character was available to read. */ static int native_tty_uart_poll_in(const struct device *dev, unsigned char *p_char) { struct native_tty_data *data = dev->data; return nsi_host_read(data->fd, p_char, 1) > 0 ? 
0 : -1; } static int native_tty_configure(const struct device *dev, const struct uart_config *cfg) { int fd = ((struct native_tty_data *)dev->data)->fd; struct native_tty_bottom_cfg bottom_cfg; int rc = native_tty_conv_to_bottom_cfg(&bottom_cfg, cfg); if (rc) { WARN("Could not convert uart config to native tty bottom cfg\n"); return rc; } return native_tty_configure_bottom(fd, &bottom_cfg); } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int native_tty_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { struct native_tty_data *data = dev->data; return nsi_host_write(data->fd, (void *)tx_data, size); } static int native_tty_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct native_tty_data *data = dev->data; return nsi_host_read(data->fd, rx_data, size); } static int native_tty_uart_irq_tx_ready(const struct device *dev) { struct native_tty_data *data = dev->data; return data->tx_irq_enabled ? 1 : 0; } static int native_tty_uart_irq_tx_complete(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void native_tty_uart_irq_tx_enable(const struct device *dev) { struct native_tty_data *data = dev->data; data->tx_irq_enabled = true; } static void native_tty_uart_irq_tx_disable(const struct device *dev) { struct native_tty_data *data = dev->data; data->tx_irq_enabled = false; } static void native_tty_uart_irq_rx_enable(const struct device *dev) { struct native_tty_data *data = dev->data; data->rx_irq_enabled = true; } static void native_tty_uart_irq_rx_disable(const struct device *dev) { struct native_tty_data *data = dev->data; data->rx_irq_enabled = false; } static int native_tty_uart_irq_rx_ready(const struct device *dev) { struct native_tty_data *data = dev->data; if (data->rx_irq_enabled && native_tty_poll_bottom(data->fd) == 1) { return 1; } return 0; } static int native_tty_uart_irq_is_pending(const struct device *dev) { return native_tty_uart_irq_rx_ready(dev) || native_tty_uart_irq_tx_ready(dev); } 
static int native_tty_uart_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void native_tty_uart_irq_handler(const struct device *dev) { struct native_tty_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } else { WARN("No callback!\n"); } } /* * Emulate uart interrupts using a polling thread */ void native_tty_uart_irq_function(void *arg1, void *arg2, void *arg3) { ARG_UNUSED(arg2); ARG_UNUSED(arg3); struct device *dev = (struct device *)arg1; struct native_tty_data *data = dev->data; while (1) { if (data->rx_irq_enabled) { int ret = native_tty_poll_bottom(data->fd); if (ret == 1) { native_tty_uart_irq_handler(dev); } else if (ret < 0) { WARN("Poll returned error %d\n", ret); } else { k_sleep(K_MSEC(1)); } } if (data->tx_irq_enabled) { native_tty_uart_irq_handler(dev); } if (data->tx_irq_enabled == false && data->rx_irq_enabled == false) { k_sleep(K_MSEC(10)); } } } static void native_tty_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct native_tty_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void native_tty_irq_init(const struct device *dev) { /* Create a thread which will wait for data - replacement for IRQ */ k_thread_create(&rx_thread, rx_stack, K_KERNEL_STACK_SIZEOF(rx_stack), native_tty_uart_irq_function, (void *)dev, NULL, NULL, K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int native_tty_serial_init(const struct device *dev) { struct native_tty_data *data = dev->data; struct uart_config uart_config = ((struct native_tty_config *)dev->config)->uart_config; /* Default value for cmd_serial_port is NULL, this is due to the set 's' type in * command line opts. If it is anything else then it was configured via command * line. 
*/ if (data->cmd_serial_port) { data->serial_port = data->cmd_serial_port; } /* Default value for cmd_baudrate is UINT32_MAX, this is due to the set 'u' type in * command line opts. If it is anything else then it was configured via command * line. */ if (data->cmd_baudrate != UINT32_MAX) { uart_config.baudrate = data->cmd_baudrate; } /* Serial port needs to be set either in the devicetree or provided via command line * opts, if that is not the case, then abort. */ if (!data->serial_port) { ERROR("%s: path to the serial port was not set.\n", dev->name); } /* Try to open a serial port as with read/write access, also prevent serial port * from becoming the controlling terminal. */ data->fd = native_tty_open_tty_bottom(data->serial_port); if (native_tty_configure(dev, &uart_config)) { ERROR("%s: could not configure serial port %s\n", dev->name, data->serial_port); } posix_print_trace("%s connected to the serial port: %s\n", dev->name, data->serial_port); #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* Start irq emulation thread */ native_tty_irq_init(dev); #endif return 0; } static struct uart_driver_api native_tty_uart_driver_api = { .poll_out = native_tty_uart_poll_out, .poll_in = native_tty_uart_poll_in, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = native_tty_configure, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = native_tty_uart_fifo_fill, .fifo_read = native_tty_uart_fifo_read, .irq_tx_enable = native_tty_uart_irq_tx_enable, .irq_tx_disable = native_tty_uart_irq_tx_disable, .irq_tx_ready = native_tty_uart_irq_tx_ready, .irq_tx_complete = native_tty_uart_irq_tx_complete, .irq_rx_enable = native_tty_uart_irq_rx_enable, .irq_rx_disable = native_tty_uart_irq_rx_disable, .irq_rx_ready = native_tty_uart_irq_rx_ready, .irq_is_pending = native_tty_uart_irq_is_pending, .irq_update = native_tty_uart_irq_update, .irq_callback_set = native_tty_uart_irq_callback_set, #endif }; #define NATIVE_TTY_INSTANCE(inst) \ static const struct native_tty_config 
native_tty_##inst##_cfg = { \ .uart_config = \ { \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .baudrate = DT_INST_PROP(inst, current_speed), \ }, \ }; \ \ static struct native_tty_data native_tty_##inst##_data = { \ .serial_port = DT_INST_PROP_OR(inst, serial_port, NULL), \ }; \ \ DEVICE_DT_INST_DEFINE(inst, native_tty_serial_init, NULL, &native_tty_##inst##_data, \ &native_tty_##inst##_cfg, NATIVE_TTY_INIT_LEVEL, 55, \ &native_tty_uart_driver_api); DT_INST_FOREACH_STATUS_OKAY(NATIVE_TTY_INSTANCE); #define INST_NAME(inst) DEVICE_DT_NAME(DT_DRV_INST(inst)) #define NATIVE_TTY_COMMAND_LINE_OPTS(inst) \ { \ .option = INST_NAME(inst) "_port", \ .name = "\"serial_port\"", \ .type = 's', \ .dest = &native_tty_##inst##_data.cmd_serial_port, \ .descript = "Set a serial port for " INST_NAME(inst) " uart device, " \ "overriding the one in devicetree.", \ }, \ { \ .option = INST_NAME(inst) "_baud", \ .name = "baudrate", \ .type = 'u', \ .dest = &native_tty_##inst##_data.cmd_baudrate, \ .descript = "Set a baudrate for " INST_NAME(inst) " device, overriding the " \ "baudrate of " STRINGIFY(DT_INST_PROP(inst, current_speed)) \ "set in the devicetree.", \ }, /** * @brief Adds command line options for setting serial port and baud rate for each uart * device. */ static void native_tty_add_serial_options(void) { static struct args_struct_t opts[] = { DT_INST_FOREACH_STATUS_OKAY(NATIVE_TTY_COMMAND_LINE_OPTS) ARG_TABLE_ENDMARKER}; native_add_command_line_opts(opts); } #define NATIVE_TTY_CLEANUP(inst) \ if (native_tty_##inst##_data.fd != 0) { \ nsi_host_close(native_tty_##inst##_data.fd); \ } /** * @brief Cleans up any open serial ports on the exit. */ static void native_tty_cleanup_uart(void) { DT_INST_FOREACH_STATUS_OKAY(NATIVE_TTY_CLEANUP); } NATIVE_TASK(native_tty_add_serial_options, PRE_BOOT_1, 11); NATIVE_TASK(native_tty_cleanup_uart, ON_EXIT, 99); ```
/content/code_sandbox/drivers/serial/uart_native_tty.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,409
```c /* * */ #define DT_DRV_COMPAT ti_msp432p4xx_uart /* See www.ti.com/lit/pdf/slau356f, Chapter 22, for MSP432P4XX UART info. */ /* include driverlib/gpio.h (from the msp432p4xx SDK) before Z's uart.h so * that the definition of BIT is not overridden */ #include <driverlib/gpio.h> #include <zephyr/drivers/uart.h> /* Driverlib includes */ #include <driverlib/rom.h> #include <driverlib/rom_map.h> #include <driverlib/uart.h> #include <zephyr/irq.h> struct uart_msp432p4xx_config { unsigned long base; }; struct uart_msp432p4xx_dev_data_t { /* UART config structure */ eUSCI_UART_Config uartConfig; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; /**< Callback function pointer */ void *cb_data; /**< Callback function arg */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_msp432p4xx_isr(const struct device *dev); #endif static const struct uart_msp432p4xx_config uart_msp432p4xx_dev_cfg_0 = { .base = DT_INST_REG_ADDR(0), }; static struct uart_msp432p4xx_dev_data_t uart_msp432p4xx_dev_data_0 = { #ifdef CONFIG_UART_INTERRUPT_DRIVEN .cb = NULL, #endif }; static int baudrate_set(eUSCI_UART_Config *config, uint32_t baudrate) { uint16_t prescalar; uint8_t first_mod_reg, second_mod_reg; switch (baudrate) { case 1200: prescalar = 2500U; first_mod_reg = 0U; second_mod_reg = 0U; break; case 2400: prescalar = 1250U; first_mod_reg = 0U; second_mod_reg = 0U; break; case 4800: prescalar = 625U; first_mod_reg = 0U; second_mod_reg = 0U; break; case 9600: prescalar = 312U; first_mod_reg = 8U; second_mod_reg = 0U; break; case 19200: prescalar = 156U; first_mod_reg = 4U; second_mod_reg = 0U; break; case 38400: prescalar = 78U; first_mod_reg = 2U; second_mod_reg = 0U; break; case 57600: prescalar = 52U; first_mod_reg = 1U; second_mod_reg = 37U; break; case 115200: prescalar = 26U; first_mod_reg = 0U; second_mod_reg = 111U; break; case 230400: prescalar = 13U; first_mod_reg = 0U; second_mod_reg = 37U; break; case 460800: 
prescalar = 6U; first_mod_reg = 8U; second_mod_reg = 32U; break; default: return -EINVAL; } config->clockPrescalar = prescalar; config->firstModReg = first_mod_reg; config->secondModReg = second_mod_reg; return 0; } static int uart_msp432p4xx_init(const struct device *dev) { int err; const struct uart_msp432p4xx_config *config = dev->config; eUSCI_UART_Config UartConfig; /* Select P1.2 and P1.3 in UART mode */ MAP_GPIO_setAsPeripheralModuleFunctionInputPin(GPIO_PORT_P1, (GPIO_PIN2 | GPIO_PIN3), GPIO_PRIMARY_MODULE_FUNCTION); UartConfig.selectClockSource = EUSCI_A_UART_CLOCKSOURCE_SMCLK; UartConfig.parity = EUSCI_A_UART_NO_PARITY; UartConfig.msborLsbFirst = EUSCI_A_UART_LSB_FIRST; UartConfig.numberofStopBits = EUSCI_A_UART_ONE_STOP_BIT; UartConfig.uartMode = EUSCI_A_UART_MODE; UartConfig.overSampling = EUSCI_A_UART_OVERSAMPLING_BAUDRATE_GENERATION; /* Baud rate settings calculated for 48MHz */ err = baudrate_set(&UartConfig, DT_INST_PROP(0, current_speed)); if (err) { return err; } /* Configure UART Module */ MAP_UART_initModule(config->base, &UartConfig); /* Enable UART module */ MAP_UART_enableModule(config->base); #ifdef CONFIG_UART_INTERRUPT_DRIVEN IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), uart_msp432p4xx_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); #endif return 0; } static int uart_msp432p4xx_poll_in(const struct device *dev, unsigned char *c) { const struct uart_msp432p4xx_config *config = dev->config; *c = MAP_UART_receiveData(config->base); return 0; } static void uart_msp432p4xx_poll_out(const struct device *dev, unsigned char c) { const struct uart_msp432p4xx_config *config = dev->config; MAP_UART_transmitData(config->base, c); } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_msp432p4xx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_msp432p4xx_config *config = dev->config; unsigned int num_tx = 0U; while ((size - num_tx) > 0) { MAP_UART_transmitData(config->base, 
tx_data[num_tx]); if (MAP_UART_getInterruptStatus(config->base, EUSCI_A_UART_TRANSMIT_COMPLETE_INTERRUPT_FLAG)) { num_tx++; } else { break; } } return (int)num_tx; } static int uart_msp432p4xx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_msp432p4xx_config *config = dev->config; unsigned int num_rx = 0U; while (((size - num_rx) > 0) && MAP_UART_getInterruptStatus( config->base, EUSCI_A_UART_RECEIVE_INTERRUPT_FLAG)) { rx_data[num_rx++] = MAP_UART_receiveData(config->base); } return num_rx; } static void uart_msp432p4xx_irq_tx_enable(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; MAP_UART_enableInterrupt(config->base, EUSCI_A_UART_TRANSMIT_INTERRUPT); } static void uart_msp432p4xx_irq_tx_disable(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; MAP_UART_disableInterrupt(config->base, EUSCI_A_UART_TRANSMIT_INTERRUPT); } static int uart_msp432p4xx_irq_tx_ready(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; unsigned int int_status; int_status = MAP_UART_getInterruptStatus( config->base, EUSCI_A_UART_TRANSMIT_INTERRUPT_FLAG); return (int_status & EUSCI_A_IE_TXIE); } static void uart_msp432p4xx_irq_rx_enable(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; MAP_UART_enableInterrupt(config->base, EUSCI_A_UART_RECEIVE_INTERRUPT); } static void uart_msp432p4xx_irq_rx_disable(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; MAP_UART_disableInterrupt(config->base, EUSCI_A_UART_RECEIVE_INTERRUPT); } static int uart_msp432p4xx_irq_tx_complete(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; return MAP_UART_getInterruptStatus( config->base, EUSCI_A_UART_TRANSMIT_COMPLETE_INTERRUPT_FLAG); } static int uart_msp432p4xx_irq_rx_ready(const struct device *dev) { const struct uart_msp432p4xx_config *config = 
dev->config; unsigned int int_status; int_status = MAP_UART_getInterruptStatus( config->base, EUSCI_A_UART_RECEIVE_INTERRUPT_FLAG); return (int_status & EUSCI_A_IE_RXIE); } static void uart_msp432p4xx_irq_err_enable(const struct device *dev) { /* Not yet used in zephyr */ } static void uart_msp432p4xx_irq_err_disable(const struct device *dev) { /* Not yet used in zephyr */ } static int uart_msp432p4xx_irq_is_pending(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; unsigned int int_status; int_status = MAP_UART_getEnabledInterruptStatus(config->base); return (int_status & (EUSCI_A_IE_TXIE | EUSCI_A_IE_RXIE)); } static int uart_msp432p4xx_irq_update(const struct device *dev) { return 1; } static void uart_msp432p4xx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_msp432p4xx_dev_data_t * const dev_data = dev->data; dev_data->cb = cb; dev_data->cb_data = cb_data; } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. */ static void uart_msp432p4xx_isr(const struct device *dev) { const struct uart_msp432p4xx_config *config = dev->config; struct uart_msp432p4xx_dev_data_t * const dev_data = dev->data; unsigned int int_status; int_status = MAP_UART_getEnabledInterruptStatus(config->base); if (dev_data->cb) { dev_data->cb(dev, dev_data->cb_data); } /* * Clear interrupts only after cb called, as Zephyr UART clients expect * to check interrupt status during the callback. 
*/ MAP_UART_disableInterrupt(config->base, int_status); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_msp432p4xx_driver_api = { .poll_in = uart_msp432p4xx_poll_in, .poll_out = uart_msp432p4xx_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_msp432p4xx_fifo_fill, .fifo_read = uart_msp432p4xx_fifo_read, .irq_tx_enable = uart_msp432p4xx_irq_tx_enable, .irq_tx_disable = uart_msp432p4xx_irq_tx_disable, .irq_tx_ready = uart_msp432p4xx_irq_tx_ready, .irq_rx_enable = uart_msp432p4xx_irq_rx_enable, .irq_rx_disable = uart_msp432p4xx_irq_rx_disable, .irq_tx_complete = uart_msp432p4xx_irq_tx_complete, .irq_rx_ready = uart_msp432p4xx_irq_rx_ready, .irq_err_enable = uart_msp432p4xx_irq_err_enable, .irq_err_disable = uart_msp432p4xx_irq_err_disable, .irq_is_pending = uart_msp432p4xx_irq_is_pending, .irq_update = uart_msp432p4xx_irq_update, .irq_callback_set = uart_msp432p4xx_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; DEVICE_DT_INST_DEFINE(0, uart_msp432p4xx_init, NULL, &uart_msp432p4xx_dev_data_0, &uart_msp432p4xx_dev_cfg_0, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, (void *)&uart_msp432p4xx_driver_api); ```
/content/code_sandbox/drivers/serial/uart_msp432p4xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,706
```unknown config UART_RENESAS_RA bool "Renesas RA Series UART Driver" default y depends on DT_HAS_RENESAS_RA_UART_SCI_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable Renesas RA series UART driver. config UART_SCI_RA bool "Renesas RA SCI UART" default y depends on DT_HAS_RENESAS_RA_SCI_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select SERIAL_SUPPORT_ASYNC select USE_RA_FSP_SCI_UART select USE_RA_FSP_DTC if UART_ASYNC_API help Enable Renesas RA SCI UART Driver. if UART_SCI_RA config UART_RA_SCI_UART_FIFO_ENABLE bool "RA SCI UART FIFO usage enable" default y help Enable RA SCI FIFO endif ```
/content/code_sandbox/drivers/serial/Kconfig.renesas_ra
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
175
```unknown config UART_RZT2M bool "Renesas RZ/T2M UART Driver" default y depends on DT_HAS_RENESAS_RZT2M_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select PINCTRL help Enable Renesas RZ/T2M UART Driver. ```
/content/code_sandbox/drivers/serial/Kconfig.rzt2m
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
71
```c /* * an affiliate of Cypress Semiconductor Corporation * */ /** * @brief UART driver for Infineon CAT1 MCU family. * * Note: * - Uart ASYNC functionality is not implemented in current * version of Uart CAT1 driver. */ #define DT_DRV_COMPAT infineon_cat1_uart #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <cyhal_uart.h> #include <cyhal_utils_impl.h> #include <cyhal_scb_common.h> /* Data structure */ struct ifx_cat1_uart_data { cyhal_uart_t obj; /* UART CYHAL object */ struct uart_config cfg; cyhal_resource_inst_t hw_resource; cyhal_clock_t clock; #if CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_cb; /* Interrupt Callback */ void *irq_cb_data; /* Interrupt Callback Arg */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; /* Device config structure */ struct ifx_cat1_uart_config { const struct pinctrl_dev_config *pcfg; CySCB_Type *reg_addr; struct uart_config dt_cfg; uint8_t irq_priority; }; /* Default Counter configuration structure */ static const cy_stc_scb_uart_config_t _cyhal_uart_default_config = { .uartMode = CY_SCB_UART_STANDARD, .enableMutliProcessorMode = false, .smartCardRetryOnNack = false, .irdaInvertRx = false, .irdaEnableLowPowerReceiver = false, .oversample = 12, .enableMsbFirst = false, .dataWidth = 8UL, .parity = CY_SCB_UART_PARITY_NONE, .stopBits = CY_SCB_UART_STOP_BITS_1, .enableInputFilter = false, .breakWidth = 11UL, .dropOnFrameError = false, .dropOnParityError = false, .receiverAddress = 0x0UL, .receiverAddressMask = 0x0UL, .acceptAddrInFifo = false, .enableCts = false, .ctsPolarity = CY_SCB_UART_ACTIVE_LOW, #if defined(COMPONENT_CAT1A) || defined(COMPONENT_CAT1B) .rtsRxFifoLevel = 20UL, #elif defined(COMPONENT_CAT2) .rtsRxFifoLevel = 3UL, #endif .rtsPolarity = CY_SCB_UART_ACTIVE_LOW, /* Level triggers when at least one element is in FIFO */ .rxFifoTriggerLevel = 0UL, .rxFifoIntEnableMask = 0x0UL, /* Level triggers when half-fifo is half empty */ .txFifoTriggerLevel = (CY_SCB_FIFO_SIZE / 2 - 1), 
.txFifoIntEnableMask = 0x0UL }; /* Helper API */ static cyhal_uart_parity_t _convert_uart_parity_z_to_cyhal(enum uart_config_parity parity) { cyhal_uart_parity_t cyhal_parity; switch (parity) { case UART_CFG_PARITY_NONE: cyhal_parity = CYHAL_UART_PARITY_NONE; break; case UART_CFG_PARITY_ODD: cyhal_parity = CYHAL_UART_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: cyhal_parity = CYHAL_UART_PARITY_EVEN; break; default: cyhal_parity = CYHAL_UART_PARITY_NONE; } return cyhal_parity; } static uint32_t _convert_uart_stop_bits_z_to_cyhal(enum uart_config_stop_bits stop_bits) { uint32_t cyhal_stop_bits; switch (stop_bits) { case UART_CFG_STOP_BITS_1: cyhal_stop_bits = 1u; break; case UART_CFG_STOP_BITS_2: cyhal_stop_bits = 2u; break; default: cyhal_stop_bits = 1u; } return cyhal_stop_bits; } static uint32_t _convert_uart_data_bits_z_to_cyhal(enum uart_config_data_bits data_bits) { uint32_t cyhal_data_bits; switch (data_bits) { case UART_CFG_DATA_BITS_5: cyhal_data_bits = 1u; break; case UART_CFG_DATA_BITS_6: cyhal_data_bits = 6u; break; case UART_CFG_DATA_BITS_7: cyhal_data_bits = 7u; break; case UART_CFG_DATA_BITS_8: cyhal_data_bits = 8u; break; case UART_CFG_DATA_BITS_9: cyhal_data_bits = 9u; break; default: cyhal_data_bits = 1u; } return cyhal_data_bits; } static int32_t _get_hw_block_num(CySCB_Type *reg_addr) { uint32_t i; for (i = 0u; i < _SCB_ARRAY_SIZE; i++) { if (_CYHAL_SCB_BASE_ADDRESSES[i] == reg_addr) { return i; } } return -1; } static int ifx_cat1_uart_poll_in(const struct device *dev, unsigned char *c) { cy_rslt_t rec; struct ifx_cat1_uart_data *data = dev->data; rec = cyhal_uart_getc(&data->obj, c, 0u); return ((rec == CY_SCB_UART_RX_NO_DATA) ? 
-1 : 0); } static void ifx_cat1_uart_poll_out(const struct device *dev, unsigned char c) { struct ifx_cat1_uart_data *data = dev->data; (void) cyhal_uart_putc(&data->obj, (uint32_t)c); } static int ifx_cat1_uart_err_check(const struct device *dev) { struct ifx_cat1_uart_data *data = dev->data; uint32_t status = Cy_SCB_UART_GetRxFifoStatus(data->obj.base); int errors = 0; if (status & CY_SCB_UART_RX_OVERFLOW) { errors |= UART_ERROR_OVERRUN; } if (status & CY_SCB_UART_RX_ERR_PARITY) { errors |= UART_ERROR_PARITY; } if (status & CY_SCB_UART_RX_ERR_FRAME) { errors |= UART_ERROR_FRAMING; } return errors; } static int ifx_cat1_uart_configure(const struct device *dev, const struct uart_config *cfg) { __ASSERT_NO_MSG(cfg != NULL); cy_rslt_t result; struct ifx_cat1_uart_data *data = dev->data; cyhal_uart_cfg_t uart_cfg = { .data_bits = _convert_uart_data_bits_z_to_cyhal(cfg->data_bits), .stop_bits = _convert_uart_stop_bits_z_to_cyhal(cfg->stop_bits), .parity = _convert_uart_parity_z_to_cyhal(cfg->parity) }; /* Store Uart Zephyr configuration (uart config) into data structure */ data->cfg = *cfg; /* Configure parity, data and stop bits */ result = cyhal_uart_configure(&data->obj, &uart_cfg); /* Configure the baud rate */ if (result == CY_RSLT_SUCCESS) { result = cyhal_uart_set_baud(&data->obj, cfg->baudrate, NULL); } /* Set RTS/CTS flow control pins as NC so cyhal will skip initialization */ data->obj.pin_cts = NC; data->obj.pin_rts = NC; /* Enable RTS/CTS flow control */ if ((result == CY_RSLT_SUCCESS) && cfg->flow_ctrl) { Cy_SCB_UART_EnableCts(data->obj.base); } return (result == CY_RSLT_SUCCESS) ? 
0 : -ENOTSUP; }; static int ifx_cat1_uart_config_get(const struct device *dev, struct uart_config *cfg) { ARG_UNUSED(dev); struct ifx_cat1_uart_data *const data = dev->data; if (cfg == NULL) { return -EINVAL; } *cfg = data->cfg; return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* Uart event callback for Interrupt driven mode */ static void _uart_event_callback_irq_mode(void *arg, cyhal_uart_event_t event) { ARG_UNUSED(event); const struct device *dev = (const struct device *) arg; struct ifx_cat1_uart_data *const data = dev->data; if (data->irq_cb != NULL) { data->irq_cb(dev, data->irq_cb_data); } } /* Fill FIFO with data */ static int ifx_cat1_uart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { struct ifx_cat1_uart_data *const data = dev->data; size_t _size = (size_t) size; (void)cyhal_uart_write(&data->obj, (uint8_t *) tx_data, &_size); return (int) _size; } /* Read data from FIFO */ static int ifx_cat1_uart_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct ifx_cat1_uart_data *const data = dev->data; size_t _size = (size_t) size; (void)cyhal_uart_read(&data->obj, rx_data, &_size); return (int) _size; } /* Enable TX interrupt */ static void ifx_cat1_uart_irq_tx_enable(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cyhal_uart_enable_event(&data->obj, (cyhal_uart_event_t) CYHAL_UART_IRQ_TX_EMPTY, config->irq_priority, 1); } /* Disable TX interrupt */ static void ifx_cat1_uart_irq_tx_disable(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cyhal_uart_enable_event(&data->obj, (cyhal_uart_event_t) CYHAL_UART_IRQ_TX_EMPTY, config->irq_priority, 0); } /* Check if UART TX buffer can accept a new char */ static int ifx_cat1_uart_irq_tx_ready(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; uint32_t mask = 
Cy_SCB_GetTxInterruptStatusMasked(data->obj.base); return (((mask & (CY_SCB_UART_TX_NOT_FULL | SCB_INTR_TX_EMPTY_Msk)) != 0u) ? 1 : 0); } /* Check if UART TX block finished transmission */ static int ifx_cat1_uart_irq_tx_complete(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; return (int) !(cyhal_uart_is_tx_active(&data->obj)); } /* Enable RX interrupt */ static void ifx_cat1_uart_irq_rx_enable(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cyhal_uart_enable_event(&data->obj, (cyhal_uart_event_t) CYHAL_UART_IRQ_RX_NOT_EMPTY, config->irq_priority, 1); } /* Disable TX interrupt */ static void ifx_cat1_uart_irq_rx_disable(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cyhal_uart_enable_event(&data->obj, (cyhal_uart_event_t) CYHAL_UART_IRQ_RX_NOT_EMPTY, config->irq_priority, 0); } /* Check if UART RX buffer has a received char */ static int ifx_cat1_uart_irq_rx_ready(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; return cyhal_uart_readable(&data->obj) ? 
1 : 0; } /* Enable Error interrupts */ static void ifx_cat1_uart_irq_err_enable(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cyhal_uart_enable_event(&data->obj, (cyhal_uart_event_t) (CYHAL_UART_IRQ_TX_ERROR | CYHAL_UART_IRQ_RX_ERROR), config->irq_priority, 1); } /* Disable Error interrupts */ static void ifx_cat1_uart_irq_err_disable(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cyhal_uart_enable_event(&data->obj, (cyhal_uart_event_t) (CYHAL_UART_IRQ_TX_ERROR | CYHAL_UART_IRQ_RX_ERROR), config->irq_priority, 0); } /* Check if any IRQs is pending */ static int ifx_cat1_uart_irq_is_pending(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; uint32_t intcause = Cy_SCB_GetInterruptCause(data->obj.base); return (int) (intcause & (CY_SCB_TX_INTR | CY_SCB_RX_INTR)); } /* Start processing interrupts in ISR. * This function should be called the first thing in the ISR. Calling * uart_irq_rx_ready(), uart_irq_tx_ready(), uart_irq_tx_complete() * allowed only after this. 
*/ static int ifx_cat1_uart_irq_update(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; int status = 1; if (((ifx_cat1_uart_irq_is_pending(dev) & CY_SCB_RX_INTR) != 0u) && (Cy_SCB_UART_GetNumInRxFifo(data->obj.base) == 0u)) { status = 0; } return status; } static void ifx_cat1_uart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct ifx_cat1_uart_data *data = dev->data; cyhal_uart_t *uart_obj = &data->obj; /* Store user callback info */ data->irq_cb = cb; data->irq_cb_data = cb_data; /* Register a uart general callback handler */ cyhal_uart_register_callback(uart_obj, _uart_event_callback_irq_mode, (void *) dev); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int ifx_cat1_uart_init(const struct device *dev) { struct ifx_cat1_uart_data *const data = dev->data; const struct ifx_cat1_uart_config *const config = dev->config; cy_rslt_t result; int ret; cyhal_uart_configurator_t uart_init_cfg = { .resource = &data->hw_resource, .config = &_cyhal_uart_default_config, .clock = &data->clock, .gpios = { .pin_tx = NC, .pin_rts = NC, .pin_cts = NC, }, }; /* Dedicate SCB HW resource */ data->hw_resource.type = CYHAL_RSC_SCB; data->hw_resource.block_num = _get_hw_block_num(config->reg_addr); /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Allocates clock for selected IP block */ result = _cyhal_utils_allocate_clock(&data->clock, &data->hw_resource, CYHAL_CLOCK_BLOCK_PERIPHERAL_16BIT, true); if (result != CY_RSLT_SUCCESS) { return -ENOTSUP; } /* Assigns a programmable divider to a selected IP block */ en_clk_dst_t clk_idx = _cyhal_scb_get_clock_index(uart_init_cfg.resource->block_num); result = _cyhal_utils_peri_pclk_assign_divider(clk_idx, uart_init_cfg.clock); if (result != CY_RSLT_SUCCESS) { return -ENOTSUP; } /* Initialize the UART peripheral */ result = cyhal_uart_init_cfg(&data->obj, 
&uart_init_cfg); if (result != CY_RSLT_SUCCESS) { return -ENOTSUP; } /* Perform initial Uart configuration */ data->obj.is_clock_owned = true; ret = ifx_cat1_uart_configure(dev, &config->dt_cfg); return ret; } static const struct uart_driver_api ifx_cat1_uart_driver_api = { .poll_in = ifx_cat1_uart_poll_in, .poll_out = ifx_cat1_uart_poll_out, .err_check = ifx_cat1_uart_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = ifx_cat1_uart_configure, .config_get = ifx_cat1_uart_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = ifx_cat1_uart_fifo_fill, .fifo_read = ifx_cat1_uart_fifo_read, .irq_tx_enable = ifx_cat1_uart_irq_tx_enable, .irq_tx_disable = ifx_cat1_uart_irq_tx_disable, .irq_tx_ready = ifx_cat1_uart_irq_tx_ready, .irq_rx_enable = ifx_cat1_uart_irq_rx_enable, .irq_rx_disable = ifx_cat1_uart_irq_rx_disable, .irq_tx_complete = ifx_cat1_uart_irq_tx_complete, .irq_rx_ready = ifx_cat1_uart_irq_rx_ready, .irq_err_enable = ifx_cat1_uart_irq_err_enable, .irq_err_disable = ifx_cat1_uart_irq_err_disable, .irq_is_pending = ifx_cat1_uart_irq_is_pending, .irq_update = ifx_cat1_uart_irq_update, .irq_callback_set = ifx_cat1_uart_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define INFINEON_CAT1_UART_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct ifx_cat1_uart_data ifx_cat1_uart##n##_data; \ \ static struct ifx_cat1_uart_config ifx_cat1_uart##n##_cfg = { \ .dt_cfg.baudrate = DT_INST_PROP(n, current_speed), \ .dt_cfg.parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE), \ .dt_cfg.stop_bits = DT_INST_ENUM_IDX_OR(n, stop_bits, UART_CFG_STOP_BITS_1), \ .dt_cfg.data_bits = DT_INST_ENUM_IDX_OR(n, data_bits, UART_CFG_DATA_BITS_8), \ .dt_cfg.flow_ctrl = DT_INST_PROP(n, hw_flow_control), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .reg_addr = (CySCB_Type *)DT_INST_REG_ADDR(n), \ .irq_priority = DT_INST_IRQ(n, priority) \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ ifx_cat1_uart_init, NULL, 
\ &ifx_cat1_uart##n##_data, \ &ifx_cat1_uart##n##_cfg, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &ifx_cat1_uart_driver_api); DT_INST_FOREACH_STATUS_OKAY(INFINEON_CAT1_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_ifx_cat1.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,208
```c /* * Author: Parthiban Nallathambi <parthiban@linumiz.com> * */ #define DT_DRV_COMPAT infineon_xmc4xxx_uart #include <xmc_uart.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #define MAX_FIFO_SIZE 64 #define USIC_IRQ_MIN 84 #define USIC_IRQ_MAX 101 #define IRQS_PER_USIC 6 #define CURRENT_BUFFER 0 #define NEXT_BUFFER 1 struct uart_xmc4xxx_config { XMC_USIC_CH_t *uart; const struct pinctrl_dev_config *pcfg; uint8_t input_src; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) uart_irq_config_func_t irq_config_func; uint8_t irq_num_tx; uint8_t irq_num_rx; #endif uint8_t fifo_start_offset; uint8_t fifo_tx_size; uint8_t fifo_rx_size; }; #ifdef CONFIG_UART_ASYNC_API struct uart_dma_stream { const struct device *dma_dev; uint32_t dma_channel; struct dma_config dma_cfg; struct dma_block_config blk_cfg; uint8_t *buffer; size_t buffer_len; size_t offset; size_t counter; int32_t timeout; struct k_work_delayable timeout_work; }; #endif struct uart_xmc4xxx_data { XMC_UART_CH_CONFIG_t config; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) uart_irq_callback_user_data_t user_cb; void *user_data; #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) uint8_t service_request_tx; uint8_t service_request_rx; #endif #if defined(CONFIG_UART_ASYNC_API) const struct device *dev; uart_callback_t async_cb; void *async_user_data; struct uart_dma_stream dma_rx; struct uart_dma_stream dma_tx; uint8_t *rx_next_buffer; size_t rx_next_buffer_len; #endif }; static int uart_xmc4xxx_poll_in(const struct device *dev, unsigned char *c) { const struct uart_xmc4xxx_config *config = dev->config; bool fifo_empty; if (config->fifo_rx_size > 0) { fifo_empty = XMC_USIC_CH_RXFIFO_IsEmpty(config->uart); } else { fifo_empty = !XMC_USIC_CH_GetReceiveBufferStatus(config->uart); } if (fifo_empty) { return -1; } *c = (unsigned 
char)XMC_UART_CH_GetReceivedData(config->uart); return 0; } static void uart_xmc4xxx_poll_out(const struct device *dev, unsigned char c) { const struct uart_xmc4xxx_config *config = dev->config; /* XMC_UART_CH_Transmit() only blocks for UART to finish transmitting */ /* when fifo is not used */ while (config->fifo_tx_size > 0 && XMC_USIC_CH_TXFIFO_IsFull(config->uart)) { } XMC_UART_CH_Transmit(config->uart, c); } #if defined(CONFIG_UART_ASYNC_API) static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout) { if ((timeout != SYS_FOREVER_US) && (timeout != 0)) { k_work_reschedule(work, K_USEC(timeout)); } } static void disable_tx_events(const struct uart_xmc4xxx_config *config) { if (config->fifo_tx_size > 0) { XMC_USIC_CH_TXFIFO_DisableEvent(config->uart, XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD); } else { XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_BUFFER); } } #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) static void enable_tx_events(const struct uart_xmc4xxx_config *config) { if (config->fifo_tx_size > 0) { /* wait till the fifo has at least 1 byte free */ while (XMC_USIC_CH_TXFIFO_IsFull(config->uart)) { } XMC_USIC_CH_TXFIFO_EnableEvent(config->uart, XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD); } else { XMC_USIC_CH_EnableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_BUFFER); } } #define NVIC_ICPR_BASE 0xe000e280u static void clear_pending_interrupt(int irq_num) { uint32_t *clearpend = (uint32_t *)(NVIC_ICPR_BASE) + irq_num / 32; irq_num = irq_num & 0x1f; /* writing zero has not effect, i.e. 
we only clear irq_num */ *clearpend = BIT(irq_num); } static void uart_xmc4xxx_isr(void *arg) { const struct device *dev = arg; struct uart_xmc4xxx_data *data = dev->data; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) if (data->user_cb) { data->user_cb(dev, data->user_data); } #endif #if defined(CONFIG_UART_ASYNC_API) const struct uart_xmc4xxx_config *config = dev->config; unsigned int key = irq_lock(); if (data->dma_rx.buffer_len) { /* We only need to trigger this irq once to start timer */ /* event. Everything else is handled by the timer callback and dma_rx_callback. */ /* Note that we can't simply disable the event that triggers this irq, since the */ /* same service_request gets routed to the dma. Thus we disable the nvic irq */ /* below. Any pending irq must be cleared before irq_enable() is called. */ irq_disable(config->irq_num_rx); async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout); } irq_unlock(key); #endif } static void uart_xmc4xxx_configure_service_requests(const struct device *dev) { struct uart_xmc4xxx_data *data = dev->data; const struct uart_xmc4xxx_config *config = dev->config; __ASSERT(config->irq_num_tx >= USIC_IRQ_MIN && config->irq_num_tx <= USIC_IRQ_MAX, "Invalid irq number\n"); data->service_request_tx = (config->irq_num_tx - USIC_IRQ_MIN) % IRQS_PER_USIC; if (config->fifo_tx_size > 0) { XMC_USIC_CH_TXFIFO_SetInterruptNodePointer( config->uart, XMC_USIC_CH_TXFIFO_INTERRUPT_NODE_POINTER_STANDARD, data->service_request_tx); } else { XMC_USIC_CH_SetInterruptNodePointer( config->uart, XMC_USIC_CH_INTERRUPT_NODE_POINTER_TRANSMIT_BUFFER, data->service_request_tx); } __ASSERT(config->irq_num_rx >= USIC_IRQ_MIN && config->irq_num_rx <= USIC_IRQ_MAX, "Invalid irq number\n"); data->service_request_rx = (config->irq_num_rx - USIC_IRQ_MIN) % IRQS_PER_USIC; if (config->fifo_rx_size > 0) { XMC_USIC_CH_RXFIFO_SetInterruptNodePointer( config->uart, XMC_USIC_CH_RXFIFO_INTERRUPT_NODE_POINTER_STANDARD, data->service_request_rx); 
XMC_USIC_CH_RXFIFO_SetInterruptNodePointer( config->uart, XMC_USIC_CH_RXFIFO_INTERRUPT_NODE_POINTER_ALTERNATE, data->service_request_rx); } else { XMC_USIC_CH_SetInterruptNodePointer(config->uart, XMC_USIC_CH_INTERRUPT_NODE_POINTER_RECEIVE, data->service_request_rx); XMC_USIC_CH_SetInterruptNodePointer( config->uart, XMC_USIC_CH_INTERRUPT_NODE_POINTER_ALTERNATE_RECEIVE, data->service_request_rx); } } static int uart_xmc4xxx_irq_tx_ready(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; if (config->fifo_tx_size > 0) { return !XMC_USIC_CH_TXFIFO_IsFull(config->uart); } else { return XMC_USIC_CH_GetTransmitBufferStatus(config->uart) == XMC_USIC_CH_TBUF_STATUS_IDLE; } } static void uart_xmc4xxx_irq_rx_disable(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; if (config->fifo_rx_size > 0) { XMC_USIC_CH_RXFIFO_DisableEvent(config->uart, XMC_USIC_CH_RXFIFO_EVENT_CONF_STANDARD | XMC_USIC_CH_RXFIFO_EVENT_CONF_ALTERNATE); } else { XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_STANDARD_RECEIVE | XMC_USIC_CH_EVENT_ALTERNATIVE_RECEIVE); } } static void uart_xmc4xxx_irq_rx_enable(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; uint32_t recv_status; /* re-enable the IRQ as it may have been disabled during async_rx */ clear_pending_interrupt(config->irq_num_rx); irq_enable(config->irq_num_rx); if (config->fifo_rx_size > 0) { XMC_USIC_CH_RXFIFO_Flush(config->uart); XMC_USIC_CH_RXFIFO_SetSizeTriggerLimit(config->uart, config->fifo_rx_size, 0); #if CONFIG_UART_XMC4XXX_RX_FIFO_INT_TRIGGER config->uart->RBCTR |= BIT(USIC_CH_RBCTR_SRBTEN_Pos); #endif XMC_USIC_CH_RXFIFO_EnableEvent(config->uart, XMC_USIC_CH_RXFIFO_EVENT_CONF_STANDARD | XMC_USIC_CH_RXFIFO_EVENT_CONF_ALTERNATE); } else { /* flush out any received bytes while the uart rx irq was disabled */ recv_status = XMC_USIC_CH_GetReceiveBufferStatus(config->uart); if (recv_status & USIC_CH_RBUFSR_RDV0_Msk) { 
XMC_UART_CH_GetReceivedData(config->uart); } if (recv_status & USIC_CH_RBUFSR_RDV1_Msk) { XMC_UART_CH_GetReceivedData(config->uart); } XMC_USIC_CH_EnableEvent(config->uart, XMC_USIC_CH_EVENT_STANDARD_RECEIVE | XMC_USIC_CH_EVENT_ALTERNATIVE_RECEIVE); } } #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) static int uart_xmc4xxx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct uart_xmc4xxx_config *config = dev->config; int i = 0; for (i = 0; i < len; i++) { bool fifo_full; XMC_UART_CH_Transmit(config->uart, tx_data[i]); if (config->fifo_tx_size == 0) { return 1; } fifo_full = XMC_USIC_CH_TXFIFO_IsFull(config->uart); if (fifo_full) { return i + 1; } } return i; } static int uart_xmc4xxx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_xmc4xxx_config *config = dev->config; int i; for (i = 0; i < size; i++) { bool fifo_empty; if (config->fifo_rx_size > 0) { fifo_empty = XMC_USIC_CH_RXFIFO_IsEmpty(config->uart); } else { fifo_empty = !XMC_USIC_CH_GetReceiveBufferStatus(config->uart); } if (fifo_empty) { break; } rx_data[i] = XMC_UART_CH_GetReceivedData(config->uart); } return i; } static void uart_xmc4xxx_irq_tx_enable(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; const struct uart_xmc4xxx_data *data = dev->data; clear_pending_interrupt(config->irq_num_tx); irq_enable(config->irq_num_tx); enable_tx_events(config); XMC_USIC_CH_TriggerServiceRequest(config->uart, data->service_request_tx); } static void uart_xmc4xxx_irq_tx_disable(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; if (config->fifo_tx_size > 0) { XMC_USIC_CH_TXFIFO_DisableEvent(config->uart, XMC_USIC_CH_TXFIFO_EVENT_CONF_STANDARD); } else { XMC_USIC_CH_DisableEvent(config->uart, XMC_USIC_CH_EVENT_TRANSMIT_BUFFER); } } static int uart_xmc4xxx_irq_rx_ready(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; if (config->fifo_rx_size > 
0) { return !XMC_USIC_CH_RXFIFO_IsEmpty(config->uart); } else { return XMC_USIC_CH_GetReceiveBufferStatus(config->uart); } } static void uart_xmc4xxx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct uart_xmc4xxx_data *data = dev->data; data->user_cb = cb; data->user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->async_cb = NULL; data->async_user_data = NULL; #endif } #define NVIC_ISPR_BASE 0xe000e200u static int uart_xmc4xxx_irq_is_pending(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; uint32_t irq_num_tx = config->irq_num_tx; uint32_t irq_num_rx = config->irq_num_rx; bool tx_pending; bool rx_pending; uint32_t setpend; /* the NVIC_ISPR_BASE address stores info which interrupts are pending */ /* bit 0 -> irq 0, bit 1 -> irq 1,... */ setpend = *((uint32_t *)(NVIC_ISPR_BASE) + irq_num_tx / 32); irq_num_tx = irq_num_tx & 0x1f; /* take modulo 32 */ tx_pending = setpend & BIT(irq_num_tx); setpend = *((uint32_t *)(NVIC_ISPR_BASE) + irq_num_rx / 32); irq_num_rx = irq_num_rx & 0x1f; /* take modulo 32 */ rx_pending = setpend & BIT(irq_num_rx); return tx_pending || rx_pending; } #endif #if defined(CONFIG_UART_ASYNC_API) static inline void async_evt_rx_buf_request(struct uart_xmc4xxx_data *data) { struct uart_event evt = {.type = UART_RX_BUF_REQUEST}; if (data->async_cb) { data->async_cb(data->dev, &evt, data->async_user_data); } } static inline void async_evt_rx_release_buffer(struct uart_xmc4xxx_data *data, int buffer_type) { struct uart_event event = {.type = UART_RX_BUF_RELEASED}; if (buffer_type == NEXT_BUFFER && !data->rx_next_buffer) { return; } if (buffer_type == CURRENT_BUFFER && !data->dma_rx.buffer) { return; } if (buffer_type == NEXT_BUFFER) { event.data.rx_buf.buf = data->rx_next_buffer; data->rx_next_buffer = NULL; data->rx_next_buffer_len = 0; } else { event.data.rx_buf.buf = data->dma_rx.buffer; data->dma_rx.buffer = NULL; data->dma_rx.buffer_len 
= 0; } if (data->async_cb) { data->async_cb(data->dev, &event, data->async_user_data); } } static inline void async_evt_rx_disabled(struct uart_xmc4xxx_data *data) { struct uart_event event = {.type = UART_RX_DISABLED}; data->dma_rx.buffer = NULL; data->dma_rx.buffer_len = 0; data->dma_rx.offset = 0; data->dma_rx.counter = 0; if (data->async_cb) { data->async_cb(data->dev, &event, data->async_user_data); } } static inline void async_evt_rx_rdy(struct uart_xmc4xxx_data *data) { struct uart_event event = {.type = UART_RX_RDY, .data.rx.buf = (uint8_t *)data->dma_rx.buffer, .data.rx.len = data->dma_rx.counter - data->dma_rx.offset, .data.rx.offset = data->dma_rx.offset}; data->dma_rx.offset = data->dma_rx.counter; if (event.data.rx.len > 0 && data->async_cb) { data->async_cb(data->dev, &event, data->async_user_data); } } static inline void async_evt_tx_done(struct uart_xmc4xxx_data *data) { struct uart_event event = {.type = UART_TX_DONE, .data.tx.buf = data->dma_tx.buffer, .data.tx.len = data->dma_tx.counter}; data->dma_tx.buffer = NULL; data->dma_tx.buffer_len = 0; data->dma_tx.counter = 0; if (data->async_cb) { data->async_cb(data->dev, &event, data->async_user_data); } } static inline void async_evt_tx_abort(struct uart_xmc4xxx_data *data) { struct uart_event event = {.type = UART_TX_ABORTED, .data.tx.buf = data->dma_tx.buffer, .data.tx.len = data->dma_tx.counter}; data->dma_tx.buffer = NULL; data->dma_tx.buffer_len = 0; data->dma_tx.counter = 0; if (data->async_cb) { data->async_cb(data->dev, &event, data->async_user_data); } } static void uart_xmc4xxx_async_rx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_dma_stream *rx_stream = CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work); struct uart_xmc4xxx_data *data = CONTAINER_OF(rx_stream, struct uart_xmc4xxx_data, dma_rx); struct dma_status stat; unsigned int key = irq_lock(); if (data->dma_rx.buffer_len == 0) { irq_unlock(key); return; } if 
(dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) { size_t rx_rcv_len = data->dma_rx.buffer_len - stat.pending_length; if (rx_rcv_len > data->dma_rx.offset) { data->dma_rx.counter = rx_rcv_len; async_evt_rx_rdy(data); } } irq_unlock(key); async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout); } static int uart_xmc4xxx_async_tx_abort(const struct device *dev) { struct uart_xmc4xxx_data *data = dev->data; struct dma_status stat; size_t tx_buffer_len; unsigned int key = irq_lock(); k_work_cancel_delayable(&data->dma_tx.timeout_work); tx_buffer_len = data->dma_tx.buffer_len; if (tx_buffer_len == 0) { irq_unlock(key); return -EINVAL; } if (!dma_get_status(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &stat)) { data->dma_tx.counter = tx_buffer_len - stat.pending_length; } dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel); disable_tx_events(dev->config); async_evt_tx_abort(data); irq_unlock(key); return 0; } static void uart_xmc4xxx_async_tx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_dma_stream *tx_stream = CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work); struct uart_xmc4xxx_data *data = CONTAINER_OF(tx_stream, struct uart_xmc4xxx_data, dma_tx); uart_xmc4xxx_async_tx_abort(data->dev); } static int uart_xmc4xxx_async_init(const struct device *dev) { const struct uart_xmc4xxx_config *config = dev->config; struct uart_xmc4xxx_data *data = dev->data; data->dev = dev; if (data->dma_rx.dma_dev != NULL) { if (!device_is_ready(data->dma_rx.dma_dev)) { return -ENODEV; } k_work_init_delayable(&data->dma_rx.timeout_work, uart_xmc4xxx_async_rx_timeout); if (config->fifo_rx_size > 0) { data->dma_rx.blk_cfg.source_address = (uint32_t)&config->uart->OUTR; } else { data->dma_rx.blk_cfg.source_address = (uint32_t)&config->uart->RBUF; } data->dma_rx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; 
data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg; data->dma_rx.dma_cfg.user_data = (void *)dev; } if (data->dma_tx.dma_dev != NULL) { if (!device_is_ready(data->dma_tx.dma_dev)) { return -ENODEV; } k_work_init_delayable(&data->dma_tx.timeout_work, uart_xmc4xxx_async_tx_timeout); if (config->fifo_tx_size > 0) { data->dma_tx.blk_cfg.dest_address = (uint32_t)&config->uart->IN[0]; } else { data->dma_tx.blk_cfg.dest_address = (uint32_t)&config->uart->TBUF[0]; } data->dma_tx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT; data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg; data->dma_tx.dma_cfg.user_data = (void *)dev; } return 0; } static int uart_xmc4xxx_async_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct uart_xmc4xxx_data *data = dev->data; data->async_cb = callback; data->async_user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->user_cb = NULL; data->user_data = NULL; #endif return 0; } static int uart_xmc4xxx_async_tx(const struct device *dev, const uint8_t *tx_data, size_t buf_size, int32_t timeout) { struct uart_xmc4xxx_data *data = dev->data; const struct uart_xmc4xxx_config *config = dev->config; int ret; /* Assume threads are pre-emptive so this call cannot be interrupted */ /* by uart_xmc4xxx_async_tx_abort */ if (data->dma_tx.dma_dev == NULL) { return -ENODEV; } if (tx_data == NULL || buf_size == 0) { return -EINVAL; } /* No need to lock irq. 
Isr uart_xmc4xxx_dma_tx_cb() will only trigger if */ /* dma_tx.buffer_len != 0 */ if (data->dma_tx.buffer_len != 0) { return -EBUSY; } data->dma_tx.buffer = (uint8_t *)tx_data; data->dma_tx.buffer_len = buf_size; data->dma_tx.timeout = timeout; /* set source address */ data->dma_tx.blk_cfg.source_address = (uint32_t)data->dma_tx.buffer; data->dma_tx.blk_cfg.block_size = data->dma_tx.buffer_len; ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &data->dma_tx.dma_cfg); if (ret < 0) { return ret; } /* make sure the tx is not transmitting */ while (!uart_xmc4xxx_irq_tx_ready(dev)) { }; /* Tx irq is not used in async mode so disable it */ irq_disable(config->irq_num_tx); enable_tx_events(config); XMC_USIC_CH_TriggerServiceRequest(config->uart, data->service_request_tx); async_timer_start(&data->dma_tx.timeout_work, data->dma_tx.timeout); return dma_start(data->dma_tx.dma_dev, data->dma_tx.dma_channel); } static int uart_xmc4xxx_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { struct uart_xmc4xxx_data *data = dev->data; int ret; if (data->dma_rx.dma_dev == NULL) { return -ENODEV; } if (data->dma_rx.buffer_len != 0) { return -EBUSY; } uart_xmc4xxx_irq_rx_disable(dev); data->dma_rx.buffer = buf; data->dma_rx.buffer_len = len; data->dma_rx.timeout = timeout; data->dma_rx.blk_cfg.dest_address = (uint32_t)data->dma_rx.buffer; data->dma_rx.blk_cfg.block_size = data->dma_rx.buffer_len; ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &data->dma_rx.dma_cfg); if (ret < 0) { return ret; } /* Request buffers before enabling rx. It's unlikely, but we may not */ /* request a new buffer in time (for example if receive buffer size is one byte). 
*/ async_evt_rx_buf_request(data); uart_xmc4xxx_irq_rx_enable(dev); return dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel); } static void uart_xmc4xxx_dma_rx_cb(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *dev_uart = user_data; struct uart_xmc4xxx_data *data = dev_uart->data; unsigned int key; int ret; if (status != 0) { return; } __ASSERT_NO_MSG(channel == data->dma_rx.dma_channel); key = irq_lock(); k_work_cancel_delayable(&data->dma_rx.timeout_work); if (data->dma_rx.buffer_len == 0) { goto done; } data->dma_rx.counter = data->dma_rx.buffer_len; async_evt_rx_rdy(data); async_evt_rx_release_buffer(data, CURRENT_BUFFER); if (!data->rx_next_buffer) { dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel); uart_xmc4xxx_irq_rx_disable(dev_uart); async_evt_rx_disabled(data); goto done; } data->dma_rx.buffer = data->rx_next_buffer; data->dma_rx.buffer_len = data->rx_next_buffer_len; data->dma_rx.offset = 0; data->dma_rx.counter = 0; data->rx_next_buffer = NULL; data->rx_next_buffer_len = 0; ret = dma_reload(data->dma_rx.dma_dev, data->dma_rx.dma_channel, data->dma_rx.blk_cfg.source_address, (uint32_t)data->dma_rx.buffer, data->dma_rx.buffer_len); if (ret < 0) { dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel); uart_xmc4xxx_irq_rx_disable(dev_uart); async_evt_rx_release_buffer(data, CURRENT_BUFFER); async_evt_rx_disabled(data); goto done; } dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel); async_evt_rx_buf_request(data); async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout); done: irq_unlock(key); } static int uart_xmc4xxx_async_rx_disable(const struct device *dev) { struct uart_xmc4xxx_data *data = dev->data; struct dma_status stat; unsigned int key; k_work_cancel_delayable(&data->dma_rx.timeout_work); key = irq_lock(); if (data->dma_rx.buffer_len == 0) { __ASSERT_NO_MSG(data->dma_rx.buffer == NULL); irq_unlock(key); return -EINVAL; } dma_stop(data->dma_rx.dma_dev, 
data->dma_rx.dma_channel); uart_xmc4xxx_irq_rx_disable(dev); if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) { size_t rx_rcv_len = data->dma_rx.buffer_len - stat.pending_length; if (rx_rcv_len > data->dma_rx.offset) { data->dma_rx.counter = rx_rcv_len; async_evt_rx_rdy(data); } } async_evt_rx_release_buffer(data, CURRENT_BUFFER); async_evt_rx_release_buffer(data, NEXT_BUFFER); async_evt_rx_disabled(data); irq_unlock(key); return 0; } static void uart_xmc4xxx_dma_tx_cb(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *dev_uart = user_data; struct uart_xmc4xxx_data *data = dev_uart->data; size_t tx_buffer_len = data->dma_tx.buffer_len; struct dma_status stat; if (status != 0) { return; } __ASSERT_NO_MSG(channel == data->dma_tx.dma_channel); k_work_cancel_delayable(&data->dma_tx.timeout_work); if (tx_buffer_len == 0) { return; } if (!dma_get_status(data->dma_tx.dma_dev, channel, &stat)) { data->dma_tx.counter = tx_buffer_len - stat.pending_length; } async_evt_tx_done(data); /* if the callback doesn't doesn't do a chained uart_tx write, then stop the dma */ if (data->dma_tx.buffer == NULL) { dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel); disable_tx_events(dev_uart->config); } } static int uart_xmc4xxx_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uart_xmc4xxx_data *data = dev->data; unsigned int key; int ret = 0; key = irq_lock(); if (data->dma_rx.buffer_len == 0U) { ret = -EACCES; goto done; } if (data->rx_next_buffer_len != 0U) { ret = -EBUSY; goto done; } data->rx_next_buffer = buf; data->rx_next_buffer_len = len; done: irq_unlock(key); return ret; } #endif static int uart_xmc4xxx_init(const struct device *dev) { int ret; const struct uart_xmc4xxx_config *config = dev->config; struct uart_xmc4xxx_data *data = dev->data; uint8_t fifo_offset = config->fifo_start_offset; data->config.data_bits = 8U; data->config.stop_bits = 1U; 
XMC_UART_CH_Init(config->uart, &(data->config)); if (config->fifo_tx_size > 0) { /* fifos need to be aligned on fifo size */ fifo_offset = ROUND_UP(fifo_offset, BIT(config->fifo_tx_size)); XMC_USIC_CH_TXFIFO_Configure(config->uart, fifo_offset, config->fifo_tx_size, 1); fifo_offset += BIT(config->fifo_tx_size); } if (config->fifo_rx_size > 0) { /* fifos need to be aligned on fifo size */ fifo_offset = ROUND_UP(fifo_offset, BIT(config->fifo_rx_size)); XMC_USIC_CH_RXFIFO_Configure(config->uart, fifo_offset, config->fifo_rx_size, 0); fifo_offset += BIT(config->fifo_rx_size); } if (fifo_offset > MAX_FIFO_SIZE) { return -EINVAL; } /* Connect UART RX to logical 1. It is connected to proper pin after pinctrl is applied */ XMC_UART_CH_SetInputSource(config->uart, XMC_UART_CH_INPUT_RXD, 0x7); /* Start the UART before pinctrl, because the USIC is driving the TX line */ /* low in off state */ XMC_UART_CH_Start(config->uart); ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Connect UART RX to the target pin */ XMC_UART_CH_SetInputSource(config->uart, XMC_UART_CH_INPUT_RXD, config->input_src); #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) config->irq_config_func(dev); uart_xmc4xxx_configure_service_requests(dev); #endif #if defined(CONFIG_UART_ASYNC_API) ret = uart_xmc4xxx_async_init(dev); #endif return ret; } static const struct uart_driver_api uart_xmc4xxx_driver_api = { .poll_in = uart_xmc4xxx_poll_in, .poll_out = uart_xmc4xxx_poll_out, #if defined(CONFIG_UART_INTERRUPT_DRIVEN) .fifo_fill = uart_xmc4xxx_fifo_fill, .fifo_read = uart_xmc4xxx_fifo_read, .irq_tx_enable = uart_xmc4xxx_irq_tx_enable, .irq_tx_disable = uart_xmc4xxx_irq_tx_disable, .irq_tx_ready = uart_xmc4xxx_irq_tx_ready, .irq_rx_enable = uart_xmc4xxx_irq_rx_enable, .irq_rx_disable = uart_xmc4xxx_irq_rx_disable, .irq_rx_ready = uart_xmc4xxx_irq_rx_ready, .irq_callback_set = uart_xmc4xxx_irq_callback_set, .irq_is_pending = 
uart_xmc4xxx_irq_is_pending, #endif #if defined(CONFIG_UART_ASYNC_API) .callback_set = uart_xmc4xxx_async_callback_set, .tx = uart_xmc4xxx_async_tx, .tx_abort = uart_xmc4xxx_async_tx_abort, .rx_enable = uart_xmc4xxx_async_rx_enable, .rx_buf_rsp = uart_xmc4xxx_rx_buf_rsp, .rx_disable = uart_xmc4xxx_async_rx_disable, #endif }; #ifdef CONFIG_UART_ASYNC_API #define UART_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst) \ .dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(index, dir)), \ .dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \ .dma_cfg = { \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME(index, dir, config), \ .channel_direction = ch_dir, \ .channel_priority = DT_INST_DMAS_CELL_BY_NAME(index, dir, priority), \ .source_data_size = 1, \ .dest_data_size = 1, \ .source_burst_length = src_burst, \ .dest_burst_length = dst_burst, \ .block_count = 1, \ .dma_callback = uart_xmc4xxx_dma_##dir##_cb, \ }, #define UART_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst) \ .dma_##dir = {COND_CODE_1( \ DT_INST_DMAS_HAS_NAME(index, dir), \ (UART_DMA_CHANNEL_INIT(index, dir, ch_dir, src_burst, dst_burst)), (NULL))}, #else #define UART_DMA_CHANNEL(index, dir, ch_dir, src_burst, dst_burst) #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) #define XMC4XXX_IRQ_HANDLER(index) \ static void uart_xmc4xxx_irq_setup_##index(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, tx, irq), \ DT_INST_IRQ_BY_NAME(index, tx, priority), uart_xmc4xxx_isr, \ DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, rx, irq), \ DT_INST_IRQ_BY_NAME(index, rx, priority), uart_xmc4xxx_isr, \ DEVICE_DT_INST_GET(index), 0); \ irq_enable(DT_INST_IRQ_BY_NAME(index, tx, irq)); \ irq_enable(DT_INST_IRQ_BY_NAME(index, rx, irq)); \ } #define XMC4XXX_IRQ_STRUCT_INIT(index) \ .irq_config_func = uart_xmc4xxx_irq_setup_##index, \ .irq_num_tx = DT_INST_IRQ_BY_NAME(index, tx, irq), \ .irq_num_rx = DT_INST_IRQ_BY_NAME(index, rx, 
irq), #else #define XMC4XXX_IRQ_HANDLER(index) #define XMC4XXX_IRQ_STRUCT_INIT(index) #endif #define XMC4XXX_INIT(index) \ PINCTRL_DT_INST_DEFINE(index); \ XMC4XXX_IRQ_HANDLER(index) \ static struct uart_xmc4xxx_data xmc4xxx_data_##index = { \ .config.baudrate = DT_INST_PROP(index, current_speed), \ UART_DMA_CHANNEL(index, tx, MEMORY_TO_PERIPHERAL, 8, 1) \ UART_DMA_CHANNEL(index, rx, PERIPHERAL_TO_MEMORY, 1, 8) \ }; \ \ static const struct uart_xmc4xxx_config xmc4xxx_config_##index = { \ .uart = (XMC_USIC_CH_t *)DT_INST_REG_ADDR(index), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index), \ .input_src = DT_INST_ENUM_IDX(index, input_src), \ XMC4XXX_IRQ_STRUCT_INIT(index) \ .fifo_start_offset = DT_INST_PROP(index, fifo_start_offset), \ .fifo_tx_size = DT_INST_ENUM_IDX(index, fifo_tx_size), \ .fifo_rx_size = DT_INST_ENUM_IDX(index, fifo_rx_size), \ }; \ \ DEVICE_DT_INST_DEFINE(index, uart_xmc4xxx_init, \ NULL, \ &xmc4xxx_data_##index, \ &xmc4xxx_config_##index, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_xmc4xxx_driver_api); DT_INST_FOREACH_STATUS_OKAY(XMC4XXX_INIT) ```
/content/code_sandbox/drivers/serial/uart_xmc4xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,444
```c /* */ #define DT_DRV_COMPAT gd_gd32_usart #include <errno.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/gd32.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> #include <gd32_usart.h> /* Unify GD32 HAL USART status register name to USART_STAT */ #ifndef USART_STAT #define USART_STAT USART_STAT0 #endif struct gd32_usart_config { uint32_t reg; uint16_t clkid; struct reset_dt_spec reset; const struct pinctrl_dev_config *pcfg; uint32_t parity; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; struct gd32_usart_data { uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t user_cb; void *user_data; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void usart_gd32_isr(const struct device *dev) { struct gd32_usart_data *const data = dev->data; if (data->user_cb) { data->user_cb(dev, data->user_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int usart_gd32_init(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; struct gd32_usart_data *const data = dev->data; uint32_t word_length; uint32_t parity; int ret; ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /** * In order to keep the transfer data size to 8 bits(1 byte), * append word length to 9BIT if parity bit enabled. 
*/ switch (cfg->parity) { case UART_CFG_PARITY_NONE: parity = USART_PM_NONE; word_length = USART_WL_8BIT; break; case UART_CFG_PARITY_ODD: parity = USART_PM_ODD; word_length = USART_WL_9BIT; break; case UART_CFG_PARITY_EVEN: parity = USART_PM_EVEN; word_length = USART_WL_9BIT; break; default: return -ENOTSUP; } (void)clock_control_on(GD32_CLOCK_CONTROLLER, (clock_control_subsys_t)&cfg->clkid); (void)reset_line_toggle_dt(&cfg->reset); usart_baudrate_set(cfg->reg, data->baud_rate); usart_parity_config(cfg->reg, parity); usart_word_length_set(cfg->reg, word_length); /* Default to 1 stop bit */ usart_stop_bit_set(cfg->reg, USART_STB_1BIT); usart_receive_config(cfg->reg, USART_RECEIVE_ENABLE); usart_transmit_config(cfg->reg, USART_TRANSMIT_ENABLE); usart_enable(cfg->reg); #ifdef CONFIG_UART_INTERRUPT_DRIVEN cfg->irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } static int usart_gd32_poll_in(const struct device *dev, unsigned char *c) { const struct gd32_usart_config *const cfg = dev->config; uint32_t status; status = usart_flag_get(cfg->reg, USART_FLAG_RBNE); if (!status) { return -EPERM; } *c = usart_data_receive(cfg->reg); return 0; } static void usart_gd32_poll_out(const struct device *dev, unsigned char c) { const struct gd32_usart_config *const cfg = dev->config; usart_data_transmit(cfg->reg, c); while (usart_flag_get(cfg->reg, USART_FLAG_TBE) == RESET) { ; } } static int usart_gd32_err_check(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; uint32_t status = USART_STAT(cfg->reg); int errors = 0; if (status & USART_FLAG_ORERR) { usart_flag_clear(cfg->reg, USART_FLAG_ORERR); errors |= UART_ERROR_OVERRUN; } if (status & USART_FLAG_PERR) { usart_flag_clear(cfg->reg, USART_FLAG_PERR); errors |= UART_ERROR_PARITY; } if (status & USART_FLAG_FERR) { usart_flag_clear(cfg->reg, USART_FLAG_FERR); errors |= UART_ERROR_FRAMING; } usart_flag_clear(cfg->reg, USART_FLAG_NERR); return errors; } #ifdef 
CONFIG_UART_INTERRUPT_DRIVEN int usart_gd32_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct gd32_usart_config *const cfg = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && usart_flag_get(cfg->reg, USART_FLAG_TBE)) { usart_data_transmit(cfg->reg, tx_data[num_tx++]); } return num_tx; } int usart_gd32_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct gd32_usart_config *const cfg = dev->config; uint8_t num_rx = 0U; while ((size - num_rx > 0) && usart_flag_get(cfg->reg, USART_FLAG_RBNE)) { rx_data[num_rx++] = usart_data_receive(cfg->reg); } return num_rx; } void usart_gd32_irq_tx_enable(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; usart_interrupt_enable(cfg->reg, USART_INT_TC); } void usart_gd32_irq_tx_disable(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; usart_interrupt_disable(cfg->reg, USART_INT_TC); } int usart_gd32_irq_tx_ready(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; return usart_flag_get(cfg->reg, USART_FLAG_TBE) && usart_interrupt_flag_get(cfg->reg, USART_INT_FLAG_TC); } int usart_gd32_irq_tx_complete(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; return usart_flag_get(cfg->reg, USART_FLAG_TC); } void usart_gd32_irq_rx_enable(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; usart_interrupt_enable(cfg->reg, USART_INT_RBNE); } void usart_gd32_irq_rx_disable(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; usart_interrupt_disable(cfg->reg, USART_INT_RBNE); } int usart_gd32_irq_rx_ready(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; return usart_flag_get(cfg->reg, USART_FLAG_RBNE); } void usart_gd32_irq_err_enable(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; 
usart_interrupt_enable(cfg->reg, USART_INT_ERR); usart_interrupt_enable(cfg->reg, USART_INT_PERR); } void usart_gd32_irq_err_disable(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; usart_interrupt_disable(cfg->reg, USART_INT_ERR); usart_interrupt_disable(cfg->reg, USART_INT_PERR); } int usart_gd32_irq_is_pending(const struct device *dev) { const struct gd32_usart_config *const cfg = dev->config; return ((usart_flag_get(cfg->reg, USART_FLAG_RBNE) && usart_interrupt_flag_get(cfg->reg, USART_INT_FLAG_RBNE)) || (usart_flag_get(cfg->reg, USART_FLAG_TC) && usart_interrupt_flag_get(cfg->reg, USART_INT_FLAG_TC))); } void usart_gd32_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct gd32_usart_data *const data = dev->data; data->user_cb = cb; data->user_data = user_data; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api usart_gd32_driver_api = { .poll_in = usart_gd32_poll_in, .poll_out = usart_gd32_poll_out, .err_check = usart_gd32_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = usart_gd32_fifo_fill, .fifo_read = usart_gd32_fifo_read, .irq_tx_enable = usart_gd32_irq_tx_enable, .irq_tx_disable = usart_gd32_irq_tx_disable, .irq_tx_ready = usart_gd32_irq_tx_ready, .irq_tx_complete = usart_gd32_irq_tx_complete, .irq_rx_enable = usart_gd32_irq_rx_enable, .irq_rx_disable = usart_gd32_irq_rx_disable, .irq_rx_ready = usart_gd32_irq_rx_ready, .irq_err_enable = usart_gd32_irq_err_enable, .irq_err_disable = usart_gd32_irq_err_disable, .irq_is_pending = usart_gd32_irq_is_pending, .irq_callback_set = usart_gd32_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define GD32_USART_IRQ_HANDLER(n) \ static void usart_gd32_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ usart_gd32_isr, \ DEVICE_DT_INST_GET(n), \ 0); \ irq_enable(DT_INST_IRQN(n)); \ } 
#define GD32_USART_IRQ_HANDLER_FUNC_INIT(n) \ .irq_config_func = usart_gd32_config_func_##n #else /* CONFIG_UART_INTERRUPT_DRIVEN */ #define GD32_USART_IRQ_HANDLER(n) #define GD32_USART_IRQ_HANDLER_FUNC_INIT(n) #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #define GD32_USART_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ GD32_USART_IRQ_HANDLER(n) \ static struct gd32_usart_data usart_gd32_data_##n = { \ .baud_rate = DT_INST_PROP(n, current_speed), \ }; \ static const struct gd32_usart_config usart_gd32_config_##n = { \ .reg = DT_INST_REG_ADDR(n), \ .clkid = DT_INST_CLOCKS_CELL(n, id), \ .reset = RESET_DT_SPEC_INST_GET(n), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE), \ GD32_USART_IRQ_HANDLER_FUNC_INIT(n) \ }; \ DEVICE_DT_INST_DEFINE(n, usart_gd32_init, \ NULL, \ &usart_gd32_data_##n, \ &usart_gd32_config_##n, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &usart_gd32_driver_api); DT_INST_FOREACH_STATUS_OKAY(GD32_USART_INIT) ```
/content/code_sandbox/drivers/serial/usart_gd32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,550
```unknown menuconfig UART_INTEL_LW bool "Intel Lightweight UART driver" depends on DT_HAS_INTEL_LW_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable the Intel Lightweight UART driver, that can be built into Intel NiosV CPU designs. if UART_INTEL_LW config UART_INTEL_LW_EOP bool "Include end of packet register" depends on UART_DRV_CMD && UART_INTERRUPT_DRIVEN help Use driver command CMD_ENABLE_EOP and CMD_DISABLE_EOP to use the feature. config UART_INTEL_LW_AUTO_LINE_CTRL_POLL bool "Auto set RTS signal during poll out" depends on UART_LINE_CTRL help Assert RTS before polling out a character, and deassert RTS after the character is polled out. Please note that this is not suitable, when polling out several characters. Please use uart_drv_cmd with CMD_POLL_ASSERT_RTS before polling out. Then use CMD_POLL_DEASSERT_RTS to resume normal operation after polling. endif # UART_INTEL_LW ```
/content/code_sandbox/drivers/serial/Kconfig.intel_lw
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
223
```c /* * */ /** * @brief Driver for Nordic Semiconductor nRF UARTE */ #include <zephyr/kernel.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/serial/uart_async_to_irq.h> #include <zephyr/pm/device.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/linker/devicetree_regions.h> #include <zephyr/logging/log.h> #include <nrfx_uarte.h> #include <helpers/nrfx_gppi.h> #include <haly/nrfy_uarte.h> #define LOG_MODULE_NAME uarte LOG_MODULE_REGISTER(LOG_MODULE_NAME, CONFIG_UART_LOG_LEVEL); #define INSTANCE_INT_DRIVEN(periph, prefix, i, _) \ IS_ENABLED(CONFIG_UART_##prefix##i##_INTERRUPT_DRIVEN) #define INSTANCE_ASYNC(periph, prefix, i, _) \ IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC) #define INSTANCE_POLLING(periph, prefix, id, _) \ UTIL_AND(CONFIG_HAS_HW_NRF_UARTE##prefix##id, \ UTIL_AND(COND_CODE_1(CONFIG_UART_##prefix##id##_INTERRUPT_DRIVEN, (0), (1)), \ COND_CODE_1(CONFIG_UART_##prefix##id##_ASYNC, (0), (1)))) #define INSTANCE_ENHANCED_POLL_OUT(periph, prefix, i, _) \ IS_ENABLED(CONFIG_UART_##prefix##i##_ENHANCED_POLL_OUT) /* Macro determining if any instance is using interrupt driven API. */ #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_INT_DRIVEN, (+), (0), _)) #define UARTE_ANY_INTERRUPT_DRIVEN 1 #else #define UARTE_ANY_INTERRUPT_DRIVEN 0 #endif /* Macro determining if any instance is enabled and using ASYNC API. */ #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_ASYNC, (+), (0), _)) #define UARTE_ANY_ASYNC 1 #else #define UARTE_ANY_ASYNC 0 #endif /* Macro determining if any instance is using only polling API. */ #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_POLLING, (+), (0), _)) #define UARTE_ANY_POLLING 1 #else #define UARTE_ANY_POLLING 0 #endif /* Macro determining if any instance is using interrupt driven API. 
*/ #if (NRFX_FOREACH_ENABLED(UARTE, INSTANCE_ENHANCED_POLL_OUT, (+), (0), _)) #define UARTE_ENHANCED_POLL_OUT 1 #else #define UARTE_ENHANCED_POLL_OUT 0 #endif #if UARTE_ANY_INTERRUPT_DRIVEN || UARTE_ANY_ASYNC #define UARTE_INT_ASYNC 1 #else #define UARTE_INT_ASYNC 0 #endif #if defined(UARTE_CONFIG_PARITYTYPE_Msk) #define UARTE_ODD_PARITY_ALLOWED 1 #else #define UARTE_ODD_PARITY_ALLOWED 0 #endif /* * RX timeout is divided into time slabs, this define tells how many divisions * should be made. More divisions - higher timeout accuracy and processor usage. */ #define RX_TIMEOUT_DIV 5 /* Macro for converting numerical baudrate to register value. It is convenient * to use this approach because for constant input it can calculate nrf setting * at compile time. */ #define NRF_BAUDRATE(baudrate) ((baudrate) == 300 ? 0x00014000 :\ (baudrate) == 600 ? 0x00027000 : \ (baudrate) == 1200 ? NRF_UARTE_BAUDRATE_1200 : \ (baudrate) == 2400 ? NRF_UARTE_BAUDRATE_2400 : \ (baudrate) == 4800 ? NRF_UARTE_BAUDRATE_4800 : \ (baudrate) == 9600 ? NRF_UARTE_BAUDRATE_9600 : \ (baudrate) == 14400 ? NRF_UARTE_BAUDRATE_14400 : \ (baudrate) == 19200 ? NRF_UARTE_BAUDRATE_19200 : \ (baudrate) == 28800 ? NRF_UARTE_BAUDRATE_28800 : \ (baudrate) == 31250 ? NRF_UARTE_BAUDRATE_31250 : \ (baudrate) == 38400 ? NRF_UARTE_BAUDRATE_38400 : \ (baudrate) == 56000 ? NRF_UARTE_BAUDRATE_56000 : \ (baudrate) == 57600 ? NRF_UARTE_BAUDRATE_57600 : \ (baudrate) == 76800 ? NRF_UARTE_BAUDRATE_76800 : \ (baudrate) == 115200 ? NRF_UARTE_BAUDRATE_115200 : \ (baudrate) == 230400 ? NRF_UARTE_BAUDRATE_230400 : \ (baudrate) == 250000 ? NRF_UARTE_BAUDRATE_250000 : \ (baudrate) == 460800 ? NRF_UARTE_BAUDRATE_460800 : \ (baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 : \ (baudrate) == 1000000 ? 
NRF_UARTE_BAUDRATE_1000000 : 0) #define UARTE_DATA_FLAG_TRAMPOLINE BIT(0) #define UARTE_DATA_FLAG_RX_ENABLED BIT(1) struct uarte_async_data { uart_callback_t user_callback; void *user_data; uint8_t *en_rx_buf; size_t en_rx_len; struct k_timer tx_timer; struct k_timer rx_timer; k_timeout_t rx_timeout; /* Keeps the most recent error mask. */ uint32_t err; uint8_t idle_cnt; }; /* Device data structure */ struct uarte_nrfx_data { struct uart_async_to_irq_data *a2i_data; #if CONFIG_UART_USE_RUNTIME_CONFIGURE struct uart_config uart_config; #endif struct uarte_async_data *async; atomic_t flags; uint8_t rx_byte; }; BUILD_ASSERT(offsetof(struct uarte_nrfx_data, a2i_data) == 0); /* If set then pins are managed when going to low power mode. */ #define UARTE_CFG_FLAG_GPIO_MGMT BIT(0) /* If set then receiver is not used. */ #define UARTE_CFG_FLAG_NO_RX BIT(1) /* If set then instance is using interrupt driven API. */ #define UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API BIT(2) /** * @brief Structure for UARTE configuration. */ struct uarte_nrfx_config { const struct uart_async_to_irq_config *a2i_config; nrfx_uarte_t nrfx_dev; nrfx_uarte_config_t nrfx_config; const struct pinctrl_dev_config *pcfg; uint32_t flags; LOG_INSTANCE_PTR_DECLARE(log); }; BUILD_ASSERT(offsetof(struct uarte_nrfx_config, a2i_config) == 0); #define UARTE_ERROR_FROM_MASK(mask) \ ((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN \ : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY \ : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING \ : (mask) & NRF_UARTE_ERROR_BREAK_MASK ? UART_BREAK \ : 0) /* Determine if the device has interrupt driven API enabled. */ #define IS_INT_DRIVEN_API(dev) \ (UARTE_ANY_INTERRUPT_DRIVEN && \ (((const struct uarte_nrfx_config *)dev->config)->flags & \ UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API)) /* Determine if the device supports only polling API. 
*/ #define IS_POLLING_API(dev) \ (!UARTE_INT_ASYNC || (((struct uarte_nrfx_data *)dev->data)->async == NULL)) /* Determine if the device supports asynchronous API. */ #define IS_ASYNC_API(dev) (!IS_INT_DRIVEN_API(dev) && !IS_POLLING_API(dev)) static inline const nrfx_uarte_t *get_nrfx_dev(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; return &config->nrfx_dev; } static int callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct uarte_nrfx_data *data = dev->data; data->async->user_callback = callback; data->async->user_data = user_data; return 0; } #if UARTE_ANY_ASYNC static int api_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { if (!IS_ASYNC_API(dev)) { return -ENOTSUP; } return callback_set(dev, callback, user_data); } #endif static void on_tx_done(const struct device *dev, const nrfx_uarte_event_t *event) { struct uarte_nrfx_data *data = dev->data; struct uart_event evt = { .type = (event->data.tx.flags & NRFX_UARTE_TX_DONE_ABORTED) ? UART_TX_ABORTED : UART_TX_DONE, .data.tx.buf = event->data.tx.p_buffer, .data.tx.len = event->data.tx.length }; bool hwfc; #if CONFIG_UART_USE_RUNTIME_CONFIGURE hwfc = data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS; #else const struct uarte_nrfx_config *config = dev->config; hwfc = config->nrfx_config.config.hwfc == NRF_UARTE_HWFC_ENABLED; #endif if (hwfc) { k_timer_stop(&data->async->tx_timer); } data->async->user_callback(dev, &evt, data->async->user_data); } static void on_rx_done(const struct device *dev, const nrfx_uarte_event_t *event) { struct uarte_nrfx_data *data = dev->data; struct uart_event evt; if (data->async->err) { evt.type = UART_RX_STOPPED; evt.data.rx_stop.reason = UARTE_ERROR_FROM_MASK(data->async->err); evt.data.rx_stop.data.buf = event->data.rx.p_buffer; evt.data.rx_stop.data.len = event->data.rx.length; /* Keep error code for uart_err_check(). 
*/ if (!IS_INT_DRIVEN_API(dev)) { data->async->err = 0; } data->async->user_callback(dev, &evt, data->async->user_data); } else if (event->data.rx.length) { evt.type = UART_RX_RDY, evt.data.rx.buf = event->data.rx.p_buffer, evt.data.rx.len = event->data.rx.length, evt.data.rx.offset = 0; data->async->user_callback(dev, &evt, data->async->user_data); } evt.type = UART_RX_BUF_RELEASED; evt.data.rx_buf.buf = event->data.rx.p_buffer; data->async->user_callback(dev, &evt, data->async->user_data); } static void start_rx_timer(struct uarte_nrfx_data *data) { struct uarte_async_data *adata = data->async; k_timer_start(&adata->rx_timer, adata->rx_timeout, K_NO_WAIT); } static void on_rx_byte(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; struct uarte_async_data *adata = data->async; const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); nrfx_uarte_rxdrdy_disable(nrfx_dev); adata->idle_cnt = RX_TIMEOUT_DIV; start_rx_timer(data); } static void on_rx_buf_req(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; struct uarte_async_data *adata = data->async; const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); /* If buffer is not null it indicates that event comes from RX enabling * function context. We need to pass provided buffer to the driver. */ if (adata->en_rx_buf) { uint8_t *buf = adata->en_rx_buf; size_t len = adata->en_rx_len; nrfx_err_t err; adata->en_rx_buf = NULL; adata->en_rx_len = 0; err = nrfx_uarte_rx_buffer_set(nrfx_dev, buf, len); __ASSERT_NO_MSG(err == NRFX_SUCCESS); return; } struct uart_event evt = { .type = UART_RX_BUF_REQUEST }; /* If counter reached zero that indicates that timeout was reached and * reception of one buffer was terminated to restart another transfer. 
*/ if (!K_TIMEOUT_EQ(adata->rx_timeout, K_NO_WAIT)) { nrfx_uarte_rxdrdy_enable(nrfx_dev); } data->async->user_callback(dev, &evt, data->async->user_data); } static void on_rx_disabled(const struct device *dev, struct uarte_nrfx_data *data) { struct uart_event evt = { .type = UART_RX_DISABLED }; atomic_and(&data->flags, ~UARTE_DATA_FLAG_RX_ENABLED); k_timer_stop(&data->async->rx_timer); data->async->user_callback(dev, &evt, data->async->user_data); } static void trigger_handler(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; if (UARTE_ANY_INTERRUPT_DRIVEN && atomic_and(&data->flags, ~UARTE_DATA_FLAG_TRAMPOLINE) & UARTE_DATA_FLAG_TRAMPOLINE) { uart_async_to_irq_trampoline_cb(dev); } } static void evt_handler(nrfx_uarte_event_t const *event, void *context) { const struct device *dev = context; struct uarte_nrfx_data *data = dev->data; switch (event->type) { case NRFX_UARTE_EVT_TX_DONE: on_tx_done(dev, event); break; case NRFX_UARTE_EVT_RX_DONE: on_rx_done(dev, event); break; case NRFX_UARTE_EVT_RX_BYTE: on_rx_byte(dev); break; case NRFX_UARTE_EVT_ERROR: data->async->err = event->data.error.error_mask; break; case NRFX_UARTE_EVT_RX_BUF_REQUEST: on_rx_buf_req(dev); break; case NRFX_UARTE_EVT_RX_DISABLED: on_rx_disabled(dev, data); break; case NRFX_UARTE_EVT_RX_BUF_TOO_LATE: /* No support */ break; case NRFX_UARTE_EVT_TRIGGER: trigger_handler(dev); break; default: __ASSERT_NO_MSG(0); } } static int api_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { struct uarte_nrfx_data *data = dev->data; const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); nrfx_err_t err; bool hwfc; #if CONFIG_PM_DEVICE enum pm_device_state state; (void)pm_device_state_get(dev, &state); if (state != PM_DEVICE_STATE_ACTIVE) { return -ECANCELED; } #endif #if CONFIG_UART_USE_RUNTIME_CONFIGURE hwfc = data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS; #else const struct uarte_nrfx_config *config = dev->config; hwfc = config->nrfx_config.config.hwfc 
== NRF_UARTE_HWFC_ENABLED; #endif err = nrfx_uarte_tx(nrfx_dev, buf, len, 0); if (err != NRFX_SUCCESS) { return (err == NRFX_ERROR_BUSY) ? -EBUSY : -EIO; } if (hwfc && timeout != SYS_FOREVER_US) { k_timer_start(&data->async->tx_timer, K_USEC(timeout), K_NO_WAIT); } return 0; } static int api_tx_abort(const struct device *dev) { const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); nrfx_err_t err; err = nrfx_uarte_tx_abort(nrfx_dev, false); return (err == NRFX_SUCCESS) ? 0 : -EFAULT; } static void tx_timeout_handler(struct k_timer *timer) { const struct device *dev = k_timer_user_data_get(timer); (void)api_tx_abort(dev); } static void rx_timeout_handler(struct k_timer *timer) { const struct device *dev = (const struct device *)k_timer_user_data_get(timer); struct uarte_nrfx_data *data = dev->data; struct uarte_async_data *adata = data->async; const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); if (nrfx_uarte_rx_new_data_check(nrfx_dev)) { adata->idle_cnt = RX_TIMEOUT_DIV - 1; } else { adata->idle_cnt--; if (adata->idle_cnt == 0) { (void)nrfx_uarte_rx_abort(nrfx_dev, false, false); return; } } start_rx_timer(data); } /* Determine if RX FIFO content shall be kept when device is being disabled. * When flow-control is used then we expect to keep RX FIFO content since HWFC * enforces lossless communication. However, when HWFC is not used (by any instance * then RX FIFO handling feature is disabled in the nrfx_uarte to save space. * It is based on assumption that without HWFC it is expected that some data may * be lost and there are means to prevent that (keeping receiver always opened by * provided reception buffers on time). 
*/ static inline uint32_t get_keep_fifo_content_flag(const struct device *dev) { #if CONFIG_UART_USE_RUNTIME_CONFIGURE struct uarte_nrfx_data *data = dev->data; if (data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) { return NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT; } #else const struct uarte_nrfx_config *config = dev->config; if (config->nrfx_config.config.hwfc == NRF_UARTE_HWFC_ENABLED) { return NRFX_UARTE_RX_ENABLE_KEEP_FIFO_CONTENT; } #endif return 0; } static int api_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { nrfx_err_t err; const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); const struct uarte_nrfx_config *cfg = dev->config; struct uarte_nrfx_data *data = dev->data; struct uarte_async_data *adata = data->async; uint32_t flags = NRFX_UARTE_RX_ENABLE_CONT | get_keep_fifo_content_flag(dev) | (IS_ASYNC_API(dev) ? NRFX_UARTE_RX_ENABLE_STOP_ON_END : 0); if (cfg->flags & UARTE_CFG_FLAG_NO_RX) { return -ENOTSUP; } if (timeout != SYS_FOREVER_US) { adata->idle_cnt = RX_TIMEOUT_DIV + 1; adata->rx_timeout = K_USEC(timeout / RX_TIMEOUT_DIV); nrfx_uarte_rxdrdy_enable(nrfx_dev); } else { adata->rx_timeout = K_NO_WAIT; } /* Store the buffer. It will be passed to the driver in the event handler. * We do that instead of calling nrfx_uarte_rx_buffer_set here to ensure * that nrfx_uarte_rx_buffer_set is called when RX enable configuration * flags are already known to the driver (e.g. if flushed data shall be * kept or not). */ adata->err = 0; adata->en_rx_buf = buf; adata->en_rx_len = len; atomic_or(&data->flags, UARTE_DATA_FLAG_RX_ENABLED); err = nrfx_uarte_rx_enable(nrfx_dev, flags); if (err != NRFX_SUCCESS) { atomic_and(&data->flags, ~UARTE_DATA_FLAG_RX_ENABLED); return (err == NRFX_ERROR_BUSY) ? 
-EBUSY : -EIO; } return 0; } static int api_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); struct uarte_nrfx_data *data = dev->data; nrfx_err_t err; if (!(data->flags & UARTE_DATA_FLAG_RX_ENABLED)) { return -EACCES; } err = nrfx_uarte_rx_buffer_set(nrfx_dev, buf, len); switch (err) { case NRFX_SUCCESS: return 0; case NRFX_ERROR_BUSY: return -EBUSY; default: return -EIO; } } static int api_rx_disable(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; k_timer_stop(&data->async->rx_timer); return (nrfx_uarte_rx_abort(get_nrfx_dev(dev), true, false) == NRFX_SUCCESS) ? 0 : -EFAULT; } static int api_poll_in(const struct device *dev, unsigned char *c) { const struct uarte_nrfx_config *cfg = dev->config; const nrfx_uarte_t *instance = &cfg->nrfx_dev; nrfx_err_t err; if (IS_INT_DRIVEN_API(dev)) { return uart_fifo_read(dev, c, 1) == 0 ? -1 : 0; } if (IS_ASYNC_API(dev)) { return -EBUSY; } err = nrfx_uarte_rx_ready(instance, NULL); if (err == NRFX_SUCCESS) { uint8_t *rx_byte = cfg->nrfx_config.rx_cache.p_buffer; *c = *rx_byte; err = nrfx_uarte_rx_buffer_set(instance, rx_byte, 1); __ASSERT_NO_MSG(err == NRFX_SUCCESS); return 0; } return -1; } static void api_poll_out(const struct device *dev, unsigned char out_char) { const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); nrfx_err_t err; #if CONFIG_PM_DEVICE enum pm_device_state state; (void)pm_device_state_get(dev, &state); if (state != PM_DEVICE_STATE_ACTIVE) { return; } #endif do { /* When runtime PM is used we cannot use early return because then * we have no information when UART is actually done with the * transmission. It reduces UART performance however, polling in * general is not power efficient and should be avoided in low * power applications. 
*/ err = nrfx_uarte_tx(nrfx_dev, &out_char, 1, NRFX_UARTE_TX_EARLY_RETURN); __ASSERT(err != NRFX_ERROR_INVALID_ADDR, "Invalid address of the buffer"); if (err == NRFX_ERROR_BUSY) { if (IS_ENABLED(CONFIG_MULTITHREADING) && k_is_preempt_thread()) { k_msleep(1); } else { Z_SPIN_DELAY(3); } } } while (err == NRFX_ERROR_BUSY); } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE /** * @brief Set the baud rate * * This routine set the given baud rate for the UARTE. * * @param dev UARTE device struct * @param baudrate Baud rate * * @return 0 on success or error code */ static int baudrate_set(NRF_UARTE_Type *uarte, uint32_t baudrate) { nrf_uarte_baudrate_t nrf_baudrate = NRF_BAUDRATE(baudrate); if (baudrate == 0) { return -EINVAL; } nrfy_uarte_baudrate_set(uarte, nrf_baudrate); return 0; } static int uarte_nrfx_configure(const struct device *dev, const struct uart_config *cfg) { const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); struct uarte_nrfx_data *data = dev->data; nrf_uarte_config_t uarte_cfg; #if NRF_UARTE_HAS_FRAME_TIMEOUT uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_DIS; #endif #if defined(UARTE_CONFIG_STOP_Msk) switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: uarte_cfg.stop = NRF_UARTE_STOP_ONE; break; case UART_CFG_STOP_BITS_2: uarte_cfg.stop = NRF_UARTE_STOP_TWO; break; default: return -ENOTSUP; } #else if (cfg->stop_bits != UART_CFG_STOP_BITS_1) { return -ENOTSUP; } #endif if (cfg->data_bits != UART_CFG_DATA_BITS_8) { return -ENOTSUP; } switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED; break; case UART_CFG_FLOW_CTRL_RTS_CTS: uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED; break; default: return -ENOTSUP; } #if defined(UARTE_CONFIG_PARITYTYPE_Msk) uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN; #endif switch (cfg->parity) { case UART_CFG_PARITY_NONE: uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED; break; case UART_CFG_PARITY_EVEN: uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED; break; #if 
defined(UARTE_CONFIG_PARITYTYPE_Msk) case UART_CFG_PARITY_ODD: uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED; uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD; break; #endif default: return -ENOTSUP; } if (baudrate_set(nrfx_dev->p_reg, cfg->baudrate) != 0) { return -ENOTSUP; } nrfy_uarte_configure(nrfx_dev->p_reg, &uarte_cfg); data->uart_config = *cfg; return 0; } static int uarte_nrfx_config_get(const struct device *dev, struct uart_config *cfg) { struct uarte_nrfx_data *data = dev->data; *cfg = data->uart_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #if UARTE_ANY_POLLING || UARTE_ANY_INTERRUPT_DRIVEN static int api_err_check(const struct device *dev) { if (IS_POLLING_API(dev)) { const struct uarte_nrfx_config *cfg = dev->config; const nrfx_uarte_t *instance = &cfg->nrfx_dev; uint32_t mask = nrfx_uarte_errorsrc_get(instance); return mask; } struct uarte_nrfx_data *data = dev->data; uint32_t rv = data->async->err; data->async->err = 0; return rv; } #endif static const struct uart_async_to_irq_async_api a2i_api = { .callback_set = callback_set, .tx = api_tx, .tx_abort = api_tx_abort, .rx_enable = api_rx_enable, .rx_buf_rsp = api_rx_buf_rsp, .rx_disable = api_rx_disable, }; static const struct uart_driver_api uart_nrfx_uarte_driver_api = { .poll_in = api_poll_in, .poll_out = api_poll_out, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uarte_nrfx_configure, .config_get = uarte_nrfx_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #if UARTE_ANY_POLLING || UARTE_ANY_INTERRUPT_DRIVEN .err_check = api_err_check, #endif #if UARTE_ANY_ASYNC .callback_set = api_callback_set, .tx = api_tx, .tx_abort = api_tx_abort, .rx_enable = api_rx_enable, .rx_buf_rsp = api_rx_buf_rsp, .rx_disable = api_rx_disable, #endif /* UARTE_ANY_ASYNC */ #if UARTE_ANY_INTERRUPT_DRIVEN UART_ASYNC_TO_IRQ_API_INIT(), #endif /* UARTE_ANY_INTERRUPT_DRIVEN */ }; static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte) { nrfx_err_t ret; uint8_t ch; ret = 
nrfx_gppi_channel_alloc(&ch); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); return -EIO; } nrfx_gppi_channel_endpoints_setup(ch, nrfy_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX), nrfy_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX)); nrfx_gppi_channels_enable(BIT(ch)); return 0; } static int start_rx(const struct device *dev) { const struct uarte_nrfx_config *cfg = dev->config; if (IS_INT_DRIVEN_API(dev)) { return uart_async_to_irq_rx_enable(dev); } __ASSERT_NO_MSG(IS_POLLING_API(dev)); nrfx_err_t err; const nrfx_uarte_t *instance = &cfg->nrfx_dev; uint8_t *rx_byte = cfg->nrfx_config.rx_cache.p_buffer; err = nrfx_uarte_rx_buffer_set(instance, rx_byte, 1); __ASSERT_NO_MSG(err == NRFX_SUCCESS); err = nrfx_uarte_rx_enable(instance, 0); __ASSERT_NO_MSG(err == NRFX_SUCCESS || err == NRFX_ERROR_BUSY); (void)err; return 0; } static void async_to_irq_trampoline(const struct device *dev) { const struct uarte_nrfx_config *cfg = dev->config; struct uarte_nrfx_data *data = dev->data; uint32_t prev = atomic_or(&data->flags, UARTE_DATA_FLAG_TRAMPOLINE); if (!(prev & UARTE_DATA_FLAG_TRAMPOLINE)) { nrfx_uarte_int_trigger(&cfg->nrfx_dev); } } static int uarte_nrfx_init(const struct device *dev) { int err; nrfx_err_t nerr; const nrfx_uarte_t *nrfx_dev = get_nrfx_dev(dev); const struct uarte_nrfx_config *cfg = dev->config; struct uarte_nrfx_data *data = dev->data; #ifdef CONFIG_ARCH_POSIX /* For simulation the DT provided peripheral address needs to be corrected */ ((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)nrfx_dev->p_reg; #endif err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } if (UARTE_ENHANCED_POLL_OUT && cfg->nrfx_config.tx_stop_on_end) { err = endtx_stoptx_ppi_init(nrfx_dev->p_reg); if (err < 0) { return err; } } if (UARTE_ANY_INTERRUPT_DRIVEN) { if (cfg->a2i_config) { err = uart_async_to_irq_init(data->a2i_data, cfg->a2i_config); if (err < 0) { return err; } } } if 
(IS_ENABLED(UARTE_INT_ASYNC) && data->async) { k_timer_init(&data->async->rx_timer, rx_timeout_handler, NULL); k_timer_user_data_set(&data->async->rx_timer, (void *)dev); k_timer_init(&data->async->tx_timer, tx_timeout_handler, NULL); k_timer_user_data_set(&data->async->tx_timer, (void *)dev); } nerr = nrfx_uarte_init(nrfx_dev, &cfg->nrfx_config, IS_ENABLED(UARTE_INT_ASYNC) ? (IS_POLLING_API(dev) ? NULL : evt_handler) : NULL); if (nerr == NRFX_SUCCESS && !IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) { err = start_rx(dev); } switch (nerr) { case NRFX_ERROR_INVALID_STATE: return -EBUSY; case NRFX_ERROR_BUSY: return -EACCES; case NRFX_ERROR_INVALID_PARAM: return -EINVAL; default: return 0; } } #ifdef CONFIG_PM_DEVICE static int stop_rx(const struct device *dev) { const struct uarte_nrfx_config *cfg = dev->config; if (IS_INT_DRIVEN_API(dev)) { return uart_async_to_irq_rx_disable(dev); } __ASSERT_NO_MSG(IS_POLLING_API(dev)); nrfx_err_t err; const nrfx_uarte_t *instance = &cfg->nrfx_dev; err = nrfx_uarte_rx_abort(instance, true, true); __ASSERT_NO_MSG(err == NRFX_SUCCESS); return 0; } static int uarte_nrfx_pm_action(const struct device *dev, enum pm_device_action action) { const struct uarte_nrfx_config *cfg = dev->config; int ret; switch (action) { case PM_DEVICE_ACTION_RESUME: if (cfg->flags & UARTE_CFG_FLAG_GPIO_MGMT) { ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } } if (!IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) { return start_rx(dev); } break; case PM_DEVICE_ACTION_SUSPEND: if (!IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) { stop_rx(dev); } if (cfg->flags & UARTE_CFG_FLAG_GPIO_MGMT) { ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP); if (ret < 0) { return ret; } } break; default: return -ENOTSUP; } return 0; } #endif #if defined(UARTE_CONFIG_STOP_Msk) #define UARTE_HAS_STOP_CONFIG 1 #endif #define UARTE(idx) DT_NODELABEL(uart##idx) #define UARTE_HAS_PROP(idx, prop) 
DT_NODE_HAS_PROP(UARTE(idx), prop) #define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop) /* Macro returning initial log level. Logs are off for UART used for console. */ #define GET_INIT_LOG_LEVEL(idx) \ COND_CODE_1(DT_HAS_CHOSEN(zephyr_console), \ (DT_SAME_NODE(UARTE(idx), \ DT_CHOSEN(zephyr_console)) ? \ LOG_LEVEL_NONE : CONFIG_UART_LOG_LEVEL), \ (CONFIG_UART_LOG_LEVEL)) /* Macro puts buffers in dedicated section if device tree property is set. */ #define UARTE_MEMORY_SECTION(idx) \ COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions), \ (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \ DT_PHANDLE(UARTE(idx), memory_regions)))))), \ ()) #define UART_NRF_UARTE_DEVICE(idx) \ LOG_INSTANCE_REGISTER(LOG_MODULE_NAME, idx, GET_INIT_LOG_LEVEL(idx)); \ static uint8_t uarte##idx##_tx_cache[CONFIG_UART_##idx##_TX_CACHE_SIZE] \ UARTE_MEMORY_SECTION(idx) __aligned(4); \ static uint8_t uarte##idx##_rx_cache[CONFIG_UART_##idx##_RX_CACHE_SIZE] \ UARTE_MEMORY_SECTION(idx) __aligned(4); \ static nrfx_uarte_rx_cache_t uarte##idx##_rx_cache_scratch; \ IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \ (static uint8_t a2i_rx_buf##idx[CONFIG_UART_##idx##_A2I_RX_SIZE] \ UARTE_MEMORY_SECTION(idx) __aligned(4);)) \ PINCTRL_DT_DEFINE(UARTE(idx)); \ static const struct uart_async_to_irq_config uarte_a2i_config_##idx = \ UART_ASYNC_TO_IRQ_API_CONFIG_INITIALIZER(&a2i_api, \ async_to_irq_trampoline, \ UARTE_PROP(idx, current_speed), \ uarte##idx##_tx_cache, \ /* nrfx_uarte driver is using the last byte in the */ \ /* cache buffer for keeping a byte that is currently*/\ /* polled out so it cannot be used as a cache buffer*/\ /* by the adaptation layer. 
*/ \ sizeof(uarte##idx##_tx_cache) - 1, \ COND_CODE_1(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \ (a2i_rx_buf##idx), (NULL)), \ COND_CODE_1(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \ (sizeof(a2i_rx_buf##idx)), (0)), \ CONFIG_UART_##idx##_A2I_RX_BUF_COUNT, \ LOG_INSTANCE_PTR(LOG_MODULE_NAME, idx)); \ static const struct uarte_nrfx_config uarte_config_##idx = { \ .a2i_config = IS_ENABLED(CONFIG_UART_##idx## _INTERRUPT_DRIVEN) ? \ &uarte_a2i_config_##idx : NULL, \ .nrfx_dev = NRFX_UARTE_INSTANCE(idx), \ .nrfx_config = { \ .p_context = (void *)DEVICE_DT_GET(UARTE(idx)), \ .tx_cache = { \ .p_buffer = uarte##idx##_tx_cache, \ .length = CONFIG_UART_##idx##_TX_CACHE_SIZE \ }, \ .rx_cache = { \ .p_buffer = uarte##idx##_rx_cache, \ .length = CONFIG_UART_##idx##_RX_CACHE_SIZE \ }, \ .p_rx_cache_scratch = &uarte##idx##_rx_cache_scratch, \ .baudrate = NRF_BAUDRATE(UARTE_PROP(idx, current_speed)), \ .interrupt_priority = DT_IRQ(UARTE(idx), priority), \ .config = { \ .hwfc = (UARTE_PROP(idx, hw_flow_control) == \ UART_CFG_FLOW_CTRL_RTS_CTS) ? \ NRF_UARTE_HWFC_ENABLED : NRF_UARTE_HWFC_DISABLED, \ .parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ? \ NRF_UARTE_PARITY_INCLUDED : NRF_UARTE_PARITY_EXCLUDED, \ IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\ IF_ENABLED(UARTE_ODD_PARITY_ALLOWED, \ (.paritytype = NRF_UARTE_PARITYTYPE_EVEN,)) \ }, \ .tx_stop_on_end = IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT), \ .skip_psel_cfg = true, \ .skip_gpio_cfg = true, \ }, \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \ .flags = (UARTE_PROP(idx, disable_rx) ? UARTE_CFG_FLAG_NO_RX : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \ UARTE_CFG_FLAG_GPIO_MGMT : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ? 
\ UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API : 0), \ LOG_INSTANCE_PTR_INIT(log, LOG_MODULE_NAME, idx) \ }; \ static struct uart_async_to_irq_data uarte_a2i_data_##idx; \ static struct uarte_async_data uarte_async_##idx; \ static struct uarte_nrfx_data uarte_data_##idx = { \ .a2i_data = IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ? \ &uarte_a2i_data_##idx : NULL, \ IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE, \ (.uart_config = { \ .baudrate = UARTE_PROP(idx, current_speed), \ .parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ? \ UART_CFG_PARITY_EVEN : UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UARTE_PROP(idx, hw_flow_control) ? \ UART_CFG_FLOW_CTRL_RTS_CTS : UART_CFG_FLOW_CTRL_NONE, \ },)) \ .async = (IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) || \ IS_ENABLED(CONFIG_UART_##idx##_ASYNC)) ? &uarte_async_##idx : NULL \ }; \ static int uarte_init_##idx(const struct device *dev) \ { \ COND_CODE_1(INSTANCE_POLLING(_, /*empty*/, idx, _), (), \ ( \ IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority), \ nrfx_isr, nrfx_uarte_##idx##_irq_handler, 0); \ irq_enable(DT_IRQN(UARTE(idx))); \ ) \ ) \ return uarte_nrfx_init(dev); \ } \ PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action); \ DEVICE_DT_DEFINE(UARTE(idx), \ uarte_init_##idx, \ PM_DEVICE_DT_GET(UARTE(idx)), \ &uarte_data_##idx, \ &uarte_config_##idx, \ PRE_KERNEL_1, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &uart_nrfx_uarte_driver_api) /* Macro creates device instance if it is enabled in devicetree. */ #define UARTE_DEVICE(periph, prefix, id, _) \ IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##id, (UART_NRF_UARTE_DEVICE(prefix##id);)) /* Macro iterates over nrfx_uarte instances enabled in the nrfx_config.h. */ NRFX_FOREACH_ENABLED(UARTE, UARTE_DEVICE, (), (), _) ```
/content/code_sandbox/drivers/serial/uart_nrfx_uarte2.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,587
```unknown # QuickLogic USBserialport_S3B configuration option config UART_QUICKLOGIC_USBSERIALPORT_S3B bool "QuickLogic USBserialport_S3B serial driver" default y depends on DT_HAS_QUICKLOGIC_USBSERIALPORT_S3B_ENABLED select SERIAL_HAS_DRIVER select PINCTRL help This option enables the QuickLogic USBserialport_S3B serial driver. ```
/content/code_sandbox/drivers/serial/Kconfig.ql_usbserialport_s3b
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
```c /* * */ #include <string.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/serial/uart_async_rx.h> static uint8_t inc(struct uart_async_rx *rx_data, uint8_t val) { return (val + 1) & (rx_data->config->buf_cnt - 1); } static struct uart_async_rx_buf *get_buf(struct uart_async_rx *rx_data, uint8_t idx) { uint8_t *p = rx_data->config->buffer; p += idx * (rx_data->buf_len + sizeof(struct uart_async_rx_buf)); return (struct uart_async_rx_buf *)p; } uint8_t *uart_async_rx_buf_req(struct uart_async_rx *rx_data) { uint8_t *data = NULL; if (rx_data->free_buf_cnt != 0) { struct uart_async_rx_buf *buf = get_buf(rx_data, rx_data->drv_buf_idx); data = buf->buffer; rx_data->drv_buf_idx = inc(rx_data, rx_data->drv_buf_idx); atomic_dec(&rx_data->free_buf_cnt); } return data; } void uart_async_rx_on_rdy(struct uart_async_rx *rx_data, uint8_t *buffer, size_t length) { /* Cannot use CONTAINER_OF because validation fails due to type mismatch: * uint8_t * vs uint8_t []. */ struct uart_async_rx_buf *rx_buf = (struct uart_async_rx_buf *)(buffer - offsetof(struct uart_async_rx_buf, buffer)); rx_buf->wr_idx += length; __ASSERT_NO_MSG(rx_buf->wr_idx <= rx_data->buf_len); atomic_add(&rx_data->pending_bytes, length); } static void buf_reset(struct uart_async_rx_buf *buf) { buf->wr_idx = 0; buf->completed = 0; } static void usr_rx_buf_release(struct uart_async_rx *rx_data, struct uart_async_rx_buf *buf) { buf_reset(buf); rx_data->rd_idx = 0; rx_data->rd_buf_idx = inc(rx_data, rx_data->rd_buf_idx); atomic_inc(&rx_data->free_buf_cnt); __ASSERT_NO_MSG(rx_data->free_buf_cnt <= rx_data->config->buf_cnt); } void uart_async_rx_on_buf_rel(struct uart_async_rx *rx_data, uint8_t *buffer) { /* Cannot use CONTAINER_OF because validation fails due to type mismatch: * uint8_t * vs uint8_t []. 
*/ struct uart_async_rx_buf *rx_buf = (struct uart_async_rx_buf *)(buffer - offsetof(struct uart_async_rx_buf, buffer)); rx_buf->completed = 1; } size_t uart_async_rx_data_claim(struct uart_async_rx *rx_data, uint8_t **data, size_t length) { struct uart_async_rx_buf *buf; int rem; if ((rx_data->pending_bytes == 0) || (length == 0)) { return 0; } do { buf = get_buf(rx_data, rx_data->rd_buf_idx); /* Even though buffer is released in consume phase it is possible that * it is required here as well (e.g. was not completed previously). */ if ((buf->completed == 1) && (rx_data->rd_idx == buf->wr_idx)) { usr_rx_buf_release(rx_data, buf); } else { break; } } while (1); *data = &buf->buffer[rx_data->rd_idx]; rem = buf->wr_idx - rx_data->rd_idx; return MIN(length, rem); } bool uart_async_rx_data_consume(struct uart_async_rx *rx_data, size_t length) { struct uart_async_rx_buf *buf = get_buf(rx_data, rx_data->rd_buf_idx); rx_data->rd_idx += length; /* Attempt to release the buffer if it is completed and all data is consumed. 
*/ if ((buf->completed == 1) && (rx_data->rd_idx == buf->wr_idx)) { usr_rx_buf_release(rx_data, buf); } atomic_sub(&rx_data->pending_bytes, length); __ASSERT_NO_MSG(rx_data->rd_idx <= buf->wr_idx); return rx_data->free_buf_cnt > 0; } void uart_async_rx_reset(struct uart_async_rx *rx_data) { rx_data->free_buf_cnt = rx_data->config->buf_cnt; rx_data->rd_idx = 0; rx_data->rd_buf_idx = 0; rx_data->drv_buf_idx = 0; rx_data->pending_bytes = 0; for (uint8_t i = 0; i < rx_data->config->buf_cnt; i++) { buf_reset(get_buf(rx_data, i)); } } int uart_async_rx_init(struct uart_async_rx *rx_data, const struct uart_async_rx_config *config) { __ASSERT_NO_MSG(config->buf_cnt > 0); __ASSERT_NO_MSG(config->length / config->buf_cnt <= UINT8_MAX); memset(rx_data, 0, sizeof(*rx_data)); rx_data->config = config; rx_data->buf_len = (config->length / config->buf_cnt) - UART_ASYNC_RX_BUF_OVERHEAD; if (rx_data->buf_len >= BIT(7)) { return -EINVAL; } uart_async_rx_reset(rx_data); return 0; } ```
/content/code_sandbox/drivers/serial/uart_async_rx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,154
```c /* * */ /** * @brief UART driver for Intel FPGA UART Core IP * Reference : Embedded Peripherals IP User Guide : 12. JTAG UART Core * */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/uart.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/__assert.h> #define DT_DRV_COMPAT altr_jtag_uart #define UART_ALTERA_JTAG_DATA_OFFSET 0x00 /* DATA : Register offset */ #define UART_ALTERA_JTAG_CTRL_OFFSET 0x04 /* CTRL : Register offset */ #define UART_IE_TX (1 << 1) /* CTRL : TX Interrupt Enable */ #define UART_IE_RX (1 << 0) /* CTRL : RX Interrupt Enable */ #define UART_DATA_MASK 0xFF /* DATA : Data Mask */ #define UART_WFIFO_MASK 0xFFFF0000 /* CTRL : Transmit FIFO Mask */ #define UART_WFIFO_OFST (16) #define ALTERA_AVALON_JTAG_UART_DATA_DATA_OFST (0) #define ALTERA_AVALON_JTAG_UART_DATA_RVALID_MSK (0x00008000) #define ALTERA_AVALON_JTAG_UART_CONTROL_RI_MSK (0x00000100) #define ALTERA_AVALON_JTAG_UART_CONTROL_WI_MSK (0x00000200) #ifdef CONFIG_UART_ALTERA_JTAG_HAL #include "altera_avalon_jtag_uart.h" #include "altera_avalon_jtag_uart_regs.h" extern int altera_avalon_jtag_uart_read(altera_avalon_jtag_uart_state *sp, char *buffer, int space, int flags); extern int altera_avalon_jtag_uart_write(altera_avalon_jtag_uart_state *sp, const char *ptr, int count, int flags); #else /* device data */ struct uart_altera_jtag_device_data { struct k_spinlock lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; /* Callback function pointer */ void *cb_data; /* Callback function arg */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; /* device config */ struct uart_altera_jtag_device_config { mm_reg_t base; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; unsigned int irq_num; uint16_t write_fifo_depth; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #endif /* CONFIG_UART_ALTERA_JTAG_HAL */ #ifndef CONFIG_UART_ALTERA_JTAG_HAL /** * @brief Poll the device for input. 
* * @param dev UART device instance * @param c Pointer to character * * @return 0 if a character arrived, -1 otherwise. * -EINVAL if c is null pointer. */ static int uart_altera_jtag_poll_in(const struct device *dev, unsigned char *c) { int ret = -1; const struct uart_altera_jtag_device_config *config = dev->config; struct uart_altera_jtag_device_data *data = dev->data; unsigned int input_data; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(c != NULL, "c is null pointer!"); /* Stop, if c is null pointer */ if (c == NULL) { return -EINVAL; } k_spinlock_key_t key = k_spin_lock(&data->lock); input_data = sys_read32(config->base + UART_ALTERA_JTAG_DATA_OFFSET); /* check if data is valid. */ if (input_data & ALTERA_AVALON_JTAG_UART_DATA_RVALID_MSK) { *c = (input_data & UART_DATA_MASK) >> ALTERA_AVALON_JTAG_UART_DATA_DATA_OFST; ret = 0; } k_spin_unlock(&data->lock, key); return ret; } #endif /* CONFIG_UART_ALTERA_JTAG_HAL */ /** * @brief Output a character in polled mode. * * This routine checks if the transmitter is full. * When the transmitter is not full, it writes a character to the data register. * It waits and blocks the calling thread, otherwise. This function is a blocking call. 
* * @param dev UART device instance * @param c Character to send */ static void uart_altera_jtag_poll_out(const struct device *dev, unsigned char c) { #ifdef CONFIG_UART_ALTERA_JTAG_HAL altera_avalon_jtag_uart_state ustate; ustate.base = JTAG_UART_0_BASE; altera_avalon_jtag_uart_write(&ustate, &c, 1, 0); #else const struct uart_altera_jtag_device_config *config = dev->config; struct uart_altera_jtag_device_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); /* While TX FIFO full */ while (!(sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET) & UART_WFIFO_MASK)) { } sys_write8(c, config->base + UART_ALTERA_JTAG_DATA_OFFSET); k_spin_unlock(&data->lock, key); #endif /* CONFIG_UART_ALTERA_JTAG_HAL */ } /** * @brief Initialise an instance of the driver * * This function initialise the interrupt configuration for the driver. * * @param dev UART device instance * * @return 0 to indicate success. */ static int uart_altera_jtag_init(const struct device *dev) { /* * Work around to clear interrupt enable bits * as it is not being done by HAL driver explicitly. */ #ifdef CONFIG_UART_ALTERA_JTAG_HAL IOWR_ALTERA_AVALON_JTAG_UART_CONTROL(JTAG_UART_0_BASE, 0); #else const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* * Enable hardware interrupt. * The corresponding csr from IP still needs to be set, * so that the IP generates interrupt signal. */ config->irq_config_func(dev); #endif /* Disable the tx and rx interrupt signals from JTAG core IP. 
*/ ctrl_val &= ~(UART_IE_TX | UART_IE_RX); sys_write32(ctrl_val, config->base + UART_ALTERA_JTAG_CTRL_OFFSET); #endif /* CONFIG_UART_ALTERA_JTAG_HAL */ return 0; } /* * Functions for Interrupt driven API */ #if defined(CONFIG_UART_INTERRUPT_DRIVEN) && !defined(CONFIG_UART_ALTERA_JTAG_HAL) /** * @brief Fill FIFO with data * This function is expected to be called from UART interrupt handler (ISR), * if uart_irq_tx_ready() returns true. * * @param dev UART device instance * @param tx_data Data to transmit * @param size Number of bytes to send * * @return Number of bytes sent */ static int uart_altera_jtag_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_altera_jtag_device_config *config = dev->config; struct uart_altera_jtag_device_data *data = dev->data; uint32_t ctrl_val; uint32_t space = 0; int i; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(tx_data != NULL, "tx buffer is null pointer!"); /* Stop, if buffer is null pointer */ if (tx_data == NULL) { return 0; } k_spinlock_key_t key = k_spin_lock(&data->lock); ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); space = (ctrl_val & UART_WFIFO_MASK) >> UART_WFIFO_OFST; /* guard for tx data overflow: * make sure that driver is not sending more than current available space. */ for (i = 0; (i < size) && (i < space); i++) { sys_write8(tx_data[i], config->base + UART_ALTERA_JTAG_DATA_OFFSET); } k_spin_unlock(&data->lock, key); return i; } /** * @brief Read data from FIFO * This function is expected to be called from UART interrupt handler (ISR), * if uart_irq_rx_ready() returns true. 
* * @param dev UART device instance * @param rx_data Data container * @param size Container size * * @return Number of bytes read */ static int uart_altera_jtag_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_altera_jtag_device_config *config = dev->config; struct uart_altera_jtag_device_data *data = dev->data; int i; unsigned int input_data; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(rx_data != NULL, "Rx buffer is null pointer!"); /* Stop, if buffer is null pointer */ if (rx_data == NULL) { return 0; } k_spinlock_key_t key = k_spin_lock(&data->lock); for (i = 0; i < size; i++) { input_data = sys_read32(config->base + UART_ALTERA_JTAG_DATA_OFFSET); if (input_data & ALTERA_AVALON_JTAG_UART_DATA_RVALID_MSK) { rx_data[i] = (input_data & UART_DATA_MASK) >> ALTERA_AVALON_JTAG_UART_DATA_DATA_OFST; } else { /* break upon invalid data or no more data */ break; } } k_spin_unlock(&data->lock, key); return i; } /** * @brief Enable TX interrupt in IER * * @param dev UART device instance */ static void uart_altera_jtag_irq_tx_enable(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spinlock_key_t key = k_spin_lock(&data->lock); ctrl_val |= UART_IE_TX; sys_write32(ctrl_val, config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Disable TX interrupt in IER * * @param dev UART device instance */ static void uart_altera_jtag_irq_tx_disable(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spinlock_key_t key = k_spin_lock(&data->lock); ctrl_val &= ~UART_IE_TX; sys_write32(ctrl_val, config->base + UART_ALTERA_JTAG_CTRL_OFFSET); 
k_spin_unlock(&data->lock, key); } /** * @brief Check if UART TX buffer can accept a new char. * * @param dev UART device instance * * @return 1 if TX interrupt is enabled and at least one char can be written to UART. * 0 If device is not ready to write a new byte. */ static int uart_altera_jtag_irq_tx_ready(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); int ret = 0; uint32_t space = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); /* if TX interrupt is enabled */ if (ctrl_val & ALTERA_AVALON_JTAG_UART_CONTROL_WI_MSK) { /* check for space in tx fifo */ space = (ctrl_val & UART_WFIFO_MASK) >> UART_WFIFO_OFST; if (space) { ret = 1; } } k_spin_unlock(&data->lock, key); return ret; } /** * @brief Check if nothing remains to be transmitted * * @param dev UART device instance * * @return 1 if nothing remains to be transmitted, 0 otherwise */ static int uart_altera_jtag_irq_tx_complete(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); int ret = 0; uint32_t space = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); /* note: This is checked indirectly via the space in tx fifo. 
*/ space = (ctrl_val & UART_WFIFO_MASK) >> UART_WFIFO_OFST; if (space == config->write_fifo_depth) { ret = 1; } k_spin_unlock(&data->lock, key); return ret; } /** * @brief Enable RX interrupt in IER * * @param dev UART device instance */ static void uart_altera_jtag_irq_rx_enable(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spinlock_key_t key = k_spin_lock(&data->lock); ctrl_val |= UART_IE_RX; sys_write32(ctrl_val, config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Disable RX interrupt in IER * * @param dev UART device instance */ static void uart_altera_jtag_irq_rx_disable(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spinlock_key_t key = k_spin_lock(&data->lock); ctrl_val &= ~UART_IE_RX; sys_write32(ctrl_val, config->base + UART_ALTERA_JTAG_CTRL_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Check if Rx IRQ has been raised * * @param dev UART device instance * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_altera_jtag_irq_rx_ready(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); int ret = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); if (ctrl_val & ALTERA_AVALON_JTAG_UART_CONTROL_RI_MSK) { ret = 1; } k_spin_unlock(&data->lock, key); return ret; } /** * @brief Update cached contents of IIR * * @param dev UART device instance * * @return Always 1 */ static int uart_altera_jtag_irq_update(const struct device *dev) { return 1; } /** * @brief Check if any IRQ 
is pending * * @param dev UART device instance * * @return 1 if an IRQ is pending, 0 otherwise */ static int uart_altera_jtag_irq_is_pending(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); const struct uart_altera_jtag_device_config *config = dev->config; uint32_t ctrl_val = sys_read32(config->base + UART_ALTERA_JTAG_CTRL_OFFSET); int ret = 0; if (ctrl_val & (ALTERA_AVALON_JTAG_UART_CONTROL_RI_MSK|ALTERA_AVALON_JTAG_UART_CONTROL_WI_MSK)) { ret = 1; } k_spin_unlock(&data->lock, key); return ret; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device instance * @param cb Callback function pointer. * @param cb_data Data to pass to callback function. */ static void uart_altera_jtag_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_altera_jtag_device_data *data = dev->data; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(cb != NULL, "uart_irq_callback_user_data_t cb is null pointer!"); k_spinlock_key_t key = k_spin_lock(&data->lock); data->cb = cb; data->cb_data = cb_data; k_spin_unlock(&data->lock, key); } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. 
* * @param dev Pointer to UART device struct */ static void uart_altera_jtag_isr(const struct device *dev) { struct uart_altera_jtag_device_data *data = dev->data; uart_irq_callback_user_data_t callback = data->cb; if (callback) { callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN && !CONFIG_UART_ALTERA_JTAG_HAL */ static const struct uart_driver_api uart_altera_jtag_driver_api = { #ifndef CONFIG_UART_ALTERA_JTAG_HAL .poll_in = uart_altera_jtag_poll_in, #endif /* CONFIG_UART_ALTERA_JTAG_HAL */ .poll_out = uart_altera_jtag_poll_out, .err_check = NULL, #if defined(CONFIG_UART_INTERRUPT_DRIVEN) && !defined(CONFIG_UART_ALTERA_JTAG_HAL) .fifo_fill = uart_altera_jtag_fifo_fill, .fifo_read = uart_altera_jtag_fifo_read, .irq_tx_enable = uart_altera_jtag_irq_tx_enable, .irq_tx_disable = uart_altera_jtag_irq_tx_disable, .irq_tx_ready = uart_altera_jtag_irq_tx_ready, .irq_tx_complete = uart_altera_jtag_irq_tx_complete, .irq_rx_enable = uart_altera_jtag_irq_rx_enable, .irq_rx_disable = uart_altera_jtag_irq_rx_disable, .irq_rx_ready = uart_altera_jtag_irq_rx_ready, .irq_is_pending = uart_altera_jtag_irq_is_pending, .irq_update = uart_altera_jtag_irq_update, .irq_callback_set = uart_altera_jtag_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN && !CONFIG_UART_ALTERA_JTAG_HAL */ }; #ifdef CONFIG_UART_ALTERA_JTAG_HAL #define UART_ALTERA_JTAG_DEVICE_INIT(n) \ DEVICE_DT_INST_DEFINE(n, uart_altera_jtag_init, NULL, NULL, NULL, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_altera_jtag_driver_api); #else #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_ALTERA_JTAG_CONFIG_FUNC(n) \ static void uart_altera_jtag_irq_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ uart_altera_jtag_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_ALTERA_JTAG_CONFIG_INIT(n) \ .irq_config_func = uart_altera_jtag_irq_config_func_##n, \ .irq_num = DT_INST_IRQN(n), \ .write_fifo_depth 
= DT_INST_PROP_OR(n, write_fifo_depth, 0),\ #else #define UART_ALTERA_JTAG_CONFIG_FUNC(n) #define UART_ALTERA_JTAG_CONFIG_INIT(n) #define UART_ALTERA_JTAG_DATA_INIT(n) #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #define UART_ALTERA_JTAG_DEVICE_INIT(n) \ UART_ALTERA_JTAG_CONFIG_FUNC(n) \ static struct uart_altera_jtag_device_data uart_altera_jtag_device_data_##n = { \ }; \ \ static const struct uart_altera_jtag_device_config uart_altera_jtag_dev_cfg_##n = { \ .base = DT_INST_REG_ADDR(n), \ UART_ALTERA_JTAG_CONFIG_INIT(n) \ }; \ DEVICE_DT_INST_DEFINE(n, \ uart_altera_jtag_init, \ NULL, \ &uart_altera_jtag_device_data_##n, \ &uart_altera_jtag_dev_cfg_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_altera_jtag_driver_api); #endif /* CONFIG_UART_ALTERA_JTAG_HAL */ DT_INST_FOREACH_STATUS_OKAY(UART_ALTERA_JTAG_DEVICE_INIT) ```
/content/code_sandbox/drivers/serial/uart_altera_jtag.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,579
```unknown
config UART_ALTERA
	bool "ALTERA UART driver"
	depends on DT_HAS_ALTR_UART_ENABLED
	select SERIAL_HAS_DRIVER
	select SERIAL_SUPPORT_INTERRUPT
	help
	  Enable the Altera UART driver, that can be built into
	  Intel NiosV CPU designs.

config UART_ALTERA_EOP
	bool "ALTERA UART end of packet feature"
	depends on UART_ALTERA && UART_DRV_CMD && UART_INTERRUPT_DRIVEN
	help
	  Use driver command CMD_ENABLE_EOP and CMD_DISABLE_EOP
	  to use the feature.

config UART_ALTERA_LINE_CTRL_WORKAROUND
	bool "ALTERA UART flow control workaround"
	depends on UART_ALTERA && UART_LINE_CTRL
	help
	  Before enabling this, please try to optimise the ISR to fetch
	  the receive data faster. Enabling this will cause the transmitter
	  to wait for rising edge of CTS before sending. The receiver will
	  deassert RTS as soon as a byte is received and reassert after
	  the byte is fetched.
```
/content/code_sandbox/drivers/serial/Kconfig.altera
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
207
```c /* * */ #define DT_DRV_COMPAT nuvoton_npcx_uart #include <zephyr/sys/__assert.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <soc.h> #include "soc_miwu.h" #include "soc_power.h" #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(uart_npcx, CONFIG_UART_LOG_LEVEL); /* Driver config */ struct uart_npcx_config { struct uart_reg *inst; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) uart_irq_config_func_t irq_config_func; #endif /* clock configuration */ struct npcx_clk_cfg clk_cfg; /* int-mux configuration */ const struct npcx_wui uart_rx_wui; /* pinmux configuration */ const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_UART_ASYNC_API struct npcx_clk_cfg mdma_clk_cfg; struct mdma_reg *mdma_reg_base; #endif }; enum uart_pm_policy_state_flag { UART_PM_POLICY_STATE_TX_FLAG, UART_PM_POLICY_STATE_RX_FLAG, UART_PM_POLICY_STATE_FLAG_COUNT, }; #ifdef CONFIG_UART_ASYNC_API struct uart_npcx_rx_dma_params { uint8_t *buf; size_t buf_len; size_t offset; size_t counter; size_t timeout_us; struct k_work_delayable timeout_work; bool enabled; }; struct uart_npcx_tx_dma_params { const uint8_t *buf; size_t buf_len; struct k_work_delayable timeout_work; size_t timeout_us; }; struct uart_npcx_async_data { const struct device *uart_dev; uart_callback_t user_callback; void *user_data; struct uart_npcx_rx_dma_params rx_dma_params; struct uart_npcx_tx_dma_params tx_dma_params; uint8_t *next_rx_buffer; size_t next_rx_buffer_len; bool tx_in_progress; }; #endif /* Driver data */ struct uart_npcx_data { /* Baud rate */ uint32_t baud_rate; struct miwu_callback uart_rx_cb; struct k_spinlock lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t user_cb; void *user_data; #endif #ifdef CONFIG_PM ATOMIC_DEFINE(pm_policy_state_flag, 
UART_PM_POLICY_STATE_FLAG_COUNT); #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED struct k_work_delayable rx_refresh_timeout_work; #endif #endif #ifdef CONFIG_UART_ASYNC_API struct uart_npcx_async_data async; #endif }; #ifdef CONFIG_PM static void uart_npcx_pm_policy_state_lock_get(struct uart_npcx_data *data, enum uart_pm_policy_state_flag flag) { if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) { pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } static void uart_npcx_pm_policy_state_lock_put(struct uart_npcx_data *data, enum uart_pm_policy_state_flag flag) { if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) { pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } #endif /* UART local functions */ static int uart_set_npcx_baud_rate(struct uart_reg *const inst, int baud_rate, int src_clk) { /* * Support two baud rate setting so far: * - 115200 * - 3000000 */ if (baud_rate == 115200) { if (src_clk == MHZ(15)) { inst->UPSR = 0x38; inst->UBAUD = 0x01; } else if (src_clk == MHZ(20)) { inst->UPSR = 0x08; inst->UBAUD = 0x0a; } else if (src_clk == MHZ(25)) { inst->UPSR = 0x10; inst->UBAUD = 0x08; } else if (src_clk == MHZ(30)) { inst->UPSR = 0x10; inst->UBAUD = 0x0a; } else if (src_clk == MHZ(48)) { inst->UPSR = 0x08; inst->UBAUD = 0x19; } else if (src_clk == MHZ(50)) { inst->UPSR = 0x08; inst->UBAUD = 0x1a; } else { return -EINVAL; } } else if (baud_rate == MHZ(3)) { if (src_clk == MHZ(48)) { inst->UPSR = 0x08; inst->UBAUD = 0x0; } else { return -EINVAL; } } else { return -EINVAL; } return 0; } #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) static int uart_npcx_rx_fifo_available(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; /* True if at least one byte is in the Rx FIFO */ return IS_BIT_SET(inst->UFRSTS, NPCX_UFRSTS_RFIFO_NEMPTY_STS); } static void uart_npcx_dis_all_tx_interrupts(const 
struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; /* Disable all Tx interrupts */ inst->UFTCTL &= ~(BIT(NPCX_UFTCTL_TEMPTY_LVL_EN) | BIT(NPCX_UFTCTL_TEMPTY_EN) | BIT(NPCX_UFTCTL_NXMIP_EN)); } static void uart_npcx_clear_rx_fifo(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; uint8_t scratch; /* Read all dummy bytes out from Rx FIFO */ while (uart_npcx_rx_fifo_available(dev)) { scratch = inst->URBUF; } } #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_npcx_tx_fifo_ready(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; /* True if the Tx FIFO is not completely full */ return !(GET_FIELD(inst->UFTSTS, NPCX_UFTSTS_TEMPTY_LVL) == 0); } static int uart_npcx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct uart_npcx_data *data = dev->data; uint8_t tx_bytes = 0U; k_spinlock_key_t key = k_spin_lock(&data->lock); /* If Tx FIFO is still ready to send */ while ((size - tx_bytes > 0) && uart_npcx_tx_fifo_ready(dev)) { /* Put a character into Tx FIFO */ inst->UTBUF = tx_data[tx_bytes++]; } #ifdef CONFIG_PM uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_TX_FLAG); /* Enable NXMIP interrupt in case ec enters deep sleep early */ inst->UFTCTL |= BIT(NPCX_UFTCTL_NXMIP_EN); #endif /* CONFIG_PM */ k_spin_unlock(&data->lock, key); return tx_bytes; } static int uart_npcx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; unsigned int rx_bytes = 0U; /* If least one byte is in the Rx FIFO */ while ((size - rx_bytes > 0) && uart_npcx_rx_fifo_available(dev)) { /* Receive one byte 
from Rx FIFO */ rx_data[rx_bytes++] = inst->URBUF; } return rx_bytes; } static void uart_npcx_irq_tx_enable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct uart_npcx_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); inst->UFTCTL |= BIT(NPCX_UFTCTL_TEMPTY_EN); k_spin_unlock(&data->lock, key); } static void uart_npcx_irq_tx_disable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct uart_npcx_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); inst->UFTCTL &= ~(BIT(NPCX_UFTCTL_TEMPTY_EN)); k_spin_unlock(&data->lock, key); } static bool uart_npcx_irq_tx_is_enabled(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; return IS_BIT_SET(inst->UFTCTL, NPCX_UFTCTL_TEMPTY_EN); } static int uart_npcx_irq_tx_ready(const struct device *dev) { return uart_npcx_tx_fifo_ready(dev) && uart_npcx_irq_tx_is_enabled(dev); } static int uart_npcx_irq_tx_complete(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; /* Tx FIFO is empty or last byte is sending */ return IS_BIT_SET(inst->UFTSTS, NPCX_UFTSTS_NXMIP); } static void uart_npcx_irq_rx_enable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; inst->UFRCTL |= BIT(NPCX_UFRCTL_RNEMPTY_EN); } static void uart_npcx_irq_rx_disable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; inst->UFRCTL &= ~(BIT(NPCX_UFRCTL_RNEMPTY_EN)); } static bool uart_npcx_irq_rx_is_enabled(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; return 
IS_BIT_SET(inst->UFRCTL, NPCX_UFRCTL_RNEMPTY_EN); } static int uart_npcx_irq_rx_ready(const struct device *dev) { return uart_npcx_rx_fifo_available(dev); } static void uart_npcx_irq_err_enable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; inst->UICTRL |= BIT(NPCX_UICTRL_EEI); } static void uart_npcx_irq_err_disable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; inst->UICTRL &= ~(BIT(NPCX_UICTRL_EEI)); } static int uart_npcx_irq_is_pending(const struct device *dev) { return uart_npcx_irq_tx_ready(dev) || (uart_npcx_irq_rx_ready(dev) && uart_npcx_irq_rx_is_enabled(dev)); } static int uart_npcx_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void uart_npcx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_npcx_data *data = dev->data; data->user_cb = cb; data->user_data = cb_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->async.user_callback = NULL; data->async.user_data = NULL; #endif } /* * Poll-in implementation for interrupt driven config, forward call to * uart_npcx_fifo_read(). */ static int uart_npcx_poll_in(const struct device *dev, unsigned char *c) { return uart_npcx_fifo_read(dev, c, 1) ? 0 : -1; } /* * Poll-out implementation for interrupt driven config, forward call to * uart_npcx_fifo_fill(). */ static void uart_npcx_poll_out(const struct device *dev, unsigned char c) { while (!uart_npcx_fifo_fill(dev, &c, 1)) { continue; } } #else /* !CONFIG_UART_INTERRUPT_DRIVEN */ /* * Poll-in implementation for byte mode config, read byte from URBUF if * available. 
*/ static int uart_npcx_poll_in(const struct device *dev, unsigned char *c) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; /* Rx single byte buffer is not full */ if (!IS_BIT_SET(inst->UICTRL, NPCX_UICTRL_RBF)) { return -1; } *c = inst->URBUF; return 0; } /* * Poll-out implementation for byte mode config, write byte to UTBUF if empty. */ static void uart_npcx_poll_out(const struct device *dev, unsigned char c) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; /* Wait while Tx single byte buffer is ready to send */ while (!IS_BIT_SET(inst->UICTRL, NPCX_UICTRL_TBE)) { continue; } inst->UTBUF = c; } #endif /* !CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API static void async_user_callback(const struct device *dev, struct uart_event *evt) { const struct uart_npcx_data *data = dev->data; if (data->async.user_callback) { data->async.user_callback(dev, evt, data->async.user_data); } } static void async_evt_rx_rdy(const struct device *dev) { struct uart_npcx_data *data = dev->data; struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; struct uart_event event = {.type = UART_RX_RDY, .data.rx.buf = rx_dma_params->buf, .data.rx.len = rx_dma_params->counter - rx_dma_params->offset, .data.rx.offset = rx_dma_params->offset}; LOG_DBG("RX Ready: (len: %d off: %d buf: %x)", event.data.rx.len, event.data.rx.offset, (uint32_t)event.data.rx.buf); /* Update the current pos for new data */ rx_dma_params->offset = rx_dma_params->counter; /* Only send event for new data */ if (event.data.rx.len > 0) { async_user_callback(dev, &event); } } static void async_evt_tx_done(const struct device *dev) { struct uart_npcx_data *data = dev->data; (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work); LOG_DBG("TX done: %d", data->async.tx_dma_params.buf_len); struct uart_event event = {.type = UART_TX_DONE, .data.tx.buf = 
data->async.tx_dma_params.buf, .data.tx.len = data->async.tx_dma_params.buf_len}; /* Reset TX Buffer */ data->async.tx_dma_params.buf = NULL; data->async.tx_dma_params.buf_len = 0U; async_user_callback(dev, &event); } static void uart_npcx_async_rx_dma_get_status(const struct device *dev, size_t *pending_length) { const struct uart_npcx_config *const config = dev->config; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; if (IS_BIT_SET(mdma_reg_base->MDMA_CTL0, NPCX_MDMA_CTL_MDMAEN)) { *pending_length = mdma_reg_base->MDMA_CTCNT0; } else { *pending_length = 0; } } static void uart_npcx_async_rx_flush(const struct device *dev) { struct uart_npcx_data *data = dev->data; struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; size_t curr_rcv_len, dma_pending_len; uart_npcx_async_rx_dma_get_status(dev, &dma_pending_len); curr_rcv_len = rx_dma_params->buf_len - dma_pending_len; if (curr_rcv_len > rx_dma_params->offset) { rx_dma_params->counter = curr_rcv_len; async_evt_rx_rdy(dev); #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_RX_FLAG); k_work_reschedule(&data->rx_refresh_timeout_work, delay); #endif } } static void async_evt_rx_buf_request(const struct device *dev) { struct uart_event evt = { .type = UART_RX_BUF_REQUEST, }; async_user_callback(dev, &evt); } static int uart_npcx_async_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct uart_npcx_data *data = dev->data; data->async.user_callback = callback; data->async.user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->user_cb = NULL; data->user_data = NULL; #endif return 0; } static inline void async_timer_start(struct k_work_delayable *work, uint32_t timeout_us) { if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) { LOG_DBG("async timer started for %d us", timeout_us); 
k_work_reschedule(work, K_USEC(timeout_us)); } } static int uart_npcx_async_tx_dma_get_status(const struct device *dev, size_t *pending_length) { const struct uart_npcx_config *const config = dev->config; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; if (IS_BIT_SET(mdma_reg_base->MDMA_CTL1, NPCX_MDMA_CTL_MDMAEN)) { *pending_length = mdma_reg_base->MDMA_CTCNT1; } else { *pending_length = 0; return -EBUSY; } return 0; } static int uart_npcx_async_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; struct uart_npcx_data *data = dev->data; struct uart_npcx_tx_dma_params *tx_dma_params = &data->async.tx_dma_params; int key = irq_lock(); if (buf == NULL || len == 0) { irq_unlock(key); return -EINVAL; } if (tx_dma_params->buf) { irq_unlock(key); return -EBUSY; } data->async.tx_in_progress = true; data->async.tx_dma_params.buf = buf; data->async.tx_dma_params.buf_len = len; data->async.tx_dma_params.timeout_us = timeout; mdma_reg_base->MDMA_SRCB1 = (uint32_t)buf; mdma_reg_base->MDMA_TCNT1 = len; async_timer_start(&data->async.tx_dma_params.timeout_work, timeout); mdma_reg_base->MDMA_CTL1 |= BIT(NPCX_MDMA_CTL_MDMAEN) | BIT(NPCX_MDMA_CTL_SIEN); inst->UMDSL |= BIT(NPCX_UMDSL_ETD); #ifdef CONFIG_PM /* Do not allow system to suspend until transmission has completed */ uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_TX_FLAG); #endif irq_unlock(key); return 0; } static int uart_npcx_async_tx_abort(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_npcx_data *data = dev->data; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; size_t dma_pending_len, bytes_transmitted; int ret; k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work); mdma_reg_base->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_MDMAEN); ret = 
uart_npcx_async_tx_dma_get_status(dev, &dma_pending_len); if (ret != 0) { bytes_transmitted = 0; } else { bytes_transmitted = data->async.tx_dma_params.buf_len - dma_pending_len; } struct uart_event tx_aborted_event = { .type = UART_TX_ABORTED, .data.tx.buf = data->async.tx_dma_params.buf, .data.tx.len = bytes_transmitted, }; async_user_callback(dev, &tx_aborted_event); return ret; } static void uart_npcx_async_tx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_npcx_tx_dma_params *tx_params = CONTAINER_OF(dwork, struct uart_npcx_tx_dma_params, timeout_work); struct uart_npcx_async_data *async_data = CONTAINER_OF(tx_params, struct uart_npcx_async_data, tx_dma_params); const struct device *dev = async_data->uart_dev; LOG_ERR("Async Tx Timeout"); uart_npcx_async_tx_abort(dev); } static int uart_npcx_async_rx_enable(const struct device *dev, uint8_t *buf, const size_t len, const int32_t timeout_us) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; struct uart_npcx_data *data = dev->data; struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; unsigned int key; LOG_DBG("Enable RX DMA, len:%d", len); key = irq_lock(); __ASSERT_NO_MSG(buf != NULL); __ASSERT_NO_MSG(len > 0); rx_dma_params->timeout_us = timeout_us; rx_dma_params->buf = buf; rx_dma_params->buf_len = len; rx_dma_params->offset = 0; rx_dma_params->counter = 0; SET_FIELD(inst->UFRCTL, NPCX_UFRCTL_RFULL_LVL_SEL, 1); mdma_reg_base->MDMA_DSTB0 = (uint32_t)buf; mdma_reg_base->MDMA_TCNT0 = len; mdma_reg_base->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN) | BIT(NPCX_MDMA_CTL_SIEN); inst->UMDSL |= BIT(NPCX_UMDSL_ERD); rx_dma_params->enabled = true; async_evt_rx_buf_request(dev); inst->UFRCTL |= BIT(NPCX_UFRCTL_RNEMPTY_EN); irq_unlock(key); return 0; } static void async_evt_rx_buf_release(const struct device *dev) { struct 
uart_npcx_data *data = dev->data; struct uart_event evt = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->async.rx_dma_params.buf, }; async_user_callback(dev, &evt); data->async.rx_dma_params.buf = NULL; data->async.rx_dma_params.buf_len = 0U; data->async.rx_dma_params.offset = 0U; data->async.rx_dma_params.counter = 0U; } static int uart_npcx_async_rx_disable(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct uart_npcx_data *data = dev->data; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; unsigned int key; LOG_DBG("Async RX Disable"); key = irq_lock(); inst->UFRCTL &= ~(BIT(NPCX_UFRCTL_RNEMPTY_EN)); k_work_cancel_delayable(&rx_dma_params->timeout_work); if (rx_dma_params->buf == NULL) { LOG_DBG("No buffers to release from RX DMA!"); } else { uart_npcx_async_rx_flush(dev); async_evt_rx_buf_release(dev); } rx_dma_params->enabled = false; if (data->async.next_rx_buffer != NULL) { rx_dma_params->buf = data->async.next_rx_buffer; rx_dma_params->buf_len = data->async.next_rx_buffer_len; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; /* Release the next buffer as well */ async_evt_rx_buf_release(dev); } mdma_reg_base->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_MDMAEN); struct uart_event disabled_event = {.type = UART_RX_DISABLED}; async_user_callback(dev, &disabled_event); irq_unlock(key); return 0; } static int uart_npcx_async_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uart_npcx_data *data = dev->data; if (data->async.next_rx_buffer != NULL) { return -EBUSY; } else if (data->async.rx_dma_params.enabled == false) { return -EACCES; } data->async.next_rx_buffer = buf; data->async.next_rx_buffer_len = len; LOG_DBG("Next RX buf rsp, new: %d", len); return 0; } static void uart_npcx_async_rx_timeout(struct k_work *work) { struct k_work_delayable 
*dwork = k_work_delayable_from_work(work); struct uart_npcx_rx_dma_params *rx_params = CONTAINER_OF(dwork, struct uart_npcx_rx_dma_params, timeout_work); struct uart_npcx_async_data *async_data = CONTAINER_OF(rx_params, struct uart_npcx_async_data, rx_dma_params); const struct device *dev = async_data->uart_dev; LOG_DBG("Async RX timeout"); uart_npcx_async_rx_flush(dev); } static void uart_npcx_async_dma_load_new_rx_buf(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; struct uart_npcx_data *data = dev->data; struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; rx_dma_params->offset = 0; rx_dma_params->counter = 0; rx_dma_params->buf = data->async.next_rx_buffer; rx_dma_params->buf_len = data->async.next_rx_buffer_len; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; mdma_reg_base->MDMA_DSTB0 = (uint32_t)rx_dma_params->buf; mdma_reg_base->MDMA_TCNT0 = rx_dma_params->buf_len; mdma_reg_base->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN) | BIT(NPCX_MDMA_CTL_SIEN); inst->UMDSL |= BIT(NPCX_UMDSL_ERD); } /* DMA rx reaches the terminal Count */ static void uart_npcx_async_dma_rx_complete(const struct device *dev) { struct uart_npcx_data *data = dev->data; struct uart_npcx_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; rx_dma_params->counter = rx_dma_params->buf_len; async_evt_rx_rdy(dev); /* A new buffer was available. 
*/ if (data->async.next_rx_buffer != NULL) { async_evt_rx_buf_release(dev); uart_npcx_async_dma_load_new_rx_buf(dev); /* Request the next buffer */ async_evt_rx_buf_request(dev); async_timer_start(&rx_dma_params->timeout_work, rx_dma_params->timeout_us); } else { /* Buffer full without valid next buffer, disable RX DMA */ LOG_DBG("Disabled RX DMA, no valid next buffer "); uart_npcx_async_rx_disable(dev); } } #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) static void uart_npcx_isr(const struct device *dev) { struct uart_npcx_data *data = dev->data; #if defined(CONFIG_PM) || defined(CONFIG_UART_ASYNC_API) const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; #endif /* * Set pm constraint to prevent the system enter suspend state within * the CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT period. */ #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED if (uart_npcx_irq_rx_ready(dev)) { k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_RX_FLAG); k_work_reschedule(&data->rx_refresh_timeout_work, delay); } #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN if (data->user_cb) { data->user_cb(dev, data->user_data); } #endif #ifdef CONFIG_UART_ASYNC_API if (data->async.user_callback) { struct mdma_reg *const mdma_reg_base = config->mdma_reg_base; /* * Check rx in any way because the RFIFO_NEMPTY_STS is not valid when MDMA mode is * used. This is needed when the rx timeout_us is zero. In the case that the * rx timeout_us is not zero, rx_flush is done in the tiemout_work callback. 
*/ if (data->async.rx_dma_params.timeout_us == 0) { uart_npcx_async_rx_flush(dev); } else if (IS_BIT_SET(inst->UFRCTL, NPCX_UFRCTL_RNEMPTY_EN)) { async_timer_start(&data->async.rx_dma_params.timeout_work, data->async.rx_dma_params.timeout_us); } /* MDMA rx end interrupt */ if (IS_BIT_SET(mdma_reg_base->MDMA_CTL0, NPCX_MDMA_CTL_TC) && IS_BIT_SET(mdma_reg_base->MDMA_CTL0, NPCX_MDMA_CTL_SIEN)) { mdma_reg_base->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_SIEN); /* TC is write-0-clear bit */ mdma_reg_base->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_TC); inst->UMDSL &= ~BIT(NPCX_UMDSL_ERD); uart_npcx_async_dma_rx_complete(dev); LOG_DBG("DMA Rx TC"); } /* MDMA tx done interrupt */ if (IS_BIT_SET(mdma_reg_base->MDMA_CTL1, NPCX_MDMA_CTL_TC) && IS_BIT_SET(mdma_reg_base->MDMA_CTL1, NPCX_MDMA_CTL_SIEN)) { mdma_reg_base->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_SIEN); /* TC is write-0-clear bit */ mdma_reg_base->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_TC); /* * MDMA tx is done (i.e. all data in the memory are moved to UART tx FIFO), * but data in the tx FIFO are not completely sent to the bus. 
*/ if (!IS_BIT_SET(inst->UFTSTS, NPCX_UFTSTS_NXMIP)) { k_spinlock_key_t key = k_spin_lock(&data->lock); inst->UFTCTL |= BIT(NPCX_UFTCTL_NXMIP_EN); k_spin_unlock(&data->lock, key); } else { data->async.tx_in_progress = false; #ifdef CONFIG_PM uart_npcx_pm_policy_state_lock_put(data, UART_PM_POLICY_STATE_TX_FLAG); #endif /* CONFIG_PM */ async_evt_tx_done(dev); } } } #endif #if defined(CONFIG_PM) || defined(CONFIG_UART_ASYNC_API) if (IS_BIT_SET(inst->UFTCTL, NPCX_UFTCTL_NXMIP_EN) && IS_BIT_SET(inst->UFTSTS, NPCX_UFTSTS_NXMIP)) { k_spinlock_key_t key = k_spin_lock(&data->lock); /* Disable NXMIP interrupt */ inst->UFTCTL &= ~BIT(NPCX_UFTCTL_NXMIP_EN); k_spin_unlock(&data->lock, key); #ifdef CONFIG_PM uart_npcx_pm_policy_state_lock_put(data, UART_PM_POLICY_STATE_TX_FLAG); #endif #ifdef CONFIG_UART_ASYNC_API if (data->async.tx_in_progress) { data->async.tx_in_progress = false; async_evt_tx_done(dev); LOG_DBG("Tx wait-empty done"); } #endif } #endif } #endif /* UART api functions */ static int uart_npcx_err_check(const struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_reg *const inst = config->inst; uint32_t err = 0U; uint8_t stat = inst->USTAT; if (IS_BIT_SET(stat, NPCX_USTAT_DOE)) { err |= UART_ERROR_OVERRUN; } if (IS_BIT_SET(stat, NPCX_USTAT_PE)) { err |= UART_ERROR_PARITY; } if (IS_BIT_SET(stat, NPCX_USTAT_FE)) { err |= UART_ERROR_FRAMING; } return err; } static __unused void uart_npcx_rx_wk_isr(const struct device *dev, struct npcx_wui *wui) { /* * Set pm constraint to prevent the system enter suspend state within * the CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT period. 
*/ LOG_DBG("-->%s", dev->name); #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED struct uart_npcx_data *data = dev->data; k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); uart_npcx_pm_policy_state_lock_get(data, UART_PM_POLICY_STATE_RX_FLAG); k_work_reschedule(&data->rx_refresh_timeout_work, delay); #endif /* * Disable MIWU CR_SIN interrupt to avoid the other redundant interrupts * after ec wakes up. */ npcx_uart_disable_access_interrupt(); } #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED static void uart_npcx_rx_refresh_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_npcx_data *data = CONTAINER_OF(dwork, struct uart_npcx_data, rx_refresh_timeout_work); uart_npcx_pm_policy_state_lock_put(data, UART_PM_POLICY_STATE_RX_FLAG); } #endif /* UART driver registration */ static const struct uart_driver_api uart_npcx_driver_api = { .poll_in = uart_npcx_poll_in, .poll_out = uart_npcx_poll_out, .err_check = uart_npcx_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_npcx_fifo_fill, .fifo_read = uart_npcx_fifo_read, .irq_tx_enable = uart_npcx_irq_tx_enable, .irq_tx_disable = uart_npcx_irq_tx_disable, .irq_tx_ready = uart_npcx_irq_tx_ready, .irq_tx_complete = uart_npcx_irq_tx_complete, .irq_rx_enable = uart_npcx_irq_rx_enable, .irq_rx_disable = uart_npcx_irq_rx_disable, .irq_rx_ready = uart_npcx_irq_rx_ready, .irq_err_enable = uart_npcx_irq_err_enable, .irq_err_disable = uart_npcx_irq_err_disable, .irq_is_pending = uart_npcx_irq_is_pending, .irq_update = uart_npcx_irq_update, .irq_callback_set = uart_npcx_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API .callback_set = uart_npcx_async_callback_set, .tx = uart_npcx_async_tx, .tx_abort = uart_npcx_async_tx_abort, .rx_enable = uart_npcx_async_rx_enable, .rx_buf_rsp = uart_npcx_async_rx_buf_rsp, .rx_disable = uart_npcx_async_rx_disable, #endif /* CONFIG_UART_ASYNC_API */ }; static int uart_npcx_init(const 
struct device *dev) { const struct uart_npcx_config *const config = dev->config; struct uart_npcx_data *const data = dev->data; const struct device *const clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE); struct uart_reg *const inst = config->inst; uint32_t uart_rate; int ret; if (!device_is_ready(clk_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* Turn on device clock first and get source clock freq. */ ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg); if (ret < 0) { LOG_ERR("Turn on UART clock fail %d", ret); return ret; } #ifdef CONFIG_UART_ASYNC_API ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->mdma_clk_cfg); if (ret < 0) { LOG_ERR("Turn on UART MDMA clock fail %d", ret); return ret; } #endif /* * If apb2's clock is not 15MHz, we need to find the other optimized * values of UPSR and UBAUD for baud rate 115200. */ ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clk_cfg, &uart_rate); if (ret < 0) { LOG_ERR("Get UART clock rate error %d", ret); return ret; } /* Configure baud rate */ ret = uart_set_npcx_baud_rate(inst, data->baud_rate, uart_rate); if (ret < 0) { LOG_ERR("Set baud rate %d with unsupported apb clock %d failed", data->baud_rate, uart_rate); return ret; } /* * 8-N-1, FIFO enabled. Must be done after setting * the divisor for the new divisor to take effect. 
*/ inst->UFRS = 0x00; /* Initialize UART FIFO if mode is interrupt driven */ #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) /* Enable the UART FIFO mode */ inst->UMDSL |= BIT(NPCX_UMDSL_FIFO_MD); /* Disable all UART tx FIFO interrupts */ uart_npcx_dis_all_tx_interrupts(dev); /* Clear UART rx FIFO */ uart_npcx_clear_rx_fifo(dev); /* Configure UART interrupts */ config->irq_config_func(dev); #endif #ifdef CONFIG_UART_ASYNC_API data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; data->async.uart_dev = dev; k_work_init_delayable(&data->async.rx_dma_params.timeout_work, uart_npcx_async_rx_timeout); k_work_init_delayable(&data->async.tx_dma_params.timeout_work, uart_npcx_async_tx_timeout); #endif if (IS_ENABLED(CONFIG_PM)) { /* Initialize a miwu device input and its callback function */ npcx_miwu_init_dev_callback(&data->uart_rx_cb, &config->uart_rx_wui, uart_npcx_rx_wk_isr, dev); npcx_miwu_manage_callback(&data->uart_rx_cb, true); /* * Configure the UART wake-up event triggered from a falling * edge on CR_SIN pin. No need for callback function. 
*/ npcx_miwu_interrupt_configure(&config->uart_rx_wui, NPCX_MIWU_MODE_EDGE, NPCX_MIWU_TRIG_LOW); #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED k_work_init_delayable(&data->rx_refresh_timeout_work, uart_npcx_rx_refresh_timeout); #endif } /* Configure pin-mux for uart device */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("UART pinctrl setup failed (%d)", ret); return ret; } return 0; } #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) #define NPCX_UART_IRQ_CONFIG_FUNC_DECL(inst) \ static void uart_npcx_irq_config_##inst(const struct device *dev) #define NPCX_UART_IRQ_CONFIG_FUNC_INIT(inst) .irq_config_func = uart_npcx_irq_config_##inst, #define NPCX_UART_IRQ_CONFIG_FUNC(inst) \ static void uart_npcx_irq_config_##inst(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), uart_npcx_isr, \ DEVICE_DT_INST_GET(inst), 0); \ irq_enable(DT_INST_IRQN(inst)); \ } #else #define NPCX_UART_IRQ_CONFIG_FUNC_DECL(inst) #define NPCX_UART_IRQ_CONFIG_FUNC_INIT(inst) #define NPCX_UART_IRQ_CONFIG_FUNC(inst) #endif #define NPCX_UART_INIT(i) \ NPCX_UART_IRQ_CONFIG_FUNC_DECL(i); \ \ PINCTRL_DT_INST_DEFINE(i); \ \ static const struct uart_npcx_config uart_npcx_cfg_##i = { \ .inst = (struct uart_reg *)DT_INST_REG_ADDR(i), \ .clk_cfg = NPCX_DT_CLK_CFG_ITEM(i), \ .uart_rx_wui = NPCX_DT_WUI_ITEM_BY_NAME(i, uart_rx), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(i), \ NPCX_UART_IRQ_CONFIG_FUNC_INIT(i) \ \ IF_ENABLED(CONFIG_UART_ASYNC_API, ( \ .mdma_clk_cfg = NPCX_DT_CLK_CFG_ITEM_BY_IDX(i, 1), \ .mdma_reg_base = (struct mdma_reg *)DT_INST_REG_ADDR_BY_IDX(i, 1), \ )) \ }; \ \ static struct uart_npcx_data uart_npcx_data_##i = { \ .baud_rate = DT_INST_PROP(i, current_speed), \ }; \ \ DEVICE_DT_INST_DEFINE(i, uart_npcx_init, NULL, &uart_npcx_data_##i, &uart_npcx_cfg_##i, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_npcx_driver_api); \ \ NPCX_UART_IRQ_CONFIG_FUNC(i) 
DT_INST_FOREACH_STATUS_OKAY(NPCX_UART_INIT) #define ENABLE_MIWU_CRIN_IRQ(i) \ npcx_miwu_irq_get_and_clear_pending(&uart_npcx_cfg_##i.uart_rx_wui); \ npcx_miwu_irq_enable(&uart_npcx_cfg_##i.uart_rx_wui); #define DISABLE_MIWU_CRIN_IRQ(i) npcx_miwu_irq_disable(&uart_npcx_cfg_##i.uart_rx_wui); void npcx_uart_enable_access_interrupt(void) { DT_INST_FOREACH_STATUS_OKAY(ENABLE_MIWU_CRIN_IRQ) } void npcx_uart_disable_access_interrupt(void) { DT_INST_FOREACH_STATUS_OKAY(DISABLE_MIWU_CRIN_IRQ) } ```
/content/code_sandbox/drivers/serial/uart_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,444
```c /* * */ /** * @brief Microchip XEC UART Serial Driver * * This is the driver for the Microchip XEC MCU UART. It is NS16550 compatible. * */ #define DT_DRV_COMPAT microchip_xec_uart #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/types.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #ifdef CONFIG_SOC_SERIES_MEC172X #include <zephyr/drivers/clock_control/mchp_xec_clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_mchp_xec_ecia.h> #endif #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/gpio.h> #include <zephyr/sys/sys_io.h> #include <zephyr/spinlock.h> #include <zephyr/irq.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_xec, CONFIG_UART_LOG_LEVEL); /* Clock source is 1.8432 MHz derived from PLL 48 MHz */ #define XEC_UART_CLK_SRC_1P8M 0 /* Clock source is PLL 48 MHz output */ #define XEC_UART_CLK_SRC_48M 1 /* Clock source is the UART_CLK alternate pin function. */ #define XEC_UART_CLK_SRC_EXT_PIN 2 /* register definitions */ #define REG_THR 0x00 /* Transmitter holding reg. */ #define REG_RDR 0x00 /* Receiver data reg. */ #define REG_BRDL 0x00 /* Baud rate divisor (LSB) */ #define REG_BRDH 0x01 /* Baud rate divisor (MSB) */ #define REG_IER 0x01 /* Interrupt enable reg. */ #define REG_IIR 0x02 /* Interrupt ID reg. */ #define REG_FCR 0x02 /* FIFO control reg. */ #define REG_LCR 0x03 /* Line control reg. */ #define REG_MDC 0x04 /* Modem control reg. */ #define REG_LSR 0x05 /* Line status reg. */ #define REG_MSR 0x06 /* Modem status reg. 
*/ #define REG_SCR 0x07 /* scratch register */ #define REG_LD_ACTV 0x330 /* Logical Device activate */ #define REG_LD_CFG 0x3f0 /* Logical Device configuration */ /* equates for interrupt enable register */ #define IER_RXRDY 0x01 /* receiver data ready */ #define IER_TBE 0x02 /* transmit bit enable */ #define IER_LSR 0x04 /* line status interrupts */ #define IER_MSI 0x08 /* modem status interrupts */ /* equates for interrupt identification register */ #define IIR_MSTAT 0x00 /* modem status interrupt */ #define IIR_NIP 0x01 /* no interrupt pending */ #define IIR_THRE 0x02 /* transmit holding register empty interrupt */ #define IIR_RBRF 0x04 /* receiver buffer register full interrupt */ #define IIR_LS 0x06 /* receiver line status interrupt */ #define IIR_MASK 0x07 /* interrupt id bits mask */ #define IIR_ID 0x06 /* interrupt ID mask without NIP */ /* equates for FIFO control register */ #define FCR_FIFO 0x01 /* enable XMIT and RCVR FIFO */ #define FCR_RCVRCLR 0x02 /* clear RCVR FIFO */ #define FCR_XMITCLR 0x04 /* clear XMIT FIFO */ /* * Per PC16550D (Literature Number: SNLS378B): * * RXRDY, Mode 0: When in the 16450 Mode (FCR0 = 0) or in * the FIFO Mode (FCR0 = 1, FCR3 = 0) and there is at least 1 * character in the RCVR FIFO or RCVR holding register, the * RXRDY pin (29) will be low active. Once it is activated the * RXRDY pin will go inactive when there are no more charac- * ters in the FIFO or holding register. * * RXRDY, Mode 1: In the FIFO Mode (FCR0 = 1) when the * FCR3 = 1 and the trigger level or the timeout has been * reached, the RXRDY pin will go low active. Once it is acti- * vated it will go inactive when there are no more characters * in the FIFO or holding register. * * TXRDY, Mode 0: In the 16450 Mode (FCR0 = 0) or in the * FIFO Mode (FCR0 = 1, FCR3 = 0) and there are no charac- * ters in the XMIT FIFO or XMIT holding register, the TXRDY * pin (24) will be low active. 
Once it is activated the TXRDY * pin will go inactive after the first character is loaded into the * XMIT FIFO or holding register. * * TXRDY, Mode 1: In the FIFO Mode (FCR0 = 1) when * FCR3 = 1 and there are no characters in the XMIT FIFO, the * TXRDY pin will go low active. This pin will become inactive * when the XMIT FIFO is completely full. */ #define FCR_MODE0 0x00 /* set receiver in mode 0 */ #define FCR_MODE1 0x08 /* set receiver in mode 1 */ /* RCVR FIFO interrupt levels: trigger interrupt with this bytes in FIFO */ #define FCR_FIFO_1 0x00 /* 1 byte in RCVR FIFO */ #define FCR_FIFO_4 0x40 /* 4 bytes in RCVR FIFO */ #define FCR_FIFO_8 0x80 /* 8 bytes in RCVR FIFO */ #define FCR_FIFO_14 0xC0 /* 14 bytes in RCVR FIFO */ /* constants for line control register */ #define LCR_CS5 0x00 /* 5 bits data size */ #define LCR_CS6 0x01 /* 6 bits data size */ #define LCR_CS7 0x02 /* 7 bits data size */ #define LCR_CS8 0x03 /* 8 bits data size */ #define LCR_2_STB 0x04 /* 2 stop bits */ #define LCR_1_STB 0x00 /* 1 stop bit */ #define LCR_PEN 0x08 /* parity enable */ #define LCR_PDIS 0x00 /* parity disable */ #define LCR_EPS 0x10 /* even parity select */ #define LCR_SP 0x20 /* stick parity select */ #define LCR_SBRK 0x40 /* break control bit */ #define LCR_DLAB 0x80 /* divisor latch access enable */ /* constants for the modem control register */ #define MCR_DTR 0x01 /* dtr output */ #define MCR_RTS 0x02 /* rts output */ #define MCR_OUT1 0x04 /* output #1 */ #define MCR_OUT2 0x08 /* output #2 */ #define MCR_LOOP 0x10 /* loop back */ #define MCR_AFCE 0x20 /* auto flow control enable */ /* constants for line status register */ #define LSR_RXRDY 0x01 /* receiver data available */ #define LSR_OE 0x02 /* overrun error */ #define LSR_PE 0x04 /* parity error */ #define LSR_FE 0x08 /* framing error */ #define LSR_BI 0x10 /* break interrupt */ #define LSR_EOB_MASK 0x1E /* Error or Break mask */ #define LSR_THRE 0x20 /* transmit holding register empty */ #define LSR_TEMT 0x40 /* 
transmitter empty */ /* constants for modem status register */ #define MSR_DCTS 0x01 /* cts change */ #define MSR_DDSR 0x02 /* dsr change */ #define MSR_DRI 0x04 /* ring change */ #define MSR_DDCD 0x08 /* data carrier change */ #define MSR_CTS 0x10 /* complement of cts */ #define MSR_DSR 0x20 /* complement of dsr */ #define MSR_RI 0x40 /* complement of ring signal */ #define MSR_DCD 0x80 /* complement of dcd */ #define IIRC(dev) (((struct uart_xec_dev_data *)(dev)->data)->iir_cache) enum uart_xec_pm_policy_state_flag { UART_XEC_PM_POLICY_STATE_TX_FLAG, UART_XEC_PM_POLICY_STATE_RX_FLAG, UART_XEC_PM_POLICY_STATE_FLAG_COUNT, }; /* device config */ struct uart_xec_device_config { struct uart_regs *regs; uint32_t sys_clk_freq; uint8_t girq_id; uint8_t girq_pos; uint8_t pcr_idx; uint8_t pcr_bitpos; const struct pinctrl_dev_config *pcfg; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) uart_irq_config_func_t irq_config_func; #endif #ifdef CONFIG_PM_DEVICE struct gpio_dt_spec wakerx_gpio; bool wakeup_source; #endif }; /** Device data structure */ struct uart_xec_dev_data { struct uart_config uart_config; struct k_spinlock lock; uint8_t fcr_cache; /**< cache of FCR write only register */ uint8_t iir_cache; /**< cache of IIR since it clears when read */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; /**< Callback function pointer */ void *cb_data; /**< Callback function arg */ #endif }; #ifdef CONFIG_PM_DEVICE ATOMIC_DEFINE(pm_policy_state_flag, UART_XEC_PM_POLICY_STATE_FLAG_COUNT); #endif #if defined(CONFIG_PM_DEVICE) && defined(CONFIG_UART_CONSOLE_INPUT_EXPIRED) struct k_work_delayable rx_refresh_timeout_work; #endif static const struct uart_driver_api uart_xec_driver_api; #if defined(CONFIG_PM_DEVICE) && defined(CONFIG_UART_CONSOLE_INPUT_EXPIRED) static void uart_xec_pm_policy_state_lock_get(enum uart_xec_pm_policy_state_flag flag) { if (atomic_test_and_set_bit(pm_policy_state_flag, flag) == 0) { 
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } static void uart_xec_pm_policy_state_lock_put(enum uart_xec_pm_policy_state_flag flag) { if (atomic_test_and_clear_bit(pm_policy_state_flag, flag) == 1) { pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); } } #endif #ifdef CONFIG_SOC_SERIES_MEC172X static void uart_clr_slp_en(const struct device *dev) { struct uart_xec_device_config const *dev_cfg = dev->config; z_mchp_xec_pcr_periph_sleep(dev_cfg->pcr_idx, dev_cfg->pcr_bitpos, 0); } static inline void uart_xec_girq_clr(const struct device *dev) { struct uart_xec_device_config const *dev_cfg = dev->config; mchp_soc_ecia_girq_src_clr(dev_cfg->girq_id, dev_cfg->girq_pos); } static inline void uart_xec_girq_en(uint8_t girq_idx, uint8_t girq_posn) { mchp_xec_ecia_girq_src_en(girq_idx, girq_posn); } #else static void uart_clr_slp_en(const struct device *dev) { struct uart_xec_device_config const *dev_cfg = dev->config; if (dev_cfg->pcr_bitpos == MCHP_PCR2_UART0_POS) { mchp_pcr_periph_slp_ctrl(PCR_UART0, 0); } else if (dev_cfg->pcr_bitpos == MCHP_PCR2_UART1_POS) { mchp_pcr_periph_slp_ctrl(PCR_UART1, 0); } else { mchp_pcr_periph_slp_ctrl(PCR_UART2, 0); } } static inline void uart_xec_girq_clr(const struct device *dev) { struct uart_xec_device_config const *dev_cfg = dev->config; MCHP_GIRQ_SRC(dev_cfg->girq_id) = BIT(dev_cfg->girq_pos); } static inline void uart_xec_girq_en(uint8_t girq_idx, uint8_t girq_posn) { MCHP_GIRQ_ENSET(girq_idx) = BIT(girq_posn); } #endif static void set_baud_rate(const struct device *dev, uint32_t baud_rate) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data * const dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; uint32_t divisor; /* baud rate divisor */ uint8_t lcr_cache; if ((baud_rate != 0U) && (dev_cfg->sys_clk_freq != 0U)) { /* * calculate baud rate divisor. 
a variant of * (uint32_t)(dev_cfg->sys_clk_freq / (16.0 * baud_rate) + 0.5) */ divisor = ((dev_cfg->sys_clk_freq + (baud_rate << 3)) / baud_rate) >> 4; /* set the DLAB to access the baud rate divisor registers */ lcr_cache = regs->LCR; regs->LCR = LCR_DLAB | lcr_cache; regs->RTXB = (unsigned char)(divisor & 0xff); /* bit[7]=0 1.8MHz clock source, =1 48MHz clock source */ regs->IER = (unsigned char)((divisor >> 8) & 0x7f); /* restore the DLAB to access the baud rate divisor registers */ regs->LCR = lcr_cache; dev_data->uart_config.baudrate = baud_rate; } } /* * Configure UART. * MCHP XEC UART defaults to reset if external Host VCC_PWRGD is inactive. * We must change the UART reset signal to XEC VTR_PWRGD. Make sure UART * clock source is an internal clock and UART pins are not inverted. */ static int uart_xec_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_xec_dev_data * const dev_data = dev->data; const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_regs *regs = dev_cfg->regs; uint8_t lcr_cache; /* temp for return value if error occurs in this locked region */ int ret = 0; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); ARG_UNUSED(dev_data); dev_data->fcr_cache = 0U; dev_data->iir_cache = 0U; /* XEC UART specific configuration and enable */ regs->CFG_SEL &= ~(MCHP_UART_LD_CFG_RESET_VCC | MCHP_UART_LD_CFG_EXTCLK | MCHP_UART_LD_CFG_INVERT); /* set activate to enable clocks */ regs->ACTV |= MCHP_UART_LD_ACTIVATE; set_baud_rate(dev, cfg->baudrate); /* Local structure to hold temporary values */ struct uart_config uart_cfg; switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: uart_cfg.data_bits = LCR_CS5; break; case UART_CFG_DATA_BITS_6: uart_cfg.data_bits = LCR_CS6; break; case UART_CFG_DATA_BITS_7: uart_cfg.data_bits = LCR_CS7; break; case UART_CFG_DATA_BITS_8: uart_cfg.data_bits = LCR_CS8; break; default: ret = -ENOTSUP; goto out; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: uart_cfg.stop_bits 
= LCR_1_STB; break; case UART_CFG_STOP_BITS_2: uart_cfg.stop_bits = LCR_2_STB; break; default: ret = -ENOTSUP; goto out; } switch (cfg->parity) { case UART_CFG_PARITY_NONE: uart_cfg.parity = LCR_PDIS; break; case UART_CFG_PARITY_EVEN: uart_cfg.parity = LCR_EPS; break; default: ret = -ENOTSUP; goto out; } dev_data->uart_config = *cfg; /* data bits, stop bits, parity, clear DLAB */ regs->LCR = uart_cfg.data_bits | uart_cfg.stop_bits | uart_cfg.parity; regs->MCR = MCR_OUT2 | MCR_RTS | MCR_DTR; /* * Program FIFO: enabled, mode 0 * generate the interrupt at 8th byte * Clear TX and RX FIFO */ dev_data->fcr_cache = FCR_FIFO | FCR_MODE0 | FCR_FIFO_8 | FCR_RCVRCLR | FCR_XMITCLR; regs->IIR_FCR = dev_data->fcr_cache; /* clear the port */ lcr_cache = regs->LCR; regs->LCR = LCR_DLAB | lcr_cache; regs->SCR = regs->RTXB; regs->LCR = lcr_cache; /* disable interrupts */ regs->IER = 0; out: k_spin_unlock(&dev_data->lock, key); return ret; }; #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_xec_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_xec_dev_data *data = dev->data; cfg->baudrate = data->uart_config.baudrate; cfg->parity = data->uart_config.parity; cfg->stop_bits = data->uart_config.stop_bits; cfg->data_bits = data->uart_config.data_bits; cfg->flow_ctrl = data->uart_config.flow_ctrl; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_PM_DEVICE static void uart_xec_wake_handler(const struct device *gpio, struct gpio_callback *cb, uint32_t pins) { /* Disable interrupts on UART RX pin to avoid repeated interrupts. 
*/ (void)gpio_pin_interrupt_configure(gpio, (find_msb_set(pins) - 1), GPIO_INT_DISABLE); /* Refresh console expired time */ #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); uart_xec_pm_policy_state_lock_get(UART_XEC_PM_POLICY_STATE_RX_FLAG); k_work_reschedule(&rx_refresh_timeout_work, delay); #endif } static int uart_xec_pm_action(const struct device *dev, enum pm_device_action action) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_regs *regs = dev_cfg->regs; int ret = 0; switch (action) { case PM_DEVICE_ACTION_RESUME: regs->ACTV = MCHP_UART_LD_ACTIVATE; break; case PM_DEVICE_ACTION_SUSPEND: /* Enable UART wake interrupt */ regs->ACTV = 0; if ((dev_cfg->wakeup_source) && (dev_cfg->wakerx_gpio.port != NULL)) { ret = gpio_pin_interrupt_configure_dt(&dev_cfg->wakerx_gpio, GPIO_INT_MODE_EDGE | GPIO_INT_TRIG_LOW); if (ret < 0) { LOG_ERR("Failed to configure UART wake interrupt (ret %d)", ret); return ret; } } break; default: return -ENOTSUP; } return 0; } #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED static void uart_xec_rx_refresh_timeout(struct k_work *work) { ARG_UNUSED(work); uart_xec_pm_policy_state_lock_put(UART_XEC_PM_POLICY_STATE_RX_FLAG); } #endif #endif /* CONFIG_PM_DEVICE */ /** * @brief Initialize individual UART port * * This routine is called to reset the chip in a quiescent state. 
* * @param dev UART device struct * * @return 0 if successful, failed otherwise */ static int uart_xec_init(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; int ret; uart_clr_slp_en(dev); ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { return ret; } ret = uart_xec_configure(dev, &dev_data->uart_config); if (ret != 0) { return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN dev_cfg->irq_config_func(dev); #endif #ifdef CONFIG_PM_DEVICE #ifdef CONFIG_UART_CONSOLE_INPUT_EXPIRED k_work_init_delayable(&rx_refresh_timeout_work, uart_xec_rx_refresh_timeout); #endif if ((dev_cfg->wakeup_source) && (dev_cfg->wakerx_gpio.port != NULL)) { static struct gpio_callback uart_xec_wake_cb; gpio_init_callback(&uart_xec_wake_cb, uart_xec_wake_handler, BIT(dev_cfg->wakerx_gpio.pin)); ret = gpio_add_callback(dev_cfg->wakerx_gpio.port, &uart_xec_wake_cb); if (ret < 0) { LOG_ERR("Failed to add UART wake callback (err %d)", ret); return ret; } } #endif return 0; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_xec_poll_in(const struct device *dev, unsigned char *c) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; int ret = -1; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); if ((regs->LSR & LSR_RXRDY) != 0) { /* got a character */ *c = regs->RTXB; ret = 0; } k_spin_unlock(&dev_data->lock, key); return ret; } /** * @brief Output a character in polled mode. * * Checks if the transmitter is empty. If empty, a character is written to * the data register. * * If the hardware flow control is enabled then the handshake signal CTS has to * be asserted in order to send a character. 
* * @param dev UART device struct * @param c Character to send */ static void uart_xec_poll_out(const struct device *dev, unsigned char c) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); while ((regs->LSR & LSR_THRE) == 0) { ; } regs->RTXB = c; k_spin_unlock(&dev_data->lock, key); } /** * @brief Check if an error was received * * @param dev UART device struct * * @return one of UART_ERROR_OVERRUN, UART_ERROR_PARITY, UART_ERROR_FRAMING, * UART_BREAK if an error was detected, 0 otherwise. */ static int uart_xec_err_check(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); int check = regs->LSR & LSR_EOB_MASK; k_spin_unlock(&dev_data->lock, key); return check >> 1; } #if CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Fill FIFO with data * * @param dev UART device struct * @param tx_data Data to transmit * @param size Number of bytes to send * * @return Number of bytes sent */ static int uart_xec_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; int i; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); for (i = 0; (i < size) && (regs->LSR & LSR_THRE) != 0; i++) { #if defined(CONFIG_PM_DEVICE) && defined(CONFIG_UART_CONSOLE_INPUT_EXPIRED) uart_xec_pm_policy_state_lock_get(UART_XEC_PM_POLICY_STATE_TX_FLAG); #endif regs->RTXB = tx_data[i]; } k_spin_unlock(&dev_data->lock, key); return i; } /** * @brief Read data from FIFO * * @param dev UART device struct * @param rxData Data container * @param size Container size * * @return Number of bytes read */ static int 
uart_xec_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; int i; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); for (i = 0; (i < size) && (regs->LSR & LSR_RXRDY) != 0; i++) { rx_data[i] = regs->RTXB; } k_spin_unlock(&dev_data->lock, key); return i; } /** * @brief Enable TX interrupt in IER * * @param dev UART device struct */ static void uart_xec_irq_tx_enable(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); regs->IER |= IER_TBE; k_spin_unlock(&dev_data->lock, key); } /** * @brief Disable TX interrupt in IER * * @param dev UART device struct */ static void uart_xec_irq_tx_disable(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); regs->IER &= ~(IER_TBE); k_spin_unlock(&dev_data->lock, key); } /** * @brief Check if Tx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_xec_irq_tx_ready(const struct device *dev) { struct uart_xec_dev_data *dev_data = dev->data; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); int ret = ((IIRC(dev) & IIR_ID) == IIR_THRE) ? 
1 : 0; k_spin_unlock(&dev_data->lock, key); return ret; } /** * @brief Check if nothing remains to be transmitted * * @param dev UART device struct * * @return 1 if nothing remains to be transmitted, 0 otherwise */ static int uart_xec_irq_tx_complete(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); int ret = ((regs->LSR & (LSR_TEMT | LSR_THRE)) == (LSR_TEMT | LSR_THRE)) ? 1 : 0; k_spin_unlock(&dev_data->lock, key); return ret; } /** * @brief Enable RX interrupt in IER * * @param dev UART device struct */ static void uart_xec_irq_rx_enable(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); regs->IER |= IER_RXRDY; k_spin_unlock(&dev_data->lock, key); } /** * @brief Disable RX interrupt in IER * * @param dev UART device struct */ static void uart_xec_irq_rx_disable(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); regs->IER &= ~(IER_RXRDY); k_spin_unlock(&dev_data->lock, key); } /** * @brief Check if Rx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_xec_irq_rx_ready(const struct device *dev) { struct uart_xec_dev_data *dev_data = dev->data; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); int ret = ((IIRC(dev) & IIR_ID) == IIR_RBRF) ? 
1 : 0; k_spin_unlock(&dev_data->lock, key); return ret; } /** * @brief Enable error interrupt in IER * * @param dev UART device struct */ static void uart_xec_irq_err_enable(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); regs->IER |= IER_LSR; k_spin_unlock(&dev_data->lock, key); } /** * @brief Disable error interrupt in IER * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static void uart_xec_irq_err_disable(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); regs->IER &= ~(IER_LSR); k_spin_unlock(&dev_data->lock, key); } /** * @brief Check if any IRQ is pending * * @param dev UART device struct * * @return 1 if an IRQ is pending, 0 otherwise */ static int uart_xec_irq_is_pending(const struct device *dev) { struct uart_xec_dev_data *dev_data = dev->data; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); int ret = (!(IIRC(dev) & IIR_NIP)) ? 1 : 0; k_spin_unlock(&dev_data->lock, key); return ret; } /** * @brief Update cached contents of IIR * * @param dev UART device struct * * @return Always 1 */ static int uart_xec_irq_update(const struct device *dev) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); IIRC(dev) = regs->IIR_FCR; k_spin_unlock(&dev_data->lock, key); return 1; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device struct * @param cb Callback function pointer. 
*/ static void uart_xec_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_xec_dev_data * const dev_data = dev->data; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); dev_data->cb = cb; dev_data->cb_data = cb_data; k_spin_unlock(&dev_data->lock, key); } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. */ static void uart_xec_isr(const struct device *dev) { struct uart_xec_dev_data * const dev_data = dev->data; #if defined(CONFIG_PM_DEVICE) && defined(CONFIG_UART_CONSOLE_INPUT_EXPIRED) const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_regs *regs = dev_cfg->regs; int rx_ready = 0; rx_ready = ((regs->LSR & LSR_RXRDY) == LSR_RXRDY) ? 1 : 0; if (rx_ready) { k_timeout_t delay = K_MSEC(CONFIG_UART_CONSOLE_INPUT_EXPIRED_TIMEOUT); uart_xec_pm_policy_state_lock_get(UART_XEC_PM_POLICY_STATE_RX_FLAG); k_work_reschedule(&rx_refresh_timeout_work, delay); } #endif if (dev_data->cb) { dev_data->cb(dev, dev_data->cb_data); } #if defined(CONFIG_PM_DEVICE) && defined(CONFIG_UART_CONSOLE_INPUT_EXPIRED) if (uart_xec_irq_tx_complete(dev)) { uart_xec_pm_policy_state_lock_put(UART_XEC_PM_POLICY_STATE_TX_FLAG); } #endif /* CONFIG_PM */ /* clear ECIA GIRQ R/W1C status bit after UART status cleared */ uart_xec_girq_clr(dev); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_XEC_LINE_CTRL /** * @brief Manipulate line control for UART. 
* * @param dev UART device struct * @param ctrl The line control to be manipulated * @param val Value to set the line control * * @return 0 if successful, failed otherwise */ static int uart_xec_line_ctrl_set(const struct device *dev, uint32_t ctrl, uint32_t val) { const struct uart_xec_device_config * const dev_cfg = dev->config; struct uart_xec_dev_data *dev_data = dev->data; struct uart_regs *regs = dev_cfg->regs; uint32_t mdc, chg; k_spinlock_key_t key; switch (ctrl) { case UART_LINE_CTRL_BAUD_RATE: set_baud_rate(dev, val); return 0; case UART_LINE_CTRL_RTS: case UART_LINE_CTRL_DTR: key = k_spin_lock(&dev_data->lock); mdc = regs->MCR; if (ctrl == UART_LINE_CTRL_RTS) { chg = MCR_RTS; } else { chg = MCR_DTR; } if (val) { mdc |= chg; } else { mdc &= ~(chg); } regs->MCR = mdc; k_spin_unlock(&dev_data->lock, key); return 0; } return -ENOTSUP; } #endif /* CONFIG_UART_XEC_LINE_CTRL */ static const struct uart_driver_api uart_xec_driver_api = { .poll_in = uart_xec_poll_in, .poll_out = uart_xec_poll_out, .err_check = uart_xec_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_xec_configure, .config_get = uart_xec_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_xec_fifo_fill, .fifo_read = uart_xec_fifo_read, .irq_tx_enable = uart_xec_irq_tx_enable, .irq_tx_disable = uart_xec_irq_tx_disable, .irq_tx_ready = uart_xec_irq_tx_ready, .irq_tx_complete = uart_xec_irq_tx_complete, .irq_rx_enable = uart_xec_irq_rx_enable, .irq_rx_disable = uart_xec_irq_rx_disable, .irq_rx_ready = uart_xec_irq_rx_ready, .irq_err_enable = uart_xec_irq_err_enable, .irq_err_disable = uart_xec_irq_err_disable, .irq_is_pending = uart_xec_irq_is_pending, .irq_update = uart_xec_irq_update, .irq_callback_set = uart_xec_irq_callback_set, #endif #ifdef CONFIG_UART_XEC_LINE_CTRL .line_ctrl_set = uart_xec_line_ctrl_set, #endif }; #define DEV_CONFIG_REG_INIT(n) \ .regs = (struct uart_regs *)(DT_INST_REG_ADDR(n)), #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define 
DEV_CONFIG_IRQ_FUNC_INIT(n) \ .irq_config_func = irq_config_func##n, #define UART_XEC_IRQ_FUNC_DECLARE(n) \ static void irq_config_func##n(const struct device *dev); #define UART_XEC_IRQ_FUNC_DEFINE(n) \ static void irq_config_func##n(const struct device *dev) \ { \ ARG_UNUSED(dev); \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ uart_xec_isr, DEVICE_DT_INST_GET(n), \ 0); \ irq_enable(DT_INST_IRQN(n)); \ uart_xec_girq_en(DT_INST_PROP_BY_IDX(n, girqs, 0), \ DT_INST_PROP_BY_IDX(n, girqs, 1)); \ } #else /* !CONFIG_UART_INTERRUPT_DRIVEN */ #define DEV_CONFIG_IRQ_FUNC_INIT(n) #define UART_XEC_IRQ_FUNC_DECLARE(n) #define UART_XEC_IRQ_FUNC_DEFINE(n) #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #define DEV_DATA_FLOW_CTRL(n) \ DT_INST_PROP_OR(n, hw_flow_control, UART_CFG_FLOW_CTRL_NONE) /* To enable wakeup on the UART, the DTS needs to have two entries defined * in the corresponding UART node in the DTS specifying it as a wake source * and specifying the UART_RX GPIO; example as below * * wakerx-gpios = <&gpio_140_176 25 GPIO_ACTIVE_HIGH>; * wakeup-source; */ #ifdef CONFIG_PM_DEVICE #define XEC_UART_PM_WAKEUP(n) \ .wakeup_source = (uint8_t)DT_INST_PROP_OR(n, wakeup_source, 0), \ .wakerx_gpio = GPIO_DT_SPEC_INST_GET_OR(n, wakerx_gpios, {0}), #else #define XEC_UART_PM_WAKEUP(index) /* Not used */ #endif #define UART_XEC_DEVICE_INIT(n) \ \ PINCTRL_DT_INST_DEFINE(n); \ \ UART_XEC_IRQ_FUNC_DECLARE(n); \ \ static const struct uart_xec_device_config uart_xec_dev_cfg_##n = { \ DEV_CONFIG_REG_INIT(n) \ .sys_clk_freq = DT_INST_PROP(n, clock_frequency), \ .girq_id = DT_INST_PROP_BY_IDX(n, girqs, 0), \ .girq_pos = DT_INST_PROP_BY_IDX(n, girqs, 1), \ .pcr_idx = DT_INST_PROP_BY_IDX(n, pcrs, 0), \ .pcr_bitpos = DT_INST_PROP_BY_IDX(n, pcrs, 1), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ XEC_UART_PM_WAKEUP(n) \ DEV_CONFIG_IRQ_FUNC_INIT(n) \ }; \ static struct uart_xec_dev_data uart_xec_dev_data_##n = { \ .uart_config.baudrate = DT_INST_PROP_OR(n, current_speed, 0), \ 
.uart_config.parity = UART_CFG_PARITY_NONE, \ .uart_config.stop_bits = UART_CFG_STOP_BITS_1, \ .uart_config.data_bits = UART_CFG_DATA_BITS_8, \ .uart_config.flow_ctrl = DEV_DATA_FLOW_CTRL(n), \ }; \ PM_DEVICE_DT_INST_DEFINE(n, uart_xec_pm_action); \ DEVICE_DT_INST_DEFINE(n, uart_xec_init, \ PM_DEVICE_DT_INST_GET(n), \ &uart_xec_dev_data_##n, \ &uart_xec_dev_cfg_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_xec_driver_api); \ UART_XEC_IRQ_FUNC_DEFINE(n) DT_INST_FOREACH_STATUS_OKAY(UART_XEC_DEVICE_INIT) ```
/content/code_sandbox/drivers/serial/uart_mchp_xec.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,010
```unknown # NUMAKER UART driver configuration options config UART_NUMAKER bool "Nuvoton NUMAKER MCU serial driver" default y select SERIAL_HAS_DRIVER select HAS_NUMAKER_UART select SERIAL_SUPPORT_INTERRUPT depends on DT_HAS_NUVOTON_NUMAKER_UART_ENABLED help This option enables the UART driver for Nuvoton Numaker family of processors. Say y if you wish to use serial port on Nuvoton Numaker MCU. ```
/content/code_sandbox/drivers/serial/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
97
```unknown # SiFive Freedom UART configuration option menuconfig UART_SIFIVE bool "SiFive Freedom serial driver" default y depends on DT_HAS_SIFIVE_UART0_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the SiFive Freedom serial driver. # ---------- Port 0 ---------- menuconfig UART_SIFIVE_PORT_0 bool "SIFIVE Port 0" depends on UART_SIFIVE help This tells the driver to configure the UART port at boot, depending on the additional configure options below. config UART_SIFIVE_PORT_0_RXCNT_IRQ int "Port 0 RX Interrupt Threshold Count" default 0 depends on UART_SIFIVE_PORT_0 help Port 0 RX Threshold at which the RX FIFO interrupt triggers. config UART_SIFIVE_PORT_0_TXCNT_IRQ int "Port 0 TX Interrupt Threshold Count" default 1 depends on UART_SIFIVE_PORT_0 help Port 0 TX Threshold at which the TX FIFO interrupt triggers. # ---------- Port 1 ---------- menuconfig UART_SIFIVE_PORT_1 bool "SIFIVE Port 1" depends on UART_SIFIVE help This tells the driver to configure the UART port at boot, depending on the additional configure options below. config UART_SIFIVE_PORT_1_RXCNT_IRQ int "Port 1 RX Interrupt Threshold Count" default 0 depends on UART_SIFIVE_PORT_1 help Port 1 RX Threshold at which the RX FIFO interrupt triggers. config UART_SIFIVE_PORT_1_TXCNT_IRQ int "Port 1 TX Interrupt Threshold Count" default 1 depends on UART_SIFIVE_PORT_1 help Port 1 TX Threshold at which the TX FIFO interrupt triggers. ```
/content/code_sandbox/drivers/serial/Kconfig.sifive
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
392
```unknown # Renesas R-Car UART configuration options config UART_RCAR bool "Renesas R-Car UART Driver" default y depends on DT_HAS_RENESAS_RCAR_SCIF_ENABLED || DT_HAS_RENESAS_RCAR_HSCIF_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable Renesas R-Car UART Driver. ```
/content/code_sandbox/drivers/serial/Kconfig.rcar
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
80
```c /* * */ #include <zephyr/arch/arm64/hypercall.h> #include <zephyr/xen/console.h> #include <zephyr/xen/events.h> #include <zephyr/xen/generic.h> #include <zephyr/xen/hvm.h> #include <zephyr/xen/public/io/console.h> #include <zephyr/xen/public/sched.h> #include <zephyr/xen/public/xen.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/sys/device_mmio.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_hvc_xen, CONFIG_UART_LOG_LEVEL); static struct hvc_xen_data xen_hvc_data = {0}; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void hvc_uart_evtchn_cb(void *priv); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int read_from_ring(const struct device *dev, char *str, int len) { int recv = 0; struct hvc_xen_data *hvc_data = dev->data; XENCONS_RING_IDX cons = hvc_data->intf->in_cons; XENCONS_RING_IDX prod = hvc_data->intf->in_prod; XENCONS_RING_IDX in_idx = 0; compiler_barrier(); __ASSERT((prod - cons) <= sizeof(hvc_data->intf->in), "Invalid input ring buffer"); while (cons != prod && recv < len) { in_idx = MASK_XENCONS_IDX(cons, hvc_data->intf->in); str[recv] = hvc_data->intf->in[in_idx]; recv++; cons++; } compiler_barrier(); hvc_data->intf->in_cons = cons; notify_evtchn(hvc_data->evtchn); return recv; } static int write_to_ring(const struct device *dev, const char *str, int len) { int sent = 0; struct hvc_xen_data *hvc_data = dev->data; XENCONS_RING_IDX cons = hvc_data->intf->out_cons; XENCONS_RING_IDX prod = hvc_data->intf->out_prod; XENCONS_RING_IDX out_idx = 0; compiler_barrier(); __ASSERT((prod - cons) <= sizeof(hvc_data->intf->out), "Invalid output ring buffer"); while ((sent < len) && ((prod - cons) < sizeof(hvc_data->intf->out))) { out_idx = MASK_XENCONS_IDX(prod, hvc_data->intf->out); hvc_data->intf->out[out_idx] = str[sent]; prod++; sent++; } compiler_barrier(); hvc_data->intf->out_prod = prod; if (sent) { 
notify_evtchn(hvc_data->evtchn); } return sent; } static int xen_hvc_poll_in(const struct device *dev, unsigned char *c) { int ret = 0; char temp; ret = read_from_ring(dev, &temp, sizeof(temp)); if (!ret) { /* Char was not received */ return -1; } *c = temp; return 0; } static void xen_hvc_poll_out(const struct device *dev, unsigned char c) { /* Not a good solution (notifying HV every time), but needed for poll_out */ (void) write_to_ring(dev, &c, sizeof(c)); } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int xen_hvc_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { int ret = 0, sent = 0; while (len) { sent = write_to_ring(dev, tx_data, len); ret += sent; tx_data += sent; len -= sent; if (len) { /* Need to be able to read it from another domain */ HYPERVISOR_sched_op(SCHEDOP_yield, NULL); } } return ret; } static int xen_hvc_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { return read_from_ring(dev, rx_data, size); } static void xen_hvc_irq_tx_enable(const struct device *dev) { /* * Need to explicitly call UART callback on TX enabling to * process available buffered TX actions, because no HV events * will be generated on tx_enable. */ hvc_uart_evtchn_cb(dev->data); } static int xen_hvc_irq_tx_ready(const struct device *dev) { return 1; } static void xen_hvc_irq_rx_enable(const struct device *dev) { /* * Need to explicitly call UART callback on RX enabling to * process available buffered RX actions, because no HV events * will be generated on rx_enable. */ hvc_uart_evtchn_cb(dev->data); } static int xen_hvc_irq_tx_complete(const struct device *dev) { /* * TX is performed by copying in ring buffer by fifo_fill, * so it will be always completed. 
*/ return 1; } static int xen_hvc_irq_rx_ready(const struct device *dev) { struct hvc_xen_data *data = dev->data; /* RX is ready only if data is available in ring buffer */ return (data->intf->in_prod != data->intf->in_cons); } static int xen_hvc_irq_is_pending(const struct device *dev) { return xen_hvc_irq_rx_ready(dev); } static int xen_hvc_irq_update(const struct device *dev) { /* Nothing needs to be updated before actual ISR */ return 1; } static void xen_hvc_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *user_data) { struct hvc_xen_data *data = dev->data; data->irq_cb = cb; data->irq_cb_data = user_data; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api xen_hvc_api = { .poll_in = xen_hvc_poll_in, .poll_out = xen_hvc_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = xen_hvc_fifo_fill, .fifo_read = xen_hvc_fifo_read, .irq_tx_enable = xen_hvc_irq_tx_enable, .irq_tx_ready = xen_hvc_irq_tx_ready, .irq_rx_enable = xen_hvc_irq_rx_enable, .irq_tx_complete = xen_hvc_irq_tx_complete, .irq_rx_ready = xen_hvc_irq_rx_ready, .irq_is_pending = xen_hvc_irq_is_pending, .irq_update = xen_hvc_irq_update, .irq_callback_set = xen_hvc_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void hvc_uart_evtchn_cb(void *priv) { struct hvc_xen_data *data = priv; if (data->irq_cb) { data->irq_cb(data->dev, data->irq_cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ int xen_console_init(const struct device *dev) { int ret = 0; uint64_t console_pfn = 0; uintptr_t console_addr = 0; struct hvc_xen_data *data = dev->data; data->dev = dev; ret = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, DOMID_SELF, &data->evtchn); if (ret) { LOG_ERR("%s: failed to get Xen console evtchn, ret = %d\n", __func__, ret); return ret; } ret = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, DOMID_SELF, &console_pfn); if (ret) { LOG_ERR("%s: failed to get Xen console PFN, ret = %d\n", 
__func__, ret); return ret; } console_addr = (uintptr_t) (console_pfn << XEN_PAGE_SHIFT); device_map(DEVICE_MMIO_RAM_PTR(dev), console_addr, XEN_PAGE_SIZE, K_MEM_CACHE_WB); data->intf = (struct xencons_interface *) DEVICE_MMIO_GET(dev); #ifdef CONFIG_UART_INTERRUPT_DRIVEN bind_event_channel(data->evtchn, hvc_uart_evtchn_cb, data); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ LOG_INF("Xen HVC inited successfully\n"); return 0; } DEVICE_DT_DEFINE(DT_NODELABEL(xen_hvc), xen_console_init, NULL, &xen_hvc_data, NULL, PRE_KERNEL_1, CONFIG_XEN_HVC_INIT_PRIORITY, &xen_hvc_api); #ifdef CONFIG_XEN_EARLY_CONSOLEIO int xen_consoleio_putc(int c) { char symbol = (char) c; HYPERVISOR_console_io(CONSOLEIO_write, sizeof(symbol), &symbol); return c; } int consoleio_hooks_set(void) { /* Will be replaced with poll_in/poll_out by uart_console.c later on boot */ __stdout_hook_install(xen_consoleio_putc); __printk_hook_install(xen_consoleio_putc); return 0; } SYS_INIT(consoleio_hooks_set, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); #endif /* CONFIG_XEN_EARLY_CONSOLEIO */ ```
/content/code_sandbox/drivers/serial/uart_hvc_xen.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,026
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_usart /** @file * @brief UART driver for MCUX Flexcomm USART. */ #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #include <fsl_usart.h> #include <soc.h> #include <fsl_device_registers.h> #include <zephyr/drivers/pinctrl.h> #ifdef CONFIG_UART_ASYNC_API #include <zephyr/drivers/dma.h> #include <fsl_inputmux.h> #endif #ifdef CONFIG_UART_ASYNC_API struct mcux_flexcomm_uart_dma_config { const struct device *dev; DMA_Type *base; uint8_t channel; struct dma_config cfg; }; #endif struct mcux_flexcomm_config { USART_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; uint32_t baud_rate; uint8_t parity; #ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT void (*irq_config_func)(const struct device *dev); #endif const struct pinctrl_dev_config *pincfg; #ifdef CONFIG_UART_ASYNC_API struct mcux_flexcomm_uart_dma_config tx_dma; struct mcux_flexcomm_uart_dma_config rx_dma; void (*rx_timeout_func)(struct k_work *work); void (*tx_timeout_func)(struct k_work *work); #endif }; #if CONFIG_UART_ASYNC_API struct mcux_flexcomm_uart_tx_data { const uint8_t *xfer_buf; size_t xfer_len; struct dma_block_config active_block; struct k_work_delayable timeout_work; }; struct mcux_flexcomm_uart_rx_data { uint8_t *xfer_buf; size_t xfer_len; struct dma_block_config active_block; uint8_t *next_xfer_buf; size_t next_xfer_len; struct k_work_delayable timeout_work; int32_t timeout; size_t count; size_t offset; }; #endif struct mcux_flexcomm_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_callback; void *irq_cb_data; #endif #ifdef CONFIG_UART_ASYNC_API uart_callback_t async_callback; void *async_cb_data; struct mcux_flexcomm_uart_tx_data tx_data; struct mcux_flexcomm_uart_rx_data rx_data; #endif #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE struct uart_config uart_config; #endif }; static int mcux_flexcomm_poll_in(const 
struct device *dev, unsigned char *c) { const struct mcux_flexcomm_config *config = dev->config; uint32_t flags = USART_GetStatusFlags(config->base); int ret = -1; if (flags & kUSART_RxFifoNotEmptyFlag) { *c = USART_ReadByte(config->base); ret = 0; } return ret; } static void mcux_flexcomm_poll_out(const struct device *dev, unsigned char c) { const struct mcux_flexcomm_config *config = dev->config; /* Wait until space is available in TX FIFO */ while (!(USART_GetStatusFlags(config->base) & kUSART_TxFifoEmptyFlag)) { } USART_WriteByte(config->base, c); } static int mcux_flexcomm_err_check(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t flags = USART_GetStatusFlags(config->base); int err = 0; if (flags & kUSART_RxError) { err |= UART_ERROR_OVERRUN; } if (flags & kUSART_ParityErrorFlag) { err |= UART_ERROR_PARITY; } if (flags & kUSART_FramingErrorFlag) { err |= UART_ERROR_FRAMING; } USART_ClearStatusFlags(config->base, kUSART_RxError | kUSART_ParityErrorFlag | kUSART_FramingErrorFlag); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int mcux_flexcomm_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct mcux_flexcomm_config *config = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && (USART_GetStatusFlags(config->base) & kUSART_TxFifoNotFullFlag)) { USART_WriteByte(config->base, tx_data[num_tx++]); } return num_tx; } static int mcux_flexcomm_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { const struct mcux_flexcomm_config *config = dev->config; uint8_t num_rx = 0U; while ((len - num_rx > 0) && (USART_GetStatusFlags(config->base) & kUSART_RxFifoNotEmptyFlag)) { rx_data[num_rx++] = USART_ReadByte(config->base); } return num_rx; } static void mcux_flexcomm_irq_tx_enable(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_TxLevelInterruptEnable; USART_EnableInterrupts(config->base, mask); } static 
void mcux_flexcomm_irq_tx_disable(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_TxLevelInterruptEnable; USART_DisableInterrupts(config->base, mask); } static int mcux_flexcomm_irq_tx_complete(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; return (config->base->STAT & USART_STAT_TXIDLE_MASK) != 0; } static int mcux_flexcomm_irq_tx_ready(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_TxLevelInterruptEnable; uint32_t flags = USART_GetStatusFlags(config->base); return (USART_GetEnabledInterrupts(config->base) & mask) && (flags & kUSART_TxFifoEmptyFlag); } static void mcux_flexcomm_irq_rx_enable(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_RxLevelInterruptEnable; USART_EnableInterrupts(config->base, mask); } static void mcux_flexcomm_irq_rx_disable(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_RxLevelInterruptEnable; USART_DisableInterrupts(config->base, mask); } static int mcux_flexcomm_irq_rx_full(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t flags = USART_GetStatusFlags(config->base); return (flags & kUSART_RxFifoNotEmptyFlag) != 0U; } static int mcux_flexcomm_irq_rx_pending(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_RxLevelInterruptEnable; return (USART_GetEnabledInterrupts(config->base) & mask) && mcux_flexcomm_irq_rx_full(dev); } static void mcux_flexcomm_irq_err_enable(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_NoiseErrorInterruptEnable | kUSART_FramingErrorInterruptEnable | kUSART_ParityErrorInterruptEnable; USART_EnableInterrupts(config->base, mask); } static void mcux_flexcomm_irq_err_disable(const struct 
device *dev) { const struct mcux_flexcomm_config *config = dev->config; uint32_t mask = kUSART_NoiseErrorInterruptEnable | kUSART_FramingErrorInterruptEnable | kUSART_ParityErrorInterruptEnable; USART_DisableInterrupts(config->base, mask); } static int mcux_flexcomm_irq_is_pending(const struct device *dev) { return (mcux_flexcomm_irq_tx_ready(dev) || mcux_flexcomm_irq_rx_pending(dev)); } static int mcux_flexcomm_irq_update(const struct device *dev) { return 1; } static void mcux_flexcomm_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct mcux_flexcomm_data *data = dev->data; data->irq_callback = cb; data->irq_cb_data = cb_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->async_callback = NULL; data->async_cb_data = NULL; #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int mcux_flexcomm_uart_configure(const struct device *dev, const struct uart_config *cfg) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; struct uart_config *uart_config = &data->uart_config; usart_config_t usart_config; usart_parity_mode_t parity_mode; usart_stop_bit_count_t stop_bits; usart_data_len_t data_bits = kUSART_8BitsPerChar; bool nine_bit_mode = false; uint32_t clock_freq; /* Set up structure to reconfigure UART */ USART_GetDefaultConfig(&usart_config); /* Set parity */ if (cfg->parity == UART_CFG_PARITY_ODD) { parity_mode = kUSART_ParityOdd; } else if (cfg->parity == UART_CFG_PARITY_EVEN) { parity_mode = kUSART_ParityEven; } else if (cfg->parity == UART_CFG_PARITY_NONE) { parity_mode = kUSART_ParityDisabled; } else { return -ENOTSUP; } usart_config.parityMode = parity_mode; /* Set baudrate */ usart_config.baudRate_Bps = cfg->baudrate; /* Set stop bits */ if (cfg->stop_bits == UART_CFG_STOP_BITS_1) { stop_bits = kUSART_OneStopBit; } else if (cfg->stop_bits == UART_CFG_STOP_BITS_2) { stop_bits = kUSART_TwoStopBit; } else { 
return -ENOTSUP; } usart_config.stopBitCount = stop_bits; /* Set data bits */ if (cfg->data_bits == UART_CFG_DATA_BITS_5 || cfg->data_bits == UART_CFG_DATA_BITS_6) { return -ENOTSUP; } else if (cfg->data_bits == UART_CFG_DATA_BITS_7) { data_bits = kUSART_7BitsPerChar; } else if (cfg->data_bits == UART_CFG_DATA_BITS_8) { data_bits = kUSART_8BitsPerChar; } else if (cfg->data_bits == UART_CFG_DATA_BITS_9) { nine_bit_mode = true; } else { return -EINVAL; } usart_config.bitCountPerChar = data_bits; /* Set flow control */ if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE) { usart_config.enableHardwareFlowControl = false; } else if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) { usart_config.enableHardwareFlowControl = true; } else { return -ENOTSUP; } /* Wait for USART to finish transmission and turn off */ USART_Deinit(config->base); /* Get UART clock frequency */ clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq); /* Handle 9 bit mode */ USART_Enable9bitMode(config->base, nine_bit_mode); /* Reconfigure UART */ USART_Init(config->base, &usart_config, clock_freq); /* Update driver device data */ uart_config->parity = cfg->parity; uart_config->baudrate = cfg->baudrate; uart_config->stop_bits = cfg->stop_bits; uart_config->data_bits = cfg->data_bits; uart_config->flow_ctrl = cfg->flow_ctrl; return 0; } static int mcux_flexcomm_uart_config_get(const struct device *dev, struct uart_config *cfg) { struct mcux_flexcomm_data *data = dev->data; *cfg = data->uart_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_ASYNC_API /* This function is called by this driver to notify user callback of events */ static void async_user_callback(const struct device *dev, struct uart_event *evt) { const struct mcux_flexcomm_data *data = dev->data; if (data->async_callback) { data->async_callback(dev, evt, data->async_cb_data); } } static int mcux_flexcomm_uart_callback_set(const struct device *dev, uart_callback_t callback, void 
*user_data) { struct mcux_flexcomm_data *data = dev->data; data->async_callback = callback; data->async_cb_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->irq_callback = NULL; data->irq_cb_data = NULL; #endif return 0; } static int mcux_flexcomm_uart_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; int ret = 0; if (config->tx_dma.dev == NULL) { return -ENODEV; } unsigned int key = irq_lock(); /* Getting DMA status to tell if channel is busy or not set up */ struct dma_status status; ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &status); if (ret < 0) { irq_unlock(key); return ret; } /* There is an ongoing transfer */ if (status.busy) { irq_unlock(key); return -EBUSY; } /* Disable TX DMA requests for uart while setting up */ USART_EnableTxDMA(config->base, false); /* Set up the dma channel/transfer */ data->tx_data.xfer_buf = buf; data->tx_data.xfer_len = len; data->tx_data.active_block.source_address = (uint32_t)buf; data->tx_data.active_block.dest_address = (uint32_t) &config->base->FIFOWR; data->tx_data.active_block.block_size = len; data->tx_data.active_block.next_block = NULL; ret = dma_config(config->tx_dma.dev, config->tx_dma.channel, (struct dma_config *) &config->tx_dma.cfg); if (ret) { irq_unlock(key); return ret; } /* Enable interrupt for when TX fifo is empty (all data transmitted) */ config->base->FIFOINTENSET |= USART_FIFOINTENSET_TXLVL_MASK; /* Enable TX DMA requests */ USART_EnableTxDMA(config->base, true); /* Trigger the DMA to start transfer */ ret = dma_start(config->tx_dma.dev, config->tx_dma.channel); if (ret) { irq_unlock(key); return ret; } /* Schedule a TX abort for @param timeout */ if (timeout != SYS_FOREVER_US) { k_work_schedule(&data->tx_data.timeout_work, K_USEC(timeout)); } irq_unlock(key); return ret; } static int mcux_flexcomm_uart_tx_abort(const struct device 
*dev) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; int ret = 0; /* First disable DMA requests from UART to prevent transfer * status change during the abort routine */ USART_EnableTxDMA(config->base, false); /* In case there is no transfer to abort */ if (data->tx_data.xfer_len == 0) { return -EFAULT; } /* In case a user called this function, do not abort twice */ (void)k_work_cancel_delayable(&data->tx_data.timeout_work); /* Getting dma status to use to calculate bytes sent */ struct dma_status status = {0}; ret = dma_get_status(config->tx_dma.dev, config->tx_dma.channel, &status); if (ret < 0) { return ret; } /* Done with the DMA transfer, can stop it now */ ret = dma_stop(config->tx_dma.dev, config->tx_dma.channel); if (ret) { return ret; } /* Define TX abort event before resetting driver variables */ size_t sent_len = data->tx_data.xfer_len - status.pending_length; const uint8_t *aborted_buf = data->tx_data.xfer_buf; struct uart_event tx_abort_event = { .type = UART_TX_ABORTED, .data.tx.buf = aborted_buf, .data.tx.len = sent_len }; /* Driver data needs reset since there is no longer an ongoing * transfer, this should before the user callback, not after, * just in case the user callback calls tx again */ data->tx_data.xfer_len = 0; data->tx_data.xfer_buf = NULL; async_user_callback(dev, &tx_abort_event); return ret; } static int mcux_flexcomm_uart_rx_enable(const struct device *dev, uint8_t *buf, const size_t len, const int32_t timeout) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; int ret = 0; if (config->rx_dma.dev == NULL) { return -ENODEV; } /* Getting DMA status to tell if channel is busy or not set up */ struct dma_status status; ret = dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &status); if (ret < 0) { return ret; } /* There is an ongoing transfer */ if (status.busy) { return -EBUSY; } /* Disable RX DMA requests for uart 
while setting up */ USART_EnableRxDMA(config->base, false); /* Set up the dma channel/transfer */ data->rx_data.xfer_buf = buf; data->rx_data.xfer_len = len; data->rx_data.active_block.dest_address = (uint32_t)data->rx_data.xfer_buf; data->rx_data.active_block.source_address = (uint32_t) &config->base->FIFORD; data->rx_data.active_block.block_size = data->rx_data.xfer_len; ret = dma_config(config->rx_dma.dev, config->rx_dma.channel, (struct dma_config *) &config->rx_dma.cfg); if (ret) { return ret; } data->rx_data.timeout = timeout; /* Enable RX DMA requests from UART */ USART_EnableRxDMA(config->base, true); /* Enable start bit detected interrupt, this is the only * way for the flexcomm uart to support the Zephyr Async API. * This is only needed if using a timeout. */ if (timeout != SYS_FOREVER_US) { config->base->INTENSET |= USART_INTENSET_STARTEN_MASK; } /* Trigger the DMA to start transfer */ ret = dma_start(config->rx_dma.dev, config->rx_dma.channel); if (ret) { return ret; } /* Request next buffer */ struct uart_event rx_buf_request = { .type = UART_RX_BUF_REQUEST, }; async_user_callback(dev, &rx_buf_request); return ret; } static void flexcomm_uart_rx_update(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; struct dma_status status; (void)dma_get_status(config->rx_dma.dev, config->rx_dma.channel, &status); /* Calculate how many bytes have been received by RX DMA */ size_t total_rx_receive_len = data->rx_data.xfer_len - status.pending_length; /* Generate RX ready event if there has been new data received */ if (total_rx_receive_len > data->rx_data.offset) { data->rx_data.count = total_rx_receive_len - data->rx_data.offset; struct uart_event rx_rdy_event = { .type = UART_RX_RDY, .data.rx.buf = data->rx_data.xfer_buf, .data.rx.len = data->rx_data.count, .data.rx.offset = data->rx_data.offset, }; async_user_callback(dev, &rx_rdy_event); } /* The data is no longer new, update buffer 
tracking variables */ data->rx_data.offset += data->rx_data.count; data->rx_data.count = 0; } static int mcux_flexcomm_uart_rx_disable(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; int ret = 0; /* This bit can be used to check if RX is already disabled * because it is the bit changed by enabling and disabling DMA * requests, and in this driver, RX DMA requests should only be * disabled when the rx function is disabled other than when * setting up in uart_rx_enable. */ if (!(config->base->FIFOCFG & USART_FIFOCFG_DMARX_MASK)) { return -EFAULT; } /* In case a user called this function, don't disable twice */ (void)k_work_cancel_delayable(&data->rx_data.timeout_work); /* Disable RX requests to pause DMA first and measure what happened, * Can't stop yet because DMA pending length is needed to * calculate how many bytes have been received */ USART_EnableRxDMA(config->base, false); /* Check if RX data received and generate rx ready event if so */ flexcomm_uart_rx_update(dev); /* Notify DMA driver to stop transfer only after RX data handled */ ret = dma_stop(config->rx_dma.dev, config->rx_dma.channel); if (ret) { return ret; } /* Generate buffer release event for current buffer */ struct uart_event current_buffer_release_event = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->rx_data.xfer_buf, }; async_user_callback(dev, &current_buffer_release_event); /* Generate buffer release event for next buffer */ if (data->rx_data.next_xfer_buf) { struct uart_event next_buffer_release_event = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->rx_data.next_xfer_buf }; async_user_callback(dev, &next_buffer_release_event); } /* Reset RX driver data */ data->rx_data.xfer_buf = NULL; data->rx_data.xfer_len = 0; data->rx_data.next_xfer_buf = NULL; data->rx_data.next_xfer_len = 0; data->rx_data.offset = 0; data->rx_data.count = 0; /* Final event is the RX disable event */ struct uart_event 
rx_disabled_event = { .type = UART_RX_DISABLED }; async_user_callback(dev, &rx_disabled_event); return ret; } static int mcux_flexcomm_uart_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; /* There is already a next buffer scheduled */ if (data->rx_data.next_xfer_buf != NULL || data->rx_data.next_xfer_len != 0) { return -EBUSY; } /* DMA requests are disabled, meaning the RX has been disabled */ if (!(config->base->FIFOCFG & USART_FIFOCFG_DMARX_MASK)) { return -EACCES; } /* If everything is fine, schedule the new buffer */ data->rx_data.next_xfer_buf = buf; data->rx_data.next_xfer_len = len; return 0; } /* This callback is from the TX DMA and consumed by this driver */ static void mcux_flexcomm_uart_dma_tx_callback(const struct device *dma_device, void *cb_data, uint32_t channel, int status) { /* DMA callback data was configured during driver init as UART device ptr */ struct device *dev = (struct device *)cb_data; const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; unsigned int key = irq_lock(); /* Turn off requests since we are aborting */ USART_EnableTxDMA(config->base, false); /* Timeout did not happen */ (void)k_work_cancel_delayable(&data->tx_data.timeout_work); irq_unlock(key); } /* This callback is from the RX DMA and consumed by this driver */ static void mcux_flexcomm_uart_dma_rx_callback(const struct device *dma_device, void *cb_data, uint32_t channel, int status) { /* DMA callback data was configured during driver init as UART device ptr */ struct device *dev = (struct device *)cb_data; const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; /* Cancel timeout now that the transfer is complete */ (void)k_work_cancel_delayable(&data->rx_data.timeout_work); /* Update user with received RX data if needed */ flexcomm_uart_rx_update(dev); /* Release 
current buffer */ struct uart_event current_buffer_release_event = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->rx_data.xfer_buf, }; async_user_callback(dev, &current_buffer_release_event); if (data->rx_data.next_xfer_buf) { /* Replace buffer in driver data */ data->rx_data.xfer_buf = data->rx_data.next_xfer_buf; data->rx_data.xfer_len = data->rx_data.next_xfer_len; data->rx_data.next_xfer_buf = NULL; data->rx_data.next_xfer_len = 0; /* Reload DMA channel with new buffer */ data->rx_data.active_block.block_size = data->rx_data.xfer_len; data->rx_data.active_block.dest_address = (uint32_t) data->rx_data.xfer_buf; dma_reload(config->rx_dma.dev, config->rx_dma.channel, data->rx_data.active_block.source_address, data->rx_data.active_block.dest_address, data->rx_data.active_block.block_size); /* Request next buffer */ struct uart_event rx_buf_request = { .type = UART_RX_BUF_REQUEST, }; async_user_callback(dev, &rx_buf_request); /* Start the new transfer */ dma_start(config->rx_dma.dev, config->rx_dma.channel); } else { /* If there is no next available buffer then disable DMA */ mcux_flexcomm_uart_rx_disable(dev); } /* Now that this transfer was finished, reset tracking variables */ data->rx_data.count = 0; data->rx_data.offset = 0; } #if defined(CONFIG_SOC_SERIES_IMXRT5XX) || defined(CONFIG_SOC_SERIES_IMXRT6XX) /* * This functions calculates the inputmux connection value * needed by INPUTMUX_EnableSignal to allow the UART's DMA * request to reach the DMA. 
*/ static uint32_t fc_uart_calc_inmux_connection(uint8_t channel, DMA_Type *base) { uint32_t chmux_avl = 0; uint32_t chmux_sel = 0; uint32_t chmux_val = 0; #if defined(CONFIG_SOC_SERIES_IMXRT5XX) uint32_t chmux_sel_id = 0; if (base == (DMA_Type *)DMA0_BASE) { chmux_sel_id = DMA0_CHMUX_SEL0_ID; } else if (base == (DMA_Type *)DMA1_BASE) { chmux_sel_id = DMA1_CHMUX_SEL0_ID; } if (channel >= 16 && !(channel >= 24 && channel <= 27)) { chmux_avl = 1 << CHMUX_AVL_SHIFT; } else { chmux_avl = 0; } /* 1 for flexcomm */ chmux_val = 1 << CHMUX_VAL_SHIFT; if (channel <= 15 || (channel >= 24 && channel <= 27)) { chmux_sel = 0; } else if (channel >= 16 && channel <= 23) { chmux_sel = (chmux_sel_id + 4 * (channel - 16)) << CHMUX_OFF_SHIFT; } else { chmux_sel = (chmux_sel_id + 4 * (channel - 20)) << CHMUX_OFF_SHIFT; } #endif /* RT5xx */ uint32_t req_en_id = 0; if (base == (DMA_Type *)DMA0_BASE) { req_en_id = DMA0_REQ_ENA0_ID; } else if (base == (DMA_Type *)DMA1_BASE) { req_en_id = DMA1_REQ_ENA0_ID; } uint32_t en_val; if (channel <= 31) { en_val = channel + (req_en_id << ENA_SHIFT); } else { en_val = (channel - 32) + ((req_en_id + 4) << ENA_SHIFT); } uint32_t ret = en_val + chmux_avl + chmux_val + chmux_sel; return ret; } #endif /* RT 3-digit */ static int flexcomm_uart_async_init(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; struct mcux_flexcomm_data *data = dev->data; if (config->rx_dma.dev == NULL || config->tx_dma.dev == NULL) { return -ENODEV; } if (!device_is_ready(config->rx_dma.dev) || !device_is_ready(config->tx_dma.dev)) { return -ENODEV; } /* Disable DMA requests */ USART_EnableTxDMA(config->base, false); USART_EnableRxDMA(config->base, false); /* Route DMA requests */ #if defined(CONFIG_SOC_SERIES_IMXRT5XX) || defined(CONFIG_SOC_SERIES_IMXRT6XX) /* RT 3 digit uses input mux to route DMA requests from * the UART peripheral to a hardware designated DMA channel */ INPUTMUX_Init(INPUTMUX); INPUTMUX_EnableSignal(INPUTMUX, 
fc_uart_calc_inmux_connection(config->rx_dma.channel, config->rx_dma.base), true); INPUTMUX_EnableSignal(INPUTMUX, fc_uart_calc_inmux_connection(config->tx_dma.channel, config->tx_dma.base), true); INPUTMUX_Deinit(INPUTMUX); #endif /* RT5xx and RT6xx */ /* Init work objects for RX and TX timeouts */ k_work_init_delayable(&data->tx_data.timeout_work, config->tx_timeout_func); k_work_init_delayable(&data->rx_data.timeout_work, config->rx_timeout_func); return 0; } #endif /* CONFIG_UART_ASYNC_API */ #ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT static void mcux_flexcomm_isr(const struct device *dev) { struct mcux_flexcomm_data *data = dev->data; #ifdef CONFIG_UART_INTERRUPT_DRIVEN if (data->irq_callback) { data->irq_callback(dev, data->irq_cb_data); } #endif #ifdef CONFIG_UART_ASYNC_API const struct mcux_flexcomm_config *config = dev->config; /* If there is an async callback then we are using async api */ if (data->async_callback) { /* Handle RX interrupt (START bit detected) * RX interrupt defeats the purpose of UART ASYNC API * because core is involved for every byte but * it is included for compatibility of applications. * There is no other way with flexcomm UART to handle * Zephyr's RX ASYNC API. However, if not using the RX * timeout (timeout is forever), then the performance is * still as might be expected. */ if (config->base->INTSTAT & USART_INTSTAT_START_MASK) { /* Receiving some data so reschedule timeout, * unless timeout is 0 in which case just handle * rx data now. If timeout is forever, don't do anything. 
*/ if (data->rx_data.timeout == 0) { flexcomm_uart_rx_update(dev); } else if (data->rx_data.timeout != SYS_FOREVER_US) { k_work_reschedule(&data->rx_data.timeout_work, K_USEC(data->rx_data.timeout)); } /* Write 1 to clear start bit status bit */ config->base->STAT |= USART_STAT_START_MASK; } /* Handle TX interrupt (TXLVL = 0) * Default TXLVL interrupt happens when TXLVL = 0, which * has not been changed by this driver, so in this case the * TX interrupt should happen when transfer is complete * because DMA filling TX fifo is faster than transmitter rate */ if (config->base->FIFOINTSTAT & USART_FIFOINTSTAT_TXLVL_MASK) { /* Disable interrupt */ config->base->FIFOINTENCLR = USART_FIFOINTENCLR_TXLVL_MASK; /* Set up TX done event to notify the user of completion */ struct uart_event tx_done_event = { .type = UART_TX_DONE, .data.tx.buf = data->tx_data.xfer_buf, .data.tx.len = data->tx_data.xfer_len, }; /* Reset TX data */ data->tx_data.xfer_len = 0; data->tx_data.xfer_buf = NULL; async_user_callback(dev, &tx_done_event); } } #endif /* CONFIG_UART_ASYNC_API */ } #endif /* CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT */ static int mcux_flexcomm_init(const struct device *dev) { const struct mcux_flexcomm_config *config = dev->config; #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE struct mcux_flexcomm_data *data = dev->data; struct uart_config *cfg = &data->uart_config; #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ usart_config_t usart_config; usart_parity_mode_t parity_mode; uint32_t clock_freq; int err; err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } if (!device_is_ready(config->clock_dev)) { return -ENODEV; } /* Get the clock frequency */ if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } if (config->parity == UART_CFG_PARITY_ODD) { parity_mode = kUSART_ParityOdd; } else if (config->parity == UART_CFG_PARITY_EVEN) { parity_mode = kUSART_ParityEven; } else { parity_mode = 
kUSART_ParityDisabled; } USART_GetDefaultConfig(&usart_config); usart_config.enableTx = true; usart_config.enableRx = true; usart_config.parityMode = parity_mode; usart_config.baudRate_Bps = config->baud_rate; #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE cfg->baudrate = config->baud_rate; cfg->parity = config->parity; /* From USART_GetDefaultConfig */ cfg->stop_bits = UART_CFG_STOP_BITS_1; cfg->data_bits = UART_CFG_DATA_BITS_8; cfg->flow_ctrl = UART_CFG_FLOW_CTRL_NONE; #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ USART_Init(config->base, &usart_config, clock_freq); #ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT config->irq_config_func(dev); #endif #ifdef CONFIG_UART_ASYNC_API err = flexcomm_uart_async_init(dev); if (err) { return err; } #endif return 0; } static const struct uart_driver_api mcux_flexcomm_driver_api = { .poll_in = mcux_flexcomm_poll_in, .poll_out = mcux_flexcomm_poll_out, .err_check = mcux_flexcomm_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = mcux_flexcomm_uart_configure, .config_get = mcux_flexcomm_uart_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = mcux_flexcomm_fifo_fill, .fifo_read = mcux_flexcomm_fifo_read, .irq_tx_enable = mcux_flexcomm_irq_tx_enable, .irq_tx_disable = mcux_flexcomm_irq_tx_disable, .irq_tx_complete = mcux_flexcomm_irq_tx_complete, .irq_tx_ready = mcux_flexcomm_irq_tx_ready, .irq_rx_enable = mcux_flexcomm_irq_rx_enable, .irq_rx_disable = mcux_flexcomm_irq_rx_disable, .irq_rx_ready = mcux_flexcomm_irq_rx_full, .irq_err_enable = mcux_flexcomm_irq_err_enable, .irq_err_disable = mcux_flexcomm_irq_err_disable, .irq_is_pending = mcux_flexcomm_irq_is_pending, .irq_update = mcux_flexcomm_irq_update, .irq_callback_set = mcux_flexcomm_irq_callback_set, #endif #ifdef CONFIG_UART_ASYNC_API .callback_set = mcux_flexcomm_uart_callback_set, .tx = mcux_flexcomm_uart_tx, .tx_abort = mcux_flexcomm_uart_tx_abort, .rx_enable = mcux_flexcomm_uart_rx_enable, .rx_disable = mcux_flexcomm_uart_rx_disable, 
.rx_buf_rsp = mcux_flexcomm_uart_rx_buf_rsp, #endif }; #ifdef CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT #define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC(n) \ static void mcux_flexcomm_irq_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ mcux_flexcomm_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = mcux_flexcomm_irq_config_func_##n, #else #define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC(n) #define UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC_INIT(n) #endif /* CONFIG_UART_MCUX_FLEXCOMM_ISR_SUPPORT */ #ifdef CONFIG_UART_ASYNC_API #define UART_MCUX_FLEXCOMM_TX_TIMEOUT_FUNC(n) \ static void mcux_flexcomm_uart_##n##_tx_timeout(struct k_work *work) \ { \ mcux_flexcomm_uart_tx_abort(DEVICE_DT_INST_GET(n)); \ } #define UART_MCUX_FLEXCOMM_RX_TIMEOUT_FUNC(n) \ static void mcux_flexcomm_uart_##n##_rx_timeout(struct k_work *work) \ { \ flexcomm_uart_rx_update(DEVICE_DT_INST_GET(n)); \ } DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_FLEXCOMM_TX_TIMEOUT_FUNC); DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_FLEXCOMM_RX_TIMEOUT_FUNC); #define UART_MCUX_FLEXCOMM_ASYNC_CFG(n) \ .tx_dma = { \ .dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ .channel = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel), \ .cfg = { \ .source_burst_length = 1, \ .dest_burst_length = 1, \ .source_data_size = 1, \ .dest_data_size = 1, \ .complete_callback_en = 1, \ .error_callback_dis = 1, \ .block_count = 1, \ .head_block = \ &mcux_flexcomm_##n##_data.tx_data.active_block, \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_callback = mcux_flexcomm_uart_dma_tx_callback, \ .user_data = (void *)DEVICE_DT_INST_GET(n), \ }, \ .base = (DMA_Type *) \ DT_REG_ADDR(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ }, \ .rx_dma = { \ .dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \ .channel = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel), \ .cfg = { \ .source_burst_length = 1, \ .dest_burst_length = 1, \ .source_data_size = 1, \ 
.dest_data_size = 1, \ .complete_callback_en = 1, \ .error_callback_dis = 1, \ .block_count = 1, \ .head_block = \ &mcux_flexcomm_##n##_data.rx_data.active_block, \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_callback = mcux_flexcomm_uart_dma_rx_callback, \ .user_data = (void *)DEVICE_DT_INST_GET(n) \ }, \ .base = (DMA_Type *) \ DT_REG_ADDR(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \ }, \ .rx_timeout_func = mcux_flexcomm_uart_##n##_rx_timeout, \ .tx_timeout_func = mcux_flexcomm_uart_##n##_tx_timeout, #else #define UART_MCUX_FLEXCOMM_ASYNC_CFG(n) #endif /* CONFIG_UART_ASYNC_API */ #define UART_MCUX_FLEXCOMM_INIT_CFG(n) \ static const struct mcux_flexcomm_config mcux_flexcomm_##n##_config = { \ .base = (USART_Type *)DT_INST_REG_ADDR(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ .baud_rate = DT_INST_PROP(n, current_speed), \ .parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC_INIT(n) \ UART_MCUX_FLEXCOMM_ASYNC_CFG(n) \ }; #define UART_MCUX_FLEXCOMM_INIT(n) \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static struct mcux_flexcomm_data mcux_flexcomm_##n##_data; \ \ static const struct mcux_flexcomm_config mcux_flexcomm_##n##_config; \ \ DEVICE_DT_INST_DEFINE(n, \ mcux_flexcomm_init, \ NULL, \ &mcux_flexcomm_##n##_data, \ &mcux_flexcomm_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &mcux_flexcomm_driver_api); \ \ UART_MCUX_FLEXCOMM_IRQ_CFG_FUNC(n) \ \ UART_MCUX_FLEXCOMM_INIT_CFG(n) DT_INST_FOREACH_STATUS_OKAY(UART_MCUX_FLEXCOMM_INIT) ```
/content/code_sandbox/drivers/serial/uart_mcux_flexcomm.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,588
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_usb_serial #include <hal/usb_serial_jtag_ll.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <errno.h> #include <soc.h> #include <zephyr/drivers/uart.h> #if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6) #include <zephyr/drivers/interrupt_controller/intc_esp32c3.h> #else #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #endif #include <zephyr/drivers/clock_control.h> #include <zephyr/sys/util.h> #include <esp_attr.h> #if defined(CONFIG_SOC_SERIES_ESP32C3) || defined(CONFIG_SOC_SERIES_ESP32C6) #define ISR_HANDLER isr_handler_t #else #define ISR_HANDLER intr_handler_t #endif /* * Timeout after which the poll_out function stops waiting for space in the tx fifo. * * Without this timeout, the function would get stuck forever and block the processor if no host is * connected to the USB port. * * USB full-speed uses a frame rate of 1 ms. Thus, a timeout of 50 ms provides plenty of safety * margin even for a loaded bus. This is the same value as used in the ESP-IDF. 
*/ #define USBSERIAL_POLL_OUT_TIMEOUT_MS (50U) struct serial_esp32_usb_config { const struct device *clock_dev; const clock_control_subsys_t clock_subsys; int irq_source; }; struct serial_esp32_usb_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_cb; void *irq_cb_data; #endif int irq_line; int64_t last_tx_time; }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void serial_esp32_usb_isr(void *arg); #endif static int serial_esp32_usb_poll_in(const struct device *dev, unsigned char *p_char) { struct serial_esp32_usb_data *data = dev->data; if (!usb_serial_jtag_ll_rxfifo_data_available()) { return -1; } usb_serial_jtag_ll_read_rxfifo(p_char, 1); return 0; } static void serial_esp32_usb_poll_out(const struct device *dev, unsigned char c) { struct serial_esp32_usb_data *data = dev->data; /* * If there is no USB host connected, this function will busy-wait once for the timeout * period, but return immediately for subsequent calls. */ do { if (usb_serial_jtag_ll_txfifo_writable()) { usb_serial_jtag_ll_write_txfifo(&c, 1); usb_serial_jtag_ll_txfifo_flush(); data->last_tx_time = k_uptime_get(); return; } } while ((k_uptime_get() - data->last_tx_time) < USBSERIAL_POLL_OUT_TIMEOUT_MS); } static int serial_esp32_usb_err_check(const struct device *dev) { ARG_UNUSED(dev); return 0; } static int serial_esp32_usb_init(const struct device *dev) { const struct serial_esp32_usb_config *config = dev->config; struct serial_esp32_usb_data *data = dev->data; if (!device_is_ready(config->clock_dev)) { return -ENODEV; } int ret = clock_control_on(config->clock_dev, config->clock_subsys); #ifdef CONFIG_UART_INTERRUPT_DRIVEN data->irq_line = esp_intr_alloc(config->irq_source, 0, (ISR_HANDLER)serial_esp32_usb_isr, (void *)dev, NULL); #endif return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int serial_esp32_usb_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { ARG_UNUSED(dev); int ret = usb_serial_jtag_ll_write_txfifo(tx_data, len); 
usb_serial_jtag_ll_txfifo_flush(); return ret; } static int serial_esp32_usb_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { ARG_UNUSED(dev); return usb_serial_jtag_ll_read_rxfifo(rx_data, len); } static void serial_esp32_usb_irq_tx_enable(const struct device *dev) { struct serial_esp32_usb_data *data = dev->data; usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SERIAL_IN_EMPTY); usb_serial_jtag_ll_ena_intr_mask(USB_SERIAL_JTAG_INTR_SERIAL_IN_EMPTY); if (data->irq_cb != NULL) { unsigned int key = irq_lock(); data->irq_cb(dev, data->irq_cb_data); arch_irq_unlock(key); } } static void serial_esp32_usb_irq_tx_disable(const struct device *dev) { ARG_UNUSED(dev); usb_serial_jtag_ll_disable_intr_mask(USB_SERIAL_JTAG_INTR_SERIAL_IN_EMPTY); } static int serial_esp32_usb_irq_tx_ready(const struct device *dev) { ARG_UNUSED(dev); return (usb_serial_jtag_ll_txfifo_writable() && usb_serial_jtag_ll_get_intr_ena_status() & USB_SERIAL_JTAG_INTR_SERIAL_IN_EMPTY); } static void serial_esp32_usb_irq_rx_enable(const struct device *dev) { ARG_UNUSED(dev); usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT); usb_serial_jtag_ll_ena_intr_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT); } static void serial_esp32_usb_irq_rx_disable(const struct device *dev) { ARG_UNUSED(dev); usb_serial_jtag_ll_disable_intr_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT); } static int serial_esp32_usb_irq_tx_complete(const struct device *dev) { ARG_UNUSED(dev); return usb_serial_jtag_ll_txfifo_writable(); } static int serial_esp32_usb_irq_rx_ready(const struct device *dev) { ARG_UNUSED(dev); return usb_serial_jtag_ll_rxfifo_data_available(); } static void serial_esp32_usb_irq_err_enable(const struct device *dev) { ARG_UNUSED(dev); } static void serial_esp32_usb_irq_err_disable(const struct device *dev) { ARG_UNUSED(dev); } static int serial_esp32_usb_irq_is_pending(const struct device *dev) { return serial_esp32_usb_irq_rx_ready(dev) || 
serial_esp32_usb_irq_tx_ready(dev); } static int serial_esp32_usb_irq_update(const struct device *dev) { ARG_UNUSED(dev); usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SERIAL_OUT_RECV_PKT); usb_serial_jtag_ll_clr_intsts_mask(USB_SERIAL_JTAG_INTR_SERIAL_IN_EMPTY); return 1; } static void serial_esp32_usb_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct serial_esp32_usb_data *data = dev->data; data->irq_cb_data = cb_data; data->irq_cb = cb; } static void serial_esp32_usb_isr(void *arg) { const struct device *dev = (const struct device *)arg; struct serial_esp32_usb_data *data = dev->data; uint32_t uart_intr_status = usb_serial_jtag_ll_get_intsts_mask(); if (uart_intr_status == 0) { return; } usb_serial_jtag_ll_clr_intsts_mask(uart_intr_status); if (data->irq_cb != NULL) { data->irq_cb(dev, data->irq_cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const DRAM_ATTR struct uart_driver_api serial_esp32_usb_api = { .poll_in = serial_esp32_usb_poll_in, .poll_out = serial_esp32_usb_poll_out, .err_check = serial_esp32_usb_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = serial_esp32_usb_fifo_fill, .fifo_read = serial_esp32_usb_fifo_read, .irq_tx_enable = serial_esp32_usb_irq_tx_enable, .irq_tx_disable = serial_esp32_usb_irq_tx_disable, .irq_tx_ready = serial_esp32_usb_irq_tx_ready, .irq_rx_enable = serial_esp32_usb_irq_rx_enable, .irq_rx_disable = serial_esp32_usb_irq_rx_disable, .irq_tx_complete = serial_esp32_usb_irq_tx_complete, .irq_rx_ready = serial_esp32_usb_irq_rx_ready, .irq_err_enable = serial_esp32_usb_irq_err_enable, .irq_err_disable = serial_esp32_usb_irq_err_disable, .irq_is_pending = serial_esp32_usb_irq_is_pending, .irq_update = serial_esp32_usb_irq_update, .irq_callback_set = serial_esp32_usb_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static const DRAM_ATTR struct serial_esp32_usb_config serial_esp32_usb_cfg = { .clock_dev = 
DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(0)), .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(0, offset), .irq_source = DT_INST_IRQN(0) }; static struct serial_esp32_usb_data serial_esp32_usb_data_0; DEVICE_DT_INST_DEFINE(0, serial_esp32_usb_init, NULL, &serial_esp32_usb_data_0, &serial_esp32_usb_cfg, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &serial_esp32_usb_api); ```
/content/code_sandbox/drivers/serial/serial_esp32_usb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,054
```c /* */ #define DT_DRV_COMPAT renesas_ra8_uart_sci_b #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/sys/util.h> #include <zephyr/irq.h> #include <soc.h> #include "r_sci_b_uart.h" #include "r_dtc.h" #include "r_transfer_api.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ra8_uart_sci_b); #if defined(CONFIG_UART_ASYNC_API) void sci_b_uart_rxi_isr(void); void sci_b_uart_txi_isr(void); void sci_b_uart_tei_isr(void); void sci_b_uart_eri_isr(void); #endif struct uart_ra_sci_b_config { R_SCI_B0_Type * const regs; const struct pinctrl_dev_config *pcfg; }; struct uart_ra_sci_b_data { const struct device *dev; struct st_sci_b_uart_instance_ctrl sci; struct uart_config uart_config; struct st_uart_cfg fsp_config; struct st_sci_b_uart_extended_cfg fsp_config_extend; struct st_sci_b_baud_setting_t fsp_baud_setting; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) uart_irq_callback_user_data_t user_cb; void *user_cb_data; uint32_t csr; #endif #if defined(CONFIG_UART_ASYNC_API) /* RX */ struct st_transfer_instance rx_transfer; struct st_dtc_instance_ctrl rx_transfer_ctrl; struct st_transfer_info rx_transfer_info; struct st_transfer_cfg rx_transfer_cfg; struct st_dtc_extended_cfg rx_transfer_cfg_extend; struct k_work_delayable rx_timeout_work; size_t rx_timeout; uint8_t *rx_buffer; size_t rx_buffer_len; size_t rx_buffer_cap; size_t rx_buffer_offset; uint8_t *rx_next_buffer; size_t rx_next_buffer_cap; /* TX */ struct st_transfer_instance tx_transfer; struct st_dtc_instance_ctrl tx_transfer_ctrl; struct st_transfer_info tx_transfer_info; struct st_transfer_cfg tx_transfer_cfg; struct st_dtc_extended_cfg tx_transfer_cfg_extend; struct k_work_delayable tx_timeout_work; size_t tx_timeout; uint8_t *tx_buffer; size_t tx_buffer_len; size_t tx_buffer_cap; uart_callback_t async_user_cb; void *async_user_cb_data; #endif }; static int uart_ra_sci_b_poll_in(const struct device *dev, unsigned char *c) { const struct 
uart_ra_sci_b_config *cfg = dev->config; /* Check if async reception was enabled */ if (IS_ENABLED(CONFIG_UART_ASYNC_API) && cfg->regs->CCR0_b.RIE) { return -EBUSY; } if (IS_ENABLED(CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE) ? cfg->regs->FRSR_b.R == 0U : cfg->regs->CSR_b.RDRF == 0U) { /* There are no characters available to read. */ return -1; } /* got a character */ *c = (unsigned char)cfg->regs->RDR; return 0; } static void uart_ra_sci_b_poll_out(const struct device *dev, unsigned char c) { const struct uart_ra_sci_b_config *cfg = dev->config; while (cfg->regs->CSR_b.TEND == 0U) { } cfg->regs->TDR_BY = c; } static int uart_ra_sci_b_err_check(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; const uint32_t status = cfg->regs->CSR; int errors = 0; if ((status & BIT(R_SCI_B0_CSR_ORER_Pos)) != 0) { errors |= UART_ERROR_OVERRUN; } if ((status & BIT(R_SCI_B0_CSR_PER_Pos)) != 0) { errors |= UART_ERROR_PARITY; } if ((status & BIT(R_SCI_B0_CSR_FER_Pos)) != 0) { errors |= UART_ERROR_FRAMING; } return errors; } static int uart_ra_sci_b_apply_config(const struct uart_config *config, struct st_uart_cfg *fsp_config, struct st_sci_b_uart_extended_cfg *fsp_config_extend, struct st_sci_b_baud_setting_t *fsp_baud_setting) { fsp_err_t fsp_err; fsp_err = R_SCI_B_UART_BaudCalculate(config->baudrate, false, 5000, fsp_baud_setting); __ASSERT(fsp_err == 0, "sci_uart: baud calculate error"); switch (config->parity) { case UART_CFG_PARITY_NONE: fsp_config->parity = UART_PARITY_OFF; break; case UART_CFG_PARITY_ODD: fsp_config->parity = UART_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: fsp_config->parity = UART_PARITY_EVEN; break; case UART_CFG_PARITY_MARK: return -ENOTSUP; case UART_CFG_PARITY_SPACE: return -ENOTSUP; default: return -EINVAL; } switch (config->stop_bits) { case UART_CFG_STOP_BITS_0_5: return -ENOTSUP; case UART_CFG_STOP_BITS_1: fsp_config->stop_bits = UART_STOP_BITS_1; break; case UART_CFG_STOP_BITS_1_5: return -ENOTSUP; case 
UART_CFG_STOP_BITS_2: fsp_config->stop_bits = UART_STOP_BITS_2; break; default: return -EINVAL; } switch (config->data_bits) { case UART_CFG_DATA_BITS_5: return -ENOTSUP; case UART_CFG_DATA_BITS_6: return -ENOTSUP; case UART_CFG_DATA_BITS_7: fsp_config->data_bits = UART_DATA_BITS_7; break; case UART_CFG_DATA_BITS_8: fsp_config->data_bits = UART_DATA_BITS_8; break; case UART_CFG_DATA_BITS_9: fsp_config->data_bits = UART_DATA_BITS_9; break; default: return -EINVAL; } fsp_config_extend->clock = SCI_B_UART_CLOCK_INT; fsp_config_extend->rx_edge_start = SCI_B_UART_START_BIT_FALLING_EDGE; fsp_config_extend->noise_cancel = SCI_B_UART_NOISE_CANCELLATION_DISABLE; fsp_config_extend->flow_control_pin = UINT16_MAX; #if CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE fsp_config_extend->rx_fifo_trigger = 0x8; #endif /* CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE */ switch (config->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: fsp_config_extend->flow_control = 0; fsp_config_extend->rs485_setting.enable = false; break; case UART_CFG_FLOW_CTRL_RTS_CTS: fsp_config_extend->flow_control = SCI_B_UART_FLOW_CONTROL_HARDWARE_CTSRTS; fsp_config_extend->rs485_setting.enable = false; break; case UART_CFG_FLOW_CTRL_DTR_DSR: return -ENOTSUP; case UART_CFG_FLOW_CTRL_RS485: /* TODO: implement this config */ return -ENOTSUP; default: return -EINVAL; } return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_ra_sci_b_configure(const struct device *dev, const struct uart_config *cfg) { int err; fsp_err_t fsp_err; struct uart_ra_sci_b_data *data = dev->data; err = uart_ra_sci_b_apply_config(cfg, &data->fsp_config, &data->fsp_config_extend, &data->fsp_baud_setting); if (err) { return err; } fsp_err = R_SCI_B_UART_Close(&data->sci); __ASSERT(fsp_err == 0, "sci_uart: configure: fsp close failed"); fsp_err = R_SCI_B_UART_Open(&data->sci, &data->fsp_config); __ASSERT(fsp_err == 0, "sci_uart: configure: fsp open failed"); memcpy(&data->uart_config, cfg, sizeof(struct uart_config)); return err; } static int 
uart_ra_sci_b_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_ra_sci_b_data *data = dev->data; memcpy(cfg, &data->uart_config, sizeof(*cfg)); return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_ra_sci_b_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; uint8_t num_tx = 0U; if (IS_ENABLED(CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE) && data->sci.fifo_depth > 0) { while ((size - num_tx > 0) && cfg->regs->FTSR != 0x10U) { /* FTSR flag will be cleared with byte write to TDR register */ /* Send a character (8bit , parity none) */ cfg->regs->TDR_BY = tx_data[num_tx++]; } } else { if (size > 0 && cfg->regs->CSR_b.TDRE) { /* TEND flag will be cleared with byte write to TDR register */ /* Send a character (8bit , parity none) */ cfg->regs->TDR_BY = tx_data[num_tx++]; } } return num_tx; } static int uart_ra_sci_b_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; uint8_t num_rx = 0U; if (IS_ENABLED(CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE) && data->sci.fifo_depth > 0) { while ((size - num_rx > 0) && cfg->regs->FRSR_b.R > 0U) { /* FRSR.DR flag will be cleared with byte write to RDR register */ /* Receive a character (8bit , parity none) */ rx_data[num_rx++] = cfg->regs->RDR; } if (cfg->regs->FRSR_b.R == 0U) { cfg->regs->CFCLR_b.RDRFC = 1U; cfg->regs->FFCLR_b.DRC = 1U; } } else { if (size > 0 && cfg->regs->CSR_b.RDRF) { /* Receive a character (8bit , parity none) */ rx_data[num_rx++] = cfg->regs->RDR; } } /* Clear overrun error flag */ cfg->regs->CFCLR_b.ORERC = 0U; return num_rx; } static void uart_ra_sci_b_irq_tx_enable(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; cfg->regs->CCR0 |= (BIT(R_SCI_B0_CCR0_TIE_Pos) | 
BIT(R_SCI_B0_CCR0_TEIE_Pos)); } static void uart_ra_sci_b_irq_tx_disable(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; cfg->regs->CCR0 &= ~(BIT(R_SCI_B0_CCR0_TIE_Pos) | BIT(R_SCI_B0_CCR0_TEIE_Pos)); } static int uart_ra_sci_b_irq_tx_ready(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; return (cfg->regs->CCR0_b.TIE == 1U) && (data->csr & (BIT(R_SCI_B0_CSR_TDRE_Pos) | BIT(R_SCI_B0_CSR_TEND_Pos))); } static int uart_ra_sci_b_irq_tx_complete(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; return (cfg->regs->CCR0_b.TEIE == 1U) && (data->csr & BIT(R_SCI_B0_CSR_TEND_Pos)); } static void uart_ra_sci_b_irq_rx_enable(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; cfg->regs->CCR0_b.RIE = 1U; } static void uart_ra_sci_b_irq_rx_disable(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; cfg->regs->CCR0_b.RIE = 0U; } static int uart_ra_sci_b_irq_rx_ready(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; return (cfg->regs->CCR0_b.RIE == 1U) && ((data->csr & BIT(R_SCI_B0_CSR_RDRF_Pos)) || (IS_ENABLED(CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE) && cfg->regs->FRSR_b.DR == 1U)); } static void uart_ra_sci_b_irq_err_enable(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; NVIC_EnableIRQ(data->fsp_config.eri_irq); } static void uart_ra_sci_b_irq_err_disable(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; NVIC_DisableIRQ(data->fsp_config.eri_irq); } static int uart_ra_sci_b_irq_is_pending(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; const uint32_t ccr0 = cfg->regs->CCR0; const uint32_t csr = cfg->regs->CSR; const bool tx_pending = ((ccr0 & BIT(R_SCI_B0_CCR0_TIE_Pos)) && (csr & 
(BIT(R_SCI_B0_CSR_TEND_Pos) | BIT(R_SCI_B0_CSR_TDRE_Pos)))); const bool rx_pending = ((ccr0 & BIT(R_SCI_B0_CCR0_RIE_Pos)) && ((csr & (BIT(R_SCI_B0_CSR_RDRF_Pos) | BIT(R_SCI_B0_CSR_PER_Pos) | BIT(R_SCI_B0_CSR_FER_Pos) | BIT(R_SCI_B0_CSR_ORER_Pos))) || (IS_ENABLED(CONFIG_UART_RA_SCI_B_UART_FIFO_ENABLE) && cfg->regs->FRSR_b.DR == 1U))); return tx_pending || rx_pending; } static int uart_ra_sci_b_irq_update(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; uint32_t cfclr = 0; data->csr = cfg->regs->CSR; if (data->csr & BIT(R_SCI_B0_CSR_PER_Pos)) { cfclr |= BIT(R_SCI_B0_CFCLR_PERC_Pos); } if (data->csr & BIT(R_SCI_B0_CSR_FER_Pos)) { cfclr |= BIT(R_SCI_B0_CFCLR_FERC_Pos); } if (data->csr & BIT(R_SCI_B0_CSR_ORER_Pos)) { cfclr |= BIT(R_SCI_B0_CFCLR_ORERC_Pos); } cfg->regs->CFCLR = cfclr; return 1; } static void uart_ra_sci_b_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_ra_sci_b_data *data = dev->data; data->user_cb = cb; data->user_cb_data = cb_data; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API static inline void async_user_callback(const struct device *dev, struct uart_event *event) { struct uart_ra_sci_b_data *data = dev->data; if (data->async_user_cb) { data->async_user_cb(dev, event, data->async_user_cb_data); } } static inline void async_rx_error(const struct device *dev, enum uart_rx_stop_reason reason) { struct uart_ra_sci_b_data *data = dev->data; struct uart_event event = { .type = UART_RX_STOPPED, .data.rx_stop.reason = reason, .data.rx_stop.data.buf = (uint8_t *)data->rx_buffer, .data.rx_stop.data.offset = data->rx_buffer_offset, .data.rx_stop.data.len = data->rx_buffer_len, }; async_user_callback(dev, &event); } static inline void async_rx_disabled(const struct device *dev) { struct uart_event event = { .type = UART_RX_DISABLED, }; return async_user_callback(dev, &event); } static inline void 
async_request_rx_buffer(const struct device *dev) { struct uart_event event = { .type = UART_RX_BUF_REQUEST, }; return async_user_callback(dev, &event); } static inline void async_rx_ready(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; if (data->rx_buffer_len == 0) { return; } struct uart_event event = { .type = UART_RX_RDY, .data.rx.buf = (uint8_t *)data->rx_buffer, .data.rx.offset = data->rx_buffer_offset, .data.rx.len = data->rx_buffer_len, }; async_user_callback(dev, &event); data->rx_buffer_offset += data->rx_buffer_len; data->rx_buffer_len = 0; } static inline void async_replace_rx_buffer(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; if (data->rx_next_buffer != NULL) { data->rx_buffer = data->rx_next_buffer; data->rx_buffer_cap = data->rx_next_buffer_cap; R_SCI_B_UART_Read(&data->sci, data->rx_buffer, data->rx_buffer_cap); data->rx_next_buffer = NULL; data->rx_next_buffer_cap = 0; async_request_rx_buffer(dev); } else { async_rx_disabled(dev); } } static inline void async_release_rx_buffer(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; if (data->rx_buffer == NULL) { return; } struct uart_event event = { .type = UART_RX_BUF_RELEASED, .data.rx.buf = (uint8_t *)data->rx_buffer, }; async_user_callback(dev, &event); data->rx_buffer = NULL; data->rx_buffer_cap = 0; data->rx_buffer_len = 0; data->rx_buffer_offset = 0; } static inline void async_release_rx_next_buffer(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; if (data->rx_next_buffer == NULL) { return; } struct uart_event event = { .type = UART_RX_BUF_RELEASED, .data.rx.buf = (uint8_t *)data->rx_next_buffer, }; async_user_callback(dev, &event); data->rx_next_buffer = NULL; data->rx_next_buffer_cap = 0; } static inline void async_update_tx_buffer(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; struct uart_event event = { .type = UART_TX_DONE, .data.tx.buf = (uint8_t *)data->tx_buffer, 
.data.tx.len = data->tx_buffer_cap, }; async_user_callback(dev, &event); data->tx_buffer = NULL; data->tx_buffer_cap = 0; } static inline void async_tx_abort(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; if (data->tx_buffer_len < data->tx_buffer_cap) { struct uart_event event = { .type = UART_TX_ABORTED, .data.tx.buf = (uint8_t *)data->tx_buffer, .data.tx.len = data->tx_buffer_len, }; async_user_callback(dev, &event); } data->tx_buffer = NULL; data->tx_buffer_cap = 0; } static inline void uart_ra_sci_b_async_timer_start(struct k_work_delayable *work, size_t timeout) { if (timeout != SYS_FOREVER_US && timeout != 0) { LOG_DBG("Async timer started for %d us", timeout); k_work_reschedule(work, K_USEC(timeout)); } } static inline int fsp_err_to_errno(fsp_err_t fsp_err) { switch (fsp_err) { case FSP_ERR_INVALID_ARGUMENT: return -EINVAL; case FSP_ERR_NOT_OPEN: return -EIO; case FSP_ERR_IN_USE: return -EBUSY; case FSP_ERR_UNSUPPORTED: return -ENOTSUP; case 0: return 0; default: return -EINVAL; } } static int uart_ra_sci_b_async_callback_set(const struct device *dev, uart_callback_t cb, void *cb_data) { struct uart_ra_sci_b_data *data = dev->data; unsigned int key = irq_lock(); data->async_user_cb = cb; data->async_user_cb_data = cb_data; irq_unlock(key); return 0; } static int uart_ra_sci_b_async_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { struct uart_ra_sci_b_data *data = dev->data; int err = 0; unsigned int key = irq_lock(); if (data->tx_buffer_len < data->tx_buffer_cap) { err = -EBUSY; goto unlock; } err = fsp_err_to_errno(R_SCI_B_UART_Write(&data->sci, buf, len)); if (err != 0) { goto unlock; } data->tx_buffer = (uint8_t *)buf; data->tx_buffer_cap = len; uart_ra_sci_b_async_timer_start(&data->tx_timeout_work, timeout); unlock: irq_unlock(key); return err; } static inline void disable_tx(const struct device *dev) { const struct uart_ra_sci_b_config *cfg = dev->config; /* Transmit interrupts must be disabled 
to start with. */ cfg->regs->CCR0 &= (uint32_t) ~(R_SCI_B0_CCR0_TIE_Msk | R_SCI_B0_CCR0_TEIE_Msk); /* * Make sure no transmission is in progress. Setting CCR0_b.TE to 0 when CSR_b.TEND * is 0 causes SCI peripheral to work abnormally. */ while (cfg->regs->CSR_b.TEND != 1U) { } cfg->regs->CCR0 &= (uint32_t) ~(R_SCI_B0_CCR0_TE_Msk); while (cfg->regs->CESR_b.TIST != 0U) { } } static int uart_ra_sci_b_async_tx_abort(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; int err = 0; disable_tx(dev); k_work_cancel_delayable(&data->tx_timeout_work); if (data->fsp_config.p_transfer_tx) { transfer_properties_t transfer_info; err = fsp_err_to_errno(R_DTC_InfoGet(&data->tx_transfer_ctrl, &transfer_info)); if (err != 0) { return err; } data->tx_buffer_len = data->tx_buffer_cap - transfer_info.transfer_length_remaining; } else { data->tx_buffer_len = data->tx_buffer_cap - data->sci.tx_src_bytes; } R_SCI_B_UART_Abort(&data->sci, UART_DIR_TX); async_tx_abort(dev); return 0; } static void uart_ra_sci_b_async_tx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_ra_sci_b_data *data = CONTAINER_OF(dwork, struct uart_ra_sci_b_data, tx_timeout_work); uart_ra_sci_b_async_tx_abort(data->dev); } static int uart_ra_sci_b_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; int err = 0; k_work_cancel_delayable(&data->rx_timeout_work); unsigned int key = irq_lock(); if (data->rx_buffer) { err = -EBUSY; goto unlock; } err = fsp_err_to_errno(R_SCI_B_UART_Read(&data->sci, buf, len)); if (err != 0) { goto unlock; } data->rx_timeout = timeout; data->rx_buffer = buf; data->rx_buffer_cap = len; data->rx_buffer_len = 0; data->rx_buffer_offset = 0; cfg->regs->CCR0_b.RIE = 1U; async_request_rx_buffer(dev); unlock: irq_unlock(key); return err; } static int uart_ra_sci_b_async_rx_buf_rsp(const 
struct device *dev, uint8_t *buf, size_t len) { struct uart_ra_sci_b_data *data = dev->data; data->rx_next_buffer = buf; data->rx_next_buffer_cap = len; return 0; } static int uart_ra_sci_b_async_rx_disable(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; const struct uart_ra_sci_b_config *cfg = dev->config; uint32_t remaining_byte = 0; int err = 0; unsigned int key = irq_lock(); k_work_cancel_delayable(&data->rx_timeout_work); err = fsp_err_to_errno(R_SCI_B_UART_ReadStop(&data->sci, &remaining_byte)); if (err != 0) { goto unlock; } if (!data->fsp_config.p_transfer_rx) { data->rx_buffer_len = data->rx_buffer_cap - data->rx_buffer_offset - remaining_byte; } async_rx_ready(dev); async_release_rx_buffer(dev); async_release_rx_next_buffer(dev); async_rx_disabled(dev); /* Clear the RDRF bit so that the next reception can be raised correctly */ cfg->regs->CFCLR_b.RDRFC = 1U; unlock: irq_unlock(key); return err; } static void uart_ra_sci_b_async_rx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_ra_sci_b_data *data = CONTAINER_OF(dwork, struct uart_ra_sci_b_data, rx_timeout_work); const struct device *dev = data->dev; unsigned int key = irq_lock(); if (!data->fsp_config.p_transfer_rx) { data->rx_buffer_len = data->rx_buffer_cap - data->rx_buffer_offset - data->sci.rx_dest_bytes; } async_rx_ready(dev); irq_unlock(key); } static void uart_ra_sci_b_callback_adapter(struct st_uart_callback_arg *fsp_args) { const struct device *dev = fsp_args->p_context; struct uart_ra_sci_b_data *data = dev->data; switch (fsp_args->event) { case UART_EVENT_TX_COMPLETE: { data->tx_buffer_len = data->tx_buffer_cap; async_update_tx_buffer(dev); return; } case UART_EVENT_RX_COMPLETE: { data->rx_buffer_len = data->rx_buffer_cap - data->rx_buffer_offset - data->sci.rx_dest_bytes; async_rx_ready(dev); async_release_rx_buffer(dev); async_replace_rx_buffer(dev); return; } case UART_EVENT_ERR_PARITY: return 
async_rx_error(dev, UART_ERROR_PARITY); case UART_EVENT_ERR_FRAMING: return async_rx_error(dev, UART_ERROR_FRAMING); case UART_EVENT_ERR_OVERFLOW: return async_rx_error(dev, UART_ERROR_OVERRUN); case UART_EVENT_BREAK_DETECT: return async_rx_error(dev, UART_BREAK); case UART_EVENT_TX_DATA_EMPTY: case UART_EVENT_RX_CHAR: break; } } #endif /* CONFIG_UART_ASYNC_API */ static const struct uart_driver_api uart_ra_sci_b_driver_api = { .poll_in = uart_ra_sci_b_poll_in, .poll_out = uart_ra_sci_b_poll_out, .err_check = uart_ra_sci_b_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_ra_sci_b_configure, .config_get = uart_ra_sci_b_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_ra_sci_b_fifo_fill, .fifo_read = uart_ra_sci_b_fifo_read, .irq_tx_enable = uart_ra_sci_b_irq_tx_enable, .irq_tx_disable = uart_ra_sci_b_irq_tx_disable, .irq_tx_ready = uart_ra_sci_b_irq_tx_ready, .irq_rx_enable = uart_ra_sci_b_irq_rx_enable, .irq_rx_disable = uart_ra_sci_b_irq_rx_disable, .irq_tx_complete = uart_ra_sci_b_irq_tx_complete, .irq_rx_ready = uart_ra_sci_b_irq_rx_ready, .irq_err_enable = uart_ra_sci_b_irq_err_enable, .irq_err_disable = uart_ra_sci_b_irq_err_disable, .irq_is_pending = uart_ra_sci_b_irq_is_pending, .irq_update = uart_ra_sci_b_irq_update, .irq_callback_set = uart_ra_sci_b_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #if CONFIG_UART_ASYNC_API .callback_set = uart_ra_sci_b_async_callback_set, .tx = uart_ra_sci_b_async_tx, .tx_abort = uart_ra_sci_b_async_tx_abort, .rx_enable = uart_ra_sci_b_async_rx_enable, .rx_buf_rsp = uart_ra_sci_b_async_rx_buf_rsp, .rx_disable = uart_ra_sci_b_async_rx_disable, #endif /* CONFIG_UART_ASYNC_API */ }; static int uart_ra_sci_b_init(const struct device *dev) { const struct uart_ra_sci_b_config *config = dev->config; struct uart_ra_sci_b_data *data = dev->data; int ret; fsp_err_t fsp_err; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, 
PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } /* Setup fsp sci_uart setting */ ret = uart_ra_sci_b_apply_config(&data->uart_config, &data->fsp_config, &data->fsp_config_extend, &data->fsp_baud_setting); if (ret != 0) { return ret; } data->fsp_config_extend.p_baud_setting = &data->fsp_baud_setting; data->fsp_config.p_extend = &data->fsp_config_extend; #if defined(CONFIG_UART_ASYNC_API) data->fsp_config.p_callback = uart_ra_sci_b_callback_adapter; data->fsp_config.p_context = dev; k_work_init_delayable(&data->tx_timeout_work, uart_ra_sci_b_async_tx_timeout); k_work_init_delayable(&data->rx_timeout_work, uart_ra_sci_b_async_rx_timeout); #endif /* defined(CONFIG_UART_ASYNC_API) */ fsp_err = R_SCI_B_UART_Open(&data->sci, &data->fsp_config); __ASSERT(fsp_err == 0, "sci_uart: initialization: open failed"); return 0; } #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) static void uart_ra_sci_b_rxi_isr(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); } #endif #if defined(CONFIG_UART_ASYNC_API) uart_ra_sci_b_async_timer_start(&data->rx_timeout_work, data->rx_timeout); if (data->fsp_config.p_transfer_rx) { /* * The RX DTC is set to TRANSFER_IRQ_EACH, triggering an interrupt for each received * byte. However, the sci_b_uart_rxi_isr function currently only handles the * TRANSFER_IRQ_END case, which assumes the transfer is complete. To address this, * we need to add some code to simulate the TRANSFER_IRQ_END case by counting the * received length. 
*/ data->rx_buffer_len++; if (data->rx_buffer_offset + data->rx_buffer_len == data->rx_buffer_cap) { sci_b_uart_rxi_isr(); } else { R_ICU->IELSR_b[data->fsp_config.rxi_irq].IR = 0U; } } else { sci_b_uart_rxi_isr(); } #else R_ICU->IELSR_b[data->fsp_config.rxi_irq].IR = 0U; #endif } static void uart_ra_sci_b_txi_isr(const struct device *dev) { #if defined(CONFIG_UART_INTERRUPT_DRIVEN) struct uart_ra_sci_b_data *data = dev->data; if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); } #endif #if defined(CONFIG_UART_ASYNC_API) sci_b_uart_txi_isr(); #else R_ICU->IELSR_b[data->fsp_config.txi_irq].IR = 0U; #endif } static void uart_ra_sci_b_tei_isr(const struct device *dev) { struct uart_ra_sci_b_data *data = dev->data; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); } #endif #if defined(CONFIG_UART_ASYNC_API) k_work_cancel_delayable(&data->tx_timeout_work); sci_b_uart_tei_isr(); #else R_ICU->IELSR_b[data->fsp_config.tei_irq].IR = 0U; #endif } static void uart_ra_sci_b_eri_isr(const struct device *dev) { #if defined(CONFIG_UART_INTERRUPT_DRIVEN) struct uart_ra_sci_b_data *data = dev->data; if (data->user_cb != NULL) { data->user_cb(dev, data->user_cb_data); } #endif #if defined(CONFIG_UART_ASYNC_API) sci_b_uart_eri_isr(); #else R_ICU->IELSR_b[data->fsp_config.eri_irq].IR = 0U; #endif } #endif /* defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) */ #define _ELC_EVENT_SCI_RXI(channel) ELC_EVENT_SCI##channel##_RXI #define _ELC_EVENT_SCI_TXI(channel) ELC_EVENT_SCI##channel##_TXI #define _ELC_EVENT_SCI_TEI(channel) ELC_EVENT_SCI##channel##_TEI #define _ELC_EVENT_SCI_ERI(channel) ELC_EVENT_SCI##channel##_ERI #define ELC_EVENT_SCI_RXI(channel) _ELC_EVENT_SCI_RXI(channel) #define ELC_EVENT_SCI_TXI(channel) _ELC_EVENT_SCI_TXI(channel) #define ELC_EVENT_SCI_TEI(channel) _ELC_EVENT_SCI_TEI(channel) #define ELC_EVENT_SCI_ERI(channel) _ELC_EVENT_SCI_ERI(channel) #if 
defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) #define UART_RA_SCI_B_IRQ_CONFIG_INIT(index) \ do { \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq)] = \ ELC_EVENT_SCI_RXI(DT_INST_PROP(index, channel)); \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq)] = \ ELC_EVENT_SCI_TXI(DT_INST_PROP(index, channel)); \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq)] = \ ELC_EVENT_SCI_TEI(DT_INST_PROP(index, channel)); \ R_ICU->IELSR[DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, irq)] = \ ELC_EVENT_SCI_ERI(DT_INST_PROP(index, channel)); \ \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, priority), \ uart_ra_sci_b_rxi_isr, DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, priority), \ uart_ra_sci_b_txi_isr, DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, priority), \ uart_ra_sci_b_tei_isr, DEVICE_DT_INST_GET(index), 0); \ IRQ_CONNECT(DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, priority), \ uart_ra_sci_b_eri_isr, DEVICE_DT_INST_GET(index), 0); \ } while (0) #else #define UART_RA_SCI_B_IRQ_CONFIG_INIT(index) #endif #if defined(CONFIG_UART_ASYNC_API) #define UART_RA_SCI_B_DTC_INIT(index) \ do { \ uart_ra_sci_b_data_##index.fsp_config.p_transfer_rx = \ &uart_ra_sci_b_data_##index.rx_transfer; \ uart_ra_sci_b_data_##index.fsp_config.p_transfer_tx = \ &uart_ra_sci_b_data_##index.tx_transfer; \ } while (0) #define UART_RA_SCI_B_ASYNC_INIT(index) \ .rx_transfer_info = \ { \ .transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \ .transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_DESTINATION, \ .transfer_settings_word_b.irq = TRANSFER_IRQ_EACH, \ .transfer_settings_word_b.chain_mode = 
TRANSFER_CHAIN_MODE_DISABLED, \ .transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_FIXED, \ .transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE, \ .transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL, \ .p_dest = (void *)NULL, \ .p_src = (void const *)NULL, \ .num_blocks = 0, \ .length = 0, \ }, \ .rx_transfer_cfg_extend = {.activation_source = \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq)}, \ .rx_transfer_cfg = \ { \ .p_info = &uart_ra_sci_b_data_##index.rx_transfer_info, \ .p_extend = &uart_ra_sci_b_data_##index.rx_transfer_cfg_extend, \ }, \ .rx_transfer = \ { \ .p_ctrl = &uart_ra_sci_b_data_##index.rx_transfer_ctrl, \ .p_cfg = &uart_ra_sci_b_data_##index.rx_transfer_cfg, \ .p_api = &g_transfer_on_dtc, \ }, \ .tx_transfer_info = \ { \ .transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED, \ .transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_SOURCE, \ .transfer_settings_word_b.irq = TRANSFER_IRQ_END, \ .transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED, \ .transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \ .transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE, \ .transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL, \ .p_dest = (void *)NULL, \ .p_src = (void const *)NULL, \ .num_blocks = 0, \ .length = 0, \ }, \ .tx_transfer_cfg_extend = {.activation_source = \ DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq)}, \ .tx_transfer_cfg = \ { \ .p_info = &uart_ra_sci_b_data_##index.tx_transfer_info, \ .p_extend = &uart_ra_sci_b_data_##index.tx_transfer_cfg_extend, \ }, \ .tx_transfer = { \ .p_ctrl = &uart_ra_sci_b_data_##index.tx_transfer_ctrl, \ .p_cfg = &uart_ra_sci_b_data_##index.tx_transfer_cfg, \ .p_api = &g_transfer_on_dtc, \ }, #else #define UART_RA_SCI_B_ASYNC_INIT(index) #define UART_RA_SCI_B_DTC_INIT(index) #endif #define UART_RA_SCI_B_INIT(index) \ PINCTRL_DT_DEFINE(DT_INST_PARENT(index)); \ \ static const struct uart_ra_sci_b_config uart_ra_sci_b_config_##index = { \ .pcfg = 
PINCTRL_DT_DEV_CONFIG_GET(DT_INST_PARENT(index)), \ .regs = (R_SCI_B0_Type *)DT_REG_ADDR(DT_INST_PARENT(index)), \ }; \ \ static struct uart_ra_sci_b_data uart_ra_sci_b_data_##index = { \ .uart_config = \ { \ .baudrate = DT_INST_PROP(index, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = COND_CODE_1(DT_NODE_HAS_PROP(idx, hw_flow_control), \ (UART_CFG_FLOW_CTRL_RTS_CTS), \ (UART_CFG_FLOW_CTRL_NONE)), \ }, \ .fsp_config = \ { \ .channel = DT_INST_PROP(index, channel), \ .rxi_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, priority), \ .rxi_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), rxi, irq), \ .txi_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, priority), \ .txi_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), txi, irq), \ .tei_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, priority), \ .tei_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), tei, irq), \ .eri_ipl = DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, priority), \ .eri_irq = DT_IRQ_BY_NAME(DT_INST_PARENT(index), eri, irq), \ }, \ .fsp_config_extend = {}, \ .fsp_baud_setting = {}, \ .dev = DEVICE_DT_GET(DT_DRV_INST(index)), \ UART_RA_SCI_B_ASYNC_INIT(index)}; \ \ static int uart_ra_sci_b_init_##index(const struct device *dev) \ { \ UART_RA_SCI_B_DTC_INIT(index); \ UART_RA_SCI_B_IRQ_CONFIG_INIT(index); \ int err = uart_ra_sci_b_init(dev); \ if (err != 0) { \ return err; \ } \ return 0; \ } \ \ DEVICE_DT_INST_DEFINE(index, uart_ra_sci_b_init_##index, NULL, \ &uart_ra_sci_b_data_##index, &uart_ra_sci_b_config_##index, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_ra_sci_b_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_RA_SCI_B_INIT) ```
/content/code_sandbox/drivers/serial/uart_renesas_ra8_sci_b.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,689
```c /* */ #define DT_DRV_COMPAT atmel_sam_uart /** @file * @brief UART driver for Atmel SAM MCU family. */ #include <errno.h> #include <zephyr/sys/__assert.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <soc.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <zephyr/irq.h> /* Device constant configuration parameters */ struct uart_sam_dev_cfg { Uart *regs; const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif }; /* Device run time data */ struct uart_sam_dev_data { uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_cb; /* Interrupt Callback */ void *irq_cb_data; /* Interrupt Callback Arg */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static int uart_sam_poll_in(const struct device *dev, unsigned char *c) { const struct uart_sam_dev_cfg *const cfg = dev->config; Uart * const uart = cfg->regs; if (!(uart->UART_SR & UART_SR_RXRDY)) { return -1; } /* got a character */ *c = (unsigned char)uart->UART_RHR; return 0; } static void uart_sam_poll_out(const struct device *dev, unsigned char c) { const struct uart_sam_dev_cfg *const cfg = dev->config; Uart * const uart = cfg->regs; /* Wait for transmitter to be ready */ while (!(uart->UART_SR & UART_SR_TXRDY)) { } /* send a character */ uart->UART_THR = (uint32_t)c; } static int uart_sam_err_check(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; int errors = 0; if (uart->UART_SR & UART_SR_OVRE) { errors |= UART_ERROR_OVERRUN; } if (uart->UART_SR & UART_SR_PARE) { errors |= UART_ERROR_PARITY; } if (uart->UART_SR & UART_SR_FRAME) { errors |= UART_ERROR_FRAMING; } uart->UART_CR = UART_CR_RSTSTA; return errors; } static int uart_sam_baudrate_set(const struct device *dev, uint32_t baudrate) { struct 
uart_sam_dev_data *const dev_data = dev->data; const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uint32_t divisor; __ASSERT(baudrate, "baud rate has to be bigger than 0"); __ASSERT(SOC_ATMEL_SAM_MCK_FREQ_HZ/16U >= baudrate, "MCK frequency is too small to set required baud rate"); divisor = SOC_ATMEL_SAM_MCK_FREQ_HZ / 16U / baudrate; if (divisor > 0xFFFF) { return -EINVAL; } uart->UART_BRGR = UART_BRGR_CD(divisor); dev_data->baud_rate = baudrate; return 0; } static uint32_t uart_sam_cfg2sam_parity(uint8_t parity) { switch (parity) { case UART_CFG_PARITY_EVEN: return UART_MR_PAR_EVEN; case UART_CFG_PARITY_ODD: return UART_MR_PAR_ODD; case UART_CFG_PARITY_SPACE: return UART_MR_PAR_SPACE; case UART_CFG_PARITY_MARK: return UART_MR_PAR_MARK; case UART_CFG_PARITY_NONE: default: return UART_MR_PAR_NO; } } static uint8_t uart_sam_get_parity(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; switch (uart->UART_MR & UART_MR_PAR_Msk) { case UART_MR_PAR_EVEN: return UART_CFG_PARITY_EVEN; case UART_MR_PAR_ODD: return UART_CFG_PARITY_ODD; case UART_MR_PAR_SPACE: return UART_CFG_PARITY_SPACE; case UART_MR_PAR_MARK: return UART_CFG_PARITY_MARK; case UART_MR_PAR_NO: default: return UART_CFG_PARITY_NONE; } } static int uart_sam_configure(const struct device *dev, const struct uart_config *cfg) { int retval; const struct uart_sam_dev_cfg *const config = dev->config; volatile Uart * const uart = config->regs; /* Driver only supports 8 data bits, 1 stop bit, and no flow control */ if (cfg->stop_bits != UART_CFG_STOP_BITS_1 || cfg->data_bits != UART_CFG_DATA_BITS_8 || cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { return -ENOTSUP; } /* Reset and disable UART */ uart->UART_CR = UART_CR_RSTRX | UART_CR_RSTTX | UART_CR_RXDIS | UART_CR_TXDIS | UART_CR_RSTSTA; /* baud rate driven by the peripheral clock, UART does not filter * the receive line, parity chosen by config */ 
uart->UART_MR = UART_MR_CHMODE_NORMAL | uart_sam_cfg2sam_parity(cfg->parity); /* Set baud rate */ retval = uart_sam_baudrate_set(dev, cfg->baudrate); if (retval != 0) { return retval; } /* Enable receiver and transmitter */ uart->UART_CR = UART_CR_RXEN | UART_CR_TXEN; return 0; } static int uart_sam_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_sam_dev_data *const dev_data = dev->data; cfg->baudrate = dev_data->baud_rate; cfg->parity = uart_sam_get_parity(dev); /* only supported mode for this peripheral */ cfg->stop_bits = UART_CFG_STOP_BITS_1; cfg->data_bits = UART_CFG_DATA_BITS_8; cfg->flow_ctrl = UART_CFG_FLOW_CTRL_NONE; return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_sam_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; /* Wait for transmitter to be ready. */ while ((uart->UART_SR & UART_SR_TXRDY) == 0) { } uart->UART_THR = *tx_data; return 1; } static int uart_sam_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; int bytes_read; bytes_read = 0; while (bytes_read < size) { if (uart->UART_SR & UART_SR_RXRDY) { rx_data[bytes_read] = uart->UART_RHR; bytes_read++; } else { break; } } return bytes_read; } static void uart_sam_irq_tx_enable(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uart->UART_IER = UART_IER_TXRDY; } static void uart_sam_irq_tx_disable(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uart->UART_IDR = UART_IDR_TXRDY; } static int uart_sam_irq_tx_ready(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; /* Check that the transmitter is ready but only * 
return true if the interrupt is also enabled */ return (uart->UART_SR & UART_SR_TXRDY && uart->UART_IMR & UART_IMR_TXRDY); } static void uart_sam_irq_rx_enable(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uart->UART_IER = UART_IER_RXRDY; } static void uart_sam_irq_rx_disable(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uart->UART_IDR = UART_IDR_RXRDY; } static int uart_sam_irq_tx_complete(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; return (uart->UART_SR & UART_SR_TXRDY && uart->UART_IMR & UART_IMR_TXEMPTY); } static int uart_sam_irq_rx_ready(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; return (uart->UART_SR & UART_SR_RXRDY); } static void uart_sam_irq_err_enable(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uart->UART_IER = UART_IER_OVRE | UART_IER_FRAME | UART_IER_PARE; } static void uart_sam_irq_err_disable(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; uart->UART_IDR = UART_IDR_OVRE | UART_IDR_FRAME | UART_IDR_PARE; } static int uart_sam_irq_is_pending(const struct device *dev) { const struct uart_sam_dev_cfg *const cfg = dev->config; volatile Uart * const uart = cfg->regs; return (uart->UART_IMR & (UART_IMR_TXRDY | UART_IMR_RXRDY)) & (uart->UART_SR & (UART_SR_TXRDY | UART_SR_RXRDY)); } static int uart_sam_irq_update(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void uart_sam_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_sam_dev_data *const dev_data = dev->data; dev_data->irq_cb = cb; dev_data->irq_cb_data = cb_data; } 
static void uart_sam_isr(const struct device *dev) { struct uart_sam_dev_data *const dev_data = dev->data; if (dev_data->irq_cb) { dev_data->irq_cb(dev, dev_data->irq_cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int uart_sam_init(const struct device *dev) { int retval; const struct uart_sam_dev_cfg *const cfg = dev->config; struct uart_sam_dev_data *const dev_data = dev->data; Uart * const uart = cfg->regs; /* Enable UART clock in PMC */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&cfg->clock_cfg); /* Connect pins to the peripheral */ retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (retval < 0) { return retval; } /* Disable Interrupts */ uart->UART_IDR = 0xFFFFFFFF; #ifdef CONFIG_UART_INTERRUPT_DRIVEN cfg->irq_config_func(dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ struct uart_config uart_config = { .baudrate = dev_data->baud_rate, .parity = UART_CFG_PARITY_NONE, .stop_bits = UART_CFG_STOP_BITS_1, .data_bits = UART_CFG_DATA_BITS_8, .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, }; return uart_sam_configure(dev, &uart_config); } static const struct uart_driver_api uart_sam_driver_api = { .poll_in = uart_sam_poll_in, .poll_out = uart_sam_poll_out, .err_check = uart_sam_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_sam_configure, .config_get = uart_sam_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_sam_fifo_fill, .fifo_read = uart_sam_fifo_read, .irq_tx_enable = uart_sam_irq_tx_enable, .irq_tx_disable = uart_sam_irq_tx_disable, .irq_tx_ready = uart_sam_irq_tx_ready, .irq_rx_enable = uart_sam_irq_rx_enable, .irq_rx_disable = uart_sam_irq_rx_disable, .irq_tx_complete = uart_sam_irq_tx_complete, .irq_rx_ready = uart_sam_irq_rx_ready, .irq_err_enable = uart_sam_irq_err_enable, .irq_err_disable = uart_sam_irq_err_disable, .irq_is_pending = uart_sam_irq_is_pending, .irq_update = uart_sam_irq_update, .irq_callback_set = 
uart_sam_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define UART_SAM_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct uart_sam_dev_cfg uart##n##_sam_config = { \ .regs = (Uart *)DT_INST_REG_ADDR(n), \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(n), \ \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ \ IRQ_FUNC_INIT \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_SAM_CONFIG_FUNC(n) \ static void uart##n##_sam_irq_config_func(const struct device *port) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ uart_sam_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_SAM_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = uart##n##_sam_irq_config_func #define UART_SAM_INIT_CFG(n) \ UART_SAM_DECLARE_CFG(n, UART_SAM_IRQ_CFG_FUNC_INIT(n)) #else #define UART_SAM_CONFIG_FUNC(n) #define UART_SAM_IRQ_CFG_FUNC_INIT #define UART_SAM_INIT_CFG(n) \ UART_SAM_DECLARE_CFG(n, UART_SAM_IRQ_CFG_FUNC_INIT) #endif #define UART_SAM_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ static struct uart_sam_dev_data uart##n##_sam_data = { \ .baud_rate = DT_INST_PROP(n, current_speed), \ }; \ \ static const struct uart_sam_dev_cfg uart##n##_sam_config; \ \ DEVICE_DT_INST_DEFINE(n, uart_sam_init, \ NULL, &uart##n##_sam_data, \ &uart##n##_sam_config, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_sam_driver_api); \ \ UART_SAM_CONFIG_FUNC(n) \ \ UART_SAM_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(UART_SAM_INIT) ```
/content/code_sandbox/drivers/serial/uart_sam.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,423
```unknown # # config UART_SEDI bool "Intel SEDI UART driver" default y depends on DT_HAS_INTEL_SEDI_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the Intel SEDI UART driver. This driver is simply a shim driver built upon the SEDI bare metal UART driver in the hal-intel module ```
/content/code_sandbox/drivers/serial/Kconfig.sedi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
80
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_uart #include <string.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/reset.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/pinctrl.h> #include <NuMicro.h> LOG_MODULE_REGISTER(numaker_uart, LOG_LEVEL_ERR); struct uart_numaker_config { UART_T *uart; const struct reset_dt_spec reset; uint32_t clk_modidx; uint32_t clk_src; uint32_t clk_div; const struct device *clk_dev; uint32_t irq_n; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif const struct pinctrl_dev_config *pincfg; }; struct uart_numaker_data { const struct device *clock; struct uart_config ucfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t user_cb; void *user_data; #endif }; static int uart_numaker_poll_in(const struct device *dev, unsigned char *c) { const struct uart_numaker_config *config = dev->config; uint32_t count; count = UART_Read(config->uart, c, 1); if (!count) { return -1; } return 0; } static void uart_numaker_poll_out(const struct device *dev, unsigned char c) { const struct uart_numaker_config *config = dev->config; UART_Write(config->uart, &c, 1); } static int uart_numaker_err_check(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; uint32_t flags = uart->FIFOSTS; int err = 0; if (flags & UART_FIFOSTS_RXOVIF_Msk) { err |= UART_ERROR_OVERRUN; } if (flags & UART_FIFOSTS_PEF_Msk) { err |= UART_ERROR_PARITY; } if (flags & UART_FIFOSTS_FEF_Msk) { err |= UART_ERROR_FRAMING; } if (flags & UART_FIFOSTS_BIF_Msk) { err |= UART_BREAK; } if (flags & (UART_FIFOSTS_BIF_Msk | UART_FIFOSTS_FEF_Msk | UART_FIFOSTS_PEF_Msk | UART_FIFOSTS_RXOVIF_Msk)) { uart->FIFOSTS = (UART_FIFOSTS_BIF_Msk | UART_FIFOSTS_FEF_Msk | UART_FIFOSTS_PEF_Msk | UART_FIFOSTS_RXOVIF_Msk); } return err; } 
static inline int32_t uart_numaker_convert_stopbit(enum uart_config_stop_bits sb) { switch (sb) { case UART_CFG_STOP_BITS_1: return UART_STOP_BIT_1; case UART_CFG_STOP_BITS_1_5: return UART_STOP_BIT_1_5; case UART_CFG_STOP_BITS_2: return UART_STOP_BIT_2; default: return -ENOTSUP; } }; static inline int32_t uart_numaker_convert_datalen(enum uart_config_data_bits db) { switch (db) { case UART_CFG_DATA_BITS_5: return UART_WORD_LEN_5; case UART_CFG_DATA_BITS_6: return UART_WORD_LEN_6; case UART_CFG_DATA_BITS_7: return UART_WORD_LEN_7; case UART_CFG_DATA_BITS_8: return UART_WORD_LEN_8; default: return -ENOTSUP; } } static inline uint32_t uart_numaker_convert_parity(enum uart_config_parity parity) { switch (parity) { case UART_CFG_PARITY_ODD: return UART_PARITY_ODD; case UART_CFG_PARITY_EVEN: return UART_PARITY_EVEN; case UART_CFG_PARITY_MARK: return UART_PARITY_MARK; case UART_CFG_PARITY_SPACE: return UART_PARITY_SPACE; case UART_CFG_PARITY_NONE: default: return UART_PARITY_NONE; } } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_numaker_configure(const struct device *dev, const struct uart_config *cfg) { const struct uart_numaker_config *config = dev->config; struct uart_numaker_data *pData = dev->data; int32_t databits, stopbits; uint32_t parity; databits = uart_numaker_convert_datalen(cfg->data_bits); if (databits < 0) { return databits; } stopbits = uart_numaker_convert_stopbit(cfg->stop_bits); if (stopbits < 0) { return stopbits; } if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE) { UART_DisableFlowCtrl(config->uart); } else if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) { UART_EnableFlowCtrl(config->uart); } else { return -ENOTSUP; } parity = uart_numaker_convert_parity(cfg->parity); UART_SetLineConfig(config->uart, cfg->baudrate, databits, parity, stopbits); memcpy(&pData->ucfg, cfg, sizeof(*cfg)); return 0; } static int uart_numaker_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_numaker_data *pData = dev->data; memcpy(cfg, 
&pData->ucfg, sizeof(*cfg)); return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_numaker_init(const struct device *dev) { const struct uart_numaker_config *config = dev->config; struct uart_numaker_data *pData = dev->data; int err = 0; SYS_UnlockReg(); struct numaker_scc_subsys scc_subsys; memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = config->clk_modidx; scc_subsys.pcc.clk_src = config->clk_src; scc_subsys.pcc.clk_div = config->clk_div; /* Equivalent to CLK_EnableModuleClock(clk_modidx) */ err = clock_control_on(config->clk_dev, (clock_control_subsys_t)&scc_subsys); if (err != 0) { goto move_exit; } /* Equivalent to CLK_SetModuleClock(clk_modidx, clk_src, clk_div) */ err = clock_control_configure(config->clk_dev, (clock_control_subsys_t)&scc_subsys, NULL); if (err != 0) { goto move_exit; } /* * Set pinctrl for UART0 RXD and TXD * Set multi-function pins for UART0 RXD and TXD */ err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { goto move_exit; } /* Same as BSP's SYS_ResetModule(id_rst) */ if (!device_is_ready(config->reset.dev)) { LOG_ERR("reset controller not ready"); return -ENODEV; } /* Reset UART to default state */ reset_line_toggle_dt(&config->reset); UART_Open(config->uart, pData->ucfg.baudrate); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif move_exit: SYS_LockReg(); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_numaker_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; int tx_bytes = 0; /* Check TX FIFO not full, then fill */ while (((size - tx_bytes) > 0) && (!(uart->FIFOSTS & UART_FIFOSTS_TXFULL_Msk))) { /* Fill one byte into TX FIFO */ uart->DAT = tx_data[tx_bytes++]; } return tx_bytes; } static int uart_numaker_fifo_read(const struct device *dev, uint8_t *rx_data, const 
int size) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; int rx_bytes = 0; /* Check RX FIFO not empty, then read */ while (((size - rx_bytes) > 0) && (!(uart->FIFOSTS & UART_FIFOSTS_RXEMPTY_Msk))) { /* Read one byte from UART RX FIFO */ rx_data[rx_bytes++] = (uint8_t)uart->DAT; } return rx_bytes; } static void uart_numaker_irq_tx_enable(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; UART_EnableInt(uart, UART_INTEN_THREIEN_Msk); } static void uart_numaker_irq_tx_disable(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; UART_DisableInt(uart, UART_INTEN_THREIEN_Msk); } static int uart_numaker_irq_tx_ready(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; return ((!UART_IS_TX_FULL(uart)) && (uart->INTEN & UART_INTEN_THREIEN_Msk)); } static int uart_numaker_irq_tx_complete(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; return (uart->INTSTS & UART_INTSTS_THREINT_Msk); } static void uart_numaker_irq_rx_enable(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; UART_EnableInt(uart, UART_INTEN_RDAIEN_Msk); } static void uart_numaker_irq_rx_disable(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; UART_DisableInt(uart, UART_INTEN_RDAIEN_Msk); } static int uart_numaker_irq_rx_ready(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; return ((!UART_GET_RX_EMPTY(uart)) && (uart->INTEN & UART_INTEN_RDAIEN_Msk)); } static void uart_numaker_irq_err_enable(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; UART_EnableInt(uart, 
UART_INTEN_BUFERRIEN_Msk | UART_INTEN_SWBEIEN_Msk); } static void uart_numaker_irq_err_disable(const struct device *dev) { const struct uart_numaker_config *config = dev->config; UART_T *uart = config->uart; UART_DisableInt(uart, UART_INTEN_BUFERRIEN_Msk | UART_INTEN_SWBEIEN_Msk); } static int uart_numaker_irq_is_pending(const struct device *dev) { return (uart_numaker_irq_tx_ready(dev) || (uart_numaker_irq_rx_ready(dev))); } static int uart_numaker_irq_update(const struct device *dev) { ARG_UNUSED(dev); /* nothing to be done */ return 1; } static void uart_numaker_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_numaker_data *pData = dev->data; pData->user_cb = cb; pData->user_data = cb_data; } static void uart_numaker_isr(const struct device *dev) { struct uart_numaker_data *pData = dev->data; if (pData->user_cb) { pData->user_cb(dev, pData->user_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_numaker_driver_api = { .poll_in = uart_numaker_poll_in, .poll_out = uart_numaker_poll_out, .err_check = uart_numaker_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_numaker_configure, .config_get = uart_numaker_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_numaker_fifo_fill, .fifo_read = uart_numaker_fifo_read, .irq_tx_enable = uart_numaker_irq_tx_enable, .irq_tx_disable = uart_numaker_irq_tx_disable, .irq_tx_ready = uart_numaker_irq_tx_ready, .irq_tx_complete = uart_numaker_irq_tx_complete, .irq_rx_enable = uart_numaker_irq_rx_enable, .irq_rx_disable = uart_numaker_irq_rx_disable, .irq_rx_ready = uart_numaker_irq_rx_ready, .irq_err_enable = uart_numaker_irq_err_enable, .irq_err_disable = uart_numaker_irq_err_disable, .irq_is_pending = uart_numaker_irq_is_pending, .irq_update = uart_numaker_irq_update, .irq_callback_set = uart_numaker_irq_callback_set, #endif }; #define CLOCK_CTRL_INIT(n) .clk_dev = 
DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(n))), #define PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n); #define PINCTRL_INIT(n) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define NUMAKER_UART_IRQ_CONFIG_FUNC(n) \ static void uart_numaker_irq_config_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), uart_numaker_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #define IRQ_FUNC_INIT(n) .irq_config_func = uart_numaker_irq_config_##n #else #define NUMAKER_UART_IRQ_CONFIG_FUNC(n) #define IRQ_FUNC_INIT(n) #endif #define NUMAKER_UART_INIT(inst) \ PINCTRL_DEFINE(inst) \ NUMAKER_UART_IRQ_CONFIG_FUNC(inst) \ \ static const struct uart_numaker_config uart_numaker_cfg_##inst = { \ .uart = (UART_T *)DT_INST_REG_ADDR(inst), \ .reset = RESET_DT_SPEC_INST_GET(inst), \ .clk_modidx = DT_INST_CLOCKS_CELL(inst, clock_module_index), \ .clk_src = DT_INST_CLOCKS_CELL(inst, clock_source), \ .clk_div = DT_INST_CLOCKS_CELL(inst, clock_divider), \ CLOCK_CTRL_INIT(inst).irq_n = DT_INST_IRQN(inst), \ PINCTRL_INIT(inst) IRQ_FUNC_INIT(inst)}; \ \ static struct uart_numaker_data uart_numaker_data_##inst = { \ .ucfg = \ { \ .baudrate = DT_INST_PROP(inst, current_speed), \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(inst, uart_numaker_init, NULL, &uart_numaker_data_##inst, \ &uart_numaker_cfg_##inst, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_numaker_driver_api); DT_INST_FOREACH_STATUS_OKAY(NUMAKER_UART_INIT) ```
/content/code_sandbox/drivers/serial/uart_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,348
```unknown # Atmel SAM UART configuration options config UART_SAM bool "Atmel SAM MCU family UART driver" default y depends on DT_HAS_ATMEL_SAM_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the UARTx driver for Atmel SAM MCUs. ```
/content/code_sandbox/drivers/serial/Kconfig.uart_sam
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
67
```c /* * */ #define DT_DRV_COMPAT st_stm32_uart /** * @brief Driver for UART port on STM32 family processor. * @note LPUART and U(S)ART have the same base and * majority of operations are performed the same way. * Please validate for newly added series. */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/sys/__assert.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/drivers/interrupt_controller/exti_stm32.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/pm/policy.h> #include <zephyr/pm/device.h> #ifdef CONFIG_UART_ASYNC_API #include <zephyr/drivers/dma/dma_stm32.h> #include <zephyr/drivers/dma.h> #endif #include <zephyr/linker/sections.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include "uart_stm32.h" #include <stm32_ll_usart.h> #include <stm32_ll_lpuart.h> #if defined(CONFIG_PM) && defined(IS_UART_WAKEUP_FROMSTOP_INSTANCE) #include <stm32_ll_exti.h> #endif /* CONFIG_PM */ #ifdef CONFIG_DCACHE #include <zephyr/linker/linker-defs.h> #include <zephyr/mem_mgmt/mem_attr.h> #include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h> #endif /* CONFIG_DCACHE */ #include <zephyr/logging/log.h> #include <zephyr/irq.h> LOG_MODULE_REGISTER(uart_stm32, CONFIG_UART_LOG_LEVEL); /* This symbol takes the value 1 if one of the device instances */ /* is configured in dts with a domain clock */ #if STM32_DT_INST_DEV_DOMAIN_CLOCK_SUPPORT #define STM32_UART_DOMAIN_CLOCK_SUPPORT 1 #else #define STM32_UART_DOMAIN_CLOCK_SUPPORT 0 #endif #define HAS_LPUART DT_HAS_COMPAT_STATUS_OKAY(st_stm32_lpuart) /* Available everywhere except l1, f1, f2, f4. 
*/ #ifdef USART_CR3_DEM #define HAS_DRIVER_ENABLE 1 #else #define HAS_DRIVER_ENABLE 0 #endif #if HAS_LPUART #ifdef USART_PRESC_PRESCALER uint32_t lpuartdiv_calc(const uint64_t clock_rate, const uint16_t presc_idx, const uint32_t baud_rate) { uint64_t lpuartdiv; lpuartdiv = clock_rate / LPUART_PRESCALER_TAB[presc_idx]; lpuartdiv *= LPUART_LPUARTDIV_FREQ_MUL; lpuartdiv += baud_rate / 2; lpuartdiv /= baud_rate; return (uint32_t)lpuartdiv; } #else uint32_t lpuartdiv_calc(const uint64_t clock_rate, const uint32_t baud_rate) { uint64_t lpuartdiv; lpuartdiv = clock_rate * LPUART_LPUARTDIV_FREQ_MUL; lpuartdiv += baud_rate / 2; lpuartdiv /= baud_rate; return (uint32_t)lpuartdiv; } #endif /* USART_PRESC_PRESCALER */ #endif /* HAS_LPUART */ #ifdef CONFIG_PM static void uart_stm32_pm_policy_state_lock_get(const struct device *dev) { struct uart_stm32_data *data = dev->data; if (!data->pm_policy_state_on) { data->pm_policy_state_on = true; pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); if (IS_ENABLED(CONFIG_PM_S2RAM)) { pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES); } } } static void uart_stm32_pm_policy_state_lock_put(const struct device *dev) { struct uart_stm32_data *data = dev->data; if (data->pm_policy_state_on) { data->pm_policy_state_on = false; pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES); if (IS_ENABLED(CONFIG_PM_S2RAM)) { pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES); } } } #endif /* CONFIG_PM */ static inline void uart_stm32_set_baudrate(const struct device *dev, uint32_t baud_rate) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; uint32_t clock_rate; /* Get clock rate */ if (IS_ENABLED(STM32_UART_DOMAIN_CLOCK_SUPPORT) && (config->pclk_len > 1)) { if (clock_control_get_rate(data->clock, (clock_control_subsys_t)&config->pclken[1], &clock_rate) < 0) { LOG_ERR("Failed call 
clock_control_get_rate(pclken[1])"); return; } } else { if (clock_control_get_rate(data->clock, (clock_control_subsys_t)&config->pclken[0], &clock_rate) < 0) { LOG_ERR("Failed call clock_control_get_rate(pclken[0])"); return; } } #if HAS_LPUART if (IS_LPUART_INSTANCE(usart)) { uint32_t lpuartdiv; #ifdef USART_PRESC_PRESCALER uint8_t presc_idx; uint32_t presc_val; for (presc_idx = 0; presc_idx < ARRAY_SIZE(LPUART_PRESCALER_TAB); presc_idx++) { lpuartdiv = lpuartdiv_calc(clock_rate, presc_idx, baud_rate); if (lpuartdiv >= LPUART_BRR_MIN_VALUE && lpuartdiv <= LPUART_BRR_MASK) { break; } } if (presc_idx == ARRAY_SIZE(LPUART_PRESCALER_TAB)) { LOG_ERR("Unable to set %s to %d", dev->name, baud_rate); return; } presc_val = presc_idx << USART_PRESC_PRESCALER_Pos; LL_LPUART_SetPrescaler(usart, presc_val); #else lpuartdiv = lpuartdiv_calc(clock_rate, baud_rate); if (lpuartdiv < LPUART_BRR_MIN_VALUE || lpuartdiv > LPUART_BRR_MASK) { LOG_ERR("Unable to set %s to %d", dev->name, baud_rate); return; } #endif /* USART_PRESC_PRESCALER */ LL_LPUART_SetBaudRate(usart, clock_rate, #ifdef USART_PRESC_PRESCALER presc_val, #endif baud_rate); /* Check BRR is greater than or equal to 0x300 */ __ASSERT(LL_LPUART_ReadReg(usart, BRR) >= 0x300U, "BaudRateReg >= 0x300"); /* Check BRR is lower than or equal to 0xFFFFF */ __ASSERT(LL_LPUART_ReadReg(usart, BRR) < 0x000FFFFFU, "BaudRateReg < 0xFFFF"); } else { #endif /* HAS_LPUART */ #ifdef USART_CR1_OVER8 LL_USART_SetOverSampling(usart, LL_USART_OVERSAMPLING_16); #endif LL_USART_SetBaudRate(usart, clock_rate, #ifdef USART_PRESC_PRESCALER LL_USART_PRESCALER_DIV1, #endif #ifdef USART_CR1_OVER8 LL_USART_OVERSAMPLING_16, #endif baud_rate); /* Check BRR is greater than or equal to 16d */ __ASSERT(LL_USART_ReadReg(usart, BRR) >= 16, "BaudRateReg >= 16"); #if HAS_LPUART } #endif /* HAS_LPUART */ } static inline void uart_stm32_set_parity(const struct device *dev, uint32_t parity) { const struct uart_stm32_config *config = dev->config; 
LL_USART_SetParity(config->usart, parity); } static inline uint32_t uart_stm32_get_parity(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return LL_USART_GetParity(config->usart); } static inline void uart_stm32_set_stopbits(const struct device *dev, uint32_t stopbits) { const struct uart_stm32_config *config = dev->config; LL_USART_SetStopBitsLength(config->usart, stopbits); } static inline uint32_t uart_stm32_get_stopbits(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return LL_USART_GetStopBitsLength(config->usart); } static inline void uart_stm32_set_databits(const struct device *dev, uint32_t databits) { const struct uart_stm32_config *config = dev->config; LL_USART_SetDataWidth(config->usart, databits); } static inline uint32_t uart_stm32_get_databits(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return LL_USART_GetDataWidth(config->usart); } static inline void uart_stm32_set_hwctrl(const struct device *dev, uint32_t hwctrl) { const struct uart_stm32_config *config = dev->config; LL_USART_SetHWFlowCtrl(config->usart, hwctrl); } static inline uint32_t uart_stm32_get_hwctrl(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return LL_USART_GetHWFlowCtrl(config->usart); } #if HAS_DRIVER_ENABLE static inline void uart_stm32_set_driver_enable(const struct device *dev, bool driver_enable) { const struct uart_stm32_config *config = dev->config; if (driver_enable) { LL_USART_EnableDEMode(config->usart); } else { LL_USART_DisableDEMode(config->usart); } } static inline bool uart_stm32_get_driver_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return LL_USART_IsEnabledDEMode(config->usart); } #endif static inline uint32_t uart_stm32_cfg2ll_parity(enum uart_config_parity parity) { switch (parity) { case UART_CFG_PARITY_ODD: return LL_USART_PARITY_ODD; case UART_CFG_PARITY_EVEN: return 
LL_USART_PARITY_EVEN; case UART_CFG_PARITY_NONE: default: return LL_USART_PARITY_NONE; } } static inline enum uart_config_parity uart_stm32_ll2cfg_parity(uint32_t parity) { switch (parity) { case LL_USART_PARITY_ODD: return UART_CFG_PARITY_ODD; case LL_USART_PARITY_EVEN: return UART_CFG_PARITY_EVEN; case LL_USART_PARITY_NONE: default: return UART_CFG_PARITY_NONE; } } static inline uint32_t uart_stm32_cfg2ll_stopbits(const struct uart_stm32_config *config, enum uart_config_stop_bits sb) { switch (sb) { /* Some MCU's don't support 0.5 stop bits */ #ifdef LL_USART_STOPBITS_0_5 case UART_CFG_STOP_BITS_0_5: #if HAS_LPUART if (IS_LPUART_INSTANCE(config->usart)) { /* return the default */ return LL_USART_STOPBITS_1; } #endif /* HAS_LPUART */ return LL_USART_STOPBITS_0_5; #endif /* LL_USART_STOPBITS_0_5 */ case UART_CFG_STOP_BITS_1: return LL_USART_STOPBITS_1; /* Some MCU's don't support 1.5 stop bits */ #ifdef LL_USART_STOPBITS_1_5 case UART_CFG_STOP_BITS_1_5: #if HAS_LPUART if (IS_LPUART_INSTANCE(config->usart)) { /* return the default */ return LL_USART_STOPBITS_2; } #endif return LL_USART_STOPBITS_1_5; #endif /* LL_USART_STOPBITS_1_5 */ case UART_CFG_STOP_BITS_2: default: return LL_USART_STOPBITS_2; } } static inline enum uart_config_stop_bits uart_stm32_ll2cfg_stopbits(uint32_t sb) { switch (sb) { /* Some MCU's don't support 0.5 stop bits */ #ifdef LL_USART_STOPBITS_0_5 case LL_USART_STOPBITS_0_5: return UART_CFG_STOP_BITS_0_5; #endif /* LL_USART_STOPBITS_0_5 */ case LL_USART_STOPBITS_1: return UART_CFG_STOP_BITS_1; /* Some MCU's don't support 1.5 stop bits */ #ifdef LL_USART_STOPBITS_1_5 case LL_USART_STOPBITS_1_5: return UART_CFG_STOP_BITS_1_5; #endif /* LL_USART_STOPBITS_1_5 */ case LL_USART_STOPBITS_2: default: return UART_CFG_STOP_BITS_2; } } static inline uint32_t uart_stm32_cfg2ll_databits(enum uart_config_data_bits db, enum uart_config_parity p) { switch (db) { /* Some MCU's don't support 7B or 9B datawidth */ #ifdef LL_USART_DATAWIDTH_7B case 
UART_CFG_DATA_BITS_7: if (p == UART_CFG_PARITY_NONE) { return LL_USART_DATAWIDTH_7B; } else { return LL_USART_DATAWIDTH_8B; } #endif /* LL_USART_DATAWIDTH_7B */ #ifdef LL_USART_DATAWIDTH_9B case UART_CFG_DATA_BITS_9: return LL_USART_DATAWIDTH_9B; #endif /* LL_USART_DATAWIDTH_9B */ case UART_CFG_DATA_BITS_8: default: if (p == UART_CFG_PARITY_NONE) { return LL_USART_DATAWIDTH_8B; #ifdef LL_USART_DATAWIDTH_9B } else { return LL_USART_DATAWIDTH_9B; #endif } return LL_USART_DATAWIDTH_8B; } } static inline enum uart_config_data_bits uart_stm32_ll2cfg_databits(uint32_t db, uint32_t p) { switch (db) { /* Some MCU's don't support 7B or 9B datawidth */ #ifdef LL_USART_DATAWIDTH_7B case LL_USART_DATAWIDTH_7B: if (p == LL_USART_PARITY_NONE) { return UART_CFG_DATA_BITS_7; } else { return UART_CFG_DATA_BITS_6; } #endif /* LL_USART_DATAWIDTH_7B */ #ifdef LL_USART_DATAWIDTH_9B case LL_USART_DATAWIDTH_9B: if (p == LL_USART_PARITY_NONE) { return UART_CFG_DATA_BITS_9; } else { return UART_CFG_DATA_BITS_8; } #endif /* LL_USART_DATAWIDTH_9B */ case LL_USART_DATAWIDTH_8B: default: if (p == LL_USART_PARITY_NONE) { return UART_CFG_DATA_BITS_8; } else { return UART_CFG_DATA_BITS_7; } } } /** * @brief Get LL hardware flow control define from * Zephyr hardware flow control option. * @note Supports only UART_CFG_FLOW_CTRL_RTS_CTS and UART_CFG_FLOW_CTRL_RS485. * @param fc: Zephyr hardware flow control option. * @retval LL_USART_HWCONTROL_RTS_CTS, or LL_USART_HWCONTROL_NONE. */ static inline uint32_t uart_stm32_cfg2ll_hwctrl(enum uart_config_flow_control fc) { if (fc == UART_CFG_FLOW_CTRL_RTS_CTS) { return LL_USART_HWCONTROL_RTS_CTS; } else if (fc == UART_CFG_FLOW_CTRL_RS485) { /* Driver Enable is handled separately */ return LL_USART_HWCONTROL_NONE; } return LL_USART_HWCONTROL_NONE; } /** * @brief Get Zephyr hardware flow control option from * LL hardware flow control define. * @note Supports only LL_USART_HWCONTROL_RTS_CTS. * @param fc: LL hardware flow control definition. 
* @retval UART_CFG_FLOW_CTRL_RTS_CTS, or UART_CFG_FLOW_CTRL_NONE. */ static inline enum uart_config_flow_control uart_stm32_ll2cfg_hwctrl(uint32_t fc) { if (fc == LL_USART_HWCONTROL_RTS_CTS) { return UART_CFG_FLOW_CTRL_RTS_CTS; } return UART_CFG_FLOW_CTRL_NONE; } static void uart_stm32_parameters_set(const struct device *dev, const struct uart_config *cfg) { const struct uart_stm32_config *config = dev->config; struct uart_stm32_data *data = dev->data; struct uart_config *uart_cfg = data->uart_cfg; const uint32_t parity = uart_stm32_cfg2ll_parity(cfg->parity); const uint32_t stopbits = uart_stm32_cfg2ll_stopbits(config, cfg->stop_bits); const uint32_t databits = uart_stm32_cfg2ll_databits(cfg->data_bits, cfg->parity); const uint32_t flowctrl = uart_stm32_cfg2ll_hwctrl(cfg->flow_ctrl); #if HAS_DRIVER_ENABLE bool driver_enable = cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485; #endif if (cfg == uart_cfg) { /* Called via (re-)init function, so the SoC either just booted, * or is returning from a low-power state where it lost register * contents */ LL_USART_ConfigCharacter(config->usart, databits, parity, stopbits); uart_stm32_set_hwctrl(dev, flowctrl); uart_stm32_set_baudrate(dev, cfg->baudrate); } else { /* Called from application/subsys via uart_configure syscall */ if (parity != uart_stm32_get_parity(dev)) { uart_stm32_set_parity(dev, parity); } if (stopbits != uart_stm32_get_stopbits(dev)) { uart_stm32_set_stopbits(dev, stopbits); } if (databits != uart_stm32_get_databits(dev)) { uart_stm32_set_databits(dev, databits); } if (flowctrl != uart_stm32_get_hwctrl(dev)) { uart_stm32_set_hwctrl(dev, flowctrl); } #if HAS_DRIVER_ENABLE if (driver_enable != uart_stm32_get_driver_enable(dev)) { uart_stm32_set_driver_enable(dev, driver_enable); } #endif if (cfg->baudrate != uart_cfg->baudrate) { uart_stm32_set_baudrate(dev, cfg->baudrate); uart_cfg->baudrate = cfg->baudrate; } } } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_stm32_configure(const struct device *dev, 
const struct uart_config *cfg) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; struct uart_config *uart_cfg = data->uart_cfg; const uint32_t parity = uart_stm32_cfg2ll_parity(cfg->parity); const uint32_t stopbits = uart_stm32_cfg2ll_stopbits(config, cfg->stop_bits); const uint32_t databits = uart_stm32_cfg2ll_databits(cfg->data_bits, cfg->parity); /* Hardware doesn't support mark or space parity */ if ((cfg->parity == UART_CFG_PARITY_MARK) || (cfg->parity == UART_CFG_PARITY_SPACE)) { return -ENOTSUP; } /* Driver does not supports parity + 9 databits */ if ((cfg->parity != UART_CFG_PARITY_NONE) && (cfg->data_bits == UART_CFG_DATA_BITS_9)) { return -ENOTSUP; } /* When the transformed ll stop bits don't match with what was requested, then it's not * supported */ if (uart_stm32_ll2cfg_stopbits(stopbits) != cfg->stop_bits) { return -ENOTSUP; } /* When the transformed ll databits don't match with what was requested, then it's not * supported */ if (uart_stm32_ll2cfg_databits(databits, parity) != cfg->data_bits) { return -ENOTSUP; } /* Driver supports only RTS/CTS and RS485 flow control */ if (!(cfg->flow_ctrl == UART_CFG_FLOW_CTRL_NONE || (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS && IS_UART_HWFLOW_INSTANCE(usart)) #if HAS_DRIVER_ENABLE || (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RS485 && IS_UART_DRIVER_ENABLE_INSTANCE(usart)) #endif )) { return -ENOTSUP; } LL_USART_Disable(usart); /* Set basic parameters, such as data-/stop-bit, parity, and baudrate */ uart_stm32_parameters_set(dev, cfg); LL_USART_Enable(usart); /* Upon successful configuration, persist the syscall-passed * uart_config. * This allows restoring it, should the device return from a low-power * mode in which register contents are lost. 
*/ *uart_cfg = *cfg; return 0; }; static int uart_stm32_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_stm32_data *data = dev->data; struct uart_config *uart_cfg = data->uart_cfg; cfg->baudrate = uart_cfg->baudrate; cfg->parity = uart_stm32_ll2cfg_parity(uart_stm32_get_parity(dev)); cfg->stop_bits = uart_stm32_ll2cfg_stopbits( uart_stm32_get_stopbits(dev)); cfg->data_bits = uart_stm32_ll2cfg_databits( uart_stm32_get_databits(dev), uart_stm32_get_parity(dev)); cfg->flow_ctrl = uart_stm32_ll2cfg_hwctrl( uart_stm32_get_hwctrl(dev)); #if HAS_DRIVER_ENABLE if (uart_stm32_get_driver_enable(dev)) { cfg->flow_ctrl = UART_CFG_FLOW_CTRL_RS485; } #endif return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ typedef void (*poll_in_fn)( USART_TypeDef *usart, void *in); static int uart_stm32_poll_in_visitor(const struct device *dev, void *in, poll_in_fn get_fn) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; /* Clear overrun error flag */ if (LL_USART_IsActiveFlag_ORE(usart)) { LL_USART_ClearFlag_ORE(usart); } /* * On stm32 F4X, F1X, and F2X, the RXNE flag is affected (cleared) by * the uart_err_check function call (on errors flags clearing) */ if (!LL_USART_IsActiveFlag_RXNE(usart)) { return -1; } get_fn(usart, in); return 0; } typedef void (*poll_out_fn)( USART_TypeDef *usart, void *out); static void uart_stm32_poll_out_visitor(const struct device *dev, void *out, poll_out_fn set_fn) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; #ifdef CONFIG_PM struct uart_stm32_data *data = dev->data; #endif unsigned int key; /* Wait for TXE flag to be raised * When TXE flag is raised, we lock interrupts to prevent interrupts (notably that of usart) * or thread switch. Then, we can safely send our character. 
The character sent will be * interlaced with the characters potentially send with interrupt transmission API */ while (1) { if (LL_USART_IsActiveFlag_TXE(usart)) { key = irq_lock(); if (LL_USART_IsActiveFlag_TXE(usart)) { break; } irq_unlock(key); } } #ifdef CONFIG_PM /* If an interrupt transmission is in progress, the pm constraint is already managed by the * call of uart_stm32_irq_tx_[en|dis]able */ if (!data->tx_poll_stream_on && !data->tx_int_stream_on) { data->tx_poll_stream_on = true; /* Don't allow system to suspend until stream * transmission has completed */ uart_stm32_pm_policy_state_lock_get(dev); /* Enable TC interrupt so we can release suspend * constraint when done */ LL_USART_EnableIT_TC(usart); } #endif /* CONFIG_PM */ set_fn(usart, out); irq_unlock(key); } static void poll_in_u8(USART_TypeDef *usart, void *in) { *((unsigned char *)in) = (unsigned char)LL_USART_ReceiveData8(usart); } static void poll_out_u8(USART_TypeDef *usart, void *out) { LL_USART_TransmitData8(usart, *((uint8_t *)out)); } static int uart_stm32_poll_in(const struct device *dev, unsigned char *c) { return uart_stm32_poll_in_visitor(dev, (void *)c, poll_in_u8); } static void uart_stm32_poll_out(const struct device *dev, unsigned char c) { uart_stm32_poll_out_visitor(dev, (void *)&c, poll_out_u8); } #ifdef CONFIG_UART_WIDE_DATA static void poll_out_u9(USART_TypeDef *usart, void *out) { LL_USART_TransmitData9(usart, *((uint16_t *)out)); } static void poll_in_u9(USART_TypeDef *usart, void *in) { *((uint16_t *)in) = LL_USART_ReceiveData9(usart); } static int uart_stm32_poll_in_u16(const struct device *dev, uint16_t *in_u16) { return uart_stm32_poll_in_visitor(dev, (void *)in_u16, poll_in_u9); } static void uart_stm32_poll_out_u16(const struct device *dev, uint16_t out_u16) { uart_stm32_poll_out_visitor(dev, (void *)&out_u16, poll_out_u9); } #endif static int uart_stm32_err_check(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = 
config->usart; uint32_t err = 0U; /* Check for errors, then clear them. * Some SoC clear all error flags when at least * one is cleared. (e.g. F4X, F1X, and F2X). * The stm32 F4X, F1X, and F2X also reads the usart DR when clearing Errors */ if (LL_USART_IsActiveFlag_ORE(usart)) { err |= UART_ERROR_OVERRUN; } if (LL_USART_IsActiveFlag_PE(usart)) { err |= UART_ERROR_PARITY; } if (LL_USART_IsActiveFlag_FE(usart)) { err |= UART_ERROR_FRAMING; } if (LL_USART_IsActiveFlag_NE(usart)) { err |= UART_ERROR_NOISE; } #if !defined(CONFIG_SOC_SERIES_STM32F0X) || defined(USART_LIN_SUPPORT) if (LL_USART_IsActiveFlag_LBD(usart)) { err |= UART_BREAK; } if (err & UART_BREAK) { LL_USART_ClearFlag_LBD(usart); } #endif /* Clearing error : * the stm32 F4X, F1X, and F2X sw sequence is reading the usart SR * then the usart DR to clear the Error flags ORE, PE, FE, NE * --> so is the RXNE flag also cleared ! */ if (err & UART_ERROR_OVERRUN) { LL_USART_ClearFlag_ORE(usart); } if (err & UART_ERROR_PARITY) { LL_USART_ClearFlag_PE(usart); } if (err & UART_ERROR_FRAMING) { LL_USART_ClearFlag_FE(usart); } if (err & UART_ERROR_NOISE) { LL_USART_ClearFlag_NE(usart); } return err; } static inline void __uart_stm32_get_clock(const struct device *dev) { struct uart_stm32_data *data = dev->data; const struct device *const clk = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); data->clock = clk; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN typedef void (*fifo_fill_fn)(USART_TypeDef *usart, const void *tx_data, const uint8_t offset); static int uart_stm32_fifo_fill_visitor(const struct device *dev, const void *tx_data, int size, fifo_fill_fn fill_fn) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; uint8_t num_tx = 0U; unsigned int key; if (!LL_USART_IsActiveFlag_TXE(usart)) { return num_tx; } /* Lock interrupts to prevent nested interrupts or thread switch */ key = irq_lock(); while ((size - num_tx > 0) && LL_USART_IsActiveFlag_TXE(usart)) { /* TXE flag will be cleared with 
byte write to DR|RDR register */ /* Send a character */ fill_fn(usart, tx_data, num_tx); num_tx++; } irq_unlock(key); return num_tx; } static void fifo_fill_with_u8(USART_TypeDef *usart, const void *tx_data, const uint8_t offset) { const uint8_t *data = (const uint8_t *)tx_data; /* Send a character (8bit) */ LL_USART_TransmitData8(usart, data[offset]); } static int uart_stm32_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { if (uart_stm32_ll2cfg_databits(uart_stm32_get_databits(dev), uart_stm32_get_parity(dev)) == UART_CFG_DATA_BITS_9) { return -ENOTSUP; } return uart_stm32_fifo_fill_visitor(dev, (const void *)tx_data, size, fifo_fill_with_u8); } typedef void (*fifo_read_fn)(USART_TypeDef *usart, void *rx_data, const uint8_t offset); static int uart_stm32_fifo_read_visitor(const struct device *dev, void *rx_data, const int size, fifo_read_fn read_fn) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; uint8_t num_rx = 0U; while ((size - num_rx > 0) && LL_USART_IsActiveFlag_RXNE(usart)) { /* RXNE flag will be cleared upon read from DR|RDR register */ read_fn(usart, rx_data, num_rx); num_rx++; /* Clear overrun error flag */ if (LL_USART_IsActiveFlag_ORE(usart)) { LL_USART_ClearFlag_ORE(usart); /* * On stm32 F4X, F1X, and F2X, the RXNE flag is affected (cleared) by * the uart_err_check function call (on errors flags clearing) */ } } return num_rx; } static void fifo_read_with_u8(USART_TypeDef *usart, void *rx_data, const uint8_t offset) { uint8_t *data = (uint8_t *)rx_data; data[offset] = LL_USART_ReceiveData8(usart); } static int uart_stm32_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { if (uart_stm32_ll2cfg_databits(uart_stm32_get_databits(dev), uart_stm32_get_parity(dev)) == UART_CFG_DATA_BITS_9) { return -ENOTSUP; } return uart_stm32_fifo_read_visitor(dev, (void *)rx_data, size, fifo_read_with_u8); } #ifdef CONFIG_UART_WIDE_DATA static void fifo_fill_with_u16(USART_TypeDef 
*usart, const void *tx_data, const uint8_t offset) { const uint16_t *data = (const uint16_t *)tx_data; /* Send a character (9bit) */ LL_USART_TransmitData9(usart, data[offset]); } static int uart_stm32_fifo_fill_u16(const struct device *dev, const uint16_t *tx_data, int size) { if (uart_stm32_ll2cfg_databits(uart_stm32_get_databits(dev), uart_stm32_get_parity(dev)) != UART_CFG_DATA_BITS_9) { return -ENOTSUP; } return uart_stm32_fifo_fill_visitor(dev, (const void *)tx_data, size, fifo_fill_with_u16); } static void fifo_read_with_u16(USART_TypeDef *usart, void *rx_data, const uint8_t offset) { uint16_t *data = (uint16_t *)rx_data; data[offset] = LL_USART_ReceiveData9(usart); } static int uart_stm32_fifo_read_u16(const struct device *dev, uint16_t *rx_data, const int size) { if (uart_stm32_ll2cfg_databits(uart_stm32_get_databits(dev), uart_stm32_get_parity(dev)) != UART_CFG_DATA_BITS_9) { return -ENOTSUP; } return uart_stm32_fifo_read_visitor(dev, (void *)rx_data, size, fifo_read_with_u16); } #endif static void uart_stm32_irq_tx_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; #ifdef CONFIG_PM struct uart_stm32_data *data = dev->data; unsigned int key; #endif #ifdef CONFIG_PM key = irq_lock(); data->tx_poll_stream_on = false; data->tx_int_stream_on = true; uart_stm32_pm_policy_state_lock_get(dev); #endif LL_USART_EnableIT_TC(config->usart); #ifdef CONFIG_PM irq_unlock(key); #endif } static void uart_stm32_irq_tx_disable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; #ifdef CONFIG_PM struct uart_stm32_data *data = dev->data; unsigned int key; key = irq_lock(); #endif LL_USART_DisableIT_TC(config->usart); #ifdef CONFIG_PM data->tx_int_stream_on = false; uart_stm32_pm_policy_state_lock_put(dev); #endif #ifdef CONFIG_PM irq_unlock(key); #endif } static int uart_stm32_irq_tx_ready(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return 
LL_USART_IsActiveFlag_TXE(config->usart) && LL_USART_IsEnabledIT_TC(config->usart); } static int uart_stm32_irq_tx_complete(const struct device *dev) { const struct uart_stm32_config *config = dev->config; return LL_USART_IsActiveFlag_TC(config->usart); } static void uart_stm32_irq_rx_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; LL_USART_EnableIT_RXNE(config->usart); } static void uart_stm32_irq_rx_disable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; LL_USART_DisableIT_RXNE(config->usart); } static int uart_stm32_irq_rx_ready(const struct device *dev) { const struct uart_stm32_config *config = dev->config; /* * On stm32 F4X, F1X, and F2X, the RXNE flag is affected (cleared) by * the uart_err_check function call (on errors flags clearing) */ return LL_USART_IsActiveFlag_RXNE(config->usart); } static void uart_stm32_irq_err_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; /* Enable FE, ORE interruptions */ LL_USART_EnableIT_ERROR(usart); #if !defined(CONFIG_SOC_SERIES_STM32F0X) || defined(USART_LIN_SUPPORT) /* Enable Line break detection */ if (IS_UART_LIN_INSTANCE(usart)) { LL_USART_EnableIT_LBD(usart); } #endif /* Enable parity error interruption */ LL_USART_EnableIT_PE(usart); } static void uart_stm32_irq_err_disable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; /* Disable FE, ORE interruptions */ LL_USART_DisableIT_ERROR(usart); #if !defined(CONFIG_SOC_SERIES_STM32F0X) || defined(USART_LIN_SUPPORT) /* Disable Line break detection */ if (IS_UART_LIN_INSTANCE(usart)) { LL_USART_DisableIT_LBD(usart); } #endif /* Disable parity error interruption */ LL_USART_DisableIT_PE(usart); } static int uart_stm32_irq_is_pending(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; 
return ((LL_USART_IsActiveFlag_RXNE(usart) && LL_USART_IsEnabledIT_RXNE(usart)) || (LL_USART_IsActiveFlag_TC(usart) && LL_USART_IsEnabledIT_TC(usart))); } static int uart_stm32_irq_update(const struct device *dev) { return 1; } static void uart_stm32_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_stm32_data *data = dev->data; data->user_cb = cb; data->user_data = cb_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->async_cb = NULL; data->async_user_data = NULL; #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API static inline void async_user_callback(struct uart_stm32_data *data, struct uart_event *event) { if (data->async_cb) { data->async_cb(data->uart_dev, event, data->async_user_data); } } static inline void async_evt_rx_rdy(struct uart_stm32_data *data) { LOG_DBG("rx_rdy: (%d %d)", data->dma_rx.offset, data->dma_rx.counter); struct uart_event event = { .type = UART_RX_RDY, .data.rx.buf = data->dma_rx.buffer, .data.rx.len = data->dma_rx.counter - data->dma_rx.offset, .data.rx.offset = data->dma_rx.offset }; /* update the current pos for new data */ data->dma_rx.offset = data->dma_rx.counter; /* send event only for new data */ if (event.data.rx.len > 0) { async_user_callback(data, &event); } } static inline void async_evt_rx_err(struct uart_stm32_data *data, int err_code) { LOG_DBG("rx error: %d", err_code); struct uart_event event = { .type = UART_RX_STOPPED, .data.rx_stop.reason = err_code, .data.rx_stop.data.len = data->dma_rx.counter, .data.rx_stop.data.offset = 0, .data.rx_stop.data.buf = data->dma_rx.buffer }; async_user_callback(data, &event); } static inline void async_evt_tx_done(struct uart_stm32_data *data) { LOG_DBG("tx done: %d", data->dma_tx.counter); struct uart_event event = { .type = UART_TX_DONE, .data.tx.buf = data->dma_tx.buffer, .data.tx.len = data->dma_tx.counter }; /* Reset tx buffer */ data->dma_tx.buffer_length = 0; data->dma_tx.counter = 0; 
async_user_callback(data, &event); } static inline void async_evt_tx_abort(struct uart_stm32_data *data) { LOG_DBG("tx abort: %d", data->dma_tx.counter); struct uart_event event = { .type = UART_TX_ABORTED, .data.tx.buf = data->dma_tx.buffer, .data.tx.len = data->dma_tx.counter }; /* Reset tx buffer */ data->dma_tx.buffer_length = 0; data->dma_tx.counter = 0; async_user_callback(data, &event); } static inline void async_evt_rx_buf_request(struct uart_stm32_data *data) { struct uart_event evt = { .type = UART_RX_BUF_REQUEST, }; async_user_callback(data, &evt); } static inline void async_evt_rx_buf_release(struct uart_stm32_data *data) { struct uart_event evt = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->dma_rx.buffer, }; async_user_callback(data, &evt); } static inline void async_timer_start(struct k_work_delayable *work, int32_t timeout) { if ((timeout != SYS_FOREVER_US) && (timeout != 0)) { /* start timer */ LOG_DBG("async timer started for %d us", timeout); k_work_reschedule(work, K_USEC(timeout)); } } static void uart_stm32_dma_rx_flush(const struct device *dev) { struct dma_status stat; struct uart_stm32_data *data = dev->data; if (dma_get_status(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &stat) == 0) { size_t rx_rcv_len = data->dma_rx.buffer_length - stat.pending_length; if (rx_rcv_len > data->dma_rx.offset) { data->dma_rx.counter = rx_rcv_len; async_evt_rx_rdy(data); } } } #endif /* CONFIG_UART_ASYNC_API */ #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || \ defined(CONFIG_UART_ASYNC_API) || \ defined(CONFIG_PM) static void uart_stm32_isr(const struct device *dev) { struct uart_stm32_data *data = dev->data; #if defined(CONFIG_PM) || defined(CONFIG_UART_ASYNC_API) const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; #endif #ifdef CONFIG_PM if (LL_USART_IsEnabledIT_TC(usart) && LL_USART_IsActiveFlag_TC(usart)) { if (data->tx_poll_stream_on) { /* A poll stream transmission just completed, * allow system to 
suspend */ LL_USART_DisableIT_TC(usart); data->tx_poll_stream_on = false; uart_stm32_pm_policy_state_lock_put(dev); } /* Stream transmission was either async or IRQ based, * constraint will be released at the same time TC IT * is disabled */ } #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN if (data->user_cb) { data->user_cb(dev, data->user_data); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API if (LL_USART_IsEnabledIT_IDLE(usart) && LL_USART_IsActiveFlag_IDLE(usart)) { LL_USART_ClearFlag_IDLE(usart); LOG_DBG("idle interrupt occurred"); if (data->dma_rx.timeout == 0) { uart_stm32_dma_rx_flush(dev); } else { /* Start the RX timer not null */ async_timer_start(&data->dma_rx.timeout_work, data->dma_rx.timeout); } } else if (LL_USART_IsEnabledIT_TC(usart) && LL_USART_IsActiveFlag_TC(usart)) { LL_USART_DisableIT_TC(usart); /* Generate TX_DONE event when transmission is done */ async_evt_tx_done(data); #ifdef CONFIG_PM uart_stm32_pm_policy_state_lock_put(dev); #endif } else if (LL_USART_IsEnabledIT_RXNE(usart) && LL_USART_IsActiveFlag_RXNE(usart)) { #ifdef USART_SR_RXNE /* clear the RXNE flag, because Rx data was not read */ LL_USART_ClearFlag_RXNE(usart); #else /* clear the RXNE by flushing the fifo, because Rx data was not read */ LL_USART_RequestRxDataFlush(usart); #endif /* USART_SR_RXNE */ } /* Clear errors */ uart_stm32_err_check(dev); #endif /* CONFIG_UART_ASYNC_API */ #if defined(CONFIG_PM) && defined(IS_UART_WAKEUP_FROMSTOP_INSTANCE) \ && defined(USART_CR3_WUFIE) if (LL_USART_IsEnabledIT_WKUP(usart) && LL_USART_IsActiveFlag_WKUP(usart)) { LL_USART_ClearFlag_WKUP(usart); #ifdef USART_ISR_REACK while (LL_USART_IsActiveFlag_REACK(usart) == 0) { } #endif } #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API || CONFIG_PM */ #ifdef CONFIG_UART_ASYNC_API #ifdef CONFIG_DCACHE static bool buf_in_nocache(uintptr_t buf, size_t len_bytes) { bool buf_within_nocache = false; #ifdef CONFIG_NOCACHE_MEMORY buf_within_nocache = (buf >= 
((uintptr_t)_nocache_ram_start)) && ((buf + len_bytes - 1) <= ((uintptr_t)_nocache_ram_end)); if (buf_within_nocache) { return true; } #endif /* CONFIG_NOCACHE_MEMORY */ buf_within_nocache = mem_attr_check_buf( (void *)buf, len_bytes, DT_MEM_ARM_MPU_RAM_NOCACHE) == 0; if (buf_within_nocache) { return true; } buf_within_nocache = (buf >= ((uintptr_t)__rodata_region_start)) && ((buf + len_bytes - 1) <= ((uintptr_t)__rodata_region_end)); return buf_within_nocache; } #endif /* CONFIG_DCACHE */ static int uart_stm32_async_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct uart_stm32_data *data = dev->data; data->async_cb = callback; data->async_user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->user_cb = NULL; data->user_data = NULL; #endif return 0; } static inline void uart_stm32_dma_tx_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; LL_USART_EnableDMAReq_TX(config->usart); } static inline void uart_stm32_dma_tx_disable(const struct device *dev) { #ifdef CONFIG_UART_STM32U5_ERRATA_DMAT ARG_UNUSED(dev); /* * Errata Sheet ES0499 : STM32U575xx and STM32U585xx device errata * USART does not generate DMA requests after setting/clearing DMAT bit * (also seen on stm32H5 serie) */ #else const struct uart_stm32_config *config = dev->config; LL_USART_DisableDMAReq_TX(config->usart); #endif } static inline void uart_stm32_dma_rx_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; struct uart_stm32_data *data = dev->data; LL_USART_EnableDMAReq_RX(config->usart); data->dma_rx.enabled = true; } static inline void uart_stm32_dma_rx_disable(const struct device *dev) { struct uart_stm32_data *data = dev->data; data->dma_rx.enabled = false; } static int uart_stm32_async_rx_disable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; 
struct uart_event disabled_event = { .type = UART_RX_DISABLED }; if (!data->dma_rx.enabled) { async_user_callback(data, &disabled_event); return -EFAULT; } LL_USART_DisableIT_IDLE(usart); uart_stm32_dma_rx_flush(dev); async_evt_rx_buf_release(data); uart_stm32_dma_rx_disable(dev); (void)k_work_cancel_delayable(&data->dma_rx.timeout_work); dma_stop(data->dma_rx.dma_dev, data->dma_rx.dma_channel); if (data->rx_next_buffer) { struct uart_event rx_next_buf_release_evt = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->rx_next_buffer, }; async_user_callback(data, &rx_next_buf_release_evt); } data->rx_next_buffer = NULL; data->rx_next_buffer_len = 0; /* When async rx is disabled, enable interruptible instance of uart to function normally */ LL_USART_EnableIT_RXNE(usart); LOG_DBG("rx: disabled"); async_user_callback(data, &disabled_event); return 0; } void uart_stm32_dma_tx_cb(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *uart_dev = user_data; struct uart_stm32_data *data = uart_dev->data; struct dma_status stat; unsigned int key = irq_lock(); /* Disable TX */ uart_stm32_dma_tx_disable(uart_dev); (void)k_work_cancel_delayable(&data->dma_tx.timeout_work); if (!dma_get_status(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &stat)) { data->dma_tx.counter = data->dma_tx.buffer_length - stat.pending_length; } data->dma_tx.buffer_length = 0; irq_unlock(key); } static void uart_stm32_dma_replace_buffer(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; /* Replace the buffer and reload the DMA */ LOG_DBG("Replacing RX buffer: %d", data->rx_next_buffer_len); /* reload DMA */ data->dma_rx.offset = 0; data->dma_rx.counter = 0; data->dma_rx.buffer = data->rx_next_buffer; data->dma_rx.buffer_length = data->rx_next_buffer_len; data->dma_rx.blk_cfg.block_size = data->dma_rx.buffer_length; 
data->dma_rx.blk_cfg.dest_address = (uint32_t)data->dma_rx.buffer; data->rx_next_buffer = NULL; data->rx_next_buffer_len = 0; dma_reload(data->dma_rx.dma_dev, data->dma_rx.dma_channel, data->dma_rx.blk_cfg.source_address, data->dma_rx.blk_cfg.dest_address, data->dma_rx.blk_cfg.block_size); dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel); LL_USART_ClearFlag_IDLE(usart); /* Request next buffer */ async_evt_rx_buf_request(data); } void uart_stm32_dma_rx_cb(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *uart_dev = user_data; struct uart_stm32_data *data = uart_dev->data; if (status < 0) { async_evt_rx_err(data, status); return; } (void)k_work_cancel_delayable(&data->dma_rx.timeout_work); /* true since this functions occurs when buffer if full */ data->dma_rx.counter = data->dma_rx.buffer_length; async_evt_rx_rdy(data); if (data->rx_next_buffer != NULL) { async_evt_rx_buf_release(data); /* replace the buffer when the current * is full and not the same as the next * one. */ uart_stm32_dma_replace_buffer(uart_dev); } else { /* Buffer full without valid next buffer, * an UART_RX_DISABLED event must be generated, * but uart_stm32_async_rx_disable() cannot be * called in ISR context. So force the RX timeout * to minimum value and let the RX timeout to do the job. 
*/ k_work_reschedule(&data->dma_rx.timeout_work, K_TICKS(1)); } } static int uart_stm32_async_tx(const struct device *dev, const uint8_t *tx_data, size_t buf_size, int32_t timeout) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; int ret; if (data->dma_tx.dma_dev == NULL) { return -ENODEV; } if (data->dma_tx.buffer_length != 0) { return -EBUSY; } #ifdef CONFIG_DCACHE if (!buf_in_nocache((uintptr_t)tx_data, buf_size)) { LOG_ERR("Tx buffer should be placed in a nocache memory region"); return -EFAULT; } #endif /* CONFIG_DCACHE */ data->dma_tx.buffer = (uint8_t *)tx_data; data->dma_tx.buffer_length = buf_size; data->dma_tx.timeout = timeout; LOG_DBG("tx: l=%d", data->dma_tx.buffer_length); /* Clear TC flag */ LL_USART_ClearFlag_TC(usart); /* Enable TC interrupt so we can signal correct TX done */ LL_USART_EnableIT_TC(usart); /* set source address */ data->dma_tx.blk_cfg.source_address = (uint32_t)data->dma_tx.buffer; data->dma_tx.blk_cfg.block_size = data->dma_tx.buffer_length; ret = dma_config(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &data->dma_tx.dma_cfg); if (ret != 0) { LOG_ERR("dma tx config error!"); return -EINVAL; } if (dma_start(data->dma_tx.dma_dev, data->dma_tx.dma_channel)) { LOG_ERR("UART err: TX DMA start failed!"); return -EFAULT; } /* Start TX timer */ async_timer_start(&data->dma_tx.timeout_work, data->dma_tx.timeout); #ifdef CONFIG_PM /* Do not allow system to suspend until transmission has completed */ uart_stm32_pm_policy_state_lock_get(dev); #endif /* Enable TX DMA requests */ uart_stm32_dma_tx_enable(dev); return 0; } static int uart_stm32_async_rx_enable(const struct device *dev, uint8_t *rx_buf, size_t buf_size, int32_t timeout) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; int ret; if (data->dma_rx.dma_dev == NULL) { return -ENODEV; } if (data->dma_rx.enabled) { 
LOG_WRN("RX was already enabled"); return -EBUSY; } #ifdef CONFIG_DCACHE if (!buf_in_nocache((uintptr_t)rx_buf, buf_size)) { LOG_ERR("Rx buffer should be placed in a nocache memory region"); return -EFAULT; } #endif /* CONFIG_DCACHE */ data->dma_rx.offset = 0; data->dma_rx.buffer = rx_buf; data->dma_rx.buffer_length = buf_size; data->dma_rx.counter = 0; data->dma_rx.timeout = timeout; /* Disable RX interrupts to let DMA to handle it */ LL_USART_DisableIT_RXNE(usart); data->dma_rx.blk_cfg.block_size = buf_size; data->dma_rx.blk_cfg.dest_address = (uint32_t)data->dma_rx.buffer; ret = dma_config(data->dma_rx.dma_dev, data->dma_rx.dma_channel, &data->dma_rx.dma_cfg); if (ret != 0) { LOG_ERR("UART ERR: RX DMA config failed!"); return -EINVAL; } if (dma_start(data->dma_rx.dma_dev, data->dma_rx.dma_channel)) { LOG_ERR("UART ERR: RX DMA start failed!"); return -EFAULT; } /* Flush RX data buffer */ #ifdef USART_SR_RXNE LL_USART_ClearFlag_RXNE(usart); #else LL_USART_RequestRxDataFlush(usart); #endif /* USART_SR_RXNE */ /* Enable RX DMA requests */ uart_stm32_dma_rx_enable(dev); /* Enable IRQ IDLE to define the end of a * RX DMA transaction. 
*/ LL_USART_ClearFlag_IDLE(usart); LL_USART_EnableIT_IDLE(usart); LL_USART_EnableIT_ERROR(usart); /* Request next buffer */ async_evt_rx_buf_request(data); LOG_DBG("async rx enabled"); return ret; } static int uart_stm32_async_tx_abort(const struct device *dev) { struct uart_stm32_data *data = dev->data; size_t tx_buffer_length = data->dma_tx.buffer_length; struct dma_status stat; if (tx_buffer_length == 0) { return -EFAULT; } (void)k_work_cancel_delayable(&data->dma_tx.timeout_work); if (!dma_get_status(data->dma_tx.dma_dev, data->dma_tx.dma_channel, &stat)) { data->dma_tx.counter = tx_buffer_length - stat.pending_length; } #if DT_HAS_COMPAT_STATUS_OKAY(st_stm32u5_dma) dma_suspend(data->dma_tx.dma_dev, data->dma_tx.dma_channel); #endif dma_stop(data->dma_tx.dma_dev, data->dma_tx.dma_channel); async_evt_tx_abort(data); return 0; } static void uart_stm32_async_rx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_dma_stream *rx_stream = CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work); struct uart_stm32_data *data = CONTAINER_OF(rx_stream, struct uart_stm32_data, dma_rx); const struct device *dev = data->uart_dev; LOG_DBG("rx timeout"); if (data->dma_rx.counter == data->dma_rx.buffer_length) { uart_stm32_async_rx_disable(dev); } else { uart_stm32_dma_rx_flush(dev); } } static void uart_stm32_async_tx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_dma_stream *tx_stream = CONTAINER_OF(dwork, struct uart_dma_stream, timeout_work); struct uart_stm32_data *data = CONTAINER_OF(tx_stream, struct uart_stm32_data, dma_tx); const struct device *dev = data->uart_dev; uart_stm32_async_tx_abort(dev); LOG_DBG("tx: async timeout"); } static int uart_stm32_async_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uart_stm32_data *data = dev->data; unsigned int key; int err = 0; LOG_DBG("replace buffer (%d)", len); key = irq_lock(); 
if (data->rx_next_buffer != NULL) { err = -EBUSY; } else if (!data->dma_rx.enabled) { err = -EACCES; } else { #ifdef CONFIG_DCACHE if (!buf_in_nocache((uintptr_t)buf, len)) { LOG_ERR("Rx buffer should be placed in a nocache memory region"); return -EFAULT; } #endif /* CONFIG_DCACHE */ data->rx_next_buffer = buf; data->rx_next_buffer_len = len; } irq_unlock(key); return err; } static int uart_stm32_async_init(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; data->uart_dev = dev; if (data->dma_rx.dma_dev != NULL) { if (!device_is_ready(data->dma_rx.dma_dev)) { return -ENODEV; } } if (data->dma_tx.dma_dev != NULL) { if (!device_is_ready(data->dma_tx.dma_dev)) { return -ENODEV; } } /* Disable both TX and RX DMA requests */ uart_stm32_dma_rx_disable(dev); uart_stm32_dma_tx_disable(dev); k_work_init_delayable(&data->dma_rx.timeout_work, uart_stm32_async_rx_timeout); k_work_init_delayable(&data->dma_tx.timeout_work, uart_stm32_async_tx_timeout); /* Configure dma rx config */ memset(&data->dma_rx.blk_cfg, 0, sizeof(data->dma_rx.blk_cfg)); #if defined(CONFIG_SOC_SERIES_STM32F1X) || \ defined(CONFIG_SOC_SERIES_STM32F2X) || \ defined(CONFIG_SOC_SERIES_STM32F4X) || \ defined(CONFIG_SOC_SERIES_STM32L1X) data->dma_rx.blk_cfg.source_address = LL_USART_DMA_GetRegAddr(usart); #else data->dma_rx.blk_cfg.source_address = LL_USART_DMA_GetRegAddr(usart, LL_USART_DMA_REG_DATA_RECEIVE); #endif data->dma_rx.blk_cfg.dest_address = 0; /* dest not ready */ if (data->dma_rx.src_addr_increment) { data->dma_rx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { data->dma_rx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } if (data->dma_rx.dst_addr_increment) { data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { data->dma_rx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } /* RX disable circular buffer */ data->dma_rx.blk_cfg.source_reload_en = 0; 
data->dma_rx.blk_cfg.dest_reload_en = 0; data->dma_rx.blk_cfg.fifo_mode_control = data->dma_rx.fifo_threshold; data->dma_rx.dma_cfg.head_block = &data->dma_rx.blk_cfg; data->dma_rx.dma_cfg.user_data = (void *)dev; data->rx_next_buffer = NULL; data->rx_next_buffer_len = 0; /* Configure dma tx config */ memset(&data->dma_tx.blk_cfg, 0, sizeof(data->dma_tx.blk_cfg)); #if defined(CONFIG_SOC_SERIES_STM32F1X) || \ defined(CONFIG_SOC_SERIES_STM32F2X) || \ defined(CONFIG_SOC_SERIES_STM32F4X) || \ defined(CONFIG_SOC_SERIES_STM32L1X) data->dma_tx.blk_cfg.dest_address = LL_USART_DMA_GetRegAddr(usart); #else data->dma_tx.blk_cfg.dest_address = LL_USART_DMA_GetRegAddr(usart, LL_USART_DMA_REG_DATA_TRANSMIT); #endif data->dma_tx.blk_cfg.source_address = 0; /* not ready */ if (data->dma_tx.src_addr_increment) { data->dma_tx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { data->dma_tx.blk_cfg.source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } if (data->dma_tx.dst_addr_increment) { data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_INCREMENT; } else { data->dma_tx.blk_cfg.dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE; } data->dma_tx.blk_cfg.fifo_mode_control = data->dma_tx.fifo_threshold; data->dma_tx.dma_cfg.head_block = &data->dma_tx.blk_cfg; data->dma_tx.dma_cfg.user_data = (void *)dev; return 0; } #ifdef CONFIG_UART_WIDE_DATA static int uart_stm32_async_tx_u16(const struct device *dev, const uint16_t *tx_data, size_t buf_size, int32_t timeout) { return uart_stm32_async_tx(dev, (const uint8_t *)tx_data, buf_size * 2, timeout); } static int uart_stm32_async_rx_enable_u16(const struct device *dev, uint16_t *buf, size_t len, int32_t timeout) { return uart_stm32_async_rx_enable(dev, (uint8_t *)buf, len * 2, timeout); } static int uart_stm32_async_rx_buf_rsp_u16(const struct device *dev, uint16_t *buf, size_t len) { return uart_stm32_async_rx_buf_rsp(dev, (uint8_t *)buf, len * 2); } #endif #endif /* CONFIG_UART_ASYNC_API */ static const struct uart_driver_api uart_stm32_driver_api = { 
.poll_in = uart_stm32_poll_in, .poll_out = uart_stm32_poll_out, #ifdef CONFIG_UART_WIDE_DATA .poll_in_u16 = uart_stm32_poll_in_u16, .poll_out_u16 = uart_stm32_poll_out_u16, #endif .err_check = uart_stm32_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_stm32_configure, .config_get = uart_stm32_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_stm32_fifo_fill, .fifo_read = uart_stm32_fifo_read, #ifdef CONFIG_UART_WIDE_DATA .fifo_fill_u16 = uart_stm32_fifo_fill_u16, .fifo_read_u16 = uart_stm32_fifo_read_u16, #endif .irq_tx_enable = uart_stm32_irq_tx_enable, .irq_tx_disable = uart_stm32_irq_tx_disable, .irq_tx_ready = uart_stm32_irq_tx_ready, .irq_tx_complete = uart_stm32_irq_tx_complete, .irq_rx_enable = uart_stm32_irq_rx_enable, .irq_rx_disable = uart_stm32_irq_rx_disable, .irq_rx_ready = uart_stm32_irq_rx_ready, .irq_err_enable = uart_stm32_irq_err_enable, .irq_err_disable = uart_stm32_irq_err_disable, .irq_is_pending = uart_stm32_irq_is_pending, .irq_update = uart_stm32_irq_update, .irq_callback_set = uart_stm32_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API .callback_set = uart_stm32_async_callback_set, .tx = uart_stm32_async_tx, .tx_abort = uart_stm32_async_tx_abort, .rx_enable = uart_stm32_async_rx_enable, .rx_disable = uart_stm32_async_rx_disable, .rx_buf_rsp = uart_stm32_async_rx_buf_rsp, #ifdef CONFIG_UART_WIDE_DATA .tx_u16 = uart_stm32_async_tx_u16, .rx_enable_u16 = uart_stm32_async_rx_enable_u16, .rx_buf_rsp_u16 = uart_stm32_async_rx_buf_rsp_u16, #endif #endif /* CONFIG_UART_ASYNC_API */ }; static int uart_stm32_clocks_enable(const struct device *dev) { const struct uart_stm32_config *config = dev->config; struct uart_stm32_data *data = dev->data; int err; __uart_stm32_get_clock(dev); if (!device_is_ready(data->clock)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* enable clock */ err = 
clock_control_on(data->clock, (clock_control_subsys_t)&config->pclken[0]); if (err != 0) { LOG_ERR("Could not enable (LP)UART clock"); return err; } if (IS_ENABLED(STM32_UART_DOMAIN_CLOCK_SUPPORT) && (config->pclk_len > 1)) { err = clock_control_configure(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), (clock_control_subsys_t) &config->pclken[1], NULL); if (err != 0) { LOG_ERR("Could not select UART domain clock"); return err; } } return 0; } static int uart_stm32_registers_configure(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; struct uart_stm32_data *data = dev->data; struct uart_config *uart_cfg = data->uart_cfg; LL_USART_Disable(usart); if (!device_is_ready(config->reset.dev)) { LOG_ERR("reset controller not ready"); return -ENODEV; } /* Reset UART to default state using RCC */ (void)reset_line_toggle_dt(&config->reset); /* TX/RX direction */ LL_USART_SetTransferDirection(usart, LL_USART_DIRECTION_TX_RX); /* Set basic parameters, such as data-/stop-bit, parity, and baudrate */ uart_stm32_parameters_set(dev, uart_cfg); /* Enable the single wire / half-duplex mode */ if (config->single_wire) { LL_USART_EnableHalfDuplex(usart); } #ifdef LL_USART_TXRX_SWAPPED if (config->tx_rx_swap) { LL_USART_SetTXRXSwap(usart, LL_USART_TXRX_SWAPPED); } #endif #ifdef LL_USART_RXPIN_LEVEL_INVERTED if (config->rx_invert) { LL_USART_SetRXPinLevel(usart, LL_USART_RXPIN_LEVEL_INVERTED); } #endif #ifdef LL_USART_TXPIN_LEVEL_INVERTED if (config->tx_invert) { LL_USART_SetTXPinLevel(usart, LL_USART_TXPIN_LEVEL_INVERTED); } #endif #if HAS_DRIVER_ENABLE if (config->de_enable) { if (!IS_UART_DRIVER_ENABLE_INSTANCE(usart)) { LOG_ERR("%s does not support driver enable", dev->name); return -EINVAL; } uart_stm32_set_driver_enable(dev, true); LL_USART_SetDEAssertionTime(usart, config->de_assert_time); LL_USART_SetDEDeassertionTime(usart, config->de_deassert_time); if (config->de_invert) { LL_USART_SetDESignalPolarity(usart, 
LL_USART_DE_POLARITY_LOW); } } #endif #ifdef USART_CR1_FIFOEN if (config->fifo_enable) { LL_USART_EnableFIFO(usart); } #endif #if defined(CONFIG_PM) && defined(IS_UART_WAKEUP_FROMSTOP_INSTANCE) if (config->wakeup_source) { /* Enable ability to wakeup device in Stop mode * Effect depends on CONFIG_PM_DEVICE status: * CONFIG_PM_DEVICE=n : Always active * CONFIG_PM_DEVICE=y : Controlled by pm_device_wakeup_enable() */ #ifdef USART_CR3_WUFIE LL_USART_SetWKUPType(usart, LL_USART_WAKEUP_ON_RXNE); LL_USART_EnableIT_WKUP(usart); LL_USART_ClearFlag_WKUP(usart); #endif LL_USART_EnableInStopMode(usart); if (config->wakeup_line != STM32_EXTI_LINE_NONE) { /* Prepare the WAKEUP with the expected EXTI line */ LL_EXTI_EnableIT_0_31(BIT(config->wakeup_line)); } } #endif /* CONFIG_PM */ LL_USART_Enable(usart); #ifdef USART_ISR_TEACK /* Wait until TEACK flag is set */ while (!(LL_USART_IsActiveFlag_TEACK(usart))) { } #endif /* !USART_ISR_TEACK */ #ifdef USART_ISR_REACK /* Wait until REACK flag is set */ while (!(LL_USART_IsActiveFlag_REACK(usart))) { } #endif /* !USART_ISR_REACK */ return 0; } /** * @brief Initialize UART channel * * This routine is called to reset the chip in a quiescent state. * It is assumed that this function is called only once per UART. 
* * @param dev UART device struct * * @return 0 */ static int uart_stm32_init(const struct device *dev) { const struct uart_stm32_config *config = dev->config; int err; err = uart_stm32_clocks_enable(dev); if (err < 0) { return err; } /* Configure dt provided device signals when available */ err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } err = uart_stm32_registers_configure(dev); if (err < 0) { return err; } #if defined(CONFIG_PM) || \ defined(CONFIG_UART_INTERRUPT_DRIVEN) || \ defined(CONFIG_UART_ASYNC_API) config->irq_config_func(dev); #endif /* CONFIG_PM || CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API */ #ifdef CONFIG_UART_ASYNC_API return uart_stm32_async_init(dev); #else return 0; #endif } #ifdef CONFIG_PM_DEVICE static void uart_stm32_suspend_setup(const struct device *dev) { const struct uart_stm32_config *config = dev->config; USART_TypeDef *usart = config->usart; #ifdef USART_ISR_BUSY /* Make sure that no USART transfer is on-going */ while (LL_USART_IsActiveFlag_BUSY(usart) == 1) { } #endif while (LL_USART_IsActiveFlag_TC(usart) == 0) { } #ifdef USART_ISR_REACK /* Make sure that USART is ready for reception */ while (LL_USART_IsActiveFlag_REACK(usart) == 0) { } #endif /* Clear OVERRUN flag */ LL_USART_ClearFlag_ORE(usart); } static int uart_stm32_pm_action(const struct device *dev, enum pm_device_action action) { const struct uart_stm32_config *config = dev->config; struct uart_stm32_data *data = dev->data; int err; switch (action) { case PM_DEVICE_ACTION_RESUME: /* Set pins to active state */ err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } /* Enable clock */ err = clock_control_on(data->clock, (clock_control_subsys_t)&config->pclken[0]); if (err < 0) { LOG_ERR("Could not enable (LP)UART clock"); return err; } if ((IS_ENABLED(CONFIG_PM_S2RAM)) && (!LL_USART_IsEnabled(config->usart))) { /* When exiting low power mode, check whether UART is enabled. 
* If not, it means we are exiting Suspend to RAM mode (STM32 * Standby), and the driver needs to be reinitialized. */ uart_stm32_init(dev); } break; case PM_DEVICE_ACTION_SUSPEND: uart_stm32_suspend_setup(dev); /* Stop device clock. Note: fixed clocks are not handled yet. */ err = clock_control_off(data->clock, (clock_control_subsys_t)&config->pclken[0]); if (err < 0) { LOG_ERR("Could not enable (LP)UART clock"); return err; } /* Move pins to sleep state */ err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP); if ((err < 0) && (err != -ENOENT)) { /* * If returning -ENOENT, no pins where defined for sleep mode : * Do not output on console (might sleep already) when going to sleep, * "(LP)UART pinctrl sleep state not available" * and don't block PM suspend. * Else return the error. */ return err; } break; default: return -ENOTSUP; } return 0; } #endif /* CONFIG_PM_DEVICE */ #ifdef CONFIG_UART_ASYNC_API /* src_dev and dest_dev should be 'MEMORY' or 'PERIPHERAL'. */ #define UART_DMA_CHANNEL_INIT(index, dir, dir_cap, src_dev, dest_dev) \ .dma_dev = DEVICE_DT_GET(STM32_DMA_CTLR(index, dir)), \ .dma_channel = DT_INST_DMAS_CELL_BY_NAME(index, dir, channel), \ .dma_cfg = { \ .dma_slot = STM32_DMA_SLOT(index, dir, slot),\ .channel_direction = STM32_DMA_CONFIG_DIRECTION( \ STM32_DMA_CHANNEL_CONFIG(index, dir)),\ .channel_priority = STM32_DMA_CONFIG_PRIORITY( \ STM32_DMA_CHANNEL_CONFIG(index, dir)), \ .source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE(\ STM32_DMA_CHANNEL_CONFIG(index, dir)),\ .dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE(\ STM32_DMA_CHANNEL_CONFIG(index, dir)),\ .source_burst_length = 1, /* SINGLE transfer */ \ .dest_burst_length = 1, \ .block_count = 1, \ .dma_callback = uart_stm32_dma_##dir##_cb, \ }, \ .src_addr_increment = STM32_DMA_CONFIG_##src_dev##_ADDR_INC( \ STM32_DMA_CHANNEL_CONFIG(index, dir)), \ .dst_addr_increment = STM32_DMA_CONFIG_##dest_dev##_ADDR_INC( \ STM32_DMA_CHANNEL_CONFIG(index, dir)), \ .fifo_threshold = 
STM32_DMA_FEATURES_FIFO_THRESHOLD( \ STM32_DMA_FEATURES(index, dir)), \ #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) || \ defined(CONFIG_PM) #define STM32_UART_IRQ_HANDLER_DECL(index) \ static void uart_stm32_irq_config_func_##index(const struct device *dev); #define STM32_UART_IRQ_HANDLER(index) \ static void uart_stm32_irq_config_func_##index(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(index), \ DT_INST_IRQ(index, priority), \ uart_stm32_isr, DEVICE_DT_INST_GET(index), \ 0); \ irq_enable(DT_INST_IRQN(index)); \ } #else #define STM32_UART_IRQ_HANDLER_DECL(index) /* Not used */ #define STM32_UART_IRQ_HANDLER(index) /* Not used */ #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) || \ defined(CONFIG_PM) #define STM32_UART_IRQ_HANDLER_FUNC(index) \ .irq_config_func = uart_stm32_irq_config_func_##index, #else #define STM32_UART_IRQ_HANDLER_FUNC(index) /* Not used */ #endif #ifdef CONFIG_UART_ASYNC_API #define UART_DMA_CHANNEL(index, dir, DIR, src, dest) \ .dma_##dir = { \ COND_CODE_1(DT_INST_DMAS_HAS_NAME(index, dir), \ (UART_DMA_CHANNEL_INIT(index, dir, DIR, src, dest)), \ (NULL)) \ }, #else #define UART_DMA_CHANNEL(index, dir, DIR, src, dest) #endif #ifdef CONFIG_PM #define STM32_UART_PM_WAKEUP(index) \ .wakeup_source = DT_INST_PROP(index, wakeup_source), \ .wakeup_line = COND_CODE_1(DT_INST_NODE_HAS_PROP(index, wakeup_line), \ (DT_INST_PROP(index, wakeup_line)), \ (STM32_EXTI_LINE_NONE)), #else #define STM32_UART_PM_WAKEUP(index) /* Not used */ #endif /* Ensure DTS doesn't present an incompatible parity configuration. * Mark/space parity isn't supported on the STM32 family. * If 9 data bits are configured, ensure that a parity bit isn't set. 
*/ #define STM32_UART_CHECK_DT_PARITY(index) \ BUILD_ASSERT( \ !(DT_INST_ENUM_IDX_OR(index, parity, STM32_UART_DEFAULT_PARITY) \ == UART_CFG_PARITY_MARK || \ DT_INST_ENUM_IDX_OR(index, parity, STM32_UART_DEFAULT_PARITY) \ == UART_CFG_PARITY_SPACE), \ "Node " DT_NODE_PATH(DT_DRV_INST(index)) \ " has unsupported parity configuration"); \ BUILD_ASSERT( \ !(DT_INST_ENUM_IDX_OR(index, parity, STM32_UART_DEFAULT_PARITY) \ != UART_CFG_PARITY_NONE && \ DT_INST_ENUM_IDX_OR(index, data_bits, \ STM32_UART_DEFAULT_DATA_BITS) \ == UART_CFG_DATA_BITS_9), \ "Node " DT_NODE_PATH(DT_DRV_INST(index)) \ " has unsupported parity + data bits combination"); /* Ensure DTS doesn't present an incompatible data bits configuration * The STM32 family doesn't support 5 data bits, or 6 data bits without parity. * Only some series support 7 data bits. */ #ifdef LL_USART_DATAWIDTH_7B #define STM32_UART_CHECK_DT_DATA_BITS(index) \ BUILD_ASSERT( \ !(DT_INST_ENUM_IDX_OR(index, data_bits, \ STM32_UART_DEFAULT_DATA_BITS) \ == UART_CFG_DATA_BITS_5 || \ (DT_INST_ENUM_IDX_OR(index, data_bits, \ STM32_UART_DEFAULT_DATA_BITS) \ == UART_CFG_DATA_BITS_6 && \ DT_INST_ENUM_IDX_OR(index, parity, \ STM32_UART_DEFAULT_PARITY) \ == UART_CFG_PARITY_NONE)), \ "Node " DT_NODE_PATH(DT_DRV_INST(index)) \ " has unsupported data bits configuration"); #else #define STM32_UART_CHECK_DT_DATA_BITS(index) \ BUILD_ASSERT( \ !(DT_INST_ENUM_IDX_OR(index, data_bits, \ STM32_UART_DEFAULT_DATA_BITS) \ == UART_CFG_DATA_BITS_5 || \ DT_INST_ENUM_IDX_OR(index, data_bits, \ STM32_UART_DEFAULT_DATA_BITS) \ == UART_CFG_DATA_BITS_6 || \ (DT_INST_ENUM_IDX_OR(index, data_bits, \ STM32_UART_DEFAULT_DATA_BITS) \ == UART_CFG_DATA_BITS_7 && \ DT_INST_ENUM_IDX_OR(index, parity, \ STM32_UART_DEFAULT_PARITY) \ == UART_CFG_PARITY_NONE)), \ "Node " DT_NODE_PATH(DT_DRV_INST(index)) \ " has unsupported data bits configuration"); #endif /* Ensure DTS doesn't present an incompatible stop bits configuration. 
 * Some STM32 series USARTs don't support 0.5 stop bits, and it generally isn't
 * supported for LPUART.
 */
#ifndef LL_USART_STOPBITS_0_5
#define STM32_UART_CHECK_DT_STOP_BITS_0_5(index)			\
BUILD_ASSERT(								\
	!(DT_INST_ENUM_IDX_OR(index, stop_bits,				\
		STM32_UART_DEFAULT_STOP_BITS)				\
		== UART_CFG_STOP_BITS_0_5),				\
	"Node " DT_NODE_PATH(DT_DRV_INST(index))			\
		" has unsupported stop bits configuration");
/* LPUARTs don't support 0.5 stop bits configurations */
#else
/* NOTE(review): DT_HAS_COMPAT_STATUS_OKAY() is true if ANY enabled node is an
 * LPUART, not just instance 'index' — confirm this coarse check is intended.
 */
#define STM32_UART_CHECK_DT_STOP_BITS_0_5(index)			\
BUILD_ASSERT(								\
	!(DT_HAS_COMPAT_STATUS_OKAY(st_stm32_lpuart) &&			\
	DT_INST_ENUM_IDX_OR(index, stop_bits,				\
		STM32_UART_DEFAULT_STOP_BITS)				\
		== UART_CFG_STOP_BITS_0_5),				\
	"Node " DT_NODE_PATH(DT_DRV_INST(index))			\
		" has unsupported stop bits configuration");
#endif

/* Ensure DTS doesn't present an incompatible stop bits configuration.
 * Some STM32 series USARTs don't support 1.5 stop bits, and it generally isn't
 * supported for LPUART.
 */
#ifndef LL_USART_STOPBITS_1_5
#define STM32_UART_CHECK_DT_STOP_BITS_1_5(index)			\
BUILD_ASSERT(								\
	DT_INST_ENUM_IDX_OR(index, stop_bits,				\
		STM32_UART_DEFAULT_STOP_BITS)				\
		!= UART_CFG_STOP_BITS_1_5,				\
	"Node " DT_NODE_PATH(DT_DRV_INST(index))			\
		" has unsupported stop bits configuration");
/* LPUARTs don't support 1.5 stop bits configurations */
#else
#define STM32_UART_CHECK_DT_STOP_BITS_1_5(index)			\
BUILD_ASSERT(								\
	!(DT_HAS_COMPAT_STATUS_OKAY(st_stm32_lpuart) &&			\
	DT_INST_ENUM_IDX_OR(index, stop_bits,				\
		STM32_UART_DEFAULT_STOP_BITS)				\
		== UART_CFG_STOP_BITS_1_5),				\
	"Node " DT_NODE_PATH(DT_DRV_INST(index))			\
		" has unsupported stop bits configuration");
#endif

/* Per-instance expansion: pinctrl, clocks, uart_config defaults from DT,
 * driver config/data structures, PM action, device definition, IRQ config
 * function and compile-time DT sanity checks.
 */
#define STM32_UART_INIT(index)						\
STM32_UART_IRQ_HANDLER_DECL(index)					\
									\
PINCTRL_DT_INST_DEFINE(index);						\
									\
static const struct stm32_pclken pclken_##index[] =			\
					    STM32_DT_INST_CLOCKS(index);\
									\
static struct uart_config uart_cfg_##index = {				\
	.baudrate = DT_INST_PROP_OR(index, current_speed,		\
				    STM32_UART_DEFAULT_BAUDRATE),	\
	.parity = DT_INST_ENUM_IDX_OR(index, parity,			\
				      STM32_UART_DEFAULT_PARITY),	\
	.stop_bits = DT_INST_ENUM_IDX_OR(index, stop_bits,		\
					 STM32_UART_DEFAULT_STOP_BITS),	\
	.data_bits = DT_INST_ENUM_IDX_OR(index, data_bits,		\
					 STM32_UART_DEFAULT_DATA_BITS),	\
	.flow_ctrl = DT_INST_PROP(index, hw_flow_control)		\
					? UART_CFG_FLOW_CTRL_RTS_CTS	\
					: UART_CFG_FLOW_CTRL_NONE,	\
};									\
									\
static const struct uart_stm32_config uart_stm32_cfg_##index = {	\
	.usart = (USART_TypeDef *)DT_INST_REG_ADDR(index),		\
	.reset = RESET_DT_SPEC_GET(DT_DRV_INST(index)),			\
	.pclken = pclken_##index,					\
	.pclk_len = DT_INST_NUM_CLOCKS(index),				\
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),			\
	.single_wire = DT_INST_PROP(index, single_wire),		\
	.tx_rx_swap = DT_INST_PROP(index, tx_rx_swap),			\
	.rx_invert = DT_INST_PROP(index, rx_invert),			\
	.tx_invert = DT_INST_PROP(index, tx_invert),			\
	.de_enable = DT_INST_PROP(index, de_enable),			\
	.de_assert_time = DT_INST_PROP(index, de_assert_time),		\
	.de_deassert_time = DT_INST_PROP(index, de_deassert_time),	\
	.de_invert = DT_INST_PROP(index, de_invert),			\
	.fifo_enable = DT_INST_PROP(index, fifo_enable),		\
	STM32_UART_IRQ_HANDLER_FUNC(index)				\
	STM32_UART_PM_WAKEUP(index)					\
};									\
									\
static struct uart_stm32_data uart_stm32_data_##index = {		\
	.uart_cfg = &uart_cfg_##index,					\
	UART_DMA_CHANNEL(index, rx, RX, PERIPHERAL, MEMORY)		\
	UART_DMA_CHANNEL(index, tx, TX, MEMORY, PERIPHERAL)		\
};									\
									\
PM_DEVICE_DT_INST_DEFINE(index, uart_stm32_pm_action);			\
									\
DEVICE_DT_INST_DEFINE(index,						\
		uart_stm32_init,					\
		PM_DEVICE_DT_INST_GET(index),				\
		&uart_stm32_data_##index, &uart_stm32_cfg_##index,	\
		PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,		\
		&uart_stm32_driver_api);				\
									\
STM32_UART_IRQ_HANDLER(index)						\
									\
STM32_UART_CHECK_DT_PARITY(index)					\
STM32_UART_CHECK_DT_DATA_BITS(index)					\
STM32_UART_CHECK_DT_STOP_BITS_0_5(index)				\
STM32_UART_CHECK_DT_STOP_BITS_1_5(index)

DT_INST_FOREACH_STATUS_OKAY(STM32_UART_INIT)
```
/content/code_sandbox/drivers/serial/uart_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
19,413
```c /* * */ /** * @brief Driver for Nordic Semiconductor nRF UARTE */ #include <zephyr/drivers/uart.h> #include <zephyr/pm/device.h> #include <hal/nrf_uarte.h> #include <nrfx_timer.h> #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <soc.h> #include <helpers/nrfx_gppi.h> #include <zephyr/linker/devicetree_regions.h> #include <zephyr/irq.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #include <zephyr/drivers/pinctrl.h> /* Generalize PPI or DPPI channel management */ #if defined(PPI_PRESENT) #include <nrfx_ppi.h> #define gppi_channel_t nrf_ppi_channel_t #define gppi_channel_alloc nrfx_ppi_channel_alloc #define gppi_channel_enable nrfx_ppi_channel_enable #elif defined(DPPI_PRESENT) #include <nrfx_dppi.h> #define gppi_channel_t uint8_t #define gppi_channel_alloc nrfx_dppi_channel_alloc #define gppi_channel_enable nrfx_dppi_channel_enable #else #error "No PPI or DPPI" #endif /* Execute macro f(x) for all instances. */ #define UARTE_FOR_EACH_INSTANCE(f, sep, off_code) \ NRFX_FOREACH_PRESENT(UARTE, f, sep, off_code, _) /* Determine if any instance is using interrupt driven API. */ #define IS_INT_DRIVEN(unused, prefix, i, _) \ (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \ IS_ENABLED(CONFIG_UART_##prefix##i##_INTERRUPT_DRIVEN)) #if UARTE_FOR_EACH_INSTANCE(IS_INT_DRIVEN, (||), (0)) #define UARTE_INTERRUPT_DRIVEN 1 #endif /* Determine if any instance is not using asynchronous API. */ #define IS_NOT_ASYNC(unused, prefix, i, _) \ (IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \ !IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC)) #if UARTE_FOR_EACH_INSTANCE(IS_NOT_ASYNC, (||), (0)) #define UARTE_ANY_NONE_ASYNC 1 #endif /* Determine if any instance is using asynchronous API. 
 */
#define IS_ASYNC(unused, prefix, i, _) \
	(IS_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i) && \
	 IS_ENABLED(CONFIG_UART_##prefix##i##_ASYNC))

#if UARTE_FOR_EACH_INSTANCE(IS_ASYNC, (||), (0))
#define UARTE_ANY_ASYNC 1
#endif

/* Determine if any instance is using asynchronous API with HW byte counting. */
#define IS_HW_ASYNC(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_HW_ASYNC)

#if UARTE_FOR_EACH_INSTANCE(IS_HW_ASYNC, (||), (0))
#define UARTE_HW_ASYNC 1
#endif

/* Determine if any instance is using enhanced poll_out feature. */
#define IS_ENHANCED_POLL_OUT(unused, prefix, i, _) \
	IS_ENABLED(CONFIG_UART_##prefix##i##_ENHANCED_POLL_OUT)

#if UARTE_FOR_EACH_INSTANCE(IS_ENHANCED_POLL_OUT, (||), (0))
#define UARTE_ENHANCED_POLL_OUT 1
#endif

/*
 * RX timeout is divided into time slabs, this define tells how many divisions
 * should be made. More divisions - higher timeout accuracy and processor usage.
 */
#define RX_TIMEOUT_DIV 5

/* Size of hardware fifo in RX path. */
#define UARTE_HW_RX_FIFO_SIZE 5

#ifdef UARTE_ANY_ASYNC
/* State for the asynchronous (uart_tx/uart_rx_enable) API. */
struct uarte_async_cb {
	uart_callback_t user_callback;
	void *user_data;

	/* Current user TX request and the chunk actually handed to EasyDMA. */
	const uint8_t *tx_buf;
	volatile size_t tx_size;
	const uint8_t *xfer_buf;
	size_t xfer_len;

	size_t tx_cache_offset;

	struct k_timer tx_timeout_timer;

	/* Active and pre-provided ("next") RX buffers. */
	uint8_t *rx_buf;
	size_t rx_buf_len;
	size_t rx_offset;
	uint8_t *rx_next_buf;
	size_t rx_next_buf_len;
	uint32_t rx_total_byte_cnt; /* Total number of bytes received */
	uint32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */
	int32_t rx_timeout; /* Timeout set by user */
	int32_t rx_timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */
	int32_t rx_timeout_left; /* Current time left until user callback */
	struct k_timer rx_timeout_timer;
	/* RX byte counter: a (D)PPI channel when HW counting, else a SW count. */
	union {
		gppi_channel_t ppi;
		uint32_t cnt;
	} rx_cnt;
	volatile int tx_amount;

	atomic_t low_power_mask;
	uint8_t rx_flush_buffer[UARTE_HW_RX_FIFO_SIZE];
	uint8_t rx_flush_cnt;
	volatile bool rx_enabled;
	volatile bool discard_rx_fifo;
	bool pending_tx;
	/* Flag to ensure that RX timeout won't be executed during ENDRX ISR */
	volatile bool is_in_irq;
};
#endif /* UARTE_ANY_ASYNC */

#ifdef UARTE_INTERRUPT_DRIVEN
/* State for the interrupt-driven (fifo_fill/fifo_read) API. */
struct uarte_nrfx_int_driven {
	uart_irq_callback_user_data_t cb; /**< Callback function pointer */
	void *cb_data; /**< Callback function arg */
	uint8_t *tx_buffer;
	uint16_t tx_buff_size;
	volatile bool disable_tx_irq;
#ifdef CONFIG_PM_DEVICE
	bool rx_irq_enabled;
#endif
	atomic_t fifo_fill_lock;
};
#endif

/* Device data structure */
struct uarte_nrfx_data {
	const struct device *dev;
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	struct uart_config uart_config;
#endif
#ifdef UARTE_INTERRUPT_DRIVEN
	struct uarte_nrfx_int_driven *int_driven;
#endif
#ifdef UARTE_ANY_ASYNC
	struct uarte_async_cb *async;
#endif
	atomic_val_t poll_out_lock;
	uint8_t *char_out;
	uint8_t *rx_data;
	gppi_channel_t ppi_ch_endtx;
};

/* Bits of async->low_power_mask: which direction keeps the device enabled. */
#define UARTE_LOW_POWER_TX BIT(0)
#define UARTE_LOW_POWER_RX BIT(1)

/* If enabled, pins are managed when going to low power mode. */
#define UARTE_CFG_FLAG_GPIO_MGMT BIT(0)

/* If enabled then ENDTX is PPI'ed to TXSTOP */
#define UARTE_CFG_FLAG_PPI_ENDTX BIT(1)

/* If enabled then TIMER and PPI is used for byte counting. */
#define UARTE_CFG_FLAG_HW_BYTE_COUNTING BIT(2)

/* If enabled then UARTE peripheral is disabled when not used. This allows
 * to achieve lowest power consumption in idle.
 */
#define UARTE_CFG_FLAG_LOW_POWER BIT(4)

/* Macro for converting numerical baudrate to register value. It is convenient
 * to use this approach because for constant input it can calculate nrf setting
 * at compile time. Unsupported rates map to 0 (rejected by baudrate_set()).
 */
#define NRF_BAUDRATE(baudrate) ((baudrate) == 300 ? 0x00014000 :\
	(baudrate) == 600 ? 0x00027000 : \
	(baudrate) == 1200 ? NRF_UARTE_BAUDRATE_1200 : \
	(baudrate) == 2400 ? NRF_UARTE_BAUDRATE_2400 : \
	(baudrate) == 4800 ? NRF_UARTE_BAUDRATE_4800 : \
	(baudrate) == 9600 ? NRF_UARTE_BAUDRATE_9600 : \
	(baudrate) == 14400 ? NRF_UARTE_BAUDRATE_14400 : \
	(baudrate) == 19200 ? NRF_UARTE_BAUDRATE_19200 : \
	(baudrate) == 28800 ? NRF_UARTE_BAUDRATE_28800 : \
	(baudrate) == 31250 ? NRF_UARTE_BAUDRATE_31250 : \
	(baudrate) == 38400 ? NRF_UARTE_BAUDRATE_38400 : \
	(baudrate) == 56000 ? NRF_UARTE_BAUDRATE_56000 : \
	(baudrate) == 57600 ? NRF_UARTE_BAUDRATE_57600 : \
	(baudrate) == 76800 ? NRF_UARTE_BAUDRATE_76800 : \
	(baudrate) == 115200 ? NRF_UARTE_BAUDRATE_115200 : \
	(baudrate) == 230400 ? NRF_UARTE_BAUDRATE_230400 : \
	(baudrate) == 250000 ? NRF_UARTE_BAUDRATE_250000 : \
	(baudrate) == 460800 ? NRF_UARTE_BAUDRATE_460800 : \
	(baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 : \
	(baudrate) == 1000000 ? NRF_UARTE_BAUDRATE_1000000 : 0)

/**
 * @brief Structure for UARTE configuration.
 */
struct uarte_nrfx_config {
	NRF_UARTE_Type *uarte_regs; /* Instance address */
	uint32_t clock_freq;
	uint32_t flags;
	bool disable_rx;
	const struct pinctrl_dev_config *pcfg;
#ifndef CONFIG_UART_USE_RUNTIME_CONFIGURE
	nrf_uarte_baudrate_t baudrate;
	nrf_uarte_config_t hw_config;
#endif
#ifdef UARTE_ANY_ASYNC
	nrfx_timer_t timer;
	uint8_t *tx_cache;
#endif
};

/* Shorthand for the instance's register block. */
static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;

	return config->uarte_regs;
}

/* Acknowledge ENDTX and stop the transmitter, under an IRQ lock so the
 * event is not re-handled concurrently.
 */
static void endtx_isr(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	unsigned int key = irq_lock();

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);
	}

	irq_unlock(key);
}

#ifdef UARTE_ANY_NONE_ASYNC
/**
 * @brief Interrupt service routine.
 *
 * This simply calls the callback function, if one exists.
 *
 * @param arg Argument to ISR.
 */
static void uarte_nrfx_isr_int(const void *arg)
{
	const struct device *dev = arg;
	const struct uarte_nrfx_config *config = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	/* If interrupt driven and asynchronous APIs are disabled then UART
	 * interrupt is still called to stop TX. Unless it is done using PPI.
	 */
	if (nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK) &&
	    nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) {
		endtx_isr(dev);
	}

	if (config->flags & UARTE_CFG_FLAG_LOW_POWER) {
		unsigned int key = irq_lock();

		/* TXSTOPPED means the transmitter is fully idle, so the
		 * peripheral can be switched off to save power.
		 */
		if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
			nrf_uarte_disable(uarte);
		}

#ifdef UARTE_INTERRUPT_DRIVEN
		struct uarte_nrfx_data *data = dev->data;

		/* Keep TXSTOPPED armed while an interrupt-driven fifo_fill
		 * still holds the lock.
		 */
		if (!data->int_driven || data->int_driven->fifo_fill_lock == 0)
#endif
		{
			nrf_uarte_int_disable(uarte,
					      NRF_UARTE_INT_TXSTOPPED_MASK);
		}

		irq_unlock(key);
	}

#ifdef UARTE_INTERRUPT_DRIVEN
	struct uarte_nrfx_data *data = dev->data;

	if (!data->int_driven) {
		return;
	}

	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) {
		data->int_driven->fifo_fill_lock = 0;
		if (data->int_driven->disable_tx_irq) {
			nrf_uarte_int_disable(uarte,
					      NRF_UARTE_INT_TXSTOPPED_MASK);
			data->int_driven->disable_tx_irq = false;
			return;
		}
	}

	/* ERROR is only acknowledged here; uarte_nrfx_err_check() reads and
	 * clears the error source register.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR);
	}

	if (data->int_driven->cb) {
		data->int_driven->cb(dev, data->int_driven->cb_data);
	}
#endif /* UARTE_INTERRUPT_DRIVEN */
}
#endif /* UARTE_ANY_NONE_ASYNC */

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
/**
 * @brief Set the baud rate
 *
 * This routine set the given baud rate for the UARTE.
 *
 * @param dev UARTE device struct
 * @param baudrate Baud rate
 *
 * @return 0 on success or error code
 */
static int baudrate_set(const struct device *dev, uint32_t baudrate)
{
	const struct uarte_nrfx_config *config = dev->config;
	/* calculated baudrate divisor */
	nrf_uarte_baudrate_t nrf_baudrate = NRF_BAUDRATE(baudrate);
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	/* NRF_BAUDRATE() yields 0 for rates without a register encoding. */
	if (nrf_baudrate == 0) {
		return -EINVAL;
	}

	/* scale baudrate setting (register values are relative to a 16 MHz
	 * base clock; integer division by the clock ratio)
	 */
	if (config->clock_freq > 0U) {
		nrf_baudrate /= config->clock_freq /
				NRF_UARTE_BASE_FREQUENCY_16MHZ;
	}

	nrf_uarte_baudrate_set(uarte, nrf_baudrate);

	return 0;
}

/* uart_configure() implementation: map the generic uart_config onto the
 * UARTE CONFIG register. Unsupported combinations return -ENOTSUP; only
 * 8 data bits are accepted, and odd parity / 2 stop bits only on series
 * exposing the respective register fields.
 */
static int uarte_nrfx_configure(const struct device *dev,
				const struct uart_config *cfg)
{
	struct uarte_nrfx_data *data = dev->data;
	nrf_uarte_config_t uarte_cfg;

#if NRF_UARTE_HAS_FRAME_TIMEOUT
	uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_DIS;
#endif

#if defined(UARTE_CONFIG_STOP_Msk)
	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		uarte_cfg.stop = NRF_UARTE_STOP_ONE;
		break;
	case UART_CFG_STOP_BITS_2:
		uarte_cfg.stop = NRF_UARTE_STOP_TWO;
		break;
	default:
		return -ENOTSUP;
	}
#else
	if (cfg->stop_bits != UART_CFG_STOP_BITS_1) {
		return -ENOTSUP;
	}
#endif

	if (cfg->data_bits != UART_CFG_DATA_BITS_8) {
		return -ENOTSUP;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_DISABLED;
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		uarte_cfg.hwfc = NRF_UARTE_HWFC_ENABLED;
		break;
	default:
		return -ENOTSUP;
	}

#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_EVEN;
#endif
	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		uarte_cfg.parity = NRF_UARTE_PARITY_EXCLUDED;
		break;
	case UART_CFG_PARITY_EVEN:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		break;
#if defined(UARTE_CONFIG_PARITYTYPE_Msk)
	case UART_CFG_PARITY_ODD:
		uarte_cfg.parity = NRF_UARTE_PARITY_INCLUDED;
		uarte_cfg.paritytype = NRF_UARTE_PARITYTYPE_ODD;
		break;
#endif
	default:
		return -ENOTSUP;
	}

	if (baudrate_set(dev, cfg->baudrate) != 0) {
		return -ENOTSUP;
	}
	nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg);

	/* Remember the accepted configuration for uarte_nrfx_config_get(). */
	data->uart_config = *cfg;

	return 0;
}

/* uart_config_get() implementation: return the last accepted configuration. */
static int uarte_nrfx_config_get(const struct device *dev,
				 struct uart_config *cfg)
{
	struct uarte_nrfx_data *data = dev->data;

	*cfg = data->uart_config;
	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

/* uart_err_check() implementation: read-and-clear the ERRORSRC register. */
static int uarte_nrfx_err_check(const struct device *dev)
{
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	/* register bitfields maps to the defines in uart.h */
	return nrf_uarte_errorsrc_get_and_clear(uarte);
}

/* Function returns true if new transfer can be started. Since TXSTOPPED
 * (and ENDTX) is cleared before triggering new transfer, TX is ready for new
 * transfer if any event is set.
 */
static bool is_tx_ready(const struct device *dev)
{
	const struct uarte_nrfx_config *config = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX;

	/* With ENDTX PPI'ed to STOPTX, only TXSTOPPED marks idle. */
	return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) ||
		(!ppi_endtx ?
		       nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) : 0);
}

/* Wait until the transmitter is in the idle state. When this function returns,
 * IRQ's are locked with the returned key.
 */
static int wait_tx_ready(const struct device *dev)
{
	unsigned int key;

	do {
		/* wait arbitrary time before back off. */
		bool res;

#if defined(CONFIG_ARCH_POSIX)
		NRFX_WAIT_FOR(is_tx_ready(dev), 33, 3, res);
#else
		NRFX_WAIT_FOR(is_tx_ready(dev), 100, 1, res);
#endif

		if (res) {
			/* Re-check under the lock so readiness cannot be
			 * consumed by another context before we return.
			 */
			key = irq_lock();
			if (is_tx_ready(dev)) {
				break;
			}

			irq_unlock(key);
		}
		if (IS_ENABLED(CONFIG_MULTITHREADING)) {
			k_msleep(1);
		}
	} while (1);

	return key;
}

#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE)
/* Apply default or sleep pinctrl state when the driver manages pins. */
static int pins_state_change(const struct device *dev, bool on)
{
	const struct uarte_nrfx_config *config = dev->config;

	if (config->flags & UARTE_CFG_FLAG_GPIO_MGMT) {
		return pinctrl_apply_state(config->pcfg, on ?
					   PINCTRL_STATE_DEFAULT :
					   PINCTRL_STATE_SLEEP);
	}

	return 0;
}
#endif

#ifdef UARTE_ANY_ASYNC

/* Using Macro instead of static inline function to handle NO_OPTIMIZATIONS case
 * where static inline fails on linking.
 */
#define HW_RX_COUNTING_ENABLED(config) \
	(IS_ENABLED(UARTE_HW_ASYNC) ? (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false)

#endif /* UARTE_ANY_ASYNC */

/* Power up the peripheral for the direction(s) given in mask. In low-power
 * async operation this also restores pins and, on the first enable, restarts
 * the HW byte-counting timer and replays the count of previously flushed
 * RX bytes so the counter stays consistent.
 */
static int uarte_enable(const struct device *dev, uint32_t mask)
{
#ifdef UARTE_ANY_ASYNC
	const struct uarte_nrfx_config *config = dev->config;
	struct uarte_nrfx_data *data = dev->data;

	if (data->async) {
		bool disabled = data->async->low_power_mask == 0;
		int ret;

		data->async->low_power_mask |= mask;
		ret = pins_state_change(dev, true);
		if (ret < 0) {
			return ret;
		}

		if (HW_RX_COUNTING_ENABLED(config) && disabled) {
			const nrfx_timer_t *timer = &config->timer;

			nrfx_timer_enable(timer);

			/* Account for bytes held in the flush buffer. */
			for (int i = 0; i < data->async->rx_flush_cnt; i++) {
				nrfx_timer_increment(timer);
			}
		}
	}
#endif
	nrf_uarte_enable(get_uarte_instance(dev));

	return 0;
}

/* At this point we should have irq locked and any previous transfer completed.
 * Transfer can be started, no need to wait for completion.
 */
static void tx_start(const struct device *dev, const uint8_t *buf, size_t len)
{
	const struct uarte_nrfx_config *config = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

#ifdef CONFIG_PM_DEVICE
	enum pm_device_state state;

	/* Silently drop the transfer while the device is suspended. */
	(void)pm_device_state_get(dev, &state);
	if (state != PM_DEVICE_STATE_ACTIVE) {
		return;
	}
#endif

	/* Program the DMA buffer, then clear both completion events before
	 * triggering STARTTX (is_tx_ready() relies on this ordering).
	 */
	nrf_uarte_tx_buffer_set(uarte, buf, len);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED);

	if (config->flags & UARTE_CFG_FLAG_LOW_POWER) {
		(void)uarte_enable(dev, UARTE_LOW_POWER_TX);
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);
	}

	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX);
}

#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE)
/* Disable the peripheral; with HW byte counting also stop the counter
 * timer and reset both RX byte counters.
 */
static void uart_disable(const struct device *dev)
{
#ifdef UARTE_ANY_ASYNC
	const struct uarte_nrfx_config *config = dev->config;
	struct uarte_nrfx_data *data = dev->data;

	if (data->async && HW_RX_COUNTING_ENABLED(config)) {
		nrfx_timer_disable(&config->timer);
		/* Timer/counter value is reset when disabled.
		 */
		data->async->rx_total_byte_cnt = 0;
		data->async->rx_total_user_byte_cnt = 0;
	}
#endif

	nrf_uarte_disable(get_uarte_instance(dev));
}
#endif

#ifdef UARTE_ANY_ASYNC

/* Timer callback is required by nrfx but the counter is read by polling. */
static void timer_handler(nrf_timer_event_t event_type, void *p_context) { }
static void rx_timeout(struct k_timer *timer);
static void tx_timeout(struct k_timer *timer);

/* Set up RX byte counting: either a TIMER in counter mode incremented from
 * the RXDRDY event over (D)PPI (HW counting), or the RXDRDY interrupt with a
 * software counter. Returns 0 on success, -EINVAL/-EIO on nrfx failures.
 */
static int uarte_nrfx_rx_counting_init(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	const struct uarte_nrfx_config *cfg = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int ret;

	if (HW_RX_COUNTING_ENABLED(cfg)) {
		nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG(
				NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg));

		tmr_config.mode = NRF_TIMER_MODE_COUNTER;
		tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32;
		ret = nrfx_timer_init(&cfg->timer,
				      &tmr_config,
				      timer_handler);
		if (ret != NRFX_SUCCESS) {
			LOG_ERR("Timer already initialized");
			return -EINVAL;
		} else {
			nrfx_timer_enable(&cfg->timer);
			nrfx_timer_clear(&cfg->timer);
		}

		ret = gppi_channel_alloc(&data->async->rx_cnt.ppi);
		if (ret != NRFX_SUCCESS) {
			LOG_ERR("Failed to allocate PPI Channel");
			nrfx_timer_uninit(&cfg->timer);
			return -EINVAL;
		}

#if CONFIG_HAS_HW_NRF_PPI
		/* Classic PPI: RXDRDY event -> TIMER COUNT task. */
		ret = nrfx_ppi_channel_assign(
			data->async->rx_cnt.ppi,
			nrf_uarte_event_address_get(uarte,
						    NRF_UARTE_EVENT_RXDRDY),
			nrfx_timer_task_address_get(&cfg->timer,
						    NRF_TIMER_TASK_COUNT));

		if (ret != NRFX_SUCCESS) {
			return -EIO;
		}
#else
		/* DPPI: publish RXDRDY and subscribe COUNT on one channel. */
		nrf_uarte_publish_set(uarte,
				      NRF_UARTE_EVENT_RXDRDY,
				      data->async->rx_cnt.ppi);
		nrf_timer_subscribe_set(cfg->timer.p_reg,
					NRF_TIMER_TASK_COUNT,
					data->async->rx_cnt.ppi);
#endif
		ret = gppi_channel_enable(data->async->rx_cnt.ppi);
		if (ret != NRFX_SUCCESS) {
			return -EIO;
		}
	} else {
		nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK);
	}

	return 0;
}

/* Driver init for async instances: set up byte counting, arm RX interrupts,
 * recover from RX left running by a bootloader, and init both timeout timers.
 */
static int uarte_nrfx_init(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	int ret = uarte_nrfx_rx_counting_init(dev);

	if (ret != 0) {
		return ret;
	}

	data->async->low_power_mask = UARTE_LOW_POWER_TX;
	nrf_uarte_int_enable(uarte,
			     NRF_UARTE_INT_ENDRX_MASK |
			     NRF_UARTE_INT_RXSTARTED_MASK |
			     NRF_UARTE_INT_ERROR_MASK |
			     NRF_UARTE_INT_RXTO_MASK);
	nrf_uarte_enable(uarte);

	/**
	 * Stop any currently running RX operations. This can occur when a
	 * bootloader sets up the UART hardware and does not clean it up
	 * before jumping to the next application.
	 */
	if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) {
		nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);
		while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) &&
		       !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) {
			/* Busy wait for event to register */
		}
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO);
	}

	k_timer_init(&data->async->rx_timeout_timer, rx_timeout, NULL);
	k_timer_user_data_set(&data->async->rx_timeout_timer, data);
	k_timer_init(&data->async->tx_timeout_timer, tx_timeout, NULL);
	k_timer_user_data_set(&data->async->tx_timeout_timer, data);

	return 0;
}

/* Attempt to start TX (asynchronous transfer). If hardware is not ready, then pending
 * flag is set. When current poll_out is completed, pending transfer is started.
 * Function must be called with interrupts locked.
 */
static void start_tx_locked(const struct device *dev, struct uarte_nrfx_data *data)
{
	if (!is_tx_ready(dev)) {
		/* Active poll out, postpone until it is completed. */
		data->async->pending_tx = true;
	} else {
		data->async->pending_tx = false;
		data->async->tx_amount = -1;
		tx_start(dev, data->async->xfer_buf, data->async->xfer_len);
	}
}

/* Setup cache buffer (used for sending data outside of RAM memory).
 * During setup data is copied to cache buffer and transfer length is set.
 *
 * @return True if cache was set, false if no more data to put in cache.
 */
static bool setup_tx_cache(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	const struct uarte_nrfx_config *config = dev->config;
	size_t remaining = data->async->tx_size - data->async->tx_cache_offset;

	if (!remaining) {
		return false;
	}

	/* Copy at most one cache-sized chunk; the caller advances
	 * tx_cache_offset between chunks.
	 */
	size_t len = MIN(remaining, CONFIG_UART_ASYNC_TX_CACHE_SIZE);

	data->async->xfer_len = len;
	data->async->xfer_buf = config->tx_cache;
	memcpy(config->tx_cache, &data->async->tx_buf[data->async->tx_cache_offset], len);

	return true;
}

/* True when RTS/CTS hardware flow control is active (from the runtime
 * configuration when available, otherwise from the static DT config).
 */
static bool has_hwfc(const struct device *dev)
{
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	struct uarte_nrfx_data *data = dev->data;

	return data->uart_config.flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS;
#else
	const struct uarte_nrfx_config *config = dev->config;

	return config->hw_config.hwfc == NRF_UARTE_HWFC_ENABLED;
#endif
}

/* uart_tx() implementation. Buffers outside of RAM (e.g. flash) are sent
 * chunk-wise through the RAM tx_cache since EasyDMA requires RAM sources.
 * Returns -EBUSY if a transfer is already in progress. The timeout timer is
 * only armed when flow control can actually stall the transfer.
 */
static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf,
			 size_t len,
			 int32_t timeout)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	unsigned int key = irq_lock();

	if (data->async->tx_size) {
		irq_unlock(key);
		return -EBUSY;
	}

	data->async->tx_size = len;
	data->async->tx_buf = buf;
	nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK);

	if (nrfx_is_in_ram(buf)) {
		data->async->xfer_buf = buf;
		data->async->xfer_len = len;
	} else {
		data->async->tx_cache_offset = 0;
		(void)setup_tx_cache(dev);
	}

	start_tx_locked(dev, data);

	irq_unlock(key);

	if (has_hwfc(dev) && timeout != SYS_FOREVER_US) {
		k_timer_start(&data->async->tx_timeout_timer, K_USEC(timeout), K_NO_WAIT);
	}
	return 0;
}

/* uart_tx_abort() implementation: stop the transmitter; -EFAULT when no
 * transfer is active.
 */
static int uarte_nrfx_tx_abort(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	if (data->async->tx_buf == NULL) {
		return -EFAULT;
	}

	data->async->pending_tx = false;
	k_timer_stop(&data->async->tx_timeout_timer);
	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX);

	return 0;
}

/* Invoke the user's async callback, if one was registered. */
static void user_callback(const struct device *dev, struct uart_event *evt)
{
	struct
	uarte_nrfx_data *data = dev->data;

	if (data->async->user_callback) {
		data->async->user_callback(dev, evt, data->async->user_data);
	}
}

/* Emit UART_RX_RDY for len bytes at the current offset of the active buffer. */
static void notify_uart_rx_rdy(const struct device *dev, size_t len)
{
	struct uarte_nrfx_data *data = dev->data;
	struct uart_event evt = {
		.type = UART_RX_RDY,
		.data.rx.buf = data->async->rx_buf,
		.data.rx.len = len,
		.data.rx.offset = data->async->rx_offset
	};

	user_callback(dev, &evt);
}

/* Emit UART_RX_BUF_RELEASED for *buf (if set) and clear the pointer. */
static void rx_buf_release(const struct device *dev, uint8_t **buf)
{
	if (*buf) {
		struct uart_event evt = {
			.type = UART_RX_BUF_RELEASED,
			.data.rx_buf.buf = *buf,
		};

		user_callback(dev, &evt);
		*buf = NULL;
	}
}

/* Emit UART_RX_DISABLED. */
static void notify_rx_disable(const struct device *dev)
{
	struct uart_event evt = {
		.type = UART_RX_DISABLED,
	};

	/* NOTE(review): the cast is redundant — evt already has this type. */
	user_callback(dev, (struct uart_event *)&evt);
}

/* uart_rx_enable() implementation. Returns -ENOTSUP for TX-only instances and
 * -EBUSY while RX is active or the previous disable is still draining the HW
 * FIFO. In low-power mode, bytes previously flushed from the HW FIFO are
 * delivered first; if they fill the whole buffer the request completes
 * without touching the peripheral.
 */
static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf,
				size_t len,
				int32_t timeout)
{
	struct uarte_nrfx_data *data = dev->data;
	const struct uarte_nrfx_config *cfg = dev->config;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	int ret = 0;

	if (cfg->disable_rx) {
		__ASSERT(false, "TX only UARTE instance");
		return -ENOTSUP;
	}

	/* Signal error if RX is already enabled or if the driver is waiting
	 * for the RXTO event after a call to uart_rx_disable() to discard
	 * data from the UARTE internal RX FIFO.
	 */
	if (data->async->rx_enabled || data->async->discard_rx_fifo) {
		return -EBUSY;
	}

	data->async->rx_timeout = timeout;
	data->async->rx_timeout_slab = timeout / RX_TIMEOUT_DIV;

	data->async->rx_buf = buf;
	data->async->rx_buf_len = len;
	data->async->rx_offset = 0;
	data->async->rx_next_buf = NULL;
	data->async->rx_next_buf_len = 0;

	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
		if (data->async->rx_flush_cnt) {
			int cpy_len = MIN(len, data->async->rx_flush_cnt);

			memcpy(buf, data->async->rx_flush_buffer, cpy_len);
			buf += cpy_len;
			len -= cpy_len;

			/* If flush content filled whole new buffer complete the
			 * request and indicate rx being disabled.
			 */
			if (!len) {
				data->async->rx_flush_cnt -= cpy_len;
				notify_uart_rx_rdy(dev, cpy_len);
				rx_buf_release(dev, &data->async->rx_buf);
				notify_rx_disable(dev);
				return 0;
			}
		}
	}

	nrf_uarte_rx_buffer_set(uarte, buf, len);

	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX);
	nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);

	data->async->rx_enabled = true;
	if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) {
		unsigned int key = irq_lock();

		ret = uarte_enable(dev, UARTE_LOW_POWER_RX);
		irq_unlock(key);
	}

	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX);

	return 0;
}

/* uart_rx_buf_rsp() implementation: register the user's next RX buffer and
 * arm the ENDRX->STARTRX short so reception continues seamlessly.
 * -EACCES when RX is not active, -EBUSY when a next buffer is already set.
 */
static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf,
				 size_t len)
{
	struct uarte_nrfx_data *data = dev->data;
	int err;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);
	unsigned int key = irq_lock();

	if (data->async->rx_buf == NULL) {
		err = -EACCES;
	} else if (data->async->rx_next_buf == NULL) {
		data->async->rx_next_buf = buf;
		data->async->rx_next_buf_len = len;
		nrf_uarte_rx_buffer_set(uarte, buf, len);
		nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
		err = 0;
	} else {
		err = -EBUSY;
	}

	irq_unlock(key);

	return err;
}

/* uart_callback_set() implementation; -ENOTSUP when the instance is not
 * built with async support.
 */
static int uarte_nrfx_callback_set(const struct device *dev,
				   uart_callback_t callback,
				   void *user_data)
{
	struct uarte_nrfx_data *data = dev->data;

	if (!data->async) {
		return -ENOTSUP;
	}

	data->async->user_callback = callback;
	data->async->user_data = user_data;

	return 0;
}

/* uart_rx_disable() implementation: tear down the next-buffer short, stop the
 * timeout timer and trigger STOPRX. Completion (buffer release and
 * UART_RX_DISABLED) is reported from the RXTO path; discard_rx_fifo keeps new
 * rx_enable calls out until the HW FIFO has been drained.
 */
static int uarte_nrfx_rx_disable(const struct device *dev)
{
	struct uarte_nrfx_data *data = dev->data;
	NRF_UARTE_Type *uarte = get_uarte_instance(dev);

	if (data->async->rx_buf == NULL) {
		return -EFAULT;
	}

	if (data->async->rx_next_buf != NULL) {
		nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX);
		nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED);
	}

	k_timer_stop(&data->async->rx_timeout_timer);
	data->async->rx_enabled = false;
	data->async->discard_rx_fifo = true;

	nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX);

	return 0;
}

/* TX timeout timer expiry: abort the stalled transfer. */
static void tx_timeout(struct k_timer *timer)
{
	struct
	uarte_nrfx_data *data = k_timer_user_data_get(timer);
	(void) uarte_nrfx_tx_abort(data->dev);
}

/**
 * Whole timeout is divided by RX_TIMEOUT_DIV into smaller units, rx_timeout
 * is executed periodically every rx_timeout_slab us. If between executions
 * data was received, then we start counting down time from start, if not, then
 * we subtract rx_timeout_slab from rx_timeout_left.
 * If rx_timeout_left is less than rx_timeout_slab it means that receiving has
 * timed out and we should tell user about that.
 */
static void rx_timeout(struct k_timer *timer)
{
	struct uarte_nrfx_data *data = k_timer_user_data_get(timer);
	const struct device *dev = data->dev;
	const struct uarte_nrfx_config *cfg = dev->config;
	uint32_t read;

	/* Skip entirely while endrx_isr() is running (see is_in_irq). */
	if (data->async->is_in_irq) {
		return;
	}

	/* Disable ENDRX ISR, in case ENDRX event is generated, it will be
	 * handled after rx_timeout routine is complete.
	 */
	nrf_uarte_int_disable(get_uarte_instance(dev),
			      NRF_UARTE_INT_ENDRX_MASK);

	/* Snapshot the byte counter (HW timer capture or SW RXDRDY count). */
	if (HW_RX_COUNTING_ENABLED(cfg)) {
		read = nrfx_timer_capture(&cfg->timer, 0);
	} else {
		read = data->async->rx_cnt.cnt;
	}

	/* Check if data was received since last function call */
	if (read != data->async->rx_total_byte_cnt) {
		data->async->rx_total_byte_cnt = read;
		data->async->rx_timeout_left = data->async->rx_timeout;
	}

	/* Check if there is data that was not sent to user yet
	 * Note though that 'len' is a count of data bytes received, but not
	 * necessarily the amount available in the current buffer
	 */
	int32_t len = data->async->rx_total_byte_cnt -
		      data->async->rx_total_user_byte_cnt;

	if (!HW_RX_COUNTING_ENABLED(cfg) &&
	    (len < 0)) {
		/* Prevent too low value of rx_cnt.cnt which may occur due to
		 * latencies in handling of the RXRDY interrupt.
		 * At this point, the number of received bytes is at least
		 * equal to what was reported to the user.
		 */
		data->async->rx_cnt.cnt = data->async->rx_total_user_byte_cnt;
		len = 0;
	}

	/* Check for current buffer being full.
	 * if the UART receives characters before the ENDRX is handled
	 * and the 'next' buffer is set up, then the SHORT between ENDRX and
	 * STARTRX will mean that data will be going into to the 'next' buffer
	 * until the ENDRX event gets a chance to be handled.
	 */
	bool clipped = false;

	if (len + data->async->rx_offset > data->async->rx_buf_len) {
		len = data->async->rx_buf_len - data->async->rx_offset;
		clipped = true;
	}

	if (len > 0) {
		if (clipped ||
		    (data->async->rx_timeout_left
		     < data->async->rx_timeout_slab)) {
			/* rx_timeout us elapsed since last receiving */
			if (data->async->rx_buf != NULL) {
				notify_uart_rx_rdy(dev, len);
				data->async->rx_offset += len;
				data->async->rx_total_user_byte_cnt += len;
			}
		} else {
			data->async->rx_timeout_left -=
				data->async->rx_timeout_slab;
		}

		/* If there's nothing left to report until the buffers are
		 * switched then the timer can be stopped
		 */
		if (clipped) {
			k_timer_stop(&data->async->rx_timeout_timer);
		}
	}

	nrf_uarte_int_enable(get_uarte_instance(dev),
			     NRF_UARTE_INT_ENDRX_MASK);
}

/* Map an ERRORSRC bitmask to the single highest-priority uart.h error code. */
#define UARTE_ERROR_FROM_MASK(mask)					\
	((mask) & NRF_UARTE_ERROR_OVERRUN_MASK ? UART_ERROR_OVERRUN	\
	 : (mask) & NRF_UARTE_ERROR_PARITY_MASK ? UART_ERROR_PARITY	\
	 : (mask) & NRF_UARTE_ERROR_FRAMING_MASK ? UART_ERROR_FRAMING	\
	 : (mask) & NRF_UARTE_ERROR_BREAK_MASK ?			\
UART_BREAK \ : 0) static void error_isr(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); uint32_t err = nrf_uarte_errorsrc_get_and_clear(uarte); struct uart_event evt = { .type = UART_RX_STOPPED, .data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err), }; user_callback(dev, &evt); (void) uarte_nrfx_rx_disable(dev); } static void rxstarted_isr(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; struct uart_event evt = { .type = UART_RX_BUF_REQUEST, }; user_callback(dev, &evt); if (data->async->rx_timeout != SYS_FOREVER_US) { data->async->rx_timeout_left = data->async->rx_timeout; k_timer_start(&data->async->rx_timeout_timer, K_USEC(data->async->rx_timeout_slab), K_USEC(data->async->rx_timeout_slab)); } } static void endrx_isr(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); data->async->is_in_irq = true; /* ensure rx timer is stopped - it will be restarted in RXSTARTED * handler if needed */ k_timer_stop(&data->async->rx_timeout_timer); /* this is the amount that the EasyDMA controller has copied into the * buffer */ const int rx_amount = nrf_uarte_rx_amount_get(uarte) + data->async->rx_flush_cnt; data->async->rx_flush_cnt = 0; /* The 'rx_offset' can be bigger than 'rx_amount', so it the length * of data we report back the user may need to be clipped. * This can happen because the 'rx_offset' count derives from RXRDY * events, which can occur already for the next buffer before we are * here to handle this buffer. 
(The next buffer is now already active * because of the ENDRX_STARTRX shortcut) */ int rx_len = rx_amount - data->async->rx_offset; if (rx_len < 0) { rx_len = 0; } data->async->rx_total_user_byte_cnt += rx_len; /* Only send the RX_RDY event if there is something to send */ if (rx_len > 0) { notify_uart_rx_rdy(dev, rx_len); } if (!data->async->rx_enabled) { data->async->is_in_irq = false; return; } rx_buf_release(dev, &data->async->rx_buf); /* If there is a next buffer, then STARTRX will have already been * invoked by the short (the next buffer will be filling up already) * and here we just do the swap of which buffer the driver is following, * the next rx_timeout() will update the rx_offset. */ unsigned int key = irq_lock(); if (data->async->rx_next_buf) { data->async->rx_buf = data->async->rx_next_buf; data->async->rx_buf_len = data->async->rx_next_buf_len; data->async->rx_next_buf = NULL; data->async->rx_next_buf_len = 0; data->async->rx_offset = 0; /* Check is based on assumption that ISR handler handles * ENDRX before RXSTARTED so if short was set on time, RXSTARTED * event will be set. */ if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); } /* Remove the short until the subsequent next buffer is setup */ nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); } else { nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); } irq_unlock(key); data->async->is_in_irq = false; } /* Function for flushing internal RX fifo. Function can be called in case * flushed data is discarded or when data is valid and needs to be retrieved. * * However, UARTE does not update RXAMOUNT register if fifo is empty. Old value * remains. In certain cases it makes it impossible to distinguish between * case when fifo was empty and not. 
Function is trying to minimize chances of * error with following measures: * - RXAMOUNT is read before flushing and compared against value after flushing * if they differ it indicates that data was flushed * - user buffer is dirtied and if RXAMOUNT did not changed it is checked if * it is still dirty. If not then it indicates that data was flushed * * In other cases function indicates that fifo was empty. It means that if * number of bytes in the fifo equal last rx transfer length and data is equal * to dirty marker it will be discarded. * * @param dev Device. * @param buf Buffer for flushed data, null indicates that flushed data can be * dropped. * @param len Buffer size, not used if @p buf is null. * * @return number of bytes flushed from the fifo. */ static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len) { /* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo*/ static const uint8_t dirty; NRF_UARTE_Type *uarte = get_uarte_instance(dev); uint32_t prev_rx_amount = nrf_uarte_rx_amount_get(uarte); uint8_t tmp_buf[UARTE_HW_RX_FIFO_SIZE]; uint8_t *flush_buf = buf ? buf : tmp_buf; size_t flush_len = buf ? len : sizeof(tmp_buf); if (buf) { memset(buf, dirty, len); flush_buf = buf; flush_len = len; } else { flush_buf = tmp_buf; flush_len = sizeof(tmp_buf); } nrf_uarte_rx_buffer_set(uarte, flush_buf, flush_len); /* Final part of handling RXTO event is in ENDRX interrupt * handler. ENDRX is generated as a result of FLUSHRX task. 
*/ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX); while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { /* empty */ } nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); if (!buf) { return nrf_uarte_rx_amount_get(uarte); } uint32_t rx_amount = nrf_uarte_rx_amount_get(uarte); if (rx_amount != prev_rx_amount) { return rx_amount; } for (int i = 0; i < flush_len; i++) { if (buf[i] != dirty) { return rx_amount; } } return 0; } static void async_uart_release(const struct device *dev, uint32_t dir_mask) { struct uarte_nrfx_data *data = dev->data; unsigned int key = irq_lock(); data->async->low_power_mask &= ~dir_mask; if (!data->async->low_power_mask) { if (dir_mask == UARTE_LOW_POWER_RX) { data->async->rx_flush_cnt = rx_flush(dev, data->async->rx_flush_buffer, sizeof(data->async->rx_flush_buffer)); } uart_disable(dev); int err = pins_state_change(dev, false); (void)err; __ASSERT_NO_MSG(err == 0); } irq_unlock(key); } /* This handler is called when the receiver is stopped. If rx was aborted * data from fifo is flushed. */ static void rxto_isr(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; rx_buf_release(dev, &data->async->rx_buf); rx_buf_release(dev, &data->async->rx_next_buf); /* This point can be reached in two cases: * 1. RX is disabled because all provided RX buffers have been filled. * 2. RX was explicitly disabled by a call to uart_rx_disable(). * In both cases, the rx_enabled flag is cleared, so that RX can be * enabled again. * In the second case, additionally, data from the UARTE internal RX * FIFO need to be discarded. 
*/ data->async->rx_enabled = false; if (data->async->discard_rx_fifo) { data->async->discard_rx_fifo = false; (void)rx_flush(dev, NULL, 0); } if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { async_uart_release(dev, UARTE_LOW_POWER_RX); } notify_rx_disable(dev); } static void txstopped_isr(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); unsigned int key; if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); async_uart_release(dev, UARTE_LOW_POWER_TX); if (!data->async->tx_size) { return; } } if (!data->async->tx_buf) { return; } key = irq_lock(); size_t amount = (data->async->tx_amount >= 0) ? data->async->tx_amount : nrf_uarte_tx_amount_get(uarte); irq_unlock(key); /* If there is a pending tx request, it means that uart_tx() * was called when there was ongoing uart_poll_out. Handling * TXSTOPPED interrupt means that uart_poll_out has completed. */ if (data->async->pending_tx) { key = irq_lock(); start_tx_locked(dev, data); irq_unlock(key); return; } /* Cache buffer is used because tx_buf wasn't in RAM. */ if (data->async->tx_buf != data->async->xfer_buf) { /* In that case setup next chunk. If that was the last chunk * fall back to reporting TX_DONE. */ if (amount == data->async->xfer_len) { data->async->tx_cache_offset += amount; if (setup_tx_cache(dev)) { key = irq_lock(); start_tx_locked(dev, data); irq_unlock(key); return; } /* Amount is already included in tx_cache_offset. */ amount = data->async->tx_cache_offset; } else { /* TX was aborted, include tx_cache_offset in amount. 
*/ amount += data->async->tx_cache_offset; } } k_timer_stop(&data->async->tx_timeout_timer); struct uart_event evt = { .data.tx.buf = data->async->tx_buf, .data.tx.len = amount, }; if (amount == data->async->tx_size) { evt.type = UART_TX_DONE; } else { evt.type = UART_TX_ABORTED; } nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); data->async->tx_buf = NULL; data->async->tx_size = 0; user_callback(dev, &evt); } static void uarte_nrfx_isr_async(const void *arg) { const struct device *dev = arg; NRF_UARTE_Type *uarte = get_uarte_instance(dev); const struct uarte_nrfx_config *config = dev->config; if (!HW_RX_COUNTING_ENABLED(config) && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) { struct uarte_nrfx_data *data = dev->data; nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); data->async->rx_cnt.cnt++; return; } if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR); error_isr(dev); } if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX) && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDRX_MASK)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); endrx_isr(dev); } /* RXSTARTED must be handled after ENDRX because it starts the RX timeout * and if order is swapped then ENDRX will stop this timeout. * Skip if ENDRX is set when RXSTARTED is set. It means that * ENDRX occurred after check for ENDRX in isr which may happen when * UARTE interrupt got preempted. Events are not cleared * and isr will be called again. ENDRX will be handled first. */ if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED) && !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); rxstarted_isr(dev); } /* RXTO must be handled after ENDRX which should notify the buffer. * Skip if ENDRX is set when RXTO is set. It means that * ENDRX occurred after check for ENDRX in isr which may happen when * UARTE interrupt got preempted. 
Events are not cleared * and isr will be called again. ENDRX will be handled first. */ if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) && !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO); rxto_isr(dev); } if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK)) { endtx_isr(dev); } if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_TXSTOPPED_MASK)) { txstopped_isr(dev); } } #endif /* UARTE_ANY_ASYNC */ /** * @brief Poll the device for input. * * @param dev UARTE device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer is empty. */ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) { const struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); #ifdef UARTE_ANY_ASYNC if (data->async) { return -ENOTSUP; } #endif if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { return -1; } *c = *data->rx_data; /* clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); return 0; } /** * @brief Output a character in polled mode. 
* * @param dev UARTE device struct * @param c Character to send */ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) { struct uarte_nrfx_data *data = dev->data; bool isr_mode = k_is_in_isr() || k_is_pre_kernel(); unsigned int key; if (isr_mode) { while (1) { key = irq_lock(); if (is_tx_ready(dev)) { #if UARTE_ANY_ASYNC if (data->async && data->async->tx_size && data->async->tx_amount < 0) { data->async->tx_amount = nrf_uarte_tx_amount_get( get_uarte_instance(dev)); } #endif break; } irq_unlock(key); Z_SPIN_DELAY(3); } } else { key = wait_tx_ready(dev); } *data->char_out = c; tx_start(dev, data->char_out, 1); irq_unlock(key); } #ifdef UARTE_INTERRUPT_DRIVEN /** Interrupt driven FIFO fill function */ static int uarte_nrfx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { struct uarte_nrfx_data *data = dev->data; len = MIN(len, data->int_driven->tx_buff_size); if (!atomic_cas(&data->int_driven->fifo_fill_lock, 0, 1)) { return 0; } /* Copy data to RAM buffer for EasyDMA transfer */ memcpy(data->int_driven->tx_buffer, tx_data, len); unsigned int key = irq_lock(); if (!is_tx_ready(dev)) { data->int_driven->fifo_fill_lock = 0; len = 0; } else { tx_start(dev, data->int_driven->tx_buffer, len); } irq_unlock(key); return len; } /** Interrupt driven FIFO read function */ static int uarte_nrfx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { int num_rx = 0; NRF_UARTE_Type *uarte = get_uarte_instance(dev); const struct uarte_nrfx_data *data = dev->data; if (size > 0 && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { /* Clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); /* Receive a character */ rx_data[num_rx++] = *data->rx_data; nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); } return num_rx; } /** Interrupt driven transfer enabling function */ static void uarte_nrfx_irq_tx_enable(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); struct 
uarte_nrfx_data *data = dev->data; unsigned int key = irq_lock(); data->int_driven->disable_tx_irq = false; nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); irq_unlock(key); } /** Interrupt driven transfer disabling function */ static void uarte_nrfx_irq_tx_disable(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; /* TX IRQ will be disabled after current transmission is finished */ data->int_driven->disable_tx_irq = true; } /** Interrupt driven transfer ready function */ static int uarte_nrfx_irq_tx_ready_complete(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); struct uarte_nrfx_data *data = dev->data; /* ENDTX flag is always on so that ISR is called when we enable TX IRQ. * Because of that we have to explicitly check if ENDTX interrupt is * enabled, otherwise this function would always return true no matter * what would be the source of interrupt. */ bool ready = !data->int_driven->disable_tx_irq && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); if (ready) { data->int_driven->fifo_fill_lock = 0; } return ready; } static int uarte_nrfx_irq_rx_ready(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); return nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX); } /** Interrupt driven receiver enabling function */ static void uarte_nrfx_irq_rx_enable(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK); } /** Interrupt driven receiver disabling function */ static void uarte_nrfx_irq_rx_disable(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK); } /** Interrupt driven error enabling function */ static void uarte_nrfx_irq_err_enable(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); nrf_uarte_int_enable(uarte, 
NRF_UARTE_INT_ERROR_MASK); } /** Interrupt driven error disabling function */ static void uarte_nrfx_irq_err_disable(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ERROR_MASK); } /** Interrupt driven pending status function */ static int uarte_nrfx_irq_is_pending(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); return ((nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_TXSTOPPED_MASK) && uarte_nrfx_irq_tx_ready_complete(dev)) || (nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDRX_MASK) && uarte_nrfx_irq_rx_ready(dev))); } /** Interrupt driven interrupt update function */ static int uarte_nrfx_irq_update(const struct device *dev) { return 1; } /** Set the callback function */ static void uarte_nrfx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uarte_nrfx_data *data = dev->data; data->int_driven->cb = cb; data->int_driven->cb_data = cb_data; } #endif /* UARTE_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_nrfx_uarte_driver_api = { .poll_in = uarte_nrfx_poll_in, .poll_out = uarte_nrfx_poll_out, .err_check = uarte_nrfx_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uarte_nrfx_configure, .config_get = uarte_nrfx_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef UARTE_ANY_ASYNC .callback_set = uarte_nrfx_callback_set, .tx = uarte_nrfx_tx, .tx_abort = uarte_nrfx_tx_abort, .rx_enable = uarte_nrfx_rx_enable, .rx_buf_rsp = uarte_nrfx_rx_buf_rsp, .rx_disable = uarte_nrfx_rx_disable, #endif /* UARTE_ANY_ASYNC */ #ifdef UARTE_INTERRUPT_DRIVEN .fifo_fill = uarte_nrfx_fifo_fill, .fifo_read = uarte_nrfx_fifo_read, .irq_tx_enable = uarte_nrfx_irq_tx_enable, .irq_tx_disable = uarte_nrfx_irq_tx_disable, .irq_tx_ready = uarte_nrfx_irq_tx_ready_complete, .irq_rx_enable = uarte_nrfx_irq_rx_enable, .irq_rx_disable = uarte_nrfx_irq_rx_disable, .irq_tx_complete = 
uarte_nrfx_irq_tx_ready_complete, .irq_rx_ready = uarte_nrfx_irq_rx_ready, .irq_err_enable = uarte_nrfx_irq_err_enable, .irq_err_disable = uarte_nrfx_irq_err_disable, .irq_is_pending = uarte_nrfx_irq_is_pending, .irq_update = uarte_nrfx_irq_update, .irq_callback_set = uarte_nrfx_irq_callback_set, #endif /* UARTE_INTERRUPT_DRIVEN */ }; static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte, struct uarte_nrfx_data *data) { nrfx_err_t ret; ret = gppi_channel_alloc(&data->ppi_ch_endtx); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); return -EIO; } nrfx_gppi_channel_endpoints_setup(data->ppi_ch_endtx, nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_ENDTX), nrf_uarte_task_address_get(uarte, NRF_UARTE_TASK_STOPTX)); nrfx_gppi_channels_enable(BIT(data->ppi_ch_endtx)); return 0; } static int uarte_instance_init(const struct device *dev, uint8_t interrupts_active) { int err; NRF_UARTE_Type *uarte = get_uarte_instance(dev); struct uarte_nrfx_data *data = dev->data; const struct uarte_nrfx_config *cfg = dev->config; nrf_uarte_disable(uarte); data->dev = dev; #ifdef CONFIG_ARCH_POSIX /* For simulation the DT provided peripheral address needs to be corrected */ ((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs; #endif err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE err = uarte_nrfx_configure(dev, &data->uart_config); if (err) { return err; } #else nrf_uarte_baudrate_set(uarte, cfg->baudrate); nrf_uarte_configure(uarte, &cfg->hw_config); #endif if (IS_ENABLED(UARTE_ENHANCED_POLL_OUT) && cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) { err = endtx_stoptx_ppi_init(uarte, data); if (err < 0) { return err; } } #ifdef UARTE_ANY_ASYNC if (data->async) { err = uarte_nrfx_init(dev); if (err < 0) { return err; } } else #endif { /* Enable receiver and transmitter */ nrf_uarte_enable(uarte); if (!cfg->disable_rx) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); 
nrf_uarte_rx_buffer_set(uarte, data->rx_data, 1); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); } } if (!(cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX)) { nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK); } if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); } /* Set TXSTOPPED event by requesting fake (zero-length) transfer. * Pointer to RAM variable (data->tx_buffer) is set because otherwise * such operation may result in HardFault or RAM corruption. */ nrf_uarte_tx_buffer_set(uarte, data->char_out, 0); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); /* switch off transmitter to save an energy */ nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); return 0; } #ifdef CONFIG_PM_DEVICE /** @brief Pend until TX is stopped. * * There are 2 configurations that must be handled: * - ENDTX->TXSTOPPED PPI enabled - just pend until TXSTOPPED event is set * - disable ENDTX interrupt and manually trigger STOPTX, pend for TXSTOPPED */ static void wait_for_tx_stopped(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; bool ppi_endtx = config->flags & UARTE_CFG_FLAG_PPI_ENDTX; NRF_UARTE_Type *uarte = get_uarte_instance(dev); bool res; if (!ppi_endtx) { /* We assume here that it can be called from any context, * including the one that uarte interrupt will not preempt. * Disable endtx interrupt to ensure that it will not be triggered * (if in lower priority context) and stop TX if necessary. 
*/ nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDTX_MASK); NRFX_WAIT_FOR(is_tx_ready(dev), 1000, 1, res); if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); } } NRFX_WAIT_FOR(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED), 1000, 1, res); if (!ppi_endtx) { nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK); } } static int uarte_nrfx_pm_action(const struct device *dev, enum pm_device_action action) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); #if defined(UARTE_ANY_ASYNC) || defined(UARTE_INTERRUPT_DRIVEN) struct uarte_nrfx_data *data = dev->data; #endif const struct uarte_nrfx_config *cfg = dev->config; int ret; #ifdef UARTE_ANY_ASYNC /* If low power mode for asynchronous mode is used then there is nothing to do here. * In low power mode UARTE is turned off whenever there is no activity. */ if (data->async && (cfg->flags & UARTE_CFG_FLAG_LOW_POWER)) { return 0; } #endif switch (action) { case PM_DEVICE_ACTION_RESUME: ret = pins_state_change(dev, true); if (ret < 0) { return ret; } nrf_uarte_enable(uarte); #ifdef UARTE_ANY_ASYNC if (data->async) { if (HW_RX_COUNTING_ENABLED(cfg)) { nrfx_timer_enable(&cfg->timer); } return 0; } #endif if (!cfg->disable_rx) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); #ifdef UARTE_INTERRUPT_DRIVEN if (data->int_driven && data->int_driven->rx_irq_enabled) { nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK); } #endif } break; case PM_DEVICE_ACTION_SUSPEND: /* Disabling UART requires stopping RX, but stop RX event is * only sent after each RX if async UART API is used. */ #ifdef UARTE_ANY_ASYNC if (data->async) { /* Entering inactive state requires device to be no * active asynchronous calls. 
*/ __ASSERT_NO_MSG(!data->async->rx_enabled); __ASSERT_NO_MSG(!data->async->tx_size); } #endif if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { #ifdef UARTE_INTERRUPT_DRIVEN if (data->int_driven) { data->int_driven->rx_irq_enabled = nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDRX_MASK); if (data->int_driven->rx_irq_enabled) { nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK); } } #endif nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) && !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) { /* Busy wait for event to register */ Z_SPIN_DELAY(2); } nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); } wait_for_tx_stopped(dev); uart_disable(dev); ret = pins_state_change(dev, false); if (ret < 0) { return ret; } break; default: return -ENOTSUP; } return 0; } #endif /* CONFIG_PM_DEVICE */ #define UARTE(idx) DT_NODELABEL(uart##idx) #define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop) #define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop) #define UARTE_IRQ_CONFIGURE(idx, isr_handler) \ do { \ IRQ_CONNECT(DT_IRQN(UARTE(idx)), DT_IRQ(UARTE(idx), priority), \ isr_handler, DEVICE_DT_GET(UARTE(idx)), 0); \ irq_enable(DT_IRQN(UARTE(idx))); \ } while (false) /* Low power mode is used when disable_rx is not defined or in async mode if * kconfig option is enabled. */ #define USE_LOW_POWER(idx) \ ((!UARTE_PROP(idx, disable_rx) && \ COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \ (!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)), \ (1))) ? 
0 : UARTE_CFG_FLAG_LOW_POWER) #define UARTE_DISABLE_RX_INIT(node_id) \ .disable_rx = DT_PROP(node_id, disable_rx) #define UARTE_GET_FREQ(idx) DT_PROP(DT_CLOCKS_CTLR(UARTE(idx)), clock_frequency) #define UARTE_GET_BAUDRATE_DIV(idx) \ COND_CODE_1(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \ ((UARTE_GET_FREQ(idx) / NRF_UARTE_BASE_FREQUENCY_16MHZ)), (1)) /* When calculating baudrate we need to take into account that some instances * must have baudrate adjusted to the ratio between UARTE clocking frequency and 16 MHz. */ #define UARTE_GET_BAUDRATE(idx) \ (NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) / UARTE_GET_BAUDRATE_DIV(idx)) /* Macro for setting nRF specific configuration structures. */ #define UARTE_NRF_CONFIG(idx) { \ .hwfc = (UARTE_PROP(idx, hw_flow_control) == \ UART_CFG_FLOW_CTRL_RTS_CTS) ? \ NRF_UARTE_HWFC_ENABLED : NRF_UARTE_HWFC_DISABLED, \ .parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) ? \ NRF_UARTE_PARITY_INCLUDED : NRF_UARTE_PARITY_EXCLUDED, \ IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\ IF_ENABLED(UARTE_ODD_PARITY_ALLOWED, \ (.paritytype = NRF_UARTE_PARITYTYPE_EVEN,)) \ } /* Macro for setting zephyr specific configuration structures. */ #define UARTE_CONFIG(idx) { \ .baudrate = UARTE_PROP(idx, current_speed), \ .data_bits = UART_CFG_DATA_BITS_8, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .parity = IS_ENABLED(CONFIG_UART_##idx##_NRF_PARITY_BIT) \ ? UART_CFG_PARITY_EVEN \ : UART_CFG_PARITY_NONE, \ .flow_ctrl = UARTE_PROP(idx, hw_flow_control) \ ? 
UART_CFG_FLOW_CTRL_RTS_CTS \ : UART_CFG_FLOW_CTRL_NONE, \ } #define UART_NRF_UARTE_DEVICE(idx) \ NRF_DT_CHECK_NODE_HAS_PINCTRL_SLEEP(UARTE(idx)); \ UARTE_INT_DRIVEN(idx); \ PINCTRL_DT_DEFINE(UARTE(idx)); \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, ( \ static uint8_t \ uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \ UARTE_MEMORY_SECTION(idx); \ struct uarte_async_cb uarte##idx##_async;)) \ static uint8_t uarte##idx##_char_out UARTE_MEMORY_SECTION(idx); \ static uint8_t uarte##idx##_rx_data UARTE_MEMORY_SECTION(idx); \ static struct uarte_nrfx_data uarte_##idx##_data = { \ .char_out = &uarte##idx##_char_out, \ .rx_data = &uarte##idx##_rx_data, \ IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE, \ (.uart_config = UARTE_CONFIG(idx),)) \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ (.async = &uarte##idx##_async,)) \ IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \ (.int_driven = &uarte##idx##_int_driven,)) \ }; \ COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, (), \ (BUILD_ASSERT(NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) > 0,\ "Unsupported baudrate");)) \ static const struct uarte_nrfx_config uarte_##idx##z_config = { \ COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, (), \ (.baudrate = UARTE_GET_BAUDRATE(idx), \ .hw_config = UARTE_NRF_CONFIG(idx),)) \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \ .uarte_regs = _CONCAT(NRF_UARTE, idx), \ .flags = \ (IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \ UARTE_CFG_FLAG_GPIO_MGMT : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ? \ UARTE_CFG_FLAG_PPI_ENDTX : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC) ? 
\ UARTE_CFG_FLAG_HW_BYTE_COUNTING : 0) | \ USE_LOW_POWER(idx), \ UARTE_DISABLE_RX_INIT(UARTE(idx)), \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ (.tx_cache = uarte##idx##_tx_cache,)) \ IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ (.timer = NRFX_TIMER_INSTANCE( \ CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \ IF_ENABLED(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \ (.clock_freq = DT_PROP(DT_CLOCKS_CTLR(UARTE(idx)), \ clock_frequency),)) \ }; \ static int uarte_##idx##_init(const struct device *dev) \ { \ COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \ (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_async);), \ (UARTE_IRQ_CONFIGURE(idx, uarte_nrfx_isr_int);)) \ return uarte_instance_init( \ dev, \ IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN)); \ } \ \ PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action); \ \ DEVICE_DT_DEFINE(UARTE(idx), \ uarte_##idx##_init, \ PM_DEVICE_DT_GET(UARTE(idx)), \ &uarte_##idx##_data, \ &uarte_##idx##z_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_nrfx_uarte_driver_api) #define UARTE_INT_DRIVEN(idx) \ IF_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN, \ (static uint8_t uarte##idx##_tx_buffer \ [MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE, \ BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))] \ UARTE_MEMORY_SECTION(idx); \ static struct uarte_nrfx_int_driven \ uarte##idx##_int_driven = { \ .tx_buffer = uarte##idx##_tx_buffer, \ .tx_buff_size = sizeof(uarte##idx##_tx_buffer),\ };)) #define UARTE_MEMORY_SECTION(idx) \ COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions), \ (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \ DT_PHANDLE(UARTE(idx), memory_regions)))))), \ ()) #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \ IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);)) UARTE_FOR_EACH_INSTANCE(COND_UART_NRF_UARTE_DEVICE, (), ()) ```
/content/code_sandbox/drivers/serial/uart_nrfx_uarte.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
17,938
```unknown config UART_ITE_IT8XXX2 bool "ITE IT8XXX2 UART driver" default y select UART_NS16550_ITE_HIGH_SPEED_BAUDRATE depends on DT_HAS_ITE_IT8XXX2_UART_ENABLED help IT8XXX2 uses shared ns16550.c driver which does not provide a power management callback, so create driver to handle IT8XXX2 specific UART features. In addition to use pm_action_cb, we also need to make some setting at uart_it8xxx2_init. config UART_ITE_IT8XXX2_INIT_PRIORITY int "ITE IT8XXX2 UART wrapper init priority" default 51 depends on UART_ITE_IT8XXX2 help Initialization priority for the UART wrapper driver on ITE IT8XXX2, must be set to a lower priority than the matching ns16550 device (CONFIG_SERIAL_INIT_PRIORITY). ```
/content/code_sandbox/drivers/serial/Kconfig.it8xxx2
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
196
```unknown # # NUVOTON UART configuration # # Author: Saravanan Sekar <saravanan@linumiz.com> config UART_NUMICRO bool "NUVOTON MCU serial driver" default y depends on DT_HAS_NUVOTON_NUMICRO_UART_ENABLED select SERIAL_HAS_DRIVER select HAS_NUMICRO_UART help This option enables the UART driver for Nuvoton Numicro family of processors. Say y to use serial port on Nuvoton MCU. ```
/content/code_sandbox/drivers/serial/Kconfig.numicro
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
109
```unknown # Virtual UART HOSTLINK driver option config UART_HOSTLINK bool "ARC HOSTLINK UART driver" default y depends on ARC depends on DT_HAS_SNPS_HOSTLINK_UART_ENABLED select SERIAL_HAS_DRIVER help This option enables access to HOSTLINK channel as UART device. ```
/content/code_sandbox/drivers/serial/Kconfig.hostlink
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
63
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SERIAL_UART_PL011_RASPBERRYPI_PICO_H_ #define ZEPHYR_DRIVERS_SERIAL_UART_PL011_RASPBERRYPI_PICO_H_ #define RASPBERRYPI_PICO_UART_CLOCK_CTLR_SUBSYS_CELL clk_id #define RASPBERRYPI_PICO_UART_DEFINE(n) \ static inline int pwr_on_raspberrypi_pico_uart_##n(void) \ { \ return 0; \ } \ static inline int clk_enable_raspberrypi_pico_uart_##n(const struct device *dev, \ uint32_t clk) \ { \ return 0; \ } #endif /* ZEPHYR_DRIVERS_SERIAL_UART_PL011_RASPBERRYPI_PICO_H_ */ ```
/content/code_sandbox/drivers/serial/uart_pl011_raspberrypi_pico.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
181
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> /* Register offsets within the UART device register space. */ #define UART_INTR_STATE_REG_OFFSET 0x0 #define UART_INTR_ENABLE_REG_OFFSET 0x4 #define UART_CTRL_REG_OFFSET 0x10 #define UART_STATUS_REG_OFFSET 0x14 #define UART_RDATA_REG_OFFSET 0x18 #define UART_WDATA_REG_OFFSET 0x1c #define UART_FIFO_CTRL_REG_OFFSET 0x20 #define UART_OVRD_REG_OFFSET 0x28 #define UART_TIMEOUT_CTRL_REG_OFFSET 0x30 /* Control register bits. */ #define UART_CTRL_TX_BIT BIT(0) #define UART_CTRL_RX_BIT BIT(1) #define UART_CTRL_NCO_OFFSET 16 /* FIFO control register bits. */ #define UART_FIFO_CTRL_RXRST_BIT BIT(0) #define UART_FIFO_CTRL_TXRST_BIT BIT(1) /* Status register bits. */ #define UART_STATUS_TXFULL_BIT BIT(0) #define UART_STATUS_RXEMPTY_BIT BIT(5) #define DT_DRV_COMPAT lowrisc_opentitan_uart struct uart_opentitan_config { mem_addr_t base; uint32_t nco_reg; }; static int uart_opentitan_init(const struct device *dev) { const struct uart_opentitan_config *cfg = dev->config; /* Reset settings. */ sys_write32(0u, cfg->base + UART_CTRL_REG_OFFSET); /* Clear FIFOs. */ sys_write32(UART_FIFO_CTRL_RXRST_BIT | UART_FIFO_CTRL_TXRST_BIT, cfg->base + UART_FIFO_CTRL_REG_OFFSET); /* Clear other states. */ sys_write32(0u, cfg->base + UART_OVRD_REG_OFFSET); sys_write32(0u, cfg->base + UART_TIMEOUT_CTRL_REG_OFFSET); /* Disable interrupts. */ sys_write32(0u, cfg->base + UART_INTR_ENABLE_REG_OFFSET); /* Clear interrupts. */ sys_write32(0xffffffffu, cfg->base + UART_INTR_STATE_REG_OFFSET); /* Set baud and enable TX and RX. 
*/ sys_write32(UART_CTRL_TX_BIT | UART_CTRL_RX_BIT | (cfg->nco_reg << UART_CTRL_NCO_OFFSET), cfg->base + UART_CTRL_REG_OFFSET); return 0; } static int uart_opentitan_poll_in(const struct device *dev, unsigned char *c) { const struct uart_opentitan_config *cfg = dev->config; if (sys_read32(cfg->base + UART_STATUS_REG_OFFSET) & UART_STATUS_RXEMPTY_BIT) { /* Empty RX FIFO */ return -1; } *c = sys_read32(cfg->base + UART_RDATA_REG_OFFSET); return 0; } static void uart_opentitan_poll_out(const struct device *dev, unsigned char c) { const struct uart_opentitan_config *cfg = dev->config; /* Wait for space in the TX FIFO */ while (sys_read32(cfg->base + UART_STATUS_REG_OFFSET) & UART_STATUS_TXFULL_BIT) { ; } sys_write32(c, cfg->base + UART_WDATA_REG_OFFSET); } static const struct uart_driver_api uart_opentitan_driver_api = { .poll_in = uart_opentitan_poll_in, .poll_out = uart_opentitan_poll_out, }; /* The baud rate is set by writing to the CTRL.NCO register, which is * calculated based on baud ticks per system clock tick multiplied by a * predefined scaler value. */ #define NCO_REG(baud, clk) (BIT64(20) * (baud) / (clk)) #define UART_OPENTITAN_INIT(n) \ static struct uart_opentitan_config uart_opentitan_config_##n = \ { \ .base = DT_INST_REG_ADDR(n), \ .nco_reg = NCO_REG(DT_INST_PROP(n, current_speed), \ DT_INST_PROP(n, clock_frequency)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, uart_opentitan_init, NULL, NULL, \ &uart_opentitan_config_##n, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_opentitan_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_OPENTITAN_INIT) ```
/content/code_sandbox/drivers/serial/uart_opentitan.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
922
```unknown config UART_BT bool "UART over NUS Bluetooth LE" depends on BT_ZEPHYR_NUS depends on DT_HAS_ZEPHYR_NUS_UART_ENABLED select UART_INTERRUPT_DRIVEN select RING_BUFFER select EXPERIMENTAL help Enable the UART over NUS Bluetooth driver, which can be used to pipe serial data over Bluetooth LE GATT using NUS (Nordic UART Service). if UART_BT config UART_BT_WORKQUEUE_PRIORITY int "UART NUS Work-queue Priority" default MAIN_THREAD_PRIORITY help Select UART NUS Work-queue priority based on the application context. config UART_BT_WORKQUEUE_STACK_SIZE int "UART NUS Work-queue Stack Size" default 1024 help Set UART NUS Work-queue Stack-size based on the application context. endif ```
/content/code_sandbox/drivers/serial/Kconfig.bt
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
180
```c /* * */ /** * @brief UART driver for Intel FPGA UART Core IP * Reference : Embedded Peripherals IP User Guide : 11. UART Core * Limitations: * 1. User should consider to always use polling mode, as IP core does not have fifo. * So IP can only send/receive 1 character at a time. * 2. CTS and RTS is purely software controlled. Assertion might not be on time. * 3. Full duplex mode is not supported. */ #define DT_DRV_COMPAT altr_uart #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/serial/uart_altera.h> #ifdef CONFIG_UART_LINE_CTRL #ifndef CONFIG_UART_INTERRUPT_DRIVEN /* CTS and RTS is purely software controlled. */ #error "uart_altera.c: Must enable UART_INTERRUPT_DRIVEN for line control!" #endif #endif /* register offsets */ #define ALTERA_AVALON_UART_OFFSET (0x4) #define ALTERA_AVALON_UART_RXDATA_REG_OFFSET (0 * ALTERA_AVALON_UART_OFFSET) #define ALTERA_AVALON_UART_TXDATA_REG_OFFSET (1 * ALTERA_AVALON_UART_OFFSET) #define ALTERA_AVALON_UART_STATUS_REG_OFFSET (2 * ALTERA_AVALON_UART_OFFSET) #define ALTERA_AVALON_UART_CONTROL_REG_OFFSET (3 * ALTERA_AVALON_UART_OFFSET) #define ALTERA_AVALON_UART_DIVISOR_REG_OFFSET (4 * ALTERA_AVALON_UART_OFFSET) #define ALTERA_AVALON_UART_EOP_REG_OFFSET (5 * ALTERA_AVALON_UART_OFFSET) /*status register mask */ #define ALTERA_AVALON_UART_STATUS_PE_MSK (0x1) #define ALTERA_AVALON_UART_STATUS_FE_MSK (0x2) #define ALTERA_AVALON_UART_STATUS_BRK_MSK (0x4) #define ALTERA_AVALON_UART_STATUS_ROE_MSK (0x8) #define ALTERA_AVALON_UART_STATUS_TMT_MSK (0x20) #define ALTERA_AVALON_UART_STATUS_TRDY_MSK (0x40) #define ALTERA_AVALON_UART_STATUS_RRDY_MSK (0x80) #define ALTERA_AVALON_UART_STATUS_DCTS_MSK (0x400) #define ALTERA_AVALON_UART_STATUS_CTS_MSK (0x800) #define ALTERA_AVALON_UART_STATUS_E_MSK (0x100) #define ALTERA_AVALON_UART_STATUS_EOP_MSK (0x1000) /* control register mask */ #define ALTERA_AVALON_UART_CONTROL_TMT_MSK (0x20) #define ALTERA_AVALON_UART_CONTROL_TRDY_MSK (0x40) #define 
ALTERA_AVALON_UART_CONTROL_RRDY_MSK (0x80) #define ALTERA_AVALON_UART_CONTROL_E_MSK (0x100) #define ALTERA_AVALON_UART_CONTROL_DCTS_MSK (0x400) #define ALTERA_AVALON_UART_CONTROL_RTS_MSK (0x800) #define ALTERA_AVALON_UART_CONTROL_EOP_MSK (0x1000) /* defined values */ #define UART_ALTERA_NO_ERROR (0u) #define ALTERA_AVALON_UART_CLEAR_STATUS_VAL (0u) #define ALTERA_AVALON_UART_PENDING_MASK (ALTERA_AVALON_UART_STATUS_RRDY_MSK | \ ALTERA_AVALON_UART_STATUS_TRDY_MSK | ALTERA_AVALON_UART_STATUS_E_MSK | \ ALTERA_AVALON_UART_STATUS_EOP_MSK) /***********************/ /* configuration flags */ /* * The value ALT_AVALON_UART_FB is a value set in the devices flag field to * indicate that the device has a fixed baud rate; i.e. if this flag is set * software can not control the baud rate of the device. */ #define ALT_AVALON_UART_FB 0x1 /* * The value ALT_AVALON_UART_FC is a value set in the device flag field to * indicate the device is using flow control, i.e. the driver must * throttle on transmit if the nCTS pin is low. */ #define ALT_AVALON_UART_FC 0x2 /* end of configuration flags */ /******************************/ /* device data */ struct uart_altera_device_data { struct uart_config uart_cfg; /* stores uart config from device tree*/ struct k_spinlock lock; uint32_t status_act; /* stores value of status register. */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t cb; /**< Callback function pointer */ void *cb_data; /**< Callback function arg */ #ifdef CONFIG_UART_ALTERA_EOP uint8_t set_eop_cb; uart_irq_callback_user_data_t cb_eop; /**< Callback function pointer */ void *cb_data_eop; /**< Callback function arg */ #endif /* CONFIG_UART_ALTERA_EOP */ #ifdef CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND uint8_t dcts_rising; #endif /*CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND*/ uint32_t control_val; /* stores value to set control register. */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; /* * device config: * stores data that cannot be changed during run time. 
*/ struct uart_altera_device_config { mm_reg_t base; uint32_t flags; /* refer to configuration flags */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; unsigned int irq_num; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN /** * function prototypes */ static int uart_altera_irq_update(const struct device *dev); static int uart_altera_irq_tx_ready(const struct device *dev); static int uart_altera_irq_rx_ready(const struct device *dev); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ /** * @brief Poll the device for input. * * This is a non-blocking function. * * @param dev UART device instance * @param p_char Pointer to character * * @return 0 if a character arrived, -1 if input buffer is empty. * -EINVAL if p_char is null pointer */ static int uart_altera_poll_in(const struct device *dev, unsigned char *p_char) { const struct uart_altera_device_config *config = dev->config; struct uart_altera_device_data *data = dev->data; int ret_val = -1; uint32_t status; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(p_char != NULL, "p_char is null pointer!"); /* Stop, if p_char is null pointer */ if (p_char == NULL) { return -EINVAL; } k_spinlock_key_t key = k_spin_lock(&data->lock); /* check if received character is ready.*/ status = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); if (status & ALTERA_AVALON_UART_STATUS_RRDY_MSK) { /* got a character */ *p_char = sys_read32(config->base + ALTERA_AVALON_UART_RXDATA_REG_OFFSET); ret_val = 0; } k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief Output a character in polled mode. * * This function will block until transmitter is ready. * Then, a character will be transmitted. 
* * @param dev UART device instance * @param c Character to send */ static void uart_altera_poll_out(const struct device *dev, unsigned char c) { const struct uart_altera_device_config *config = dev->config; struct uart_altera_device_data *data = dev->data; uint32_t status; k_spinlock_key_t key = k_spin_lock(&data->lock); do { /* wait until uart is free to transmit.*/ status = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); } while ((status & ALTERA_AVALON_UART_STATUS_TRDY_MSK) == 0); sys_write32(c, config->base + ALTERA_AVALON_UART_TXDATA_REG_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Initialise an instance of the driver * * This function initialise the interrupt configuration for the driver. * * @param dev UART device instance * * @return 0 to indicate success. */ static int uart_altera_init(const struct device *dev) { #ifdef CONFIG_UART_INTERRUPT_DRIVEN struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; /* clear status to ensure, that interrupts are not triggered due to old status. */ sys_write32(ALTERA_AVALON_UART_CLEAR_STATUS_VAL, config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); /* * Enable hardware interrupt. * The corresponding csr from IP still needs to be set, * so that the IP generates interrupt signal. */ config->irq_config_func(dev); #ifdef CONFIG_UART_LINE_CTRL /* Enable DCTS interrupt. */ data->control_val = ALTERA_AVALON_UART_CONTROL_DCTS_MSK; #endif /* CONFIG_UART_LINE_CTRL */ sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ return 0; } /** * @brief Check if an error was received * If error is received, it will be mapped to uart_rx_stop_reason. * This function should be called after irq_update. * If interrupt driven API is not enabled, * this function will read and clear the status register. 
* * @param dev UART device struct * * @return UART_ERROR_OVERRUN, UART_ERROR_PARITY, UART_ERROR_FRAMING, * UART_BREAK if an error was detected, 0 otherwise. */ static int uart_altera_err_check(const struct device *dev) { struct uart_altera_device_data *data = dev->data; int err = UART_ALTERA_NO_ERROR; #ifndef CONFIG_UART_INTERRUPT_DRIVEN const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); #endif if (data->status_act & ALTERA_AVALON_UART_STATUS_E_MSK) { if (data->status_act & ALTERA_AVALON_UART_STATUS_PE_MSK) { err |= UART_ERROR_PARITY; } if (data->status_act & ALTERA_AVALON_UART_STATUS_FE_MSK) { err |= UART_ERROR_FRAMING; } if (data->status_act & ALTERA_AVALON_UART_STATUS_BRK_MSK) { err |= UART_BREAK; } if (data->status_act & ALTERA_AVALON_UART_STATUS_ROE_MSK) { err |= UART_ERROR_OVERRUN; } } #ifndef CONFIG_UART_INTERRUPT_DRIVEN /* clear status */ sys_write32(ALTERA_AVALON_UART_CLEAR_STATUS_VAL, config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); k_spin_unlock(&data->lock, key); #endif return err; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE /*** * @brief helper function to check, if the configuration is support. * @param cfg_stored : The original configuration. * @param cfg_in : The input configuration. * @return true if only baudrate is changed. otherwise false. */ static bool uart_altera_check_configuration(const struct uart_config *cfg_stored, const struct uart_config *cfg_in) { bool ret_val = false; if ((cfg_stored->parity == cfg_in->parity) && (cfg_stored->stop_bits == cfg_in->stop_bits) && (cfg_stored->data_bits == cfg_in->data_bits) && (cfg_stored->flow_ctrl == cfg_in->flow_ctrl)) { ret_val = true; } return ret_val; } /** * @brief Set UART configuration using data from *cfg_in. * * @param dev UART : Device struct * @param cfg_in : The input configuration. 
* * @return 0 if success, -ENOTSUP, if input from cfg_in is not configurable. * -EINVAL if cfg_in is null pointer */ static int uart_altera_configure(const struct device *dev, const struct uart_config *cfg_in) { const struct uart_altera_device_config *config = dev->config; struct uart_altera_device_data * const data = dev->data; struct uart_config * const cfg_stored = &data->uart_cfg; uint32_t divisor_val; int ret_val; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(cfg_in != NULL, "cfg_in is null pointer!"); /* Stop, if cfg_in is null pointer */ if (cfg_in == NULL) { return -EINVAL; } /* check if configuration is supported. */ if (uart_altera_check_configuration(cfg_stored, cfg_in) && !(config->flags & ALT_AVALON_UART_FB)) { /* calculate and set baudrate. */ divisor_val = (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC/cfg_in->baudrate) - 1; sys_write32(divisor_val, config->base + ALTERA_AVALON_UART_DIVISOR_REG_OFFSET); /* update stored data. */ cfg_stored->baudrate = cfg_in->baudrate; ret_val = 0; } else { /* return not supported */ ret_val = -ENOTSUP; } return ret_val; } /** * @brief Get UART configuration and stores in *cfg_out. * * @param dev UART : Device struct * @param cfg_out : The output configuration. * * @return 0 if success. * -EINVAL if cfg_out is null pointer */ static int uart_altera_config_get(const struct device *dev, struct uart_config *cfg_out) { const struct uart_altera_device_data *data = dev->data; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(cfg_out != NULL, "cfg_out is null pointer!"); /* Stop, if cfg_out is null pointer */ if (cfg_out == NULL) { return -EINVAL; } *cfg_out = data->uart_cfg; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Fill FIFO with data * This function is expected to be called from UART interrupt handler (ISR), * if uart_irq_tx_ready() returns true. This function does not block! * IP has no fifo. Hence only 1 data can be sent at a time! 
* * @param dev UART device struct * @param tx_data Data to transmit * @param size Number of bytes to send (unused) * * @return Number of bytes sent */ static int uart_altera_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { ARG_UNUSED(size); const struct uart_altera_device_config *config = dev->config; struct uart_altera_device_data *data = dev->data; int ret_val; /* generate fatal error if CONFIG_ASSERT is enabled. */ __ASSERT(tx_data != NULL, "tx_data is null pointer!"); /* Stop, if tx_data is null pointer */ if (tx_data == NULL) { return 0; } k_spinlock_key_t key = k_spin_lock(&data->lock); if (data->status_act & ALTERA_AVALON_UART_STATUS_TRDY_MSK) { sys_write32(*tx_data, config->base + ALTERA_AVALON_UART_TXDATA_REG_OFFSET); ret_val = 1; /* function may be called in a loop. update the actual status! */ data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); } else { ret_val = 0; } #ifdef CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND /* clear and CTS rising edge! */ data->dcts_rising = 0; #endif /* CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND */ k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief Read data from FIFO * This function is expected to be called from UART interrupt handler (ISR), * if uart_irq_rx_ready() returns true. * IP has no fifo. Hence only 1 data can be read at a time! * * @param dev UART device struct * @param rx_data Data container * @param size Container size * * @return Number of bytes read */ static int uart_altera_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { ARG_UNUSED(size); const struct uart_altera_device_config *config = dev->config; struct uart_altera_device_data *data = dev->data; int ret_val; /* generate fatal error if CONFIG_ASSERT is enabled. 
*/ __ASSERT(rx_data != NULL, "rx_data is null pointer!"); /* Stop, if rx_data is null pointer */ if (rx_data == NULL) { return 0; } k_spinlock_key_t key = k_spin_lock(&data->lock); if (data->status_act & ALTERA_AVALON_UART_STATUS_RRDY_MSK) { *rx_data = sys_read32(config->base + ALTERA_AVALON_UART_RXDATA_REG_OFFSET); ret_val = 1; /* function may be called in a loop. update the actual status! */ data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); } else { ret_val = 0; } #ifdef CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND /* assert RTS as soon as rx data is read, as IP has no fifo. */ data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); if (((data->status_act & ALTERA_AVALON_UART_STATUS_RRDY_MSK) == 0) && (data->status_act & ALTERA_AVALON_UART_STATUS_CTS_MSK)) { data->control_val |= ALTERA_AVALON_UART_CONTROL_RTS_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); } #endif /* CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND */ k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief Enable TX interrupt * * @param dev UART device struct */ static void uart_altera_irq_tx_enable(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); data->control_val |= ALTERA_AVALON_UART_CONTROL_TRDY_MSK; #ifdef CONFIG_UART_LINE_CTRL /* also enable RTS, if flow control is enabled. 
*/ data->control_val |= ALTERA_AVALON_UART_CONTROL_RTS_MSK; #endif sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Disable TX interrupt * * @param dev UART device struct */ static void uart_altera_irq_tx_disable(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); data->control_val &= ~ALTERA_AVALON_UART_CONTROL_TRDY_MSK; #ifdef CONFIG_UART_LINE_CTRL /* also disable RTS, if flow control is enabled. */ data->control_val &= ~ALTERA_AVALON_UART_CONTROL_RTS_MSK; #endif sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Check if UART TX buffer can accept a new char. * * @param dev UART device struct * * @return 1 if TX interrupt is enabled and at least one char can be written to UART. * 0 if device is not ready to write a new byte. */ static int uart_altera_irq_tx_ready(const struct device *dev) { struct uart_altera_device_data *data = dev->data; int ret_val = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); /* if TX interrupt is enabled */ if (data->control_val & ALTERA_AVALON_UART_CONTROL_TRDY_MSK) { /* IP core does not have fifo. Wait until tx data is completely shifted. */ if (data->status_act & ALTERA_AVALON_UART_STATUS_TMT_MSK) { ret_val = 1; } } #ifdef CONFIG_UART_LINE_CTRL /* if flow control is enabled, set tx not ready, if CTS is low. 
*/ if ((data->status_act & ALTERA_AVALON_UART_STATUS_CTS_MSK) == 0) { ret_val = 0; } #ifdef CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND if (data->dcts_rising == 0) { ret_val = 0; } #endif /* CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND */ #endif /* CONFIG_UART_LINE_CTRL */ k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief Check if nothing remains to be transmitted * * @param dev UART device struct * * @return 1 if nothing remains to be transmitted, 0 otherwise */ static int uart_altera_irq_tx_complete(const struct device *dev) { struct uart_altera_device_data *data = dev->data; int ret_val = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); if (data->status_act & ALTERA_AVALON_UART_STATUS_TMT_MSK) { ret_val = 1; } k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief Enable RX interrupt in * * @param dev UART device struct */ static void uart_altera_irq_rx_enable(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); data->control_val |= ALTERA_AVALON_UART_CONTROL_RRDY_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Disable RX interrupt * * @param dev UART device struct */ static void uart_altera_irq_rx_disable(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); data->control_val &= ~ALTERA_AVALON_UART_CONTROL_RRDY_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); k_spin_unlock(&data->lock, key); } /** * @brief Check if Rx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_altera_irq_rx_ready(const struct device *dev) { struct uart_altera_device_data *data = dev->data; int ret_val = 0; 
k_spinlock_key_t key = k_spin_lock(&data->lock); /* if RX interrupt is enabled */ if (data->control_val & ALTERA_AVALON_UART_CONTROL_RRDY_MSK) { /* check for space in rx data register */ if (data->status_act & ALTERA_AVALON_UART_STATUS_RRDY_MSK) { ret_val = 1; } } k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief This function will cache the status register. * * @param dev UART device struct * * @return 1 for success. */ static int uart_altera_irq_update(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); k_spin_unlock(&data->lock, key); return 1; } /** * @brief Check if any IRQ is pending * * @param dev UART device struct * * @return 1 if an IRQ is pending, 0 otherwise */ static int uart_altera_irq_is_pending(const struct device *dev) { struct uart_altera_device_data *data = dev->data; int ret_val = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); if (data->status_act & data->control_val & ALTERA_AVALON_UART_PENDING_MASK) { ret_val = 1; } k_spin_unlock(&data->lock, key); return ret_val; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device struct * @param cb Callback function pointer. * @param cb_data Data to pass to callback function. */ static void uart_altera_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_altera_device_data *data = dev->data; /* generate fatal error if CONFIG_ASSERT is enabled. 
*/ __ASSERT(cb != NULL, "uart_irq_callback_user_data_t cb is null pointer!"); k_spinlock_key_t key = k_spin_lock(&data->lock); #ifdef CONFIG_UART_ALTERA_EOP if (data->set_eop_cb) { data->cb_eop = cb; data->cb_data_eop = cb_data; data->set_eop_cb = 0; } else { data->cb = cb; data->cb_data = cb_data; } #else data->cb = cb; data->cb_data = cb_data; #endif /* CONFIG_UART_ALTERA_EOP */ k_spin_unlock(&data->lock, key); } #ifdef CONFIG_UART_LINE_CTRL /** * @brief DCTS Interrupt service routine. * * Handles assertion and deassettion of CTS/RTS stignal * * @param dev Pointer to UART device struct */ static void uart_altera_dcts_isr(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); /* Assume that user follows zephyr requirement and update status in their call back. */ if (data->status_act & ALTERA_AVALON_UART_STATUS_CTS_MSK) { #ifdef CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND data->dcts_rising = 1; #endif /* CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND */ /* check if device is ready to receive character */ if ((data->status_act & ALTERA_AVALON_UART_STATUS_RRDY_MSK) == 0) { /* Assert RTS to inform other UART. */ data->control_val |= ALTERA_AVALON_UART_CONTROL_RTS_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); } } else { /* other UART deasserts RTS */ if (data->status_act & ALTERA_AVALON_UART_STATUS_TMT_MSK) { /* only deasserts if not transmitting. */ data->control_val &= ~ALTERA_AVALON_UART_CONTROL_RTS_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); } } k_spin_unlock(&data->lock, key); } #endif /* CONFIG_UART_LINE_CTRL */ /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. 
* * @param dev Pointer to UART device struct * */ static void uart_altera_isr(const struct device *dev) { struct uart_altera_device_data *data = dev->data; const struct uart_altera_device_config *config = dev->config; uart_irq_callback_user_data_t callback = data->cb; /* Pre ISR */ #ifdef CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND /* deassert RTS as soon as rx data is received, as IP has no fifo. */ data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); if (data->status_act & ALTERA_AVALON_UART_STATUS_RRDY_MSK) { data->control_val &= ~ALTERA_AVALON_UART_CONTROL_RTS_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); } #endif /* CONFIG_UART_ALTERA_LINE_CTRL_WORKAROUND */ if (callback) { callback(dev, data->cb_data); } /* Post ISR */ #if CONFIG_UART_ALTERA_EOP data->status_act = sys_read32(config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); if (data->status_act & ALTERA_AVALON_UART_STATUS_EOP_MSK) { callback = data->cb_eop; if (callback) { callback(dev, data->cb_data_eop); } } #endif /* CONFIG_UART_ALTERA_EOP */ #ifdef CONFIG_UART_LINE_CTRL /* handles RTS/CTS signal */ if (data->status_act & ALTERA_AVALON_UART_STATUS_DCTS_MSK) { uart_altera_dcts_isr(dev); } #endif /* clear status after all interrupts are handled. 
*/ sys_write32(ALTERA_AVALON_UART_CLEAR_STATUS_VAL, config->base + ALTERA_AVALON_UART_STATUS_REG_OFFSET); } #ifdef CONFIG_UART_DRV_CMD /** * @brief Send extra command to driver * * @param dev UART device struct * @param cmd Command to driver * @param p Parameter to the command * * @return 0 if successful, failed otherwise */ static int uart_altera_drv_cmd(const struct device *dev, uint32_t cmd, uint32_t p) { struct uart_altera_device_data *data = dev->data; #if CONFIG_UART_ALTERA_EOP const struct uart_altera_device_config *config = dev->config; #endif int ret_val = -ENOTSUP; k_spinlock_key_t key = k_spin_lock(&data->lock); switch (cmd) { #if CONFIG_UART_ALTERA_EOP case CMD_ENABLE_EOP: /* enable EOP interrupt */ data->control_val |= ALTERA_AVALON_UART_CONTROL_EOP_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); /* set EOP character */ sys_write32((uint8_t) p, config->base + ALTERA_AVALON_UART_EOP_REG_OFFSET); /* after this, user needs to call uart_irq_callback_set * to set data->cb_eop and data->cb_data_eop! 
*/ data->set_eop_cb = 1; ret_val = 0; break; case CMD_DISABLE_EOP: /* Disable EOP interrupt */ data->control_val &= ~ALTERA_AVALON_UART_CONTROL_EOP_MSK; sys_write32(data->control_val, config->base + ALTERA_AVALON_UART_CONTROL_REG_OFFSET); /* clear call back */ data->cb_eop = NULL; data->cb_data_eop = NULL; ret_val = 0; break; #endif /* CONFIG_UART_ALTERA_EOP */ default: ret_val = -ENOTSUP; break; }; k_spin_unlock(&data->lock, key); return ret_val; } #endif /* CONFIG_UART_DRV_CMD */ #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_altera_driver_api = { .poll_in = uart_altera_poll_in, .poll_out = uart_altera_poll_out, .err_check = uart_altera_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_altera_configure, .config_get = uart_altera_config_get, #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_altera_fifo_fill, .fifo_read = uart_altera_fifo_read, .irq_tx_enable = uart_altera_irq_tx_enable, .irq_tx_disable = uart_altera_irq_tx_disable, .irq_tx_ready = uart_altera_irq_tx_ready, .irq_tx_complete = uart_altera_irq_tx_complete, .irq_rx_enable = uart_altera_irq_rx_enable, .irq_rx_disable = uart_altera_irq_rx_disable, .irq_rx_ready = uart_altera_irq_rx_ready, .irq_is_pending = uart_altera_irq_is_pending, .irq_update = uart_altera_irq_update, .irq_callback_set = uart_altera_irq_callback_set, #endif #ifdef CONFIG_UART_DRV_CMD .drv_cmd = uart_altera_drv_cmd, #endif }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_ALTERA_IRQ_CONFIG_FUNC(n) \ static void uart_altera_irq_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ uart_altera_isr, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_ALTERA_IRQ_CONFIG_INIT(n) \ .irq_config_func = uart_altera_irq_config_func_##n, \ .irq_num = DT_INST_IRQN(n), #else #define UART_ALTERA_IRQ_CONFIG_FUNC(n) #define UART_ALTERA_IRQ_CONFIG_INIT(n) #endif /* 
CONFIG_UART_INTERRUPT_DRIVEN */ #define UART_ALTERA_DEVICE_INIT(n) \ UART_ALTERA_IRQ_CONFIG_FUNC(n) \ static struct uart_altera_device_data uart_altera_dev_data_##n = { \ .uart_cfg = \ { \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = DT_INST_ENUM_IDX_OR(n, parity, \ UART_CFG_PARITY_NONE), \ .stop_bits = DT_INST_ENUM_IDX_OR(n, stop_bits, \ UART_CFG_STOP_BITS_1), \ .data_bits = DT_INST_ENUM_IDX_OR(n, data_bits, \ UART_CFG_DATA_BITS_8), \ .flow_ctrl = DT_INST_PROP(n, hw_flow_control) ? \ UART_CFG_FLOW_CTRL_RTS_CTS : \ UART_CFG_FLOW_CTRL_NONE, \ }, \ }; \ \ static const struct uart_altera_device_config uart_altera_dev_cfg_##n = { \ .base = DT_INST_REG_ADDR(n), \ .flags = ((DT_INST_PROP(n, fixed_baudrate)?ALT_AVALON_UART_FB:0) \ |(DT_INST_PROP(n, hw_flow_control)?ALT_AVALON_UART_FC:0)), \ UART_ALTERA_IRQ_CONFIG_INIT(n) \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ uart_altera_init, \ NULL, \ &uart_altera_dev_data_##n, \ &uart_altera_dev_cfg_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_altera_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_ALTERA_DEVICE_INIT) ```
/content/code_sandbox/drivers/serial/uart_altera.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,718
```c /* ns16550.c - NS16550D serial driver */ #define DT_DRV_COMPAT ns16550 /* * */ /** * @brief NS16550 Serial Driver * * This is the driver for the Intel NS16550 UART Chip used on the PC 386. * It uses the SCCs in asynchronous mode only. * * Before individual UART port can be used, uart_ns16550_port_init() has to be * called to setup the port. */ #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/types.h> #include <zephyr/init.h> #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/pm/policy.h> #include <zephyr/sys/sys_io.h> #include <zephyr/spinlock.h> #include <zephyr/irq.h> #if defined(CONFIG_PINCTRL) #include <zephyr/drivers/pinctrl.h> #endif #include <zephyr/drivers/serial/uart_ns16550.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_ns16550, CONFIG_UART_LOG_LEVEL); #define UART_NS16550_PCP_ENABLED DT_ANY_INST_HAS_PROP_STATUS_OKAY(pcp) #define UART_NS16550_DLF_ENABLED DT_ANY_INST_HAS_PROP_STATUS_OKAY(dlf) #define UART_NS16550_DMAS_ENABLED DT_ANY_INST_HAS_PROP_STATUS_OKAY(dmas) #if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) BUILD_ASSERT(IS_ENABLED(CONFIG_PCIE), "NS16550(s) in DT need CONFIG_PCIE"); #include <zephyr/drivers/pcie/pcie.h> #endif /* Is UART module 'resets' line property defined */ #define UART_NS16550_RESET_ENABLED DT_ANY_INST_HAS_PROP_STATUS_OKAY(resets) #if UART_NS16550_RESET_ENABLED #include <zephyr/drivers/reset.h> #endif #if defined(CONFIG_UART_ASYNC_API) #include <zephyr/drivers/dma.h> #include <assert.h> #if defined(CONFIG_UART_NS16550_INTEL_LPSS_DMA) #include <zephyr/drivers/dma/dma_intel_lpss.h> #endif #endif /* If any node has property io-mapped set, we need to support IO port * access in the code and device config struct. * * Note that DT_ANY_INST_HAS_PROP_STATUS_OKAY() always returns true * as io-mapped property is considered always exists and present, * even if its value is zero. 
Therefore we cannot use it, and has to * resort to the follow helper to see if any okay nodes have io-mapped * as 1. */ #define UART_NS16550_DT_PROP_IOMAPPED_HELPER(inst, prop, def) \ DT_INST_PROP_OR(inst, prop, def) || #define UART_NS16550_IOPORT_ENABLED \ (DT_INST_FOREACH_STATUS_OKAY_VARGS(UART_NS16550_DT_PROP_IOMAPPED_HELPER, io_mapped, 0) 0) /* register definitions */ #define REG_THR 0x00 /* Transmitter holding reg. */ #define REG_RDR 0x00 /* Receiver data reg. */ #define REG_BRDL 0x00 /* Baud rate divisor (LSB) */ #define REG_BRDH 0x01 /* Baud rate divisor (MSB) */ #define REG_IER 0x01 /* Interrupt enable reg. */ #define REG_IIR 0x02 /* Interrupt ID reg. */ #define REG_FCR 0x02 /* FIFO control reg. */ #define REG_LCR 0x03 /* Line control reg. */ #define REG_MDC 0x04 /* Modem control reg. */ #define REG_LSR 0x05 /* Line status reg. */ #define REG_MSR 0x06 /* Modem status reg. */ #define REG_DLF 0xC0 /* Divisor Latch Fraction */ #define REG_PCP 0x200 /* PRV_CLOCK_PARAMS (Apollo Lake) */ #define REG_MDR1 0x08 /* Mode control reg. 
(TI_K3) */ #if defined(CONFIG_UART_NS16550_INTEL_LPSS_DMA) #define REG_LPSS_SRC_TRAN 0xAF8 /* SRC Transfer LPSS DMA */ #define REG_LPSS_CLR_SRC_TRAN 0xB48 /* Clear SRC Tran LPSS DMA */ #define REG_LPSS_MST 0xB20 /* Mask SRC Transfer LPSS DMA */ #endif /* equates for interrupt enable register */ #define IER_RXRDY 0x01 /* receiver data ready */ #define IER_TBE 0x02 /* transmit bit enable */ #define IER_LSR 0x04 /* line status interrupts */ #define IER_MSI 0x08 /* modem status interrupts */ /* equates for interrupt identification register */ #define IIR_MSTAT 0x00 /* modem status interrupt */ #define IIR_NIP 0x01 /* no interrupt pending */ #define IIR_THRE 0x02 /* transmit holding register empty interrupt */ #define IIR_RBRF 0x04 /* receiver buffer register full interrupt */ #define IIR_LS 0x06 /* receiver line status interrupt */ #define IIR_MASK 0x07 /* interrupt id bits mask */ #define IIR_ID 0x06 /* interrupt ID mask without NIP */ #define IIR_FE 0xC0 /* FIFO mode enabled */ #define IIR_CH 0x0C /* Character timeout*/ /* equates for FIFO control register */ #define FCR_FIFO 0x01 /* enable XMIT and RCVR FIFO */ #define FCR_RCVRCLR 0x02 /* clear RCVR FIFO */ #define FCR_XMITCLR 0x04 /* clear XMIT FIFO */ /* equates for Apollo Lake clock control register (PRV_CLOCK_PARAMS) */ #define PCP_UPDATE 0x80000000 /* update clock */ #define PCP_EN 0x00000001 /* enable clock output */ /* Fields for TI K3 UART module */ #define MDR1_MODE_SELECT_FIELD_MASK BIT_MASK(3) #define MDR1_MODE_SELECT_FIELD_SHIFT BIT_MASK(0) /* Modes available for TI K3 UART module */ #define MDR1_STD_MODE (0) #define MDR1_SIR_MODE (1) #define MDR1_UART_16X (2) #define MDR1_UART_13X (3) #define MDR1_MIR_MODE (4) #define MDR1_FIR_MODE (5) #define MDR1_CIR_MODE (6) #define MDR1_DISABLE (7) /* * Per PC16550D (Literature Number: SNLS378B): * * RXRDY, Mode 0: When in the 16450 Mode (FCR0 = 0) or in * the FIFO Mode (FCR0 = 1, FCR3 = 0) and there is at least 1 * character in the RCVR FIFO or RCVR holding 
register, the * RXRDY pin (29) will be low active. Once it is activated the * RXRDY pin will go inactive when there are no more charac- * ters in the FIFO or holding register. * * RXRDY, Mode 1: In the FIFO Mode (FCR0 = 1) when the * FCR3 = 1 and the trigger level or the timeout has been * reached, the RXRDY pin will go low active. Once it is acti- * vated it will go inactive when there are no more characters * in the FIFO or holding register. * * TXRDY, Mode 0: In the 16450 Mode (FCR0 = 0) or in the * FIFO Mode (FCR0 = 1, FCR3 = 0) and there are no charac- * ters in the XMIT FIFO or XMIT holding register, the TXRDY * pin (24) will be low active. Once it is activated the TXRDY * pin will go inactive after the first character is loaded into the * XMIT FIFO or holding register. * * TXRDY, Mode 1: In the FIFO Mode (FCR0 = 1) when * FCR3 = 1 and there are no characters in the XMIT FIFO, the * TXRDY pin will go low active. This pin will become inactive * when the XMIT FIFO is completely full. 
*/ #define FCR_MODE0 0x00 /* set receiver in mode 0 */ #define FCR_MODE1 0x08 /* set receiver in mode 1 */ /* RCVR FIFO interrupt levels: trigger interrupt with this bytes in FIFO */ #define FCR_FIFO_1 0x00 /* 1 byte in RCVR FIFO */ #define FCR_FIFO_4 0x40 /* 4 bytes in RCVR FIFO */ #define FCR_FIFO_8 0x80 /* 8 bytes in RCVR FIFO */ #define FCR_FIFO_14 0xC0 /* 14 bytes in RCVR FIFO */ /* * UART NS16750 supports 64 bytes FIFO, which can be enabled * via the FCR register */ #define FCR_FIFO_64 0x20 /* Enable 64 bytes FIFO */ /* constants for line control register */ #define LCR_CS5 0x00 /* 5 bits data size */ #define LCR_CS6 0x01 /* 6 bits data size */ #define LCR_CS7 0x02 /* 7 bits data size */ #define LCR_CS8 0x03 /* 8 bits data size */ #define LCR_2_STB 0x04 /* 2 stop bits */ #define LCR_1_STB 0x00 /* 1 stop bit */ #define LCR_PEN 0x08 /* parity enable */ #define LCR_PDIS 0x00 /* parity disable */ #define LCR_EPS 0x10 /* even parity select */ #define LCR_SP 0x20 /* stick parity select */ #define LCR_SBRK 0x40 /* break control bit */ #define LCR_DLAB 0x80 /* divisor latch access enable */ /* constants for the modem control register */ #define MCR_DTR 0x01 /* dtr output */ #define MCR_RTS 0x02 /* rts output */ #define MCR_OUT1 0x04 /* output #1 */ #define MCR_OUT2 0x08 /* output #2 */ #define MCR_LOOP 0x10 /* loop back */ #define MCR_AFCE 0x20 /* auto flow control enable */ /* constants for line status register */ #define LSR_RXRDY 0x01 /* receiver data available */ #define LSR_OE 0x02 /* overrun error */ #define LSR_PE 0x04 /* parity error */ #define LSR_FE 0x08 /* framing error */ #define LSR_BI 0x10 /* break interrupt */ #define LSR_EOB_MASK 0x1E /* Error or Break mask */ #define LSR_THRE 0x20 /* transmit holding register empty */ #define LSR_TEMT 0x40 /* transmitter empty */ /* constants for modem status register */ #define MSR_DCTS 0x01 /* cts change */ #define MSR_DDSR 0x02 /* dsr change */ #define MSR_DRI 0x04 /* ring change */ #define MSR_DDCD 0x08 /* data 
carrier change */ #define MSR_CTS 0x10 /* complement of cts */ #define MSR_DSR 0x20 /* complement of dsr */ #define MSR_RI 0x40 /* complement of ring signal */ #define MSR_DCD 0x80 /* complement of dcd */ #define THR(dev) (get_port(dev) + (REG_THR * reg_interval(dev))) #define RDR(dev) (get_port(dev) + (REG_RDR * reg_interval(dev))) #define BRDL(dev) (get_port(dev) + (REG_BRDL * reg_interval(dev))) #define BRDH(dev) (get_port(dev) + (REG_BRDH * reg_interval(dev))) #define IER(dev) (get_port(dev) + (REG_IER * reg_interval(dev))) #define IIR(dev) (get_port(dev) + (REG_IIR * reg_interval(dev))) #define FCR(dev) (get_port(dev) + (REG_FCR * reg_interval(dev))) #define LCR(dev) (get_port(dev) + (REG_LCR * reg_interval(dev))) #define MDC(dev) (get_port(dev) + (REG_MDC * reg_interval(dev))) #define LSR(dev) (get_port(dev) + (REG_LSR * reg_interval(dev))) #define MSR(dev) (get_port(dev) + (REG_MSR * reg_interval(dev))) #define MDR1(dev) (get_port(dev) + (REG_MDR1 * reg_interval(dev))) #define DLF(dev) (get_port(dev) + REG_DLF) #define PCP(dev) (get_port(dev) + REG_PCP) #if defined(CONFIG_UART_NS16550_INTEL_LPSS_DMA) #define SRC_TRAN(dev) (get_port(dev) + REG_LPSS_SRC_TRAN) #define CLR_SRC_TRAN(dev) (get_port(dev) + REG_LPSS_CLR_SRC_TRAN) #define MST(dev) (get_port(dev) + REG_LPSS_MST) #define UNMASK_LPSS_INT(chan) (BIT(chan) | (BIT(8) << chan)) /* unmask LPSS DMA Interrupt */ #endif #define IIRC(dev) (((struct uart_ns16550_dev_data *)(dev)->data)->iir_cache) #ifdef CONFIG_UART_NS16550_ITE_HIGH_SPEED_BAUDRATE /* Register definitions (ITE_IT8XXX2) */ #define REG_ECSPMR 0x08 /* EC Serial port mode reg */ /* Fields for ITE IT8XXX2 UART module */ #define ECSPMR_ECHS 0x02 /* EC high speed select */ /* IT8XXX2 UART high speed baud rate settings */ #define UART_BAUDRATE_115200 115200 #define UART_BAUDRATE_230400 230400 #define UART_BAUDRATE_460800 460800 #define IT8XXX2_230400_DIVISOR 32770 #define IT8XXX2_460800_DIVISOR 32769 #define ECSPMR(dev) (get_port(dev) + REG_ECSPMR * 
reg_interval(dev)) #endif #if defined(CONFIG_UART_ASYNC_API) struct uart_ns16550_rx_dma_params { const struct device *dma_dev; uint8_t dma_channel; struct dma_config dma_cfg; struct dma_block_config active_dma_block; uint8_t *buf; size_t buf_len; size_t offset; size_t counter; struct k_work_delayable timeout_work; size_t timeout_us; }; struct uart_ns16550_tx_dma_params { const struct device *dma_dev; uint8_t dma_channel; struct dma_config dma_cfg; struct dma_block_config active_dma_block; const uint8_t *buf; size_t buf_len; struct k_work_delayable timeout_work; size_t timeout_us; }; struct uart_ns16550_async_data { const struct device *uart_dev; struct uart_ns16550_tx_dma_params tx_dma_params; struct uart_ns16550_rx_dma_params rx_dma_params; uint8_t *next_rx_buffer; size_t next_rx_buffer_len; uart_callback_t user_callback; void *user_data; }; static void uart_ns16550_async_rx_timeout(struct k_work *work); static void uart_ns16550_async_tx_timeout(struct k_work *work); #endif /* device config */ struct uart_ns16550_device_config { union { DEVICE_MMIO_ROM; uint32_t port; }; uint32_t sys_clk_freq; const struct device *clock_dev; clock_control_subsys_t clock_subsys; #if defined(CONFIG_UART_INTERRUPT_DRIVEN) || defined(CONFIG_UART_ASYNC_API) uart_irq_config_func_t irq_config_func; #endif #if UART_NS16550_PCP_ENABLED uint32_t pcp; #endif uint8_t reg_interval; #if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) struct pcie_dev *pcie; #endif #if defined(CONFIG_PINCTRL) const struct pinctrl_dev_config *pincfg; #endif #if UART_NS16550_IOPORT_ENABLED bool io_map; #endif #if UART_NS16550_RESET_ENABLED struct reset_dt_spec reset_spec; #endif }; /** Device data structure */ struct uart_ns16550_dev_data { DEVICE_MMIO_RAM; struct uart_config uart_config; struct k_spinlock lock; uint8_t fifo_size; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uint8_t iir_cache; /**< cache of IIR since it clears when read */ uart_irq_callback_user_data_t cb; /**< Callback function pointer */ void *cb_data; /**< Callback 
function arg */ #endif #if UART_NS16550_DLF_ENABLED uint8_t dlf; /**< DLF value */ #endif #if defined(CONFIG_UART_INTERRUPT_DRIVEN) && defined(CONFIG_PM) bool tx_stream_on; #endif #if defined(CONFIG_UART_ASYNC_API) uint64_t phys_addr; struct uart_ns16550_async_data async; #endif }; static void ns16550_outbyte(const struct uart_ns16550_device_config *cfg, uintptr_t port, uint8_t val) { #if UART_NS16550_IOPORT_ENABLED if (cfg->io_map) { if (IS_ENABLED(CONFIG_UART_NS16550_ACCESS_WORD_ONLY)) { sys_out32(val, port); } else { sys_out8(val, port); } } else { #else { #endif /* MMIO mapped */ if (IS_ENABLED(CONFIG_UART_NS16550_ACCESS_WORD_ONLY)) { sys_write32(val, port); } else { sys_write8(val, port); } } } static uint8_t ns16550_inbyte(const struct uart_ns16550_device_config *cfg, uintptr_t port) { #if UART_NS16550_IOPORT_ENABLED if (cfg->io_map) { if (IS_ENABLED(CONFIG_UART_NS16550_ACCESS_WORD_ONLY)) { return sys_in32(port); } else { return sys_in8(port); } } else { #else { #endif /* MMIO mapped */ if (IS_ENABLED(CONFIG_UART_NS16550_ACCESS_WORD_ONLY)) { return sys_read32(port); } else { return sys_read8(port); } } return 0; } __maybe_unused static void ns16550_outword(const struct uart_ns16550_device_config *cfg, uintptr_t port, uint32_t val) { #if UART_NS16550_IOPORT_ENABLED if (cfg->io_map) { sys_out32(val, port); } else { #else { #endif /* MMIO mapped */ sys_write32(val, port); } } __maybe_unused static uint32_t ns16550_inword(const struct uart_ns16550_device_config *cfg, uintptr_t port) { #if UART_NS16550_IOPORT_ENABLED if (cfg->io_map) { return sys_in32(port); } #endif /* MMIO mapped */ return sys_read32(port); } static inline uint8_t reg_interval(const struct device *dev) { const struct uart_ns16550_device_config *config = dev->config; return config->reg_interval; } static inline uintptr_t get_port(const struct device *dev) { uintptr_t port; #if UART_NS16550_IOPORT_ENABLED const struct uart_ns16550_device_config *config = dev->config; if (config->io_map) { port = 
config->port; } else { #else { #endif port = DEVICE_MMIO_GET(dev); } return port; } static uint32_t get_uart_baudrate_divisor(const struct device *dev, uint32_t baud_rate, uint32_t pclk) { ARG_UNUSED(dev); /* * calculate baud rate divisor. a variant of * (uint32_t)(pclk / (16.0 * baud_rate) + 0.5) */ return ((pclk + (baud_rate << 3)) / baud_rate) >> 4; } #ifdef CONFIG_UART_NS16550_ITE_HIGH_SPEED_BAUDRATE static uint32_t get_ite_uart_baudrate_divisor(const struct device *dev, uint32_t baud_rate, uint32_t pclk) { const struct uart_ns16550_device_config * const dev_cfg = dev->config; uint32_t divisor = 0; if (baud_rate > UART_BAUDRATE_115200) { /* Baud rate divisor for high speed */ if (baud_rate == UART_BAUDRATE_230400) { divisor = IT8XXX2_230400_DIVISOR; } else if (baud_rate == UART_BAUDRATE_460800) { divisor = IT8XXX2_460800_DIVISOR; } /* * This bit indicates that the supported baud rate of * UART1/UART2 can be up to 230.4k and 460.8k. * Other bits are reserved and have no setting, so we * directly write the ECSPMR register. 
*/ ns16550_outbyte(dev_cfg, ECSPMR(dev), ECSPMR_ECHS); } else { divisor = get_uart_baudrate_divisor(dev, baud_rate, pclk); /* Set ECSPMR register as default */ ns16550_outbyte(dev_cfg, ECSPMR(dev), 0); } return divisor; } #endif static void set_baud_rate(const struct device *dev, uint32_t baud_rate, uint32_t pclk) { struct uart_ns16550_dev_data * const dev_data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; uint32_t divisor; /* baud rate divisor */ uint8_t lcr_cache; if ((baud_rate != 0U) && (pclk != 0U)) { #ifdef CONFIG_UART_NS16550_ITE_HIGH_SPEED_BAUDRATE divisor = get_ite_uart_baudrate_divisor(dev, baud_rate, pclk); #else divisor = get_uart_baudrate_divisor(dev, baud_rate, pclk); #endif /* set the DLAB to access the baud rate divisor registers */ lcr_cache = ns16550_inbyte(dev_cfg, LCR(dev)); ns16550_outbyte(dev_cfg, LCR(dev), LCR_DLAB | lcr_cache); ns16550_outbyte(dev_cfg, BRDL(dev), (unsigned char)(divisor & 0xff)); ns16550_outbyte(dev_cfg, BRDH(dev), (unsigned char)((divisor >> 8) & 0xff)); /* restore the DLAB to access the baud rate divisor registers */ ns16550_outbyte(dev_cfg, LCR(dev), lcr_cache); dev_data->uart_config.baudrate = baud_rate; } } static int uart_ns16550_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_ns16550_dev_data * const dev_data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; uint8_t mdc = 0U; uint32_t pclk = 0U; /* temp for return value if error occurs in this locked region */ int ret = 0; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); #if defined(CONFIG_PINCTRL) if (dev_cfg->pincfg != NULL) { pinctrl_apply_state(dev_cfg->pincfg, PINCTRL_STATE_DEFAULT); } #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN dev_data->iir_cache = 0U; #endif #if UART_NS16550_DLF_ENABLED ns16550_outbyte(dev_cfg, DLF(dev), dev_data->dlf); #endif #if UART_NS16550_PCP_ENABLED uint32_t pcp = dev_cfg->pcp; if (pcp) { pcp |= PCP_EN; ns16550_outbyte(dev_cfg, 
PCP(dev), pcp & ~PCP_UPDATE); ns16550_outbyte(dev_cfg, PCP(dev), pcp | PCP_UPDATE); } #endif #ifdef CONFIG_UART_NS16550_TI_K3 uint32_t mdr = ns16550_inbyte(dev_cfg, MDR1(dev)); mdr = ((mdr & ~MDR1_MODE_SELECT_FIELD_MASK) | ((((MDR1_STD_MODE) << MDR1_MODE_SELECT_FIELD_SHIFT)) & MDR1_MODE_SELECT_FIELD_MASK)); ns16550_outbyte(dev_cfg, MDR1(dev), mdr); #endif /* * set clock frequency from clock_frequency property if valid, * otherwise, get clock frequency from clock manager */ if (dev_cfg->sys_clk_freq != 0U) { pclk = dev_cfg->sys_clk_freq; } else { if (!device_is_ready(dev_cfg->clock_dev)) { ret = -EINVAL; goto out; } if (clock_control_get_rate(dev_cfg->clock_dev, dev_cfg->clock_subsys, &pclk) != 0) { ret = -EINVAL; goto out; } } set_baud_rate(dev, cfg->baudrate, pclk); /* Local structure to hold temporary values to pass to ns16550_outbyte() */ struct uart_config uart_cfg; switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: uart_cfg.data_bits = LCR_CS5; break; case UART_CFG_DATA_BITS_6: uart_cfg.data_bits = LCR_CS6; break; case UART_CFG_DATA_BITS_7: uart_cfg.data_bits = LCR_CS7; break; case UART_CFG_DATA_BITS_8: uart_cfg.data_bits = LCR_CS8; break; default: ret = -ENOTSUP; goto out; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: uart_cfg.stop_bits = LCR_1_STB; break; case UART_CFG_STOP_BITS_2: uart_cfg.stop_bits = LCR_2_STB; break; default: ret = -ENOTSUP; goto out; } switch (cfg->parity) { case UART_CFG_PARITY_NONE: uart_cfg.parity = LCR_PDIS; break; case UART_CFG_PARITY_EVEN: uart_cfg.parity = LCR_EPS; break; default: ret = -ENOTSUP; goto out; } dev_data->uart_config = *cfg; /* data bits, stop bits, parity, clear DLAB */ ns16550_outbyte(dev_cfg, LCR(dev), uart_cfg.data_bits | uart_cfg.stop_bits | uart_cfg.parity); mdc = MCR_OUT2 | MCR_RTS | MCR_DTR; #if defined(CONFIG_UART_NS16550_VARIANT_NS16750) || \ defined(CONFIG_UART_NS16550_VARIANT_NS16950) if (cfg->flow_ctrl == UART_CFG_FLOW_CTRL_RTS_CTS) { mdc |= MCR_AFCE; } #endif ns16550_outbyte(dev_cfg, MDC(dev), 
mdc); /* * Program FIFO: enabled, mode 0 (set for compatibility with quark), * generate the interrupt at 8th byte * Clear TX and RX FIFO */ ns16550_outbyte(dev_cfg, FCR(dev), FCR_FIFO | FCR_MODE0 | FCR_FIFO_8 | FCR_RCVRCLR | FCR_XMITCLR #ifdef CONFIG_UART_NS16550_VARIANT_NS16750 | FCR_FIFO_64 #endif ); if ((ns16550_inbyte(dev_cfg, IIR(dev)) & IIR_FE) == IIR_FE) { #ifdef CONFIG_UART_NS16550_VARIANT_NS16750 dev_data->fifo_size = 64; #elif defined(CONFIG_UART_NS16550_VARIANT_NS16950) dev_data->fifo_size = 128; #else dev_data->fifo_size = 16; #endif } else { dev_data->fifo_size = 1; } /* clear the port */ ns16550_inbyte(dev_cfg, RDR(dev)); /* disable interrupts */ ns16550_outbyte(dev_cfg, IER(dev), 0x00); out: k_spin_unlock(&dev_data->lock, key); return ret; }; #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_ns16550_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_ns16550_dev_data *data = dev->data; cfg->baudrate = data->uart_config.baudrate; cfg->parity = data->uart_config.parity; cfg->stop_bits = data->uart_config.stop_bits; cfg->data_bits = data->uart_config.data_bits; cfg->flow_ctrl = data->uart_config.flow_ctrl; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #if UART_NS16550_RESET_ENABLED /** * @brief Toggle the reset UART line * * This routine is called to bring UART IP out of reset state. 
* * @param reset_spec Reset controller device configuration struct * * @return 0 if successful, failed otherwise */ static int uart_reset_config(const struct reset_dt_spec *reset_spec) { int ret; if (!device_is_ready(reset_spec->dev)) { LOG_ERR("Reset controller device is not ready"); return -ENODEV; } ret = reset_line_toggle(reset_spec->dev, reset_spec->id); if (ret != 0) { LOG_ERR("UART toggle reset line failed"); return ret; } return 0; } #endif /* UART_NS16550_RESET_ENABLED */ #if (IS_ENABLED(CONFIG_UART_ASYNC_API)) static inline void async_timer_start(struct k_work_delayable *work, size_t timeout_us) { if ((timeout_us != SYS_FOREVER_US) && (timeout_us != 0)) { k_work_reschedule(work, K_USEC(timeout_us)); } } #endif /** * @brief Initialize individual UART port * * This routine is called to reset the chip in a quiescent state. * * @param dev UART device struct * * @return 0 if successful, failed otherwise */ static int uart_ns16550_init(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config *dev_cfg = dev->config; int ret; ARG_UNUSED(dev_cfg); #if UART_NS16550_RESET_ENABLED /* Assert the UART reset line if it is defined. 
*/ if (dev_cfg->reset_spec.dev != NULL) { ret = uart_reset_config(&(dev_cfg->reset_spec)); if (ret != 0) { return ret; } } #endif #if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) if (dev_cfg->pcie) { struct pcie_bar mbar; if (dev_cfg->pcie->bdf == PCIE_BDF_NONE) { return -EINVAL; } pcie_probe_mbar(dev_cfg->pcie->bdf, 0, &mbar); pcie_set_cmd(dev_cfg->pcie->bdf, PCIE_CONF_CMDSTAT_MEM, true); device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); #if defined(CONFIG_UART_ASYNC_API) if (data->async.tx_dma_params.dma_dev != NULL) { pcie_set_cmd(dev_cfg->pcie->bdf, PCIE_CONF_CMDSTAT_MASTER, true); data->phys_addr = mbar.phys_addr; } #endif } else #endif /* DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) */ { #if UART_NS16550_IOPORT_ENABLED /* Map directly from DTS */ if (!dev_cfg->io_map) { #else { #endif DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); } } #if defined(CONFIG_UART_ASYNC_API) if (data->async.tx_dma_params.dma_dev != NULL) { data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; data->async.uart_dev = dev; k_work_init_delayable(&data->async.rx_dma_params.timeout_work, uart_ns16550_async_rx_timeout); k_work_init_delayable(&data->async.tx_dma_params.timeout_work, uart_ns16550_async_tx_timeout); data->async.rx_dma_params.dma_cfg.head_block = &data->async.rx_dma_params.active_dma_block; data->async.tx_dma_params.dma_cfg.head_block = &data->async.tx_dma_params.active_dma_block; #if defined(CONFIG_UART_NS16550_INTEL_LPSS_DMA) #if UART_NS16550_IOPORT_ENABLED if (!dev_cfg->io_map) #endif { uintptr_t base; base = DEVICE_MMIO_GET(dev) + DMA_INTEL_LPSS_OFFSET; dma_intel_lpss_set_base(data->async.tx_dma_params.dma_dev, base); dma_intel_lpss_setup(data->async.tx_dma_params.dma_dev); sys_write32((uint32_t)data->phys_addr, DEVICE_MMIO_GET(dev) + DMA_INTEL_LPSS_REMAP_LOW); sys_write32((uint32_t)(data->phys_addr >> DMA_INTEL_LPSS_ADDR_RIGHT_SHIFT), DEVICE_MMIO_GET(dev) + DMA_INTEL_LPSS_REMAP_HI); } #endif } #endif ret = uart_ns16550_configure(dev, 
&data->uart_config); if (ret != 0) { return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN dev_cfg->irq_config_func(dev); #endif return 0; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_ns16550_poll_in(const struct device *dev, unsigned char *c) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; int ret = -1; k_spinlock_key_t key = k_spin_lock(&data->lock); if ((ns16550_inbyte(dev_cfg, LSR(dev)) & LSR_RXRDY) != 0) { /* got a character */ *c = ns16550_inbyte(dev_cfg, RDR(dev)); ret = 0; } k_spin_unlock(&data->lock, key); return ret; } /** * @brief Output a character in polled mode. * * Checks if the transmitter is empty. If empty, a character is written to * the data register. * * If the hardware flow control is enabled then the handshake signal CTS has to * be asserted in order to send a character. * * @param dev UART device struct * @param c Character to send */ static void uart_ns16550_poll_out(const struct device *dev, unsigned char c) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); while ((ns16550_inbyte(dev_cfg, LSR(dev)) & LSR_THRE) == 0) { } ns16550_outbyte(dev_cfg, THR(dev), c); k_spin_unlock(&data->lock, key); } /** * @brief Check if an error was received * * @param dev UART device struct * * @return one of UART_ERROR_OVERRUN, UART_ERROR_PARITY, UART_ERROR_FRAMING, * UART_BREAK if an error was detected, 0 otherwise. 
*/ static int uart_ns16550_err_check(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); int check = (ns16550_inbyte(dev_cfg, LSR(dev)) & LSR_EOB_MASK); k_spin_unlock(&data->lock, key); return check >> 1; } #if CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Fill FIFO with data * * @param dev UART device struct * @param tx_data Data to transmit * @param size Number of bytes to send * * @return Number of bytes sent */ static int uart_ns16550_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; int i; k_spinlock_key_t key = k_spin_lock(&data->lock); for (i = 0; (i < size) && (i < data->fifo_size); i++) { ns16550_outbyte(dev_cfg, THR(dev), tx_data[i]); } k_spin_unlock(&data->lock, key); return i; } /** * @brief Read data from FIFO * * @param dev UART device struct * @param rxData Data container * @param size Container size * * @return Number of bytes read */ static int uart_ns16550_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; int i; k_spinlock_key_t key = k_spin_lock(&data->lock); for (i = 0; (i < size) && (ns16550_inbyte(dev_cfg, LSR(dev)) & LSR_RXRDY) != 0; i++) { rx_data[i] = ns16550_inbyte(dev_cfg, RDR(dev)); } k_spin_unlock(&data->lock, key); return i; } /** * @brief Enable TX interrupt in IER * * @param dev UART device struct */ static void uart_ns16550_irq_tx_enable(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); #if defined(CONFIG_UART_INTERRUPT_DRIVEN) && defined(CONFIG_PM) struct 
uart_ns16550_dev_data *const dev_data = dev->data; if (!dev_data->tx_stream_on) { dev_data->tx_stream_on = true; uint8_t num_cpu_states; const struct pm_state_info *cpu_states; num_cpu_states = pm_state_cpu_get_all(0U, &cpu_states); /* * Power state to be disabled. Some platforms have multiple * states and need to be given a constraint set according to * different states. */ for (uint8_t i = 0U; i < num_cpu_states; i++) { pm_policy_state_lock_get(cpu_states[i].state, PM_ALL_SUBSTATES); } } #endif ns16550_outbyte(dev_cfg, IER(dev), ns16550_inbyte(dev_cfg, IER(dev)) | IER_TBE); k_spin_unlock(&data->lock, key); } /** * @brief Disable TX interrupt in IER * * @param dev UART device struct */ static void uart_ns16550_irq_tx_disable(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); ns16550_outbyte(dev_cfg, IER(dev), ns16550_inbyte(dev_cfg, IER(dev)) & (~IER_TBE)); #if defined(CONFIG_UART_INTERRUPT_DRIVEN) && defined(CONFIG_PM) struct uart_ns16550_dev_data *const dev_data = dev->data; if (dev_data->tx_stream_on) { dev_data->tx_stream_on = false; uint8_t num_cpu_states; const struct pm_state_info *cpu_states; num_cpu_states = pm_state_cpu_get_all(0U, &cpu_states); /* * Power state to be enabled. Some platforms have multiple * states and need to be given a constraint release according * to different states. */ for (uint8_t i = 0U; i < num_cpu_states; i++) { pm_policy_state_lock_put(cpu_states[i].state, PM_ALL_SUBSTATES); } } #endif k_spin_unlock(&data->lock, key); } /** * @brief Check if Tx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_ns16550_irq_tx_ready(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); int ret = ((IIRC(dev) & IIR_ID) == IIR_THRE) ? 
1 : 0; k_spin_unlock(&data->lock, key); return ret; } /** * @brief Check if nothing remains to be transmitted * * @param dev UART device struct * * @return 1 if nothing remains to be transmitted, 0 otherwise */ static int uart_ns16550_irq_tx_complete(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); int ret = ((ns16550_inbyte(dev_cfg, LSR(dev)) & (LSR_TEMT | LSR_THRE)) == (LSR_TEMT | LSR_THRE)) ? 1 : 0; k_spin_unlock(&data->lock, key); return ret; } /** * @brief Enable RX interrupt in IER * * @param dev UART device struct */ static void uart_ns16550_irq_rx_enable(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); ns16550_outbyte(dev_cfg, IER(dev), ns16550_inbyte(dev_cfg, IER(dev)) | IER_RXRDY); k_spin_unlock(&data->lock, key); } /** * @brief Disable RX interrupt in IER * * @param dev UART device struct */ static void uart_ns16550_irq_rx_disable(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); ns16550_outbyte(dev_cfg, IER(dev), ns16550_inbyte(dev_cfg, IER(dev)) & (~IER_RXRDY)); k_spin_unlock(&data->lock, key); } /** * @brief Check if Rx IRQ has been raised * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static int uart_ns16550_irq_rx_ready(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); int ret = ((IIRC(dev) & IIR_ID) == IIR_RBRF) ? 
1 : 0; k_spin_unlock(&data->lock, key); return ret; } /** * @brief Enable error interrupt in IER * * @param dev UART device struct */ static void uart_ns16550_irq_err_enable(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); ns16550_outbyte(dev_cfg, IER(dev), ns16550_inbyte(dev_cfg, IER(dev)) | IER_LSR); k_spin_unlock(&data->lock, key); } /** * @brief Disable error interrupt in IER * * @param dev UART device struct * * @return 1 if an IRQ is ready, 0 otherwise */ static void uart_ns16550_irq_err_disable(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); ns16550_outbyte(dev_cfg, IER(dev), ns16550_inbyte(dev_cfg, IER(dev)) & (~IER_LSR)); k_spin_unlock(&data->lock, key); } /** * @brief Check if any IRQ is pending * * @param dev UART device struct * * @return 1 if an IRQ is pending, 0 otherwise */ static int uart_ns16550_irq_is_pending(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; k_spinlock_key_t key = k_spin_lock(&data->lock); int ret = (!(IIRC(dev) & IIR_NIP)) ? 1 : 0; k_spin_unlock(&data->lock, key); return ret; } /** * @brief Update cached contents of IIR * * @param dev UART device struct * * @return Always 1 */ static int uart_ns16550_irq_update(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&data->lock); IIRC(dev) = ns16550_inbyte(dev_cfg, IIR(dev)); k_spin_unlock(&data->lock, key); return 1; } /** * @brief Set the callback function pointer for IRQ. * * @param dev UART device struct * @param cb Callback function pointer. 
*/ static void uart_ns16550_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_ns16550_dev_data * const dev_data = dev->data; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); dev_data->cb = cb; dev_data->cb_data = cb_data; k_spin_unlock(&dev_data->lock, key); } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. */ static void uart_ns16550_isr(const struct device *dev) { struct uart_ns16550_dev_data * const dev_data = dev->data; if (dev_data->cb) { dev_data->cb(dev, dev_data->cb_data); } #if (IS_ENABLED(CONFIG_UART_ASYNC_API)) if (dev_data->async.tx_dma_params.dma_dev != NULL) { const struct uart_ns16550_device_config * const config = dev->config; uint8_t IIR_status = ns16550_inbyte(config, IIR(dev)); #if (IS_ENABLED(CONFIG_UART_NS16550_INTEL_LPSS_DMA)) uint32_t dma_status = ns16550_inword(config, SRC_TRAN(dev)); if (dma_status & BIT(dev_data->async.rx_dma_params.dma_channel)) { async_timer_start(&dev_data->async.rx_dma_params.timeout_work, dev_data->async.rx_dma_params.timeout_us); ns16550_outword(config, CLR_SRC_TRAN(dev), BIT(dev_data->async.rx_dma_params.dma_channel)); return; } dma_intel_lpss_isr(dev_data->async.rx_dma_params.dma_dev); #endif if (IIR_status & IIR_RBRF) { async_timer_start(&dev_data->async.rx_dma_params.timeout_work, dev_data->async.rx_dma_params.timeout_us); return; } } #endif #ifdef CONFIG_UART_NS16550_WA_ISR_REENABLE_INTERRUPT const struct uart_ns16550_device_config * const dev_cfg = dev->config; uint8_t cached_ier = ns16550_inbyte(dev_cfg, IER(dev)); ns16550_outbyte(dev_cfg, IER(dev), 0U); ns16550_outbyte(dev_cfg, IER(dev), cached_ier); #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_NS16550_LINE_CTRL /** * @brief Manipulate line control for UART. 
* * @param dev UART device struct * @param ctrl The line control to be manipulated * @param val Value to set the line control * * @return 0 if successful, failed otherwise */ static int uart_ns16550_line_ctrl_set(const struct device *dev, uint32_t ctrl, uint32_t val) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config *const dev_cfg = dev->config; uint32_t mdc, chg, pclk = 0U; k_spinlock_key_t key; if (dev_cfg->sys_clk_freq != 0U) { pclk = dev_cfg->sys_clk_freq; } else { if (device_is_ready(dev_cfg->clock_dev)) { clock_control_get_rate(dev_cfg->clock_dev, dev_cfg->clock_subsys, &pclk); } } switch (ctrl) { case UART_LINE_CTRL_BAUD_RATE: set_baud_rate(dev, val, pclk); return 0; case UART_LINE_CTRL_RTS: case UART_LINE_CTRL_DTR: key = k_spin_lock(&data->lock); mdc = ns16550_inbyte(dev_cfg, MDC(dev)); if (ctrl == UART_LINE_CTRL_RTS) { chg = MCR_RTS; } else { chg = MCR_DTR; } if (val) { mdc |= chg; } else { mdc &= ~(chg); } ns16550_outbyte(dev_cfg, MDC(dev), mdc); k_spin_unlock(&data->lock, key); return 0; } return -ENOTSUP; } #endif /* CONFIG_UART_NS16550_LINE_CTRL */ #ifdef CONFIG_UART_NS16550_DRV_CMD /** * @brief Send extra command to driver * * @param dev UART device struct * @param cmd Command to driver * @param p Parameter to the command * * @return 0 if successful, failed otherwise */ static int uart_ns16550_drv_cmd(const struct device *dev, uint32_t cmd, uint32_t p) { #if UART_NS16550_DLF_ENABLED if (cmd == CMD_SET_DLF) { struct uart_ns16550_dev_data * const dev_data = dev->data; const struct uart_ns16550_device_config * const dev_cfg = dev->config; k_spinlock_key_t key = k_spin_lock(&dev_data->lock); dev_data->dlf = p; ns16550_outbyte(dev_cfg, DLF(dev), dev_data->dlf); k_spin_unlock(&dev_data->lock, key); return 0; } #endif return -ENOTSUP; } #endif /* CONFIG_UART_NS16550_DRV_CMD */ #if (IS_ENABLED(CONFIG_UART_ASYNC_API)) static void async_user_callback(const struct device *dev, struct uart_event *evt) { const struct 
uart_ns16550_dev_data *data = dev->data; if (data->async.user_callback) { data->async.user_callback(dev, evt, data->async.user_data); } } #if UART_NS16550_DMAS_ENABLED static void async_evt_tx_done(struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; struct uart_ns16550_tx_dma_params *tx_params = &data->async.tx_dma_params; (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work); struct uart_event event = { .type = UART_TX_DONE, .data.tx.buf = tx_params->buf, .data.tx.len = tx_params->buf_len }; tx_params->buf = NULL; tx_params->buf_len = 0U; async_user_callback(dev, &event); } #endif static void async_evt_rx_rdy(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; struct uart_ns16550_rx_dma_params *dma_params = &data->async.rx_dma_params; struct uart_event event = { .type = UART_RX_RDY, .data.rx.buf = dma_params->buf, .data.rx.len = dma_params->counter - dma_params->offset, .data.rx.offset = dma_params->offset }; dma_params->offset = dma_params->counter; if (event.data.rx.len > 0) { async_user_callback(dev, &event); } } static void async_evt_rx_buf_release(const struct device *dev) { struct uart_ns16550_dev_data *data = (struct uart_ns16550_dev_data *)dev->data; struct uart_event evt = { .type = UART_RX_BUF_RELEASED, .data.rx_buf.buf = data->async.rx_dma_params.buf }; async_user_callback(dev, &evt); data->async.rx_dma_params.buf = NULL; data->async.rx_dma_params.buf_len = 0U; data->async.rx_dma_params.offset = 0U; data->async.rx_dma_params.counter = 0U; } static void async_evt_rx_buf_request(const struct device *dev) { struct uart_event evt = { .type = UART_RX_BUF_REQUEST }; async_user_callback(dev, &evt); } static void uart_ns16550_async_rx_flush(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; struct uart_ns16550_rx_dma_params *dma_params = &data->async.rx_dma_params; struct dma_status status; dma_get_status(dma_params->dma_dev, dma_params->dma_channel, &status); const int rx_count 
= dma_params->buf_len - status.pending_length; if (rx_count > dma_params->counter) { dma_params->counter = rx_count; async_evt_rx_rdy(dev); } } static int uart_ns16550_rx_disable(const struct device *dev) { struct uart_ns16550_dev_data *data = (struct uart_ns16550_dev_data *)dev->data; struct uart_ns16550_rx_dma_params *dma_params = &data->async.rx_dma_params; k_spinlock_key_t key = k_spin_lock(&data->lock); int ret = 0; if (!device_is_ready(dma_params->dma_dev)) { ret = -ENODEV; goto out; } (void)k_work_cancel_delayable(&data->async.rx_dma_params.timeout_work); if (dma_params->buf && (dma_params->buf_len > 0)) { uart_ns16550_async_rx_flush(dev); async_evt_rx_buf_release(dev); if (data->async.next_rx_buffer != NULL) { dma_params->buf = data->async.next_rx_buffer; dma_params->buf_len = data->async.next_rx_buffer_len; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0; async_evt_rx_buf_release(dev); } } ret = dma_stop(dma_params->dma_dev, dma_params->dma_channel); struct uart_event event = { .type = UART_RX_DISABLED }; async_user_callback(dev, &event); out: k_spin_unlock(&data->lock, key); return ret; } static void prepare_rx_dma_block_config(const struct device *dev) { struct uart_ns16550_dev_data *data = (struct uart_ns16550_dev_data *)dev->data; struct uart_ns16550_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; assert(rx_dma_params->buf != NULL); assert(rx_dma_params->buf_len > 0); struct dma_block_config *head_block_config = &rx_dma_params->active_dma_block; head_block_config->dest_address = (uintptr_t)rx_dma_params->buf; head_block_config->source_address = data->phys_addr; head_block_config->block_size = rx_dma_params->buf_len; } #if UART_NS16550_DMAS_ENABLED static void dma_callback(const struct device *dev, void *user_data, uint32_t channel, int status) { struct device *uart_dev = (struct device *)user_data; struct uart_ns16550_dev_data *data = (struct uart_ns16550_dev_data *)uart_dev->data; struct uart_ns16550_rx_dma_params 
*rx_params = &data->async.rx_dma_params; struct uart_ns16550_tx_dma_params *tx_params = &data->async.tx_dma_params; if (channel == tx_params->dma_channel) { async_evt_tx_done(uart_dev); } else if (channel == rx_params->dma_channel) { rx_params->counter = rx_params->buf_len; async_evt_rx_rdy(uart_dev); async_evt_rx_buf_release(uart_dev); rx_params->buf = data->async.next_rx_buffer; rx_params->buf_len = data->async.next_rx_buffer_len; data->async.next_rx_buffer = NULL; data->async.next_rx_buffer_len = 0U; if (rx_params->buf != NULL && rx_params->buf_len > 0) { dma_reload(dev, rx_params->dma_channel, data->phys_addr, (uintptr_t)rx_params->buf, rx_params->buf_len); dma_start(dev, rx_params->dma_channel); async_evt_rx_buf_request(uart_dev); } else { uart_ns16550_rx_disable(uart_dev); } } } #endif static int uart_ns16550_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct uart_ns16550_dev_data *data = dev->data; data->async.user_callback = callback; data->async.user_data = user_data; return 0; } static int uart_ns16550_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout_us) { struct uart_ns16550_dev_data *data = dev->data; struct uart_ns16550_tx_dma_params *tx_params = &data->async.tx_dma_params; k_spinlock_key_t key = k_spin_lock(&data->lock); int ret = 0; if (!device_is_ready(tx_params->dma_dev)) { ret = -ENODEV; goto out; } tx_params->buf = buf; tx_params->buf_len = len; tx_params->active_dma_block.source_address = (uintptr_t)buf; tx_params->active_dma_block.dest_address = data->phys_addr; tx_params->active_dma_block.block_size = len; tx_params->active_dma_block.next_block = NULL; ret = dma_config(tx_params->dma_dev, tx_params->dma_channel, (struct dma_config *)&tx_params->dma_cfg); if (ret == 0) { ret = dma_start(tx_params->dma_dev, tx_params->dma_channel); if (ret) { ret = -EIO; goto out; } async_timer_start(&data->async.tx_dma_params.timeout_work, timeout_us); } out: k_spin_unlock(&data->lock, key); 
return ret; } static int uart_ns16550_tx_abort(const struct device *dev) { struct uart_ns16550_dev_data *data = dev->data; struct uart_ns16550_tx_dma_params *tx_params = &data->async.tx_dma_params; struct dma_status status; int ret = 0; size_t bytes_tx; k_spinlock_key_t key = k_spin_lock(&data->lock); if (!device_is_ready(tx_params->dma_dev)) { ret = -ENODEV; goto out; } (void)k_work_cancel_delayable(&data->async.tx_dma_params.timeout_work); ret = dma_stop(tx_params->dma_dev, tx_params->dma_channel); dma_get_status(tx_params->dma_dev, tx_params->dma_channel, &status); bytes_tx = tx_params->buf_len - status.pending_length; if (ret == 0) { struct uart_event tx_aborted_event = { .type = UART_TX_ABORTED, .data.tx.buf = tx_params->buf, .data.tx.len = bytes_tx }; async_user_callback(dev, &tx_aborted_event); } out: k_spin_unlock(&data->lock, key); return ret; } static int uart_ns16550_rx_enable(const struct device *dev, uint8_t *buf, const size_t len, const int32_t timeout_us) { struct uart_ns16550_dev_data *data = dev->data; const struct uart_ns16550_device_config *config = dev->config; struct uart_ns16550_rx_dma_params *rx_dma_params = &data->async.rx_dma_params; int ret = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); if (!device_is_ready(rx_dma_params->dma_dev)) { ret = -ENODEV; goto out; } rx_dma_params->timeout_us = timeout_us; rx_dma_params->buf = buf; rx_dma_params->buf_len = len; #if defined(CONFIG_UART_NS16550_INTEL_LPSS_DMA) ns16550_outword(config, MST(dev), UNMASK_LPSS_INT(rx_dma_params->dma_channel)); #else ns16550_outbyte(config, IER(dev), (ns16550_inbyte(config, IER(dev)) | IER_RXRDY)); ns16550_outbyte(config, FCR(dev), FCR_FIFO); #endif prepare_rx_dma_block_config(dev); dma_config(rx_dma_params->dma_dev, rx_dma_params->dma_channel, (struct dma_config *)&rx_dma_params->dma_cfg); dma_start(rx_dma_params->dma_dev, rx_dma_params->dma_channel); async_evt_rx_buf_request(dev); out: k_spin_unlock(&data->lock, key); return ret; } static int 
uart_ns16550_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uart_ns16550_dev_data *data = dev->data; assert(data->async.next_rx_buffer == NULL); assert(data->async.next_rx_buffer_len == 0); data->async.next_rx_buffer = buf; data->async.next_rx_buffer_len = len; return 0; } static void uart_ns16550_async_rx_timeout(struct k_work *work) { struct k_work_delayable *work_delay = CONTAINER_OF(work, struct k_work_delayable, work); struct uart_ns16550_rx_dma_params *rx_params = CONTAINER_OF(work_delay, struct uart_ns16550_rx_dma_params, timeout_work); struct uart_ns16550_async_data *async_data = CONTAINER_OF(rx_params, struct uart_ns16550_async_data, rx_dma_params); const struct device *dev = async_data->uart_dev; uart_ns16550_async_rx_flush(dev); } static void uart_ns16550_async_tx_timeout(struct k_work *work) { struct k_work_delayable *work_delay = CONTAINER_OF(work, struct k_work_delayable, work); struct uart_ns16550_tx_dma_params *tx_params = CONTAINER_OF(work_delay, struct uart_ns16550_tx_dma_params, timeout_work); struct uart_ns16550_async_data *async_data = CONTAINER_OF(tx_params, struct uart_ns16550_async_data, tx_dma_params); const struct device *dev = async_data->uart_dev; (void)uart_ns16550_tx_abort(dev); } #endif /* CONFIG_UART_ASYNC_API */ static const struct uart_driver_api uart_ns16550_driver_api = { .poll_in = uart_ns16550_poll_in, .poll_out = uart_ns16550_poll_out, .err_check = uart_ns16550_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_ns16550_configure, .config_get = uart_ns16550_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_ns16550_fifo_fill, .fifo_read = uart_ns16550_fifo_read, .irq_tx_enable = uart_ns16550_irq_tx_enable, .irq_tx_disable = uart_ns16550_irq_tx_disable, .irq_tx_ready = uart_ns16550_irq_tx_ready, .irq_tx_complete = uart_ns16550_irq_tx_complete, .irq_rx_enable = uart_ns16550_irq_rx_enable, .irq_rx_disable = uart_ns16550_irq_rx_disable, .irq_rx_ready = 
uart_ns16550_irq_rx_ready, .irq_err_enable = uart_ns16550_irq_err_enable, .irq_err_disable = uart_ns16550_irq_err_disable, .irq_is_pending = uart_ns16550_irq_is_pending, .irq_update = uart_ns16550_irq_update, .irq_callback_set = uart_ns16550_irq_callback_set, #endif #ifdef CONFIG_UART_ASYNC_API .callback_set = uart_ns16550_callback_set, .tx = uart_ns16550_tx, .tx_abort = uart_ns16550_tx_abort, .rx_enable = uart_ns16550_rx_enable, .rx_disable = uart_ns16550_rx_disable, .rx_buf_rsp = uart_ns16550_rx_buf_rsp, #endif #ifdef CONFIG_UART_NS16550_LINE_CTRL .line_ctrl_set = uart_ns16550_line_ctrl_set, #endif #ifdef CONFIG_UART_NS16550_DRV_CMD .drv_cmd = uart_ns16550_drv_cmd, #endif }; #define UART_NS16550_IRQ_FLAGS(n) \ COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, sense), \ (DT_INST_IRQ(n, sense)), \ (0)) /* IO-port or MMIO based UART */ #define UART_NS16550_IRQ_CONFIG(n) \ static void uart_ns16550_irq_config_func##n(const struct device *dev) \ { \ ARG_UNUSED(dev); \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ uart_ns16550_isr, DEVICE_DT_INST_GET(n), \ UART_NS16550_IRQ_FLAGS(n)); \ irq_enable(DT_INST_IRQN(n)); \ } /* PCI(e) with auto IRQ detection */ #define UART_NS16550_IRQ_CONFIG_PCIE(n) \ static void uart_ns16550_irq_config_func##n(const struct device *dev) \ { \ BUILD_ASSERT(DT_INST_IRQN(n) == PCIE_IRQ_DETECT, \ "Only runtime IRQ configuration is supported"); \ BUILD_ASSERT(IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS), \ "NS16550 PCIe requires dynamic interrupts"); \ const struct uart_ns16550_device_config *dev_cfg = dev->config;\ unsigned int irq = pcie_alloc_irq(dev_cfg->pcie->bdf); \ if (irq == PCIE_CONF_INTR_IRQ_NONE) { \ return; \ } \ pcie_connect_dynamic_irq(dev_cfg->pcie->bdf, irq, \ DT_INST_IRQ(n, priority), \ (void (*)(const void *))uart_ns16550_isr, \ DEVICE_DT_INST_GET(n), \ UART_NS16550_IRQ_FLAGS(n)); \ pcie_irq_enable(dev_cfg->pcie->bdf, irq); \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define DEV_CONFIG_IRQ_FUNC_INIT(n) \ .irq_config_func = 
uart_ns16550_irq_config_func##n, #define UART_NS16550_IRQ_FUNC_DECLARE(n) \ static void uart_ns16550_irq_config_func##n(const struct device *dev); #define UART_NS16550_IRQ_FUNC_DEFINE(n) \ UART_NS16550_IRQ_CONFIG(n) #define DEV_CONFIG_PCIE_IRQ_FUNC_INIT(n) \ .irq_config_func = uart_ns16550_irq_config_func##n, #define UART_NS16550_PCIE_IRQ_FUNC_DECLARE(n) \ static void uart_ns16550_irq_config_func##n(const struct device *dev); #define UART_NS16550_PCIE_IRQ_FUNC_DEFINE(n) \ UART_NS16550_IRQ_CONFIG_PCIE(n) #else /* !CONFIG_UART_INTERRUPT_DRIVEN */ #define DEV_CONFIG_IRQ_FUNC_INIT(n) #define UART_NS16550_IRQ_FUNC_DECLARE(n) #define UART_NS16550_IRQ_FUNC_DEFINE(n) #define DEV_CONFIG_PCIE_IRQ_FUNC_INIT(n) #define UART_NS16550_PCIE_IRQ_FUNC_DECLARE(n) #define UART_NS16550_PCIE_IRQ_FUNC_DEFINE(n) #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API #define DMA_PARAMS(n) \ .async.tx_dma_params = { \ .dma_dev = \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, tx)), \ .dma_channel = \ DT_INST_DMAS_CELL_BY_NAME(n, tx, channel), \ .dma_cfg = { \ .source_burst_length = 1, \ .dest_burst_length = 1, \ .source_data_size = 1, \ .dest_data_size = 1, \ .complete_callback_en = 0, \ .error_callback_dis = 1, \ .block_count = 1, \ .channel_direction = MEMORY_TO_PERIPHERAL, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, tx, channel), \ .dma_callback = dma_callback, \ .user_data = (void *)DEVICE_DT_INST_GET(n) \ }, \ }, \ .async.rx_dma_params = { \ .dma_dev = \ DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(n, rx)), \ .dma_channel = \ DT_INST_DMAS_CELL_BY_NAME(n, rx, channel), \ .dma_cfg = { \ .source_burst_length = 1, \ .dest_burst_length = 1, \ .source_data_size = 1, \ .dest_data_size = 1, \ .complete_callback_en = 0, \ .error_callback_dis = 1, \ .block_count = 1, \ .channel_direction = PERIPHERAL_TO_MEMORY, \ .dma_slot = DT_INST_DMAS_CELL_BY_NAME(n, rx, channel), \ .dma_callback = dma_callback, \ .user_data = (void *)DEVICE_DT_INST_GET(n) \ }, \ }, \ COND_CODE_0(DT_INST_ON_BUS(n, 
pcie), \ (.phys_addr = DT_INST_REG_ADDR(n),), ()) #define DMA_PARAMS_NULL(n) \ .async.tx_dma_params = { \ .dma_dev = NULL \ }, \ .async.rx_dma_params = { \ .dma_dev = NULL \ }, \ #define DEV_DATA_ASYNC(n) \ COND_CODE_0(DT_INST_PROP(n, io_mapped), \ (COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), \ (DMA_PARAMS(n)), (DMA_PARAMS_NULL(n)))), \ (DMA_PARAMS_NULL(n))) #else #define DEV_DATA_ASYNC(n) #endif /* CONFIG_UART_ASYNC_API */ #define UART_NS16550_COMMON_DEV_CFG_INITIALIZER(n) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(n, clock_frequency), ( \ .sys_clk_freq = DT_INST_PROP(n, clock_frequency), \ .clock_dev = NULL, \ .clock_subsys = NULL, \ ), ( \ .sys_clk_freq = 0, \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t) DT_INST_PHA(\ 0, clocks, clkid), \ ) \ ) \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, pcp), \ (.pcp = DT_INST_PROP_OR(n, pcp, 0),)) \ .reg_interval = (1 << DT_INST_PROP(n, reg_shift)), \ IF_ENABLED(CONFIG_PINCTRL, \ (.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),)) \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, resets), \ (.reset_spec = RESET_DT_SPEC_INST_GET(n),)) #define UART_NS16550_COMMON_DEV_DATA_INITIALIZER(n) \ .uart_config.baudrate = DT_INST_PROP_OR(n, current_speed, 0), \ .uart_config.parity = UART_CFG_PARITY_NONE, \ .uart_config.stop_bits = UART_CFG_STOP_BITS_1, \ .uart_config.data_bits = UART_CFG_DATA_BITS_8, \ .uart_config.flow_ctrl = \ COND_CODE_1(DT_INST_PROP_OR(n, hw_flow_control, 0), \ (UART_CFG_FLOW_CTRL_RTS_CTS), \ (UART_CFG_FLOW_CTRL_NONE)), \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, dlf), \ (.dlf = DT_INST_PROP_OR(n, dlf, 0),)) \ DEV_DATA_ASYNC(n) \ #define UART_NS16550_DEVICE_IO_MMIO_INIT(n) \ UART_NS16550_IRQ_FUNC_DECLARE(n); \ IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(n))); \ static const struct uart_ns16550_device_config uart_ns16550_dev_cfg_##n = { \ COND_CODE_1(DT_INST_PROP_OR(n, io_mapped, 0), \ (.port = DT_INST_REG_ADDR(n),), \ (DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),)) \ IF_ENABLED(DT_INST_PROP_OR(n, 
io_mapped, 0), \ (.io_map = true,)) \ UART_NS16550_COMMON_DEV_CFG_INITIALIZER(n) \ DEV_CONFIG_IRQ_FUNC_INIT(n) \ }; \ static struct uart_ns16550_dev_data uart_ns16550_dev_data_##n = { \ UART_NS16550_COMMON_DEV_DATA_INITIALIZER(n) \ }; \ DEVICE_DT_INST_DEFINE(n, uart_ns16550_init, NULL, \ &uart_ns16550_dev_data_##n, &uart_ns16550_dev_cfg_##n, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_ns16550_driver_api); \ UART_NS16550_IRQ_FUNC_DEFINE(n) #define UART_NS16550_DEVICE_PCIE_INIT(n) \ UART_NS16550_PCIE_IRQ_FUNC_DECLARE(n); \ DEVICE_PCIE_INST_DECLARE(n); \ IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(n))); \ static const struct uart_ns16550_device_config uart_ns16550_dev_cfg_##n = { \ UART_NS16550_COMMON_DEV_CFG_INITIALIZER(n) \ DEV_CONFIG_PCIE_IRQ_FUNC_INIT(n) \ DEVICE_PCIE_INST_INIT(n, pcie) \ }; \ static struct uart_ns16550_dev_data uart_ns16550_dev_data_##n = { \ UART_NS16550_COMMON_DEV_DATA_INITIALIZER(n) \ }; \ DEVICE_DT_INST_DEFINE(n, uart_ns16550_init, NULL, \ &uart_ns16550_dev_data_##n, &uart_ns16550_dev_cfg_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_ns16550_driver_api); \ UART_NS16550_PCIE_IRQ_FUNC_DEFINE(n) #define UART_NS16550_DEVICE_INIT(n) \ COND_CODE_1(DT_INST_ON_BUS(n, pcie), \ (UART_NS16550_DEVICE_PCIE_INIT(n)), \ (UART_NS16550_DEVICE_IO_MMIO_INIT(n))) DT_INST_FOREACH_STATUS_OKAY(UART_NS16550_DEVICE_INIT) ```
/content/code_sandbox/drivers/serial/uart_ns16550.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
16,549
```c /* * */ #define DT_DRV_COMPAT nxp_s32_linflexd #include <soc.h> #include <zephyr/irq.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/pinctrl.h> #include <Linflexd_Uart_Ip.h> #include <Linflexd_Uart_Ip_Irq.h> #include "uart_nxp_s32_linflexd.h" static int uart_nxp_s32_err_check(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; Linflexd_Uart_Ip_StatusType status; int err = 0; status = Linflexd_Uart_Ip_GetReceiveStatus(config->instance, NULL); if (status == LINFLEXD_UART_IP_STATUS_RX_OVERRUN) { err |= UART_ERROR_OVERRUN; } if (status == LINFLEXD_UART_IP_STATUS_PARITY_ERROR) { err |= UART_ERROR_PARITY; } if (status == LINFLEXD_UART_IP_STATUS_FRAMING_ERROR) { err |= UART_ERROR_FRAMING; } return err; } static void uart_nxp_s32_poll_out(const struct device *dev, unsigned char c) { const struct uart_nxp_s32_config *config = dev->config; uint32_t linflexd_ier; uint8_t key; key = irq_lock(); /* Save enabled Linflexd's interrupts */ linflexd_ier = sys_read32(POINTER_TO_UINT(&config->base->LINIER)); Linflexd_Uart_Ip_SyncSend(config->instance, &c, 1, CONFIG_UART_NXP_S32_POLL_OUT_TIMEOUT); /* Restore Linflexd's interrupts */ sys_write32(linflexd_ier, POINTER_TO_UINT(&config->base->LINIER)); irq_unlock(key); } static int uart_nxp_s32_poll_in(const struct device *dev, unsigned char *c) { const struct uart_nxp_s32_config *config = dev->config; Linflexd_Uart_Ip_StatusType status; uint32_t linflexd_ier; int ret; status = LINFLEXD_UART_IP_STATUS_SUCCESS; /* Save enabled Linflexd's interrupts */ linflexd_ier = sys_read32(POINTER_TO_UINT(&config->base->LINIER)); /* Retrieves data with poll method */ status = Linflexd_Uart_Ip_SyncReceive(config->instance, c, 1, CONFIG_UART_NXP_S32_POLL_IN_TIMEOUT); /* Restore Linflexd's interrupts */ sys_write32(linflexd_ier, POINTER_TO_UINT(&config->base->LINIER)); if (status == LINFLEXD_UART_IP_STATUS_SUCCESS) { ret = 0; } else if (status == LINFLEXD_UART_IP_STATUS_TIMEOUT) { ret = -1; } else { ret = 
-EBUSY; } return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_nxp_s32_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { const struct uart_nxp_s32_config *config = dev->config; struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); if (int_data->tx_fifo_busy) { return 0; } int_data->tx_fifo_busy = true; Linflexd_Uart_Ip_AsyncSend(config->instance, tx_data, 1); return 1; } static int uart_nxp_s32_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_nxp_s32_config *config = dev->config; struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); if (int_data->rx_fifo_busy) { return 0; } *rx_data = int_data->rx_fifo_data; int_data->rx_fifo_busy = true; Linflexd_Uart_Ip_SetRxBuffer(config->instance, &(int_data->rx_fifo_data), 1); return 1; } static void uart_nxp_s32_irq_tx_enable(const struct device *dev) { struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); uint8_t key; int_data->irq_tx_enable = true; key = irq_lock(); /* Callback is called in order to transmit the data */ if (!int_data->tx_fifo_busy) { if (data->callback) { data->callback(dev, data->cb_data); } } irq_unlock(key); } static void uart_nxp_s32_irq_tx_disable(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); int_data->irq_tx_enable = false; int_data->tx_fifo_busy = false; Linflexd_Uart_Ip_AbortSendingData(config->instance); } static int uart_nxp_s32_irq_tx_ready(const struct device *dev) { struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); return !int_data->tx_fifo_busy && int_data->irq_tx_enable; } static void uart_nxp_s32_irq_rx_enable(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; struct 
uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); int_data->irq_rx_enable = true; Linflexd_Uart_Ip_AsyncReceive(config->instance, &(int_data->rx_fifo_data), 1); } static void uart_nxp_s32_irq_rx_disable(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); int_data->irq_rx_enable = false; int_data->rx_fifo_busy = false; Linflexd_Uart_Ip_AbortReceivingData(config->instance); } static int uart_nxp_s32_irq_rx_ready(const struct device *dev) { struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); return !int_data->rx_fifo_busy && int_data->irq_rx_enable; } static void uart_nxp_s32_irq_err_enable(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; uint32_t linflexd_ier; linflexd_ier = sys_read32(POINTER_TO_UINT(&config->base->LINIER)); /* Enable frame error interrupt and buffer overrun error interrupt */ linflexd_ier |= (LINFLEXD_LINIER_FEIE_MASK | LINFLEXD_LINIER_BOIE_MASK); sys_write32(linflexd_ier, POINTER_TO_UINT(&config->base->LINIER)); } static void uart_nxp_s32_irq_err_disable(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; uint32_t linflexd_ier; linflexd_ier = sys_read32(POINTER_TO_UINT(&config->base->LINIER)); /* Disable frame error interrupt and buffer overrun error interrupt */ linflexd_ier &= ~(LINFLEXD_LINIER_FEIE_MASK | LINFLEXD_LINIER_BOIE_MASK); sys_write32(linflexd_ier, POINTER_TO_UINT(&config->base->LINIER)); } static int uart_nxp_s32_irq_is_pending(const struct device *dev) { return (uart_nxp_s32_irq_tx_ready(dev)) || (uart_nxp_s32_irq_rx_ready(dev)); } static int uart_nxp_s32_irq_update(const struct device *dev) { return 1; } static void uart_nxp_s32_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_nxp_s32_data 
*data = dev->data; data->callback = cb; data->cb_data = cb_data; } void uart_nxp_s32_isr(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; Linflexd_Uart_Ip_IRQHandler(config->instance); } static void uart_nxp_s32_event_handler(const uint8 instance, Linflexd_Uart_Ip_EventType event, const void *user_data) { const struct device *dev = (const struct device *)user_data; const struct uart_nxp_s32_config *config = dev->config; struct uart_nxp_s32_data *data = dev->data; struct uart_nxp_s32_int *int_data = &(data->int_data); Linflexd_Uart_Ip_StatusType status; if (event == LINFLEXD_UART_IP_EVENT_END_TRANSFER) { /* * Check the previous UART transmit has finished * because Rx may also trigger this event */ status = Linflexd_Uart_Ip_GetTransmitStatus(config->instance, NULL); if (status != LINFLEXD_UART_IP_STATUS_BUSY) { int_data->tx_fifo_busy = false; if (data->callback) { data->callback(dev, data->cb_data); } } } else if (event == LINFLEXD_UART_IP_EVENT_RX_FULL) { int_data->rx_fifo_busy = false; if (data->callback) { data->callback(dev, data->cb_data); } } else if (event == LINFLEXD_UART_IP_EVENT_ERROR) { if (data->callback) { data->callback(dev, data->cb_data); } } else { /* Other events are not used */ } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int uart_nxp_s32_init(const struct device *dev) { const struct uart_nxp_s32_config *config = dev->config; int err; err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } Linflexd_Uart_Ip_Init(config->instance, &config->hw_cfg); return 0; } static const struct uart_driver_api uart_nxp_s32_driver_api = { .poll_in = uart_nxp_s32_poll_in, .poll_out = uart_nxp_s32_poll_out, .err_check = uart_nxp_s32_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_nxp_s32_fifo_fill, .fifo_read = uart_nxp_s32_fifo_read, .irq_tx_enable = uart_nxp_s32_irq_tx_enable, .irq_tx_disable = uart_nxp_s32_irq_tx_disable, .irq_tx_ready = uart_nxp_s32_irq_tx_ready, 
.irq_rx_enable = uart_nxp_s32_irq_rx_enable, .irq_rx_disable = uart_nxp_s32_irq_rx_disable, .irq_rx_ready = uart_nxp_s32_irq_rx_ready, .irq_err_enable = uart_nxp_s32_irq_err_enable, .irq_err_disable = uart_nxp_s32_irq_err_disable, .irq_is_pending = uart_nxp_s32_irq_is_pending, .irq_update = uart_nxp_s32_irq_update, .irq_callback_set = uart_nxp_s32_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define UART_NXP_S32_HW_INSTANCE_CHECK(i, n) \ ((DT_INST_REG_ADDR(n) == IP_LINFLEX_##i##_BASE) ? i : 0) #define UART_NXP_S32_HW_INSTANCE(n) \ LISTIFY(__DEBRACKET LINFLEXD_INSTANCE_COUNT, UART_NXP_S32_HW_INSTANCE_CHECK, (|), n) #define UART_NXP_S32_INTERRUPT_DEFINE(n) \ do { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ uart_nxp_s32_isr, DEVICE_DT_INST_GET(n), \ DT_INST_IRQ(n, flags)); \ irq_enable(DT_INST_IRQN(n)); \ } while (0) #define UART_NXP_S32_HW_CONFIG(n) \ { \ .BaudRate = 115200, \ .BaudRateMantissa = 26U, \ .BaudRateDivisor = 16U, \ .BaudRateFractionalDivisor = 1U, \ .ParityCheck = false, \ .ParityType = LINFLEXD_UART_IP_PARITY_EVEN, \ .StopBitsCount = LINFLEXD_UART_IP_ONE_STOP_BIT, \ .WordLength = LINFLEXD_UART_IP_8_BITS, \ .TransferType = LINFLEXD_UART_IP_USING_INTERRUPTS, \ .StateStruct = &Linflexd_Uart_Ip_apStateStructure[n], \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, ( \ .Callback = uart_nxp_s32_event_handler, \ .CallbackParam = (void *)DEVICE_DT_INST_GET(n), \ )) \ } #define UART_NXP_S32_INIT_DEVICE(n) \ PINCTRL_DT_INST_DEFINE(n); \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \ (static struct uart_nxp_s32_data uart_nxp_s32_data_##n;)) \ static const struct uart_nxp_s32_config uart_nxp_s32_config_##n = { \ .instance = UART_NXP_S32_HW_INSTANCE(n), \ .base = (LINFLEXD_Type *)DT_INST_REG_ADDR(n), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .hw_cfg = UART_NXP_S32_HW_CONFIG(n), \ }; \ static int uart_nxp_s32_init_##n(const struct device *dev) \ { \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \ (UART_NXP_S32_INTERRUPT_DEFINE(n);)) \ \ 
return uart_nxp_s32_init(dev); \ } \ DEVICE_DT_INST_DEFINE(n, \ uart_nxp_s32_init_##n, \ NULL, \ COND_CODE_1(CONFIG_UART_INTERRUPT_DRIVEN, \ (&uart_nxp_s32_data_##n), (NULL)), \ &uart_nxp_s32_config_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_nxp_s32_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_NXP_S32_INIT_DEVICE) ```
/content/code_sandbox/drivers/serial/uart_nxp_s32_linflexd.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,327
```unknown # MCUXpresso SDK LPSCI config UART_MCUX_LPSCI bool "MCUX LPSCI driver" default y depends on DT_HAS_NXP_KINETIS_LPSCI_ENABLED depends on CLOCK_CONTROL select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help Enable the MCUX LPSCI driver. ```
/content/code_sandbox/drivers/serial/Kconfig.mcux_lpsci
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
68
```unknown # Xilinx UART configuration config UART_XLNX_PS bool "Xilinx Zynq 7000/ZynqMP serial driver" default y depends on DT_HAS_XLNX_XUARTPS_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the UART driver for Xilinx MPSoC platforms. config UART_XLNX_UARTLITE bool "Xilinx UART Lite" default y depends on DT_HAS_XLNX_XPS_UARTLITE_1_00_A_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the UART driver for Xilinx UART Lite IP. ```
/content/code_sandbox/drivers/serial/Kconfig.xlnx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
139
```c /* * */ #define DT_DRV_COMPAT microchip_coreuart #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/uart.h> /* UART REGISTERS DEFINITIONS */ /* TX register */ #define TXDATA_REG_OFFSET 0x0 #define TXDATA_OFFSET 0x0 #define TXDATA_MASK 0xFF #define TXDATA_SHIFT 0 /* RX register */ #define RXDATA_REG_OFFSET 0x4 #define RXDATA_OFFSET 0x4 #define RXDATA_MASK 0xFF #define RXDATA_SHIFT 0 /* Control1 register */ #define CTRL1_REG_OFFSET 0x8 /* Baud value lower 8 bits */ #define CTRL1_BAUDVALUE_OFFSET 0x8 #define CTRL1_BAUDVALUE_MASK 0xFF #define CTRL1_BAUDVALUE_SHIFT 0 /* Control2 register */ #define CTRL2_REG_OFFSET 0xC /* Bit length */ #define CTRL2_BIT_LENGTH_OFFSET 0xC #define CTRL2_BIT_LENGTH_MASK 0x01 #define CTRL2_BIT_LENGTH_SHIFT 0 /* Parity enable */ #define CTRL2_PARITY_EN_OFFSET 0xC #define CTRL2_PARITY_EN_MASK 0x02 #define CTRL2_PARITY_EN_SHIFT 1 /* Odd/even parity configuration */ #define CTRL2_ODD_EVEN_OFFSET 0xC #define CTRL2_ODD_EVEN_MASK 0x04 #define CTRL2_ODD_EVEN_SHIFT 2 /* Baud value higher 5 bits */ #define CTRL2_BAUDVALUE_OFFSET 0xC #define CTRL2_BAUDVALUE_MASK 0xF8 #define CTRL2_BAUDVALUE_SHIFT 3 /* Status register */ #define StatusReg_REG_OFFSET 0x10 #define STATUS_REG_OFFSET 0x10 /* TX ready */ #define STATUS_TXRDY_OFFSET 0x10 #define STATUS_TXRDY_MASK 0x01 #define STATUS_TXRDY_SHIFT 0 /* Receive full - raised even when 1 char arrived */ #define STATUS_RXFULL_OFFSET 0x10 #define STATUS_RXFULL_MASK 0x02 #define STATUS_RXFULL_SHIFT 1 /* Parity error */ #define STATUS_PARITYERR_OFFSET 0x10 #define STATUS_PARITYERR_MASK 0x04 #define STATUS_PARITYERR_SHIFT 2 /* Overflow */ #define STATUS_OVERFLOW_OFFSET 0x10 #define STATUS_OVERFLOW_MASK 0x08 #define STATUS_OVERFLOW_SHIFT 3 /* Frame error */ #define STATUS_FRAMERR_OFFSET 0x10 #define STATUS_FRAMERR_MASK 0x10 #define STATUS_FRAMERR_SHIFT 4 /* Data bits length defines */ #define DATA_7_BITS 0x00 #define DATA_8_BITS 0x01 /* Parity defines */ #define NO_PARITY 0x00 #define 
EVEN_PARITY 0x02 #define ODD_PARITY 0x06 /* Error Status definitions */ #define UART_PARITY_ERROR 0x01 #define UART_OVERFLOW_ERROR 0x02 #define UART_FRAMING_ERROR 0x04 #define BAUDVALUE_LSB ((uint16_t)(0x00FF)) #define BAUDVALUE_MSB ((uint16_t)(0xFF00)) #define BAUDVALUE_SHIFT ((uint8_t)(5)) #define MIV_UART_0_LINECFG 0x1 #ifdef CONFIG_UART_INTERRUPT_DRIVEN static struct k_thread rx_thread; static K_KERNEL_STACK_DEFINE(rx_stack, 512); #endif struct uart_miv_regs_t { uint8_t tx; uint8_t reserved0[3]; uint8_t rx; uint8_t reserved1[3]; uint8_t ctrlreg1; uint8_t reserved2[3]; uint8_t ctrlreg2; uint8_t reserved3[3]; uint8_t status; }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN typedef void (*irq_cfg_func_t)(const struct device *dev); #endif struct uart_miv_device_config { uint32_t uart_addr; uint32_t sys_clk_freq; uint32_t line_config; uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN irq_cfg_func_t cfg_func; #endif }; struct uart_miv_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN const struct device *dev; uart_irq_callback_user_data_t callback; void *cb_data; #endif }; #define DEV_UART(dev) \ ((struct uart_miv_regs_t *) \ ((const struct uart_miv_device_config * const)(dev)->config)->uart_addr) static void uart_miv_poll_out(const struct device *dev, unsigned char c) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); while (!(uart->status & STATUS_TXRDY_MASK)) { } uart->tx = c; } static int uart_miv_poll_in(const struct device *dev, unsigned char *c) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); if (uart->status & STATUS_RXFULL_MASK) { *c = (unsigned char)(uart->rx & RXDATA_MASK); return 0; } return -1; } static int uart_miv_err_check(const struct device *dev) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); uint32_t flags = uart->status; int err = 0; if (flags & STATUS_PARITYERR_MASK) { err |= UART_PARITY_ERROR; } if (flags & STATUS_OVERFLOW_MASK) { err |= UART_OVERFLOW_ERROR; } if (flags & STATUS_FRAMERR_MASK) { err |= UART_FRAMING_ERROR; } return 
err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_miv_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); int i; for (i = 0; i < size && (uart->status & STATUS_TXRDY_MASK); i++) { uart->tx = tx_data[i]; } return i; } static int uart_miv_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); int i; for (i = 0; i < size; i++) { if (uart->status & STATUS_RXFULL_MASK) { rx_data[i] = (unsigned char)(uart->rx & RXDATA_MASK); } else { break; } } return i; } static void uart_miv_irq_tx_enable(const struct device *dev) { ARG_UNUSED(dev); } static void uart_miv_irq_tx_disable(const struct device *dev) { ARG_UNUSED(dev); } static int uart_miv_irq_tx_ready(const struct device *dev) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); return !(uart->status & STATUS_TXRDY_MASK); } static int uart_miv_irq_tx_complete(const struct device *dev) { ARG_UNUSED(dev); return 1; } static void uart_miv_irq_rx_enable(const struct device *dev) { ARG_UNUSED(dev); } static void uart_miv_irq_rx_disable(const struct device *dev) { ARG_UNUSED(dev); } static int uart_miv_irq_rx_ready(const struct device *dev) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); return !!(uart->status & STATUS_RXFULL_MASK); } static void uart_miv_irq_err_enable(const struct device *dev) { ARG_UNUSED(dev); } static void uart_miv_irq_err_disable(const struct device *dev) { ARG_UNUSED(dev); } static int uart_miv_irq_is_pending(const struct device *dev) { volatile struct uart_miv_regs_t *uart = DEV_UART(dev); return !!(uart->status & STATUS_RXFULL_MASK); } static int uart_miv_irq_update(const struct device *dev) { return 1; } static void uart_miv_irq_handler(const struct device *dev) { struct uart_miv_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } /* * This thread is a workaround for IRQs that are not connected in 
Mi-V. * Since we cannot rely on IRQs, the rx_thread is working instead and * polling for data. The thread calls the registered callback when data * arrives. */ void uart_miv_rx_thread(void *arg1, void *arg2, void *arg3) { struct uart_miv_data *data = (struct uart_miv_data *)arg1; const struct device *dev = data->dev; volatile struct uart_miv_regs_t *uart = DEV_UART(dev); const struct uart_miv_device_config *const cfg = dev->config; /* Make it go to sleep for a period no longer than * time to receive next character. */ uint32_t delay = 1000000 / cfg->baud_rate; ARG_UNUSED(arg2); ARG_UNUSED(arg3); while (1) { if (uart->status & STATUS_RXFULL_MASK) { uart_miv_irq_handler(dev); } k_sleep(K_USEC(delay)); } } static void uart_miv_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_miv_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int uart_miv_init(const struct device *dev) { const struct uart_miv_device_config *const cfg = dev->config; volatile struct uart_miv_regs_t *uart = DEV_UART(dev); /* Calculate divider value to set baudrate */ uint16_t baud_value = (cfg->sys_clk_freq / (cfg->baud_rate * 16U)) - 1; /* Set baud rate */ uart->ctrlreg1 = (uint8_t)(baud_value & BAUDVALUE_LSB); uart->ctrlreg2 = (uint8_t)(cfg->line_config) | (uint8_t)((baud_value & BAUDVALUE_MSB) >> BAUDVALUE_SHIFT); #ifdef CONFIG_UART_INTERRUPT_DRIVEN /* Setup thread polling for data */ cfg->cfg_func(dev); #endif return 0; } static const struct uart_driver_api uart_miv_driver_api = { .poll_in = uart_miv_poll_in, .poll_out = uart_miv_poll_out, .err_check = uart_miv_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_miv_fifo_fill, .fifo_read = uart_miv_fifo_read, .irq_tx_enable = uart_miv_irq_tx_enable, .irq_tx_disable = uart_miv_irq_tx_disable, .irq_tx_ready = uart_miv_irq_tx_ready, .irq_tx_complete = uart_miv_irq_tx_complete, .irq_rx_enable = 
uart_miv_irq_rx_enable, .irq_rx_disable = uart_miv_irq_rx_disable, .irq_rx_ready = uart_miv_irq_rx_ready, .irq_err_enable = uart_miv_irq_err_enable, .irq_err_disable = uart_miv_irq_err_disable, .irq_is_pending = uart_miv_irq_is_pending, .irq_update = uart_miv_irq_update, .irq_callback_set = uart_miv_irq_callback_set, #endif }; /* This driver is single-instance. */ BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) <= 1, "unsupported uart_miv instance"); #if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) static struct uart_miv_data uart_miv_data_0; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_miv_irq_cfg_func_0(const struct device *dev); #endif static const struct uart_miv_device_config uart_miv_dev_cfg_0 = { .uart_addr = DT_INST_REG_ADDR(0), .sys_clk_freq = DT_INST_PROP(0, clock_frequency), .line_config = MIV_UART_0_LINECFG, .baud_rate = DT_INST_PROP(0, current_speed), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .cfg_func = uart_miv_irq_cfg_func_0, #endif }; DEVICE_DT_INST_DEFINE(0, uart_miv_init, NULL, &uart_miv_data_0, &uart_miv_dev_cfg_0, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, (void *)&uart_miv_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_miv_irq_cfg_func_0(const struct device *dev) { struct uart_miv_data *data = dev->data; data->dev = dev; /* Create a thread which will poll for data - replacement for IRQ */ k_thread_create(&rx_thread, rx_stack, 500, uart_miv_rx_thread, data, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); } #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */ ```
/content/code_sandbox/drivers/serial/uart_miv.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,837
```c
/*
 *
 */

#define DT_DRV_COMPAT zephyr_native_posix_uart

#include <stdbool.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>
#include "cmdline.h" /* native_posix command line options header */
#include "posix_native_task.h"
#include "uart_native_ptty_bottom.h"
#include "nsi_host_trampolines.h"

/*
 * UART driver for POSIX ARCH based boards.
 * It can support up to two UARTs.
 *
 * For the first UART:
 *
 * It can either be connected to the process STDIN+STDOUT
 * OR
 * to a dedicated pseudo terminal
 *
 * The 2nd option is the recommended one for interactive use, as the pseudo
 * terminal driver will be configured in "raw" mode, and will therefore behave
 * more like a real UART.
 *
 * When connected to its own pseudo terminal, it may also auto attach a terminal
 * emulator to it, if set so from command line.
 */

static int np_uart_stdin_poll_in(const struct device *dev,
				 unsigned char *p_char);
static int np_uart_tty_poll_in(const struct device *dev,
			       unsigned char *p_char);
static void np_uart_poll_out(const struct device *dev,
			     unsigned char out_char);

/* Command-line controlled behavior (see np_add_uart_options below). */
static bool auto_attach;
static bool wait_pts;
static char *auto_attach_cmd = CONFIG_NATIVE_UART_AUTOATTACH_DEFAULT_CMD;

struct native_uart_status {
	int out_fd; /* File descriptor used for output */
	int in_fd; /* File descriptor used for input */
};

static struct native_uart_status native_uart_status_0;

/* Note: not const - poll_in is patched at init time depending on whether
 * UART_0 is backed by a pty or by the process stdin/stdout.
 */
static struct uart_driver_api np_uart_driver_api_0 = {
	.poll_out = np_uart_poll_out,
	.poll_in = np_uart_tty_poll_in,
};

#if defined(CONFIG_UART_NATIVE_POSIX_PORT_1_ENABLE)
static struct native_uart_status native_uart_status_1;

static struct uart_driver_api np_uart_driver_api_1 = {
	.poll_out = np_uart_poll_out,
	.poll_in = np_uart_tty_poll_in,
};
#endif /* CONFIG_UART_NATIVE_POSIX_PORT_1_ENABLE */

#define ERROR posix_print_error_and_exit
#define WARN posix_print_warning

/**
 * @brief Initialize the first native_posix serial port
 *
 * @param dev UART_0 device struct
 *
 * @return 0 (if it fails catastrophically, the execution is terminated)
 */
static int np_uart_0_init(const struct device *dev)
{
	struct native_uart_status *d;

	d = (struct native_uart_status *)dev->data;

	if (IS_ENABLED(CONFIG_NATIVE_UART_0_ON_OWN_PTY)) {
		int tty_fn = np_uart_open_ptty(dev->name, auto_attach_cmd,
					       auto_attach, wait_pts);

		d->in_fd = tty_fn;
		d->out_fd = tty_fn;
		np_uart_driver_api_0.poll_in = np_uart_tty_poll_in;
	} else { /* NATIVE_UART_0_ON_STDINOUT */
		d->in_fd = np_uart_ptty_get_stdin_fileno();
		d->out_fd = np_uart_ptty_get_stdout_fileno();
		np_uart_driver_api_0.poll_in = np_uart_stdin_poll_in;
	}

	return 0;
}

#if defined(CONFIG_UART_NATIVE_POSIX_PORT_1_ENABLE)
/*
 * Initialize the 2nd UART port.
 * This port will be always attached to its own new pseudoterminal.
 */
static int np_uart_1_init(const struct device *dev)
{
	struct native_uart_status *d;
	int tty_fn;

	d = (struct native_uart_status *)dev->data;

	tty_fn = np_uart_open_ptty(dev->name, NULL, false, wait_pts);

	d->in_fd = tty_fn;
	d->out_fd = tty_fn;

	return 0;
}
#endif

/*
 * @brief Output a character towards the serial port
 *
 * @param dev UART device struct
 * @param out_char Character to send.
 */
static void np_uart_poll_out(const struct device *dev,
			     unsigned char out_char)
{
	int ret;
	struct native_uart_status *d = (struct native_uart_status *)dev->data;

	if (wait_pts) {
		/* Block until a client is attached to the pty before
		 * writing, polling every 100 ms.
		 */
		while (1) {
			int rc = np_uart_slave_connected(d->out_fd);

			if (rc == 1) {
				break;
			}
			k_sleep(K_MSEC(100));
		}
	}

	/* The return value of write() cannot be ignored (there is a warning)
	 * but we do not need the return value for anything.
	 */
	ret = nsi_host_write(d->out_fd, &out_char, 1);
	(void) ret;
}

/**
 * @brief Poll the device for input.
 *
 * @param dev UART device structure.
 * @param p_char Pointer to character.
 *
 * @retval 0 If a character arrived and was stored in p_char
 * @retval -1 If no character was available to read
 */
static int np_uart_stdin_poll_in(const struct device *dev,
				 unsigned char *p_char)
{
	int in_f = ((struct native_uart_status *)dev->data)->in_fd;
	/* Once stdin reports EOF/disconnect (-2 from the bottom layer),
	 * remember it and stop polling the fd.
	 */
	static bool disconnected;
	int rc;

	if (disconnected == true) {
		return -1;
	}

	rc = np_uart_stdin_poll_in_bottom(in_f, p_char);
	if (rc == -2) {
		disconnected = true;
		return -1;
	}

	return rc;
}

/**
 * @brief Poll the device for input.
 *
 * @param dev UART device structure.
 * @param p_char Pointer to character.
 *
 * @retval 0 If a character arrived and was stored in p_char
 * @retval -1 If no character was available to read
 */
static int np_uart_tty_poll_in(const struct device *dev,
			       unsigned char *p_char)
{
	int n = -1;
	int in_f = ((struct native_uart_status *)dev->data)->in_fd;

	n = nsi_host_read(in_f, p_char, 1);
	if (n == -1) {
		return -1;
	}
	return 0;
}

DEVICE_DT_INST_DEFINE(0, np_uart_0_init, NULL,
		      (void *)&native_uart_status_0, NULL,
		      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,
		      &np_uart_driver_api_0);

#if defined(CONFIG_UART_NATIVE_POSIX_PORT_1_ENABLE)
DEVICE_DT_INST_DEFINE(1, np_uart_1_init, NULL,
		      (void *)&native_uart_status_1, NULL,
		      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,
		      &np_uart_driver_api_1);
#endif /* CONFIG_UART_NATIVE_POSIX_PORT_1_ENABLE */

/* Command-line callback: using --attach_uart_cmd implies --attach_uart. */
static void auto_attach_cmd_cb(char *argv, int offset)
{
	auto_attach_cmd = &argv[offset];
	auto_attach = true;
}

/* Register this driver's command-line options with the native runner.
 * Only meaningful when UART_0 is on its own pty.
 */
static void np_add_uart_options(void)
{
	if (!IS_ENABLED(CONFIG_NATIVE_UART_0_ON_OWN_PTY)) {
		return;
	}

	static struct args_struct_t uart_options[] = {
		{
			.is_switch = true,
			.option = "attach_uart",
			.type = 'b',
			.dest = (void *)&auto_attach,
			.descript = "Automatically attach to the UART terminal"
		},
		{
			.option = "attach_uart_cmd",
			.name = "\"cmd\"",
			.type = 's',
			.call_when_found = auto_attach_cmd_cb,
			.descript = "Command used to automatically attach to the terminal (implies "
				"auto_attach), by default: "
				"'" CONFIG_NATIVE_UART_AUTOATTACH_DEFAULT_CMD "'"
		},
		IF_ENABLED(CONFIG_UART_NATIVE_WAIT_PTS_READY_ENABLE, (
		{
			.is_switch = true,
			.option = "wait_uart",
			.type = 'b',
			.dest = (void *)&wait_pts,
			.descript = "Hold writes to the uart/pts until a client is connected/ready"
		},
		))
		ARG_TABLE_ENDMARKER
	};

	native_add_command_line_opts(uart_options);
}

/* Exit hook: close any pty fds this driver opened. */
static void np_cleanup_uart(void)
{
	if (IS_ENABLED(CONFIG_NATIVE_UART_0_ON_OWN_PTY)) {
		if (native_uart_status_0.in_fd != 0) {
			nsi_host_close(native_uart_status_0.in_fd);
		}
	}

#if defined(CONFIG_UART_NATIVE_POSIX_PORT_1_ENABLE)
	if (native_uart_status_1.in_fd != 0) {
		nsi_host_close(native_uart_status_1.in_fd);
	}
#endif
}

NATIVE_TASK(np_add_uart_options, PRE_BOOT_1, 11);
NATIVE_TASK(np_cleanup_uart, ON_EXIT, 99);
```
/content/code_sandbox/drivers/serial/uart_native_ptty.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,813
```objective-c
/*
 *
 */

/* Register definitions for the Renesas RZ/T2M SCI UART driver.
 * (C header; the surrounding fence's "objective-c" tag is a mis-detection.)
 */

#ifndef ZEPHYR_DRIVERS_SERIAL_UART_RZT2M_H_
#define ZEPHYR_DRIVERS_SERIAL_UART_RZT2M_H_

#include <stdint.h>

#define MAX_FIFO_DEPTH 16

/* Register accessors: each yields a pointer to a 32-bit register at a
 * fixed offset from the instance base address.
 */
#define RDR(base)   ((volatile uint32_t *)(base))
#define TDR(base)   ((volatile uint32_t *)(base + 0x04))
#define CCR0(base)  ((volatile uint32_t *)(base + 0x08))
#define CCR1(base)  ((volatile uint32_t *)(base + 0x0c))
#define CCR2(base)  ((volatile uint32_t *)(base + 0x10))
#define CCR3(base)  ((volatile uint32_t *)(base + 0x14))
#define CCR4(base)  ((volatile uint32_t *)(base + 0x18))
#define FCR(base)   ((volatile uint32_t *)(base + 0x24))
#define CSR(base)   ((volatile uint32_t *)(base + 0x48))
#define FRSR(base)  ((volatile uint32_t *)(base + 0x50))
#define FTSR(base)  ((volatile uint32_t *)(base + 0x54))
#define CFCLR(base) ((volatile uint32_t *)(base + 0x68))
#define FFCLR(base) ((volatile uint32_t *)(base + 0x70))

/* Reset values for the common control registers */
#define CCR0_DEFAULT_VALUE 0x0
#define CCR1_DEFAULT_VALUE 0x00000010
#define CCR2_DEFAULT_VALUE 0xff00ff04
#define CCR3_DEFAULT_VALUE 0x00001203
#define CCR4_DEFAULT_VALUE 0x0

/* Receive data field within RDR */
#define RDR_MASK_RDAT GENMASK(8, 0)

/* CCR0: receive/transmit enables and interrupt enables */
#define CCR0_MASK_RE BIT(0)
#define CCR0_MASK_TE BIT(4)
#define CCR0_MASK_DCME BIT(9)
#define CCR0_MASK_IDSEL BIT(10)
#define CCR0_MASK_RIE BIT(16)
#define CCR0_MASK_TIE BIT(20)
#define CCR0_MASK_TEIE BIT(21)
#define CCR0_MASK_SSE BIT(24)

/* CCR1: flow control, pin behavior, parity and noise filter */
#define CCR1_MASK_CTSE BIT(0)
#define CCR1_MASK_SPB2DT BIT(4)
#define CCR1_MASK_SPB2IO BIT(5)
#define CCR1_MASK_PE BIT(8)
#define CCR1_MASK_PM BIT(9)
#define CCR1_MASK_NFEN BIT(28)

/* CCR2: baud-rate generator fields */
#define CCR2_MASK_BGDM BIT(4)
#define CCR2_MASK_ABCS BIT(5)
#define CCR2_MASK_ABCSE BIT(6)
#define CCR2_MASK_BRR GENMASK(15, 8)
#define CCR2_MASK_BRME BIT(16)
#define CCR2_MASK_CKS GENMASK(21, 20)
#define CCR2_MASK_MDDR GENMASK(31, 24)
/* All CCR2 bits that participate in baud-rate selection */
#define CCR2_MASK_BAUD_SETTING                                                                     \
	(CCR2_MASK_BRME | CCR2_MASK_ABCSE | CCR2_MASK_ABCS | CCR2_MASK_BGDM | CCR2_MASK_CKS |      \
	 CCR2_MASK_BRR | CCR2_MASK_MDDR)

/* CCR3: frame format (stop bits, mode, character length, clock enable) */
#define CCR3_MASK_STP BIT(14)
#define CCR3_MASK_MP BIT(19)
#define CCR3_MASK_FM BIT(20)
#define CCR3_MASK_CKE (BIT(24) | BIT(25))
#define CCR3_CKE_ENABLE BIT(24)
#define CCR3_CHR_7BIT (BIT(8) | BIT(9))
#define CCR3_CHR_8BIT BIT(9)

/* CCR4 */
#define CCR4_MASK_ASEN BIT(16)
#define CCR4_MASK_ATEN BIT(17)

/* FCR: FIFO reset and trigger levels */
#define FCR_MASK_TFRST BIT(15)
#define FCR_MASK_RFRST BIT(23)
#define FCR_MASK_TTRG GENMASK(12, 8)
#define FCR_MASK_RTRG GENMASK(20, 16)
#define FCR_TTRG_15 (15 << 8)
#define FCR_RTRG_15 (15 << 16)

/* CSR: error and transfer status flags */
#define CSR_MASK_ORER BIT(24)
#define CSR_MASK_PER BIT(27)
#define CSR_MASK_FER BIT(28)
#define CSR_MASK_TDRE BIT(29)
#define CSR_MASK_TEND BIT(30)
#define CSR_MASK_RDRF BIT(31)

/* FIFO status helpers: extract fill counts from FRSR/FTSR */
#define FRSR_MASK_DR BIT(0)
#define FRSR_R(val) ((val >> 7) & 0x3f)
#define FTSR_T(val) (val & 0x3f)

/* CFCLR: write-1-to-clear flag bits */
#define CFCLR_MASK_ERSC BIT(4)
#define CFCLR_MASK_DCMFC BIT(16)
#define CFCLR_MASK_DPERC BIT(17)
#define CFCLR_MASK_DFERC BIT(18)
#define CFCLR_MASK_ORERC BIT(24)
#define CFCLR_MASK_MFFC BIT(26)
#define CFCLR_MASK_PERC BIT(27)
#define CFCLR_MASK_FERC BIT(28)
#define CFCLR_MASK_TDREC BIT(29)
#define CFCLR_MASK_RDRFC BIT(31)
/* Convenience mask clearing every CFCLR flag at once */
#define CFCLR_ALL_FLAG_CLEAR                                                                       \
	(CFCLR_MASK_ERSC | CFCLR_MASK_DCMFC | CFCLR_MASK_DPERC | CFCLR_MASK_DFERC |                \
	 CFCLR_MASK_ORERC | CFCLR_MASK_MFFC | CFCLR_MASK_PERC | CFCLR_MASK_FERC |                  \
	 CFCLR_MASK_TDREC | CFCLR_MASK_RDRFC)

#define FFCLR_MASK_DRC BIT(0)

/* Module-stop control register and per-SCI-instance stop bit */
#define MSTPCRA (volatile uint32_t *)(0x80280000 + 0x300)
#define MSTPCRA_MASK_SCIx(x) BIT(x + 8)

/* Map a register base address to an interface index
 * (NOTE(review): derived from the address layout; verify against the
 * RZ/T2M memory map).
 */
#define BASE_TO_IFACE_ID(base) ((base & 0x1000000) ? 5 : ((base & 0xff00) >> 10) - 4)

/* Pre-computed CCR2 baud settings for the two supported rates */
#define CCR2_MDDR_128 BIT(31)
#define CCR2_CKS_0 0
#define CCR2_BRME_0 0
#define CCR2_BRR_243 (0xf3 << 8)
#define CCR2_BRR_39 (0x27 << 8)
#define CCR2_BGDM_1 BIT(4)
#define CCR2_BAUD_SETTING_9600 (CCR2_MDDR_128 | CCR2_BRR_243)
#define CCR2_BAUD_SETTING_115200 (CCR2_MDDR_128 | CCR2_BRR_39 | CCR2_BGDM_1)

#endif /* ZEPHYR_DRIVERS_SERIAL_UART_RZT2M_H_ */
```
/content/code_sandbox/drivers/serial/uart_rzt2m.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,437
```c
/*
 *
 */

#define DT_DRV_COMPAT ti_cc32xx_uart

#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/pinctrl.h>

/* Driverlib includes */
#include <inc/hw_types.h>
#include <driverlib/rom.h>
#include <driverlib/rom_map.h>
#include <driverlib/prcm.h>
#include <driverlib/uart.h>

#include <zephyr/irq.h>

struct uart_cc32xx_dev_config {
	unsigned long base;
	uint32_t sys_clk_freq;
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_config_func_t irq_config_func;
#endif
};

struct uart_cc32xx_dev_data_t {
	uint32_t prcm;
	uint32_t baud_rate;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_callback_user_data_t cb; /**< Callback function pointer */
	void *cb_data; /**< Callback function arg */
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

/* Character pre-loaded into the TX FIFO at init to "prime" the first TX
 * interrupt (see uart_cc32xx_init).
 */
#define PRIME_CHAR '\r'

/* Forward decls: */
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static void uart_cc32xx_isr(const struct device *dev);
#endif

/*
 * CC32XX UART has a configurable FIFO length, from 1 to 8 characters.
 * However, the Zephyr console driver, and the Zephyr uart sample test, assume
 * a RX FIFO depth of one: meaning, one interrupt == one character received.
 * Keeping with this assumption, this driver leaves the FIFOs disabled,
 * and at depth 1.
 */
static int uart_cc32xx_init(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	const struct uart_cc32xx_dev_data_t *data = dev->data;
	int ret;

	MAP_PRCMPeripheralClkEnable(data->prcm,
		PRCM_RUN_MODE_CLK | PRCM_SLP_MODE_CLK);

	MAP_PRCMPeripheralReset(data->prcm);

	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	/* This also calls MAP_UARTEnable() to enable the FIFOs: */
	MAP_UARTConfigSetExpClk(config->base,
				MAP_PRCMPeripheralClockGet(data->prcm),
				data->baud_rate,
				(UART_CONFIG_WLEN_8 | UART_CONFIG_STOP_ONE
				 | UART_CONFIG_PAR_NONE));
	MAP_UARTFlowControlSet(config->base, UART_FLOWCONTROL_NONE);
	/* Re-disable the FIFOs: */
	MAP_UARTFIFODisable(config->base);

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	/* Clear any pending UART RX interrupts: */
	MAP_UARTIntClear(config->base, UART_INT_RX);

	config->irq_config_func(dev);

	/* Fill the tx fifo, so Zephyr console & shell subsystems get "primed"
	 * with first tx fifo empty interrupt when they first call
	 * uart_irq_tx_enable().
	 */
	MAP_UARTCharPutNonBlocking(config->base, PRIME_CHAR);
#endif
	return 0;
}

/* Non-blocking read of one character; -1 when nothing is available. */
static int uart_cc32xx_poll_in(const struct device *dev, unsigned char *c)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	if (MAP_UARTCharsAvail(config->base)) {
		*c = MAP_UARTCharGetNonBlocking(config->base);
	} else {
		return (-1);
	}
	return 0;
}

/* Blocking write of one character (MAP_UARTCharPut waits for space). */
static void uart_cc32xx_poll_out(const struct device *dev, unsigned char c)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	MAP_UARTCharPut(config->base, c);
}

/* Read, translate and clear the hardware RX error flags. */
static int uart_cc32xx_err_check(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	unsigned long cc32xx_errs = 0L;
	unsigned int z_err = 0U;

	cc32xx_errs = MAP_UARTRxErrorGet(config->base);

	/* Map cc32xx SDK uart.h defines to zephyr uart.h defines */
	z_err = ((cc32xx_errs & UART_RXERROR_OVERRUN) ?
		 UART_ERROR_OVERRUN : 0) |
		((cc32xx_errs & UART_RXERROR_BREAK) ? UART_BREAK : 0) |
		((cc32xx_errs & UART_RXERROR_PARITY) ? UART_ERROR_PARITY : 0) |
		((cc32xx_errs & UART_RXERROR_FRAMING) ? UART_ERROR_FRAMING : 0);

	MAP_UARTRxErrorClear(config->base);

	return (int)z_err;
}

#ifdef CONFIG_UART_INTERRUPT_DRIVEN

/* Push bytes until the FIFO rejects one; returns the count written. */
static int uart_cc32xx_fifo_fill(const struct device *dev,
				 const uint8_t *tx_data, int size)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	unsigned int num_tx = 0U;

	while ((size - num_tx) > 0) {
		/* Send a character */
		if (MAP_UARTCharPutNonBlocking(config->base,
					       tx_data[num_tx])) {
			num_tx++;
		} else {
			break;
		}
	}

	return (int)num_tx;
}

/* Drain available characters, up to @p size; returns the count read. */
static int uart_cc32xx_fifo_read(const struct device *dev, uint8_t *rx_data,
				 const int size)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	unsigned int num_rx = 0U;

	while (((size - num_rx) > 0) &&
	       MAP_UARTCharsAvail(config->base)) {

		/* Receive a character */
		rx_data[num_rx++] = MAP_UARTCharGetNonBlocking(config->base);
	}

	return num_rx;
}

static void uart_cc32xx_irq_tx_enable(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	MAP_UARTIntEnable(config->base, UART_INT_TX);
}

static void uart_cc32xx_irq_tx_disable(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	MAP_UARTIntDisable(config->base, UART_INT_TX);
}

/* Returns nonzero when the TX interrupt status flag is raised. */
static int uart_cc32xx_irq_tx_ready(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	unsigned int int_status;

	int_status = MAP_UARTIntStatus(config->base, 1);

	return (int_status & UART_INT_TX);
}

static void uart_cc32xx_irq_rx_enable(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	/* FIFOs are left disabled from reset, so UART_INT_RT flag not used. */
	MAP_UARTIntEnable(config->base, UART_INT_RX);
}

static void uart_cc32xx_irq_rx_disable(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	MAP_UARTIntDisable(config->base, UART_INT_RX);
}

/* TX complete == transmitter no longer busy shifting bits out. */
static int uart_cc32xx_irq_tx_complete(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;

	return (!MAP_UARTBusy(config->base));
}

static int uart_cc32xx_irq_rx_ready(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	unsigned int int_status;

	int_status = MAP_UARTIntStatus(config->base, 1);

	return (int_status & UART_INT_RX);
}

static void uart_cc32xx_irq_err_enable(const struct device *dev)
{
	/* Not yet used in zephyr */
}

static void uart_cc32xx_irq_err_disable(const struct device *dev)
{
	/* Not yet used in zephyr */
}

static int uart_cc32xx_irq_is_pending(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	unsigned int int_status;

	int_status = MAP_UARTIntStatus(config->base, 1);

	return (int_status & (UART_INT_TX | UART_INT_RX));
}

static int uart_cc32xx_irq_update(const struct device *dev)
{
	return 1;
}

static void uart_cc32xx_irq_callback_set(const struct device *dev,
					 uart_irq_callback_user_data_t cb,
					 void *cb_data)
{
	struct uart_cc32xx_dev_data_t * const dev_data = dev->data;

	dev_data->cb = cb;
	dev_data->cb_data = cb_data;
}

/**
 * @brief Interrupt service routine.
 *
 * This simply calls the callback function, if one exists.
 *
 * Note: CC32XX UART Tx interrupts when ready to send; Rx interrupts when char
 * received.
 *
 * @param arg Argument to ISR.
 */
static void uart_cc32xx_isr(const struct device *dev)
{
	const struct uart_cc32xx_dev_config *config = dev->config;
	struct uart_cc32xx_dev_data_t * const dev_data = dev->data;

	unsigned long intStatus = MAP_UARTIntStatus(config->base, 1);

	if (dev_data->cb) {
		dev_data->cb(dev, dev_data->cb_data);
	}
	/*
	 * RX/TX interrupt should have been implicitly cleared by Zephyr UART
	 * clients calling uart_fifo_read() or uart_fifo_write().
	 * Still, clear any error interrupts here, as they're not yet handled.
	 */
	MAP_UARTIntClear(config->base,
			 intStatus & ~(UART_INT_RX | UART_INT_TX));
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

static const struct uart_driver_api uart_cc32xx_driver_api = {
	.poll_in = uart_cc32xx_poll_in,
	.poll_out = uart_cc32xx_poll_out,
	.err_check = uart_cc32xx_err_check,
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_cc32xx_fifo_fill,
	.fifo_read = uart_cc32xx_fifo_read,
	.irq_tx_enable = uart_cc32xx_irq_tx_enable,
	.irq_tx_disable = uart_cc32xx_irq_tx_disable,
	.irq_tx_ready = uart_cc32xx_irq_tx_ready,
	.irq_rx_enable = uart_cc32xx_irq_rx_enable,
	.irq_rx_disable = uart_cc32xx_irq_rx_disable,
	.irq_tx_complete = uart_cc32xx_irq_tx_complete,
	.irq_rx_ready = uart_cc32xx_irq_rx_ready,
	.irq_err_enable = uart_cc32xx_irq_err_enable,
	.irq_err_disable = uart_cc32xx_irq_err_disable,
	.irq_is_pending = uart_cc32xx_irq_is_pending,
	.irq_update = uart_cc32xx_irq_update,
	.irq_callback_set = uart_cc32xx_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

/* Per-instance boilerplate: pinctrl, optional IRQ hookup, config/data
 * structs and device registration.
 */
#define UART_32XX_DEVICE(idx) \
	PINCTRL_DT_INST_DEFINE(idx); \
	IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \
	(static void uart_cc32xx_cfg_func_##idx(const struct device *dev) \
	{ \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, ( \
			IRQ_CONNECT(DT_INST_IRQN(idx), \
				    DT_INST_IRQ(idx, priority), \
				    uart_cc32xx_isr, DEVICE_DT_INST_GET(idx), \
				    0); \
			irq_enable(DT_INST_IRQN(idx))) \
		); \
	})); \
	static const struct uart_cc32xx_dev_config uart_cc32xx_dev_cfg_##idx = { \
		.base = DT_INST_REG_ADDR(idx), \
		.sys_clk_freq = DT_INST_PROP_BY_PHANDLE(idx, clocks, clock_frequency),\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, \
			   (.irq_config_func = uart_cc32xx_cfg_func_##idx,)) \
	}; \
	static struct uart_cc32xx_dev_data_t uart_cc32xx_dev_data_##idx = { \
		.prcm = PRCM_UARTA##idx, \
		.baud_rate = DT_INST_PROP(idx, current_speed), \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (.cb = NULL,)) \
	}; \
	DEVICE_DT_INST_DEFINE(idx, uart_cc32xx_init, \
			      NULL, &uart_cc32xx_dev_data_##idx, \
			      &uart_cc32xx_dev_cfg_##idx, \
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \
			      (void *)&uart_cc32xx_driver_api); \

DT_INST_FOREACH_STATUS_OKAY(UART_32XX_DEVICE);
```
/content/code_sandbox/drivers/serial/uart_cc32xx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,631
```unknown # Xen hypervisor console via UART setup # # config UART_XEN_HVC bool "Xen hypervisor DomU console UART driver" default y depends on DT_HAS_XEN_HVC_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT depends on XEN && !XEN_DOM0 && !XEN_DOM0LESS help Enable Xen ring buffer based hypervisor console driver. Used for Zephyr as unprivileged domain. config UART_XEN_HVC_CONSOLEIO bool "Xen hypervisor consoleio UART driver" select SERIAL_HAS_DRIVER depends on DT_HAS_XEN_HVC_CONSOLEIO_ENABLED && (XEN_DOM0 || XEN_DOM0LESS) default y help Enable Xen hypervisor console driver. Used for Zephyr as privileged domain (Dom0) or for Zephyr DomU in Dom0less configuration. Dom0less configuration does not have privileged domain. Thus, there is no console daemon and Xen manages all domain outputs through the consoleio interface. config XEN_HVC_INIT_PRIORITY int "Xen hypervisor console init priority" depends on UART_XEN_HVC || UART_XEN_HVC_CONSOLEIO default 55 help Set init priority for Xen HVC, should be inited before UART console driver (HVC gets inited on PRE_KERNEL_1 stage). config XEN_EARLY_CONSOLEIO bool "Early printk/stdout through console_io Xen interface" depends on UART_XEN_HVC help Enable setting of console_io symbol hook for stdout and printk. Log output will become available on PRE_KERNEL_1 stage. Requires Xen, compiled with CONFIG_DEBUG flag. ```
/content/code_sandbox/drivers/serial/Kconfig.xen
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
367
```c /* * */ #define DT_DRV_COMPAT arm_pl011 #define SBSA_COMPAT arm_sbsa_uart #include <string.h> #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/init.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/sys/device_mmio.h> #include <zephyr/sys/barrier.h> #include <zephyr/irq.h> #if defined(CONFIG_PINCTRL) #include <zephyr/drivers/pinctrl.h> #endif #if defined(CONFIG_RESET) #include <zephyr/drivers/reset.h> #endif #if defined(CONFIG_CLOCK_CONTROL) #include <zephyr/drivers/clock_control.h> #endif #ifdef CONFIG_CPU_CORTEX_M #include <cmsis_compiler.h> #endif #include "uart_pl011_registers.h" #if defined(CONFIG_SOC_FAMILY_AMBIQ) #include "uart_pl011_ambiq.h" #endif #if defined(CONFIG_SOC_SERIES_APOLLO3X) #define PM_INST_GET(n) PM_DEVICE_DT_INST_GET(n) #else #define PM_INST_GET(n) NULL #endif #include "uart_pl011_raspberrypi_pico.h" struct pl011_config { DEVICE_MMIO_ROM; #if defined(CONFIG_PINCTRL) const struct pinctrl_dev_config *pincfg; #endif #if defined(CONFIG_RESET) const struct reset_dt_spec reset; #endif #if defined(CONFIG_CLOCK_CONTROL) const struct device *clock_dev; clock_control_subsys_t clock_id; #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif int (*clk_enable_func)(const struct device *dev, uint32_t clk); int (*pwr_on_func)(void); }; /* Device data structure */ struct pl011_data { DEVICE_MMIO_RAM; struct uart_config uart_cfg; bool sbsa; /* SBSA mode */ uint32_t clk_freq; #ifdef CONFIG_UART_INTERRUPT_DRIVEN volatile bool sw_call_txdrdy; uart_irq_callback_user_data_t irq_cb; void *irq_cb_data; #endif }; static void pl011_enable(const struct device *dev) { get_uart(dev)->cr |= PL011_CR_UARTEN; } static void pl011_disable(const struct device *dev) { get_uart(dev)->cr &= ~PL011_CR_UARTEN; } static void pl011_enable_fifo(const struct device *dev) { get_uart(dev)->lcr_h |= PL011_LCRH_FEN; } static void pl011_disable_fifo(const struct device *dev) { get_uart(dev)->lcr_h 
&= ~PL011_LCRH_FEN; } static void pl011_set_flow_control(const struct device *dev, bool rts, bool cts) { if (rts) { get_uart(dev)->cr |= PL011_CR_RTSEn; } else { get_uart(dev)->cr &= ~PL011_CR_RTSEn; } if (cts) { get_uart(dev)->cr |= PL011_CR_CTSEn; } else { get_uart(dev)->cr &= ~PL011_CR_CTSEn; } } static int pl011_set_baudrate(const struct device *dev, uint32_t clk, uint32_t baudrate) { /* Avoiding float calculations, bauddiv is left shifted by 6 */ uint64_t bauddiv = (((uint64_t)clk) << PL011_FBRD_WIDTH) / (baudrate * 16U); /* Valid bauddiv value * uart_clk (min) >= 16 x baud_rate (max) * uart_clk (max) <= 16 x 65535 x baud_rate (min) */ if ((bauddiv < (1u << PL011_FBRD_WIDTH)) || (bauddiv > (65535u << PL011_FBRD_WIDTH))) { return -EINVAL; } get_uart(dev)->ibrd = bauddiv >> PL011_FBRD_WIDTH; get_uart(dev)->fbrd = bauddiv & ((1u << PL011_FBRD_WIDTH) - 1u); barrier_dmem_fence_full(); /* In order to internally update the contents of ibrd or fbrd, a * lcr_h write must always be performed at the end * ARM DDI 0183F, Pg 3-13 */ get_uart(dev)->lcr_h = get_uart(dev)->lcr_h; return 0; } static bool pl011_is_readable(const struct device *dev) { struct pl011_data *data = dev->data; if (!data->sbsa && (!(get_uart(dev)->cr & PL011_CR_UARTEN) || !(get_uart(dev)->cr & PL011_CR_RXE))) { return false; } return (get_uart(dev)->fr & PL011_FR_RXFE) == 0U; } static int pl011_poll_in(const struct device *dev, unsigned char *c) { if (!pl011_is_readable(dev)) { return -1; } /* got a character */ *c = (unsigned char)get_uart(dev)->dr; return get_uart(dev)->rsr & PL011_RSR_ERROR_MASK; } static void pl011_poll_out(const struct device *dev, unsigned char c) { /* Wait for space in FIFO */ while (get_uart(dev)->fr & PL011_FR_TXFF) { ; /* Wait */ } /* Send a character */ get_uart(dev)->dr = (uint32_t)c; } static int pl011_err_check(const struct device *dev) { int errors = 0; if (get_uart(dev)->rsr & PL011_RSR_ECR_OE) { errors |= UART_ERROR_OVERRUN; } if (get_uart(dev)->rsr & PL011_RSR_ECR_BE) 
{ errors |= UART_BREAK; } if (get_uart(dev)->rsr & PL011_RSR_ECR_PE) { errors |= UART_ERROR_PARITY; } if (get_uart(dev)->rsr & PL011_RSR_ECR_FE) { errors |= UART_ERROR_FRAMING; } return errors; } static int pl011_runtime_configure_internal(const struct device *dev, const struct uart_config *cfg, bool disable) { struct pl011_data *data = dev->data; uint32_t lcrh; int ret = -ENOTSUP; if (data->sbsa) { goto out; } if (disable) { pl011_disable(dev); pl011_disable_fifo(dev); } lcrh = get_uart(dev)->lcr_h & ~(PL011_LCRH_FORMAT_MASK | PL011_LCRH_STP2); switch (cfg->parity) { case UART_CFG_PARITY_NONE: lcrh &= ~(BIT(1) | BIT(2)); break; case UART_CFG_PARITY_ODD: lcrh |= PL011_LCRH_PARITY_ODD; break; case UART_CFG_PARITY_EVEN: lcrh |= PL011_LCRH_PARTIY_EVEN; break; default: goto enable; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: lcrh &= ~(PL011_LCRH_STP2); break; case UART_CFG_STOP_BITS_2: lcrh |= PL011_LCRH_STP2; break; default: goto enable; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: lcrh |= PL011_LCRH_WLEN_SIZE(5) << PL011_LCRH_WLEN_SHIFT; break; case UART_CFG_DATA_BITS_6: lcrh |= PL011_LCRH_WLEN_SIZE(6) << PL011_LCRH_WLEN_SHIFT; break; case UART_CFG_DATA_BITS_7: lcrh |= PL011_LCRH_WLEN_SIZE(7) << PL011_LCRH_WLEN_SHIFT; break; case UART_CFG_DATA_BITS_8: lcrh |= PL011_LCRH_WLEN_SIZE(8) << PL011_LCRH_WLEN_SHIFT; break; default: goto enable; } switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: pl011_set_flow_control(dev, false, false); break; case UART_CFG_FLOW_CTRL_RTS_CTS: pl011_set_flow_control(dev, true, true); break; default: goto enable; } /* Set baud rate */ ret = pl011_set_baudrate(dev, data->clk_freq, cfg->baudrate); if (ret != 0) { goto enable; } /* Update settings */ get_uart(dev)->lcr_h = lcrh; memcpy(&data->uart_cfg, cfg, sizeof(data->uart_cfg)); enable: if (disable) { pl011_enable_fifo(dev); pl011_enable(dev); } out: return ret; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int pl011_runtime_configure(const struct device *dev, 
const struct uart_config *cfg) { return pl011_runtime_configure_internal(dev, cfg, true); } static int pl011_runtime_config_get(const struct device *dev, struct uart_config *cfg) { struct pl011_data *data = dev->data; *cfg = data->uart_cfg; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int pl011_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { uint8_t num_tx = 0U; while (!(get_uart(dev)->fr & PL011_FR_TXFF) && (len - num_tx > 0)) { get_uart(dev)->dr = tx_data[num_tx++]; } return num_tx; } static int pl011_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { uint8_t num_rx = 0U; while ((len - num_rx > 0) && !(get_uart(dev)->fr & PL011_FR_RXFE)) { rx_data[num_rx++] = get_uart(dev)->dr; } return num_rx; } static void pl011_irq_tx_enable(const struct device *dev) { struct pl011_data *data = dev->data; get_uart(dev)->imsc |= PL011_IMSC_TXIM; if (data->sw_call_txdrdy) { /* Verify if the callback has been registered */ if (data->irq_cb) { /* * Due to HW limitation, the first TX interrupt should * be triggered by the software. * * PL011 TX interrupt is based on a transition through * a level, rather than on the level itself[1]. So that, * enable TX interrupt can not trigger TX interrupt if * no data was filled to TX FIFO at the beginning. * * [1]: PrimeCell UART (PL011) Technical Reference Manual * functional-overview/interrupts */ data->irq_cb(dev, data->irq_cb_data); } data->sw_call_txdrdy = false; } } static void pl011_irq_tx_disable(const struct device *dev) { get_uart(dev)->imsc &= ~PL011_IMSC_TXIM; } static int pl011_irq_tx_complete(const struct device *dev) { /* Check for UART is busy transmitting data. 
*/ return ((get_uart(dev)->fr & PL011_FR_BUSY) == 0); } static int pl011_irq_tx_ready(const struct device *dev) { struct pl011_data *data = dev->data; if (!data->sbsa && !(get_uart(dev)->cr & PL011_CR_TXE)) return false; return ((get_uart(dev)->imsc & PL011_IMSC_TXIM) && /* Check for TX interrupt status is set or TX FIFO is empty. */ (get_uart(dev)->ris & PL011_RIS_TXRIS || get_uart(dev)->fr & PL011_FR_TXFE)); } static void pl011_irq_rx_enable(const struct device *dev) { get_uart(dev)->imsc |= PL011_IMSC_RXIM | PL011_IMSC_RTIM; } static void pl011_irq_rx_disable(const struct device *dev) { get_uart(dev)->imsc &= ~(PL011_IMSC_RXIM | PL011_IMSC_RTIM); } static int pl011_irq_rx_ready(const struct device *dev) { struct pl011_data *data = dev->data; if (!data->sbsa && !(get_uart(dev)->cr & PL011_CR_RXE)) return false; return ((get_uart(dev)->imsc & PL011_IMSC_RXIM) && (!(get_uart(dev)->fr & PL011_FR_RXFE))); } static void pl011_irq_err_enable(const struct device *dev) { /* enable framing, parity, break, and overrun */ get_uart(dev)->imsc |= PL011_IMSC_ERROR_MASK; } static void pl011_irq_err_disable(const struct device *dev) { get_uart(dev)->imsc &= ~PL011_IMSC_ERROR_MASK; } static int pl011_irq_is_pending(const struct device *dev) { return pl011_irq_rx_ready(dev) || pl011_irq_tx_ready(dev); } static int pl011_irq_update(const struct device *dev) { return 1; } static void pl011_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct pl011_data *data = dev->data; data->irq_cb = cb; data->irq_cb_data = cb_data; } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api pl011_driver_api = { .poll_in = pl011_poll_in, .poll_out = pl011_poll_out, .err_check = pl011_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = pl011_runtime_configure, .config_get = pl011_runtime_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = pl011_fifo_fill, .fifo_read = pl011_fifo_read, .irq_tx_enable = 
pl011_irq_tx_enable, .irq_tx_disable = pl011_irq_tx_disable, .irq_tx_ready = pl011_irq_tx_ready, .irq_rx_enable = pl011_irq_rx_enable, .irq_rx_disable = pl011_irq_rx_disable, .irq_tx_complete = pl011_irq_tx_complete, .irq_rx_ready = pl011_irq_rx_ready, .irq_err_enable = pl011_irq_err_enable, .irq_err_disable = pl011_irq_err_disable, .irq_is_pending = pl011_irq_is_pending, .irq_update = pl011_irq_update, .irq_callback_set = pl011_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; static int pl011_init(const struct device *dev) { const struct pl011_config *config = dev->config; struct pl011_data *data = dev->data; int ret; DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); #if defined(CONFIG_RESET) if (config->reset.dev) { ret = reset_line_toggle_dt(&config->reset); if (ret) { return ret; } } #endif #if defined(CONFIG_CLOCK_CONTROL) if (config->clock_dev) { clock_control_on(config->clock_dev, config->clock_id); clock_control_get_rate(config->clock_dev, config->clock_id, &data->clk_freq); } #endif /* * If working in SBSA mode, we assume that UART is already configured, * or does not require configuration at all (if UART is emulated by * virtualization software). 
*/ if (!data->sbsa) { #if defined(CONFIG_PINCTRL) ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } #endif /* Call vendor-specific function to power on the peripheral */ if (config->pwr_on_func != NULL) { ret = config->pwr_on_func(); } /* disable the uart */ pl011_disable(dev); pl011_disable_fifo(dev); /* Call vendor-specific function to enable clock for the peripheral */ if (config->clk_enable_func != NULL) { ret = config->clk_enable_func(dev, data->clk_freq); if (ret) { return ret; } } pl011_runtime_configure_internal(dev, &data->uart_cfg, false); /* Setting transmit and receive interrupt FIFO level */ get_uart(dev)->ifls = FIELD_PREP(PL011_IFLS_TXIFLSEL_M, TXIFLSEL_1_8_FULL) | FIELD_PREP(PL011_IFLS_RXIFLSEL_M, RXIFLSEL_1_2_FULL); /* Enabling the FIFOs */ pl011_enable_fifo(dev); } /* initialize all IRQs as masked */ get_uart(dev)->imsc = 0U; get_uart(dev)->icr = PL011_IMSC_MASK_ALL; if (!data->sbsa) { get_uart(dev)->dmacr = 0U; barrier_isync_fence_full(); get_uart(dev)->cr &= ~PL011_CR_SIREN; get_uart(dev)->cr |= PL011_CR_RXE | PL011_CR_TXE; barrier_isync_fence_full(); } #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); data->sw_call_txdrdy = true; #endif if (!data->sbsa) { pl011_enable(dev); } return 0; } #define COMPAT_SPECIFIC_FUNC_NAME(prefix, name) _CONCAT(_CONCAT(prefix, name), _) /* * The first element of compatible is used to determine the type. * When compatible defines as "ambiq,uart", "arm,pl011", * this macro expands to pwr_on_ambiq_uart_[n]. */ #define COMPAT_SPECIFIC_PWR_ON_FUNC(n) \ _CONCAT(COMPAT_SPECIFIC_FUNC_NAME(pwr_on_, DT_INST_STRING_TOKEN_BY_IDX(n, compatible, 0)), \ n) /* * The first element of compatible is used to determine the type. * When compatible defines as "ambiq,uart", "arm,pl011", * this macro expands to clk_enable_ambiq_uart_[n]. 
*/ #define COMPAT_SPECIFIC_CLK_ENABLE_FUNC(n) \ _CONCAT(COMPAT_SPECIFIC_FUNC_NAME(clk_enable_, \ DT_INST_STRING_TOKEN_BY_IDX(n, compatible, 0)), n) /* * The first element of compatible is used to determine the type. * When compatible defines as "ambiq,uart", "arm,pl011", * this macro expands to AMBIQ_UART_DEFINE(n). */ #define COMPAT_SPECIFIC_DEFINE(n) \ _CONCAT(DT_INST_STRING_UPPER_TOKEN_BY_IDX(n, compatible, 0), _DEFINE)(n) #define COMPAT_SPECIFIC_CLOCK_CTLR_SUBSYS_CELL(n) \ _CONCAT(DT_INST_STRING_UPPER_TOKEN_BY_IDX(n, compatible, 0), _CLOCK_CTLR_SUBSYS_CELL) #if defined(CONFIG_PINCTRL) #define PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n); #define PINCTRL_INIT(n) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), #else #define PINCTRL_DEFINE(n) #define PINCTRL_INIT(n) #endif /* CONFIG_PINCTRL */ #if defined(CONFIG_RESET) #define RESET_INIT(n) \ IF_ENABLED(DT_INST_NODE_HAS_PROP(0, resets), (.reset = RESET_DT_SPEC_INST_GET(n),)) #else #define RESET_INIT(n) #endif #define CLOCK_INIT(n) \ COND_CODE_1(DT_NODE_HAS_COMPAT(DT_INST_CLOCKS_CTLR(n), fixed_clock), (), \ (.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_id = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, \ COMPAT_SPECIFIC_CLOCK_CTLR_SUBSYS_CELL(n)),)) #define ARM_PL011_DEFINE(n) \ static inline int pwr_on_arm_pl011_##n(void) \ { \ return 0; \ } \ static inline int clk_enable_arm_pl011_##n(const struct device *dev, uint32_t clk) \ { \ return 0; \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN void pl011_isr(const struct device *dev) { struct pl011_data *data = dev->data; /* Verify if the callback has been registered */ if (data->irq_cb) { data->irq_cb(dev, data->irq_cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define PL011_IRQ_CONFIG_FUNC_BODY(n, prop, i) \ { \ IRQ_CONNECT(DT_IRQ_BY_IDX(n, i, irq), \ DT_IRQ_BY_IDX(n, i, priority), \ pl011_isr, \ DEVICE_DT_GET(n), \ 0); \ irq_enable(DT_IRQ_BY_IDX(n, i, irq)); \ } #define PL011_CONFIG_PORT(n) \ static void 
pl011_irq_config_func_##n(const struct device *dev) \ { \ DT_INST_FOREACH_PROP_ELEM(n, interrupt_names, \ PL011_IRQ_CONFIG_FUNC_BODY) \ }; \ \ static struct pl011_config pl011_cfg_port_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ CLOCK_INIT(n) \ PINCTRL_INIT(n) \ .irq_config_func = pl011_irq_config_func_##n, \ .clk_enable_func = COMPAT_SPECIFIC_CLK_ENABLE_FUNC(n), \ .pwr_on_func = COMPAT_SPECIFIC_PWR_ON_FUNC(n), \ }; #else #define PL011_CONFIG_PORT(n) \ static struct pl011_config pl011_cfg_port_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ CLOCK_INIT(n) \ PINCTRL_INIT(n) \ }; #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #define PL011_INIT(n) \ PINCTRL_DEFINE(n) \ COMPAT_SPECIFIC_DEFINE(n) \ PL011_CONFIG_PORT(n) \ \ static struct pl011_data pl011_data_port_##n = { \ .uart_cfg = \ { \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = DT_INST_PROP(n, hw_flow_control) \ ? 
UART_CFG_FLOW_CTRL_RTS_CTS \ : UART_CFG_FLOW_CTRL_NONE, \ }, \ .clk_freq = \ COND_CODE_1(DT_NODE_HAS_COMPAT(DT_INST_CLOCKS_CTLR(n), fixed_clock), \ (DT_INST_PROP_BY_PHANDLE(n, clocks, clock_frequency)), (0)), \ }; \ \ DEVICE_DT_INST_DEFINE(n, pl011_init, PM_INST_GET(n), &pl011_data_port_##n, \ &pl011_cfg_port_##n, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, \ &pl011_driver_api); DT_INST_FOREACH_STATUS_OKAY(PL011_INIT) #ifdef CONFIG_UART_PL011_SBSA #undef DT_DRV_COMPAT #define DT_DRV_COMPAT SBSA_COMPAT #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define PL011_SBSA_CONFIG_PORT(n) \ static void pl011_irq_config_func_sbsa_##n(const struct device *dev) \ { \ DT_INST_FOREACH_PROP_ELEM(n, interrupt_names, \ PL011_IRQ_CONFIG_FUNC_BODY) \ }; \ \ static struct pl011_config pl011_cfg_sbsa_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ .irq_config_func = pl011_irq_config_func_sbsa_##n, \ }; #else #define PL011_SBSA_CONFIG_PORT(n) \ static struct pl011_config pl011_cfg_sbsa_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ }; #endif #define PL011_SBSA_INIT(n) \ PL011_SBSA_CONFIG_PORT(n) \ \ static struct pl011_data pl011_data_sbsa_##n = { \ .sbsa = true, \ }; \ \ DEVICE_DT_INST_DEFINE(n, pl011_init, \ NULL, \ &pl011_data_sbsa_##n, \ &pl011_cfg_sbsa_##n, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &pl011_driver_api); DT_INST_FOREACH_STATUS_OKAY(PL011_SBSA_INIT) #endif /* CONFIG_UART_PL011_SBSA */ ```
/content/code_sandbox/drivers/serial/uart_pl011.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,452
```c /* * */ #include <errno.h> #include <zephyr/drivers/uart.h> #include <zephyr/irq.h> #include <em_usart.h> #include <em_cmu.h> #include <soc.h> #ifdef CONFIG_PINCTRL #include <zephyr/drivers/pinctrl.h> #else #include <em_gpio.h> #endif /* CONFIG_PINCTRL */ #if DT_NODE_HAS_PROP(id, peripheral_id) #define USART_PREFIX cmuClock_USART #define UART_PREFIX cmuClock_UART #define CLOCK_USART(id) _CONCAT(USART_PREFIX, id) #define CLOCK_UART(id) _CONCAT(UART_PREFIX, id) #define GET_GECKO_USART_CLOCK(id) CLOCK_USART(DT_INST_PROP(id, peripheral_id)) #define GET_GECKO_UART_CLOCK(id) CLOCK_UART(DT_INST_PROP(id, peripheral_id)) #else #if (USART_COUNT == 1) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : -1) #elif (USART_COUNT == 2) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : -1) #elif (USART_COUNT == 3) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : -1) #elif (USART_COUNT == 4) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : ((ref) == USART3) ? cmuClock_USART3 \ : -1) #elif (USART_COUNT == 5) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : ((ref) == USART3) ? cmuClock_USART3 \ : ((ref) == USART4) ? cmuClock_USART4 \ : -1) #elif (USART_COUNT == 6) #define CLOCK_USART(ref) (((ref) == USART0) ? cmuClock_USART0 \ : ((ref) == USART1) ? cmuClock_USART1 \ : ((ref) == USART2) ? cmuClock_USART2 \ : ((ref) == USART3) ? cmuClock_USART3 \ : ((ref) == USART4) ? cmuClock_USART4 \ : ((ref) == USART5) ? cmuClock_USART5 \ : -1) #else #error "Undefined number of USARTs." #endif /* USART_COUNT */ #define CLOCK_UART(ref) (((ref) == UART0) ? cmuClock_UART0 \ : ((ref) == UART1) ? 
cmuClock_UART1 \ : -1) #define GET_GECKO_USART_CLOCK(id) CLOCK_USART((USART_TypeDef *)DT_INST_REG_ADDR(id)) #define GET_GECKO_UART_CLOCK(id) CLOCK_UART((USART_TypeDef *)DT_INST_REG_ADDR(id)) #endif /* DT_NODE_HAS_PROP(id, peripheral_id) */ /* Helper define to determine if SOC supports hardware flow control */ #if ((_SILICON_LABS_32B_SERIES > 0) || \ (defined(_USART_ROUTEPEN_RTSPEN_MASK) && \ defined(_USART_ROUTEPEN_CTSPEN_MASK))) #define HW_FLOWCONTROL_IS_SUPPORTED_BY_SOC #endif #define HAS_HFC_OR(inst) DT_INST_PROP(inst, hw_flow_control) || #define DT_DRV_COMPAT silabs_gecko_uart /* Has any enabled uart instance hw-flow-control enabled? */ #define UART_GECKO_UART_HW_FLOW_CONTROL_ENABLED \ DT_INST_FOREACH_STATUS_OKAY(HAS_HFC_OR) 0 #undef DT_DRV_COMPAT #define DT_DRV_COMPAT silabs_gecko_usart /* Has any enabled usart instance hw-flow-control enabled? */ #define UART_GECKO_USART_HW_FLOW_CONTROL_ENABLED \ DT_INST_FOREACH_STATUS_OKAY(HAS_HFC_OR) 0 #if UART_GECKO_USART_HW_FLOW_CONTROL_ENABLED || \ UART_GECKO_UART_HW_FLOW_CONTROL_ENABLED #define UART_GECKO_HW_FLOW_CONTROL #endif /* Sanity check for hardware flow control */ #if defined(UART_GECKO_HW_FLOW_CONTROL) && \ (!(defined(HW_FLOWCONTROL_IS_SUPPORTED_BY_SOC))) #error "Hardware flow control is activated for at least one UART/USART, \ but not supported by this SOC" #endif #if defined(UART_GECKO_HW_FLOW_CONTROL) && \ (!defined(CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION) && \ !defined(GPIO_USART_ROUTEEN_RTSPEN)) #error "Driver not supporting hardware flow control for this SOC" #endif /** * @brief Config struct for UART */ struct uart_gecko_config { #ifdef CONFIG_PINCTRL const struct pinctrl_dev_config *pcfg; #endif /* CONFIG_PINCTRL */ USART_TypeDef *base; CMU_Clock_TypeDef clock; uint32_t baud_rate; #ifndef CONFIG_PINCTRL #ifdef UART_GECKO_HW_FLOW_CONTROL bool hw_flowcontrol; #endif /* UART_GECKO_HW_FLOW_CONTROL */ #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif /* 
CONFIG_UART_INTERRUPT_DRIVEN */ #ifndef CONFIG_PINCTRL struct soc_gpio_pin pin_rx; struct soc_gpio_pin pin_tx; #ifdef UART_GECKO_HW_FLOW_CONTROL struct soc_gpio_pin pin_rts; struct soc_gpio_pin pin_cts; #endif /* UART_GECKO_HW_FLOW_CONTROL */ #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION uint8_t loc_rx; uint8_t loc_tx; #ifdef UART_GECKO_HW_FLOW_CONTROL uint8_t loc_rts; uint8_t loc_cts; #endif /* UART_GECKO_HW_FLOW_CONTROL */ #else /* CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION */ uint8_t loc; #endif /* CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION */ #endif }; struct uart_gecko_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; static int uart_gecko_poll_in(const struct device *dev, unsigned char *c) { const struct uart_gecko_config *config = dev->config; uint32_t flags = USART_StatusGet(config->base); if (flags & USART_STATUS_RXDATAV) { *c = USART_Rx(config->base); return 0; } return -1; } static void uart_gecko_poll_out(const struct device *dev, unsigned char c) { const struct uart_gecko_config *config = dev->config; USART_Tx(config->base, c); } static int uart_gecko_err_check(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t flags = USART_IntGet(config->base); int err = 0; if (flags & USART_IF_RXOF) { err |= UART_ERROR_OVERRUN; } if (flags & USART_IF_PERR) { err |= UART_ERROR_PARITY; } if (flags & USART_IF_FERR) { err |= UART_ERROR_FRAMING; } USART_IntClear(config->base, USART_IF_RXOF | USART_IF_PERR | USART_IF_FERR); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_gecko_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct uart_gecko_config *config = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && (config->base->STATUS & USART_STATUS_TXBL)) { config->base->TXDATA = (uint32_t)tx_data[num_tx++]; } return num_tx; } static int uart_gecko_fifo_read(const struct device *dev, uint8_t *rx_data, const int 
len) { const struct uart_gecko_config *config = dev->config; uint8_t num_rx = 0U; while ((len - num_rx > 0) && (config->base->STATUS & USART_STATUS_RXDATAV)) { rx_data[num_rx++] = (uint8_t)config->base->RXDATA; } return num_rx; } static void uart_gecko_irq_tx_enable(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t mask = USART_IEN_TXBL | USART_IEN_TXC; USART_IntEnable(config->base, mask); } static void uart_gecko_irq_tx_disable(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t mask = USART_IEN_TXBL | USART_IEN_TXC; USART_IntDisable(config->base, mask); } static int uart_gecko_irq_tx_complete(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t flags = USART_IntGet(config->base); USART_IntClear(config->base, USART_IF_TXC); return (flags & USART_IF_TXC) != 0U; } static int uart_gecko_irq_tx_ready(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t flags = USART_IntGetEnabled(config->base); return (flags & USART_IF_TXBL) != 0U; } static void uart_gecko_irq_rx_enable(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t mask = USART_IEN_RXDATAV; USART_IntEnable(config->base, mask); } static void uart_gecko_irq_rx_disable(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t mask = USART_IEN_RXDATAV; USART_IntDisable(config->base, mask); } static int uart_gecko_irq_rx_full(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t flags = USART_IntGet(config->base); return (flags & USART_IF_RXDATAV) != 0U; } static int uart_gecko_irq_rx_ready(const struct device *dev) { const struct uart_gecko_config *config = dev->config; uint32_t mask = USART_IEN_RXDATAV; return (config->base->IEN & mask) && uart_gecko_irq_rx_full(dev); } static void uart_gecko_irq_err_enable(const struct device *dev) { const struct 
uart_gecko_config *config = dev->config; USART_IntEnable(config->base, USART_IF_RXOF | USART_IF_PERR | USART_IF_FERR); } static void uart_gecko_irq_err_disable(const struct device *dev) { const struct uart_gecko_config *config = dev->config; USART_IntDisable(config->base, USART_IF_RXOF | USART_IF_PERR | USART_IF_FERR); } static int uart_gecko_irq_is_pending(const struct device *dev) { return uart_gecko_irq_tx_ready(dev) || uart_gecko_irq_rx_ready(dev); } static int uart_gecko_irq_update(const struct device *dev) { return 1; } static void uart_gecko_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_gecko_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void uart_gecko_isr(const struct device *dev) { struct uart_gecko_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ /** * @brief Subroutine initializer of UART pins * * @param dev UART device to configure */ #ifndef CONFIG_PINCTRL static void uart_gecko_init_pins(const struct device *dev) { const struct uart_gecko_config *config = dev->config; /* Configure RX and TX */ GPIO_PinModeSet(config->pin_rx.port, config->pin_rx.pin, config->pin_rx.mode, config->pin_rx.out); GPIO_PinModeSet(config->pin_tx.port, config->pin_tx.pin, config->pin_tx.mode, config->pin_tx.out); #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION /* For SOCs with configurable pin locations (set in SOC Kconfig) */ config->base->ROUTEPEN = USART_ROUTEPEN_RXPEN | USART_ROUTEPEN_TXPEN; config->base->ROUTELOC0 = (config->loc_tx << _USART_ROUTELOC0_TXLOC_SHIFT) | (config->loc_rx << _USART_ROUTELOC0_RXLOC_SHIFT); config->base->ROUTELOC1 = _USART_ROUTELOC1_RESETVALUE; #elif defined(USART_ROUTE_RXPEN) && defined(USART_ROUTE_TXPEN) /* For olders SOCs with only one pin location */ config->base->ROUTE = USART_ROUTE_RXPEN | USART_ROUTE_TXPEN | (config->loc << 8); #elif defined(GPIO_USART_ROUTEEN_RXPEN) 
&& defined(GPIO_USART_ROUTEEN_TXPEN) GPIO->USARTROUTE[USART_NUM(config->base)].ROUTEEN = GPIO_USART_ROUTEEN_TXPEN | GPIO_USART_ROUTEEN_RXPEN; GPIO->USARTROUTE[USART_NUM(config->base)].TXROUTE = (config->pin_tx.pin << _GPIO_USART_TXROUTE_PIN_SHIFT) | (config->pin_tx.port << _GPIO_USART_TXROUTE_PORT_SHIFT); GPIO->USARTROUTE[USART_NUM(config->base)].RXROUTE = (config->pin_rx.pin << _GPIO_USART_RXROUTE_PIN_SHIFT) | (config->pin_rx.port << _GPIO_USART_RXROUTE_PORT_SHIFT); #endif /* CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION */ #ifdef UART_GECKO_HW_FLOW_CONTROL /* Configure HW flow control (RTS, CTS) */ if (config->hw_flowcontrol) { GPIO_PinModeSet(config->pin_rts.port, config->pin_rts.pin, config->pin_rts.mode, config->pin_rts.out); GPIO_PinModeSet(config->pin_cts.port, config->pin_cts.pin, config->pin_cts.mode, config->pin_cts.out); #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION config->base->ROUTEPEN = USART_ROUTEPEN_RXPEN | USART_ROUTEPEN_TXPEN | USART_ROUTEPEN_RTSPEN | USART_ROUTEPEN_CTSPEN; config->base->ROUTELOC1 = (config->loc_rts << _USART_ROUTELOC1_RTSLOC_SHIFT) | (config->loc_cts << _USART_ROUTELOC1_CTSLOC_SHIFT); #elif defined(GPIO_USART_ROUTEEN_RTSPEN) && defined(GPIO_USART_ROUTEEN_CTSPEN) GPIO->USARTROUTE[USART_NUM(config->base)].ROUTEEN = GPIO_USART_ROUTEEN_TXPEN | GPIO_USART_ROUTEEN_RXPEN | GPIO_USART_ROUTEPEN_RTSPEN | GPIO_USART_ROUTEPEN_CTSPEN; GPIO->USARTROUTE[USART_NUM(config->base)].RTSROUTE = (config->pin_rts.pin << _GPIO_USART_RTSROUTE_PIN_SHIFT) | (config->pin_rts.port << _GPIO_USART_RTSROUTE_PORT_SHIFT); GPIO->USARTROUTE[USART_NUM(config->base)].CTSROUTE = (config->pin_cts.pin << _GPIO_USART_CTSROUTE_PIN_SHIFT) | (config->pin_cts.port << _GPIO_USART_CTSROUTE_PORT_SHIFT); #endif /* CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION */ } #endif /* UART_GECKO_HW_FLOW_CONTROL */ } #endif /* !CONFIG_PINCTRL */ /** * @brief Main initializer for UART * * @param dev UART device to be initialized * @return int 0 */ static int uart_gecko_init(const struct 
device *dev) { #ifdef CONFIG_PINCTRL int err; #endif /* CONFIG_PINCTRL */ const struct uart_gecko_config *config = dev->config; USART_InitAsync_TypeDef usartInit = USART_INITASYNC_DEFAULT; /* The peripheral and gpio clock are already enabled from soc and gpio * driver */ /* Enable USART clock */ CMU_ClockEnable(config->clock, true); /* Init USART */ usartInit.baudrate = config->baud_rate; #ifdef UART_GECKO_HW_FLOW_CONTROL usartInit.hwFlowControl = config->hw_flowcontrol ? usartHwFlowControlCtsAndRts : usartHwFlowControlNone; #endif USART_InitAsync(config->base, &usartInit); #ifdef CONFIG_PINCTRL err = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } #else /* Initialize USART pins */ uart_gecko_init_pins(dev); #endif /* CONFIG_PINCTRL */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } static const struct uart_driver_api uart_gecko_driver_api = { .poll_in = uart_gecko_poll_in, .poll_out = uart_gecko_poll_out, .err_check = uart_gecko_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_gecko_fifo_fill, .fifo_read = uart_gecko_fifo_read, .irq_tx_enable = uart_gecko_irq_tx_enable, .irq_tx_disable = uart_gecko_irq_tx_disable, .irq_tx_complete = uart_gecko_irq_tx_complete, .irq_tx_ready = uart_gecko_irq_tx_ready, .irq_rx_enable = uart_gecko_irq_rx_enable, .irq_rx_disable = uart_gecko_irq_rx_disable, .irq_rx_ready = uart_gecko_irq_rx_ready, .irq_err_enable = uart_gecko_irq_err_enable, .irq_err_disable = uart_gecko_irq_err_disable, .irq_is_pending = uart_gecko_irq_is_pending, .irq_update = uart_gecko_irq_update, .irq_callback_set = uart_gecko_irq_callback_set, #endif }; #undef DT_DRV_COMPAT #define DT_DRV_COMPAT silabs_gecko_uart #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define GECKO_UART_IRQ_HANDLER_DECL(idx) \ static void uart_gecko_config_func_##idx(const struct device *dev) #define GECKO_UART_IRQ_HANDLER_FUNC(idx) \ .irq_config_func = uart_gecko_config_func_##idx, #define 
GECKO_UART_IRQ_HANDLER(idx) \ static void uart_gecko_config_func_##idx(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(idx, rx, irq), \ DT_INST_IRQ_BY_NAME(idx, rx, priority), \ uart_gecko_isr, DEVICE_DT_INST_GET(idx), 0); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(idx, tx, irq), \ DT_INST_IRQ_BY_NAME(idx, tx, priority), \ uart_gecko_isr, DEVICE_DT_INST_GET(idx), 0); \ \ irq_enable(DT_INST_IRQ_BY_NAME(idx, rx, irq)); \ irq_enable(DT_INST_IRQ_BY_NAME(idx, tx, irq)); \ } #else /* CONFIG_UART_INTERRUPT_DRIVEN */ #define GECKO_UART_IRQ_HANDLER_DECL(idx) #define GECKO_UART_IRQ_HANDLER_FUNC(idx) #define GECKO_UART_IRQ_HANDLER(idx) #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION #define GECKO_UART_RX_TX_PIN_LOCATIONS(idx) \ .loc_rx = DT_INST_PROP_BY_IDX(idx, location_rx, 0), \ .loc_tx = DT_INST_PROP_BY_IDX(idx, location_tx, 0), #define VALIDATE_GECKO_UART_RX_TX_PIN_LOCATIONS(idx) #else #define GECKO_UART_RX_TX_PIN_LOCATIONS(idx) \ .loc = DT_INST_PROP_BY_IDX(idx, location_rx, 0), #define VALIDATE_GECKO_UART_RX_TX_PIN_LOCATIONS(idx) \ BUILD_ASSERT(DT_INST_PROP_BY_IDX(idx, location_rx, 0) == \ DT_INST_PROP_BY_IDX(idx, location_tx, 0), \ "DTS location-* properties must have identical value") #endif #define PIN_UART_RXD(idx) \ { \ DT_INST_PROP_BY_IDX(idx, location_rx, 1), \ DT_INST_PROP_BY_IDX(idx, location_rx, 2), \ gpioModeInput, 1 \ } #define PIN_UART_TXD(idx) \ { \ DT_INST_PROP_BY_IDX(idx, location_tx, 1), \ DT_INST_PROP_BY_IDX(idx, location_tx, 2), \ gpioModePushPull, 1 \ } #define GECKO_UART_RX_TX_PINS(idx) \ .pin_rx = PIN_UART_RXD(idx), \ .pin_tx = PIN_UART_TXD(idx), #ifdef UART_GECKO_HW_FLOW_CONTROL #ifdef CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION #define GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) \ .loc_rts = COND_CODE_1(DT_INST_PROP(idx, hw_flow_control), \ (DT_INST_PROP_BY_IDX(idx, location_rts, 0)), \ (0)), \ .loc_cts = COND_CODE_1(DT_INST_PROP(idx, hw_flow_control), \ (DT_INST_PROP_BY_IDX(idx, location_cts, 0)), \ 
(0)), #define VALIDATE_GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) \ COND_CODE_1(DT_INST_PROP(idx, hw_flow_control), \ (BUILD_ASSERT(DT_INST_NODE_HAS_PROP(idx, location_rts) && \ DT_INST_NODE_HAS_PROP(idx, location_cts), \ "DTS location-rts and location-cts are mandatory")), \ ()) #else /* CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION */ /* Hardware flow control not supported for these SOCs */ #define GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) #define VALIDATE_GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) #endif /* CONFIG_SOC_GECKO_HAS_INDIVIDUAL_PIN_LOCATION */ #define PIN_UART_RTS(idx) \ COND_CODE_1(DT_INST_PROP(idx, hw_flow_control), \ ({ \ DT_INST_PROP_BY_IDX(idx, location_rts, 1), \ DT_INST_PROP_BY_IDX(idx, location_rts, 2), \ gpioModePushPull, 1 \ }), \ ({0})) #define PIN_UART_CTS(idx) \ COND_CODE_1(DT_INST_PROP(idx, hw_flow_control), \ ({ \ DT_INST_PROP_BY_IDX(idx, location_cts, 1), \ DT_INST_PROP_BY_IDX(idx, location_cts, 2), \ gpioModeInput, 1 \ }), \ ({0})) #define GECKO_UART_RTS_CTS_PINS(idx) \ .pin_rts = PIN_UART_RTS(idx), \ .pin_cts = PIN_UART_CTS(idx), #define GECKO_UART_HW_FLOW_CONTROL(idx) \ .hw_flowcontrol = DT_INST_PROP(idx, hw_flow_control), #else /* UART_GECKO_HW_FLOW_CONTROL */ #define GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) #define VALIDATE_GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) #define GECKO_UART_RTS_CTS_PINS(idx) #define GECKO_UART_HW_FLOW_CONTROL(idx) #endif /* UART_GECKO_HW_FLOW_CONTROL */ #define GECKO_UART_INIT(idx) \ VALIDATE_GECKO_UART_RX_TX_PIN_LOCATIONS(idx); \ VALIDATE_GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx); \ \ GECKO_UART_IRQ_HANDLER_DECL(idx); \ \ static const struct uart_gecko_config uart_gecko_cfg_##idx = { \ .base = (USART_TypeDef *)DT_INST_REG_ADDR(idx), \ .clock = GET_GECKO_UART_CLOCK(idx), \ .baud_rate = DT_INST_PROP(idx, current_speed), \ GECKO_UART_HW_FLOW_CONTROL(idx) \ GECKO_UART_RX_TX_PINS(idx) \ GECKO_UART_RTS_CTS_PINS(idx) \ GECKO_UART_RX_TX_PIN_LOCATIONS(idx) \ GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) \ GECKO_UART_IRQ_HANDLER_FUNC(idx) \ }; \ 
\ static struct uart_gecko_data uart_gecko_data_##idx; \ \ DEVICE_DT_INST_DEFINE(idx, uart_gecko_init, \ NULL, &uart_gecko_data_##idx, \ &uart_gecko_cfg_##idx, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_gecko_driver_api); \ \ \ GECKO_UART_IRQ_HANDLER(idx) DT_INST_FOREACH_STATUS_OKAY(GECKO_UART_INIT) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT silabs_gecko_usart #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define GECKO_USART_IRQ_HANDLER_DECL(idx) \ static void usart_gecko_config_func_##idx(const struct device *dev) #define GECKO_USART_IRQ_HANDLER_FUNC(idx) \ .irq_config_func = usart_gecko_config_func_##idx, #define GECKO_USART_IRQ_HANDLER(idx) \ static void usart_gecko_config_func_##idx(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(idx, rx, irq), \ DT_INST_IRQ_BY_NAME(idx, rx, priority), \ uart_gecko_isr, DEVICE_DT_INST_GET(idx), 0); \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(idx, tx, irq), \ DT_INST_IRQ_BY_NAME(idx, tx, priority), \ uart_gecko_isr, DEVICE_DT_INST_GET(idx), 0); \ \ irq_enable(DT_INST_IRQ_BY_NAME(idx, rx, irq)); \ irq_enable(DT_INST_IRQ_BY_NAME(idx, tx, irq)); \ } #else #define GECKO_USART_IRQ_HANDLER_DECL(idx) #define GECKO_USART_IRQ_HANDLER_FUNC(idx) #define GECKO_USART_IRQ_HANDLER(idx) #endif #ifdef CONFIG_PINCTRL #define GECKO_USART_INIT(idx) \ PINCTRL_DT_INST_DEFINE(idx); \ GECKO_USART_IRQ_HANDLER_DECL(idx); \ \ static const struct uart_gecko_config usart_gecko_cfg_##idx = { \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \ .base = (USART_TypeDef *)DT_INST_REG_ADDR(idx), \ .clock = GET_GECKO_USART_CLOCK(idx), \ .baud_rate = DT_INST_PROP(idx, current_speed), \ GECKO_USART_IRQ_HANDLER_FUNC(idx) \ }; \ \ static struct uart_gecko_data usart_gecko_data_##idx; \ \ DEVICE_DT_INST_DEFINE(idx, uart_gecko_init, NULL, \ &usart_gecko_data_##idx, \ &usart_gecko_cfg_##idx, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_gecko_driver_api); \ \ GECKO_USART_IRQ_HANDLER(idx) #else #define GECKO_USART_INIT(idx) \ 
VALIDATE_GECKO_UART_RX_TX_PIN_LOCATIONS(idx); \ VALIDATE_GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx); \ \ GECKO_USART_IRQ_HANDLER_DECL(idx); \ \ static const struct uart_gecko_config usart_gecko_cfg_##idx = { \ .base = (USART_TypeDef *)DT_INST_REG_ADDR(idx), \ .clock = GET_GECKO_USART_CLOCK(idx), \ .baud_rate = DT_INST_PROP(idx, current_speed), \ GECKO_UART_HW_FLOW_CONTROL(idx) \ GECKO_UART_RX_TX_PINS(idx) \ GECKO_UART_RTS_CTS_PINS(idx) \ GECKO_UART_RX_TX_PIN_LOCATIONS(idx) \ GECKO_UART_RTS_CTS_PIN_LOCATIONS(idx) \ GECKO_USART_IRQ_HANDLER_FUNC(idx) \ }; \ \ static struct uart_gecko_data usart_gecko_data_##idx; \ \ DEVICE_DT_INST_DEFINE(idx, uart_gecko_init, NULL, \ &usart_gecko_data_##idx, \ &usart_gecko_cfg_##idx, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_gecko_driver_api); \ \ GECKO_USART_IRQ_HANDLER(idx) #endif DT_INST_FOREACH_STATUS_OKAY(GECKO_USART_INIT) ```
/content/code_sandbox/drivers/serial/uart_gecko.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,355
```c /* * */ #define DT_DRV_COMPAT nxp_kinetis_lpsci #include <errno.h> #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <fsl_lpsci.h> #include <soc.h> #include <zephyr/irq.h> struct mcux_lpsci_config { UART0_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; uint32_t baud_rate; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif const struct pinctrl_dev_config *pincfg; }; struct mcux_lpsci_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; static int mcux_lpsci_poll_in(const struct device *dev, unsigned char *c) { const struct mcux_lpsci_config *config = dev->config; uint32_t flags = LPSCI_GetStatusFlags(config->base); int ret = -1; if (flags & kLPSCI_RxDataRegFullFlag) { *c = LPSCI_ReadByte(config->base); ret = 0; } return ret; } static void mcux_lpsci_poll_out(const struct device *dev, unsigned char c) { const struct mcux_lpsci_config *config = dev->config; while (!(LPSCI_GetStatusFlags(config->base) & kLPSCI_TxDataRegEmptyFlag)) { } LPSCI_WriteByte(config->base, c); } static int mcux_lpsci_err_check(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t flags = LPSCI_GetStatusFlags(config->base); int err = 0; if (flags & kLPSCI_RxOverrunFlag) { err |= UART_ERROR_OVERRUN; } if (flags & kLPSCI_ParityErrorFlag) { err |= UART_ERROR_PARITY; } if (flags & kLPSCI_FramingErrorFlag) { err |= UART_ERROR_FRAMING; } LPSCI_ClearStatusFlags(config->base, kLPSCI_RxOverrunFlag | kLPSCI_ParityErrorFlag | kLPSCI_FramingErrorFlag); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int mcux_lpsci_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct mcux_lpsci_config *config = dev->config; uint8_t num_tx = 0U; while ((len - num_tx > 0) && (LPSCI_GetStatusFlags(config->base) & 
kLPSCI_TxDataRegEmptyFlag)) { LPSCI_WriteByte(config->base, tx_data[num_tx++]); } return num_tx; } static int mcux_lpsci_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { const struct mcux_lpsci_config *config = dev->config; uint8_t num_rx = 0U; while ((len - num_rx > 0) && (LPSCI_GetStatusFlags(config->base) & kLPSCI_RxDataRegFullFlag)) { rx_data[num_rx++] = LPSCI_ReadByte(config->base); } return num_rx; } static void mcux_lpsci_irq_tx_enable(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_TxDataRegEmptyInterruptEnable; LPSCI_EnableInterrupts(config->base, mask); } static void mcux_lpsci_irq_tx_disable(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_TxDataRegEmptyInterruptEnable; LPSCI_DisableInterrupts(config->base, mask); } static int mcux_lpsci_irq_tx_complete(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t flags = LPSCI_GetStatusFlags(config->base); return (flags & kLPSCI_TransmissionCompleteFlag) != 0U; } static int mcux_lpsci_irq_tx_ready(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_TxDataRegEmptyInterruptEnable; uint32_t flags = LPSCI_GetStatusFlags(config->base); return (LPSCI_GetEnabledInterrupts(config->base) & mask) && (flags & kLPSCI_TxDataRegEmptyFlag); } static void mcux_lpsci_irq_rx_enable(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_RxDataRegFullInterruptEnable; LPSCI_EnableInterrupts(config->base, mask); } static void mcux_lpsci_irq_rx_disable(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_RxDataRegFullInterruptEnable; LPSCI_DisableInterrupts(config->base, mask); } static int mcux_lpsci_irq_rx_full(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t flags = 
LPSCI_GetStatusFlags(config->base); return (flags & kLPSCI_RxDataRegFullFlag) != 0U; } static int mcux_lpsci_irq_rx_pending(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_RxDataRegFullInterruptEnable; return (LPSCI_GetEnabledInterrupts(config->base) & mask) && mcux_lpsci_irq_rx_full(dev); } static void mcux_lpsci_irq_err_enable(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_NoiseErrorInterruptEnable | kLPSCI_FramingErrorInterruptEnable | kLPSCI_ParityErrorInterruptEnable; LPSCI_EnableInterrupts(config->base, mask); } static void mcux_lpsci_irq_err_disable(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; uint32_t mask = kLPSCI_NoiseErrorInterruptEnable | kLPSCI_FramingErrorInterruptEnable | kLPSCI_ParityErrorInterruptEnable; LPSCI_DisableInterrupts(config->base, mask); } static int mcux_lpsci_irq_is_pending(const struct device *dev) { return (mcux_lpsci_irq_tx_ready(dev) || mcux_lpsci_irq_rx_pending(dev)); } static int mcux_lpsci_irq_update(const struct device *dev) { return 1; } static void mcux_lpsci_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct mcux_lpsci_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void mcux_lpsci_isr(const struct device *dev) { struct mcux_lpsci_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int mcux_lpsci_init(const struct device *dev) { const struct mcux_lpsci_config *config = dev->config; lpsci_config_t uart_config; uint32_t clock_freq; int err; if (!device_is_ready(config->clock_dev)) { return -ENODEV; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } LPSCI_GetDefaultConfig(&uart_config); uart_config.enableTx = true; uart_config.enableRx = true; 
uart_config.baudRate_Bps = config->baud_rate; LPSCI_Init(config->base, &uart_config, clock_freq); err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err < 0) { return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } static const struct uart_driver_api mcux_lpsci_driver_api = { .poll_in = mcux_lpsci_poll_in, .poll_out = mcux_lpsci_poll_out, .err_check = mcux_lpsci_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = mcux_lpsci_fifo_fill, .fifo_read = mcux_lpsci_fifo_read, .irq_tx_enable = mcux_lpsci_irq_tx_enable, .irq_tx_disable = mcux_lpsci_irq_tx_disable, .irq_tx_complete = mcux_lpsci_irq_tx_complete, .irq_tx_ready = mcux_lpsci_irq_tx_ready, .irq_rx_enable = mcux_lpsci_irq_rx_enable, .irq_rx_disable = mcux_lpsci_irq_rx_disable, .irq_rx_ready = mcux_lpsci_irq_rx_full, .irq_err_enable = mcux_lpsci_irq_err_enable, .irq_err_disable = mcux_lpsci_irq_err_disable, .irq_is_pending = mcux_lpsci_irq_is_pending, .irq_update = mcux_lpsci_irq_update, .irq_callback_set = mcux_lpsci_irq_callback_set, #endif }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define MCUX_LPSCI_CONFIG_FUNC(n) \ static void mcux_lpsci_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ mcux_lpsci_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } #define MCUX_LPSCI_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = mcux_lpsci_config_func_##n #define MCUX_LPSCI_INIT_CFG(n) \ MCUX_LPSCI_DECLARE_CFG(n, MCUX_LPSCI_IRQ_CFG_FUNC_INIT(n)) #else #define MCUX_LPSCI_CONFIG_FUNC(n) #define MCUX_LPSCI_IRQ_CFG_FUNC_INIT #define MCUX_LPSCI_INIT_CFG(n) \ MCUX_LPSCI_DECLARE_CFG(n, MCUX_LPSCI_IRQ_CFG_FUNC_INIT) #endif #define MCUX_LPSCI_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct mcux_lpsci_config mcux_lpsci_##n##_config = { \ .base = (UART0_Type *)DT_INST_REG_ADDR(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, 
name),\ .baud_rate = DT_INST_PROP(n, current_speed), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ IRQ_FUNC_INIT \ } #define MCUX_LPSCI_INIT(n) \ PINCTRL_DT_INST_DEFINE(n); \ \ static struct mcux_lpsci_data mcux_lpsci_##n##_data; \ \ static const struct mcux_lpsci_config mcux_lpsci_##n##_config; \ \ DEVICE_DT_INST_DEFINE(n, \ mcux_lpsci_init, \ NULL, \ &mcux_lpsci_##n##_data, \ &mcux_lpsci_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &mcux_lpsci_driver_api); \ \ MCUX_LPSCI_CONFIG_FUNC(n) \ \ MCUX_LPSCI_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(MCUX_LPSCI_INIT) ```
/content/code_sandbox/drivers/serial/uart_mcux_lpsci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,664
```unknown config UART_SMARTBOND bool "Renesas SmartBond(tm) UART driver" default y depends on DT_HAS_RENESAS_SMARTBOND_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select UART_INTERRUPT_DRIVEN if PM_DEVICE help Enable UART driver for Renesas SmartBond(tm) DA1469x series MCU. ```
/content/code_sandbox/drivers/serial/Kconfig.smartbond
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
79
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SERIAL_UART_PL011_AMBIQ_H_ #define ZEPHYR_DRIVERS_SERIAL_UART_PL011_AMBIQ_H_ #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/pm/device.h> #include <zephyr/pm/policy.h> #include "uart_pl011_registers.h" #include <am_mcu_apollo.h> #define PWRCTRL_MAX_WAIT_US 5 static inline void pl011_ambiq_enable_clk(const struct device *dev) { get_uart(dev)->cr |= PL011_CR_AMBIQ_CLKEN; } static inline int pl011_ambiq_clk_set(const struct device *dev, uint32_t clk) { uint8_t clksel; switch (clk) { case 3000000: clksel = PL011_CR_AMBIQ_CLKSEL_3MHZ; break; case 6000000: clksel = PL011_CR_AMBIQ_CLKSEL_6MHZ; break; case 12000000: clksel = PL011_CR_AMBIQ_CLKSEL_12MHZ; break; case 24000000: clksel = PL011_CR_AMBIQ_CLKSEL_24MHZ; break; default: return -EINVAL; } get_uart(dev)->cr |= FIELD_PREP(PL011_CR_AMBIQ_CLKSEL, clksel); return 0; } static inline int clk_enable_ambiq_uart(const struct device *dev, uint32_t clk) { pl011_ambiq_enable_clk(dev); return pl011_ambiq_clk_set(dev, clk); } #ifdef CONFIG_PM_DEVICE /* Register status record. * The register status will be preserved to this variable before entering sleep mode, * and they will be restored after wake up. */ typedef struct { bool bValid; uint32_t regILPR; uint32_t regIBRD; uint32_t regFBRD; uint32_t regLCRH; uint32_t regCR; uint32_t regIFLS; uint32_t regIER; } uart_register_state_t; static uart_register_state_t sRegState[2]; static int uart_ambiq_pm_action(const struct device *dev, enum pm_device_action action) { int key; /*Uart module number*/ uint32_t ui32Module = ((uint32_t)get_uart(dev) == UART0_BASE) ? 0 : 1; /*Uart Power module*/ am_hal_pwrctrl_periph_e eUARTPowerModule = ((am_hal_pwrctrl_periph_e)(AM_HAL_PWRCTRL_PERIPH_UART0 + ui32Module)); /*Uart register status*/ uart_register_state_t *pRegisterStatus = &sRegState[ui32Module]; /* Decode the requested power state and update UART operation accordingly.*/ switch (action) { /* Turn on the UART. 
*/ case PM_DEVICE_ACTION_RESUME: /* Make sure we don't try to restore an invalid state.*/ if (!pRegisterStatus->bValid) { return -EPERM; } /*The resume and suspend actions may be executed back-to-back, * so we add a busy wait here for stabilization. */ k_busy_wait(100); /* Enable power control.*/ am_hal_pwrctrl_periph_enable(eUARTPowerModule); /* Restore UART registers*/ key = irq_lock(); UARTn(ui32Module)->ILPR = pRegisterStatus->regILPR; UARTn(ui32Module)->IBRD = pRegisterStatus->regIBRD; UARTn(ui32Module)->FBRD = pRegisterStatus->regFBRD; UARTn(ui32Module)->LCRH = pRegisterStatus->regLCRH; UARTn(ui32Module)->CR = pRegisterStatus->regCR; UARTn(ui32Module)->IFLS = pRegisterStatus->regIFLS; UARTn(ui32Module)->IER = pRegisterStatus->regIER; pRegisterStatus->bValid = false; irq_unlock(key); return 0; case PM_DEVICE_ACTION_SUSPEND: while ((get_uart(dev)->fr & PL011_FR_BUSY) != 0) ; /* Preserve UART registers*/ key = irq_lock(); pRegisterStatus->regILPR = UARTn(ui32Module)->ILPR; pRegisterStatus->regIBRD = UARTn(ui32Module)->IBRD; pRegisterStatus->regFBRD = UARTn(ui32Module)->FBRD; pRegisterStatus->regLCRH = UARTn(ui32Module)->LCRH; pRegisterStatus->regCR = UARTn(ui32Module)->CR; pRegisterStatus->regIFLS = UARTn(ui32Module)->IFLS; pRegisterStatus->regIER = UARTn(ui32Module)->IER; pRegisterStatus->bValid = true; irq_unlock(key); /* Clear all interrupts before sleeping as having a pending UART * interrupt burns power. */ UARTn(ui32Module)->IEC = 0xFFFFFFFF; /* If the user is going to sleep, certain bits of the CR register * need to be 0 to be low power and have the UART shut off. * Since the user either wishes to retain state which takes place * above or the user does not wish to retain state, it is acceptable * to set the entire CR register to 0. 
*/ UARTn(ui32Module)->CR = 0; /* Disable power control.*/ am_hal_pwrctrl_periph_disable(eUARTPowerModule); return 0; default: return -ENOTSUP; } } #endif /* CONFIG_PM_DEVICE */ /* Problem: writes to power configure register takes some time to take effective. * Solution: Check device's power status to ensure that register has taken effective. * Note: busy wait is not allowed to use here due to UART is initiated before timer starts. */ #if defined(CONFIG_SOC_SERIES_APOLLO3X) #define DEVPWRSTATUS_OFFSET 0x10 #define HCPA_MASK 0x4 #define AMBIQ_UART_DEFINE(n) \ PM_DEVICE_DT_INST_DEFINE(n, uart_ambiq_pm_action); \ static int pwr_on_ambiq_uart_##n(void) \ { \ uint32_t addr = DT_REG_ADDR(DT_INST_PHANDLE(n, ambiq_pwrcfg)) + \ DT_INST_PHA(n, ambiq_pwrcfg, offset); \ uint32_t pwr_status_addr = addr + DEVPWRSTATUS_OFFSET; \ sys_write32((sys_read32(addr) | DT_INST_PHA(n, ambiq_pwrcfg, mask)), addr); \ while (!(sys_read32(pwr_status_addr) & HCPA_MASK)) { \ }; \ return 0; \ } \ static inline int clk_enable_ambiq_uart_##n(const struct device *dev, uint32_t clk) \ { \ return clk_enable_ambiq_uart(dev, clk); \ } #else #define DEVPWRSTATUS_OFFSET 0x4 #define AMBIQ_UART_DEFINE(n) \ static int pwr_on_ambiq_uart_##n(void) \ { \ uint32_t addr = DT_REG_ADDR(DT_INST_PHANDLE(n, ambiq_pwrcfg)) + \ DT_INST_PHA(n, ambiq_pwrcfg, offset); \ uint32_t pwr_status_addr = addr + DEVPWRSTATUS_OFFSET; \ sys_write32((sys_read32(addr) | DT_INST_PHA(n, ambiq_pwrcfg, mask)), addr); \ while ((sys_read32(pwr_status_addr) & DT_INST_PHA(n, ambiq_pwrcfg, mask)) != \ DT_INST_PHA(n, ambiq_pwrcfg, mask)) { \ }; \ return 0; \ } \ static inline int clk_enable_ambiq_uart_##n(const struct device *dev, uint32_t clk) \ { \ return clk_enable_ambiq_uart(dev, clk); \ } #endif #endif /* ZEPHYR_DRIVERS_SERIAL_UART_PL011_AMBIQ_H_ */ ```
/content/code_sandbox/drivers/serial/uart_pl011_ambiq.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,816
```unknown config USART_GD32 bool "GD32 serial driver" default y depends on DT_HAS_GD_GD32_USART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select USE_GD32_USART help This option enables the USART driver for GD32 SoC family. ```
/content/code_sandbox/drivers/serial/Kconfig.gd32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
64
```unknown # Emulated UART configuration options config UART_EMUL bool "Emulated UART driver [EXPERIMENTAL]" default y depends on DT_HAS_ZEPHYR_UART_EMUL_ENABLED depends on EMUL select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select SERIAL_SUPPORT_ASYNC select RING_BUFFER select EXPERIMENTAL help Enable the emulated UART driver. if UART_EMUL config UART_EMUL_DEVICE_INIT_PRIORITY int "UART emulated devices' init priority" default 0 help The init priority of emulated driver on the UART bus. config UART_EMUL_WORK_Q_STACK_SIZE int "UART emulator work queue stack size" default 2048 config UART_EMUL_WORK_Q_PRIORITY int "UART emulator work queue thread priority" default 1 endif # UART_EMUL ```
/content/code_sandbox/drivers/serial/Kconfig.emul
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
175
```unknown # Virtual UART RTT driver option menuconfig UART_RTT bool "UART RTT driver" default y depends on DT_HAS_SEGGER_RTT_UART_ENABLED depends on USE_SEGGER_RTT select SEGGER_RTT_CUSTOM_LOCKING select SERIAL_SUPPORT_ASYNC help This option enables access RTT channel as UART device. if UART_RTT config UART_RTT_0 def_bool $(dt_nodelabel_enabled_with_compat,rtt0,$(DT_COMPAT_SEGGER_RTT_UART)) depends on SEGGER_RTT_MAX_NUM_UP_BUFFERS >= 1 && SEGGER_RTT_MAX_NUM_DOWN_BUFFERS >= 1 depends on SEGGER_RTT_MODE_NO_BLOCK_SKIP select SERIAL_HAS_DRIVER select UART_RTT_DRIVER help Enable UART on (default) RTT channel 0. Default channel has to be configured in non-blocking skip mode. config UART_RTT_1 def_bool $(dt_nodelabel_enabled_with_compat,rtt1,$(DT_COMPAT_SEGGER_RTT_UART)) depends on SEGGER_RTT_MAX_NUM_UP_BUFFERS >= 2 && SEGGER_RTT_MAX_NUM_DOWN_BUFFERS >= 2 select SERIAL_HAS_DRIVER select UART_RTT_DRIVER help Enable UART on RTT channel 1 config UART_RTT_2 def_bool $(dt_nodelabel_enabled_with_compat,rtt2,$(DT_COMPAT_SEGGER_RTT_UART)) depends on SEGGER_RTT_MAX_NUM_UP_BUFFERS >= 3 && SEGGER_RTT_MAX_NUM_DOWN_BUFFERS >= 3 select SERIAL_HAS_DRIVER select UART_RTT_DRIVER help Enable UART on RTT channel 2 config UART_RTT_3 def_bool $(dt_nodelabel_enabled_with_compat,rtt3,$(DT_COMPAT_SEGGER_RTT_UART)) depends on SEGGER_RTT_MAX_NUM_UP_BUFFERS >= 4 && SEGGER_RTT_MAX_NUM_DOWN_BUFFERS >= 4 select SERIAL_HAS_DRIVER select UART_RTT_DRIVER help Enable UART on RTT channel 3 config UART_RTT_DRIVER bool endif ```
/content/code_sandbox/drivers/serial/Kconfig.rtt
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
448
```c /* * */ #undef _XOPEN_SOURCE /* Note: This is used only for interaction with the host C library, and is therefore exempt of * coding guidelines rule A.4&5 which applies to the embedded code using embedded libraries */ #define _XOPEN_SOURCE 600 #include <stdbool.h> #include <errno.h> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <pty.h> #include <fcntl.h> #include <sys/select.h> #include <unistd.h> #include <poll.h> #include <nsi_tracing.h> #define ERROR nsi_print_error_and_exit #define WARN nsi_print_warning /** * @brief Poll the device for input. * * @param in_f Input file descriptor * @param p_char Pointer to character. * * @retval 0 If a character arrived and was stored in p_char * @retval -1 If no character was available to read * @retval -2 if the stdin is disconnected */ int np_uart_stdin_poll_in_bottom(int in_f, unsigned char *p_char) { if (feof(stdin)) { /* * The stdinput is fed from a file which finished or the user * pressed Ctrl+D */ return -2; } int n = -1; int ready; fd_set readfds; static struct timeval timeout; /* just zero */ FD_ZERO(&readfds); FD_SET(in_f, &readfds); ready = select(in_f+1, &readfds, NULL, NULL, &timeout); if (ready == 0) { return -1; } else if (ready == -1) { ERROR("%s: Error on select ()\n", __func__); } n = read(in_f, p_char, 1); if ((n == -1) || (n == 0)) { return -1; } return 0; } /** * @brief Check if the output descriptor has something connected to the slave side * * @param fd file number * * @retval 0 Nothing connected yet * @retval 1 Something connected to the slave side */ int np_uart_slave_connected(int fd) { struct pollfd pfd = { .fd = fd, .events = POLLHUP }; int ret; ret = poll(&pfd, 1, 0); if (ret == -1) { int err = errno; /* * Possible errors are: * * EINTR :A signal was received => ok * * EFAULT and EINVAL: parameters/programming error * * ENOMEM no RAM left */ if (err != EINTR) { ERROR("%s: unexpected error during poll, errno=%i,%s\n", __func__, err, strerror(err)); } 
} if (!(pfd.revents & POLLHUP)) { /* There is now a reader on the slave side */ return 1; } return 0; } /** * Attempt to connect a terminal emulator to the slave side of the pty * If -attach_uart_cmd=<cmd> is provided as a command line option, <cmd> will be * used. Otherwise, the default command, * CONFIG_NATIVE_UART_AUTOATTACH_DEFAULT_CMD, will be used instead */ static void attach_to_tty(const char *slave_tty, const char *auto_attach_cmd) { char command[strlen(auto_attach_cmd) + strlen(slave_tty) + 1]; sprintf(command, auto_attach_cmd, slave_tty); int ret = system(command); if (ret != 0) { WARN("Could not attach to the UART with \"%s\"\n", command); WARN("The command returned %i\n", WEXITSTATUS(ret)); } } /** * Attempt to allocate and open a new pseudoterminal * * Returns the file descriptor of the master side * If auto_attach was set, it will also attempt to connect a new terminal * emulator to its slave side. */ int np_uart_open_ptty(const char *uart_name, const char *auto_attach_cmd, bool do_auto_attach, bool wait_pts) { int master_pty; char *slave_pty_name; struct termios ter; int err_nbr; int ret; int flags; master_pty = posix_openpt(O_RDWR | O_NOCTTY); if (master_pty == -1) { ERROR("Could not open a new TTY for the UART\n"); } ret = grantpt(master_pty); if (ret == -1) { err_nbr = errno; close(master_pty); ERROR("Could not grant access to the slave PTY side (%i)\n", err_nbr); } ret = unlockpt(master_pty); if (ret == -1) { err_nbr = errno; close(master_pty); ERROR("Could not unlock the slave PTY side (%i)\n", err_nbr); } slave_pty_name = ptsname(master_pty); if (slave_pty_name == NULL) { err_nbr = errno; close(master_pty); ERROR("Error getting slave PTY device name (%i)\n", err_nbr); } /* Set the master PTY as non blocking */ flags = fcntl(master_pty, F_GETFL); if (flags == -1) { err_nbr = errno; close(master_pty); ERROR("Could not read the master PTY file status flags (%i)\n", err_nbr); } ret = fcntl(master_pty, F_SETFL, flags | O_NONBLOCK); if (ret == -1) { 
err_nbr = errno; close(master_pty); ERROR("Could not set the master PTY as non-blocking (%i)\n", err_nbr); } (void) err_nbr; /* * Set terminal in "raw" mode: * Not canonical (no line input) * No signal generation from Ctr+{C|Z..} * No echoing, no input or output processing * No replacing of NL or CR * No flow control */ ret = tcgetattr(master_pty, &ter); if (ret == -1) { ERROR("Could not read terminal driver settings\n"); } ter.c_cc[VMIN] = 0; ter.c_cc[VTIME] = 0; ter.c_lflag &= ~(ICANON | ISIG | IEXTEN | ECHO); ter.c_iflag &= ~(BRKINT | ICRNL | IGNBRK | IGNCR | INLCR | INPCK | ISTRIP | IXON | PARMRK); ter.c_oflag &= ~OPOST; ret = tcsetattr(master_pty, TCSANOW, &ter); if (ret == -1) { ERROR("Could not change terminal driver settings\n"); } nsi_print_trace("%s connected to pseudotty: %s\n", uart_name, slave_pty_name); if (wait_pts) { /* * This trick sets the HUP flag on the tty master, making it * possible to detect a client connection using poll. * The connection of the client would cause the HUP flag to be * cleared, and in turn set again at disconnect. */ ret = open(slave_pty_name, O_RDWR | O_NOCTTY); if (ret == -1) { err_nbr = errno; ERROR("%s: Could not open terminal from the slave side (%i,%s)\n", __func__, err_nbr, strerror(err_nbr)); } ret = close(ret); if (ret == -1) { err_nbr = errno; ERROR("%s: Could not close terminal from the slave side (%i,%s)\n", __func__, err_nbr, strerror(err_nbr)); } } if (do_auto_attach) { attach_to_tty(slave_pty_name, auto_attach_cmd); } return master_pty; } int np_uart_ptty_get_stdin_fileno(void) { return STDIN_FILENO; } int np_uart_ptty_get_stdout_fileno(void) { return STDOUT_FILENO; } ```
/content/code_sandbox/drivers/serial/uart_native_ptty_bottom.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,768
```objective-c /** * @brief "Bottom" of native tty uart driver * * When built with the native_simulator this will be built in the runner context, * that is, with the host C library, and with the host include paths. * */ #ifndef DRIVERS_SERIAL_UART_NATIVE_TTY_BOTTOM_H #define DRIVERS_SERIAL_UART_NATIVE_TTY_BOTTOM_H #ifdef __cplusplus extern "C" { #endif #include <stdint.h> /* Below enums are just differently namespaced copies of uart_config_* enums. Options that are not * supported on the host are not listed. */ enum native_tty_bottom_parity { NTB_PARITY_NONE, NTB_PARITY_ODD, NTB_PARITY_EVEN, }; enum native_tty_bottom_stop_bits { NTB_STOP_BITS_1, NTB_STOP_BITS_2, }; enum native_tty_bottom_data_bits { NTB_DATA_BITS_5, NTB_DATA_BITS_6, NTB_DATA_BITS_7, NTB_DATA_BITS_8, }; enum native_tty_bottom_flow_control { NTB_FLOW_CTRL_NONE, }; struct native_tty_bottom_cfg { uint32_t baudrate; enum native_tty_bottom_parity parity; enum native_tty_bottom_stop_bits stop_bits; enum native_tty_bottom_data_bits data_bits; enum native_tty_bottom_flow_control flow_ctrl; }; /* Note: None of these functions are public interfaces. They are internal to the native tty driver. */ /** * @brief Check for available input on tty file descriptor * * @param fd * * @retval 1 if data is available * @retval 0 if data is not available * @retval <0 on error */ int native_tty_poll_bottom(int fd); /** * @brief Opens tty port on the given pathname * * Returned file descriptor can be then passed to native_tty_configure_bottom to configure it. * * @param pathname * * @return file descriptor */ int native_tty_open_tty_bottom(const char *pathname); /** * @brief Configure tty port * * @param fd File descriptor of the tty port. * @param cfg Configuration struct. * * @retval 0 if successful, * @retval -1 otherwise. */ int native_tty_configure_bottom(int fd, struct native_tty_bottom_cfg *cfg); #ifdef __cplusplus } #endif #endif /* DRIVERS_SERIAL_UART_NATIVE_TTY_BOTTOM_H */ ```
/content/code_sandbox/drivers/serial/uart_native_tty_bottom.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
507
```unknown config UART_BCM2711_MU bool "bcm2711_mu" default y depends on DT_HAS_BRCM_BCM2711_AUX_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help bcm2711_mu Low Power Serial Port. ```
/content/code_sandbox/drivers/serial/Kconfig.bcm2711
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
59
```c /* * */ #define DT_DRV_COMPAT arm_cmsdk_uart /** * @brief Driver for UART on ARM CMSDK APB UART. * * UART has two wires for RX and TX, and does not provide CTS or RTS. */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/clock_control/arm_clock_control.h> #include <zephyr/sys/__assert.h> #include <zephyr/init.h> #include <zephyr/drivers/uart.h> #include <zephyr/linker/sections.h> #include <zephyr/irq.h> /* UART registers struct */ struct uart_cmsdk_apb { /* offset: 0x000 (r/w) data register */ volatile uint32_t data; /* offset: 0x004 (r/w) status register */ volatile uint32_t state; /* offset: 0x008 (r/w) control register */ volatile uint32_t ctrl; union { /* offset: 0x00c (r/ ) interrupt status register */ volatile uint32_t intstatus; /* offset: 0x00c ( /w) interrupt clear register */ volatile uint32_t intclear; }; /* offset: 0x010 (r/w) baudrate divider register */ volatile uint32_t bauddiv; }; /* UART Bits */ /* CTRL Register */ #define UART_TX_EN (1 << 0) #define UART_RX_EN (1 << 1) #define UART_TX_IN_EN (1 << 2) #define UART_RX_IN_EN (1 << 3) #define UART_TX_OV_EN (1 << 4) #define UART_RX_OV_EN (1 << 5) #define UART_HS_TM_TX (1 << 6) /* STATE Register */ #define UART_TX_BF (1 << 0) #define UART_RX_BF (1 << 1) #define UART_TX_B_OV (1 << 2) #define UART_RX_B_OV (1 << 3) /* INTSTATUS Register */ #define UART_TX_IN (1 << 0) #define UART_RX_IN (1 << 1) #define UART_TX_OV_IN (1 << 2) #define UART_RX_OV_IN (1 << 3) struct uart_cmsdk_apb_config { volatile struct uart_cmsdk_apb *uart; uint32_t sys_clk_freq; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_config_func_t irq_config_func; #endif }; /* Device data structure */ struct uart_cmsdk_apb_dev_data { uint32_t baud_rate; /* Baud rate */ #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_cb; void *irq_cb_data; #endif /* UART Clock control in Active State */ const struct arm_clock_control_t uart_cc_as; /* UART Clock control in Sleep State */ const struct 
arm_clock_control_t uart_cc_ss; /* UART Clock control in Deep Sleep State */ const struct arm_clock_control_t uart_cc_dss; }; static const struct uart_driver_api uart_cmsdk_apb_driver_api; #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_cmsdk_apb_isr(const struct device *dev); #endif /** * @brief Set the baud rate * * This routine set the given baud rate for the UART. * * @param dev UART device struct */ static void baudrate_set(const struct device *dev) { const struct uart_cmsdk_apb_config * const dev_cfg = dev->config; struct uart_cmsdk_apb_dev_data *const dev_data = dev->data; /* * If baudrate and/or sys_clk_freq are 0 the configuration remains * unchanged. It can be useful in case that Zephyr it is run via * a bootloader that brings up the serial and sets the baudrate. */ if ((dev_data->baud_rate != 0U) && (dev_cfg->sys_clk_freq != 0U)) { /* calculate baud rate divisor */ dev_cfg->uart->bauddiv = (dev_cfg->sys_clk_freq / dev_data->baud_rate); } } /** * @brief Initialize UART channel * * This routine is called to reset the chip in a quiescent state. * It is assumed that this function is called only once per UART. 
* * @param dev UART device struct * * @return 0 */ static int uart_cmsdk_apb_init(const struct device *dev) { const struct uart_cmsdk_apb_config * const dev_cfg = dev->config; #ifdef CONFIG_CLOCK_CONTROL /* Enable clock for subsystem */ const struct device *const clk = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR_BY_IDX(0, 1)); struct uart_cmsdk_apb_dev_data * const data = dev->data; if (!device_is_ready(clk)) { return -ENODEV; } #ifdef CONFIG_SOC_SERIES_BEETLE clock_control_on(clk, (clock_control_subsys_t) &data->uart_cc_as); clock_control_on(clk, (clock_control_subsys_t) &data->uart_cc_ss); clock_control_on(clk, (clock_control_subsys_t) &data->uart_cc_dss); #endif /* CONFIG_SOC_SERIES_BEETLE */ #endif /* CONFIG_CLOCK_CONTROL */ /* Set baud rate */ baudrate_set(dev); /* Enable receiver and transmitter */ dev_cfg->uart->ctrl = UART_RX_EN | UART_TX_EN; #ifdef CONFIG_UART_INTERRUPT_DRIVEN dev_cfg->irq_config_func(dev); #endif return 0; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_cmsdk_apb_poll_in(const struct device *dev, unsigned char *c) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; /* If the receiver is not ready returns -1 */ if (!(dev_cfg->uart->state & UART_RX_BF)) { return -1; } /* got a character */ *c = (unsigned char)dev_cfg->uart->data; return 0; } /** * @brief Output a character in polled mode. * * Checks if the transmitter is empty. If empty, a character is written to * the data register. 
* * @param dev UART device struct * @param c Character to send */ static void uart_cmsdk_apb_poll_out(const struct device *dev, unsigned char c) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; /* Wait for transmitter to be ready */ while (dev_cfg->uart->state & UART_TX_BF) { ; /* Wait */ } /* Send a character */ dev_cfg->uart->data = (uint32_t)c; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN /** * @brief Fill FIFO with data * * @param dev UART device struct * @param tx_data Data to transmit * @param len Number of bytes to send * * @return the number of characters that have been read */ static int uart_cmsdk_apb_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; /* * No hardware FIFO present. Only 1 byte * to write if TX buffer is empty. */ if (len && !(dev_cfg->uart->state & UART_TX_BF)) { /* * Clear TX int. pending flag before pushing byte to "FIFO". * If TX interrupt is enabled the UART_TX_IN bit will be set * again automatically by the UART hardware machinery once * the "FIFO" becomes empty again. */ dev_cfg->uart->intclear = UART_TX_IN; dev_cfg->uart->data = *tx_data; return 1; } return 0; } /** * @brief Read data from FIFO * * @param dev UART device struct * @param rx_data Pointer to data container * @param size Container size in bytes * * @return the number of characters that have been read */ static int uart_cmsdk_apb_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; /* * No hardware FIFO present. Only 1 byte * to read if RX buffer is full. */ if (size && dev_cfg->uart->state & UART_RX_BF) { /* * Clear RX int. pending flag before popping byte from "FIFO". * If RX interrupt is enabled the UART_RX_IN bit will be set * again automatically by the UART hardware machinery once * the "FIFO" becomes full again. 
*/ dev_cfg->uart->intclear = UART_RX_IN; *rx_data = (unsigned char)dev_cfg->uart->data; return 1; } return 0; } /** * @brief Enable TX interrupt * * @param dev UART device struct */ static void uart_cmsdk_apb_irq_tx_enable(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; unsigned int key; dev_cfg->uart->ctrl |= UART_TX_IN_EN; /* The expectation is that TX is a level interrupt, active for as * long as TX buffer is empty. But in CMSDK UART it's an edge * interrupt, firing on a state change of TX buffer from full to * empty. So, we need to "prime" it here by calling ISR directly, * to get interrupt processing going, as there is no previous * full state to allow a transition from full to empty buffer * that will trigger a TX interrupt. */ key = irq_lock(); uart_cmsdk_apb_isr(dev); irq_unlock(key); } /** * @brief Disable TX interrupt * * @param dev UART device struct */ static void uart_cmsdk_apb_irq_tx_disable(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; dev_cfg->uart->ctrl &= ~UART_TX_IN_EN; /* Clear any pending TX interrupt after disabling it */ dev_cfg->uart->intclear = UART_TX_IN; } /** * @brief Verify if Tx interrupt has been raised * * @param dev UART device struct * * @return 1 if an interrupt is ready, 0 otherwise */ static int uart_cmsdk_apb_irq_tx_ready(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; return !(dev_cfg->uart->state & UART_TX_BF); } /** * @brief Enable RX interrupt * * @param dev UART device struct */ static void uart_cmsdk_apb_irq_rx_enable(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; dev_cfg->uart->ctrl |= UART_RX_IN_EN; } /** * @brief Disable RX interrupt * * @param dev UART device struct */ static void uart_cmsdk_apb_irq_rx_disable(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; dev_cfg->uart->ctrl &= ~UART_RX_IN_EN; /* Clear any pending RX 
interrupt after disabling it */ dev_cfg->uart->intclear = UART_RX_IN; } /** * @brief Verify if Tx complete interrupt has been raised * * @param dev UART device struct * * @return 1 if an interrupt is ready, 0 otherwise */ static int uart_cmsdk_apb_irq_tx_complete(const struct device *dev) { return uart_cmsdk_apb_irq_tx_ready(dev); } /** * @brief Verify if Rx interrupt has been raised * * @param dev UART device struct * * @return 1 if an interrupt is ready, 0 otherwise */ static int uart_cmsdk_apb_irq_rx_ready(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; return (dev_cfg->uart->state & UART_RX_BF) == UART_RX_BF; } /** * @brief Enable error interrupt * * @param dev UART device struct */ static void uart_cmsdk_apb_irq_err_enable(const struct device *dev) { ARG_UNUSED(dev); } /** * @brief Disable error interrupt * * @param dev UART device struct */ static void uart_cmsdk_apb_irq_err_disable(const struct device *dev) { ARG_UNUSED(dev); } /** * @brief Verify if Tx or Rx interrupt is pending * * @param dev UART device struct * * @return 1 if Tx or Rx interrupt is pending, 0 otherwise */ static int uart_cmsdk_apb_irq_is_pending(const struct device *dev) { const struct uart_cmsdk_apb_config *dev_cfg = dev->config; return (dev_cfg->uart->intstatus & (UART_RX_IN | UART_TX_IN)); } /** * @brief Update the interrupt status * * @param dev UART device struct * * @return always 1 */ static int uart_cmsdk_apb_irq_update(const struct device *dev) { return 1; } /** * @brief Set the callback function pointer for an Interrupt. * * @param dev UART device structure * @param cb Callback function pointer. */ static void uart_cmsdk_apb_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_cmsdk_apb_dev_data *data = dev->data; data->irq_cb = cb; data->irq_cb_data = cb_data; } /** * @brief Interrupt service routine. * * Calls the callback function, if exists. 
* * @param arg argument to interrupt service routine. */ void uart_cmsdk_apb_isr(const struct device *dev) { struct uart_cmsdk_apb_dev_data *data = dev->data; /* Verify if the callback has been registered */ if (data->irq_cb) { data->irq_cb(dev, data->irq_cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_cmsdk_apb_driver_api = { .poll_in = uart_cmsdk_apb_poll_in, .poll_out = uart_cmsdk_apb_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_cmsdk_apb_fifo_fill, .fifo_read = uart_cmsdk_apb_fifo_read, .irq_tx_enable = uart_cmsdk_apb_irq_tx_enable, .irq_tx_disable = uart_cmsdk_apb_irq_tx_disable, .irq_tx_ready = uart_cmsdk_apb_irq_tx_ready, .irq_rx_enable = uart_cmsdk_apb_irq_rx_enable, .irq_rx_disable = uart_cmsdk_apb_irq_rx_disable, .irq_tx_complete = uart_cmsdk_apb_irq_tx_complete, .irq_rx_ready = uart_cmsdk_apb_irq_rx_ready, .irq_err_enable = uart_cmsdk_apb_irq_err_enable, .irq_err_disable = uart_cmsdk_apb_irq_err_disable, .irq_is_pending = uart_cmsdk_apb_irq_is_pending, .irq_update = uart_cmsdk_apb_irq_update, .irq_callback_set = uart_cmsdk_apb_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #if DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_cmsdk_apb_irq_config_func_0(const struct device *dev); #endif static const struct uart_cmsdk_apb_config uart_cmsdk_apb_dev_cfg_0 = { .uart = (volatile struct uart_cmsdk_apb *)DT_INST_REG_ADDR(0), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(0, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = uart_cmsdk_apb_irq_config_func_0, #endif }; static struct uart_cmsdk_apb_dev_data uart_cmsdk_apb_dev_data_0 = { .baud_rate = DT_INST_PROP(0, current_speed), .uart_cc_as = {.bus = CMSDK_APB, .state = SOC_ACTIVE, .device = DT_INST_REG_ADDR(0),}, .uart_cc_ss = {.bus = CMSDK_APB, .state = SOC_SLEEP, .device = DT_INST_REG_ADDR(0),}, .uart_cc_dss = {.bus = CMSDK_APB, .state = SOC_DEEPSLEEP, .device 
= DT_INST_REG_ADDR(0),}, }; DEVICE_DT_INST_DEFINE(0, uart_cmsdk_apb_init, NULL, &uart_cmsdk_apb_dev_data_0, &uart_cmsdk_apb_dev_cfg_0, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_cmsdk_apb_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN #if DT_NUM_IRQS(DT_DRV_INST(0)) == 1 static void uart_cmsdk_apb_irq_config_func_0(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } #else static void uart_cmsdk_apb_irq_config_func_0(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, tx, irq), DT_INST_IRQ_BY_NAME(0, tx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, tx, irq)); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, rx, irq), DT_INST_IRQ_BY_NAME(0, rx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, rx, irq)); } #endif #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(0), okay) */ #if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_cmsdk_apb_irq_config_func_1(const struct device *dev); #endif static const struct uart_cmsdk_apb_config uart_cmsdk_apb_dev_cfg_1 = { .uart = (volatile struct uart_cmsdk_apb *)DT_INST_REG_ADDR(1), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(1, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = uart_cmsdk_apb_irq_config_func_1, #endif }; static struct uart_cmsdk_apb_dev_data uart_cmsdk_apb_dev_data_1 = { .baud_rate = DT_INST_PROP(1, current_speed), .uart_cc_as = {.bus = CMSDK_APB, .state = SOC_ACTIVE, .device = DT_INST_REG_ADDR(1),}, .uart_cc_ss = {.bus = CMSDK_APB, .state = SOC_SLEEP, .device = DT_INST_REG_ADDR(1),}, .uart_cc_dss = {.bus = CMSDK_APB, .state = SOC_DEEPSLEEP, .device = DT_INST_REG_ADDR(1),}, }; DEVICE_DT_INST_DEFINE(1, uart_cmsdk_apb_init, NULL, &uart_cmsdk_apb_dev_data_1, &uart_cmsdk_apb_dev_cfg_1, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, 
&uart_cmsdk_apb_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN #if DT_NUM_IRQS(DT_DRV_INST(1)) == 1 static void uart_cmsdk_apb_irq_config_func_1(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(1), DT_INST_IRQ(1, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(1), 0); irq_enable(DT_INST_IRQN(1)); } #else static void uart_cmsdk_apb_irq_config_func_1(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(1, tx, irq), DT_INST_IRQ_BY_NAME(1, tx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(1), 0); irq_enable(DT_INST_IRQ_BY_NAME(1, tx, irq)); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(1, rx, irq), DT_INST_IRQ_BY_NAME(1, rx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(1), 0); irq_enable(DT_INST_IRQ_BY_NAME(1, rx, irq)); } #endif #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay) */ #if DT_NODE_HAS_STATUS(DT_DRV_INST(2), okay) #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_cmsdk_apb_irq_config_func_2(const struct device *dev); #endif static const struct uart_cmsdk_apb_config uart_cmsdk_apb_dev_cfg_2 = { .uart = (volatile struct uart_cmsdk_apb *)DT_INST_REG_ADDR(2), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(2, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = uart_cmsdk_apb_irq_config_func_2, #endif }; static struct uart_cmsdk_apb_dev_data uart_cmsdk_apb_dev_data_2 = { .baud_rate = DT_INST_PROP(2, current_speed), .uart_cc_as = {.bus = CMSDK_APB, .state = SOC_ACTIVE, .device = DT_INST_REG_ADDR(2),}, .uart_cc_ss = {.bus = CMSDK_APB, .state = SOC_SLEEP, .device = DT_INST_REG_ADDR(2),}, .uart_cc_dss = {.bus = CMSDK_APB, .state = SOC_DEEPSLEEP, .device = DT_INST_REG_ADDR(2),}, }; DEVICE_DT_INST_DEFINE(2, uart_cmsdk_apb_init, NULL, &uart_cmsdk_apb_dev_data_2, &uart_cmsdk_apb_dev_cfg_2, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_cmsdk_apb_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN #if DT_NUM_IRQS(DT_DRV_INST(2)) == 1 static void uart_cmsdk_apb_irq_config_func_2(const struct device *dev) { 
IRQ_CONNECT(DT_INST_IRQN(2), DT_INST_IRQ_BY_NAME(2, priority, irq), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(2), 0); irq_enable(DT_INST_IRQN(2)); } #else static void uart_cmsdk_apb_irq_config_func_2(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(2, tx, irq), DT_INST_IRQ_BY_NAME(2, tx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(2), 0); irq_enable(DT_INST_IRQ_BY_NAME(2, tx, irq)); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(2, rx, irq), DT_INST_IRQ_BY_NAME(2, rx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(2), 0); irq_enable(DT_INST_IRQ_BY_NAME(2, rx, irq)); } #endif #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(2), okay) */ #if DT_NODE_HAS_STATUS(DT_DRV_INST(3), okay) #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_cmsdk_apb_irq_config_func_3(const struct device *dev); #endif static const struct uart_cmsdk_apb_config uart_cmsdk_apb_dev_cfg_3 = { .uart = (volatile struct uart_cmsdk_apb *)DT_INST_REG_ADDR(3), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(3, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = uart_cmsdk_apb_irq_config_func_3, #endif }; static struct uart_cmsdk_apb_dev_data uart_cmsdk_apb_dev_data_3 = { .baud_rate = DT_INST_PROP(3, current_speed), .uart_cc_as = {.bus = CMSDK_APB, .state = SOC_ACTIVE, .device = DT_INST_REG_ADDR(3),}, .uart_cc_ss = {.bus = CMSDK_APB, .state = SOC_SLEEP, .device = DT_INST_REG_ADDR(3),}, .uart_cc_dss = {.bus = CMSDK_APB, .state = SOC_DEEPSLEEP, .device = DT_INST_REG_ADDR(3),}, }; DEVICE_DT_INST_DEFINE(3, uart_cmsdk_apb_init, NULL, &uart_cmsdk_apb_dev_data_3, &uart_cmsdk_apb_dev_cfg_3, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_cmsdk_apb_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN #if DT_NUM_IRQS(DT_DRV_INST(3)) == 1 static void uart_cmsdk_apb_irq_config_func_3(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(3), DT_INST_IRQ(3, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(3), 0); irq_enable(DT_INST_IRQN(3)); } #else static void 
uart_cmsdk_apb_irq_config_func_3(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(3, tx, irq), DT_INST_IRQ_BY_NAME(3, tx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(3), 0); irq_enable(DT_INST_IRQ_BY_NAME(3, tx, irq)); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(3, rx, irq), DT_INST_IRQ_BY_NAME(3, rx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(3), 0); irq_enable(DT_INST_IRQ_BY_NAME(3, rx, irq)); } #endif #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(3), okay) */ #if DT_NODE_HAS_STATUS(DT_DRV_INST(4), okay) #ifdef CONFIG_UART_INTERRUPT_DRIVEN static void uart_cmsdk_apb_irq_config_func_4(const struct device *dev); #endif static const struct uart_cmsdk_apb_config uart_cmsdk_apb_dev_cfg_4 = { .uart = (volatile struct uart_cmsdk_apb *)DT_INST_REG_ADDR(4), .sys_clk_freq = DT_INST_PROP_BY_PHANDLE(4, clocks, clock_frequency), #ifdef CONFIG_UART_INTERRUPT_DRIVEN .irq_config_func = uart_cmsdk_apb_irq_config_func_4, #endif }; static struct uart_cmsdk_apb_dev_data uart_cmsdk_apb_dev_data_4 = { .baud_rate = DT_INST_PROP(4, current_speed), .uart_cc_as = {.bus = CMSDK_APB, .state = SOC_ACTIVE, .device = DT_INST_REG_ADDR(4),}, .uart_cc_ss = {.bus = CMSDK_APB, .state = SOC_SLEEP, .device = DT_INST_REG_ADDR(4),}, .uart_cc_dss = {.bus = CMSDK_APB, .state = SOC_DEEPSLEEP, .device = DT_INST_REG_ADDR(4),}, }; DEVICE_DT_INST_DEFINE(4, uart_cmsdk_apb_init, NULL, &uart_cmsdk_apb_dev_data_4, &uart_cmsdk_apb_dev_cfg_4, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_cmsdk_apb_driver_api); #ifdef CONFIG_UART_INTERRUPT_DRIVEN #if DT_NUM_IRQS(DT_DRV_INST(4)) == 1 static void uart_cmsdk_apb_irq_config_func_4(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQN(4), DT_INST_IRQ_BY_NAME(4, priority, irq), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(4), 0); irq_enable(DT_INST_IRQN(4)); } #else static void uart_cmsdk_apb_irq_config_func_4(const struct device *dev) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(4, tx, irq), DT_INST_IRQ_BY_NAME(4, tx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(4), 
0); irq_enable(DT_INST_IRQ_BY_NAME(4, tx, irq)); IRQ_CONNECT(DT_INST_IRQ_BY_NAME(4, rx, irq), DT_INST_IRQ_BY_NAME(4, rx, priority), uart_cmsdk_apb_isr, DEVICE_DT_INST_GET(4), 0); irq_enable(DT_INST_IRQ_BY_NAME(4, rx, irq)); } #endif #endif #endif /* DT_NODE_HAS_STATUS(DT_DRV_INST(4), okay) */ ```
/content/code_sandbox/drivers/serial/uart_cmsdk_apb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,392
```unknown # Microchip XEC UART configuration options config UART_XEC bool "Microchip XEC family UART driver" default y depends on DT_HAS_MICROCHIP_XEC_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the UARTx driver for Microchip XEC MCUs. if UART_XEC config UART_XEC_LINE_CTRL bool "Serial Line Control for Apps" depends on UART_LINE_CTRL help This enables the API for apps to control the serial line, such as CTS and RTS. Says n if not sure. endif # UART_XEC ```
/content/code_sandbox/drivers/serial/Kconfig.xec
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
132
```c /* * */ #define DT_DRV_COMPAT renesas_ra_uart_sci #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_ra_icu.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include <zephyr/spinlock.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ra_uart_sci, CONFIG_UART_LOG_LEVEL); enum { UART_RA_INT_RXI, UART_RA_INT_TXI, UART_RA_INT_ERI, NUM_OF_UART_RA_INT, }; struct uart_ra_cfg { mem_addr_t regs; const struct device *clock_dev; clock_control_subsys_t clock_id; const struct pinctrl_dev_config *pcfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN int (*irq_config_func)(const struct device *dev); #endif }; struct uart_ra_data { struct uart_config current_config; uint32_t clk_rate; struct k_spinlock lock; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uint32_t irqn[NUM_OF_UART_RA_INT]; uart_irq_callback_user_data_t callback; void *cb_data; #endif }; #define REG_MASK(reg) (BIT_MASK(_CONCAT(reg, _LEN)) << _CONCAT(reg, _POS)) /* Registers */ #define SMR 0x00 /*!< Serial Mode Register */ #define BRR 0x01 /*!< Bit Rate Register */ #define SCR 0x02 /*!< Serial Control Register */ #define TDR 0x03 /*!< Transmit Data Register */ #define SSR 0x04 /*!< Serial Status Register */ #define RDR 0x05 /*!< Receive Data Register */ #define SEMR 0x07 /*!< Serial Extended Mode Register */ #define MDDR 0x12 /*!< Modulation Duty Register */ #define LSR 0x18 /*!< Line Status Register */ /* * SMR (Serial Mode Register) * * - CKS[0..2]: Clock Select * - MP[2..3]: Multi-Processor Mode(Valid only in asynchronous mode) * - STOP[3..4]: Stop Bit Length(Valid only in asynchronous mode) * - PM[4..5]: Parity Mode (Valid only when the PE bit is 1) * - PE[5..6]: Parity Enable(Valid only in asynchronous mode) * - CHR[6..7]: Character Length(Valid only in asynchronous mode) * - CM[7..8]: Communication Mode */ #define SMR_CKS_POS (0) #define SMR_CKS_LEN (2) #define SMR_MP_POS (2) #define SMR_MP_LEN (1) #define SMR_STOP_POS (3) #define 
SMR_STOP_LEN (1) #define SMR_PM_POS (4) #define SMR_PM_LEN (1) #define SMR_PE_POS (5) #define SMR_PE_LEN (1) #define SMR_CHR_POS (6) #define SMR_CHR_LEN (1) #define SMR_CM_POS (7) #define SMR_CM_LEN (1) /** * SCR (Serial Control Register) * * - CKE[0..2]: Clock Enable * - TEIE[2..3]: Transmit End Interrupt Enable * - MPIE[3..4]: Multi-Processor Interrupt Enable (Valid in asynchronous * - RE[4..5]: Receive Enable * - TE[5..6]: Transmit Enable * - RIE[6..7]: Receive Interrupt Enable * - TIE[7..8]: Transmit Interrupt Enable */ #define SCR_CKE_POS (0) #define SCR_CKE_LEN (2) #define SCR_TEIE_POS (2) #define SCR_TEIE_LEN (1) #define SCR_MPIE_POS (3) #define SCR_MPIE_LEN (1) #define SCR_RE_POS (4) #define SCR_RE_LEN (1) #define SCR_TE_POS (5) #define SCR_TE_LEN (1) #define SCR_RIE_POS (6) #define SCR_RIE_LEN (1) #define SCR_TIE_POS (7) #define SCR_TIE_LEN (1) /** * SSR (Serial Status Register) * * - MPBT[0..1]: Multi-Processor Bit Transfer * - MPB[1..2]: Multi-Processor * - TEND[2..3]: Transmit End Flag * - PER[3..4]: Parity Error Flag * - FER[4..5]: Framing Error Flag * - ORER[5..6]: Overrun Error Flag * - RDRF[6..7]: Receive Data Full Flag * - TDRE[7..8]: Transmit Data Empty Flag */ #define SSR_MPBT_POS (0) #define SSR_MPBT_LEN (1) #define SSR_MPB_POS (1) #define SSR_MPB_LEN (1) #define SSR_TEND_POS (2) #define SSR_TEND_LEN (1) #define SSR_PER_POS (3) #define SSR_PER_LEN (1) #define SSR_FER_POS (4) #define SSR_FER_LEN (1) #define SSR_ORER_POS (5) #define SSR_ORER_LEN (1) #define SSR_RDRF_POS (6) #define SSR_RDRF_LEN (1) #define SSR_TDRE_POS (7) #define SSR_TDRE_LEN (1) /** * SEMR (Serial Extended Mode Register) * * - ACS0[0..1]: Asynchronous Mode Clock Source Select * - PADIS[1..2]: Preamble function Disable * - BRME[2..3]: Bit Rate Modulation Enable * - ABCSE[3..4]: Asynchronous Mode Extended Base Clock Select * - ABCS[4..5]: Asynchronous Mode Base Clock Select * - NFEN[5..6]: Digital Noise Filter Function Enable * - BGDM[6..7]: Baud Rate Generator Double-Speed Mode 
Select * - RXDESEL[7..8]: Asynchronous Start Bit Edge Detection Select */ #define SEMR_ACS0_POS (0) #define SEMR_ACS0_LEN (1) #define SEMR_PADIS_POS (1) #define SEMR_PADIS_LEN (1) #define SEMR_BRME_POS (2) #define SEMR_BRME_LEN (1) #define SEMR_ABCSE_POS (3) #define SEMR_ABCSE_LEN (1) #define SEMR_ABCS_POS (4) #define SEMR_ABCS_LEN (1) #define SEMR_NFEN_POS (5) #define SEMR_NFEN_LEN (1) #define SEMR_BGDM_POS (6) #define SEMR_BGDM_LEN (1) #define SEMR_RXDESEL_POS (7) #define SEMR_RXDESEL_LEN (1) /** * LSR (Line Status Register) * * - ORER[0..1]: Overrun Error Flag * - FNUM[2..7]: Framing Error Count * - PNUM[8..13]: Parity Error Count */ #define LSR_ORER_POS (0) #define LSR_ORER_LEN (1) #define LSR_FNUM_POS (2) #define LSR_FNUM_LEN (5) #define LSR_PNUM_POS (8) #define LSR_PNUM_LEN (5) static uint8_t uart_ra_read_8(const struct device *dev, uint32_t offs) { const struct uart_ra_cfg *config = dev->config; return sys_read8(config->regs + offs); } static void uart_ra_write_8(const struct device *dev, uint32_t offs, uint8_t value) { const struct uart_ra_cfg *config = dev->config; sys_write8(value, config->regs + offs); } static uint16_t uart_ra_read_16(const struct device *dev, uint32_t offs) { const struct uart_ra_cfg *config = dev->config; return sys_read16(config->regs + offs); } static void uart_ra_write_16(const struct device *dev, uint32_t offs, uint16_t value) { const struct uart_ra_cfg *config = dev->config; sys_write16(value, config->regs + offs); } static void uart_ra_set_baudrate(const struct device *dev, uint32_t baud_rate) { struct uart_ra_data *data = dev->data; uint8_t reg_val; reg_val = uart_ra_read_8(dev, SEMR); reg_val |= (REG_MASK(SEMR_BGDM) | REG_MASK(SEMR_ABCS)); reg_val &= ~(REG_MASK(SEMR_BRME) | REG_MASK(SEMR_ABCSE)); uart_ra_write_8(dev, SEMR, reg_val); reg_val = (data->clk_rate / (8 * data->current_config.baudrate)) - 1; uart_ra_write_8(dev, BRR, reg_val); } static int uart_ra_poll_in(const struct device *dev, unsigned char *p_char) { struct 
uart_ra_data *data = dev->data; int ret = 0; k_spinlock_key_t key = k_spin_lock(&data->lock); /* If interrupts are enabled, return -EINVAL */ if ((uart_ra_read_8(dev, SCR) & REG_MASK(SCR_RIE))) { ret = -EINVAL; goto unlock; } if ((uart_ra_read_8(dev, SSR) & REG_MASK(SSR_RDRF)) == 0) { ret = -1; goto unlock; } *p_char = uart_ra_read_8(dev, RDR); unlock: k_spin_unlock(&data->lock, key); return ret; } static void uart_ra_poll_out(const struct device *dev, unsigned char out_char) { struct uart_ra_data *data = dev->data; uint8_t reg_val; k_spinlock_key_t key = k_spin_lock(&data->lock); while (!(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TEND)) || !(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TDRE))) { ; } /* If interrupts are enabled, temporarily disable them */ reg_val = uart_ra_read_8(dev, SCR); uart_ra_write_8(dev, SCR, reg_val & ~REG_MASK(SCR_TIE)); uart_ra_write_8(dev, TDR, out_char); while (!(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TEND)) || !(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TDRE))) { ; } uart_ra_write_8(dev, SCR, reg_val); k_spin_unlock(&data->lock, key); } static int uart_ra_err_check(const struct device *dev) { struct uart_ra_data *data = dev->data; uint8_t reg_val; int errors = 0; k_spinlock_key_t key; key = k_spin_lock(&data->lock); reg_val = uart_ra_read_8(dev, SSR); if (reg_val & REG_MASK(SSR_PER)) { errors |= UART_ERROR_PARITY; } if (reg_val & REG_MASK(SSR_FER)) { errors |= UART_ERROR_FRAMING; } if (reg_val & REG_MASK(SSR_ORER)) { errors |= UART_ERROR_OVERRUN; } reg_val &= ~(REG_MASK(SSR_PER) | REG_MASK(SSR_FER) | REG_MASK(SSR_ORER)); uart_ra_write_8(dev, SSR, reg_val); k_spin_unlock(&data->lock, key); return errors; } static int uart_ra_configure(const struct device *dev, const struct uart_config *cfg) { struct uart_ra_data *data = dev->data; uint16_t reg_val; k_spinlock_key_t key; if (cfg->parity != UART_CFG_PARITY_NONE || cfg->stop_bits != UART_CFG_STOP_BITS_1 || cfg->data_bits != UART_CFG_DATA_BITS_8 || cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) { 
return -ENOTSUP; } key = k_spin_lock(&data->lock); /* Disable Transmit and Receive */ reg_val = uart_ra_read_8(dev, SCR); reg_val &= ~(REG_MASK(SCR_TE) | REG_MASK(SCR_RE)); uart_ra_write_8(dev, SCR, reg_val); /* Resetting Errors Registers */ reg_val = uart_ra_read_8(dev, SSR); reg_val &= ~(REG_MASK(SSR_PER) | REG_MASK(SSR_FER) | REG_MASK(SSR_ORER) | REG_MASK(SSR_RDRF) | REG_MASK(SSR_TDRE)); uart_ra_write_8(dev, SSR, reg_val); reg_val = uart_ra_read_16(dev, LSR); reg_val &= ~(REG_MASK(LSR_ORER)); uart_ra_write_16(dev, LSR, reg_val); /* Select internal clock */ reg_val = uart_ra_read_8(dev, SCR); reg_val &= ~(REG_MASK(SCR_CKE)); uart_ra_write_8(dev, SCR, reg_val); /* Serial Configuration (8N1) & Clock divider selection */ reg_val = uart_ra_read_8(dev, SMR); reg_val &= ~(REG_MASK(SMR_CM) | REG_MASK(SMR_CHR) | REG_MASK(SMR_PE) | REG_MASK(SMR_PM) | REG_MASK(SMR_STOP) | REG_MASK(SMR_CKS)); uart_ra_write_8(dev, SMR, reg_val); /* Set baudrate */ uart_ra_set_baudrate(dev, cfg->baudrate); /* Enable Transmit & Receive + disable Interrupts */ reg_val = uart_ra_read_8(dev, SCR); reg_val |= (REG_MASK(SCR_TE) | REG_MASK(SCR_RE)); reg_val &= ~(REG_MASK(SCR_TIE) | REG_MASK(SCR_RIE) | REG_MASK(SCR_MPIE) | REG_MASK(SCR_TEIE)); uart_ra_write_8(dev, SCR, reg_val); data->current_config = *cfg; k_spin_unlock(&data->lock, key); return 0; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_ra_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_ra_data *data = dev->data; *cfg = data->current_config; return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_ra_init(const struct device *dev) { const struct uart_ra_cfg *config = dev->config; struct uart_ra_data *data = dev->data; int ret; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } if (!device_is_ready(config->clock_dev)) { return -ENODEV; } ret = clock_control_on(config->clock_dev, 
config->clock_id); if (ret < 0) { return ret; } ret = clock_control_get_rate(config->clock_dev, config->clock_id, &data->clk_rate); if (ret < 0) { return ret; } ret = uart_ra_configure(dev, &data->current_config); if (ret != 0) { return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN ret = config->irq_config_func(dev); if (ret != 0) { return ret; } #endif return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static bool uart_ra_irq_is_enabled(const struct device *dev, uint32_t irq) { return uart_ra_read_8(dev, SCR) & irq; } static int uart_ra_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { struct uart_ra_data *data = dev->data; uint8_t reg_val; k_spinlock_key_t key; if (len <= 0 || tx_data == NULL) { return 0; } key = k_spin_lock(&data->lock); reg_val = uart_ra_read_8(dev, SCR); reg_val &= ~(REG_MASK(SCR_TIE)); uart_ra_write_8(dev, SCR, reg_val); uart_ra_write_8(dev, TDR, tx_data[0]); reg_val |= REG_MASK(SCR_TIE); uart_ra_write_8(dev, SCR, reg_val); k_spin_unlock(&data->lock, key); return 1; } static int uart_ra_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { uint8_t data; if (size <= 0) { return 0; } if ((uart_ra_read_8(dev, SSR) & REG_MASK(SSR_RDRF)) == 0) { return 0; } data = uart_ra_read_8(dev, RDR); if (rx_data) { rx_data[0] = data; } return 1; } static void uart_ra_irq_tx_enable(const struct device *dev) { struct uart_ra_data *data = dev->data; k_spinlock_key_t key; uint16_t reg_val; key = k_spin_lock(&data->lock); reg_val = uart_ra_read_8(dev, SCR); reg_val |= (REG_MASK(SCR_TIE)); uart_ra_write_8(dev, SCR, reg_val); irq_enable(data->irqn[UART_RA_INT_TXI]); k_spin_unlock(&data->lock, key); } static void uart_ra_irq_tx_disable(const struct device *dev) { struct uart_ra_data *data = dev->data; k_spinlock_key_t key; uint16_t reg_val; key = k_spin_lock(&data->lock); reg_val = uart_ra_read_8(dev, SCR); reg_val &= ~(REG_MASK(SCR_TIE)); uart_ra_write_8(dev, SCR, reg_val); irq_disable(data->irqn[UART_RA_INT_TXI]); 
k_spin_unlock(&data->lock, key); } static int uart_ra_irq_tx_ready(const struct device *dev) { const uint8_t reg_val = uart_ra_read_8(dev, SSR); const uint8_t mask = REG_MASK(SSR_TEND) & REG_MASK(SSR_TDRE); return (reg_val & mask) == mask; } static void uart_ra_irq_rx_enable(const struct device *dev) { struct uart_ra_data *data = dev->data; k_spinlock_key_t key; uint16_t reg_val; key = k_spin_lock(&data->lock); reg_val = uart_ra_read_8(dev, SCR); reg_val |= REG_MASK(SCR_RIE); uart_ra_write_8(dev, SCR, reg_val); irq_enable(data->irqn[UART_RA_INT_RXI]); k_spin_unlock(&data->lock, key); } static void uart_ra_irq_rx_disable(const struct device *dev) { struct uart_ra_data *data = dev->data; k_spinlock_key_t key; uint16_t reg_val; key = k_spin_lock(&data->lock); reg_val = uart_ra_read_8(dev, SCR); reg_val &= ~REG_MASK(SCR_RIE); uart_ra_write_8(dev, SCR, reg_val); irq_disable(data->irqn[UART_RA_INT_RXI]); k_spin_unlock(&data->lock, key); } static int uart_ra_irq_rx_ready(const struct device *dev) { return !!(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_RDRF)); } static void uart_ra_irq_err_enable(const struct device *dev) { struct uart_ra_data *data = dev->data; irq_enable(data->irqn[UART_RA_INT_ERI]); } static void uart_ra_irq_err_disable(const struct device *dev) { struct uart_ra_data *data = dev->data; irq_disable(data->irqn[UART_RA_INT_ERI]); } static int uart_ra_irq_is_pending(const struct device *dev) { return (uart_ra_irq_rx_ready(dev) && uart_ra_irq_is_enabled(dev, REG_MASK(SCR_RIE))) || (uart_ra_irq_tx_ready(dev) && uart_ra_irq_is_enabled(dev, REG_MASK(SCR_TIE))); } static int uart_ra_irq_update(const struct device *dev) { return 1; } static void uart_ra_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_ra_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * @param arg Argument to ISR. 
*/ static inline void uart_ra_isr(const struct device *dev) { struct uart_ra_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } static void uart_ra_isr_rxi(const void *param) { const struct device *dev = param; struct uart_ra_data *data = dev->data; uart_ra_isr(dev); ra_icu_clear_int_flag(data->irqn[UART_RA_INT_RXI]); } static void uart_ra_isr_txi(const void *param) { const struct device *dev = param; struct uart_ra_data *data = dev->data; uart_ra_isr(dev); ra_icu_clear_int_flag(data->irqn[UART_RA_INT_TXI]); } static void uart_ra_isr_eri(const void *param) { const struct device *dev = param; struct uart_ra_data *data = dev->data; uart_ra_isr(dev); ra_icu_clear_int_flag(data->irqn[UART_RA_INT_ERI]); } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_ra_driver_api = { .poll_in = uart_ra_poll_in, .poll_out = uart_ra_poll_out, .err_check = uart_ra_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_ra_configure, .config_get = uart_ra_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_ra_fifo_fill, .fifo_read = uart_ra_fifo_read, .irq_tx_enable = uart_ra_irq_tx_enable, .irq_tx_disable = uart_ra_irq_tx_disable, .irq_tx_ready = uart_ra_irq_tx_ready, .irq_rx_enable = uart_ra_irq_rx_enable, .irq_rx_disable = uart_ra_irq_rx_disable, .irq_rx_ready = uart_ra_irq_rx_ready, .irq_err_enable = uart_ra_irq_err_enable, .irq_err_disable = uart_ra_irq_err_disable, .irq_is_pending = uart_ra_irq_is_pending, .irq_update = uart_ra_irq_update, .irq_callback_set = uart_ra_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; /* Device Instantiation */ #define UART_RA_INIT_CFG(n) \ PINCTRL_DT_DEFINE(DT_INST_PARENT(n)); \ static const struct uart_ra_cfg uart_ra_cfg_##n = { \ .regs = DT_REG_ADDR(DT_INST_PARENT(n)), \ .clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))), \ .clock_id = \ (clock_control_subsys_t)DT_CLOCKS_CELL_BY_IDX(DT_INST_PARENT(n), 0, id), \ .pcfg = 
PINCTRL_DT_DEV_CONFIG_GET(DT_INST_PARENT(n)), \ IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, ( \ .irq_config_func = irq_config_func_##n, \ )) \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define RA_IRQ_CONNECT_DYNAMIC(n, name, dev, isr) \ ra_icu_irq_connect_dynamic(DT_IRQ_BY_NAME(DT_INST_PARENT(n), name, irq), \ DT_IRQ_BY_NAME(DT_INST_PARENT(n), name, priority), isr, dev, \ DT_IRQ_BY_NAME(DT_INST_PARENT(n), name, flags)); #define RA_IRQ_DISCONNECT_DYNAMIC(n, name, dev, isr) \ ra_icu_irq_disconnect_dynamic(irqn, 0, NULL, NULL, 0) #define UART_RA_CONFIG_FUNC(n) \ static int irq_config_func_##n(const struct device *dev) \ { \ struct uart_ra_data *data = dev->data; \ int irqn; \ \ irqn = RA_IRQ_CONNECT_DYNAMIC(n, rxi, dev, uart_ra_isr_rxi); \ if (irqn < 0) { \ return irqn; \ } \ data->irqn[UART_RA_INT_RXI] = irqn; \ irqn = RA_IRQ_CONNECT_DYNAMIC(n, txi, dev, uart_ra_isr_txi); \ if (irqn < 0) { \ goto err_txi; \ } \ data->irqn[UART_RA_INT_TXI] = irqn; \ irqn = RA_IRQ_CONNECT_DYNAMIC(n, eri, dev, uart_ra_isr_eri); \ if (irqn < 0) { \ goto err_eri; \ } \ data->irqn[UART_RA_INT_ERI] = irqn; \ return 0; \ \ err_eri: \ RA_IRQ_DISCONNECT_DYNAMIC(data->irq[UART_RA_INT_TXI], eri, dev, uart_ra_isr_eri); \ err_txi: \ RA_IRQ_DISCONNECT_DYNAMIC(data->irq[UART_RA_INT_RXI], txi, dev, uart_ra_isr_txi); \ \ return irqn; \ } #else #define UART_RA_CONFIG_FUNC(n) #endif #define UART_RA_INIT(n) \ UART_RA_CONFIG_FUNC(n) \ UART_RA_INIT_CFG(n); \ \ static struct uart_ra_data uart_ra_data_##n = { \ .current_config = { \ .baudrate = DT_INST_PROP(n, current_speed), \ .parity = UART_CFG_PARITY_NONE, \ .stop_bits = UART_CFG_STOP_BITS_1, \ .data_bits = UART_CFG_DATA_BITS_8, \ .flow_ctrl = UART_CFG_FLOW_CTRL_NONE, \ }, \ }; \ \ DEVICE_DT_INST_DEFINE(n, uart_ra_init, NULL, &uart_ra_data_##n, &uart_ra_cfg_##n, \ PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_ra_driver_api); DT_INST_FOREACH_STATUS_OKAY(UART_RA_INIT) ```
/content/code_sandbox/drivers/serial/uart_renesas_ra.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,686
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SERIAL_UART_QL_USBSERIALPORT_S3B_H_ #define ZEPHYR_DRIVERS_SERIAL_UART_QL_USBSERIALPORT_S3B_H_ #include <stdint.h> #define USBSERIAL_TX_FIFOSIZE (512) #define USBSERIAL_RX_FIFOSIZE (512) /* USB-Serial FIFO status values */ #define USBSERIAL_RX_FIFO_EMPTY (0x00) /* 0000 Empty */ #define USBSERIAL_RX_FIFO_E1 (0x01) /* 0001 1 entry in FIFO */ #define USBSERIAL_RX_FIFO_GE_2 (0x02) /* 0010 At least 2 entries */ #define USBSERIAL_RX_FIFO_GE_4 (0x03) /* 0011 At least 4 entries */ #define USBSERIAL_RX_FIFO_GE_8 (0x04) /* 0100 At least 8 entries */ #define USBSERIAL_RX_FIFO_GE_16 (0x0A) /* 1010 At least 16 entries */ #define USBSERIAL_RX_FIFO_GE_32 (0x0B) /* 1011 At least 32 entries */ #define USBSERIAL_RX_FIFO_LT_QUARTER (0x0C) /* 1100 Less than 1/4 to 64 entries */ #define USBSERIAL_RX_FIFO_GT_QUARTE (0x0D) /* 1101 1/4 or more full */ #define USBSERIAL_RX_FIFO_GT_HALF (0x0E) /* 1110 1/2 or more full */ #define USBSERIAL_RX_FIFO_FULL (0x0F) /* 1111 Full */ #define USBSERIAL_TX_FIFO_FULL (0x00) /* 0000 Full */ #define USBSERIAL_TX_FIFO_EMPTY (0x01) /* 0001 Empty */ #define USBSERIAL_TX_FIFO_GT_HALF (0x02) /* 0010 Room for more than 1/2 */ #define USBSERIAL_TX_FIFO_GT_QUARTER (0x03) /* 0011 Room for more than 1/4 */ #define USBSERIAL_TX_FIFO_LT_QUARTER (0x04) /* 0100 Room for less than 1/4 */ #define USBSERIAL_TX_FIFO_32_TO_63 (0x0A) /* 1010 Room for 32 to 63 */ #define USBSERIAL_TX_FIFO_16_TO_31 (0x0B) /* 1011 Room for 16 to 31 */ #define USBSERIAL_TX_FIFO_8_TO_15 (0x0C) /* 1100 Room for 8 to 15 */ #define USBSERIAL_TX_FIFO_4_TO_7 (0x0D) /* 1101 Room for 4 to 7 */ #define USBSERIAL_TX_FIFO_GE_2 (0x0E) /* 1110 Room for at least 2 */ #define USBSERIAL_TX_FIFO_GE_1 (0x0F) /* 1111 Room for at least 1 */ struct fpga_usbserial_regs { uint32_t device_id; uint32_t rev_num; uint16_t scratch_reg; uint16_t reserved1; uint32_t clock_select; uint32_t usbpid; uint32_t reserved2[11]; unsigned u2m_fifo_flags : 4; unsigned reserved3 : 28; 
unsigned rdata : 8; unsigned reserved4 : 24; uint32_t reserved5[14]; unsigned m2u_fifo_flags : 4; unsigned reserved6 : 28; unsigned wdata : 8; unsigned reserved7 : 24; uint32_t reserved8[14]; unsigned u2m_fifo_int_en : 1; unsigned reserved9 : 31; }; #endif /* ZEPHYR_DRIVERS_SERIAL_UART_QL_USBSERIALPORT_S3B_H_ */ ```
/content/code_sandbox/drivers/serial/uart_ql_usbserialport_s3b.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
876
```unknown menuconfig UART_PL011 bool "ARM PL011 UART Driver" default y depends on DT_HAS_ARM_PL011_ENABLED || DT_HAS_ARM_SBSA_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select PINCTRL if SOC_EOS_S3 select PINCTRL if DT_HAS_AMBIQ_UART_ENABLED select PINCTRL if DT_HAS_RASPBERRYPI_PICO_UART_ENABLED help This option enables the UART driver for the PL011 if UART_PL011 config UART_PL011_SBSA bool "SBSA UART" default y if DT_HAS_ARM_SBSA_UART_ENABLED help Enable SBSA mode for PL011 driver. SBSA stands for Server Based System Architecture. This specification among other things defines simplified UART interface which is subset of PL011 interface. endif # UART_PL011 ```
/content/code_sandbox/drivers/serial/Kconfig.pl011
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
184
```c /* * */ #include <zephyr/drivers/uart.h> #include <zephyr/kernel.h> #include <SEGGER_RTT.h> #define DT_DRV_COMPAT segger_rtt_uart extern struct k_mutex rtt_term_mutex; struct uart_rtt_config { void *up_buffer; size_t up_size; void *down_buffer; size_t down_size; uint8_t channel; }; struct uart_rtt_data { #ifdef CONFIG_UART_ASYNC_API uart_callback_t callback; void *user_data; #endif /* CONFIG_UART_ASYNC_API */ }; static int uart_rtt_init(const struct device *dev) { /* * Channel 0 is initialized at compile-time, Kconfig ensures that * it is configured in correct, non-blocking mode. Other channels * need to be configured at run-time. */ if (dev->config) { const struct uart_rtt_config *cfg = dev->config; SEGGER_RTT_ConfigUpBuffer(cfg->channel, dev->name, cfg->up_buffer, cfg->up_size, SEGGER_RTT_MODE_NO_BLOCK_SKIP); SEGGER_RTT_ConfigDownBuffer(cfg->channel, dev->name, cfg->down_buffer, cfg->down_size, SEGGER_RTT_MODE_NO_BLOCK_SKIP); } return 0; } /** * @brief Poll the device for input. * * @param dev UART device struct * @param c Pointer to character * * @return 0 if a character arrived, -1 if the input buffer if empty. */ static int uart_rtt_poll_in(const struct device *dev, unsigned char *c) { const struct uart_rtt_config *config = dev->config; unsigned int ch = config ? config->channel : 0; unsigned int ret = SEGGER_RTT_Read(ch, c, 1); return ret ? 0 : -1; } /** * @brief Output a character in polled mode. * * @param dev UART device struct * @param c Character to send */ static void uart_rtt_poll_out(const struct device *dev, unsigned char c) { const struct uart_rtt_config *config = dev->config; unsigned int ch = config ? 
config->channel : 0; SEGGER_RTT_Write(ch, &c, 1); } #ifdef CONFIG_UART_ASYNC_API static int uart_rtt_callback_set(const struct device *dev, uart_callback_t callback, void *user_data) { struct uart_rtt_data *data = dev->data; data->callback = callback; data->user_data = user_data; return 0; } static int uart_rtt_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { const struct uart_rtt_config *cfg = dev->config; struct uart_rtt_data *data = dev->data; unsigned int ch = cfg ? cfg->channel : 0; ARG_UNUSED(timeout); /* RTT mutex cannot be claimed in ISRs */ if (k_is_in_isr()) { return -ENOTSUP; } /* Claim the RTT lock */ if (k_mutex_lock(&rtt_term_mutex, K_NO_WAIT) != 0) { return -EBUSY; } /* Output the buffer */ SEGGER_RTT_WriteNoLock(ch, buf, len); /* Return RTT lock */ SEGGER_RTT_UNLOCK(); /* Send the TX complete callback */ if (data->callback) { struct uart_event evt = { .type = UART_TX_DONE, .data.tx.buf = buf, .data.tx.len = len }; data->callback(dev, &evt, data->user_data); } return 0; } static int uart_rtt_tx_abort(const struct device *dev) { /* RTT TX is a memcpy, there is never a transmission to abort */ ARG_UNUSED(dev); return -EFAULT; } static int uart_rtt_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { /* SEGGER RTT reception is implemented as a direct memory write to RAM * by a connected debugger. As such there is no hardware interrupt * or other mechanism to know when the debugger has added data to be * read. Asynchronous RX does not make sense in such a context, and is * therefore not supported. 
*/ ARG_UNUSED(dev); ARG_UNUSED(buf); ARG_UNUSED(len); ARG_UNUSED(timeout); return -ENOTSUP; } static int uart_rtt_rx_disable(const struct device *dev) { /* Asynchronous RX not supported, see uart_rtt_rx_enable */ ARG_UNUSED(dev); return -EFAULT; } static int uart_rtt_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { /* Asynchronous RX not supported, see uart_rtt_rx_enable */ ARG_UNUSED(dev); ARG_UNUSED(buf); ARG_UNUSED(len); return -ENOTSUP; } #endif /* CONFIG_UART_ASYNC_API */ static const struct uart_driver_api uart_rtt_driver_api = { .poll_in = uart_rtt_poll_in, .poll_out = uart_rtt_poll_out, #ifdef CONFIG_UART_ASYNC_API .callback_set = uart_rtt_callback_set, .tx = uart_rtt_tx, .tx_abort = uart_rtt_tx_abort, .rx_enable = uart_rtt_rx_enable, .rx_buf_rsp = uart_rtt_rx_buf_rsp, .rx_disable = uart_rtt_rx_disable, #endif /* CONFIG_UART_ASYNC_API */ }; #define UART_RTT(idx) DT_NODELABEL(rtt##idx) #define UART_RTT_PROP(idx, prop) DT_PROP(UART_RTT(idx), prop) #define UART_RTT_CONFIG_NAME(idx) uart_rtt##idx##_config #define UART_RTT_CONFIG(idx) \ static \ uint8_t uart_rtt##idx##_tx_buf[UART_RTT_PROP(idx, tx_buffer_size)]; \ static \ uint8_t uart_rtt##idx##_rx_buf[UART_RTT_PROP(idx, rx_buffer_size)]; \ \ static const struct uart_rtt_config UART_RTT_CONFIG_NAME(idx) = { \ .up_buffer = uart_rtt##idx##_tx_buf, \ .up_size = sizeof(uart_rtt##idx##_tx_buf), \ .down_buffer = uart_rtt##idx##_rx_buf, \ .down_size = sizeof(uart_rtt##idx##_rx_buf), \ .channel = idx, \ } #define UART_RTT_INIT(idx, config) \ struct uart_rtt_data uart_rtt##idx##_data; \ \ DEVICE_DT_DEFINE(UART_RTT(idx), uart_rtt_init, NULL, \ &uart_rtt##idx##_data, config, \ PRE_KERNEL_2, CONFIG_SERIAL_INIT_PRIORITY, \ &uart_rtt_driver_api) #ifdef CONFIG_UART_RTT_0 UART_RTT_INIT(0, NULL); #endif #ifdef CONFIG_UART_RTT_1 UART_RTT_CONFIG(1); UART_RTT_INIT(1, &UART_RTT_CONFIG_NAME(1)); #endif #ifdef CONFIG_UART_RTT_2 UART_RTT_CONFIG(2); UART_RTT_INIT(2, &UART_RTT_CONFIG_NAME(2)); #endif #ifdef 
CONFIG_UART_RTT_3 UART_RTT_CONFIG(3); UART_RTT_INIT(3, &UART_RTT_CONFIG_NAME(3)); #endif ```
/content/code_sandbox/drivers/serial/uart_rtt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,589
```unknown # NPCX UART driver configuration options DT_UART_NPCX:=$(dt_nodelabel_path,uart1) config UART_NPCX bool "Nuvoton NPCX embedded controller (EC) serial driver" default y depends on DT_HAS_NUVOTON_NPCX_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the UART driver for NPCX family of processors. Say y if you wish to use serial port on NPCX MCU. # Expose this option when the reg porperty has two register base address. # i.e. One UART register bass address and one MDMA register base address. config UART_NPCX_USE_MDMA bool "Nuvoton NPCX embedded controller (EC) serial driver DMA support" depends on UART_NPCX && "$(dt_node_reg_addr_hex,$(DT_UART_NPCX),1)" != 0 select SERIAL_SUPPORT_ASYNC help Enable support for npcx UART DMA mode. ```
/content/code_sandbox/drivers/serial/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
204
```c /* * */ #define DT_DRV_COMPAT nxp_imx_uart /** * @brief Driver for UART on NXP IMX family processor. * * For full serial function, use the USART controller. * */ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/sys/__assert.h> #include <soc.h> #include <zephyr/init.h> #include <zephyr/drivers/uart.h> #include <uart_imx.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #define UART_STRUCT(dev) \ ((UART_Type *)((const struct imx_uart_config *const)(dev)->config)->base) struct imx_uart_config { UART_Type *base; uint32_t baud_rate; uint8_t modem_mode; const struct pinctrl_dev_config *pincfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif }; struct imx_uart_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; /** * @brief Initialize UART channel * * This routine is called to reset the chip in a quiescent state. * It is assumed that this function is called only once per UART. * * @param dev UART device struct * * @return 0 */ static int uart_imx_init(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); const struct imx_uart_config *config = dev->config; unsigned int old_level; int err; /* disable interrupts */ old_level = irq_lock(); /* Setup UART init structure */ uart_init_config_t initConfig = { .baudRate = config->baud_rate, .wordLength = uartWordLength8Bits, .stopBitNum = uartStopBitNumOne, .parity = uartParityDisable, .direction = uartDirectionTxRx }; err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } /* Get current module clock frequency */ initConfig.clockRate = get_uart_clock_freq(uart); UART_Init(uart, &initConfig); /* Set UART built-in hardware FIFO Watermark. 
*/ UART_SetTxFifoWatermark(uart, 2); UART_SetRxFifoWatermark(uart, 1); /* restore interrupt state */ irq_unlock(old_level); #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif /* Set UART modem mode */ UART_SetModemMode(uart, config->modem_mode); /* Finally, enable the UART module */ UART_Enable(uart); return 0; } static void uart_imx_poll_out(const struct device *dev, unsigned char c) { UART_Type *uart = UART_STRUCT(dev); while (!UART_GetStatusFlag(uart, uartStatusTxReady)) { } UART_Putchar(uart, c); } static int uart_imx_poll_in(const struct device *dev, unsigned char *c) { UART_Type *uart = UART_STRUCT(dev); int ret = -1; if (UART_GetStatusFlag(uart, uartStatusRxDataReady)) { *c = UART_Getchar(uart); if (UART_GetStatusFlag(uart, uartStatusRxOverrun)) { UART_ClearStatusFlag(uart, uartStatusRxOverrun); } ret = 0; } return ret; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_imx_fifo_fill(const struct device *dev, const uint8_t *tx_data, int size) { UART_Type *uart = UART_STRUCT(dev); unsigned int num_tx = 0U; while (((size - num_tx) > 0) && UART_GetStatusFlag(uart, uartStatusTxReady)) { /* Send a character */ UART_Putchar(uart, tx_data[num_tx]); num_tx++; } return (int)num_tx; } static int uart_imx_fifo_read(const struct device *dev, uint8_t *rx_data, const int size) { UART_Type *uart = UART_STRUCT(dev); unsigned int num_rx = 0U; while (((size - num_rx) > 0) && UART_GetStatusFlag(uart, uartStatusRxReady)) { /* Receive a character */ rx_data[num_rx++] = UART_Getchar(uart); } if (UART_GetStatusFlag(uart, uartStatusRxOverrun)) { UART_ClearStatusFlag(uart, uartStatusRxOverrun); } return num_rx; } static void uart_imx_irq_tx_enable(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); UART_SetIntCmd(uart, uartIntTxReady, true); } static void uart_imx_irq_tx_disable(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); UART_SetIntCmd(uart, uartIntTxReady, false); } static int uart_imx_irq_tx_ready(const struct device *dev) 
{ UART_Type *uart = UART_STRUCT(dev); return UART_GetStatusFlag(uart, uartStatusTxReady); } static void uart_imx_irq_rx_enable(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); UART_SetIntCmd(uart, uartIntRxReady, true); } static void uart_imx_irq_rx_disable(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); UART_SetIntCmd(uart, uartIntRxReady, false); } static int uart_imx_irq_rx_ready(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); return UART_GetStatusFlag(uart, uartStatusRxReady); } static void uart_imx_irq_err_enable(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); UART_SetIntCmd(uart, uartIntParityError, true); UART_SetIntCmd(uart, uartIntFrameError, true); } static void uart_imx_irq_err_disable(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); UART_SetIntCmd(uart, uartIntParityError, false); UART_SetIntCmd(uart, uartIntFrameError, false); } static int uart_imx_irq_is_pending(const struct device *dev) { UART_Type *uart = UART_STRUCT(dev); return UART_GetStatusFlag(uart, uartStatusRxReady) || UART_GetStatusFlag(uart, uartStatusTxReady); } static int uart_imx_irq_update(const struct device *dev) { return 1; } static void uart_imx_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct imx_uart_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } /** * @brief Interrupt service routine. * * This simply calls the callback function, if one exists. * * Note: imx UART Tx interrupts when ready to send; Rx interrupts when char * received. * * @param arg Argument to ISR. 
*/ void uart_imx_isr(const struct device *dev) { struct imx_uart_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static const struct uart_driver_api uart_imx_driver_api = { .poll_in = uart_imx_poll_in, .poll_out = uart_imx_poll_out, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_imx_fifo_fill, .fifo_read = uart_imx_fifo_read, .irq_tx_enable = uart_imx_irq_tx_enable, .irq_tx_disable = uart_imx_irq_tx_disable, .irq_tx_ready = uart_imx_irq_tx_ready, .irq_rx_enable = uart_imx_irq_rx_enable, .irq_rx_disable = uart_imx_irq_rx_disable, .irq_rx_ready = uart_imx_irq_rx_ready, .irq_err_enable = uart_imx_irq_err_enable, .irq_err_disable = uart_imx_irq_err_disable, .irq_is_pending = uart_imx_irq_is_pending, .irq_update = uart_imx_irq_update, .irq_callback_set = uart_imx_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ }; #define UART_IMX_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct imx_uart_config imx_uart_##n##_config = { \ .base = (UART_Type *) DT_INST_REG_ADDR(n), \ .baud_rate = DT_INST_PROP(n, current_speed), \ .modem_mode = DT_INST_PROP(n, modem_mode), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ IRQ_FUNC_INIT \ } #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define UART_IMX_CONFIG_FUNC(n) \ static void irq_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), \ DT_INST_IRQ(n, priority), \ uart_imx_isr, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } #define UART_IMX_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = irq_config_func_##n #define UART_IMX_INIT_CFG(n) \ UART_IMX_DECLARE_CFG(n, UART_IMX_IRQ_CFG_FUNC_INIT(n)) #else #define UART_IMX_CONFIG_FUNC(n) #define UART_IMX_IRQ_CFG_FUNC_INIT #define UART_IMX_INIT_CFG(n) \ UART_IMX_DECLARE_CFG(n, UART_IMX_IRQ_CFG_FUNC_INIT) #endif #define UART_IMX_INIT(n) \ static struct imx_uart_data imx_uart_##n##_data; \ \ static const struct imx_uart_config imx_uart_##n##_config; \ \ 
PINCTRL_DT_INST_DEFINE(n); \ \ DEVICE_DT_INST_DEFINE(n, uart_imx_init, NULL, \ &imx_uart_##n##_data, &imx_uart_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &uart_imx_driver_api); \ \ UART_IMX_CONFIG_FUNC(n) \ \ UART_IMX_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(UART_IMX_INIT) ```
/content/code_sandbox/drivers/serial/uart_imx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,309
```unknown config UART_OPENTITAN bool "OpenTitan UART" default y depends on DT_HAS_LOWRISC_OPENTITAN_UART_ENABLED depends on !SERIAL_SUPPORT_INTERRUPT select SERIAL_HAS_DRIVER help Enable OpenTitan UART serial driver ```
/content/code_sandbox/drivers/serial/Kconfig.opentitan
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
57
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_uart /* Include esp-idf headers first to avoid redefining BIT() macro */ /* TODO: include w/o prefix */ #ifdef CONFIG_SOC_SERIES_ESP32 #include <esp32/rom/ets_sys.h> #include <esp32/rom/gpio.h> #include <soc/dport_reg.h> #elif defined(CONFIG_SOC_SERIES_ESP32S2) #include <esp32s2/rom/ets_sys.h> #include <esp32s2/rom/gpio.h> #include <soc/dport_reg.h> #elif defined(CONFIG_SOC_SERIES_ESP32S3) #include <esp32s3/rom/ets_sys.h> #include <esp32s3/rom/gpio.h> #include <zephyr/dt-bindings/clock/esp32s3_clock.h> #elif defined(CONFIG_SOC_SERIES_ESP32C2) #include <esp32c2/rom/ets_sys.h> #include <esp32c2/rom/gpio.h> #include <zephyr/dt-bindings/clock/esp32c2_clock.h> #elif defined(CONFIG_SOC_SERIES_ESP32C3) #include <esp32c3/rom/ets_sys.h> #include <esp32c3/rom/gpio.h> #include <zephyr/dt-bindings/clock/esp32c3_clock.h> #elif defined(CONFIG_SOC_SERIES_ESP32C6) #include <esp32c6/rom/ets_sys.h> #include <esp32c6/rom/gpio.h> #include <zephyr/dt-bindings/clock/esp32c6_clock.h> #endif #ifdef CONFIG_UART_ASYNC_API #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_esp32.h> #include <hal/uhci_ll.h> #endif #include <soc/uart_struct.h> #include <hal/uart_ll.h> #include <hal/uart_hal.h> #include <hal/uart_types.h> #include <esp_clk_tree.h> #include <zephyr/drivers/pinctrl.h> #include <soc/uart_reg.h> #include <zephyr/device.h> #include <soc.h> #include <zephyr/drivers/uart.h> #if defined(CONFIG_SOC_SERIES_ESP32C2) || \ defined(CONFIG_SOC_SERIES_ESP32C3) || \ defined(CONFIG_SOC_SERIES_ESP32C6) #include <zephyr/drivers/interrupt_controller/intc_esp32c3.h> #else #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #endif #include <zephyr/drivers/clock_control.h> #include <errno.h> #include <zephyr/sys/util.h> #include <esp_attr.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_esp32, CONFIG_UART_LOG_LEVEL); #if defined(CONFIG_SOC_SERIES_ESP32C2) || \ defined(CONFIG_SOC_SERIES_ESP32C3) || \ 
defined(CONFIG_SOC_SERIES_ESP32C6) #define ISR_HANDLER isr_handler_t #else #define ISR_HANDLER intr_handler_t #endif struct uart_esp32_config { const struct device *clock_dev; const struct pinctrl_dev_config *pcfg; const clock_control_subsys_t clock_subsys; int irq_source; int irq_priority; bool tx_invert; bool rx_invert; #if CONFIG_UART_ASYNC_API const struct device *dma_dev; uint8_t tx_dma_channel; uint8_t rx_dma_channel; #endif }; #if CONFIG_UART_ASYNC_API struct uart_esp32_async_data { struct k_work_delayable tx_timeout_work; const uint8_t *tx_buf; size_t tx_len; struct k_work_delayable rx_timeout_work; uint8_t *rx_buf; uint8_t *rx_next_buf; size_t rx_len; size_t rx_next_len; size_t rx_timeout; volatile size_t rx_counter; size_t rx_offset; uart_callback_t cb; void *user_data; }; #endif /* driver data */ struct uart_esp32_data { struct uart_config uart_config; uart_hal_context_t hal; #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t irq_cb; void *irq_cb_data; #endif #if CONFIG_UART_ASYNC_API struct uart_esp32_async_data async; uhci_dev_t *uhci_dev; const struct device *uart_dev; #endif }; #define UART_FIFO_LIMIT (UART_LL_FIFO_DEF_LEN) #define UART_TX_FIFO_THRESH (CONFIG_UART_ESP32_TX_FIFO_THRESH) #define UART_RX_FIFO_THRESH (CONFIG_UART_ESP32_RX_FIFO_THRESH) #if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API static void uart_esp32_isr(void *arg); #endif static int uart_esp32_poll_in(const struct device *dev, unsigned char *p_char) { struct uart_esp32_data *data = dev->data; int inout_rd_len = 1; if (uart_hal_get_rxfifo_len(&data->hal) == 0) { return -1; } uart_hal_read_rxfifo(&data->hal, p_char, &inout_rd_len); return 0; } static void uart_esp32_poll_out(const struct device *dev, unsigned char c) { struct uart_esp32_data *data = dev->data; uint32_t written; /* Wait for space in FIFO */ while (uart_hal_get_txfifo_len(&data->hal) == 0) { ; /* Wait */ } /* Send a character */ uart_hal_write_txfifo(&data->hal, &c, 1, &written); } static int 
uart_esp32_err_check(const struct device *dev) { struct uart_esp32_data *data = dev->data; uint32_t mask = uart_hal_get_intsts_mask(&data->hal); uint32_t err = mask & (UART_INTR_PARITY_ERR | UART_INTR_FRAM_ERR); return err; } #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE static int uart_esp32_config_get(const struct device *dev, struct uart_config *cfg) { struct uart_esp32_data *data = dev->data; uart_parity_t parity; uart_stop_bits_t stop_bit; uart_word_length_t data_bit; uart_hw_flowcontrol_t hw_flow; uart_sclk_t src_clk; uint32_t sclk_freq; uart_hal_get_sclk(&data->hal, &src_clk); esp_clk_tree_src_get_freq_hz((soc_module_clk_t)src_clk, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &sclk_freq); uart_hal_get_baudrate(&data->hal, &cfg->baudrate, sclk_freq); uart_hal_get_parity(&data->hal, &parity); switch (parity) { case UART_PARITY_DISABLE: cfg->parity = UART_CFG_PARITY_NONE; break; case UART_PARITY_EVEN: cfg->parity = UART_CFG_PARITY_EVEN; break; case UART_PARITY_ODD: cfg->parity = UART_CFG_PARITY_ODD; break; default: return -ENOTSUP; } uart_hal_get_stop_bits(&data->hal, &stop_bit); switch (stop_bit) { case UART_STOP_BITS_1: cfg->stop_bits = UART_CFG_STOP_BITS_1; break; case UART_STOP_BITS_1_5: cfg->stop_bits = UART_CFG_STOP_BITS_1_5; break; case UART_STOP_BITS_2: cfg->stop_bits = UART_CFG_STOP_BITS_2; break; default: return -ENOTSUP; } uart_hal_get_data_bit_num(&data->hal, &data_bit); switch (data_bit) { case UART_DATA_5_BITS: cfg->data_bits = UART_CFG_DATA_BITS_5; break; case UART_DATA_6_BITS: cfg->data_bits = UART_CFG_DATA_BITS_6; break; case UART_DATA_7_BITS: cfg->data_bits = UART_CFG_DATA_BITS_7; break; case UART_DATA_8_BITS: cfg->data_bits = UART_CFG_DATA_BITS_8; break; default: return -ENOTSUP; } uart_hal_get_hw_flow_ctrl(&data->hal, &hw_flow); switch (hw_flow) { case UART_HW_FLOWCTRL_DISABLE: cfg->flow_ctrl = UART_CFG_FLOW_CTRL_NONE; break; case UART_HW_FLOWCTRL_CTS_RTS: cfg->flow_ctrl = UART_CFG_FLOW_CTRL_RTS_CTS; break; default: return -ENOTSUP; } if 
(uart_hal_is_mode_rs485_half_duplex(&data->hal)) { cfg->flow_ctrl = UART_CFG_FLOW_CTRL_RS485; } return 0; } #endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ static int uart_esp32_configure(const struct device *dev, const struct uart_config *cfg) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; uart_sclk_t src_clk; uint32_t sclk_freq; int ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { return ret; } if (!device_is_ready(config->clock_dev)) { return -ENODEV; } clock_control_on(config->clock_dev, config->clock_subsys); uart_hal_set_sclk(&data->hal, UART_SCLK_DEFAULT); uart_hal_set_rxfifo_full_thr(&data->hal, UART_RX_FIFO_THRESH); uart_hal_set_txfifo_empty_thr(&data->hal, UART_TX_FIFO_THRESH); uart_hal_rxfifo_rst(&data->hal); switch (cfg->parity) { case UART_CFG_PARITY_NONE: uart_hal_set_parity(&data->hal, UART_PARITY_DISABLE); break; case UART_CFG_PARITY_EVEN: uart_hal_set_parity(&data->hal, UART_PARITY_EVEN); break; case UART_CFG_PARITY_ODD: uart_hal_set_parity(&data->hal, UART_PARITY_ODD); break; default: return -ENOTSUP; } switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: uart_hal_set_stop_bits(&data->hal, UART_STOP_BITS_1); break; case UART_CFG_STOP_BITS_1_5: uart_hal_set_stop_bits(&data->hal, UART_STOP_BITS_1_5); break; case UART_CFG_STOP_BITS_2: uart_hal_set_stop_bits(&data->hal, UART_STOP_BITS_2); break; default: return -ENOTSUP; } switch (cfg->data_bits) { case UART_CFG_DATA_BITS_5: uart_hal_set_data_bit_num(&data->hal, UART_DATA_5_BITS); break; case UART_CFG_DATA_BITS_6: uart_hal_set_data_bit_num(&data->hal, UART_DATA_6_BITS); break; case UART_CFG_DATA_BITS_7: uart_hal_set_data_bit_num(&data->hal, UART_DATA_7_BITS); break; case UART_CFG_DATA_BITS_8: uart_hal_set_data_bit_num(&data->hal, UART_DATA_8_BITS); break; default: return -ENOTSUP; } uart_hal_set_mode(&data->hal, UART_MODE_UART); switch (cfg->flow_ctrl) { case UART_CFG_FLOW_CTRL_NONE: uart_hal_set_hw_flow_ctrl(&data->hal, 
UART_HW_FLOWCTRL_DISABLE, 0); break; case UART_CFG_FLOW_CTRL_RTS_CTS: uart_hal_set_hw_flow_ctrl(&data->hal, UART_HW_FLOWCTRL_CTS_RTS, 10); break; case UART_CFG_FLOW_CTRL_RS485: uart_hal_set_mode(&data->hal, UART_MODE_RS485_HALF_DUPLEX); break; default: return -ENOTSUP; } uart_hal_get_sclk(&data->hal, &src_clk); esp_clk_tree_src_get_freq_hz((soc_module_clk_t)src_clk, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &sclk_freq); uart_hal_set_baudrate(&data->hal, cfg->baudrate, sclk_freq); uart_hal_set_rx_timeout(&data->hal, 0x16); if (config->tx_invert) { uart_hal_inverse_signal(&data->hal, UART_SIGNAL_TXD_INV); } if (config->rx_invert) { uart_hal_inverse_signal(&data->hal, UART_SIGNAL_RXD_INV); } return 0; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int uart_esp32_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { struct uart_esp32_data *data = dev->data; uint32_t written = 0; if (len < 0) { return 0; } uart_hal_write_txfifo(&data->hal, tx_data, len, &written); return written; } static int uart_esp32_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { struct uart_esp32_data *data = dev->data; const int num_rx = uart_hal_get_rxfifo_len(&data->hal); int read = MIN(len, num_rx); if (!read) { return 0; } uart_hal_read_rxfifo(&data->hal, rx_data, &read); return read; } static void uart_esp32_irq_tx_enable(const struct device *dev) { struct uart_esp32_data *data = dev->data; uart_hal_clr_intsts_mask(&data->hal, UART_INTR_TXFIFO_EMPTY); uart_hal_ena_intr_mask(&data->hal, UART_INTR_TXFIFO_EMPTY); } static void uart_esp32_irq_tx_disable(const struct device *dev) { struct uart_esp32_data *data = dev->data; uart_hal_disable_intr_mask(&data->hal, UART_INTR_TXFIFO_EMPTY); } static int uart_esp32_irq_tx_ready(const struct device *dev) { struct uart_esp32_data *data = dev->data; return (uart_hal_get_txfifo_len(&data->hal) > 0 && uart_hal_get_intr_ena_status(&data->hal) & UART_INTR_TXFIFO_EMPTY); } static void uart_esp32_irq_rx_disable(const struct 
device *dev) { struct uart_esp32_data *data = dev->data; uart_hal_disable_intr_mask(&data->hal, UART_INTR_RXFIFO_FULL); uart_hal_disable_intr_mask(&data->hal, UART_INTR_RXFIFO_TOUT); } static int uart_esp32_irq_tx_complete(const struct device *dev) { struct uart_esp32_data *data = dev->data; return uart_hal_is_tx_idle(&data->hal); } static int uart_esp32_irq_rx_ready(const struct device *dev) { struct uart_esp32_data *data = dev->data; return (uart_hal_get_rxfifo_len(&data->hal) > 0); } static void uart_esp32_irq_err_enable(const struct device *dev) { struct uart_esp32_data *data = dev->data; /* enable framing, parity */ uart_hal_ena_intr_mask(&data->hal, UART_INTR_FRAM_ERR); uart_hal_ena_intr_mask(&data->hal, UART_INTR_PARITY_ERR); } static void uart_esp32_irq_err_disable(const struct device *dev) { struct uart_esp32_data *data = dev->data; uart_hal_disable_intr_mask(&data->hal, UART_INTR_FRAM_ERR); uart_hal_disable_intr_mask(&data->hal, UART_INTR_PARITY_ERR); } static int uart_esp32_irq_is_pending(const struct device *dev) { return uart_esp32_irq_rx_ready(dev) || uart_esp32_irq_tx_ready(dev); } static int uart_esp32_irq_update(const struct device *dev) { struct uart_esp32_data *data = dev->data; uart_hal_clr_intsts_mask(&data->hal, UART_INTR_RXFIFO_FULL); uart_hal_clr_intsts_mask(&data->hal, UART_INTR_RXFIFO_TOUT); uart_hal_clr_intsts_mask(&data->hal, UART_INTR_TXFIFO_EMPTY); return 1; } static void uart_esp32_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct uart_esp32_data *data = dev->data; data->irq_cb = cb; data->irq_cb_data = cb_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->async.cb = NULL; data->async.user_data = NULL; #endif } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #ifdef CONFIG_UART_ASYNC_API static inline void uart_esp32_async_timer_start(struct k_work_delayable *work, size_t timeout) { if ((timeout != SYS_FOREVER_US) && (timeout != 0)) { LOG_DBG("Async timer started for %d us", 
timeout); k_work_reschedule(work, K_USEC(timeout)); } } #endif #if CONFIG_UART_ASYNC_API || CONFIG_UART_INTERRUPT_DRIVEN static void uart_esp32_irq_rx_enable(const struct device *dev) { struct uart_esp32_data *data = dev->data; uart_hal_clr_intsts_mask(&data->hal, UART_INTR_RXFIFO_FULL); uart_hal_clr_intsts_mask(&data->hal, UART_INTR_RXFIFO_TOUT); uart_hal_ena_intr_mask(&data->hal, UART_INTR_RXFIFO_FULL); uart_hal_ena_intr_mask(&data->hal, UART_INTR_RXFIFO_TOUT); } static void uart_esp32_isr(void *arg) { const struct device *dev = (const struct device *)arg; struct uart_esp32_data *data = dev->data; uint32_t uart_intr_status = uart_hal_get_intsts_mask(&data->hal); const struct uart_esp32_config *config = dev->config; if (uart_intr_status == 0) { return; } uart_hal_clr_intsts_mask(&data->hal, uart_intr_status); #if CONFIG_UART_INTERRUPT_DRIVEN /* Verify if the callback has been registered */ if (data->irq_cb) { data->irq_cb(dev, data->irq_cb_data); } #endif #if CONFIG_UART_ASYNC_API if (uart_intr_status & UART_INTR_RXFIFO_FULL) { data->async.rx_counter++; uart_esp32_async_timer_start(&data->async.rx_timeout_work, data->async.rx_timeout); } #endif } #endif #if CONFIG_UART_ASYNC_API static void IRAM_ATTR uart_esp32_dma_rx_done(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *uart_dev = user_data; const struct uart_esp32_config *config = uart_dev->config; struct uart_esp32_data *data = uart_dev->data; struct uart_event evt = {0}; struct dma_status dma_status = {0}; unsigned int key = irq_lock(); /* If the receive buffer is not complete we reload the DMA at current buffer position and * let the timeout callback handle the notifications */ if (data->async.rx_counter != data->async.rx_len) { dma_reload(config->dma_dev, config->rx_dma_channel, 0, (uint32_t)data->async.rx_buf + data->async.rx_counter, data->async.rx_len - data->async.rx_counter); dma_start(config->dma_dev, config->rx_dma_channel); 
data->uhci_dev->pkt_thres.thrs = data->async.rx_len - data->async.rx_counter; irq_unlock(key); return; } /*Notify RX_RDY*/ evt.type = UART_RX_RDY; evt.data.rx.buf = data->async.rx_buf; evt.data.rx.len = data->async.rx_counter - data->async.rx_offset; evt.data.rx.offset = data->async.rx_offset; if (data->async.cb && evt.data.rx.len) { data->async.cb(data->uart_dev, &evt, data->async.user_data); } data->async.rx_offset = 0; data->async.rx_counter = 0; /*Release current buffer*/ evt.type = UART_RX_BUF_RELEASED; evt.data.rx_buf.buf = data->async.rx_buf; if (data->async.cb) { data->async.cb(uart_dev, &evt, data->async.user_data); } /*Load next buffer and request another*/ data->async.rx_buf = data->async.rx_next_buf; data->async.rx_len = data->async.rx_next_len; data->async.rx_next_buf = NULL; data->async.rx_next_len = 0U; evt.type = UART_RX_BUF_REQUEST; if (data->async.cb) { data->async.cb(uart_dev, &evt, data->async.user_data); } /*Notify RX_DISABLED when there is no buffer*/ if (!data->async.rx_buf) { evt.type = UART_RX_DISABLED; if (data->async.cb) { data->async.cb(uart_dev, &evt, data->async.user_data); } } else { /*Reload DMA with new buffer*/ dma_reload(config->dma_dev, config->rx_dma_channel, 0, (uint32_t)data->async.rx_buf, data->async.rx_len); dma_start(config->dma_dev, config->rx_dma_channel); data->uhci_dev->pkt_thres.thrs = data->async.rx_len; } irq_unlock(key); } static void IRAM_ATTR uart_esp32_dma_tx_done(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *uart_dev = user_data; const struct uart_esp32_config *config = uart_dev->config; struct uart_esp32_data *data = uart_dev->data; struct uart_event evt = {0}; unsigned int key = irq_lock(); k_work_cancel_delayable(&data->async.tx_timeout_work); evt.type = UART_TX_DONE; evt.data.tx.buf = data->async.tx_buf; evt.data.tx.len = data->async.tx_len; if (data->async.cb) { data->async.cb(uart_dev, &evt, data->async.user_data); } /* Reset TX Buffer */ 
data->async.tx_buf = NULL; data->async.tx_len = 0U; irq_unlock(key); } static int uart_esp32_async_tx_abort(const struct device *dev) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; struct uart_event evt = {0}; int err = 0; unsigned int key = irq_lock(); k_work_cancel_delayable(&data->async.tx_timeout_work); err = dma_stop(config->dma_dev, config->tx_dma_channel); if (err) { LOG_ERR("Error stopping Tx DMA (%d)", err); goto unlock; } evt.type = UART_TX_ABORTED; evt.data.tx.buf = data->async.tx_buf; evt.data.tx.len = data->async.tx_len; if (data->async.cb) { data->async.cb(dev, &evt, data->async.user_data); } unlock: irq_unlock(key); return err; } static void uart_esp32_async_tx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_esp32_async_data *async = CONTAINER_OF(dwork, struct uart_esp32_async_data, tx_timeout_work); struct uart_esp32_data *data = CONTAINER_OF(async, struct uart_esp32_data, async); uart_esp32_async_tx_abort(data->uart_dev); } static void uart_esp32_async_rx_timeout(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct uart_esp32_async_data *async = CONTAINER_OF(dwork, struct uart_esp32_async_data, rx_timeout_work); struct uart_esp32_data *data = CONTAINER_OF(async, struct uart_esp32_data, async); const struct uart_esp32_config *config = data->uart_dev->config; struct uart_event evt = {0}; int err = 0; unsigned int key = irq_lock(); evt.type = UART_RX_RDY; evt.data.rx.buf = data->async.rx_buf; evt.data.rx.len = data->async.rx_counter - data->async.rx_offset; evt.data.rx.offset = data->async.rx_offset; if (data->async.cb && evt.data.rx.len) { data->async.cb(data->uart_dev, &evt, data->async.user_data); } data->async.rx_offset = data->async.rx_counter; k_work_cancel_delayable(&data->async.rx_timeout_work); irq_unlock(key); } static int uart_esp32_async_callback_set(const struct device *dev, 
uart_callback_t callback, void *user_data) { struct uart_esp32_data *data = dev->data; if (!callback) { return -EINVAL; } data->async.cb = callback; data->async.user_data = user_data; #if defined(CONFIG_UART_EXCLUSIVE_API_CALLBACKS) data->irq_cb = NULL; data->irq_cb_data = NULL; #endif return 0; } static int uart_esp32_async_tx(const struct device *dev, const uint8_t *buf, size_t len, int32_t timeout) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; struct dma_config dma_cfg = {0}; struct dma_block_config dma_blk = {0}; struct dma_status dma_status = {0}; int err = 0; unsigned int key = irq_lock(); if (config->tx_dma_channel == 0xFF) { LOG_ERR("Tx DMA channel is not configured"); err = -ENOTSUP; goto unlock; } err = dma_get_status(config->dma_dev, config->tx_dma_channel, &dma_status); if (err) { LOG_ERR("Unable to get Tx status (%d)", err); goto unlock; } if (dma_status.busy) { LOG_ERR("Tx DMA Channel is busy"); err = -EBUSY; goto unlock; } data->async.tx_buf = buf; data->async.tx_len = len; dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL; dma_cfg.dma_callback = uart_esp32_dma_tx_done; dma_cfg.user_data = (void *)dev; dma_cfg.dma_slot = ESP_GDMA_TRIG_PERIPH_UHCI0; dma_cfg.block_count = 1; dma_cfg.head_block = &dma_blk; dma_blk.block_size = len; dma_blk.source_address = (uint32_t)buf; err = dma_config(config->dma_dev, config->tx_dma_channel, &dma_cfg); if (err) { LOG_ERR("Error configuring Tx DMA (%d)", err); goto unlock; } uart_esp32_async_timer_start(&data->async.tx_timeout_work, timeout); err = dma_start(config->dma_dev, config->tx_dma_channel); if (err) { LOG_ERR("Error starting Tx DMA (%d)", err); goto unlock; } unlock: irq_unlock(key); return err; } static int uart_esp32_async_rx_enable(const struct device *dev, uint8_t *buf, size_t len, int32_t timeout) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; struct dma_config dma_cfg = {0}; struct dma_block_config 
dma_blk = {0}; struct dma_status dma_status = {0}; int err = 0; struct uart_event evt = {0}; if (config->rx_dma_channel == 0xFF) { LOG_ERR("Rx DMA channel is not configured"); return -ENOTSUP; } err = dma_get_status(config->dma_dev, config->rx_dma_channel, &dma_status); if (err) { LOG_ERR("Unable to get Rx status (%d)", err); return err; } if (dma_status.busy) { LOG_ERR("Rx DMA Channel is busy"); return -EBUSY; } unsigned int key = irq_lock(); data->async.rx_buf = buf; data->async.rx_len = len; data->async.rx_timeout = timeout; dma_cfg.channel_direction = PERIPHERAL_TO_MEMORY; dma_cfg.dma_callback = uart_esp32_dma_rx_done; dma_cfg.user_data = (void *)dev; dma_cfg.dma_slot = ESP_GDMA_TRIG_PERIPH_UHCI0; dma_cfg.block_count = 1; dma_cfg.head_block = &dma_blk; dma_blk.block_size = len; dma_blk.dest_address = (uint32_t)data->async.rx_buf; err = dma_config(config->dma_dev, config->rx_dma_channel, &dma_cfg); if (err) { LOG_ERR("Error configuring Rx DMA (%d)", err); goto unlock; } /* * Enable interrupt on first receive byte so we can start async timer */ uart_hal_set_rxfifo_full_thr(&data->hal, 1); uart_esp32_irq_rx_enable(dev); err = dma_start(config->dma_dev, config->rx_dma_channel); if (err) { LOG_ERR("Error starting Rx DMA (%d)", err); goto unlock; } data->uhci_dev->pkt_thres.thrs = len; /** * Request next buffer */ evt.type = UART_RX_BUF_REQUEST; if (data->async.cb) { data->async.cb(dev, &evt, data->async.user_data); } unlock: irq_unlock(key); return err; } static int uart_esp32_async_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; data->async.rx_next_buf = buf; data->async.rx_next_len = len; return 0; } static int uart_esp32_async_rx_disable(const struct device *dev) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; unsigned int key = irq_lock(); int err = 0; struct uart_event evt = {0}; 
k_work_cancel_delayable(&data->async.rx_timeout_work); if (!data->async.rx_len) { err = -EINVAL; goto unlock; } err = dma_stop(config->dma_dev, config->rx_dma_channel); if (err) { LOG_ERR("Error stopping Rx DMA (%d)", err); goto unlock; } /*If any bytes have been received notify RX_RDY*/ evt.type = UART_RX_RDY; evt.data.rx.buf = data->async.rx_buf; evt.data.rx.len = data->async.rx_counter - data->async.rx_offset; evt.data.rx.offset = data->async.rx_offset; if (data->async.cb && evt.data.rx.len) { data->async.cb(data->uart_dev, &evt, data->async.user_data); } data->async.rx_offset = 0; data->async.rx_counter = 0; /* Release current buffer*/ evt.type = UART_RX_BUF_RELEASED; evt.data.rx_buf.buf = data->async.rx_buf; if (data->async.cb) { data->async.cb(dev, &evt, data->async.user_data); } data->async.rx_len = 0; data->async.rx_buf = NULL; /*Release next buffer*/ if (data->async.rx_next_len) { evt.type = UART_RX_BUF_RELEASED; evt.data.rx_buf.buf = data->async.rx_next_buf; if (data->async.cb) { data->async.cb(dev, &evt, data->async.user_data); } data->async.rx_next_len = 0; data->async.rx_next_buf = NULL; } /*Notify UART_RX_DISABLED*/ evt.type = UART_RX_DISABLED; if (data->async.cb) { data->async.cb(dev, &evt, data->async.user_data); } unlock: irq_unlock(key); return err; } #endif /* CONFIG_UART_ASYNC_API */ static int uart_esp32_init(const struct device *dev) { const struct uart_esp32_config *config = dev->config; struct uart_esp32_data *data = dev->data; int ret = uart_esp32_configure(dev, &data->uart_config); if (ret < 0) { LOG_ERR("Error configuring UART (%d)", ret); return ret; } #if CONFIG_UART_INTERRUPT_DRIVEN || CONFIG_UART_ASYNC_API ret = esp_intr_alloc(config->irq_source, config->irq_priority, (ISR_HANDLER)uart_esp32_isr, (void *)dev, NULL); if (ret < 0) { LOG_ERR("Error allocating UART interrupt (%d)", ret); return ret; } #endif #if CONFIG_UART_ASYNC_API if (config->dma_dev) { if (!device_is_ready(config->dma_dev)) { LOG_ERR("DMA device is not ready"); return 
-ENODEV; } clock_control_on(config->clock_dev, (clock_control_subsys_t)ESP32_UHCI0_MODULE); uhci_ll_init(data->uhci_dev); uhci_ll_set_eof_mode(data->uhci_dev, UHCI_RX_IDLE_EOF | UHCI_RX_LEN_EOF); uhci_ll_attach_uart_port(data->uhci_dev, uart_hal_get_port_num(&data->hal)); data->uart_dev = dev; k_work_init_delayable(&data->async.tx_timeout_work, uart_esp32_async_tx_timeout); k_work_init_delayable(&data->async.rx_timeout_work, uart_esp32_async_rx_timeout); } #endif return 0; } static const DRAM_ATTR struct uart_driver_api uart_esp32_api = { .poll_in = uart_esp32_poll_in, .poll_out = uart_esp32_poll_out, .err_check = uart_esp32_err_check, #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE .configure = uart_esp32_configure, .config_get = uart_esp32_config_get, #endif #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = uart_esp32_fifo_fill, .fifo_read = uart_esp32_fifo_read, .irq_tx_enable = uart_esp32_irq_tx_enable, .irq_tx_disable = uart_esp32_irq_tx_disable, .irq_tx_ready = uart_esp32_irq_tx_ready, .irq_rx_enable = uart_esp32_irq_rx_enable, .irq_rx_disable = uart_esp32_irq_rx_disable, .irq_tx_complete = uart_esp32_irq_tx_complete, .irq_rx_ready = uart_esp32_irq_rx_ready, .irq_err_enable = uart_esp32_irq_err_enable, .irq_err_disable = uart_esp32_irq_err_disable, .irq_is_pending = uart_esp32_irq_is_pending, .irq_update = uart_esp32_irq_update, .irq_callback_set = uart_esp32_irq_callback_set, #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ #if CONFIG_UART_ASYNC_API .callback_set = uart_esp32_async_callback_set, .tx = uart_esp32_async_tx, .tx_abort = uart_esp32_async_tx_abort, .rx_enable = uart_esp32_async_rx_enable, .rx_buf_rsp = uart_esp32_async_rx_buf_rsp, .rx_disable = uart_esp32_async_rx_disable, #endif /*CONFIG_UART_ASYNC_API*/ }; #if CONFIG_UART_ASYNC_API #define ESP_UART_DMA_INIT(n) \ .dma_dev = ESP32_DT_INST_DMA_CTLR(n, tx), \ .tx_dma_channel = ESP32_DT_INST_DMA_CELL(n, tx, channel), \ .rx_dma_channel = ESP32_DT_INST_DMA_CELL(n, rx, channel) #define ESP_UART_UHCI_INIT(n) \ 
.uhci_dev = COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas), (&UHCI0), (NULL)) #define UART_IRQ_PRIORITY ESP_INTR_FLAG_LEVEL2 #else #define ESP_UART_DMA_INIT(n) #define ESP_UART_UHCI_INIT(n) #define UART_IRQ_PRIORITY (0) #endif #define ESP32_UART_INIT(idx) \ \ PINCTRL_DT_INST_DEFINE(idx); \ \ static const DRAM_ATTR struct uart_esp32_config uart_esp32_cfg_port_##idx = { \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(idx)), \ .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(idx), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(idx, offset), \ .irq_source = DT_INST_IRQN(idx), \ .irq_priority = UART_IRQ_PRIORITY, \ .tx_invert = DT_INST_PROP_OR(idx, tx_invert, false), \ .rx_invert = DT_INST_PROP_OR(idx, rx_invert, false), \ ESP_UART_DMA_INIT(idx)}; \ \ static struct uart_esp32_data uart_esp32_data_##idx = { \ .uart_config = {.baudrate = DT_INST_PROP(idx, current_speed), \ .parity = DT_INST_ENUM_IDX_OR(idx, parity, UART_CFG_PARITY_NONE), \ .stop_bits = DT_INST_ENUM_IDX_OR(idx, stop_bits, \ UART_CFG_STOP_BITS_1), \ .data_bits = DT_INST_ENUM_IDX_OR(idx, data_bits, \ UART_CFG_DATA_BITS_8), \ .flow_ctrl = MAX(COND_CODE_1(DT_INST_PROP(idx, hw_rs485_hd_mode), \ (UART_CFG_FLOW_CTRL_RS485), \ (UART_CFG_FLOW_CTRL_NONE)), \ COND_CODE_1(DT_INST_PROP(idx, hw_flow_control), \ (UART_CFG_FLOW_CTRL_RTS_CTS), \ (UART_CFG_FLOW_CTRL_NONE)))}, \ .hal = \ { \ .dev = (uart_dev_t *)DT_INST_REG_ADDR(idx), \ }, \ ESP_UART_UHCI_INIT(idx)}; \ \ DEVICE_DT_INST_DEFINE(idx, uart_esp32_init, NULL, &uart_esp32_data_##idx, \ &uart_esp32_cfg_port_##idx, PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, &uart_esp32_api); DT_INST_FOREACH_STATUS_OKAY(ESP32_UART_INIT); ```
/content/code_sandbox/drivers/serial/uart_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,360
```objective-c /* */ #ifndef ZEPHYR_DRIVERS_SERIAL_UART_CDNS_H_ #define ZEPHYR_DRIVERS_SERIAL_UART_CDNS_H_ #include <zephyr/arch/cpu.h> #include <zephyr/drivers/uart.h> #include <zephyr/kernel.h> enum csr_parity_val { EVEN_PARITY_VAL, ODD_PARITY_VAL, SPACE_PARITY_VAL, MARK_PARITY_VAL, NO_PARITY_VAL }; /* @brief Control(CTRL) Registers offset 0x00 */ #define CTRL_STPBRK_MASK (1 << 8) #define CTRL_STPBRK_SHIFT (8) #define CTRL_STTBRK_MASK (1 << 7) #define CTRL_STTBRK_SHIFT (7) #define CTRL_RSTTO_MASK (1 << 6) #define CTRL_RSTTO_SHIFT (6) #define CTRL_TXDIS_MASK (1 << 5) #define CTRL_TXDIS_SHIFT (5) #define CTRL_TXEN_MASK (1 << 4) #define CTRL_TXEN_SHIFT (4) #define CTRL_RXDIS_MASK (1 << 3) #define CTRL_RXDIS_SHIFT (3) #define CTRL_RXEN_MASK (1 << 2) #define CTRL_RXEN_SHIFT (2) #define CTRL_TXRES_MASK (1 << 1) #define CTRL_TXRES_SHIFT (1) #define CTRL_RXRES_MASK (1 << 0) #define CTRL_RXRES_SHIFT (0) /* @brief Mode Registers offset 0x04 */ #define MODE_WSIZE_MASK (0x3 << 12) #define MODE_WSIZE_SHIFT (12) #define MODE_WSIZE_SIZE (2) #define MODE_IRMODE_MASK (1 << 11) #define MODE_IRMODE_SHIFT (11) #define MODE_UCLKEN_MASK (1 << 10) #define MODE_UCLKEN_SHIFT (10) #define MODE_CHMOD_MASK (0x3 << 8) #define MODE_CHMOD_SHIFT (8) #define MODE_CHMOD_SIZE (2) #define MODE_NBSTOP_MASK (0x3 << 6) #define MODE_NBSTOP_SHIFT (6) #define MODE_NBSTOP_SIZE (2) #define MODE_PAR_MASK (0x7 << 3) #define MODE_PAR_SHIFT (3) #define MODE_PAR_SIZE (3) #define MODE_CHRL_MASK (0x3 << 1) #define MODE_CHRL_SHIFT (2) #define MODE_CHRL_SIZE (2) #define MODE_CLKS_MASK (1 << 0) #define MODE_CLKS_SHIFT (0) /* @brief IER, IDR, IMR and CSIR Registers offset 0x08, 0xC, 0x10 and 0x14 */ #define CSR_RBRK_MASK (1 << 13) #define CSR_RBRK_SHIFT (13) #define CSR_TOVR_MASK (1 << 12) #define CSR_TOVR_SHIFT (12) #define CSR_TNFUL_MASK (1 << 11) #define CSR_TNFUL_SHIFT (11) #define CSR_TTRIG_MASK (1 << 10) #define CSR_TTRIG_SHIFT (10) #define CSR_DMSI_MASK (1 << 9) #define CSR_DMSI_SHIFT (9) #define CSR_TOUT_MASK 
(1 << 8) #define CSR_TOUT_SHIFT (8) #define CSR_PARE_MASK (1 << 7) #define CSR_PARE_SHIFT (7) #define CSR_FRAME_MASK (1 << 6) #define CSR_FRAME_SHIFT (6) #define CSR_ROVR_MASK (1 << 5) #define CSR_ROVR_SHIFT (5) #define CSR_TFUL_MASK (1 << 4) #define CSR_TFUL_SHIFT (4) #define CSR_TEMPTY_MASK (1 << 3) #define CSR_TEMPTY_SHIFT (3) #define CSR_RFUL_MASK (1 << 2) #define CSR_RFUL_SHIFT (2) #define CSR_REMPTY_MASK (1 << 1) #define CSR_REMPTY_SHIFT (1) #define CSR_RTRIG_MASK (1 << 0) #define CSR_RTRIG_SHIFT (0) #define RXDATA_MASK 0xFF /* Receive Data Mask */ #define MAX_FIFO_SIZE (64) #define DEFAULT_RTO_PERIODS_FACTOR 8 #define SET_VAL32(name, val) (((uint32_t)(val) << name##_SHIFT) & name##_MASK) #define CDNS_PARTITY_MAP(parity) \ (parity == UART_CFG_PARITY_NONE) ? NO_PARITY_VAL \ : (parity == UART_CFG_PARITY_ODD) ? ODD_PARITY_VAL \ : (parity == UART_CFG_PARITY_MARK) ? MARK_PARITY_VAL \ : (parity == UART_CFG_PARITY_SPACE) ? SPACE_PARITY_VAL \ : EVEN_PARITY_VAL struct uart_cdns_regs { volatile uint32_t ctrl; /* Control Register */ volatile uint32_t mode; /* Mode Register */ volatile uint32_t intr_enable; /* Interrupt Enable Register */ volatile uint32_t intr_disable; /* Interrupt Disable Register */ volatile uint32_t intr_mask; /* Interrupt Mask Register */ volatile uint32_t channel_intr_status; /* Channel Interrupt Status Register */ volatile uint32_t baud_rate_gen; /* Baud Rate Generator Register */ volatile uint32_t rx_timeout; /* Receiver Timeout Register */ volatile uint32_t rx_fifo_trigger_level; /* Receiver FIFO Trigger Level Register */ volatile uint32_t modem_control; /* Modem Control Register */ volatile uint32_t modem_status; /* Modem Status Register */ volatile uint32_t channel_status; /* Channel status */ volatile uint32_t rx_tx_fifo; /* RX TX FIFO Register */ volatile uint32_t baud_rate_div; /* Baud Rate Divider Register */ volatile uint32_t flow_ctrl_delay; /* Flow Control Delay Register */ volatile uint32_t rpwr; /* IR Minimum Received Pulse Register 
*/ volatile uint32_t tpwr; /* IR TRansmitted Pulse Width Register */ volatile uint32_t tx_fifo_trigger_level; /* Transmitter FIFO trigger level */ volatile uint32_t rbrs; /* RX FIFO Byte Status Register */ }; struct uart_cdns_device_config { uint32_t port; uint32_t bdiv; uint32_t sys_clk_freq; uint32_t baud_rate; uint8_t parity; void (*cfg_func)(void); }; struct uart_cdns_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; #endif /* ZEPHYR_DRIVERS_SERIAL_UART_CDNS_H_ */ ```
/content/code_sandbox/drivers/serial/uart_cdns.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,388
```c /* * */ #define DT_DRV_COMPAT nxp_imx_iuart #include <zephyr/device.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/irq.h> #include <errno.h> #include <fsl_uart.h> #include <zephyr/drivers/pinctrl.h> struct mcux_iuart_config { UART_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; uint32_t baud_rate; /* initial parity, 0 for none, 1 for odd, 2 for even */ uint8_t parity; const struct pinctrl_dev_config *pincfg; #ifdef CONFIG_UART_INTERRUPT_DRIVEN void (*irq_config_func)(const struct device *dev); #endif }; struct mcux_iuart_data { #ifdef CONFIG_UART_INTERRUPT_DRIVEN uart_irq_callback_user_data_t callback; void *cb_data; #endif }; static int mcux_iuart_poll_in(const struct device *dev, unsigned char *c) { const struct mcux_iuart_config *config = dev->config; int ret = -1; if (UART_GetStatusFlag(config->base, kUART_RxDataReadyFlag)) { *c = UART_ReadByte(config->base); ret = 0; } return ret; } static void mcux_iuart_poll_out(const struct device *dev, unsigned char c) { const struct mcux_iuart_config *config = dev->config; while (!(UART_GetStatusFlag(config->base, kUART_TxReadyFlag))) { } UART_WriteByte(config->base, c); } static int mcux_iuart_err_check(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; int err = 0; if (UART_GetStatusFlag(config->base, kUART_RxOverrunFlag)) { err |= UART_ERROR_OVERRUN; UART_ClearStatusFlag(config->base, kUART_RxOverrunFlag); } if (UART_GetStatusFlag(config->base, kUART_ParityErrorFlag)) { err |= UART_ERROR_PARITY; UART_ClearStatusFlag(config->base, kUART_ParityErrorFlag); } if (UART_GetStatusFlag(config->base, kUART_FrameErrorFlag)) { err |= UART_ERROR_FRAMING; UART_ClearStatusFlag(config->base, kUART_FrameErrorFlag); } return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN static int mcux_iuart_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len) { const struct mcux_iuart_config *config = dev->config; uint8_t 
num_tx = 0U; while ((len - num_tx > 0) && (UART_GetStatusFlag(config->base, kUART_TxEmptyFlag))) { UART_WriteByte(config->base, tx_data[num_tx++]); } return num_tx; } static int mcux_iuart_fifo_read(const struct device *dev, uint8_t *rx_data, const int len) { const struct mcux_iuart_config *config = dev->config; uint8_t num_rx = 0U; while ((len - num_rx > 0) && (UART_GetStatusFlag(config->base, kUART_RxDataReadyFlag))) { rx_data[num_rx++] = UART_ReadByte(config->base); } return num_rx; } static void mcux_iuart_irq_tx_enable(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; UART_EnableInterrupts(config->base, kUART_TxEmptyEnable); } static void mcux_iuart_irq_tx_disable(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; UART_DisableInterrupts(config->base, kUART_TxEmptyEnable); } static int mcux_iuart_irq_tx_complete(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; return (UART_GetStatusFlag(config->base, kUART_TxEmptyFlag)) != 0U; } static int mcux_iuart_irq_tx_ready(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; uint32_t mask = kUART_TxEmptyEnable; return (UART_GetEnabledInterrupts(config->base) & mask) && mcux_iuart_irq_tx_complete(dev); } static void mcux_iuart_irq_rx_enable(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; uint32_t mask = kUART_RxDataReadyEnable; UART_EnableInterrupts(config->base, mask); } static void mcux_iuart_irq_rx_disable(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; uint32_t mask = kUART_RxDataReadyEnable; UART_DisableInterrupts(config->base, mask); } static int mcux_iuart_irq_rx_full(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; return (UART_GetStatusFlag(config->base, kUART_RxDataReadyFlag)) != 0U; } static int mcux_iuart_irq_rx_pending(const struct device *dev) { const struct mcux_iuart_config *config = 
dev->config; uint32_t mask = kUART_RxDataReadyEnable; return (UART_GetEnabledInterrupts(config->base) & mask) && mcux_iuart_irq_rx_full(dev); } static void mcux_iuart_irq_err_enable(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; uint32_t mask = kUART_RxOverrunEnable | kUART_ParityErrorEnable | kUART_FrameErrorEnable; UART_EnableInterrupts(config->base, mask); } static void mcux_iuart_irq_err_disable(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; uint32_t mask = kUART_RxOverrunEnable | kUART_ParityErrorEnable | kUART_FrameErrorEnable; UART_DisableInterrupts(config->base, mask); } static int mcux_iuart_irq_is_pending(const struct device *dev) { return mcux_iuart_irq_tx_ready(dev) || mcux_iuart_irq_rx_pending(dev); } static int mcux_iuart_irq_update(const struct device *dev) { return 1; } static void mcux_iuart_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb, void *cb_data) { struct mcux_iuart_data *data = dev->data; data->callback = cb; data->cb_data = cb_data; } static void mcux_iuart_isr(const struct device *dev) { struct mcux_iuart_data *data = dev->data; if (data->callback) { data->callback(dev, data->cb_data); } } #endif /* CONFIG_UART_INTERRUPT_DRIVEN */ static int mcux_iuart_init(const struct device *dev) { const struct mcux_iuart_config *config = dev->config; uart_config_t uart_config; uint32_t clock_freq; int err; if (!device_is_ready(config->clock_dev)) { return -ENODEV; } if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_freq)) { return -EINVAL; } UART_GetDefaultConfig(&uart_config); uart_config.enableTx = true; uart_config.enableRx = true; uart_config.baudRate_Bps = config->baud_rate; clock_control_on(config->clock_dev, config->clock_subsys); switch (config->parity) { case UART_CFG_PARITY_NONE: uart_config.parityMode = kUART_ParityDisabled; break; case UART_CFG_PARITY_EVEN: uart_config.parityMode = kUART_ParityEven; break; case 
UART_CFG_PARITY_ODD: uart_config.parityMode = kUART_ParityOdd; break; default: return -ENOTSUP; } UART_Init(config->base, &uart_config, clock_freq); err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (err) { clock_control_off(config->clock_dev, config->clock_subsys); return err; } #ifdef CONFIG_UART_INTERRUPT_DRIVEN config->irq_config_func(dev); #endif return 0; } static const struct uart_driver_api mcux_iuart_driver_api = { .poll_in = mcux_iuart_poll_in, .poll_out = mcux_iuart_poll_out, .err_check = mcux_iuart_err_check, #ifdef CONFIG_UART_INTERRUPT_DRIVEN .fifo_fill = mcux_iuart_fifo_fill, .fifo_read = mcux_iuart_fifo_read, .irq_tx_enable = mcux_iuart_irq_tx_enable, .irq_tx_disable = mcux_iuart_irq_tx_disable, .irq_tx_complete = mcux_iuart_irq_tx_complete, .irq_tx_ready = mcux_iuart_irq_tx_ready, .irq_rx_enable = mcux_iuart_irq_rx_enable, .irq_rx_disable = mcux_iuart_irq_rx_disable, .irq_rx_ready = mcux_iuart_irq_rx_full, .irq_err_enable = mcux_iuart_irq_err_enable, .irq_err_disable = mcux_iuart_irq_err_disable, .irq_is_pending = mcux_iuart_irq_is_pending, .irq_update = mcux_iuart_irq_update, .irq_callback_set = mcux_iuart_irq_callback_set, #endif }; #ifdef CONFIG_UART_INTERRUPT_DRIVEN #define MCUX_IUART_IRQ_INIT(n, i) \ do { \ IRQ_CONNECT(DT_INST_IRQ_BY_IDX(n, i, irq), \ DT_INST_IRQ_BY_IDX(n, i, priority), \ mcux_iuart_isr, DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQ_BY_IDX(n, i, irq)); \ } while (false) #define IUART_MCUX_CONFIG_FUNC(n) \ static void mcux_iuart_config_func_##n(const struct device *dev) \ { \ MCUX_IUART_IRQ_INIT(n, 0); \ \ IF_ENABLED(DT_INST_IRQ_HAS_IDX(n, 1), \ (MCUX_IUART_IRQ_INIT(n, 1);)) \ } #define IUART_MCUX_IRQ_CFG_FUNC_INIT(n) \ .irq_config_func = mcux_iuart_config_func_##n #define IUART_MCUX_INIT_CFG(n) \ IUART_MCUX_DECLARE_CFG(n, IUART_MCUX_IRQ_CFG_FUNC_INIT(n)) #else #define IUART_MCUX_CONFIG_FUNC(n) #define IUART_MCUX_IRQ_CFG_FUNC_INIT #define IUART_MCUX_INIT_CFG(n) \ IUART_MCUX_DECLARE_CFG(n, 
IUART_MCUX_IRQ_CFG_FUNC_INIT) #endif #define IUART_MCUX_DECLARE_CFG(n, IRQ_FUNC_INIT) \ static const struct mcux_iuart_config mcux_iuart_##n##_config = { \ .base = (UART_Type *) DT_INST_REG_ADDR(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name),\ .baud_rate = DT_INST_PROP(n, current_speed), \ .parity = DT_INST_ENUM_IDX_OR(n, parity, UART_CFG_PARITY_NONE), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ IRQ_FUNC_INIT \ } #define IUART_MCUX_INIT(n) \ \ static struct mcux_iuart_data mcux_iuart_##n##_data; \ \ static const struct mcux_iuart_config mcux_iuart_##n##_config;\ \ DEVICE_DT_INST_DEFINE(n, \ mcux_iuart_init, \ NULL, \ &mcux_iuart_##n##_data, \ &mcux_iuart_##n##_config, \ PRE_KERNEL_1, \ CONFIG_SERIAL_INIT_PRIORITY, \ &mcux_iuart_driver_api); \ \ PINCTRL_DT_INST_DEFINE(n); \ \ IUART_MCUX_CONFIG_FUNC(n) \ \ IUART_MCUX_INIT_CFG(n); DT_INST_FOREACH_STATUS_OKAY(IUART_MCUX_INIT) ```
/content/code_sandbox/drivers/serial/uart_mcux_iuart.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,718
```unknown menuconfig UART_STELLARIS bool "Stellaris serial driver" default y depends on DT_HAS_TI_STELLARIS_UART_ENABLED select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT help This option enables the Stellaris serial driver. This specific driver can be used for the serial hardware available at the Texas Instrument LM3S6965 board. if UART_STELLARIS # ---------- Port 0 ---------- config UART_STELLARIS_PORT_0 bool "Stellaris UART Port 0" help This tells the driver to configure the UART port at boot, depending on the additional configure options below. # ---------- Port 1 ---------- config UART_STELLARIS_PORT_1 bool "Stellaris UART Port 1" help This tells the driver to configure the UART port at boot, depending on the additional configure options below. # ---------- Port 2 ---------- config UART_STELLARIS_PORT_2 bool "Stellaris UART Port 2" help This tells the driver to configure the UART port at boot, depending on the additional configure options below. endif # UART_STELLARIS ```
/content/code_sandbox/drivers/serial/Kconfig.stellaris
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
252
```objective-c /* * * * "Bottom" of native ptty uart driver * When built with the native_simulator this will be built in the runner context, * that is, with the host C library, and with the host include paths. */ #ifndef DRIVERS_SERIAL_UART_NATIVE_PTTY_BOTTOM_H #define DRIVERS_SERIAL_UART_NATIVE_PTTY_BOTTOM_H #include <stdbool.h> #include <stdint.h> #ifdef __cplusplus extern "C" { #endif /* Note: None of these functions are public interfaces. But internal to the native ptty driver */ int np_uart_stdin_poll_in_bottom(int in_f, unsigned char *p_char); int np_uart_slave_connected(int fd); int np_uart_open_ptty(const char *uart_name, const char *auto_attach_cmd, bool do_auto_attach, bool wait_pts); int np_uart_ptty_get_stdin_fileno(void); int np_uart_ptty_get_stdout_fileno(void); #ifdef __cplusplus } #endif #endif /* DRIVERS_SERIAL_UART_NATIVE_PTTY_BOTTOM_H */ ```
/content/code_sandbox/drivers/serial/uart_native_ptty_bottom.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
210
```unknown # nrfx UART configuration menuconfig UART_NRFX bool "nRF UART nrfx drivers" default y select SERIAL_HAS_DRIVER select SERIAL_SUPPORT_INTERRUPT select SERIAL_SUPPORT_ASYNC select PINCTRL depends on DT_HAS_NORDIC_NRF_UART_ENABLED || DT_HAS_NORDIC_NRF_UARTE_ENABLED help Enable support for nrfx UART drivers for nRF MCU series. Peripherals with the same instance ID cannot be used together, e.g. UART_0 and UARTE_0. if UART_NRFX config UART_NRFX_UART def_bool y depends on DT_HAS_NORDIC_NRF_UART_ENABLED config UART_NRFX_UARTE def_bool y depends on DT_HAS_NORDIC_NRF_UARTE_ENABLED imply NRFX_UARTE_CONFIG_SKIP_PSEL_CONFIG if !UART_NRFX_UARTE_LEGACY_SHIM imply NRFX_UARTE_CONFIG_SKIP_GPIO_CONFIG if !UART_NRFX_UARTE_LEGACY_SHIM config UART_NRFX_UARTE_LEGACY_SHIM bool "Legacy UARTE shim" depends on UART_NRFX_UARTE depends on !SOC_SERIES_NRF54LX depends on RISCV || !SOC_SERIES_NRF54HX # New shim takes more ROM. Until it is fixed use legacy shim. default y config UART_ASYNC_TX_CACHE_SIZE int "TX cache buffer size" depends on UART_ASYNC_API depends on UART_NRFX_UARTE_LEGACY_SHIM default 8 help For UARTE, TX cache buffer is used when provided TX buffer is not located in RAM, because EasyDMA in UARTE peripherals can only transfer data from RAM. 
if HAS_HW_NRF_UART0 || HAS_HW_NRF_UARTE0 nrfx_uart_num = 0 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE1 nrfx_uart_num = 1 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE2 nrfx_uart_num = 2 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE3 nrfx_uart_num = 3 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE00 nrfx_uart_num = 00 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE20 nrfx_uart_num = 20 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE21 nrfx_uart_num = 21 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE22 nrfx_uart_num = 22 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE30 nrfx_uart_num = 30 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE120 nrfx_uart_num = 120 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE130 nrfx_uart_num = 130 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE131 nrfx_uart_num = 131 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE132 nrfx_uart_num = 132 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE133 nrfx_uart_num = 133 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE134 nrfx_uart_num = 134 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE135 nrfx_uart_num = 135 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE136 nrfx_uart_num = 136 rsource "Kconfig.nrfx_uart_instance" endif if HAS_HW_NRF_UARTE137 nrfx_uart_num = 137 rsource "Kconfig.nrfx_uart_instance" endif config NRFX_TIMER0 default y depends on UART_0_NRF_HW_ASYNC_TIMER = 0 \ || UART_1_NRF_HW_ASYNC_TIMER = 0 \ || UART_2_NRF_HW_ASYNC_TIMER = 0 \ || UART_3_NRF_HW_ASYNC_TIMER = 0 config NRFX_TIMER1 default y depends on UART_0_NRF_HW_ASYNC_TIMER = 1 \ || UART_1_NRF_HW_ASYNC_TIMER = 1 \ || UART_2_NRF_HW_ASYNC_TIMER = 1 \ || UART_3_NRF_HW_ASYNC_TIMER = 1 config NRFX_TIMER2 default y depends on UART_0_NRF_HW_ASYNC_TIMER = 2 \ || UART_1_NRF_HW_ASYNC_TIMER = 2 \ 
|| UART_2_NRF_HW_ASYNC_TIMER = 2 \ || UART_3_NRF_HW_ASYNC_TIMER = 2 config NRFX_TIMER3 default y depends on UART_0_NRF_HW_ASYNC_TIMER = 3 \ || UART_1_NRF_HW_ASYNC_TIMER = 3 \ || UART_2_NRF_HW_ASYNC_TIMER = 3 \ || UART_3_NRF_HW_ASYNC_TIMER = 3 config NRFX_TIMER4 default y depends on UART_0_NRF_HW_ASYNC_TIMER = 4 \ || UART_1_NRF_HW_ASYNC_TIMER = 4 \ || UART_2_NRF_HW_ASYNC_TIMER = 4 \ || UART_3_NRF_HW_ASYNC_TIMER = 4 endif # UART_NRFX ```
/content/code_sandbox/drivers/serial/Kconfig.nrfx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,312
```c /** @file * @brief Pipe UART driver * * A pipe UART driver allowing application to handle all aspects of received * protocol data. */ /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(uart_pipe); #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/uart_pipe.h> #include <zephyr/sys/printk.h> static const struct device *const uart_pipe_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_uart_pipe)); static uint8_t *recv_buf; static size_t recv_buf_len; static uart_pipe_recv_cb app_cb; static size_t recv_off; static void uart_pipe_rx(const struct device *dev) { /* As per the API, the interrupt may be an edge so keep * reading from the FIFO until it's empty. */ for (;;) { int avail = recv_buf_len - recv_off; int got; got = uart_fifo_read(uart_pipe_dev, recv_buf + recv_off, avail); if (got <= 0) { break; } LOG_HEXDUMP_DBG(recv_buf + recv_off, got, "RX"); /* * Call application callback with received data. Application * may provide new buffer or alter data offset. */ recv_off += got; recv_buf = app_cb(recv_buf, &recv_off); } } static void uart_pipe_isr(const struct device *dev, void *user_data) { ARG_UNUSED(user_data); uart_irq_update(dev); if (uart_irq_rx_ready(dev)) { uart_pipe_rx(dev); } } int uart_pipe_send(const uint8_t *data, int len) { LOG_HEXDUMP_DBG(data, len, "TX"); while (len--) { uart_poll_out(uart_pipe_dev, *data++); } return 0; } static void uart_pipe_setup(const struct device *uart) { uint8_t c; uart_irq_rx_disable(uart); uart_irq_tx_disable(uart); /* Drain the fifo */ while (uart_fifo_read(uart, &c, 1)) { continue; } uart_irq_callback_set(uart, uart_pipe_isr); uart_irq_rx_enable(uart); } void uart_pipe_register(uint8_t *buf, size_t len, uart_pipe_recv_cb cb) { recv_buf = buf; recv_buf_len = len; app_cb = cb; if (device_is_ready(uart_pipe_dev)) { uart_pipe_setup(uart_pipe_dev); } } ```
/content/code_sandbox/drivers/serial/uart_pipe.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
549