text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /* * */ #define DT_DRV_COMPAT siemens_ivshmem_eth #include <zephyr/drivers/virtualization/ivshmem.h> #include <zephyr/logging/log.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include "eth.h" #include "eth_ivshmem_priv.h" LOG_MODULE_REGISTER(eth_ivshmem, CONFIG_ETHERNET_LOG_LEVEL); #define ETH_IVSHMEM_STATE_RESET 0 #define ETH_IVSHMEM_STATE_INIT 1 #define ETH_IVSHMEM_STATE_READY 2 #define ETH_IVSHMEM_STATE_RUN 3 static const char * const eth_ivshmem_state_names[] = { [ETH_IVSHMEM_STATE_RESET] = "RESET", [ETH_IVSHMEM_STATE_INIT] = "INIT", [ETH_IVSHMEM_STATE_READY] = "READY", [ETH_IVSHMEM_STATE_RUN] = "RUN" }; struct eth_ivshmem_dev_data { struct net_if *iface; uint32_t tx_rx_vector; uint32_t peer_id; uint8_t mac_addr[6]; struct k_poll_signal poll_signal; struct eth_ivshmem_queue ivshmem_queue; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_IVSHMEM_THREAD_STACK_SIZE); struct k_thread thread; bool enabled; uint32_t state; #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct net_stats_eth stats; #endif }; struct eth_ivshmem_cfg_data { const struct device *ivshmem; const char *name; void (*generate_mac_addr)(uint8_t mac_addr[6]); }; #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *eth_ivshmem_get_stats(const struct device *dev) { struct eth_ivshmem_dev_data *dev_data = dev->data; return &dev_data->stats; } #endif static int eth_ivshmem_start(const struct device *dev) { struct eth_ivshmem_dev_data *dev_data = dev->data; dev_data->enabled = true; /* Wake up thread to check/update state */ k_poll_signal_raise(&dev_data->poll_signal, 0); return 0; } static int eth_ivshmem_stop(const struct device *dev) { struct eth_ivshmem_dev_data *dev_data = dev->data; dev_data->enabled = false; /* Wake up thread to check/update state */ k_poll_signal_raise(&dev_data->poll_signal, 0); return 0; } static enum ethernet_hw_caps eth_ivshmem_caps(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | 
ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T; } static int eth_ivshmem_send(const struct device *dev, struct net_pkt *pkt) { struct eth_ivshmem_dev_data *dev_data = dev->data; const struct eth_ivshmem_cfg_data *cfg_data = dev->config; size_t len = net_pkt_get_len(pkt); void *data; int res = eth_ivshmem_queue_tx_get_buff(&dev_data->ivshmem_queue, &data, len); if (res != 0) { LOG_ERR("Failed to allocate tx buffer"); eth_stats_update_errors_tx(dev_data->iface); return res; } if (net_pkt_read(pkt, data, len)) { LOG_ERR("Failed to read tx packet"); eth_stats_update_errors_tx(dev_data->iface); return -EIO; } res = eth_ivshmem_queue_tx_commit_buff(&dev_data->ivshmem_queue); if (res == 0) { /* Notify peer */ ivshmem_int_peer(cfg_data->ivshmem, dev_data->peer_id, dev_data->tx_rx_vector); } return res; } static struct net_pkt *eth_ivshmem_rx(const struct device *dev) { struct eth_ivshmem_dev_data *dev_data = dev->data; const struct eth_ivshmem_cfg_data *cfg_data = dev->config; const void *rx_data; size_t rx_len; int res = eth_ivshmem_queue_rx(&dev_data->ivshmem_queue, &rx_data, &rx_len); if (res != 0) { if (res != -EWOULDBLOCK) { LOG_ERR("Queue RX failed"); eth_stats_update_errors_rx(dev_data->iface); } return NULL; } struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer( dev_data->iface, rx_len, AF_UNSPEC, 0, K_MSEC(100)); if (pkt == NULL) { LOG_ERR("Failed to allocate rx buffer"); eth_stats_update_errors_rx(dev_data->iface); goto dequeue; } if (net_pkt_write(pkt, rx_data, rx_len) != 0) { LOG_ERR("Failed to write rx packet"); eth_stats_update_errors_rx(dev_data->iface); net_pkt_unref(pkt); } dequeue: if (eth_ivshmem_queue_rx_complete(&dev_data->ivshmem_queue) == 0) { /* Notify peer */ ivshmem_int_peer(cfg_data->ivshmem, dev_data->peer_id, dev_data->tx_rx_vector); } return pkt; } static void eth_ivshmem_set_state(const struct device *dev, uint32_t state) { struct eth_ivshmem_dev_data *dev_data = dev->data; const struct eth_ivshmem_cfg_data *cfg_data = dev->config; 
LOG_DBG("State update: %s -> %s", eth_ivshmem_state_names[dev_data->state], eth_ivshmem_state_names[state]); dev_data->state = state; ivshmem_set_state(cfg_data->ivshmem, state); } static void eth_ivshmem_state_update(const struct device *dev) { struct eth_ivshmem_dev_data *dev_data = dev->data; const struct eth_ivshmem_cfg_data *cfg_data = dev->config; uint32_t peer_state = ivshmem_get_state(cfg_data->ivshmem, dev_data->peer_id); switch (dev_data->state) { case ETH_IVSHMEM_STATE_RESET: switch (peer_state) { case ETH_IVSHMEM_STATE_RESET: case ETH_IVSHMEM_STATE_INIT: eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_INIT); break; default: /* Wait for peer to reset */ break; } break; case ETH_IVSHMEM_STATE_INIT: if (dev_data->iface == NULL || peer_state == ETH_IVSHMEM_STATE_RESET) { /* Peer is not ready for init */ break; } eth_ivshmem_queue_reset(&dev_data->ivshmem_queue); eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_READY); break; case ETH_IVSHMEM_STATE_READY: case ETH_IVSHMEM_STATE_RUN: switch (peer_state) { case ETH_IVSHMEM_STATE_RESET: net_eth_carrier_off(dev_data->iface); eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RESET); break; case ETH_IVSHMEM_STATE_READY: case ETH_IVSHMEM_STATE_RUN: if (dev_data->enabled && dev_data->state == ETH_IVSHMEM_STATE_READY) { eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RUN); net_eth_carrier_on(dev_data->iface); } else if (!dev_data->enabled && dev_data->state == ETH_IVSHMEM_STATE_RUN) { net_eth_carrier_off(dev_data->iface); eth_ivshmem_set_state(dev, ETH_IVSHMEM_STATE_RESET); } break; } break; } } FUNC_NORETURN static void eth_ivshmem_thread(void *arg1, void *arg2, void *arg3) { const struct device *dev = arg1; struct eth_ivshmem_dev_data *dev_data = dev->data; struct k_poll_event poll_event; ARG_UNUSED(arg2); ARG_UNUSED(arg3); k_poll_event_init(&poll_event, K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &dev_data->poll_signal); while (true) { k_poll(&poll_event, 1, K_FOREVER); poll_event.signal->signaled = 0; poll_event.state = 
K_POLL_STATE_NOT_READY; eth_ivshmem_state_update(dev); if (dev_data->state != ETH_IVSHMEM_STATE_RUN) { continue; } while (true) { struct net_pkt *pkt = eth_ivshmem_rx(dev); if (pkt == NULL) { break; } if (net_recv_data(dev_data->iface, pkt) < 0) { /* Upper layers are not ready to receive packets */ net_pkt_unref(pkt); } k_yield(); }; } } int eth_ivshmem_initialize(const struct device *dev) { struct eth_ivshmem_dev_data *dev_data = dev->data; const struct eth_ivshmem_cfg_data *cfg_data = dev->config; int res; k_poll_signal_init(&dev_data->poll_signal); if (!device_is_ready(cfg_data->ivshmem)) { LOG_ERR("ivshmem device not ready"); return -ENODEV; } uint16_t protocol = ivshmem_get_protocol(cfg_data->ivshmem); if (protocol != IVSHMEM_V2_PROTO_NET) { LOG_ERR("Invalid ivshmem protocol %hu", protocol); return -EINVAL; } uint32_t id = ivshmem_get_id(cfg_data->ivshmem); uint32_t max_peers = ivshmem_get_max_peers(cfg_data->ivshmem); LOG_INF("ivshmem: id %u, max_peers %u", id, max_peers); if (id > 1) { LOG_ERR("Invalid ivshmem ID %u", id); return -EINVAL; } if (max_peers != 2) { LOG_ERR("Invalid ivshmem max peers %u", max_peers); return -EINVAL; } dev_data->peer_id = (id == 0) ? 
1 : 0; uintptr_t output_sections[2]; size_t output_section_size = ivshmem_get_output_mem_section( cfg_data->ivshmem, 0, &output_sections[0]); ivshmem_get_output_mem_section( cfg_data->ivshmem, 1, &output_sections[1]); res = eth_ivshmem_queue_init( &dev_data->ivshmem_queue, output_sections[id], output_sections[dev_data->peer_id], output_section_size); if (res != 0) { LOG_ERR("Failed to init ivshmem queue"); return res; } LOG_INF("shmem queue: desc len 0x%hX, header size 0x%X, data size 0x%X", dev_data->ivshmem_queue.desc_max_len, dev_data->ivshmem_queue.vring_header_size, dev_data->ivshmem_queue.vring_data_max_len); uint16_t n_vectors = ivshmem_get_vectors(cfg_data->ivshmem); /* For simplicity, state and TX/RX vectors do the same thing */ ivshmem_register_handler(cfg_data->ivshmem, &dev_data->poll_signal, 0); dev_data->tx_rx_vector = 0; if (n_vectors == 0) { LOG_ERR("Error no ivshmem ISR vectors"); return -EINVAL; } else if (n_vectors > 1) { ivshmem_register_handler(cfg_data->ivshmem, &dev_data->poll_signal, 1); dev_data->tx_rx_vector = 1; } ivshmem_set_state(cfg_data->ivshmem, ETH_IVSHMEM_STATE_RESET); cfg_data->generate_mac_addr(dev_data->mac_addr); LOG_INF("MAC Address %02X:%02X:%02X:%02X:%02X:%02X", dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); k_tid_t tid = k_thread_create( &dev_data->thread, dev_data->thread_stack, K_KERNEL_STACK_SIZEOF(dev_data->thread_stack), eth_ivshmem_thread, (void *) dev, NULL, NULL, CONFIG_ETH_IVSHMEM_THREAD_PRIORITY, K_ESSENTIAL, K_NO_WAIT); k_thread_name_set(tid, cfg_data->name); ivshmem_enable_interrupts(cfg_data->ivshmem, true); /* Wake up thread to check/update state */ k_poll_signal_raise(&dev_data->poll_signal, 0); return 0; } static void eth_ivshmem_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_ivshmem_dev_data *dev_data = dev->data; if (dev_data->iface == NULL) { dev_data->iface = 
iface; } net_if_set_link_addr( iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); ethernet_init(iface); /* Do not start the interface until PHY link is up */ net_if_carrier_off(iface); /* Wake up thread to check/update state */ k_poll_signal_raise(&dev_data->poll_signal, 0); } static const struct ethernet_api eth_ivshmem_api = { .iface_api.init = eth_ivshmem_iface_init, #if defined(CONFIG_NET_STATISTICS_ETHERNET) .get_stats = eth_ivshmem_get_stats, #endif .start = eth_ivshmem_start, .stop = eth_ivshmem_stop, .get_capabilities = eth_ivshmem_caps, .send = eth_ivshmem_send, }; #define ETH_IVSHMEM_RANDOM_MAC_ADDR(inst) \ static void generate_mac_addr_##inst(uint8_t mac_addr[6]) \ { \ sys_rand_get(mac_addr, 3U); \ /* Clear multicast bit */ \ mac_addr[0] &= 0xFE; \ gen_random_mac(mac_addr, mac_addr[0], mac_addr[1], mac_addr[2]); \ } #define ETH_IVSHMEM_LOCAL_MAC_ADDR(inst) \ static void generate_mac_addr_##inst(uint8_t mac_addr[6]) \ { \ const uint8_t addr[6] = DT_INST_PROP(0, local_mac_address); \ memcpy(mac_addr, addr, sizeof(addr)); \ } #define ETH_IVSHMEM_GENERATE_MAC_ADDR(inst) \ BUILD_ASSERT(DT_INST_PROP(inst, zephyr_random_mac_address) || \ NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(inst)), \ "eth_ivshmem requires either a fixed or random mac address"); \ COND_CODE_1(DT_INST_PROP(inst, zephyr_random_mac_address), \ (ETH_IVSHMEM_RANDOM_MAC_ADDR(inst)), \ (ETH_IVSHMEM_LOCAL_MAC_ADDR(inst))) #define ETH_IVSHMEM_INIT(inst) \ ETH_IVSHMEM_GENERATE_MAC_ADDR(inst); \ static struct eth_ivshmem_dev_data eth_ivshmem_dev_##inst = {}; \ static const struct eth_ivshmem_cfg_data eth_ivshmem_cfg_##inst = { \ .ivshmem = DEVICE_DT_GET(DT_INST_PHANDLE(inst, ivshmem_v2)), \ .name = "ivshmem_eth" STRINGIFY(inst), \ .generate_mac_addr = generate_mac_addr_##inst, \ }; \ ETH_NET_DEVICE_DT_INST_DEFINE(inst, \ eth_ivshmem_initialize, \ NULL, \ &eth_ivshmem_dev_##inst, \ &eth_ivshmem_cfg_##inst, \ CONFIG_ETH_INIT_PRIORITY, \ &eth_ivshmem_api, \ NET_ETH_MTU); 
DT_INST_FOREACH_STATUS_OKAY(ETH_IVSHMEM_INIT); ```
/content/code_sandbox/drivers/ethernet/eth_ivshmem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,541
```unknown
# Ethernet drivers configuration options

menuconfig ETH_DRIVER
	bool "Ethernet drivers"
	depends on NETWORKING
	depends on !NET_TEST
	default y if NET_L2_ETHERNET

if ETH_DRIVER

config ETH_DRIVER_RAW_MODE
	bool "Ethernet driver without the MAC stack"
	select NET_RAW_MODE
	depends on !NET_L2_ETHERNET
	help
	  This option enables using the drivers in a so-called "raw" mode,
	  i.e. without a MAC stack (the net L2 layer for ethernet will not
	  be built).

module = ETHERNET
module-dep = LOG
module-str = Log level for Ethernet driver
module-help = Sets log level for Ethernet Device Drivers.
source "subsys/net/Kconfig.template.log_config.net"

config ETH_QEMU_IFACE_NAME
	string "Network interface name for QEMU"
	default "zeth"
	depends on NET_QEMU_ETHERNET
	help
	  The network interface name for QEMU. This value is given as
	  a parameter to -nic qemu command line option. The network
	  interface must be created before starting QEMU. The net-setup.sh
	  script from net-tools project can be used to create the network
	  interface.

config ETH_QEMU_EXTRA_ARGS
	string "Extra arguments to QEMU -nic option"
	depends on NET_QEMU_ETHERNET
	default ""
	help
	  Extra arguments passed to QEMU -nic option when Ethernet
	  Networking is enabled. Typically this is used to set the
	  network MAC address of Zephyr instance. This option can
	  contain multiple QEMU option arguments. Each QEMU argument
	  must be separated by comma "," and no spaces between
	  arguments. Example: "mac=02:03:04:f0:0d:01" or
	  "mac=02:03:04:f0:0d:01,downscript=no"

source "drivers/ethernet/Kconfig.enc28j60"
source "drivers/ethernet/Kconfig.enc424j600"
source "drivers/ethernet/Kconfig.esp32"
source "drivers/ethernet/Kconfig.e1000"
source "drivers/ethernet/Kconfig.sam_gmac"
source "drivers/ethernet/Kconfig.stm32_hal"
source "drivers/ethernet/Kconfig.dwmac"
source "drivers/ethernet/Kconfig.smsc911x"
source "drivers/ethernet/Kconfig.native_posix"
source "drivers/ethernet/Kconfig.stellaris"
source "drivers/ethernet/Kconfig.litex"
source "drivers/ethernet/Kconfig.gecko"
source "drivers/ethernet/Kconfig.w5500"
source "drivers/ethernet/Kconfig.dsa"
source "drivers/ethernet/Kconfig.xlnx_gem"
source "drivers/ethernet/Kconfig.cyclonev"
source "drivers/ethernet/Kconfig.nxp_s32_netc"
source "drivers/ethernet/Kconfig.nxp_s32_gmac"
source "drivers/ethernet/Kconfig.smsc91x"
source "drivers/ethernet/Kconfig.ivshmem"
source "drivers/ethernet/Kconfig.adin2111"
source "drivers/ethernet/Kconfig.numaker"
source "drivers/ethernet/Kconfig.lan865x"
source "drivers/ethernet/Kconfig.xmc4xxx"

source "drivers/ethernet/eth_nxp_enet_qos/Kconfig"

source "drivers/ethernet/phy/Kconfig"

source "drivers/ethernet/nxp_enet/Kconfig"

endif # "Ethernet Drivers"

config ETH_INIT_PRIORITY
	int "Ethernet driver init priority"
	default 80
	depends on NET_L2_ETHERNET || ETH_DRIVER
	help
	  Ethernet device driver initialization priority.
	  Do not mess with it unless you know what you are doing.
	  Note that the priority needs to be lower than the net stack
	  so that it can start before the networking sub-system.
```
/content/code_sandbox/drivers/ethernet/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
817
```objective-c /* */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_STM32_HAL_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_STM32_HAL_PRIV_H_ #include <zephyr/kernel.h> #include <zephyr/types.h> /* Naming of the ETH PTP Config Status changes depending on the stm32 serie */ #if defined(CONFIG_SOC_SERIES_STM32F4X) #define ETH_STM32_PTP_CONFIGURED HAL_ETH_PTP_CONFIGURATED #define ETH_STM32_PTP_NOT_CONFIGURED HAL_ETH_PTP_NOT_CONFIGURATED #else #define ETH_STM32_PTP_CONFIGURED HAL_ETH_PTP_CONFIGURED #define ETH_STM32_PTP_NOT_CONFIGURED HAL_ETH_PTP_NOT_CONFIGURED #endif /* stm32F7x or sm32F4x */ #define ST_OUI_B0 0x00 #define ST_OUI_B1 0x80 #define ST_OUI_B2 0xE1 #define ETH_STM32_HAL_MTU NET_ETH_MTU #define ETH_STM32_HAL_FRAME_SIZE_MAX (ETH_STM32_HAL_MTU + 18) /* Definition of the Ethernet driver buffers size and count */ #define ETH_STM32_RX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for receive */ #define ETH_STM32_TX_BUF_SIZE ETH_MAX_PACKET_SIZE /* buffer size for transmit */ /* Device constant configuration parameters */ struct eth_stm32_hal_dev_cfg { void (*config_func)(void); struct stm32_pclken pclken; struct stm32_pclken pclken_rx; struct stm32_pclken pclken_tx; #if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp) struct stm32_pclken pclken_ptp; #endif const struct pinctrl_dev_config *pcfg; }; /* Device run time data */ struct eth_stm32_hal_dev_data { struct net_if *iface; uint8_t mac_addr[6]; ETH_HandleTypeDef heth; /* clock device */ const struct device *clock; struct k_mutex tx_mutex; struct k_sem rx_int_sem; #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_ETH_STM32_HAL_API_V2) struct k_sem tx_int_sem; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2*/ K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_STM32_HAL_RX_THREAD_STACK_SIZE); struct k_thread rx_thread; bool link_up; #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER) uint8_t hash_index_cnt[64]; #endif /* 
CONFIG_ETH_STM32_MULTICAST_FILTER */ #if defined(CONFIG_PTP_CLOCK_STM32_HAL) const struct device *ptp_clock; float clk_ratio; float clk_ratio_adj; #endif /* CONFIG_PTP_CLOCK_STM32_HAL */ #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct net_stats_eth stats; #endif }; #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_STM32_HAL_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_stm32_hal_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
659
```c /** * @file * @brief Ethernet Driver raw mode * * This file contains a collection of functions called from the ethernet drivers * to the missing upper layer. */ /* * */ #include <zephyr/net/ethernet.h> __weak void ethernet_init(struct net_if *iface) { ARG_UNUSED(iface); } __weak void net_eth_carrier_on(struct net_if *iface) { ARG_UNUSED(iface); } __weak void net_eth_carrier_off(struct net_if *iface) { ARG_UNUSED(iface); } __weak int net_recv_data(struct net_if *iface, struct net_pkt *pkt) { ARG_UNUSED(iface); ARG_UNUSED(pkt); return -ENOTSUP; } __weak void net_if_carrier_on(struct net_if *iface) { ARG_UNUSED(iface); } __weak void net_if_carrier_off(struct net_if *iface) { ARG_UNUSED(iface); } ```
/content/code_sandbox/drivers/ethernet/eth_raw.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
195
```objective-c
/*
 *
 */

/** @file
 * @brief Private functions for native posix ethernet driver.
 */

#ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_NATIVE_POSIX_PRIV_H_
#define ZEPHYR_DRIVERS_ETHERNET_ETH_NATIVE_POSIX_PRIV_H_

/* Create a host TAP/TUN interface; returns an fd or a negative errno */
int eth_iface_create(const char *dev_name, const char *if_name, bool tun_only);

/* Tear down a previously created host interface */
int eth_iface_remove(int fd);

/* Block until data is readable on the host interface fd */
int eth_wait_data(int fd);

/* Read the host wall clock (seconds + nanoseconds) */
int eth_clock_gettime(uint64_t *second, uint32_t *nanosecond);

/* Enable/disable promiscuous mode on the named host interface */
int eth_promisc_mode(const char *if_name, bool enable);

#endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_NATIVE_POSIX_PRIV_H_ */
```
/content/code_sandbox/drivers/ethernet/eth_native_posix_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
139
```c /* * */ #include "oa_tc6.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(oa_tc6, CONFIG_ETHERNET_LOG_LEVEL); /* * When IPv6 support enabled - the minimal size of network buffer * shall be at least 128 bytes (i.e. default value). */ #if defined(CONFIG_NET_IPV6) && (CONFIG_NET_BUF_DATA_SIZE < 128) #error IPv6 requires at least 128 bytes of continuous data to handle headers! #endif int oa_tc6_reg_read(struct oa_tc6 *tc6, const uint32_t reg, uint32_t *val) { uint8_t buf[OA_TC6_HDR_SIZE + 12] = { 0 }; struct spi_buf tx_buf = { .buf = buf, .len = sizeof(buf) }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; struct spi_buf rx_buf = { .buf = buf, .len = sizeof(buf) }; const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 }; uint32_t rv, rvn, hdr_bkp, *hdr = (uint32_t *) &buf[0]; int ret = 0; /* * Buffers are allocated for protected (larger) case (by 4 bytes). * When non-protected case - we need to decrase them */ if (!tc6->protected) { tx_buf.len -= sizeof(rvn); rx_buf.len -= sizeof(rvn); } *hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) | FIELD_PREP(OA_CTRL_HDR_WNR, 0) | FIELD_PREP(OA_CTRL_HDR_AID, 0) | FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) | FIELD_PREP(OA_CTRL_HDR_ADDR, reg) | FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To read single register len = 0 */ *hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(*hdr)); hdr_bkp = *hdr; *hdr = sys_cpu_to_be32(*hdr); ret = spi_transceive_dt(tc6->spi, &tx, &rx); if (ret < 0) { return ret; } /* Check if echoed control command header is correct */ rv = sys_be32_to_cpu(*(uint32_t *)&buf[4]); if (hdr_bkp != rv) { LOG_ERR("Header transmission error!"); return -1; } rv = sys_be32_to_cpu(*(uint32_t *)&buf[8]); /* In protected mode read data is followed by its compliment value */ if (tc6->protected) { rvn = sys_be32_to_cpu(*(uint32_t *)&buf[12]); if (rv != ~rvn) { LOG_ERR("Protected mode transmission error!"); return -1; } } *val = rv; return ret; } int oa_tc6_reg_write(struct oa_tc6 *tc6, const uint32_t reg, 
uint32_t val) { uint8_t buf_tx[OA_TC6_HDR_SIZE + 12] = { 0 }; uint8_t buf_rx[OA_TC6_HDR_SIZE + 12] = { 0 }; struct spi_buf tx_buf = { .buf = buf_tx, .len = sizeof(buf_tx) }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; struct spi_buf rx_buf = { .buf = buf_rx, .len = sizeof(buf_rx) }; const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 }; uint32_t rv, rvn, hdr_bkp, *hdr = (uint32_t *) &buf_tx[0]; int ret; /* * Buffers are allocated for protected (larger) case (by 4 bytes). * When non-protected case - we need to decrase them */ if (!tc6->protected) { tx_buf.len -= sizeof(rvn); rx_buf.len -= sizeof(rvn); } *hdr = FIELD_PREP(OA_CTRL_HDR_DNC, 0) | FIELD_PREP(OA_CTRL_HDR_WNR, 1) | FIELD_PREP(OA_CTRL_HDR_AID, 0) | FIELD_PREP(OA_CTRL_HDR_MMS, reg >> 16) | FIELD_PREP(OA_CTRL_HDR_ADDR, reg) | FIELD_PREP(OA_CTRL_HDR_LEN, 0); /* To read single register len = 0 */ *hdr |= FIELD_PREP(OA_CTRL_HDR_P, oa_tc6_get_parity(*hdr)); hdr_bkp = *hdr; *hdr = sys_cpu_to_be32(*hdr); *(uint32_t *)&buf_tx[4] = sys_cpu_to_be32(val); if (tc6->protected) { *(uint32_t *)&buf_tx[8] = sys_be32_to_cpu(~val); } ret = spi_transceive_dt(tc6->spi, &tx, &rx); if (ret < 0) { return ret; } /* Check if echoed control command header is correct */ rv = sys_be32_to_cpu(*(uint32_t *)&buf_rx[4]); if (hdr_bkp != rv) { LOG_ERR("Header transmission error!"); return -1; } /* Check if echoed value is correct */ rv = sys_be32_to_cpu(*(uint32_t *)&buf_rx[8]); if (val != rv) { LOG_ERR("Header transmission error!"); return -1; } /* * In protected mode check if read value is followed by its * compliment value */ if (tc6->protected) { rvn = sys_be32_to_cpu(*(uint32_t *)&buf_rx[12]); if (val != ~rvn) { LOG_ERR("Protected mode transmission error!"); return -1; } } return ret; } int oa_tc6_reg_rmw(struct oa_tc6 *tc6, const uint32_t reg, uint32_t mask, uint32_t val) { uint32_t tmp; int ret; ret = oa_tc6_reg_read(tc6, reg, &tmp); if (ret < 0) { return ret; } tmp &= ~mask; if (val) { tmp |= val; } 
return oa_tc6_reg_write(tc6, reg, tmp); } int oa_tc6_set_protected_ctrl(struct oa_tc6 *tc6, bool prote) { int ret = oa_tc6_reg_rmw(tc6, OA_CONFIG0, OA_CONFIG0_PROTE, prote ? OA_CONFIG0_PROTE : 0); if (ret < 0) { return ret; } tc6->protected = prote; return 0; } int oa_tc6_send_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt) { uint16_t len = net_pkt_get_len(pkt); uint8_t oa_tx[tc6->cps]; uint32_t hdr, ftr; uint8_t chunks, i; int ret; if (len == 0) { return -ENODATA; } chunks = len / tc6->cps; if (len % tc6->cps) { chunks++; } /* Check if LAN865x has any free internal buffer space */ if (chunks > tc6->txc) { return -EIO; } /* Transform struct net_pkt content into chunks */ for (i = 1; i <= chunks; i++) { hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) | FIELD_PREP(OA_DATA_HDR_DV, 1) | FIELD_PREP(OA_DATA_HDR_NORX, 1) | FIELD_PREP(OA_DATA_HDR_SWO, 0); if (i == 1) { hdr |= FIELD_PREP(OA_DATA_HDR_SV, 1); } if (i == chunks) { hdr |= FIELD_PREP(OA_DATA_HDR_EBO, len - 1) | FIELD_PREP(OA_DATA_HDR_EV, 1); } hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr)); ret = net_pkt_read(pkt, oa_tx, len > tc6->cps ? tc6->cps : len); if (ret < 0) { return ret; } ret = oa_tc6_chunk_spi_transfer(tc6, NULL, oa_tx, hdr, &ftr); if (ret < 0) { return ret; } len -= tc6->cps; } return 0; } int oa_tc6_check_status(struct oa_tc6 *tc6) { uint32_t sts; if (!tc6->sync) { LOG_ERR("SYNC: Configuration lost, reset IC!"); return -EIO; } if (tc6->exst) { /* * Just clear any pending interrupts. * The RESETC is handled separately as it requires per * device configuration. 
*/ oa_tc6_reg_read(tc6, OA_STATUS0, &sts); if (sts != 0) { oa_tc6_reg_write(tc6, OA_STATUS0, sts); LOG_WRN("EXST: OA_STATUS0: 0x%x", sts); } oa_tc6_reg_read(tc6, OA_STATUS1, &sts); if (sts != 0) { oa_tc6_reg_write(tc6, OA_STATUS1, sts); LOG_WRN("EXST: OA_STATUS1: 0x%x", sts); } } return 0; } static int oa_tc6_update_status(struct oa_tc6 *tc6, uint32_t ftr) { if (oa_tc6_get_parity(ftr)) { LOG_DBG("OA Status Update: Footer parity error!"); return -EIO; } tc6->exst = FIELD_GET(OA_DATA_FTR_EXST, ftr); tc6->sync = FIELD_GET(OA_DATA_FTR_SYNC, ftr); tc6->rca = FIELD_GET(OA_DATA_FTR_RCA, ftr); tc6->txc = FIELD_GET(OA_DATA_FTR_TXC, ftr); return 0; } int oa_tc6_chunk_spi_transfer(struct oa_tc6 *tc6, uint8_t *buf_rx, uint8_t *buf_tx, uint32_t hdr, uint32_t *ftr) { struct spi_buf tx_buf[2]; struct spi_buf rx_buf[2]; struct spi_buf_set tx; struct spi_buf_set rx; int ret; hdr = sys_cpu_to_be32(hdr); tx_buf[0].buf = &hdr; tx_buf[0].len = sizeof(hdr); tx_buf[1].buf = buf_tx; tx_buf[1].len = tc6->cps; tx.buffers = tx_buf; tx.count = ARRAY_SIZE(tx_buf); rx_buf[0].buf = buf_rx; rx_buf[0].len = tc6->cps; rx_buf[1].buf = ftr; rx_buf[1].len = sizeof(*ftr); rx.buffers = rx_buf; rx.count = ARRAY_SIZE(rx_buf); ret = spi_transceive_dt(tc6->spi, &tx, &rx); if (ret < 0) { return ret; } *ftr = sys_be32_to_cpu(*ftr); return oa_tc6_update_status(tc6, *ftr); } int oa_tc6_read_status(struct oa_tc6 *tc6, uint32_t *ftr) { uint32_t hdr; hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1) | FIELD_PREP(OA_DATA_HDR_DV, 0) | FIELD_PREP(OA_DATA_HDR_NORX, 1); hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr)); return oa_tc6_chunk_spi_transfer(tc6, NULL, NULL, hdr, ftr); } int oa_tc6_read_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt) { const uint16_t buf_rx_size = CONFIG_NET_BUF_DATA_SIZE; struct net_buf *buf_rx = NULL; uint32_t buf_rx_used = 0; uint32_t hdr, ftr; uint8_t sbo, ebo; int ret; /* * Special case - append already received data (extracted from previous * chunk) to new packet. 
* * This code is NOT used when OA_CONFIG0 RFA [13:12] is set to 01 * (ZAREFE) - so received ethernet frames will always start on the * beginning of new chunks. */ if (tc6->concat_buf) { net_pkt_append_buffer(pkt, tc6->concat_buf); tc6->concat_buf = NULL; } do { if (!buf_rx) { buf_rx = net_pkt_get_frag(pkt, buf_rx_size, OA_TC6_BUF_ALLOC_TIMEOUT); if (!buf_rx) { LOG_ERR("OA RX: Can't allocate RX buffer fordata!"); return -ENOMEM; } } hdr = FIELD_PREP(OA_DATA_HDR_DNC, 1); hdr |= FIELD_PREP(OA_DATA_HDR_P, oa_tc6_get_parity(hdr)); ret = oa_tc6_chunk_spi_transfer(tc6, buf_rx->data + buf_rx_used, NULL, hdr, &ftr); if (ret < 0) { LOG_ERR("OA RX: transmission error: %d!", ret); goto unref_buf; } ret = -EIO; if (oa_tc6_get_parity(ftr)) { LOG_ERR("OA RX: Footer parity error!"); goto unref_buf; } if (!FIELD_GET(OA_DATA_FTR_SYNC, ftr)) { LOG_ERR("OA RX: Configuration not SYNC'ed!"); goto unref_buf; } if (!FIELD_GET(OA_DATA_FTR_DV, ftr)) { LOG_DBG("OA RX: Data chunk not valid, skip!"); goto unref_buf; } sbo = FIELD_GET(OA_DATA_FTR_SWO, ftr) * sizeof(uint32_t); ebo = FIELD_GET(OA_DATA_FTR_EBO, ftr) + 1; if (FIELD_GET(OA_DATA_FTR_SV, ftr)) { /* * Adjust beginning of the buffer with SWO only when * we DO NOT have two frames concatenated together * in one chunk. */ if (!(FIELD_GET(OA_DATA_FTR_EV, ftr) && (ebo <= sbo))) { if (sbo) { net_buf_pull(buf_rx, sbo); } } } if (FIELD_GET(OA_DATA_FTR_EV, ftr)) { /* * Check if received frame shall be dropped - i.e. MAC has * detected error condition, which shall result in frame drop * by the SPI host. */ if (FIELD_GET(OA_DATA_FTR_FD, ftr)) { ret = -EIO; goto unref_buf; } /* * Concatenation of frames in a single chunk - one frame ends * and second one starts just afterwards (ebo == sbo). 
*/ if (FIELD_GET(OA_DATA_FTR_SV, ftr) && (ebo <= sbo)) { tc6->concat_buf = net_buf_clone(buf_rx, OA_TC6_BUF_ALLOC_TIMEOUT); if (!tc6->concat_buf) { LOG_ERR("OA RX: Can't allocate RX buffer for data!"); ret = -ENOMEM; goto unref_buf; } net_buf_pull(tc6->concat_buf, sbo); } /* Set final size of the buffer */ buf_rx_used += ebo; buf_rx->len = buf_rx_used; net_pkt_append_buffer(pkt, buf_rx); /* * Exit when complete packet is read and added to * struct net_pkt */ break; } else { buf_rx_used += tc6->cps; if ((buf_rx_size - buf_rx_used) < tc6->cps) { net_pkt_append_buffer(pkt, buf_rx); buf_rx->len = buf_rx_used; buf_rx_used = 0; buf_rx = NULL; } } } while (tc6->rca > 0); return 0; unref_buf: net_buf_unref(buf_rx); return ret; } ```
/content/code_sandbox/drivers/ethernet/oa_tc6.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,651
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_NUMAKER_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_NUMAKER_PRIV_H_ #include <zephyr/types.h> #define NU_ETH_MAX_FLEN (1518) #define NU_HWADDR_SIZE (6) #define NU_ETH_MTU_SIZE 1500 #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_NUMAKER_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_numaker_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
93
```objective-c /* */ /** @file * @brief Atmel SAM MCU family Ethernet MAC (GMAC) driver. */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_SAM_GMAC_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_SAM_GMAC_PRIV_H_ #include <zephyr/types.h> #define ATMEL_OUI_B0 0x00 #define ATMEL_OUI_B1 0x04 #define ATMEL_OUI_B2 0x25 /* This option enables support to push multiple packets to the DMA engine. * This currently doesn't work given the current version of net_pkt or * net_buf does not allowed access from multiple threads. This option is * therefore currently disabled. */ #define GMAC_MULTIPLE_TX_PACKETS 0 #define GMAC_MTU NET_ETH_MTU #define GMAC_FRAME_SIZE_MAX (GMAC_MTU + 18) /** Cache alignment */ #define GMAC_DCACHE_ALIGNMENT 32 /** Memory alignment of the RX/TX Buffer Descriptor List */ #define GMAC_DESC_ALIGNMENT 4 /** Total number of queues supported by GMAC hardware module */ #define GMAC_QUEUE_NUM DT_INST_PROP(0, num_queues) #define GMAC_PRIORITY_QUEUE_NUM (GMAC_QUEUE_NUM - 1) #if (GMAC_PRIORITY_QUEUE_NUM >= 1) BUILD_ASSERT(ARRAY_SIZE(GMAC->GMAC_TBQBAPQ) + 1 == GMAC_QUEUE_NUM, "GMAC_QUEUE_NUM doesn't match soc header"); #endif /** Number of priority queues used */ #define GMAC_ACTIVE_QUEUE_NUM (CONFIG_ETH_SAM_GMAC_QUEUES) #define GMAC_ACTIVE_PRIORITY_QUEUE_NUM (GMAC_ACTIVE_QUEUE_NUM - 1) /** RX descriptors count for main queue */ #define MAIN_QUEUE_RX_DESC_COUNT (CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT + 1) /** TX descriptors count for main queue */ #define MAIN_QUEUE_TX_DESC_COUNT (CONFIG_NET_BUF_TX_COUNT + 1) /** RX/TX descriptors count for priority queues */ #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 #define PRIORITY_QUEUE1_RX_DESC_COUNT MAIN_QUEUE_RX_DESC_COUNT #define PRIORITY_QUEUE1_TX_DESC_COUNT MAIN_QUEUE_TX_DESC_COUNT #else #define PRIORITY_QUEUE1_RX_DESC_COUNT 1 #define PRIORITY_QUEUE1_TX_DESC_COUNT 1 #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 #define PRIORITY_QUEUE2_RX_DESC_COUNT MAIN_QUEUE_RX_DESC_COUNT #define PRIORITY_QUEUE2_TX_DESC_COUNT MAIN_QUEUE_TX_DESC_COUNT #else 
#define PRIORITY_QUEUE2_RX_DESC_COUNT 1 #define PRIORITY_QUEUE2_TX_DESC_COUNT 1 #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 #define PRIORITY_QUEUE3_RX_DESC_COUNT MAIN_QUEUE_RX_DESC_COUNT #define PRIORITY_QUEUE3_TX_DESC_COUNT MAIN_QUEUE_TX_DESC_COUNT #else #define PRIORITY_QUEUE3_RX_DESC_COUNT 1 #define PRIORITY_QUEUE3_TX_DESC_COUNT 1 #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 #define PRIORITY_QUEUE4_RX_DESC_COUNT MAIN_QUEUE_RX_DESC_COUNT #define PRIORITY_QUEUE4_TX_DESC_COUNT MAIN_QUEUE_TX_DESC_COUNT #else #define PRIORITY_QUEUE4_RX_DESC_COUNT 1 #define PRIORITY_QUEUE4_TX_DESC_COUNT 1 #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 #define PRIORITY_QUEUE5_RX_DESC_COUNT MAIN_QUEUE_RX_DESC_COUNT #define PRIORITY_QUEUE5_TX_DESC_COUNT MAIN_QUEUE_TX_DESC_COUNT #else #define PRIORITY_QUEUE5_RX_DESC_COUNT 1 #define PRIORITY_QUEUE5_TX_DESC_COUNT 1 #endif /* * Receive buffer descriptor bit field definitions */ /** Buffer ownership, needs to be 0 for the GMAC to write data to the buffer */ #define GMAC_RXW0_OWNERSHIP (0x1u << 0) /** Last descriptor in the receive buffer descriptor list */ #define GMAC_RXW0_WRAP (0x1u << 1) /** Address of beginning of buffer */ #define GMAC_RXW0_ADDR (0x3FFFFFFFu << 2) /** Receive frame length including FCS */ #define GMAC_RXW1_LEN (0x1FFFu << 0) /** FCS status */ #define GMAC_RXW1_FCS_STATUS (0x1u << 13) /** Start of frame */ #define GMAC_RXW1_SOF (0x1u << 14) /** End of frame */ #define GMAC_RXW1_EOF (0x1u << 15) /** Canonical Format Indicator */ #define GMAC_RXW1_CFI (0x1u << 16) /** VLAN priority (if VLAN detected) */ #define GMAC_RXW1_VLANPRIORITY (0x7u << 17) /** Priority tag detected */ #define GMAC_RXW1_PRIORITYDETECTED (0x1u << 20) /** VLAN tag detected */ #define GMAC_RXW1_VLANDETECTED (0x1u << 21) /** Type ID match */ #define GMAC_RXW1_TYPEIDMATCH (0x3u << 22) /** Type ID register match found */ #define GMAC_RXW1_TYPEIDFOUND (0x1u << 24) /** Specific Address Register match */ #define GMAC_RXW1_ADDRMATCH (0x3u << 25) /** 
Specific Address Register match found */ #define GMAC_RXW1_ADDRFOUND (0x1u << 27) /** Unicast hash match */ #define GMAC_RXW1_UNIHASHMATCH (0x1u << 29) /** Multicast hash match */ #define GMAC_RXW1_MULTIHASHMATCH (0x1u << 30) /** Global all ones broadcast address detected */ #define GMAC_RXW1_BROADCASTDETECTED (0x1u << 31) /* * Transmit buffer descriptor bit field definitions */ /** Transmit buffer length */ #define GMAC_TXW1_LEN (0x3FFFu << 0) /** Last buffer in the current frame */ #define GMAC_TXW1_LASTBUFFER (0x1u << 15) /** No CRC */ #define GMAC_TXW1_NOCRC (0x1u << 16) /** Transmit IP/TCP/UDP checksum generation offload errors */ #define GMAC_TXW1_CHKSUMERR (0x7u << 20) /** Late collision, transmit error detected */ #define GMAC_TXW1_LATECOLERR (0x1u << 26) /** Transmit frame corruption due to AHB error */ #define GMAC_TXW1_TRANSERR (0x1u << 27) /** Retry limit exceeded, transmit error detected */ #define GMAC_TXW1_RETRYEXC (0x1u << 29) /** Last descriptor in Transmit Descriptor list */ #define GMAC_TXW1_WRAP (0x1u << 30) /** Buffer used, must be 0 for the GMAC to read data to the transmit buffer */ #define GMAC_TXW1_USED (0x1u << 31) /* * Interrupt Status/Enable/Disable/Mask register bit field definitions */ #define GMAC_INT_RX_ERR_BITS \ (GMAC_IER_RXUBR | GMAC_IER_ROVR) #define GMAC_INT_TX_ERR_BITS \ (GMAC_IER_TUR | GMAC_IER_RLEX | GMAC_IER_TFC) #define GMAC_INT_EN_FLAGS \ (GMAC_IER_RCOMP | GMAC_INT_RX_ERR_BITS | \ GMAC_IER_TCOMP | GMAC_INT_TX_ERR_BITS | GMAC_IER_HRESP) #define GMAC_INTPQ_RX_ERR_BITS \ (GMAC_IERPQ_RXUBR | GMAC_IERPQ_ROVR) #define GMAC_INTPQ_TX_ERR_BITS \ (GMAC_IERPQ_RLEX | GMAC_IERPQ_TFC) #define GMAC_INTPQ_EN_FLAGS \ (GMAC_IERPQ_RCOMP | GMAC_INTPQ_RX_ERR_BITS | \ GMAC_IERPQ_TCOMP | GMAC_INTPQ_TX_ERR_BITS | GMAC_IERPQ_HRESP) /** GMAC Priority Queues DMA flags */ #if GMAC_PRIORITY_QUEUE_NUM >= 1 /* 4 kB Receiver Packet Buffer Memory Size */ /* 4 kB Transmitter Packet Buffer Memory Size */ /* Transmitter Checksum Generation Offload Enable */ 
#define GMAC_DMA_QUEUE_FLAGS \ (GMAC_DCFGR_RXBMS_FULL | GMAC_DCFGR_TXPBMS | \ GMAC_DCFGR_TXCOEN) #else #define GMAC_DMA_QUEUE_FLAGS (0) #endif /** List of GMAC queues */ enum queue_idx { GMAC_QUE_0, /** Main queue */ GMAC_QUE_1, /** Priority queue 1 */ GMAC_QUE_2, /** Priority queue 2 */ GMAC_QUE_3, /** Priority queue 3 */ GMAC_QUE_4, /** Priority queue 4 */ GMAC_QUE_5, /** Priority queue 5 */ }; #if (DT_INST_PROP(0, max_frame_size) == 1518) /* Maximum frame length is 1518 bytes */ #define GMAC_MAX_FRAME_SIZE 0 #elif (DT_INST_PROP(0, max_frame_size) == 1536) /* Enable Max Frame Size of 1536 */ #define GMAC_MAX_FRAME_SIZE GMAC_NCFGR_MAXFS #elif (DT_INST_PROP(0, max_frame_size) == 10240) /* Jumbo Frame Enable */ #define GMAC_MAX_FRAME_SIZE GMAC_NCFGR_JFRAME #else #error "GMAC_MAX_FRAME_SIZE is invalid, fix it at device tree." #endif /** Minimal ring buffer implementation */ struct ring_buf { uint32_t *buf; uint16_t len; uint16_t head; uint16_t tail; }; /** Receive/transmit buffer descriptor */ struct gmac_desc { uint32_t w0; uint32_t w1; }; /** Ring list of receive/transmit buffer descriptors */ struct gmac_desc_list { struct gmac_desc *buf; uint16_t len; uint16_t head; uint16_t tail; }; /** GMAC Queue data */ struct gmac_queue { struct gmac_desc_list rx_desc_list; struct gmac_desc_list tx_desc_list; #if GMAC_MULTIPLE_TX_PACKETS == 1 struct k_sem tx_desc_sem; #else struct k_sem tx_sem; #endif struct net_buf **rx_frag_list; #if GMAC_MULTIPLE_TX_PACKETS == 1 struct ring_buf tx_frag_list; #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) struct ring_buf tx_frames; #endif #endif /** Number of RX frames dropped by the driver */ volatile uint32_t err_rx_frames_dropped; /** Number of times receive queue was flushed */ volatile uint32_t err_rx_flushed_count; /** Number of times transmit queue was flushed */ volatile uint32_t err_tx_flushed_count; enum queue_idx que_idx; }; /* Device constant configuration parameters */ struct eth_sam_dev_cfg { Gmac *regs; #ifdef 
CONFIG_SOC_FAMILY_ATMEL_SAM const struct atmel_sam_pmc_config clock_cfg; #endif const struct pinctrl_dev_config *pcfg; void (*config_func)(void); const struct device *phy_dev; }; /* Device run time data */ struct eth_sam_dev_data { struct net_if *iface; #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) const struct device *ptp_clock; #endif uint8_t mac_addr[6]; bool link_up; struct gmac_queue queue_list[GMAC_QUEUE_NUM]; }; #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_SAM_GMAC_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_sam_gmac_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,497
```unknown # IVSHMEM Ethernet driver configuration options menuconfig ETH_IVSHMEM bool "Inter-VM shared memory Ethernet driver" select PCIE select VIRTUALIZATION select IVSHMEM_V2 select IVSHMEM_DOORBELL select OPENAMP help Enable Inter-VM Shared Memory Ethernet driver. Used for Ethernet communication between "cells" in the Jailhouse hypervisor. if ETH_IVSHMEM config ETH_IVSHMEM_THREAD_STACK_SIZE int "IVSHMEM Ethernet thread stack size" default 4096 config ETH_IVSHMEM_THREAD_PRIORITY int "IVSHMEM Ethernet thread priority" default 2 endif # ETH_IVSHMEM ```
/content/code_sandbox/drivers/ethernet/Kconfig.ivshmem
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
141
```c /* * */ /* SiLabs Giant Gecko GG11 Ethernet PHY driver. */ #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/net/mii.h> #include "phy_gecko.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_gecko_phy, CONFIG_ETHERNET_LOG_LEVEL); /* Maximum time to establish a link through auto-negotiation for * 10BASE-T, 100BASE-TX is 3.7s, to add an extra margin the timeout * is set at 4s. */ #define PHY_AUTONEG_TIMEOUT_MS 4000 /* Enable MDIO serial bus between MAC and PHY. */ static void mdio_bus_enable(ETH_TypeDef *eth) { eth->NETWORKCTRL |= ETH_NETWORKCTRL_MANPORTEN; } /* Enable MDIO serial bus between MAC and PHY. */ static void mdio_bus_disable(ETH_TypeDef *eth) { eth->NETWORKCTRL &= ~ETH_NETWORKCTRL_MANPORTEN; } /* Wait PHY operation complete. */ static int mdio_bus_wait(ETH_TypeDef *eth) { uint32_t retries = 100U; /* will wait up to 1 s */ while (!(eth->NETWORKSTATUS & ETH_NETWORKSTATUS_MANDONE)) { if (retries-- == 0U) { LOG_ERR("timeout"); return -ETIMEDOUT; } k_sleep(K_MSEC(10)); } return 0; } /* Send command to PHY over MDIO serial bus */ static int mdio_bus_send(ETH_TypeDef *eth, uint8_t phy_addr, uint8_t reg_addr, uint8_t rw, uint16_t data) { int retval; /* Write PHY management register */ eth->PHYMNGMNT = ETH_PHYMNGMNT_WRITE0_DEFAULT | ETH_PHYMNGMNT_WRITE1 | ((rw ? 0x02 : 0x01) << _ETH_PHYMNGMNT_OPERATION_SHIFT) | ((phy_addr << _ETH_PHYMNGMNT_PHYADDR_SHIFT) & _ETH_PHYMNGMNT_PHYADDR_MASK) | ((reg_addr << _ETH_PHYMNGMNT_REGADDR_SHIFT) & _ETH_PHYMNGMNT_REGADDR_MASK) | (0x2 << _ETH_PHYMNGMNT_WRITE10_SHIFT) | (data & _ETH_PHYMNGMNT_PHYRWDATA_MASK); /* Wait until PHY is ready */ retval = mdio_bus_wait(eth); if (retval < 0) { return retval; } return 0; } /* Read PHY register. 
*/ static int phy_read(const struct phy_gecko_dev *phy, uint8_t reg_addr, uint32_t *value) { ETH_TypeDef *const eth = phy->regs; uint8_t phy_addr = phy->address; int retval; retval = mdio_bus_send(eth, phy_addr, reg_addr, 1, 0); if (retval < 0) { return retval; } /* Read data */ *value = eth->PHYMNGMNT & _ETH_PHYMNGMNT_PHYRWDATA_MASK; return 0; } /* Write PHY register. */ static int phy_write(const struct phy_gecko_dev *phy, uint8_t reg_addr, uint32_t value) { ETH_TypeDef *const eth = phy->regs; uint8_t phy_addr = phy->address; return mdio_bus_send(eth, phy_addr, reg_addr, 0, value); } /* Issue a PHY soft reset. */ static int phy_soft_reset(const struct phy_gecko_dev *phy) { uint32_t phy_reg; uint32_t retries = 12U; int retval; /* Issue a soft reset */ retval = phy_write(phy, MII_BMCR, MII_BMCR_RESET); if (retval < 0) { return retval; } /* Wait up to 0.6s for the reset sequence to finish. According to * IEEE 802.3, Section 2, Subsection 22.2.4.1.1 a PHY reset may take * up to 0.5 s. */ do { if (retries-- == 0U) { return -ETIMEDOUT; } k_sleep(K_MSEC(50)); retval = phy_read(phy, MII_BMCR, &phy_reg); if (retval < 0) { return retval; } } while (phy_reg & MII_BMCR_RESET); return 0; } int phy_gecko_init(const struct phy_gecko_dev *phy) { ETH_TypeDef *const eth = phy->regs; int phy_id; mdio_bus_enable(eth); LOG_INF("Soft Reset of ETH PHY"); phy_soft_reset(phy); /* Verify that the PHY device is responding */ phy_id = phy_gecko_id_get(phy); if (phy_id == 0xFFFFFFFF) { LOG_ERR("Unable to detect a valid PHY"); return -1; } LOG_INF("PHYID: 0x%X at addr: %d", phy_id, phy->address); mdio_bus_disable(eth); return 0; } uint32_t phy_gecko_id_get(const struct phy_gecko_dev *phy) { ETH_TypeDef *const eth = phy->regs; uint32_t phy_reg; uint32_t phy_id; mdio_bus_enable(eth); if (phy_read(phy, MII_PHYID1R, &phy_reg) < 0) { return 0xFFFFFFFF; } phy_id = (phy_reg & 0xFFFF) << 16; if (phy_read(phy, MII_PHYID2R, &phy_reg) < 0) { return 0xFFFFFFFF; } phy_id |= (phy_reg & 0xFFFF); 
mdio_bus_disable(eth); return phy_id; } int phy_gecko_auto_negotiate(const struct phy_gecko_dev *phy, uint32_t *status) { ETH_TypeDef *const eth = phy->regs; uint32_t val; uint32_t ability_adv; uint32_t ability_rcvd; uint32_t retries = PHY_AUTONEG_TIMEOUT_MS / 100; int retval; mdio_bus_enable(eth); LOG_DBG("Starting ETH PHY auto-negotiate sequence"); /* Read PHY default advertising parameters */ retval = phy_read(phy, MII_ANAR, &ability_adv); if (retval < 0) { goto auto_negotiate_exit; } /* Configure and start auto-negotiation process */ retval = phy_read(phy, MII_BMCR, &val); if (retval < 0) { goto auto_negotiate_exit; } val |= MII_BMCR_AUTONEG_ENABLE | MII_BMCR_AUTONEG_RESTART; val &= ~MII_BMCR_ISOLATE; /* Don't isolate the PHY */ retval = phy_write(phy, MII_BMCR, val); if (retval < 0) { goto auto_negotiate_exit; } /* Wait for the auto-negotiation process to complete */ do { if (retries-- == 0U) { retval = -ETIMEDOUT; goto auto_negotiate_exit; } k_sleep(K_MSEC(100)); retval = phy_read(phy, MII_BMSR, &val); if (retval < 0) { goto auto_negotiate_exit; } } while (!(val & MII_BMSR_AUTONEG_COMPLETE)); LOG_DBG("PHY auto-negotiate sequence completed"); /* Read abilities of the remote device */ retval = phy_read(phy, MII_ANLPAR, &ability_rcvd); if (retval < 0) { goto auto_negotiate_exit; } /* Determine the best possible mode of operation */ if ((ability_adv & ability_rcvd) & MII_ADVERTISE_100_FULL) { *status = ETH_NETWORKCFG_FULLDUPLEX | ETH_NETWORKCFG_SPEED; } else if ((ability_adv & ability_rcvd) & MII_ADVERTISE_100_HALF) { *status = ETH_NETWORKCFG_SPEED; } else if ((ability_adv & ability_rcvd) & MII_ADVERTISE_10_FULL) { *status = ETH_NETWORKCFG_FULLDUPLEX; } else { *status = 0; } LOG_DBG("common abilities: speed %s Mb, %s duplex", *status & ETH_NETWORKCFG_SPEED ? "100" : "10", *status & ETH_NETWORKCFG_FULLDUPLEX ? 
"full" : "half"); auto_negotiate_exit: mdio_bus_disable(eth); return retval; } bool phy_gecko_is_linked(const struct phy_gecko_dev *phy) { ETH_TypeDef *const eth = phy->regs; uint32_t phy_reg; bool phy_linked = false; mdio_bus_enable(eth); if (phy_read(phy, MII_BMSR, &phy_reg) < 0) { return phy_linked; } phy_linked = (phy_reg & MII_BMSR_LINK_STATUS); mdio_bus_disable(eth); return phy_linked; } ```
/content/code_sandbox/drivers/ethernet/phy_gecko.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,002
```c /* * Driver for Synopsys DesignWare MAC * * */ #define LOG_MODULE_NAME dwmac_plat #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #define DT_DRV_COMPAT snps_designware_ethernet #include <sys/types.h> #include <zephyr/kernel/mm.h> #include <zephyr/kernel.h> #include <zephyr/cache.h> #include <zephyr/net/ethernet.h> #include <zephyr/irq.h> #include "eth_dwmac_priv.h" int dwmac_bus_init(struct dwmac_priv *p) { p->base_addr = DT_INST_REG_ADDR(0); return 0; } #if (CONFIG_DCACHE_LINE_SIZE+0 == 0) #error "CONFIG_DCACHE_LINE_SIZE must be configured to a non-zero value" #endif static struct dwmac_dma_desc __aligned(CONFIG_DCACHE_LINE_SIZE) dwmac_tx_rx_descriptors[NB_TX_DESCS + NB_RX_DESCS]; static const uint8_t dwmac_mac_addr[6] = DT_INST_PROP(0, local_mac_address); void dwmac_platform_init(struct dwmac_priv *p) { uint8_t *desc_uncached_addr; uintptr_t desc_phys_addr; /* make sure no valid cache lines map to the descriptor area */ sys_cache_data_invd_range(dwmac_tx_rx_descriptors, sizeof(dwmac_tx_rx_descriptors)); desc_phys_addr = k_mem_phys_addr(dwmac_tx_rx_descriptors); /* remap descriptor rings uncached */ k_mem_map_phys_bare(&desc_uncached_addr, desc_phys_addr, sizeof(dwmac_tx_rx_descriptors), K_MEM_PERM_RW | K_MEM_CACHE_NONE); LOG_DBG("desc virt %p uncached %p phys 0x%lx", dwmac_tx_rx_descriptors, desc_uncached_addr, desc_phys_addr); p->tx_descs = (void *)desc_uncached_addr; desc_uncached_addr += NB_TX_DESCS * sizeof(struct dwmac_dma_desc); p->rx_descs = (void *)desc_uncached_addr; p->tx_descs_phys = desc_phys_addr; desc_phys_addr += NB_TX_DESCS * sizeof(struct dwmac_dma_desc); p->rx_descs_phys = desc_phys_addr; /* basic configuration for this platform */ REG_WRITE(MAC_CONF, MAC_CONF_PS | MAC_CONF_FES | MAC_CONF_DM); REG_WRITE(DMA_SYSBUS_MODE, DMA_SYSBUS_MODE_AAL | #ifdef CONFIG_64BIT DMA_SYSBUS_MODE_EAME | #endif DMA_SYSBUS_MODE_FB); /* set up IRQs (still masked for now) */ 
IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), dwmac_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); /* retrieve MAC address */ memcpy(p->mac_addr, dwmac_mac_addr, sizeof(p->mac_addr)); } /* Our private device instance */ static struct dwmac_priv dwmac_instance; ETH_NET_DEVICE_DT_INST_DEFINE(0, dwmac_probe, NULL, &dwmac_instance, NULL, CONFIG_ETH_INIT_PRIORITY, &dwmac_api, NET_ETH_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_dwmac_mmu.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
692
```unknown # ETH_XMC4XXX Ethernet driver configuration options menuconfig ETH_XMC4XXX bool "XMC4XXX Ethernet driver" default y depends on DT_HAS_INFINEON_XMC4XXX_ETHERNET_ENABLED help Enable XMC4XXX Ethernet driver. if ETH_XMC4XXX config ETH_XMC4XXX_TX_FRAME_POOL_SIZE int "Number of TX frames in the pool size" default 4 help Number of TX frames which can be buffered in the driver. config ETH_XMC4XXX_NUM_TX_DMA_DESCRIPTORS int "Number of TX DMA descriptors" default 32 help Number of TX DMA descriptors. Each descriptor stores the memory address of a data fragment and its size. config ETH_XMC4XXX_NUM_RX_DMA_DESCRIPTORS int "Number of RX DMA descriptors" default 12 help Number of RX DMA descriptors. Each descriptor stores the memory address of a data fragment and its size. The data fragments are pre-allocated from the rx network buffers (CONFIG_NET_BUF_RX_COUNT). When a frame is received, it is forwarded to the network stack without copying the data. The buffers in the descriptors are replaced by new pre-allocated buffers. config ETH_XMC4XXX_VLAN_HW_FILTER bool "Hardware filter VLAN frames" default y if NET_VLAN_COUNT=1 depends on NET_VLAN help Hardware filter VLAN frames in hardware. Only ethernet frames with a tag configured using vlan_setup() call will be received. The filtering can only be done on one vlan tag. If vlan_setup() is called multiple times, the filtering will be done on the latest tag. config PTP_CLOCK_XMC4XXX bool "XMC4XXX PTP clock driver support" default y depends on PTP_CLOCK help Enable XMC4XXX PTP Clock support. endif # ETH_XMC4XXX ```
/content/code_sandbox/drivers/ethernet/Kconfig.xmc4xxx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
412
```unknown # Native posix ethernet driver configuration options menuconfig ETH_NATIVE_POSIX bool "Native Posix Ethernet driver" depends on ARCH_POSIX help Enable native posix ethernet driver. Note, this driver is run inside a process in your host system. if ETH_NATIVE_POSIX config ETH_NATIVE_POSIX_INTERFACE_COUNT int "Number of network interfaces created" default NET_GPTP_NUM_PORTS if NET_GPTP default PTP_NUM_PORTS if PTP default 1 range 1 32 help By default only one network interface is created. It is possible to create multiple interfaces in certain use cases. For example if multiple ports are defined in gPTP or PTP, then multiple network interfaces must be created here. config ETH_NATIVE_POSIX_DRV_NAME string "Ethernet driver name" default "zeth" help This option sets the driver name and name of the network interface in your host system. If there are multiple network interfaces defined, then this value is used as a prefix and the interface names will be zeth0, zeth1, etc. config ETH_NATIVE_POSIX_DEV_NAME string "Host ethernet TUN/TAP device name" default "/dev/net/tun" help This option sets the TUN/TAP device name in your host system. config ETH_NATIVE_POSIX_PTP_CLOCK bool "PTP clock driver support" default y if NET_GPTP || PTP select PTP_CLOCK depends on NET_GPTP || PTP help Enable PTP clock support. config ETH_NATIVE_POSIX_RANDOM_MAC bool "Random MAC address" depends on ENTROPY_GENERATOR default y help Generate a random MAC address dynamically. config ETH_NATIVE_POSIX_VLAN_TAG_STRIP bool "Strip VLAN tag from Rx frames" depends on NET_VLAN help Native posix ethernet driver will strip of VLAN tag from Rx Ethernet frames and sets tag information in net packet metadata. config ETH_NATIVE_POSIX_MAC_ADDR string "MAC address for the interface" default "" depends on !ETH_NATIVE_POSIX_RANDOM_MAC help Specify a MAC address for the ethernet interface in the form of six hex 8-bit chars separated by colons (e.g.: aa:33:cc:22:e2:c0). 
The default is an empty string, which means the code will make 00:00:5E:00:53:XX, where XX will be random. config ETH_NATIVE_POSIX_RX_TIMEOUT int "Ethernet RX timeout" default 1 if NET_GPTP default 50 range 1 100 help Native posix ethernet driver repeatedly checks for new data. Specify how long the thread sleeps between these checks if no new data available. endif # ETH_NATIVE_POSIX ```
/content/code_sandbox/drivers/ethernet/Kconfig.native_posix
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
609
```objective-c /* ENC424J600 Stand-alone Ethernet Controller with SPI * * */ #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #ifndef _ENC424J600_ #define _ENC424J600_ /* Bank 0 Registers */ #define ENC424J600_SFR0_ETXSTL 0x00 #define ENC424J600_SFR0_ETXSTH 0x01 #define ENC424J600_SFR0_ETXLENL 0x02 #define ENC424J600_SFR0_ETXLENH 0x03 #define ENC424J600_SFR0_ERXSTL 0x04 #define ENC424J600_SFR0_ERXSTH 0x05 #define ENC424J600_SFR0_ERXTAILL 0x06 #define ENC424J600_SFR0_ERXTAILH 0x07 #define ENC424J600_SFR0_ERXHEADL 0x08 #define ENC424J600_SFR0_ERXHEADH 0x09 #define ENC424J600_SFR0_EDMASTL 0x0A #define ENC424J600_SFR0_EDMASTH 0x0B #define ENC424J600_SFR0_EDMALENL 0x0C #define ENC424J600_SFR0_EDMALENH 0x0D #define ENC424J600_SFR0_EDMADSTL 0x0E #define ENC424J600_SFR0_EDMADSTH 0x0F #define ENC424J600_SFR0_EDMACSL 0x10 #define ENC424J600_SFR0_EDMACSH 0x11 #define ENC424J600_SFR0_ETXSTATL 0x12 #define ENC424J600_SFR0_ETXSTATH 0x13 #define ENC424J600_SFR0_ETXWIREL 0x14 #define ENC424J600_SFR0_ETXWIREH 0x15 /* Common Registers */ #define ENC424J600_SFRX_EUDASTL 0x16 #define ENC424J600_SFRX_EUDASTH 0x17 #define ENC424J600_SFRX_EUDANDL 0x18 #define ENC424J600_SFRX_EUDANDH 0x19 #define ENC424J600_SFRX_ESTATL 0x1A #define ENC424J600_SFRX_ESTATH 0x1B #define ENC424J600_SFRX_EIRL 0x1C #define ENC424J600_SFRX_EIRH 0x1D #define ENC424J600_SFRX_ECON1L 0x1E #define ENC424J600_SFRX_ECON1H 0x1F /* Bank 1 Registers */ #define ENC424J600_SFR1_EHT1L 0x20 #define ENC424J600_SFR1_EHT1H 0x21 #define ENC424J600_SFR1_EHT2L 0x22 #define ENC424J600_SFR1_EHT2H 0x23 #define ENC424J600_SFR1_EHT3L 0x24 #define ENC424J600_SFR1_EHT3H 0x25 #define ENC424J600_SFR1_EHT4L 0x26 #define ENC424J600_SFR1_EHT4H 0x27 #define ENC424J600_SFR1_EPMM1L 0x28 #define ENC424J600_SFR1_EPMM1H 0x29 #define ENC424J600_SFR1_EPMM2L 0x2A #define ENC424J600_SFR1_EPMM2H 0x2B #define ENC424J600_SFR1_EPMM3L 0x2C #define ENC424J600_SFR1_EPMM3H 0x2D #define ENC424J600_SFR1_EPMM4L 0x2E 
#define ENC424J600_SFR1_EPMM4H 0x2F #define ENC424J600_SFR1_EPMCSL 0x30 #define ENC424J600_SFR1_EPMCSH 0x31 #define ENC424J600_SFR1_EPMOL 0x32 #define ENC424J600_SFR1_EPMOH 0x33 #define ENC424J600_SFR1_ERXFCONL 0x34 #define ENC424J600_SFR1_ERXFCONH 0x35 /* Bank 2 Registers */ #define ENC424J600_SFR2_MACON1L 0x40 #define ENC424J600_SFR2_MACON1H 0x41 #define ENC424J600_SFR2_MACON2L 0x42 #define ENC424J600_SFR2_MACON2H 0x43 #define ENC424J600_SFR2_MABBIPGL 0x44 #define ENC424J600_SFR2_MABBIPGH 0x45 #define ENC424J600_SFR2_MAIPGL 0x46 #define ENC424J600_SFR2_MAIPGH 0x47 #define ENC424J600_SFR2_MACLCONL 0x48 #define ENC424J600_SFR2_MACLCONH 0x49 #define ENC424J600_SFR2_MAMXFLL 0x4A #define ENC424J600_SFR2_MAMXFLH 0x4B #define ENC424J600_SFR2_MICMDL 0x52 #define ENC424J600_SFR2_MICMDH 0x53 #define ENC424J600_SFR2_MIREGADRL 0x54 #define ENC424J600_SFR2_MIREGADRH 0x55 /* Bank 3 Registers */ #define ENC424J600_SFR3_MAADR3L 0x60 #define ENC424J600_SFR3_MAADR3H 0x61 #define ENC424J600_SFR3_MAADR2L 0x62 #define ENC424J600_SFR3_MAADR2H 0x63 #define ENC424J600_SFR3_MAADR1L 0x64 #define ENC424J600_SFR3_MAADR1H 0x65 #define ENC424J600_SFR3_MIWRL 0x66 #define ENC424J600_SFR3_MIWRH 0x67 #define ENC424J600_SFR3_MIRDL 0x68 #define ENC424J600_SFR3_MIRDH 0x69 #define ENC424J600_SFR3_MISTATL 0x6A #define ENC424J600_SFR3_MISTATH 0x6B #define ENC424J600_SFR3_EPAUSL 0x6C #define ENC424J600_SFR3_EPAUSH 0x6D #define ENC424J600_SFR3_ECON2L 0x6E #define ENC424J600_SFR3_ECON2H 0x6F #define ENC424J600_SFR3_ERXWML 0x70 #define ENC424J600_SFR3_ERXWMH 0x71 #define ENC424J600_SFR3_EIEL 0x72 #define ENC424J600_SFR3_EIEH 0x73 #define ENC424J600_SFR3_EIDLEDL 0x74 #define ENC424J600_SFR3_EIDLEDH 0x75 /* Unbanked SFRs */ #define ENC424J600_SFR4_EGPDATA 0x80 #define ENC424J600_SFR4_ERXDATA 0x82 #define ENC424J600_SFR4_EUDADATA 0x84 #define ENC424J600_SFR4_EGPRDPTL 0x86 #define ENC424J600_SFR4_EGPRDPTH 0x87 #define ENC424J600_SFR4_EGPWRPTL 0x88 #define ENC424J600_SFR4_EGPWRPTH 0x89 #define 
ENC424J600_SFR4_ERXRDPTL 0x8A #define ENC424J600_SFR4_ERXRDPTH 0x8B #define ENC424J600_SFR4_ERXWRPTL 0x8C #define ENC424J600_SFR4_ERXWRPTH 0x8D #define ENC424J600_SFR4_EUDARDPTL 0x8E #define ENC424J600_SFR4_EUDARDPTH 0x8F #define ENC424J600_SFR4_EUDAWRPTL 0x90 #define ENC424J600_SFR4_EUDAWRPTH 0x91 /* PHY Registers */ #define ENC424J600_PSFR_PHCON1 (BIT(8) | 0x00) #define ENC424J600_PSFR_PHSTAT1 (BIT(8) | 0x01) #define ENC424J600_PSFR_PHANA (BIT(8) | 0x04) #define ENC424J600_PSFR_PHANLPA (BIT(8) | 0x05) #define ENC424J600_PSFR_PHANE (BIT(8) | 0x06) #define ENC424J600_PSFR_PHCON2 (BIT(8) | 0x11) #define ENC424J600_PSFR_PHSTAT2 (BIT(8) | 0x1B) #define ENC424J600_PSFR_PHSTAT3 (BIT(8) | 0x1F) /* SPI Instructions */ #define ENC424J600_1BC_B0SEL 0xC0 #define ENC424J600_1BC_B1SEL 0xC2 #define ENC424J600_1BC_B2SEL 0xC4 #define ENC424J600_1BC_B3SEL 0xC6 #define ENC424J600_1BC_SETETHRST 0xCA #define ENC424J600_1BC_FCDISABLE 0xE0 #define ENC424J600_1BC_FCSINGLE 0xE2 #define ENC424J600_1BC_FCMULTIPLE 0xE4 #define ENC424J600_1BC_FCCLEAR 0xE6 #define ENC424J600_1BC_SETPKTDEC 0xCC #define ENC424J600_1BC_DMASTOP 0xD2 #define ENC424J600_1BC_DMACKSUM 0xD8 #define ENC424J600_1BC_DMACKSUMS 0xDA #define ENC424J600_1BC_DMACOPY 0xDC #define ENC424J600_1BC_DMACOPYS 0xDE #define ENC424J600_1BC_SETTXRTS 0xD4 #define ENC424J600_1BC_ENABLERX 0xE8 #define ENC424J600_1BC_DISABLERX 0xEA #define ENC424J600_1BC_SETEIE 0xEC #define ENC424J600_1BC_CLREIE 0xEE #define ENC424J600_2BC_RBSEL 0xC8 #define ENC424J600_3BC_WGPRDPT 0x60 #define ENC424J600_3BC_RGPRDPT 0x62 #define ENC424J600_3BC_WRXRDPT 0x64 #define ENC424J600_3BC_RRXRDPT 0x66 #define ENC424J600_3BC_WUDARDPT 0x68 #define ENC424J600_3BC_RUDARDPT 0x6A #define ENC424J600_3BC_WGPWRPT 0x6C #define ENC424J600_3BC_RGPWRPT 0x6E #define ENC424J600_3BC_WRXWRPT 0x70 #define ENC424J600_3BC_RRXWRPT 0x72 #define ENC424J600_3BC_WUDAWRPT 0x74 #define ENC424J600_3BC_RUDAWRPT 0x76 #define ENC424J600_NBC_RCR 0x00 #define ENC424J600_NBC_WCR 0x40 #define 
ENC424J600_NBC_RCRU 0x20 #define ENC424J600_NBC_WCRU 0x22 #define ENC424J600_NBC_BFS 0x80 #define ENC424J600_NBC_BFC 0xA0 #define ENC424J600_NBC_BFSU 0x24 #define ENC424J600_NBC_BFCU 0x26 #define ENC424J600_NBC_RGPDATA 0x28 #define ENC424J600_NBC_WGPDATA 0x2A #define ENC424J600_NBC_RRXDATA 0x2C #define ENC424J600_NBC_WRXDATA 0x2E #define ENC424J600_NBC_RUDADATA 0x30 #define ENC424J600_NBC_WUDADATA 0x32 /* Significant bits */ #define ENC424J600_MICMD_MIIRD BIT(0) #define ENC424J600_MISTAT_BUSY BIT(0) #define ENC424J600_ESTAT_RXBUSY BIT(13) #define ENC424J600_ESTAT_CLKRDY BIT(12) #define ENC424J600_ESTAT_PHYLNK BIT(8) #define ENC424J600_MACON2_FULDPX BIT(0) #define ENC424J600_ERXFCON_CRCEN BIT(6) #define ENC424J600_ERXFCON_RUNTEEN BIT(5) #define ENC424J600_ERXFCON_RUNTEN BIT(4) #define ENC424J600_ERXFCON_UCEN BIT(3) #define ENC424J600_ERXFCON_NOTMEEN BIT(2) #define ENC424J600_ERXFCON_MCEN BIT(1) #define ENC424J600_ERXFCON_BCEN BIT(0) #define ENC424J600_PHANA_ADNP BIT(15) #define ENC424J600_PHANA_ADFAULT BIT(13) #define ENC424J600_PHANA_ADPAUS_SYMMETRIC_ONLY BIT(10) #define ENC424J600_PHANA_AD100FD BIT(8) #define ENC424J600_PHANA_AD100 BIT(7) #define ENC424J600_PHANA_AD10FD BIT(6) #define ENC424J600_PHANA_AD10 BIT(5) #define ENC424J600_PHANA_ADIEEE_DEFAULT BIT(0) #define ENC424J600_EIE_INTIE BIT(15) #define ENC424J600_EIE_MODEXIE BIT(14) #define ENC424J600_EIE_HASHIE BIT(13) #define ENC424J600_EIE_AESIE BIT(12) #define ENC424J600_EIE_LINKIE BIT(11) #define ENC424J600_EIE_PKTIE BIT(6) #define ENC424J600_EIE_DMAIE BIT(5) #define ENC424J600_EIE_TXIE BIT(3) #define ENC424J600_EIE_TXABTIE BIT(2) #define ENC424J600_EIE_RXABTIE BIT(1) #define ENC424J600_EIE_PCFULIE BIT(0) #define ENC424J600_ECON1_PKTDEC BIT(8) #define ENC424J600_ECON1_TXRTS BIT(1) #define ENC424J600_ECON1_RXEN BIT(0) #define ENC424J600_ECON2_ETHEN BIT(15) #define ENC424J600_ECON2_STRCH BIT(14) #define ENC424J600_EIR_LINKIF BIT(11) #define ENC424J600_EIR_PKTIF BIT(6) #define ENC424J600_EIR_TXIF BIT(3) #define 
ENC424J600_EIR_TXABTIF BIT(2) #define ENC424J600_EIR_RXABTIF BIT(1) #define ENC424J600_EIR_PCFULIF BIT(0) #define ENC424J600_PHCON1_PSLEEP BIT(11) #define ENC424J600_PHCON1_RENEG BIT(9) #define ENC424J600_PHSTAT3_SPDDPX_FD BIT(4) #define ENC424J600_PHSTAT3_SPDDPX_100 BIT(3) #define ENC424J600_PHSTAT3_SPDDPX_10 BIT(2) /* Buffer Configuration */ #define ENC424J600_TXSTART 0x0000U #define ENC424J600_TXEND 0x2FFFU #define ENC424J600_RXSTART (ENC424J600_TXEND + 1) #define ENC424J600_RXEND 0x5FFFU #define ENC424J600_EUDAST_DEFAULT 0x6000U #define ENC424J600_EUDAND_DEFAULT (ENC424J600_EUDAST + 1) /* Status vectors array size */ #define ENC424J600_RSV_SIZE 6U #define ENC424J600_PTR_NXP_PKT_SIZE 2U /* Full-Duplex mode Inter-Packet Gap default value */ #define ENC424J600_MABBIPG_DEFAULT 0x15U #define ENC424J600_DEFAULT_NUMOF_RETRIES 3U /* Delay for PHY write/read operations (25.6 us) */ #define ENC424J600_PHY_ACCESS_DELAY 26U #define ENC424J600_PHY_READY_DELAY 260U struct enc424j600_config { struct spi_dt_spec spi; struct gpio_dt_spec interrupt; uint8_t full_duplex; int32_t timeout; }; struct enc424j600_runtime { struct net_if *iface; const struct device *dev; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_ENC424J600_RX_THREAD_STACK_SIZE); struct k_thread thread; uint8_t mac_address[6]; struct gpio_callback gpio_cb; struct k_sem tx_rx_sem; struct k_sem int_sem; uint16_t next_pkt_ptr; bool suspended : 1; bool iface_initialized : 1; }; #endif /*_ENC424J600_*/ ```
/content/code_sandbox/drivers/ethernet/eth_enc424j600_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,134
```c /* * */ #define DT_DRV_COMPAT microchip_lan865x #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_lan865x, CONFIG_ETHERNET_LOG_LEVEL); #include <zephyr/net/ethernet.h> #include <zephyr/net/phy.h> #include <string.h> #include <errno.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/phy.h> #include "eth_lan865x_priv.h" static int lan865x_mac_rxtx_control(const struct device *dev, bool en) { struct lan865x_data *ctx = dev->data; uint32_t ctl = 0; if (en) { ctl = LAN865x_MAC_NCR_TXEN | LAN865x_MAC_NCR_RXEN; } return oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_NCR, ctl); } static void lan865x_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct lan865x_data *ctx = dev->data; net_if_set_link_addr(iface, ctx->mac_address, sizeof(ctx->mac_address), NET_LINK_ETHERNET); if (ctx->iface == NULL) { ctx->iface = iface; } ethernet_init(iface); net_eth_carrier_on(iface); ctx->iface_initialized = true; } static enum ethernet_hw_caps lan865x_port_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_PROMISC_MODE; } static int lan865x_gpio_reset(const struct device *dev); static void lan865x_write_macaddress(const struct device *dev); static int lan865x_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { const struct lan865x_config *cfg = dev->config; struct lan865x_data *ctx = dev->data; int ret = -ENOTSUP; if (type == ETHERNET_CONFIG_TYPE_PROMISC_MODE) { return oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_NCFGR, LAN865x_MAC_NCFGR_CAF); } if (type == ETHERNET_CONFIG_TYPE_MAC_ADDRESS) { memcpy(ctx->mac_address, config->mac_address.addr, sizeof(ctx->mac_address)); lan865x_write_macaddress(dev); return net_if_set_link_addr(ctx->iface, ctx->mac_address, sizeof(ctx->mac_address), NET_LINK_ETHERNET); } if (type == ETHERNET_CONFIG_TYPE_T1S_PARAM) { ret = lan865x_mac_rxtx_control(dev, LAN865x_MAC_TXRX_OFF); 
if (ret) { return ret; } if (config->t1s_param.type == ETHERNET_T1S_PARAM_TYPE_PLCA_CONFIG) { cfg->plca->enable = config->t1s_param.plca.enable; cfg->plca->node_id = config->t1s_param.plca.node_id; cfg->plca->node_count = config->t1s_param.plca.node_count; cfg->plca->burst_count = config->t1s_param.plca.burst_count; cfg->plca->burst_timer = config->t1s_param.plca.burst_timer; cfg->plca->to_timer = config->t1s_param.plca.to_timer; } /* Reset is required to re-program PLCA new configuration */ lan865x_gpio_reset(dev); } return ret; } static int lan865x_wait_for_reset(const struct device *dev) { struct lan865x_data *ctx = dev->data; uint8_t i; /* Wait for end of LAN865x reset */ for (i = 0; !ctx->reset && i < LAN865X_RESET_TIMEOUT; i++) { k_msleep(1); } if (i == LAN865X_RESET_TIMEOUT) { LOG_ERR("LAN865x reset timeout reached!"); return -ENODEV; } return 0; } static int lan865x_gpio_reset(const struct device *dev) { const struct lan865x_config *cfg = dev->config; struct lan865x_data *ctx = dev->data; ctx->reset = false; ctx->tc6->protected = false; /* Perform (GPIO based) HW reset */ /* assert RESET_N low for 10 s (5 s min) */ gpio_pin_set_dt(&cfg->reset, 1); k_busy_wait(10U); /* deassert - end of reset indicated by IRQ_N low */ gpio_pin_set_dt(&cfg->reset, 0); return lan865x_wait_for_reset(dev); } static int lan865x_check_spi(const struct device *dev) { struct lan865x_data *ctx = dev->data; uint32_t val; int ret; ret = oa_tc6_reg_read(ctx->tc6, LAN865x_DEVID, &val); if (ret < 0) { return -ENODEV; } ctx->silicon_rev = val & LAN865X_REV_MASK; if (ctx->silicon_rev != 1 && ctx->silicon_rev != 2) { return -ENODEV; } ctx->chip_id = (val >> 4) & 0xFFFF; if (ctx->chip_id != LAN8650_DEVID && ctx->chip_id != LAN8651_DEVID) { return -ENODEV; } return ret; } /* Implementation of pseudo code from AN1760 */ static uint8_t lan865x_read_indirect_reg(const struct device *dev, uint8_t addr, uint8_t mask) { struct lan865x_data *ctx = dev->data; uint32_t val; oa_tc6_reg_write(ctx->tc6, 
0x000400D8, addr); oa_tc6_reg_write(ctx->tc6, 0x000400DA, 0x02); oa_tc6_reg_read(ctx->tc6, 0x000400D9, &val); return (uint8_t) val & mask; } static int lan865x_init_chip(const struct device *dev, uint8_t silicon_rev) { struct lan865x_data *ctx = dev->data; uint8_t value1, value2; int8_t offset1 = 0, offset2 = 0, ret; uint16_t value3, value4, value5, value6, value7; uint16_t cfgparam1, cfgparam2, cfgparam3, cfgparam4, cfgparam5; uint32_t val; ret = lan865x_read_indirect_reg(dev, 0x05, 0x40); if (ret == 0) { LOG_ERR("LAN865x error! Please contact microchip support for replacement."); return -EIO; } value1 = lan865x_read_indirect_reg(dev, 0x04, 0x1F); if ((value1 & 0x10) != 0) { /* Convert uint8_t to int8_t */ offset1 = value1 | 0xE0; if (offset1 < -5) { LOG_ERR("LAN865x internal error!"); return -EIO; } } else { offset1 = value1; } value2 = lan865x_read_indirect_reg(dev, 0x08, 0x1F); if ((value2 & 0x10) != 0) { /* Convert uint8_t to int8_t */ offset2 = value2 | 0xE0; } else { offset2 = value2; } oa_tc6_reg_read(ctx->tc6, 0x00040084, &val); value3 = (uint16_t)val; oa_tc6_reg_read(ctx->tc6, 0x0004008A, &val); value4 = (uint16_t)val; oa_tc6_reg_read(ctx->tc6, 0x000400AD, &val); value5 = (uint16_t)val; oa_tc6_reg_read(ctx->tc6, 0x000400AE, &val); value6 = (uint8_t)val; oa_tc6_reg_read(ctx->tc6, 0x000400AF, &val); value7 = (uint8_t)val; cfgparam1 = (value3 & 0xF) | (((9 + offset1) << 10) | ((14 + offset1) << 4)); cfgparam2 = (value4 & 0x3FF) | ((40 + offset2) << 10); cfgparam3 = (value5 & 0xC0C0) | (((5 + offset1) << 8) | (9 + offset1)); cfgparam4 = (value6 & 0xC0C0) | (((9 + offset1) << 8) | (14 + offset1)); cfgparam5 = (value7 & 0xC0C0) | (((17 + offset1) << 8) | (22 + offset1)); oa_tc6_reg_write(ctx->tc6, 0x00040084, (uint32_t) cfgparam1); oa_tc6_reg_write(ctx->tc6, 0x0004008A, (uint32_t) cfgparam2); oa_tc6_reg_write(ctx->tc6, 0x000400AD, (uint32_t) cfgparam3); oa_tc6_reg_write(ctx->tc6, 0x000400AE, (uint32_t) cfgparam4); oa_tc6_reg_write(ctx->tc6, 0x000400AF, 
(uint32_t) cfgparam5); return 0; } /* Implementation of pseudo code from AN1760 - END */ static int lan865x_config_plca(const struct device *dev, uint8_t node_id, uint8_t node_cnt, uint8_t burst_cnt, uint8_t burst_timer) { struct lan865x_data *ctx = dev->data; uint32_t val; /* Collision Detection */ oa_tc6_reg_write(ctx->tc6, 0x00040087, 0x0083u); /* COL_DET_CTRL0 */ /* T1S Phy Node Id and Max Node Count */ val = ((uint32_t)node_cnt << 8) | node_id; oa_tc6_reg_write(ctx->tc6, 0x0004CA02, val); /* PLCA_CONTROL_1_REGISTER */ /* PLCA Burst Count and Burst Timer */ val = ((uint32_t)burst_cnt << 8) | burst_timer; oa_tc6_reg_write(ctx->tc6, 0x0004CA05, val); /* PLCA_BURST_MODE_REGISTER */ /* Enable PLCA */ oa_tc6_reg_write(ctx->tc6, 0x0004CA01, BIT(15)); /* PLCA_CONTROL_0_REGISTER */ return 0; } static void lan865x_write_macaddress(const struct device *dev) { struct lan865x_data *ctx = dev->data; uint8_t *mac = &ctx->mac_address[0]; uint32_t val; /* SPEC_ADD2_BOTTOM */ val = (mac[3] << 24) | (mac[2] << 16) | (mac[1] << 8) | mac[0]; oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_SAB2, val); /* SPEC_ADD2_TOP */ val = (mac[5] << 8) | mac[4]; oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_SAT2, val); /* * SPEC_ADD1_BOTTOM - setting unique lower MAC address, back off time is * generated out of it. 
*/ val = (mac[5] << 24) | (mac[4] << 16) | (mac[3] << 8) | mac[2]; oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_SAB1, val); } static int lan865x_set_specific_multicast_addr(const struct device *dev) { struct lan865x_data *ctx = dev->data; uint32_t mac_h_hash = 0xffffffff; uint32_t mac_l_hash = 0xffffffff; int ret; /* Enable hash for all multicast addresses */ ret = oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_HRT, mac_h_hash); if (ret) { return ret; } ret = oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_HRB, mac_l_hash); if (ret) { return ret; } return oa_tc6_reg_write(ctx->tc6, LAN865x_MAC_NCFGR, LAN865x_MAC_NCFGR_MTIHEN); } static int lan865x_default_config(const struct device *dev, uint8_t silicon_rev) { /* Values in the below table are the same for LAN865x rev. B0 and B1 */ static const oa_mem_map_t lan865x_conf[] = { { .address = 0x00010000, .value = 0x00000000 }, { .address = 0x00040091, .value = 0x00009660 }, { .address = 0x00040081, .value = 0x00000080 }, { .address = 0x00010077, .value = 0x00000028 }, { .address = 0x00040043, .value = 0x000000FF }, { .address = 0x00040044, .value = 0x0000FFFF }, { .address = 0x00040045, .value = 0x00000000 }, { .address = 0x00040053, .value = 0x000000FF }, { .address = 0x00040054, .value = 0x0000FFFF }, { .address = 0x00040055, .value = 0x00000000 }, { .address = 0x00040040, .value = 0x00000002 }, { .address = 0x00040050, .value = 0x00000002 }, { .address = 0x000400E9, .value = 0x00009E50 }, { .address = 0x000400F5, .value = 0x00001CF8 }, { .address = 0x000400F4, .value = 0x0000C020 }, { .address = 0x000400F8, .value = 0x00009B00 }, { .address = 0x000400F9, .value = 0x00004E53 }, { .address = 0x000400B0, .value = 0x00000103 }, { .address = 0x000400B1, .value = 0x00000910 }, { .address = 0x000400B2, .value = 0x00001D26 }, { .address = 0x000400B3, .value = 0x0000002A }, { .address = 0x000400B4, .value = 0x00000103 }, { .address = 0x000400B5, .value = 0x0000070D }, { .address = 0x000400B6, .value = 0x00001720 }, { .address = 0x000400B7, .value = 
0x00000027 }, { .address = 0x000400B8, .value = 0x00000509 }, { .address = 0x000400B9, .value = 0x00000E13 }, { .address = 0x000400BA, .value = 0x00001C25 }, { .address = 0x000400BB, .value = 0x0000002B }, { .address = 0x0000000C, .value = 0x00000100 }, { .address = 0x00040081, .value = 0x000000E0 }, }; const struct lan865x_config *cfg = dev->config; uint8_t i, size = ARRAY_SIZE(lan865x_conf); struct lan865x_data *ctx = dev->data; int ret; /* Enable protected control RW */ oa_tc6_set_protected_ctrl(ctx->tc6, true); for (i = 0; i < size; i++) { oa_tc6_reg_write(ctx->tc6, lan865x_conf[i].address, lan865x_conf[i].value); } if (silicon_rev == 1) { /* For silicon rev 1 (B0): (bit [3..0] from 0x0A0084 */ oa_tc6_reg_write(ctx->tc6, 0x000400D0, 0x5F21); } lan865x_write_macaddress(dev); lan865x_set_specific_multicast_addr(dev); ret = lan865x_init_chip(dev, silicon_rev); if (ret < 0) { return ret; } if (cfg->plca->enable) { ret = lan865x_config_plca(dev, cfg->plca->node_id, cfg->plca->node_count, cfg->plca->burst_count, cfg->plca->burst_timer); if (ret < 0) { return ret; } } return 0; } static void lan865x_int_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { ARG_UNUSED(dev); ARG_UNUSED(pins); struct lan865x_data *ctx = CONTAINER_OF(cb, struct lan865x_data, gpio_int_callback); k_sem_give(&ctx->int_sem); } static void lan865x_read_chunks(const struct device *dev) { const struct lan865x_config *cfg = dev->config; struct lan865x_data *ctx = dev->data; struct oa_tc6 *tc6 = ctx->tc6; struct net_pkt *pkt; int ret; pkt = net_pkt_rx_alloc(K_MSEC(cfg->timeout)); if (!pkt) { LOG_ERR("OA RX: Could not allocate packet!"); return; } k_sem_take(&ctx->tx_rx_sem, K_FOREVER); ret = oa_tc6_read_chunks(tc6, pkt); if (ret < 0) { eth_stats_update_errors_rx(ctx->iface); net_pkt_unref(pkt); k_sem_give(&ctx->tx_rx_sem); return; } /* Feed buffer frame to IP stack */ ret = net_recv_data(ctx->iface, pkt); if (ret < 0) { LOG_ERR("OA RX: Could not process packet (%d)!", ret); 
net_pkt_unref(pkt); } k_sem_give(&ctx->tx_rx_sem); } static void lan865x_int_thread(const struct device *dev) { struct lan865x_data *ctx = dev->data; struct oa_tc6 *tc6 = ctx->tc6; uint32_t sts, val, ftr; int ret; while (true) { k_sem_take(&ctx->int_sem, K_FOREVER); if (!ctx->reset) { oa_tc6_reg_read(tc6, OA_STATUS0, &sts); if (sts & OA_STATUS0_RESETC) { oa_tc6_reg_write(tc6, OA_STATUS0, sts); lan865x_default_config(dev, ctx->silicon_rev); oa_tc6_reg_read(tc6, OA_CONFIG0, &val); val |= OA_CONFIG0_SYNC | OA_CONFIG0_RFA_ZARFE; oa_tc6_reg_write(tc6, OA_CONFIG0, val); lan865x_mac_rxtx_control(dev, LAN865x_MAC_TXRX_ON); ctx->reset = true; /* * According to OA T1S standard - it is mandatory to * read chunk of data to get the IRQ_N negated (deasserted). */ oa_tc6_read_status(tc6, &ftr); continue; } } /* * The IRQ_N is asserted when RCA becomes > 0. As described in * OPEN Alliance 10BASE-T1x standard it is deasserted when first * data header is received by LAN865x. * * Hence, it is mandatory to ALWAYS read at least one data chunk! 
*/ do { lan865x_read_chunks(dev); } while (tc6->rca > 0); ret = oa_tc6_check_status(tc6); if (ret == -EIO) { lan865x_gpio_reset(dev); } } } static int lan865x_init(const struct device *dev) { const struct lan865x_config *cfg = dev->config; struct lan865x_data *ctx = dev->data; int ret; __ASSERT(cfg->spi.config.frequency <= LAN865X_SPI_MAX_FREQUENCY, "SPI frequency exceeds supported maximum\n"); if (!spi_is_ready_dt(&cfg->spi)) { LOG_ERR("SPI bus %s not ready", cfg->spi.bus->name); return -ENODEV; } if (!gpio_is_ready_dt(&cfg->interrupt)) { LOG_ERR("Interrupt GPIO device %s is not ready", cfg->interrupt.port->name); return -ENODEV; } /* Check SPI communication after reset */ ret = lan865x_check_spi(dev); if (ret < 0) { LOG_ERR("SPI communication not working, %d", ret); return ret; } /* * Configure interrupt service routine for LAN865x IRQ */ ret = gpio_pin_configure_dt(&cfg->interrupt, GPIO_INPUT); if (ret < 0) { LOG_ERR("Failed to configure interrupt GPIO, %d", ret); return ret; } gpio_init_callback(&(ctx->gpio_int_callback), lan865x_int_callback, BIT(cfg->interrupt.pin)); ret = gpio_add_callback(cfg->interrupt.port, &ctx->gpio_int_callback); if (ret < 0) { LOG_ERR("Failed to add INT callback, %d", ret); return ret; } gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE); /* Start interruption-poll thread */ ctx->tid_int = k_thread_create(&ctx->thread, ctx->thread_stack, CONFIG_ETH_LAN865X_IRQ_THREAD_STACK_SIZE, (k_thread_entry_t)lan865x_int_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_LAN865X_IRQ_THREAD_PRIO), 0, K_NO_WAIT); k_thread_name_set(ctx->tid_int, "lan865x_interrupt"); /* Perform HW reset - 'rst-gpios' required property set in DT */ if (!gpio_is_ready_dt(&cfg->reset)) { LOG_ERR("Reset GPIO device %s is not ready", cfg->reset.port->name); return -ENODEV; } ret = gpio_pin_configure_dt(&cfg->reset, GPIO_OUTPUT_INACTIVE); if (ret < 0) { LOG_ERR("Failed to configure reset GPIO, %d", ret); return ret; } return 
lan865x_gpio_reset(dev); } static int lan865x_port_send(const struct device *dev, struct net_pkt *pkt) { struct lan865x_data *ctx = dev->data; struct oa_tc6 *tc6 = ctx->tc6; int ret; k_sem_take(&ctx->tx_rx_sem, K_FOREVER); ret = oa_tc6_send_chunks(tc6, pkt); /* Check if rca > 0 during half-duplex TX transmission */ if (tc6->rca > 0) { k_sem_give(&ctx->int_sem); } k_sem_give(&ctx->tx_rx_sem); if (ret < 0) { LOG_ERR("TX transmission error, %d", ret); eth_stats_update_errors_tx(net_pkt_iface(pkt)); return ret; } return 0; } static const struct ethernet_api lan865x_api_func = { .iface_api.init = lan865x_iface_init, .get_capabilities = lan865x_port_get_capabilities, .set_config = lan865x_set_config, .send = lan865x_port_send, }; #define LAN865X_DEFINE(inst) \ static struct lan865x_config_plca lan865x_config_plca_##inst = { \ .node_id = DT_INST_PROP(inst, plca_node_id), \ .node_count = DT_INST_PROP(inst, plca_node_count), \ .burst_count = DT_INST_PROP(inst, plca_burst_count), \ .burst_timer = DT_INST_PROP(inst, plca_burst_timer), \ .to_timer = DT_INST_PROP(inst, plca_to_timer), \ .enable = DT_INST_PROP(inst, plca_enable), \ }; \ \ static const struct lan865x_config lan865x_config_##inst = { \ .spi = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0), \ .interrupt = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ .reset = GPIO_DT_SPEC_INST_GET(inst, rst_gpios), \ .timeout = CONFIG_ETH_LAN865X_TIMEOUT, \ .plca = &lan865x_config_plca_##inst, \ }; \ \ struct oa_tc6 oa_tc6_##inst = { \ .cps = 64, \ .protected = 0, \ .spi = &lan865x_config_##inst.spi \ }; \ static struct lan865x_data lan865x_data_##inst = { \ .mac_address = DT_INST_PROP(inst, local_mac_address), \ .tx_rx_sem = \ Z_SEM_INITIALIZER((lan865x_data_##inst).tx_rx_sem, 1, 1), \ .int_sem = Z_SEM_INITIALIZER((lan865x_data_##inst).int_sem, 0, 1), \ .tc6 = &oa_tc6_##inst \ }; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(inst, lan865x_init, NULL, &lan865x_data_##inst, \ &lan865x_config_##inst, CONFIG_ETH_INIT_PRIORITY, \ &lan865x_api_func, 
NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(LAN865X_DEFINE); ```
/content/code_sandbox/drivers/ethernet/eth_lan865x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,863
```c /* * */ #define DT_DRV_COMPAT litex_liteeth #define LOG_MODULE_NAME eth_litex_liteeth #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <soc.h> #include <stdbool.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/phy.h> #include <zephyr/sys/printk.h> #include <zephyr/irq.h> #include "eth.h" #define MAX_TX_FAILURE 100 struct eth_litex_dev_data { struct net_if *iface; uint8_t mac_addr[6]; uint8_t txslot; }; struct eth_litex_config { const struct device *phy_dev; void (*config_func)(const struct device *dev); bool random_mac_address; uint32_t rx_slot_addr; uint32_t rx_length_addr; uint32_t rx_ev_pending_addr; uint32_t rx_ev_enable_addr; uint32_t tx_start_addr; uint32_t tx_ready_addr; uint32_t tx_slot_addr; uint32_t tx_length_addr; uint32_t tx_ev_pending_addr; uint32_t tx_ev_enable_addr; uint8_t *tx_buf[2]; uint8_t *rx_buf[2]; }; static int eth_initialize(const struct device *dev) { const struct eth_litex_config *config = dev->config; struct eth_litex_dev_data *context = dev->data; config->config_func(dev); /* TX event is disabled because it isn't used by this driver */ litex_write8(0, config->tx_ev_enable_addr); if (config->random_mac_address) { /* generate random MAC address */ gen_random_mac(context->mac_addr, 0x10, 0xe2, 0xd5); } return 0; } static int eth_tx(const struct device *dev, struct net_pkt *pkt) { unsigned int key; uint16_t len; struct eth_litex_dev_data *context = dev->data; const struct eth_litex_config *config = dev->config; key = irq_lock(); int attempts = 0; /* get data from packet and send it */ len = net_pkt_get_len(pkt); net_pkt_read(pkt, config->tx_buf[context->txslot], len); litex_write8(context->txslot, config->tx_slot_addr); litex_write16(len, config->tx_length_addr); /* wait for the device to be ready to transmit */ while 
(litex_read8(config->tx_ready_addr) == 0) { if (attempts++ == MAX_TX_FAILURE) { goto error; } k_sleep(K_MSEC(1)); } /* start transmitting */ litex_write8(1, config->tx_start_addr); /* change slot */ context->txslot = (context->txslot + 1) % 2; irq_unlock(key); return 0; error: irq_unlock(key); LOG_ERR("TX fifo failed"); return -1; } static void eth_rx(const struct device *port) { struct net_pkt *pkt; struct eth_litex_dev_data *context = port->data; const struct eth_litex_config *config = port->config; int r; unsigned int key; uint16_t len = 0; uint8_t rxslot = 0; if (!net_if_flag_is_set(context->iface, NET_IF_UP)) { return; } key = irq_lock(); /* get frame's length */ len = litex_read16(config->rx_length_addr); /* which slot is the frame in */ rxslot = litex_read8(config->rx_slot_addr); /* obtain rx buffer */ pkt = net_pkt_rx_alloc_with_buffer(context->iface, len, AF_UNSPEC, 0, K_NO_WAIT); if (pkt == NULL) { LOG_ERR("Failed to obtain RX buffer"); goto out; } /* copy data to buffer */ if (net_pkt_write(pkt, (void *)config->rx_buf[rxslot], len) != 0) { LOG_ERR("Failed to append RX buffer to context buffer"); net_pkt_unref(pkt); goto out; } /* receive data */ r = net_recv_data(context->iface, pkt); if (r < 0) { LOG_ERR("Failed to enqueue frame into RX queue: %d", r); net_pkt_unref(pkt); } out: irq_unlock(key); } static void eth_irq_handler(const struct device *port) { const struct eth_litex_config *config = port->config; /* check sram reader events (tx) */ if (litex_read8(config->tx_ev_pending_addr) & BIT(0)) { /* TX event is not enabled nor used by this driver; ack just * in case if some rogue TX event appeared */ litex_write8(BIT(0), config->tx_ev_pending_addr); } /* check sram writer events (rx) */ if (litex_read8(config->rx_ev_pending_addr) & BIT(0)) { eth_rx(port); /* ack writer irq */ litex_write8(BIT(0), config->rx_ev_pending_addr); } } static int eth_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { 
struct eth_litex_dev_data *context = dev->data; int ret = -ENOTSUP; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(context->mac_addr, config->mac_address.addr, sizeof(context->mac_addr)); ret = net_if_set_link_addr(context->iface, context->mac_addr, sizeof(context->mac_addr), NET_LINK_ETHERNET); break; default: break; } return ret; } static int eth_start(const struct device *dev) { const struct eth_litex_config *config = dev->config; litex_write8(1, config->rx_ev_enable_addr); litex_write8(BIT(0), config->tx_ev_pending_addr); litex_write8(BIT(0), config->rx_ev_pending_addr); return 0; } static int eth_stop(const struct device *dev) { const struct eth_litex_config *config = dev->config; litex_write8(0, config->rx_ev_enable_addr); return 0; } static const struct device *eth_get_phy(const struct device *dev) { const struct eth_litex_config *config = dev->config; return config->phy_dev; } static void phy_link_state_changed(const struct device *phy_dev, struct phy_link_state *state, void *user_data) { const struct device *dev = (const struct device *)user_data; struct eth_litex_dev_data *context = dev->data; ARG_UNUSED(phy_dev); if (state->is_up) { net_eth_carrier_on(context->iface); } else { net_eth_carrier_off(context->iface); } } static void eth_iface_init(struct net_if *iface) { const struct device *port = net_if_get_device(iface); const struct eth_litex_config *config = port->config; struct eth_litex_dev_data *context = port->data; /* set interface */ if (context->iface == NULL) { context->iface = iface; } /* initialize ethernet L2 */ ethernet_init(iface); /* set MAC address */ if (net_if_set_link_addr(iface, context->mac_addr, sizeof(context->mac_addr), NET_LINK_ETHERNET) < 0) { LOG_ERR("setting mac failed"); return; } if (config->phy_dev == NULL) { LOG_WRN("No PHY device"); return; } net_if_carrier_off(iface); if (device_is_ready(config->phy_dev)) { phy_link_callback_set(config->phy_dev, phy_link_state_changed, (void *)port); } else { LOG_ERR("PHY 
device not ready"); } } static enum ethernet_hw_caps eth_caps(const struct device *dev) { ARG_UNUSED(dev); return #ifdef CONFIG_NET_VLAN ETHERNET_HW_VLAN | #endif ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T; } static const struct ethernet_api eth_api = { .iface_api.init = eth_iface_init, .start = eth_start, .stop = eth_stop, .get_capabilities = eth_caps, .set_config = eth_set_config, .get_phy = eth_get_phy, .send = eth_tx }; #define ETH_LITEX_SLOT_SIZE 0x0800 #define ETH_LITEX_SLOT_RX0_ADDR(n) \ DT_INST_REG_ADDR_BY_NAME_OR(n, rx_buffers, (DT_INST_REG_ADDR_BY_NAME(n, buffers))) #define ETH_LITEX_SLOT_RX1_ADDR(n) (ETH_LITEX_SLOT_RX0_ADDR(n) + ETH_LITEX_SLOT_SIZE) #define ETH_LITEX_SLOT_TX0_ADDR(n) \ DT_INST_REG_ADDR_BY_NAME_OR(n, tx_buffers, \ (DT_INST_REG_ADDR_BY_NAME(n, buffers) + \ (DT_INST_REG_SIZE_BY_NAME(n, buffers) / 2))) #define ETH_LITEX_SLOT_TX1_ADDR(n) (ETH_LITEX_SLOT_TX0_ADDR(n) + ETH_LITEX_SLOT_SIZE) #define ETH_LITEX_INIT(n) \ \ static void eth_irq_config##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), eth_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ \ irq_enable(DT_INST_IRQN(n)); \ } \ \ static struct eth_litex_dev_data eth_data##n = { \ .mac_addr = DT_INST_PROP(n, local_mac_address)}; \ \ static const struct eth_litex_config eth_config##n = { \ IF_ENABLED(DT_INST_NODE_HAS_PROP(n, phy_handle), \ (.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)),)) \ .config_func = eth_irq_config##n, \ .random_mac_address = DT_INST_PROP(n, zephyr_random_mac_address), \ .rx_slot_addr = DT_INST_REG_ADDR_BY_NAME(n, rx_slot), \ .rx_length_addr = DT_INST_REG_ADDR_BY_NAME(n, rx_length), \ .rx_ev_pending_addr = DT_INST_REG_ADDR_BY_NAME(n, rx_ev_pending), \ .rx_ev_enable_addr = DT_INST_REG_ADDR_BY_NAME(n, rx_ev_enable), \ .tx_start_addr = DT_INST_REG_ADDR_BY_NAME(n, tx_start), \ .tx_ready_addr = DT_INST_REG_ADDR_BY_NAME(n, tx_ready), \ .tx_slot_addr = DT_INST_REG_ADDR_BY_NAME(n, tx_slot), \ 
.tx_length_addr = DT_INST_REG_ADDR_BY_NAME(n, tx_length), \ .tx_ev_pending_addr = DT_INST_REG_ADDR_BY_NAME(n, tx_ev_pending), \ .tx_ev_enable_addr = DT_INST_REG_ADDR_BY_NAME(n, tx_ev_enable), \ .rx_buf = { \ (uint8_t *)ETH_LITEX_SLOT_RX0_ADDR(n), \ (uint8_t *)ETH_LITEX_SLOT_RX1_ADDR(n), \ \ }, \ .tx_buf = { \ (uint8_t *)ETH_LITEX_SLOT_TX0_ADDR(n), \ (uint8_t *)ETH_LITEX_SLOT_TX1_ADDR(n), \ }, \ \ }; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_initialize, NULL, &eth_data##n, &eth_config##n, \ CONFIG_ETH_INIT_PRIORITY, &eth_api, NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(ETH_LITEX_INIT); ```
/content/code_sandbox/drivers/ethernet/eth_litex_liteeth.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,621
```unknown
# NXP PHY TJA1103 driver configuration options

menuconfig PHY_TJA1103
	bool "TJA1103 PHY driver"
	default y
	depends on DT_HAS_NXP_TJA1103_ENABLED
	depends on MDIO
	help
	  Enable TJA1103 PHY driver.

if PHY_TJA1103

config PHY_TJA1103_IRQ_THREAD_STACK_SIZE
	int "Stack size for a thread that processes TJA1103 IRQ"
	default 2048
	help
	  Size of the stack used for the internal thread which runs to
	  process a raised INT IRQ.

config PHY_TJA1103_IRQ_THREAD_PRIO
	int "Priority for internal incoming packet handler"
	default 2
	help
	  Priority level for the internal thread which runs for
	  TJA1103 INT IRQ processing.

endif # PHY_TJA1103
```
/content/code_sandbox/drivers/ethernet/phy/Kconfig.tja1103
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
177
```c /* * * Inspiration from phy_mii.c, which is: * */ #define DT_DRV_COMPAT microchip_ksz8081 #include <zephyr/kernel.h> #include <zephyr/net/phy.h> #include <zephyr/net/mii.h> #include <zephyr/drivers/mdio.h> #include <string.h> #include <zephyr/sys/util_macro.h> #include <zephyr/drivers/gpio.h> #define LOG_MODULE_NAME phy_mc_ksz8081 #define LOG_LEVEL CONFIG_PHY_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #define PHY_MC_KSZ8081_OMSO_REG 0x16 #define PHY_MC_KSZ8081_OMSO_FACTORY_MODE_MASK BIT(15) #define PHY_MC_KSZ8081_OMSO_NAND_TREE_MASK BIT(5) #define PHY_MC_KSZ8081_CTRL2_REG 0x1F #define PHY_MC_KSZ8081_CTRL2_REF_CLK_SEL BIT(7) #define PHY_MC_KSZ8081_RESET_HOLD_TIME enum ksz8081_interface { KSZ8081_MII, KSZ8081_RMII, KSZ8081_RMII_25MHZ, }; struct mc_ksz8081_config { uint8_t addr; const struct device *mdio_dev; enum ksz8081_interface phy_iface; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) const struct gpio_dt_spec reset_gpio; #endif #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios) const struct gpio_dt_spec interrupt_gpio; #endif }; struct mc_ksz8081_data { const struct device *dev; struct phy_link_state state; phy_callback_t cb; void *cb_data; struct k_mutex mutex; struct k_work_delayable phy_monitor_work; }; static int phy_mc_ksz8081_read(const struct device *dev, uint16_t reg_addr, uint32_t *data) { const struct mc_ksz8081_config *config = dev->config; int ret; /* Make sure excessive bits 16-31 are reset */ *data = 0U; ret = mdio_read(config->mdio_dev, config->addr, reg_addr, (uint16_t *)data); if (ret) { return ret; } return 0; } static int phy_mc_ksz8081_write(const struct device *dev, uint16_t reg_addr, uint32_t data) { const struct mc_ksz8081_config *config = dev->config; int ret; ret = mdio_write(config->mdio_dev, config->addr, reg_addr, (uint16_t)data); if (ret) { return ret; } return 0; } static int phy_mc_ksz8081_autonegotiate(const struct device *dev) { const struct mc_ksz8081_config *config = dev->config; int 
ret; uint32_t bmcr = 0; uint32_t bmsr = 0; uint16_t timeout = CONFIG_PHY_AUTONEG_TIMEOUT_MS / 100; /* Read control register to write back with autonegotiation bit */ ret = phy_mc_ksz8081_read(dev, MII_BMCR, &bmcr); if (ret) { LOG_ERR("Error reading phy (%d) basic control register", config->addr); return ret; } /* (re)start autonegotiation */ LOG_DBG("PHY (%d) is entering autonegotiation sequence", config->addr); bmcr |= MII_BMCR_AUTONEG_ENABLE | MII_BMCR_AUTONEG_RESTART; bmcr &= ~MII_BMCR_ISOLATE; ret = phy_mc_ksz8081_write(dev, MII_BMCR, bmcr); if (ret) { LOG_ERR("Error writing phy (%d) basic control register", config->addr); return ret; } /* TODO change this to GPIO interrupt driven */ do { if (timeout-- == 0) { LOG_DBG("PHY (%d) autonegotiation timed out", config->addr); /* The value -ETIMEDOUT can be returned by PHY read/write functions, so * return -ENETDOWN instead to distinguish link timeout from PHY timeout. */ return -ENETDOWN; } k_msleep(100); ret = phy_mc_ksz8081_read(dev, MII_BMSR, &bmsr); if (ret) { LOG_ERR("Error reading phy (%d) basic status register", config->addr); return ret; } } while (!(bmsr & MII_BMSR_AUTONEG_COMPLETE)); LOG_DBG("PHY (%d) autonegotiation completed", config->addr); return 0; } static int phy_mc_ksz8081_get_link(const struct device *dev, struct phy_link_state *state) { const struct mc_ksz8081_config *config = dev->config; struct mc_ksz8081_data *data = dev->data; int ret; uint32_t bmsr = 0; uint32_t anar = 0; uint32_t anlpar = 0; struct phy_link_state old_state = data->state; /* Lock mutex */ ret = k_mutex_lock(&data->mutex, K_FOREVER); if (ret) { LOG_ERR("PHY mutex lock error"); return ret; } /* Read link state */ ret = phy_mc_ksz8081_read(dev, MII_BMSR, &bmsr); if (ret) { LOG_ERR("Error reading phy (%d) basic status register", config->addr); k_mutex_unlock(&data->mutex); return ret; } state->is_up = bmsr & MII_BMSR_LINK_STATUS; if (!state->is_up) { k_mutex_unlock(&data->mutex); goto result; } /* Read currently configured 
advertising options */ ret = phy_mc_ksz8081_read(dev, MII_ANAR, &anar); if (ret) { LOG_ERR("Error reading phy (%d) advertising register", config->addr); k_mutex_unlock(&data->mutex); return ret; } /* Read link partner capability */ ret = phy_mc_ksz8081_read(dev, MII_ANLPAR, &anlpar); if (ret) { LOG_ERR("Error reading phy (%d) link partner register", config->addr); k_mutex_unlock(&data->mutex); return ret; } /* Unlock mutex */ k_mutex_unlock(&data->mutex); uint32_t mutual_capabilities = anar & anlpar; if (mutual_capabilities & MII_ADVERTISE_100_FULL) { state->speed = LINK_FULL_100BASE_T; } else if (mutual_capabilities & MII_ADVERTISE_100_HALF) { state->speed = LINK_HALF_100BASE_T; } else if (mutual_capabilities & MII_ADVERTISE_10_FULL) { state->speed = LINK_FULL_10BASE_T; } else if (mutual_capabilities & MII_ADVERTISE_10_HALF) { state->speed = LINK_HALF_10BASE_T; } else { ret = -EIO; } result: if (memcmp(&old_state, state, sizeof(struct phy_link_state)) != 0) { LOG_DBG("PHY %d is %s", config->addr, state->is_up ? "up" : "down"); if (state->is_up) { LOG_DBG("PHY (%d) Link speed %s Mb, %s duplex\n", config->addr, (PHY_LINK_IS_SPEED_100M(state->speed) ? "100" : "10"), PHY_LINK_IS_FULL_DUPLEX(state->speed) ? 
"full" : "half"); } } return ret; } /* * Configuration set statically (DT) that should never change * This function is needed in case the PHY is reset then the next call * to configure the phy will ensure this configuration will be redone */ static int phy_mc_ksz8081_static_cfg(const struct device *dev) { const struct mc_ksz8081_config *config = dev->config; uint32_t omso = 0; uint32_t ctrl2 = 0; int ret = 0; /* Force normal operation in the case of factory mode */ ret = phy_mc_ksz8081_read(dev, PHY_MC_KSZ8081_OMSO_REG, (uint32_t *)&omso); if (ret) { return ret; } omso &= ~PHY_MC_KSZ8081_OMSO_FACTORY_MODE_MASK & ~PHY_MC_KSZ8081_OMSO_NAND_TREE_MASK; ret = phy_mc_ksz8081_write(dev, PHY_MC_KSZ8081_OMSO_REG, (uint32_t)omso); if (ret) { return ret; } /* Select correct reference clock mode depending on interface setup */ ret = phy_mc_ksz8081_read(dev, PHY_MC_KSZ8081_CTRL2_REG, (uint32_t *)&ctrl2); if (ret) { return ret; } if (config->phy_iface == KSZ8081_RMII) { ctrl2 |= PHY_MC_KSZ8081_CTRL2_REF_CLK_SEL; } else { ctrl2 &= ~PHY_MC_KSZ8081_CTRL2_REF_CLK_SEL; } ret = phy_mc_ksz8081_write(dev, PHY_MC_KSZ8081_CTRL2_REG, (uint32_t)ctrl2); if (ret) { return ret; } return 0; } static int phy_mc_ksz8081_reset(const struct device *dev) { #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) const struct mc_ksz8081_config *config = dev->config; #endif /* DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) */ struct mc_ksz8081_data *data = dev->data; int ret; /* Lock mutex */ ret = k_mutex_lock(&data->mutex, K_FOREVER); if (ret) { LOG_ERR("PHY mutex lock error"); return ret; } #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) if (!config->reset_gpio.port) { goto skip_reset_gpio; } /* Start reset */ ret = gpio_pin_set_dt(&config->reset_gpio, 0); if (ret) { goto done; } /* Wait for 500 ms as specified by datasheet */ k_busy_wait(USEC_PER_MSEC * 500); /* Reset over */ ret = gpio_pin_set_dt(&config->reset_gpio, 1); goto done; skip_reset_gpio: #endif /* 
DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) */ ret = phy_mc_ksz8081_write(dev, MII_BMCR, MII_BMCR_RESET); if (ret) { goto done; } /* Wait for 500 ms as specified by datasheet */ k_busy_wait(USEC_PER_MSEC * 500); done: /* Unlock mutex */ k_mutex_unlock(&data->mutex); return ret; } static int phy_mc_ksz8081_cfg_link(const struct device *dev, enum phy_link_speed speeds) { const struct mc_ksz8081_config *config = dev->config; struct mc_ksz8081_data *data = dev->data; struct phy_link_state state = {}; int ret; uint32_t anar; /* Lock mutex */ ret = k_mutex_lock(&data->mutex, K_FOREVER); if (ret) { LOG_ERR("PHY mutex lock error"); goto done; } /* We are going to reconfigure the phy, don't need to monitor until done */ k_work_cancel_delayable(&data->phy_monitor_work); /* Reset PHY */ ret = phy_mc_ksz8081_reset(dev); if (ret) { goto done; } /* DT configurations */ ret = phy_mc_ksz8081_static_cfg(dev); if (ret) { goto done; } /* Read ANAR register to write back */ ret = phy_mc_ksz8081_read(dev, MII_ANAR, &anar); if (ret) { LOG_ERR("Error reading phy (%d) advertising register", config->addr); goto done; } /* Setup advertising register */ if (speeds & LINK_FULL_100BASE_T) { anar |= MII_ADVERTISE_100_FULL; } else { anar &= ~MII_ADVERTISE_100_FULL; } if (speeds & LINK_HALF_100BASE_T) { anar |= MII_ADVERTISE_100_HALF; } else { anar &= ~MII_ADVERTISE_100_HALF; } if (speeds & LINK_FULL_10BASE_T) { anar |= MII_ADVERTISE_10_FULL; } else { anar &= ~MII_ADVERTISE_10_FULL; } if (speeds & LINK_HALF_10BASE_T) { anar |= MII_ADVERTISE_10_HALF; } else { anar &= ~MII_ADVERTISE_10_HALF; } /* Write capabilities to advertising register */ ret = phy_mc_ksz8081_write(dev, MII_ANAR, anar); if (ret) { LOG_ERR("Error writing phy (%d) advertising register", config->addr); goto done; } /* (re)do autonegotiation */ ret = phy_mc_ksz8081_autonegotiate(dev); if (ret && (ret != -ENETDOWN)) { LOG_ERR("Error in autonegotiation"); goto done; } /* Get link status */ ret = phy_mc_ksz8081_get_link(dev, &state); 
if (ret == 0 && memcmp(&state, &data->state, sizeof(struct phy_link_state)) != 0) { memcpy(&data->state, &state, sizeof(struct phy_link_state)); if (data->cb) { data->cb(dev, &data->state, data->cb_data); } } /* Log the results of the configuration */ LOG_INF("PHY %d is %s", config->addr, data->state.is_up ? "up" : "down"); if (data->state.is_up) { LOG_INF("PHY (%d) Link speed %s Mb, %s duplex\n", config->addr, (PHY_LINK_IS_SPEED_100M(data->state.speed) ? "100" : "10"), PHY_LINK_IS_FULL_DUPLEX(data->state.speed) ? "full" : "half"); } done: /* Unlock mutex */ k_mutex_unlock(&data->mutex); /* Start monitoring */ k_work_reschedule(&data->phy_monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD)); return ret; } static int phy_mc_ksz8081_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data) { struct mc_ksz8081_data *data = dev->data; data->cb = cb; data->cb_data = user_data; phy_mc_ksz8081_get_link(dev, &data->state); data->cb(dev, &data->state, data->cb_data); return 0; } static void phy_mc_ksz8081_monitor_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct mc_ksz8081_data *data = CONTAINER_OF(dwork, struct mc_ksz8081_data, phy_monitor_work); const struct device *dev = data->dev; struct phy_link_state state = {}; int rc; rc = phy_mc_ksz8081_get_link(dev, &state); if (rc == 0 && memcmp(&state, &data->state, sizeof(struct phy_link_state)) != 0) { memcpy(&data->state, &state, sizeof(struct phy_link_state)); if (data->cb) { data->cb(dev, &data->state, data->cb_data); } } /* TODO change this to GPIO interrupt driven */ k_work_reschedule(&data->phy_monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD)); } static int phy_mc_ksz8081_init(const struct device *dev) { const struct mc_ksz8081_config *config = dev->config; struct mc_ksz8081_data *data = dev->data; int ret; data->dev = dev; ret = k_mutex_init(&data->mutex); if (ret) { return ret; } mdio_bus_enable(config->mdio_dev); #if 
DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios) if (!config->interrupt_gpio.port) { goto skip_int_gpio; } /* Prevent NAND TREE mode */ ret = gpio_pin_configure_dt(&config->interrupt_gpio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } skip_int_gpio: #endif /* DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios) */ #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) if (config->reset_gpio.port) { ret = gpio_pin_configure_dt(&config->reset_gpio, GPIO_OUTPUT_ACTIVE); if (ret) { return ret; } } #endif /* DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) */ /* Reset PHY */ ret = phy_mc_ksz8081_reset(dev); if (ret) { return ret; } k_work_init_delayable(&data->phy_monitor_work, phy_mc_ksz8081_monitor_work_handler); return 0; } static const struct ethphy_driver_api mc_ksz8081_phy_api = { .get_link = phy_mc_ksz8081_get_link, .cfg_link = phy_mc_ksz8081_cfg_link, .link_cb_set = phy_mc_ksz8081_link_cb_set, .read = phy_mc_ksz8081_read, .write = phy_mc_ksz8081_write, }; #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) #define RESET_GPIO(n) \ .reset_gpio = GPIO_DT_SPEC_INST_GET_OR(n, reset_gpios, {0}), #else #define RESET_GPIO(n) #endif /* reset gpio */ #if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios) #define INTERRUPT_GPIO(n) \ .interrupt_gpio = GPIO_DT_SPEC_INST_GET_OR(n, int_gpios, {0}), #else #define INTERRUPT_GPIO(n) #endif /* interrupt gpio */ #define MICROCHIP_KSZ8081_INIT(n) \ static const struct mc_ksz8081_config mc_ksz8081_##n##_config = { \ .addr = DT_INST_REG_ADDR(n), \ .mdio_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), \ .phy_iface = DT_INST_ENUM_IDX(n, microchip_interface_type), \ RESET_GPIO(n) \ INTERRUPT_GPIO(n) \ }; \ \ static struct mc_ksz8081_data mc_ksz8081_##n##_data; \ \ DEVICE_DT_INST_DEFINE(n, &phy_mc_ksz8081_init, NULL, \ &mc_ksz8081_##n##_data, &mc_ksz8081_##n##_config, \ POST_KERNEL, CONFIG_PHY_INIT_PRIORITY, \ &mc_ksz8081_phy_api); DT_INST_FOREACH_STATUS_OKAY(MICROCHIP_KSZ8081_INIT) ```
/content/code_sandbox/drivers/ethernet/phy/phy_microchip_ksz8081.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,241
```c /* * */ #include <zephyr/logging/log.h> #if DT_NODE_HAS_STATUS(DT_INST(0, adi_adin2111_phy), okay) #define DT_DRV_COMPAT adi_adin2111_phy #else #define DT_DRV_COMPAT adi_adin1100_phy #endif LOG_MODULE_REGISTER(DT_DRV_COMPAT, CONFIG_PHY_LOG_LEVEL); #include <errno.h> #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/util.h> #include <zephyr/net/phy.h> #include <zephyr/net/mii.h> #include <zephyr/net/mdio.h> #include <zephyr/drivers/mdio.h> /* PHYs out of reset check retry delay */ #define ADIN2111_PHY_AWAIT_DELAY_POLL_US 15U /* * Number of retries for PHYs out of reset check, * rmii variants as ADIN11XX need 70ms maximum after hw reset to be up, * so the increasing the count for that, as default 25ms (sw reset) + 45. */ #define ADIN2111_PHY_AWAIT_RETRY_COUNT 3000U /* PHY's software powerdown check retry delay */ #define ADIN2111_PHY_SFT_PD_DELAY_POLL_US 15U /* Number of retries for PHY's software powerdown check */ #define ADIN2111_PHY_SFT_PD_RETRY_COUNT 200U /* Software reset, CLK_25 disabled time*/ #define ADIN1100_PHY_SFT_RESET_MS 25U /* PHYs autonegotiation complete timeout */ #define ADIN2111_AN_COMPLETE_AWAIT_TIMEOUT_MS 3000U /* ADIN2111 PHY identifier */ #define ADIN2111_PHY_ID 0x0283BCA1U #define ADIN1110_PHY_ID 0x0283BC91U #define ADIN1100_PHY_ID 0x0283BC81U /* System Interrupt Mask Register */ #define ADIN2111_PHY_CRSM_IRQ_MASK 0x0020U /* System Interrupt Status Register */ #define ADIN2111_PHY_CRSM_IRQ_STATUS 0x0010U /** * Mask of reserved interrupts that indicates a fatal error in the system. * * There is inconsistency between RM and ADI driver example: * - RM mask 0x6FFF * - ADI driver example mask 0x2BFF * * The value from the example doesn't include reserved bits 10 and 14. * The tests show that PHY is still functioning when bit 10 is raised. * * Here the value from ADI driver example is used instead of RM. 
*/ #define ADIN2111_PHY_CRSM_IRQ_STATUS_FATAL_ERR 0x2BFFU /* PHY Subsystem Interrupt Mask Register */ #define ADIN2111_PHY_SUBSYS_IRQ_MASK 0x0021U /* PHY Subsystem Interrupt Status Register */ #define ADIN2111_PHY_SUBSYS_IRQ_STATUS 0x0011U /* Link Status Change */ #define ADIN2111_PHY_SUBSYS_IRQ_STATUS_LINK_STAT_CHNG_LH BIT(1) /* Software Power-down Control Register */ #define ADIN2111_PHY_CRSM_SFT_PD_CNTRL 0x8812U /* System Status Register */ #define ADIN2111_PHY_CRSM_STAT 0x8818U /* Software Power-down Status */ #define ADIN2111_CRSM_STAT_CRSM_SFT_PD_RDY BIT(1) /* LED Control Register */ #define ADIN2111_PHY_LED_CNTRL 0x8C82U /* LED 1 Enable */ #define ADIN2111_PHY_LED_CNTRL_LED1_EN BIT(15) /* LED 0 Enable */ #define ADIN2111_PHY_LED_CNTRL_LED0_EN BIT(7) /* MMD bridge regs */ #define ADIN1100_MMD_ACCESS_CNTRL 0x0DU #define ADIN1100_MMD_ACCESS 0x0EU struct phy_adin2111_config { const struct device *mdio; uint8_t phy_addr; bool led0_en; bool led1_en; bool tx_24v; bool mii; }; struct phy_adin2111_data { const struct device *dev; struct phy_link_state state; struct k_sem sem; struct k_work_delayable monitor_work; phy_callback_t cb; void *cb_data; }; static inline int phy_adin2111_c22_read(const struct device *dev, uint16_t reg, uint16_t *val) { const struct phy_adin2111_config *const cfg = dev->config; return mdio_read(cfg->mdio, cfg->phy_addr, reg, val); } static inline int phy_adin2111_c22_write(const struct device *dev, uint16_t reg, uint16_t val) { const struct phy_adin2111_config *const cfg = dev->config; return mdio_write(cfg->mdio, cfg->phy_addr, reg, val); } static int phy_adin2111_c45_setup_dev_reg(const struct device *dev, uint16_t devad, uint16_t reg) { const struct phy_adin2111_config *cfg = dev->config; int rval; rval = mdio_write(cfg->mdio, cfg->phy_addr, ADIN1100_MMD_ACCESS_CNTRL, devad); if (rval < 0) { return rval; } rval = mdio_write(cfg->mdio, cfg->phy_addr, ADIN1100_MMD_ACCESS, reg); if (rval < 0) { return rval; } return mdio_write(cfg->mdio, 
cfg->phy_addr, ADIN1100_MMD_ACCESS_CNTRL, devad | BIT(14)); } static int phy_adin2111_c45_read(const struct device *dev, uint16_t devad, uint16_t reg, uint16_t *val) { const struct phy_adin2111_config *cfg = dev->config; int rval; if (cfg->mii) { /* Using C22 -> devad bridge */ rval = phy_adin2111_c45_setup_dev_reg(dev, devad, reg); if (rval < 0) { return rval; } return mdio_read(cfg->mdio, cfg->phy_addr, ADIN1100_MMD_ACCESS, val); } return mdio_read_c45(cfg->mdio, cfg->phy_addr, devad, reg, val); } static int phy_adin2111_c45_write(const struct device *dev, uint16_t devad, uint16_t reg, uint16_t val) { const struct phy_adin2111_config *cfg = dev->config; int rval; if (cfg->mii) { /* Using C22 -> devad bridge */ rval = phy_adin2111_c45_setup_dev_reg(dev, devad, reg); if (rval < 0) { return rval; } return mdio_write(cfg->mdio, cfg->phy_addr, ADIN1100_MMD_ACCESS, val); } return mdio_write_c45(cfg->mdio, cfg->phy_addr, devad, reg, val); } static int phy_adin2111_reg_read(const struct device *dev, uint16_t reg_addr, uint32_t *data) { const struct phy_adin2111_config *cfg = dev->config; int ret; mdio_bus_enable(cfg->mdio); ret = phy_adin2111_c22_read(dev, reg_addr, (uint16_t *) data); mdio_bus_disable(cfg->mdio); return ret; } static int phy_adin2111_reg_write(const struct device *dev, uint16_t reg_addr, uint32_t data) { const struct phy_adin2111_config *cfg = dev->config; int ret; mdio_bus_enable(cfg->mdio); ret = phy_adin2111_c22_write(dev, reg_addr, (uint16_t) data); mdio_bus_disable(cfg->mdio); return ret; } static int phy_adin2111_await_phy(const struct device *dev) { int ret; uint32_t count; uint16_t val; /** * Port 2 PHY comes out of reset after Port 1 PHY, * wait until both are out of reset. * Reading Port 2 PHY registers returns 0s until * it comes out from reset. 
*/ for (count = 0U; count < ADIN2111_PHY_AWAIT_RETRY_COUNT; ++count) { ret = phy_adin2111_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_CRSM_IRQ_MASK, &val); if (ret >= 0) { if (val != 0U) { break; } ret = -ETIMEDOUT; } k_sleep(K_USEC(ADIN2111_PHY_AWAIT_DELAY_POLL_US)); } return ret; } static int phy_adin2111_an_state_read(const struct device *dev) { struct phy_adin2111_data *const data = dev->data; uint16_t bmsr; int ret; /* read twice to get actual link status, latch low */ ret = phy_adin2111_c22_read(dev, MII_BMSR, &bmsr); if (ret < 0) { return ret; } ret = phy_adin2111_c22_read(dev, MII_BMSR, &bmsr); if (ret < 0) { return ret; } data->state.is_up = !!(bmsr & MII_BMSR_LINK_STATUS); return 0; } int phy_adin2111_handle_phy_irq(const struct device *dev, struct phy_link_state *state) { struct phy_adin2111_data *const data = dev->data; uint16_t subsys_status; int ret; ret = phy_adin2111_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC2, ADIN2111_PHY_SUBSYS_IRQ_STATUS, &subsys_status); if (ret < 0) { return ret; } if ((subsys_status & ADIN2111_PHY_SUBSYS_IRQ_STATUS_LINK_STAT_CHNG_LH) == 0U) { /* nothing to process */ return -EAGAIN; } k_sem_take(&data->sem, K_FOREVER); ret = phy_adin2111_an_state_read(dev); memcpy(state, &data->state, sizeof(struct phy_link_state)); k_sem_give(&data->sem); return ret; } static int phy_adin2111_sft_pd(const struct device *dev, bool enter) { int ret; uint32_t count; const uint16_t expected = enter ? ADIN2111_CRSM_STAT_CRSM_SFT_PD_RDY : 0U; uint16_t val; ret = phy_adin2111_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_CRSM_SFT_PD_CNTRL, enter ? 
1U : 0U); if (ret < 0) { return ret; } for (count = 0U; count < ADIN2111_PHY_SFT_PD_RETRY_COUNT; ++count) { ret = phy_adin2111_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_CRSM_STAT, &val); if (ret >= 0) { if ((val & ADIN2111_CRSM_STAT_CRSM_SFT_PD_RDY) == expected) { break; } ret = -ETIMEDOUT; } k_sleep(K_USEC(ADIN2111_PHY_SFT_PD_DELAY_POLL_US)); } return ret; } static int phy_adin2111_id(const struct device *dev, uint32_t *phy_id) { uint16_t val; if (phy_adin2111_c22_read(dev, MII_PHYID1R, &val) < 0) { return -EIO; } *phy_id = (val & UINT16_MAX) << 16; if (phy_adin2111_c22_read(dev, MII_PHYID2R, &val) < 0) { return -EIO; } *phy_id |= (val & UINT16_MAX); return 0; } static int phy_adin2111_get_link_state(const struct device *dev, struct phy_link_state *state) { struct phy_adin2111_data *const data = dev->data; k_sem_take(&data->sem, K_FOREVER); memcpy(state, &data->state, sizeof(struct phy_link_state)); k_sem_give(&data->sem); return 0; } static int phy_adin2111_cfg_link(const struct device *dev, enum phy_link_speed adv_speeds) { ARG_UNUSED(dev); if (!!(adv_speeds & LINK_FULL_10BASE_T)) { return 0; } return -ENOTSUP; } static int phy_adin2111_reset(const struct device *dev) { int ret; ret = phy_adin2111_c22_write(dev, MII_BMCR, MII_BMCR_RESET); if (ret < 0) { return ret; } k_msleep(ADIN1100_PHY_SFT_RESET_MS); return 0; } static void invoke_link_cb(const struct device *dev) { struct phy_adin2111_data *const data = dev->data; struct phy_link_state state; if (data->cb == NULL) { return; } data->cb(dev, &state, data->cb_data); } static int update_link_state(const struct device *dev) { struct phy_adin2111_data *const data = dev->data; const struct phy_adin2111_config *config = dev->config; struct phy_link_state old_state; uint16_t bmsr; int ret; ret = phy_adin2111_c22_read(dev, MII_BMSR, &bmsr); if (ret < 0) { return ret; } old_state = data->state; data->state.is_up = !!(bmsr & MII_BMSR_LINK_STATUS); if (old_state.speed != data->state.speed || old_state.is_up 
!= data->state.is_up) { LOG_INF("PHY (%d) Link is %s", config->phy_addr, data->state.is_up ? "up" : "down"); if (data->state.is_up == false) { return 0; } invoke_link_cb(dev); LOG_INF("PHY (%d) Link speed %s Mb, %s duplex\n", config->phy_addr, (PHY_LINK_IS_SPEED_100M(data->state.speed) ? "100" : "10"), PHY_LINK_IS_FULL_DUPLEX(data->state.speed) ? "full" : "half"); } return 0; } static void monitor_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct phy_adin2111_data *const data = CONTAINER_OF(dwork, struct phy_adin2111_data, monitor_work); const struct device *dev = data->dev; int rc; k_sem_take(&data->sem, K_FOREVER); rc = update_link_state(dev); k_sem_give(&data->sem); /* Submit delayed work */ k_work_reschedule(&data->monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD)); } static int phy_adin2111_init(const struct device *dev) { const struct phy_adin2111_config *const cfg = dev->config; struct phy_adin2111_data *const data = dev->data; uint32_t phy_id; uint16_t val; bool tx_24v_supported = false; int ret; data->dev = dev; data->state.is_up = false; data->state.speed = LINK_FULL_10BASE_T; /* * For adin1100 and further mii stuff, * reset may not be performed from the mac layer, doing a clean reset here. 
*/ if (cfg->mii) { ret = phy_adin2111_reset(dev); if (ret < 0) { return ret; } } ret = phy_adin2111_await_phy(dev); if (ret < 0) { LOG_ERR("PHY %u didn't come out of reset, %d", cfg->phy_addr, ret); return -ENODEV; } ret = phy_adin2111_id(dev, &phy_id); if (ret < 0) { LOG_ERR("Failed to read PHY %u ID, %d", cfg->phy_addr, ret); return -ENODEV; } if (phy_id != ADIN2111_PHY_ID && phy_id != ADIN1110_PHY_ID && phy_id != ADIN1100_PHY_ID) { LOG_ERR("PHY %u unexpected PHY ID %X", cfg->phy_addr, phy_id); return -EINVAL; } LOG_INF("PHY %u ID %X", cfg->phy_addr, phy_id); /* enter software powerdown */ ret = phy_adin2111_sft_pd(dev, true); if (ret < 0) { return ret; } /* disable interrupts */ ret = phy_adin2111_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_CRSM_IRQ_MASK, 0U); if (ret < 0) { return ret; } /* enable link status change irq */ ret = phy_adin2111_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC2, ADIN2111_PHY_SUBSYS_IRQ_MASK, ADIN2111_PHY_SUBSYS_IRQ_STATUS_LINK_STAT_CHNG_LH); if (ret < 0) { return ret; } /* clear PHY IRQ status before enabling ADIN IRQs */ ret = phy_adin2111_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_CRSM_IRQ_STATUS, &val); if (ret < 0) { return ret; } if (val & ADIN2111_PHY_CRSM_IRQ_STATUS_FATAL_ERR) { LOG_ERR("PHY %u CRSM reports fatal system error", cfg->phy_addr); return -ENODEV; } ret = phy_adin2111_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC2, ADIN2111_PHY_SUBSYS_IRQ_STATUS, &val); if (ret < 0) { return ret; } if (!cfg->led0_en || !cfg->led1_en) { ret = phy_adin2111_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_LED_CNTRL, &val); if (ret < 0) { return ret; } if (!cfg->led0_en) { val &= ~(ADIN2111_PHY_LED_CNTRL_LED0_EN); } if (!cfg->led1_en) { val &= ~(ADIN2111_PHY_LED_CNTRL_LED1_EN); } ret = phy_adin2111_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, ADIN2111_PHY_LED_CNTRL, val); if (ret < 0) { return ret; } } /* check 2.4V support */ ret = phy_adin2111_c45_read(dev, MDIO_MMD_PMAPMD, MDIO_PMA_B10L_STAT, &val); if (ret < 0) { return 
ret; } tx_24v_supported = !!(val & MDIO_PMA_B10L_STAT_2V4_ABLE); LOG_INF("PHY %u 2.4V mode %s", cfg->phy_addr, tx_24v_supported ? "supported" : "not supported"); if (!cfg->tx_24v & tx_24v_supported) { LOG_ERR("PHY %u 2.4V mode supported, but not enabled", cfg->phy_addr); } /* config 2.4V auto-negotiation */ ret = phy_adin2111_c45_read(dev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, &val); if (ret < 0) { return ret; } if (tx_24v_supported) { val |= MDIO_AN_T1_ADV_H_10L_TX_HI; } else { val &= ~MDIO_AN_T1_ADV_H_10L_TX_HI; } if (cfg->tx_24v) { if (!tx_24v_supported) { LOG_ERR("PHY %u 2.4V mode enabled, but not supported", cfg->phy_addr); return -EINVAL; } val |= MDIO_AN_T1_ADV_H_10L_TX_HI_REQ; } else { val &= ~MDIO_AN_T1_ADV_H_10L_TX_HI_REQ; } ret = phy_adin2111_c45_write(dev, MDIO_MMD_AN, MDIO_AN_T1_ADV_H, val); if (ret < 0) { return ret; } /* enable auto-negotiation */ ret = phy_adin2111_c45_write(dev, MDIO_MMD_AN, MDIO_AN_T1_CTRL, MDIO_AN_T1_CTRL_EN); if (ret < 0) { return ret; } if (cfg->mii) { k_work_init_delayable(&data->monitor_work, monitor_work_handler); monitor_work_handler(&data->monitor_work.work); } /** * done, PHY is in software powerdown (SFT PD) * exit software powerdown, PHY 1 has to exit before PHY 2 * correct PHY order is expected to be in DTS to guarantee that */ return phy_adin2111_sft_pd(dev, false); } static int phy_adin2111_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data) { struct phy_adin2111_data *const data = dev->data; data->cb = cb; data->cb_data = user_data; /* Invoke the callback to notify the caller of the current * link status. 
*/ invoke_link_cb(dev); return 0; } static const struct ethphy_driver_api phy_adin2111_api = { .get_link = phy_adin2111_get_link_state, .cfg_link = phy_adin2111_cfg_link, .link_cb_set = phy_adin2111_link_cb_set, .read = phy_adin2111_reg_read, .write = phy_adin2111_reg_write, }; #define ADIN2111_PHY_INITIALIZE(n) \ static const struct phy_adin2111_config phy_adin2111_config_##n = { \ .mdio = DEVICE_DT_GET(DT_INST_BUS(n)), \ .phy_addr = DT_INST_REG_ADDR(n), \ .led0_en = DT_INST_PROP(n, led0_en), \ .led1_en = DT_INST_PROP(n, led1_en), \ .tx_24v = !(DT_INST_PROP(n, disable_tx_mode_24v)), \ IF_ENABLED(DT_HAS_COMPAT_STATUS_OKAY(adi_adin1100_phy), \ (.mii = 1)) \ }; \ static struct phy_adin2111_data phy_adin2111_data_##n = { \ .sem = Z_SEM_INITIALIZER(phy_adin2111_data_##n.sem, 1, 1), \ }; \ DEVICE_DT_INST_DEFINE(n, &phy_adin2111_init, NULL, \ &phy_adin2111_data_##n, &phy_adin2111_config_##n, \ POST_KERNEL, CONFIG_PHY_INIT_PRIORITY, \ &phy_adin2111_api); DT_INST_FOREACH_STATUS_OKAY(ADIN2111_PHY_INITIALIZE) ```
/content/code_sandbox/drivers/ethernet/phy/phy_adin2111.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,225
```objective-c /* * */ #ifndef PHY_ADIN2111_PRIV_H__ #define PHY_ADIN2111_PRIV_H__ #include <zephyr/device.h> #include <zephyr/net/phy.h> /** * @brief Handles PHY interrupt. * * @note Used internally by the ADIN offloaded ISR handler. * The caller is responsible for device lock. * Shall not be called from ISR. * * @param[in] dev PHY device. * @param[out] state Output of the link state. * * @retval 0 Successful and link state changed. * @retval -EAGAIN Successful but link state didn't change. * @retval <0 MDIO error. */ int phy_adin2111_handle_phy_irq(const struct device *dev, struct phy_link_state *state); #endif /* PHY_ADIN2111_PRIV_H__ */ ```
/content/code_sandbox/drivers/ethernet/phy/phy_adin2111_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
174
```c /* * * Inspiration from phy_mii.c, which is: * */ #define DT_DRV_COMPAT qca_ar8031 #include <errno.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/drivers/mdio.h> #include <zephyr/net/phy.h> #include <zephyr/net/mdio.h> #include <zephyr/net/mii.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(phy_qc_ar8031, CONFIG_PHY_LOG_LEVEL); #define AR8031_PHY_ID1 0x004DU #define PHY_READID_TIMEOUT_COUNT 1000U /* PHY Specific Status Register */ #define PHY_SPECIFIC_STATUS_REG 0x11 #define SPEC_STATUS_REG_LINK_MASK (1U << 10) #define SPEC_STATUS_REG_DUPLEX_MASK (1U << 13) #define PHY_DUPLEX_HALF (0U << 13) #define PHY_DUPLEX_FULL (1U << 13) #define SPEC_STATUS_REG_SPEED_MASK (0x3U << 14) #define PHY_SPEED_10M (0U << 14) #define PHY_SPEED_100M (1U << 14) #define PHY_SPEED_1000M (2U << 14) /* The PHY Debug port address register */ #define PHY_DEBUGPORT_ADDR_REG 0x1DU /* The PHY Debug port data register */ #define PHY_DEBUGPORT_DATA_REG 0x1EU /* PCS Register: smartEEE control 3 Register */ #define MDIO_PCS_SMARTEEE_CTRL3 0x805DU #define MDIO_PCS_SMARTEEE_CTRL3_LPI_EN (1U << 8) /* Debug port registers */ /* Analog Test Control */ #define PHY_DEBUGPORT_ANALOG_CTRL 0x0 #define PHY_DEBUGPORT_ANALOG_CTRL_RX_DELAY (1U << 15) /* SerDes Test and System Mode Control */ #define PHY_DEBUGPORT_SD_SM_CTRL 0x5 #define PHY_DEBUGPORT_SD_SM_CTRL_TX_DELAY (1U << 8) struct qc_ar8031_config { uint8_t addr; bool fixed_link; bool enable_eee; int fixed_speed; const struct device *mdio_dev; }; struct qc_ar8031_data { const struct device *dev; phy_callback_t cb; void *cb_data; struct k_work_delayable monitor_work; struct phy_link_state state; struct k_sem sem; }; static int qc_ar8031_get_link_state(const struct device *dev, struct phy_link_state *state); static int qc_ar8031_read(const struct device *dev, uint16_t reg_addr, uint32_t *data) { const struct qc_ar8031_config *config = dev->config; int ret; /* Make sure excessive bits 16-31 are 
reset */ *data = 0U; /* Read the PHY register */ ret = mdio_read(config->mdio_dev, config->addr, reg_addr, (uint16_t *)data); if (ret) { return ret; } return 0; } static int qc_ar8031_write(const struct device *dev, uint16_t reg_addr, uint32_t data) { const struct qc_ar8031_config *config = dev->config; int ret; ret = mdio_write(config->mdio_dev, config->addr, reg_addr, (uint16_t)data); if (ret) { return ret; } return 0; } static int qc_ar8031_mmd_set_device(const struct device *dev, uint32_t device, uint32_t addr, uint16_t mode) { uint16_t reg_value = (device & MII_MMD_ACR_DEVAD_MASK) | MII_MMD_ACR_ADDR; if (qc_ar8031_write(dev, MII_MMD_ACR, reg_value) < 0) { return -EIO; } if (qc_ar8031_write(dev, MII_MMD_AADR, addr) < 0) { return -EIO; } /* Set Function mode of data access(b01~11) and device address. */ if (qc_ar8031_write(dev, MII_MMD_ACR, (device & MII_MMD_ACR_DEVAD_MASK) | mode) < 0) { return -EIO; } return 0; } static int qc_ar8031_mmd_read(const struct device *dev, uint32_t device, uint32_t addr, uint32_t *data) { int ret; *data = 0U; ret = qc_ar8031_mmd_set_device(dev, device, addr, MII_MMD_ACR_DATA_NO_POS_INC); if (ret) { return -EIO; } ret = qc_ar8031_read(dev, MII_MMD_AADR, data); return ret; } static int qc_ar8031_mmd_write(const struct device *dev, uint32_t device, uint32_t addr, uint32_t data) { int ret; ret = qc_ar8031_mmd_set_device(dev, device, addr, MII_MMD_ACR_DATA_NO_POS_INC); if (ret) { return -EIO; } ret = qc_ar8031_write(dev, MII_MMD_AADR, data); return ret; } static int qc_ar8031_update_link_state(const struct device *dev) { const struct qc_ar8031_config *const cfg = dev->config; struct qc_ar8031_data *const data = dev->data; bool link_up; uint32_t reg_value; uint16_t speed, duplex; if (qc_ar8031_read(dev, PHY_SPECIFIC_STATUS_REG, &reg_value) < 0) { return -EIO; } link_up = (uint16_t)reg_value & SPEC_STATUS_REG_LINK_MASK; /* If there is no change in link state don't proceed. 
*/ if (link_up == data->state.is_up) { return -EAGAIN; } data->state.is_up = link_up; /* If link is down, there is nothing more to be done */ if (data->state.is_up == false) { return 0; } if (qc_ar8031_read(dev, PHY_SPECIFIC_STATUS_REG, &reg_value) < 0) { return -EIO; } speed = reg_value & SPEC_STATUS_REG_SPEED_MASK; duplex = reg_value & SPEC_STATUS_REG_DUPLEX_MASK; switch (speed | duplex) { case PHY_SPEED_10M | PHY_DUPLEX_FULL: data->state.speed = LINK_FULL_10BASE_T; break; case PHY_SPEED_10M | PHY_DUPLEX_HALF: data->state.speed = LINK_HALF_10BASE_T; break; case PHY_SPEED_100M | PHY_DUPLEX_FULL: data->state.speed = LINK_FULL_100BASE_T; break; case PHY_SPEED_100M | PHY_DUPLEX_HALF: data->state.speed = LINK_HALF_100BASE_T; break; case PHY_SPEED_1000M | PHY_DUPLEX_FULL: data->state.speed = LINK_FULL_1000BASE_T; break; case PHY_SPEED_1000M | PHY_DUPLEX_HALF: data->state.speed = LINK_HALF_1000BASE_T; break; } LOG_DBG("PHY (%d) Link speed %s Mb, %s duplex", cfg->addr, PHY_LINK_IS_SPEED_1000M(data->state.speed) ? "1000" : (PHY_LINK_IS_SPEED_100M(data->state.speed) ? "100" : "10"), PHY_LINK_IS_FULL_DUPLEX(data->state.speed) ? 
"full" : "half"); return 0; } static void invoke_link_cb(const struct device *dev) { struct qc_ar8031_data *const data = dev->data; struct phy_link_state state; if (data->cb == NULL) { return; } qc_ar8031_get_link_state(dev, &state); data->cb(data->dev, &state, data->cb_data); } static void monitor_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct qc_ar8031_data *const data = CONTAINER_OF(dwork, struct qc_ar8031_data, monitor_work); const struct device *dev = data->dev; int rc; k_sem_take(&data->sem, K_FOREVER); rc = qc_ar8031_update_link_state(dev); k_sem_give(&data->sem); /* If link state has changed and a callback is set, invoke callback */ if (rc == 0) { invoke_link_cb(dev); } /* Submit delayed work */ k_work_reschedule(&data->monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD)); } static int qc_ar8031_cfg_link(const struct device *dev, enum phy_link_speed adv_speeds) { uint32_t anar_reg; uint32_t bmcr_reg; uint32_t c1kt_reg; if (qc_ar8031_read(dev, MII_ANAR, &anar_reg) < 0) { return -EIO; } if (qc_ar8031_read(dev, MII_BMCR, &bmcr_reg) < 0) { return -EIO; } if (qc_ar8031_read(dev, MII_1KTCR, &c1kt_reg) < 0) { return -EIO; } if (adv_speeds & LINK_FULL_10BASE_T) { anar_reg |= MII_ADVERTISE_10_FULL; } else { anar_reg &= ~MII_ADVERTISE_10_FULL; } if (adv_speeds & LINK_HALF_10BASE_T) { anar_reg |= MII_ADVERTISE_10_HALF; } else { anar_reg &= ~MII_ADVERTISE_10_HALF; } if (adv_speeds & LINK_FULL_100BASE_T) { anar_reg |= MII_ADVERTISE_100_FULL; } else { anar_reg &= ~MII_ADVERTISE_100_FULL; } if (adv_speeds & LINK_HALF_100BASE_T) { anar_reg |= MII_ADVERTISE_100_HALF; } else { anar_reg &= ~MII_ADVERTISE_100_HALF; } if (adv_speeds & LINK_FULL_1000BASE_T) { c1kt_reg |= MII_ADVERTISE_1000_FULL; } else { c1kt_reg &= ~MII_ADVERTISE_1000_FULL; } if (adv_speeds & LINK_HALF_1000BASE_T) { c1kt_reg |= MII_ADVERTISE_1000_HALF; } else { c1kt_reg &= ~MII_ADVERTISE_1000_HALF; } if (qc_ar8031_write(dev, MII_1KTCR, c1kt_reg) < 0) { 
return -EIO; } bmcr_reg |= MII_BMCR_AUTONEG_ENABLE | MII_BMCR_AUTONEG_RESTART; if (qc_ar8031_write(dev, MII_ANAR, anar_reg) < 0) { return -EIO; } if (qc_ar8031_write(dev, MII_BMCR, bmcr_reg) < 0) { return -EIO; } return 0; } static int qc_ar8031_get_link_state(const struct device *dev, struct phy_link_state *state) { struct qc_ar8031_data *const data = dev->data; k_sem_take(&data->sem, K_FOREVER); memcpy(state, &data->state, sizeof(struct phy_link_state)); k_sem_give(&data->sem); return 0; } static int qc_ar8031_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data) { struct qc_ar8031_data *const data = dev->data; data->cb = cb; data->cb_data = user_data; /** * Immediately invoke the callback to notify the caller of the * current link status. */ invoke_link_cb(dev); return 0; } static int qc_ar8031_init(const struct device *dev) { const struct qc_ar8031_config *const cfg = dev->config; struct qc_ar8031_data *const data = dev->data; uint32_t counter = PHY_READID_TIMEOUT_COUNT; uint32_t reg_value = 0; int ret; k_sem_init(&data->sem, 1, 1); mdio_bus_enable(cfg->mdio_dev); data->state.is_up = false; data->dev = dev; data->cb = NULL; do { if (qc_ar8031_read(dev, MII_PHYID1R, &reg_value) < 0) { return -EIO; } } while (reg_value != AR8031_PHY_ID1 && counter-- > 0); if (counter == 0U) { LOG_ERR("PHY (%d) can't read ID", cfg->addr); return -EIO; } /* Reset PHY */ ret = qc_ar8031_write(dev, MII_BMCR, MII_BMCR_RESET); if (ret) { return -EIO; } /* Close smartEEE */ ret = qc_ar8031_mmd_set_device(dev, MDIO_MMD_PCS, MDIO_PCS_SMARTEEE_CTRL3, MII_MMD_ACR_DATA_NO_POS_INC); if (ret) { return -EIO; } ret = qc_ar8031_read(dev, MII_MMD_AADR, &reg_value); if (ret) { return -EIO; } ret = qc_ar8031_write(dev, MII_MMD_AADR, reg_value & ~(MDIO_PCS_SMARTEEE_CTRL3_LPI_EN)); if (ret) { return -EIO; } /* Enable Tx clock delay */ ret = qc_ar8031_write(dev, PHY_DEBUGPORT_ADDR_REG, PHY_DEBUGPORT_SD_SM_CTRL); if (ret) { return -EIO; } ret = qc_ar8031_read(dev, 
PHY_DEBUGPORT_DATA_REG, &reg_value); if (ret) { return -EIO; } ret = qc_ar8031_write(dev, PHY_DEBUGPORT_DATA_REG, reg_value | PHY_DEBUGPORT_SD_SM_CTRL_TX_DELAY); if (ret) { return -EIO; } /* Enable Rx clock delay */ ret = qc_ar8031_write(dev, PHY_DEBUGPORT_ADDR_REG, PHY_DEBUGPORT_ANALOG_CTRL); if (ret) { return -EIO; } ret = qc_ar8031_read(dev, PHY_DEBUGPORT_DATA_REG, &reg_value); if (ret) { return -EIO; } ret = qc_ar8031_write(dev, PHY_DEBUGPORT_DATA_REG, reg_value | PHY_DEBUGPORT_ANALOG_CTRL_RX_DELAY); if (ret) { return -EIO; } /* Energy Efficient Ethernet configuration */ if (cfg->enable_eee) { ret = qc_ar8031_mmd_read(dev, MDIO_MMD_PCS, MDIO_PCS_EEE_CAP, &reg_value); if (ret) { return -EIO; } ret = qc_ar8031_mmd_write(dev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, reg_value & (MDIO_AN_EEE_ADV_1000T | MDIO_AN_EEE_ADV_100TX)); if (ret) { return -EIO; } } else { ret = qc_ar8031_mmd_write(dev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0); if (ret) { return -EIO; } } /* Fixed Link */ if (cfg->fixed_link) { /* Disable isolate */ ret = qc_ar8031_read(dev, MII_BMCR, &reg_value); if (ret) { return -EIO; } reg_value &= ~MII_BMCR_ISOLATE; ret = qc_ar8031_write(dev, MII_BMCR, reg_value); if (ret) { return -EIO; } const static int speed_to_phy_link_speed[] = { LINK_HALF_10BASE_T, LINK_FULL_10BASE_T, LINK_HALF_100BASE_T, LINK_FULL_100BASE_T, LINK_HALF_1000BASE_T, LINK_FULL_1000BASE_T, }; data->state.speed = speed_to_phy_link_speed[cfg->fixed_speed]; data->state.is_up = true; } else { /* Auto negotiation */ /* Advertise all speeds */ qc_ar8031_cfg_link(dev, LINK_HALF_10BASE_T | LINK_FULL_10BASE_T | LINK_HALF_100BASE_T | LINK_FULL_100BASE_T | LINK_HALF_1000BASE_T | LINK_FULL_1000BASE_T); k_work_init_delayable(&data->monitor_work, monitor_work_handler); monitor_work_handler(&data->monitor_work.work); } return 0; } static const struct ethphy_driver_api ar8031_driver_api = { .get_link = qc_ar8031_get_link_state, .cfg_link = qc_ar8031_cfg_link, .link_cb_set = qc_ar8031_link_cb_set, .read = 
qc_ar8031_read, .write = qc_ar8031_write, }; #define AR8031_CONFIG(n) \ static const struct qc_ar8031_config qc_ar8031_config_##n = { \ .addr = DT_INST_REG_ADDR(n), \ .fixed_link = DT_INST_NODE_HAS_PROP(n, fixed_link), \ .fixed_speed = DT_INST_ENUM_IDX_OR(n, fixed_link, 0), \ .mdio_dev = DEVICE_DT_GET(DT_INST_BUS(n)), \ .enable_eee = DT_INST_NODE_HAS_PROP(n, eee_en), \ }; #define AR8031_DEVICE(n) \ AR8031_CONFIG(n); \ static struct qc_ar8031_data qc_ar8031_data_##n; \ DEVICE_DT_INST_DEFINE(n, &qc_ar8031_init, NULL, &qc_ar8031_data_##n, \ &qc_ar8031_config_##n, POST_KERNEL, CONFIG_PHY_INIT_PRIORITY, \ &ar8031_driver_api); DT_INST_FOREACH_STATUS_OKAY(AR8031_DEVICE) ```
/content/code_sandbox/drivers/ethernet/phy/phy_qualcomm_ar8031.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,008
```unknown # Ethernet PHY drivers configuration options menuconfig ETH_PHY_DRIVER bool "Ethernet PHY drivers" default y if NET_L2_ETHERNET || ETH_DRIVER if ETH_PHY_DRIVER module = PHY module-dep = LOG module-str = Log level for Ethernet PHY driver module-help = Sets log level for Ethernet PHY Device Drivers. source "subsys/net/Kconfig.template.log_config.net" source "drivers/ethernet/phy/Kconfig.tja1103" config PHY_INIT_PRIORITY int "Ethernet PHY driver init priority" default 70 help Ethernet PHY device driver initialization priority. Do not mess with it unless you know what you are doing. Note that the priority needs to be lower than the net stack so that it can start before the networking sub-system. config PHY_GENERIC_MII bool "Generic MII PHY Driver" default y if DT_HAS_ETHERNET_PHY_ENABLED depends on MDIO help This is a generic MII PHY interface that communicates with the PHY using the MDIO bus. config PHY_ADIN2111 bool "ADIN2111 PHY driver" default y depends on DT_HAS_ADI_ADIN2111_PHY_ENABLED || DT_HAS_ADI_ADIN1100_PHY_ENABLED help Enable ADIN2111 PHY driver. 
config PHY_MICROCHIP_KSZ8081 bool "Microchip KSZ8081 PHY Driver" default y depends on DT_HAS_MICROCHIP_KSZ8081_ENABLED depends on MDIO depends on GPIO help Enable Microchip KSZ8081 Ethernet PHY Driver config PHY_REALTEK_RTL8211F bool "Realtek RTL8211F PHY Driver" default y depends on DT_HAS_REALTEK_RTL8211F_ENABLED depends on MDIO depends on GPIO || (!$(dt_compat_any_has_prop,$(DT_COMPAT_REALTEK_RTL8211F),reset-gpios) && \ !$(dt_compat_any_has_prop,$(DT_COMPAT_REALTEK_RTL8211F),int-gpios)) help Enable Realtek RTL8211F Ethernet PHY Driver config PHY_QUALCOMM_AR8031 bool "Qualcomm Atheros AR8031 Ethernet PHY Driver" default y depends on DT_HAS_QCA_AR8031_ENABLED depends on MDIO help Enable Qualcomm Atheros AR8031 Ethernet PHY Driver config PHY_AUTONEG_TIMEOUT_MS int "Auto-negotiation timeout value in milliseconds" default 4000 help Maximum duration of auto-negotiation sequence in milliseconds before the process fails config PHY_MONITOR_PERIOD int "Monitor task execution period" default 500 help Monitor task execution period in milliseconds. The monitor task is periodically executed to detect and report any changes in the PHY link status to the operating system. endif # "Ethernet PHY Drivers" ```
/content/code_sandbox/drivers/ethernet/phy/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
612
```c
/*
 *
 * Inspiration from phy_mii.c, which is:
 *
 */

/* Driver for the Realtek RTL8211F RGMII gigabit Ethernet PHY.
 * Implements the Zephyr ethphy_driver_api (get_link / cfg_link /
 * link_cb_set / read / write) over a clause-22 MDIO bus, with link
 * monitoring done either by a polling delayed work item or, when an
 * int-gpios property is present, by the PHY interrupt pin.
 */
#define DT_DRV_COMPAT realtek_rtl8211f

#include <zephyr/kernel.h>
#include <zephyr/net/phy.h>
#include <zephyr/net/mii.h>
#include <zephyr/drivers/mdio.h>
#include <string.h>
#include <zephyr/sys/util_macro.h>
#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios) || DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios)
#include <zephyr/drivers/gpio.h>
#endif

#define LOG_MODULE_NAME phy_rt_rtl8211f
#define LOG_LEVEL CONFIG_PHY_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

/* Value expected in MII_PHYID1R once the PHY answers correctly after reset */
#define REALTEK_OUI_MSB (0x1CU)

/* PHY Specific Status Register and its link/duplex/speed fields */
#define PHY_RT_RTL8211F_PHYSR_REG (0x1A)
#define PHY_RT_RTL8211F_PHYSR_LINKSTATUS_MASK BIT(2)
#define PHY_RT_RTL8211F_PHYSR_LINKDUPLEX_MASK BIT(3)
#define PHY_RT_RTL8211F_PHYSR_LINKSPEED_MASK (BIT(4) | BIT(5))
#define PHY_RT_RTL8211F_PHYSR_LINKSPEED_SHIFT (4U)
#define PHY_RT_RTL8211F_PHYSR_LINKSPEED_10M (0U)
#define PHY_RT_RTL8211F_PHYSR_LINKSPEED_100M (1U)
#define PHY_RT_RTL8211F_PHYSR_LINKSPEED_1000M (2U)

/* Page select register; the registers below live on extension pages and are
 * only reachable after writing the page address to PAGSR.
 */
#define PHY_RT_RTL8211F_PAGSR_REG (0x1F)

/* Page 0xD08: MII control registers holding the RGMII TX/RX delay bits */
#define PHY_RT_RTL8211F_PAGE_MIICR_ADDR (0xD08)
#define PHY_RT_RTL8211F_MIICR1_REG (0x11)
#define PHY_RT_RTL8211F_MIICR2_REG (0x15)
#define PHY_RT_RTL8211F_MIICR1_TXDLY_MASK BIT(8)
#define PHY_RT_RTL8211F_MIICR2_RXDLY_MASK BIT(3)

/* Page 0xD40: INTB/PMEB pin mode selection */
#define PHY_RT_RTL8211F_PAGE_INTR_PIN_ADDR (0xD40)
#define PHY_RT_RTL8211F_INTR_PIN_REG (0x16)
#define PHY_RT_RTL8211F_INTR_PIN_MASK BIT(5)

/* Page 0xA42: interrupt enable/status registers */
#define PHY_RT_RTL8211F_PAGE_INTR_ADDR (0xA42U)
#define PHY_RT_RTL8211F_INER_REG (0x12U)
#define PHY_RT_RTL8211F_INER_LINKSTATUS_CHANGE_MASK BIT(4)
#define PHY_RT_RTL8211F_INSR_REG (0x1DU)

/* Minimum time the reset (pin or register) must be held asserted */
#define PHY_RT_RTL8211F_RESET_HOLD_TIME_MS 10

/* Per-instance constant configuration, taken from devicetree */
struct rt_rtl8211f_config {
	uint8_t addr;                  /* PHY address on the MDIO bus */
	const struct device *mdio_dev; /* parent MDIO controller */
#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(reset_gpios)
	const struct gpio_dt_spec reset_gpio;
#endif
#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios)
	const struct gpio_dt_spec interrupt_gpio;
#endif
};

/* Per-instance mutable state */
struct rt_rtl8211f_data {
	const struct device *dev;    /* back-pointer for work/ISR contexts */
	struct phy_link_state state; /* last reported link state */
	phy_callback_t cb;           /* user link-change callback */
#if DT_ANY_INST_HAS_PROP_STATUS_OKAY(int_gpios)
	struct gpio_callback gpio_callback;
#endif
	void *cb_data;
	struct k_mutex mutex;        /* serializes MDIO register accesses */
	struct k_work_delayable phy_monitor_work;
};

/* Read a clause-22 PHY register over MDIO; the 16-bit value is widened
 * into *data. Returns 0 on success, negative errno from mdio_read().
 */
static int phy_rt_rtl8211f_read(const struct device *dev, uint16_t reg_addr, uint32_t *data)
{
	const struct rt_rtl8211f_config *config = dev->config;
	int ret;

	/* Make sure excessive bits 16-31 are reset */
	*data = 0U;

	/* Read the PHY register */
	ret = mdio_read(config->mdio_dev, config->addr, reg_addr, (uint16_t *)data);
	if (ret) {
		return ret;
	}

	return 0;
}

/* Write a clause-22 PHY register over MDIO (low 16 bits of data).
 * Returns 0 on success, negative errno from mdio_write().
 */
static int phy_rt_rtl8211f_write(const struct device *dev, uint16_t reg_addr, uint32_t data)
{
	const struct rt_rtl8211f_config *config = dev->config;
	int ret;

	ret = phy_rt_rtl8211f_write_helper(dev, reg_addr, data);
	return ret;
}
/* NOTE: helper split is illustrative only — actual body below is verbatim. */
/content/code_sandbox/drivers/ethernet/phy/phy_realtek_rtl8211f.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,819
```c /* * */ #define DT_DRV_COMPAT ethernet_phy #include <errno.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/drivers/mdio.h> #include <zephyr/net/phy.h> #include <zephyr/net/mii.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(phy_mii, CONFIG_PHY_LOG_LEVEL); struct phy_mii_dev_config { uint8_t phy_addr; bool no_reset; bool fixed; int fixed_speed; const struct device * const mdio; }; struct phy_mii_dev_data { const struct device *dev; phy_callback_t cb; void *cb_data; struct k_work_delayable monitor_work; struct phy_link_state state; struct k_sem sem; bool gigabit_supported; }; /* Offset to align capabilities bits of 1000BASE-T Control and Status regs */ #define MII_1KSTSR_OFFSET 2 #define MII_INVALID_PHY_ID UINT32_MAX static int phy_mii_get_link_state(const struct device *dev, struct phy_link_state *state); static inline int reg_read(const struct device *dev, uint16_t reg_addr, uint16_t *value) { const struct phy_mii_dev_config *const cfg = dev->config; /* if there is no mdio (fixed-link) it is not supported to read */ if (cfg->mdio == NULL) { return -ENOTSUP; } return mdio_read(cfg->mdio, cfg->phy_addr, reg_addr, value); } static inline int reg_write(const struct device *dev, uint16_t reg_addr, uint16_t value) { const struct phy_mii_dev_config *const cfg = dev->config; /* if there is no mdio (fixed-link) it is not supported to write */ if (cfg->mdio == NULL) { return -ENOTSUP; } return mdio_write(cfg->mdio, cfg->phy_addr, reg_addr, value); } static bool is_gigabit_supported(const struct device *dev) { uint16_t bmsr_reg; uint16_t estat_reg; if (reg_read(dev, MII_BMSR, &bmsr_reg) < 0) { return -EIO; } if (bmsr_reg & MII_BMSR_EXTEND_STATUS) { if (reg_read(dev, MII_ESTAT, &estat_reg) < 0) { return -EIO; } if (estat_reg & (MII_ESTAT_1000BASE_T_HALF | MII_ESTAT_1000BASE_T_FULL)) { return true; } } return false; } static int reset(const struct device *dev) { uint32_t timeout = 12U; uint16_t value; /* Issue a 
soft reset */ if (reg_write(dev, MII_BMCR, MII_BMCR_RESET) < 0) { return -EIO; } /* Wait up to 0.6s for the reset sequence to finish. According to * IEEE 802.3, Section 2, Subsection 22.2.4.1.1 a PHY reset may take * up to 0.5 s. */ do { if (timeout-- == 0U) { return -ETIMEDOUT; } k_sleep(K_MSEC(50)); if (reg_read(dev, MII_BMCR, &value) < 0) { return -EIO; } } while (value & MII_BMCR_RESET); return 0; } static int get_id(const struct device *dev, uint32_t *phy_id) { uint16_t value; if (reg_read(dev, MII_PHYID1R, &value) < 0) { return -EIO; } *phy_id = value << 16; if (reg_read(dev, MII_PHYID2R, &value) < 0) { return -EIO; } *phy_id |= value; return 0; } static int update_link_state(const struct device *dev) { const struct phy_mii_dev_config *const cfg = dev->config; struct phy_mii_dev_data *const data = dev->data; bool link_up; uint16_t anar_reg = 0; uint16_t bmcr_reg = 0; uint16_t bmsr_reg = 0; uint16_t anlpar_reg = 0; uint16_t c1kt_reg = 0; uint16_t s1kt_reg = 0; uint32_t timeout = CONFIG_PHY_AUTONEG_TIMEOUT_MS / 100; if (reg_read(dev, MII_BMSR, &bmsr_reg) < 0) { return -EIO; } link_up = bmsr_reg & MII_BMSR_LINK_STATUS; /* If there is no change in link state don't proceed. */ if (link_up == data->state.is_up) { return -EAGAIN; } data->state.is_up = link_up; /* If link is down, there is nothing more to be done */ if (data->state.is_up == false) { return 0; } /** * Perform auto-negotiation sequence. 
*/ LOG_DBG("PHY (%d) Starting MII PHY auto-negotiate sequence", cfg->phy_addr); /* Read PHY default advertising parameters */ if (reg_read(dev, MII_ANAR, &anar_reg) < 0) { return -EIO; } /* Configure and start auto-negotiation process */ if (reg_read(dev, MII_BMCR, &bmcr_reg) < 0) { return -EIO; } bmcr_reg |= MII_BMCR_AUTONEG_ENABLE | MII_BMCR_AUTONEG_RESTART; bmcr_reg &= ~MII_BMCR_ISOLATE; /* Don't isolate the PHY */ if (reg_write(dev, MII_BMCR, bmcr_reg) < 0) { return -EIO; } /* Wait for the auto-negotiation process to complete */ do { if (timeout-- == 0U) { LOG_DBG("PHY (%d) auto-negotiate timedout", cfg->phy_addr); return -ETIMEDOUT; } k_sleep(K_MSEC(100)); if (reg_read(dev, MII_BMSR, &bmsr_reg) < 0) { return -EIO; } } while (!(bmsr_reg & MII_BMSR_AUTONEG_COMPLETE)); LOG_DBG("PHY (%d) auto-negotiate sequence completed", cfg->phy_addr); /** Read peer device capability */ if (reg_read(dev, MII_ANLPAR, &anlpar_reg) < 0) { return -EIO; } if (data->gigabit_supported) { if (reg_read(dev, MII_1KTCR, &c1kt_reg) < 0) { return -EIO; } if (reg_read(dev, MII_1KSTSR, &s1kt_reg) < 0) { return -EIO; } s1kt_reg = (uint16_t)(s1kt_reg >> MII_1KSTSR_OFFSET); } if (data->gigabit_supported && ((c1kt_reg & s1kt_reg) & MII_ADVERTISE_1000_FULL)) { data->state.speed = LINK_FULL_1000BASE_T; } else if (data->gigabit_supported && ((c1kt_reg & s1kt_reg) & MII_ADVERTISE_1000_HALF)) { data->state.speed = LINK_HALF_1000BASE_T; } else if ((anar_reg & anlpar_reg) & MII_ADVERTISE_100_FULL) { data->state.speed = LINK_FULL_100BASE_T; } else if ((anar_reg & anlpar_reg) & MII_ADVERTISE_100_HALF) { data->state.speed = LINK_HALF_100BASE_T; } else if ((anar_reg & anlpar_reg) & MII_ADVERTISE_10_FULL) { data->state.speed = LINK_FULL_10BASE_T; } else { data->state.speed = LINK_HALF_10BASE_T; } LOG_INF("PHY (%d) Link speed %s Mb, %s duplex\n", cfg->phy_addr, PHY_LINK_IS_SPEED_1000M(data->state.speed) ? "1000" : (PHY_LINK_IS_SPEED_100M(data->state.speed) ? 
"100" : "10"), PHY_LINK_IS_FULL_DUPLEX(data->state.speed) ? "full" : "half"); return 0; } static void invoke_link_cb(const struct device *dev) { struct phy_mii_dev_data *const data = dev->data; struct phy_link_state state; if (data->cb == NULL) { return; } phy_mii_get_link_state(dev, &state); data->cb(data->dev, &state, data->cb_data); } static void monitor_work_handler(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct phy_mii_dev_data *const data = CONTAINER_OF(dwork, struct phy_mii_dev_data, monitor_work); const struct device *dev = data->dev; int rc; k_sem_take(&data->sem, K_FOREVER); rc = update_link_state(dev); k_sem_give(&data->sem); /* If link state has changed and a callback is set, invoke callback */ if (rc == 0) { invoke_link_cb(dev); } /* Submit delayed work */ k_work_reschedule(&data->monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD)); } static int phy_mii_read(const struct device *dev, uint16_t reg_addr, uint32_t *data) { return reg_read(dev, reg_addr, (uint16_t *)data); } static int phy_mii_write(const struct device *dev, uint16_t reg_addr, uint32_t data) { return reg_write(dev, reg_addr, (uint16_t)data); } static int phy_mii_cfg_link(const struct device *dev, enum phy_link_speed adv_speeds) { struct phy_mii_dev_data *const data = dev->data; uint16_t anar_reg; uint16_t bmcr_reg; uint16_t c1kt_reg; if (reg_read(dev, MII_ANAR, &anar_reg) < 0) { return -EIO; } if (reg_read(dev, MII_BMCR, &bmcr_reg) < 0) { return -EIO; } if (data->gigabit_supported) { if (reg_read(dev, MII_1KTCR, &c1kt_reg) < 0) { return -EIO; } } if (adv_speeds & LINK_FULL_10BASE_T) { anar_reg |= MII_ADVERTISE_10_FULL; } else { anar_reg &= ~MII_ADVERTISE_10_FULL; } if (adv_speeds & LINK_HALF_10BASE_T) { anar_reg |= MII_ADVERTISE_10_HALF; } else { anar_reg &= ~MII_ADVERTISE_10_HALF; } if (adv_speeds & LINK_FULL_100BASE_T) { anar_reg |= MII_ADVERTISE_100_FULL; } else { anar_reg &= ~MII_ADVERTISE_100_FULL; } if (adv_speeds & LINK_HALF_100BASE_T) 
{ anar_reg |= MII_ADVERTISE_100_HALF; } else { anar_reg &= ~MII_ADVERTISE_100_HALF; } if (data->gigabit_supported) { if (adv_speeds & LINK_FULL_1000BASE_T) c1kt_reg |= MII_ADVERTISE_1000_FULL; else c1kt_reg &= ~MII_ADVERTISE_1000_FULL; if (adv_speeds & LINK_HALF_1000BASE_T) c1kt_reg |= MII_ADVERTISE_1000_HALF; else c1kt_reg &= ~MII_ADVERTISE_1000_HALF; if (reg_write(dev, MII_1KTCR, c1kt_reg) < 0) { return -EIO; } } bmcr_reg |= MII_BMCR_AUTONEG_ENABLE; if (reg_write(dev, MII_ANAR, anar_reg) < 0) { return -EIO; } if (reg_write(dev, MII_BMCR, bmcr_reg) < 0) { return -EIO; } return 0; } static int phy_mii_get_link_state(const struct device *dev, struct phy_link_state *state) { struct phy_mii_dev_data *const data = dev->data; k_sem_take(&data->sem, K_FOREVER); memcpy(state, &data->state, sizeof(struct phy_link_state)); k_sem_give(&data->sem); return 0; } static int phy_mii_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data) { struct phy_mii_dev_data *const data = dev->data; data->cb = cb; data->cb_data = user_data; /** * Immediately invoke the callback to notify the caller of the * current link status. */ invoke_link_cb(dev); return 0; } static int phy_mii_initialize(const struct device *dev) { const struct phy_mii_dev_config *const cfg = dev->config; struct phy_mii_dev_data *const data = dev->data; uint32_t phy_id; k_sem_init(&data->sem, 1, 1); data->dev = dev; data->cb = NULL; /** * If this is a *fixed* link then we don't need to communicate * with a PHY. We set the link parameters as configured * and set link state to up. 
*/ if (cfg->fixed) { const static int speed_to_phy_link_speed[] = { LINK_HALF_10BASE_T, LINK_FULL_10BASE_T, LINK_HALF_100BASE_T, LINK_FULL_100BASE_T, LINK_HALF_1000BASE_T, LINK_FULL_1000BASE_T, }; data->state.speed = speed_to_phy_link_speed[cfg->fixed_speed]; data->state.is_up = true; } else { data->state.is_up = false; mdio_bus_enable(cfg->mdio); if (cfg->no_reset == false) { reset(dev); } if (get_id(dev, &phy_id) == 0) { if (phy_id == MII_INVALID_PHY_ID) { LOG_ERR("No PHY found at address %d", cfg->phy_addr); return -EINVAL; } LOG_INF("PHY (%d) ID %X\n", cfg->phy_addr, phy_id); } data->gigabit_supported = is_gigabit_supported(dev); /* Advertise all speeds */ phy_mii_cfg_link(dev, LINK_HALF_10BASE_T | LINK_FULL_10BASE_T | LINK_HALF_100BASE_T | LINK_FULL_100BASE_T | LINK_HALF_1000BASE_T | LINK_FULL_1000BASE_T); k_work_init_delayable(&data->monitor_work, monitor_work_handler); monitor_work_handler(&data->monitor_work.work); } return 0; } #define IS_FIXED_LINK(n) DT_INST_NODE_HAS_PROP(n, fixed_link) static const struct ethphy_driver_api phy_mii_driver_api = { .get_link = phy_mii_get_link_state, .cfg_link = phy_mii_cfg_link, .link_cb_set = phy_mii_link_cb_set, .read = phy_mii_read, .write = phy_mii_write, }; #define PHY_MII_CONFIG(n) \ static const struct phy_mii_dev_config phy_mii_dev_config_##n = { \ .phy_addr = DT_INST_REG_ADDR(n), \ .no_reset = DT_INST_PROP(n, no_reset), \ .fixed = IS_FIXED_LINK(n), \ .fixed_speed = DT_INST_ENUM_IDX_OR(n, fixed_link, 0), \ .mdio = UTIL_AND(UTIL_NOT(IS_FIXED_LINK(n)), \ DEVICE_DT_GET(DT_INST_BUS(n))) \ }; #define PHY_MII_DEVICE(n) \ PHY_MII_CONFIG(n); \ static struct phy_mii_dev_data phy_mii_dev_data_##n; \ DEVICE_DT_INST_DEFINE(n, \ &phy_mii_initialize, \ NULL, \ &phy_mii_dev_data_##n, \ &phy_mii_dev_config_##n, POST_KERNEL, \ CONFIG_PHY_INIT_PRIORITY, \ &phy_mii_driver_api); DT_INST_FOREACH_STATUS_OKAY(PHY_MII_DEVICE) ```
/content/code_sandbox/drivers/ethernet/phy/phy_mii.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,768
```c /* * */ #define DT_DRV_COMPAT nxp_tja1103 #include <errno.h> #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/util.h> #include <zephyr/net/phy.h> #include <zephyr/net/mii.h> #include <zephyr/net/mdio.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/mdio.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(phy_tja1103, CONFIG_PHY_LOG_LEVEL); /* PHYs out of reset check retry delay */ #define TJA1103_AWAIT_DELAY_POLL_US 15000U /* Number of retries for PHYs out of reset check */ #define TJA1103_AWAIT_RETRY_COUNT 200U /* TJA1103 PHY identifier */ #define TJA1103_ID 0x1BB013 /* MMD30 - Device status register */ #define TJA1103_DEVICE_CONTROL (0x0040U) #define TJA1103_DEVICE_CONTROL_GLOBAL_CFG_EN BIT(14) #define TJA1103_DEVICE_CONTROL_SUPER_CFG_EN BIT(13) /* Shared - PHY control register */ #define TJA1103_PHY_CONTROL (0x8100U) #define TJA1103_PHY_CONTROL_CFG_EN BIT(14) /* Shared - PHY status register */ #define TJA1103_PHY_STATUS (0x8102U) #define TJA1103_PHY_STATUS_LINK_STAT BIT(2) /* Shared - PHY functional IRQ masked status register */ #define TJA1103_PHY_FUNC_IRQ_MSTATUS (0x80A2) #define TJA1103_PHY_FUNC_IRQ_LINK_EVENT BIT(1) #define TJA1103_PHY_FUNC_IRQ_LINK_AVAIL BIT(2) /* Shared -PHY functional IRQ source & enable registers */ #define TJA1103_PHY_FUNC_IRQ_ACK (0x80A0) #define TJA1103_PHY_FUNC_IRQ_EN (0x80A1) #define TJA1103_PHY_FUNC_IRQ_LINK_EVENT_EN BIT(1) #define TJA1103_PHY_FUNC_IRQ_LINK_AVAIL_EN BIT(2) /* Always accessible reg for NMIs */ #define TJA1103_ALWAYS_ACCESSIBLE (0x801F) #define TJA1103_ALWAYS_ACCESSIBLE_FUSA_PASS_IRQ BIT(4) struct phy_tja1103_config { const struct device *mdio; struct gpio_dt_spec gpio_interrupt; uint8_t phy_addr; uint8_t master_slave; }; struct phy_tja1103_data { const struct device *dev; struct phy_link_state state; struct k_sem sem; struct k_sem offload_sem; phy_callback_t cb; struct gpio_callback phy_tja1103_int_callback; void *cb_data; 
K_KERNEL_STACK_MEMBER(irq_thread_stack, CONFIG_PHY_TJA1103_IRQ_THREAD_STACK_SIZE);
	/* Bottom-half thread servicing the PHY interrupt (see phy_tja1103_irq_offload_thread) */
	struct k_thread irq_thread;
	/* Periodic link poller used when no interrupt GPIO is configured */
	struct k_work_delayable monitor_work;
};

/* Clause-22 register read through the MDIO bus device from config. */
static inline int phy_tja1103_c22_read(const struct device *dev, uint16_t reg, uint16_t *val)
{
	const struct phy_tja1103_config *const cfg = dev->config;

	return mdio_read(cfg->mdio, cfg->phy_addr, reg, val);
}

/* Clause-22 register write through the MDIO bus device from config. */
static inline int phy_tja1103_c22_write(const struct device *dev, uint16_t reg, uint16_t val)
{
	const struct phy_tja1103_config *const cfg = dev->config;

	return mdio_write(cfg->mdio, cfg->phy_addr, reg, val);
}

/* Clause-45 register write (devad selects the MMD). */
static inline int phy_tja1103_c45_write(const struct device *dev, uint16_t devad, uint16_t reg,
					uint16_t val)
{
	const struct phy_tja1103_config *cfg = dev->config;

	return mdio_write_c45(cfg->mdio, cfg->phy_addr, devad, reg, val);
}

/* Clause-45 register read (devad selects the MMD). */
static inline int phy_tja1103_c45_read(const struct device *dev, uint16_t devad, uint16_t reg,
				       uint16_t *val)
{
	const struct phy_tja1103_config *cfg = dev->config;

	return mdio_read_c45(cfg->mdio, cfg->phy_addr, devad, reg, val);
}

/* PHY API .read callback: C22 read with the MDIO bus explicitly enabled
 * around the transaction. Only the low 16 bits of *data are written.
 */
static int phy_tja1103_reg_read(const struct device *dev, uint16_t reg_addr, uint32_t *data)
{
	const struct phy_tja1103_config *cfg = dev->config;
	int ret;

	mdio_bus_enable(cfg->mdio);

	ret = phy_tja1103_c22_read(dev, reg_addr, (uint16_t *)data);

	mdio_bus_disable(cfg->mdio);

	return ret;
}

/* PHY API .write callback: C22 write with the MDIO bus explicitly enabled
 * around the transaction. The value is truncated to 16 bits.
 */
static int phy_tja1103_reg_write(const struct device *dev, uint16_t reg_addr, uint32_t data)
{
	const struct phy_tja1103_config *cfg = dev->config;
	int ret;

	mdio_bus_enable(cfg->mdio);

	ret = phy_tja1103_c22_write(dev, reg_addr, (uint16_t)data);

	mdio_bus_disable(cfg->mdio);

	return ret;
}

/* Assemble the 32-bit PHY ID from the two C22 ID registers
 * (PHYID1 in the upper half, PHYID2 in the lower half).
 * Returns 0 on success, -EIO on MDIO failure.
 */
static int phy_tja1103_id(const struct device *dev, uint32_t *phy_id)
{
	uint16_t val;

	if (phy_tja1103_c22_read(dev, MII_PHYID1R, &val) < 0) {
		return -EIO;
	}

	*phy_id = (val & UINT16_MAX) << 16;

	if (phy_tja1103_c22_read(dev, MII_PHYID2R, &val) < 0) {
		return -EIO;
	}

	*phy_id |= (val & UINT16_MAX);

	return 0;
}

/* Refresh data->state.is_up from the vendor PHY status register.
 * Returns 0 when the link state changed, -EAGAIN when unchanged,
 * -EIO on MDIO failure. Caller must hold data->sem.
 */
static int update_link_state(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
	bool link_up;
	uint16_t val;

	if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_STATUS, &val) < 0) {
		return -EIO;
	}

	link_up = (val & TJA1103_PHY_STATUS_LINK_STAT) != 0;

	/* Let workqueue re-schedule and re-check if the
	 * link status is unchanged this time
	 */
	if (data->state.is_up == link_up) {
		return -EAGAIN;
	}

	data->state.is_up = link_up;

	return 0;
}

/* PHY API .get_link callback: copy the cached link state to *state under
 * data->sem. In interrupt mode the state is refreshed on demand first.
 */
static int phy_tja1103_get_link_state(const struct device *dev, struct phy_link_state *state)
{
	struct phy_tja1103_data *const data = dev->data;
	const struct phy_tja1103_config *const cfg = dev->config;
	int rc = 0;

	k_sem_take(&data->sem, K_FOREVER);

	/* If Interrupt is configured then the workqueue will not
	 * update the link state periodically so do it explicitly
	 */
	if (cfg->gpio_interrupt.port != NULL) {
		rc = update_link_state(dev);
	}

	memcpy(state, &data->state, sizeof(struct phy_link_state));

	k_sem_give(&data->sem);

	return rc;
}

/* Invoke the registered link callback, if any, with the current state. */
static void invoke_link_cb(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
	struct phy_link_state state;

	if (data->cb == NULL) {
		return;
	}

	/* Send callback only on link state change */
	if (phy_tja1103_get_link_state(dev, &state) != 0) {
		return;
	}

	data->cb(dev, &state, data->cb_data);
}

/* Delayed-work handler for poll mode: refresh the link state, notify on
 * change, then reschedule itself every CONFIG_PHY_MONITOR_PERIOD ms.
 */
static void monitor_work_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct phy_tja1103_data *const data =
		CONTAINER_OF(dwork, struct phy_tja1103_data, monitor_work);
	const struct device *dev = data->dev;
	int rc;

	k_sem_take(&data->sem, K_FOREVER);

	rc = update_link_state(dev);

	k_sem_give(&data->sem);

	/* If link state has changed and a callback is set, invoke callback */
	if (rc == 0) {
		invoke_link_cb(dev);
	}

	/* Submit delayed work */
	k_work_reschedule(&data->monitor_work, K_MSEC(CONFIG_PHY_MONITOR_PERIOD));
}

/* Bottom-half thread for interrupt mode: woken by the GPIO ISR via
 * offload_sem, reads the functional IRQ status over MDIO and acknowledges
 * link-related interrupts. Exits permanently if the MDIO read fails.
 */
static void phy_tja1103_irq_offload_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	const struct device *dev = p1;
	struct phy_tja1103_data *const data = dev->data;
	uint16_t irq;

	for (;;) {
		/* await trigger from ISR */
		k_sem_take(&data->offload_sem, K_FOREVER);

		if (phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					 TJA1103_PHY_FUNC_IRQ_MSTATUS, &irq) < 0) {
			return;
		}

		/* Handling Link related Functional IRQs */
		if (irq & (TJA1103_PHY_FUNC_IRQ_LINK_EVENT | TJA1103_PHY_FUNC_IRQ_LINK_AVAIL)) {
			/* Send callback to MAC on link status changed */
			invoke_link_cb(dev);

			/* Ack the asserted link related interrupts */
			phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					      TJA1103_PHY_FUNC_IRQ_ACK, irq);
		}
	}
}

/* GPIO ISR: defer all MDIO work to the offload thread. */
static void phy_tja1103_handle_irq(const struct device *port, struct gpio_callback *cb,
				   uint32_t pins)
{
	ARG_UNUSED(pins);
	ARG_UNUSED(port);

	struct phy_tja1103_data *const data =
		CONTAINER_OF(cb, struct phy_tja1103_data, phy_tja1103_int_callback);

	/* Trigger BH before leaving the ISR */
	k_sem_give(&data->offload_sem);
}

/* Select link-change reporting mode: if an interrupt GPIO is present in DT,
 * configure it, enable the PHY link IRQs and spawn the offload thread;
 * otherwise fall back to periodic polling via monitor_work.
 */
static void phy_tja1103_cfg_irq_poll(const struct device *dev)
{
	struct phy_tja1103_data *const data = dev->data;
	const struct phy_tja1103_config *const cfg = dev->config;
	int ret;

	if (cfg->gpio_interrupt.port != NULL) {
		if (!gpio_is_ready_dt(&cfg->gpio_interrupt)) {
			LOG_ERR("Interrupt GPIO device %s is not ready",
				cfg->gpio_interrupt.port->name);
			return;
		}

		ret = gpio_pin_configure_dt(&cfg->gpio_interrupt, GPIO_INPUT);
		if (ret < 0) {
			LOG_ERR("Failed to configure interrupt GPIO, %d", ret);
			return;
		}

		gpio_init_callback(&(data->phy_tja1103_int_callback), phy_tja1103_handle_irq,
				   BIT(cfg->gpio_interrupt.pin));

		/* Add callback structure to global syslist */
		ret = gpio_add_callback(cfg->gpio_interrupt.port, &data->phy_tja1103_int_callback);
		if (ret < 0) {
			LOG_ERR("Failed to add INT callback, %d", ret);
			return;
		}

		ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					    TJA1103_PHY_FUNC_IRQ_EN,
					    (TJA1103_PHY_FUNC_IRQ_LINK_EVENT_EN |
					     TJA1103_PHY_FUNC_IRQ_LINK_AVAIL_EN));
		if (ret < 0) {
			return;
		}

		ret = gpio_pin_interrupt_configure_dt(&cfg->gpio_interrupt, GPIO_INT_EDGE_FALLING);
		if (ret < 0) {
			LOG_ERR("Failed to enable INT, %d", ret);
			return;
		}

		/* PHY initialized, IRQ configured, now initialize the BH handler */
		k_thread_create(&data->irq_thread, data->irq_thread_stack,
				CONFIG_PHY_TJA1103_IRQ_THREAD_STACK_SIZE,
				phy_tja1103_irq_offload_thread, (void *)dev, NULL, NULL,
				CONFIG_PHY_TJA1103_IRQ_THREAD_PRIO, K_ESSENTIAL, K_NO_WAIT);
		k_thread_name_set(&data->irq_thread, "phy_tja1103_irq_offload");
	} else {
		k_work_init_delayable(&data->monitor_work, monitor_work_handler);
		/* Run once immediately; the handler reschedules itself */
		monitor_work_handler(&data->monitor_work.work);
	}
}

/* PHY API .cfg_link callback: only 100BASE-T1 full duplex is supported. */
static int phy_tja1103_cfg_link(const struct device *dev, enum phy_link_speed adv_speeds)
{
	ARG_UNUSED(dev);

	if (adv_speeds & LINK_FULL_100BASE_T) {
		return 0;
	}

	return -ENOTSUP;
}

/* Device init: verify PHY ID, unlock config registers, apply master/slave
 * selection, ack the FUSA self-test IRQ and set up IRQ/poll reporting.
 */
static int phy_tja1103_init(const struct device *dev)
{
	const struct phy_tja1103_config *const cfg = dev->config;
	struct phy_tja1103_data *const data = dev->data;
	uint32_t phy_id = 0;
	uint16_t val;
	int ret;

	data->dev = dev;
	data->cb = NULL;
	data->state.is_up = false;
	data->state.speed = LINK_FULL_100BASE_T;

	ret = WAIT_FOR(!phy_tja1103_id(dev, &phy_id) && phy_id == TJA1103_ID,
		       TJA1103_AWAIT_RETRY_COUNT * TJA1103_AWAIT_DELAY_POLL_US,
		       k_sleep(K_USEC(TJA1103_AWAIT_DELAY_POLL_US)));
	/* NOTE(review): if WAIT_FOR returns a bool (Zephyr sys/util.h), ret < 0
	 * can never be true, so a timeout is silently ignored here — confirm
	 * whether this should be `if (!ret)`.
	 */
	if (ret < 0) {
		LOG_ERR("Unable to obtain PHY ID for device 0x%x", cfg->phy_addr);
		return -ENODEV;
	}

	/* enable config registers */
	ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_DEVICE_CONTROL,
				    TJA1103_DEVICE_CONTROL_GLOBAL_CFG_EN |
				    TJA1103_DEVICE_CONTROL_SUPER_CFG_EN);
	if (ret < 0) {
		return ret;
	}

	ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_PHY_CONTROL,
				    TJA1103_PHY_CONTROL_CFG_EN);
	if (ret < 0) {
		return ret;
	}

	ret = phy_tja1103_c45_read(dev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, &val);
	if (ret < 0) {
		return ret;
	}

	/* Change master/slave mode if needed (1 = force master, 2 = force slave) */
	if (cfg->master_slave == 1) {
		val |= MDIO_PMA_PMD_BT1_CTRL_CFG_MST;
	} else if (cfg->master_slave == 2) {
		val &= ~MDIO_PMA_PMD_BT1_CTRL_CFG_MST;
	}

	ret = phy_tja1103_c45_write(dev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, val);
	if (ret < 0) {
		return ret;
	}

	/* Check always accessible register for handling NMIs */
	ret = phy_tja1103_c45_read(dev, MDIO_MMD_VENDOR_SPECIFIC1, TJA1103_ALWAYS_ACCESSIBLE,
				   &val);
	if (ret < 0) {
		return ret;
	}

	/* Ack Fusa Pass Interrupt if Startup Self Test Passed successfully */
	if (val & TJA1103_ALWAYS_ACCESSIBLE_FUSA_PASS_IRQ) {
		ret = phy_tja1103_c45_write(dev, MDIO_MMD_VENDOR_SPECIFIC1,
					    TJA1103_ALWAYS_ACCESSIBLE,
					    TJA1103_ALWAYS_ACCESSIBLE_FUSA_PASS_IRQ);
	}

	/* Configure interrupt or poll mode for reporting link changes */
	phy_tja1103_cfg_irq_poll(dev);

	return ret;
}

/* PHY API .link_cb_set callback: register the callback and fire it once
 * so the caller learns the current link status immediately.
 */
static int phy_tja1103_link_cb_set(const struct device *dev, phy_callback_t cb, void *user_data)
{
	struct phy_tja1103_data *const data = dev->data;

	data->cb = cb;
	data->cb_data = user_data;

	/* Invoke the callback to notify the caller of the current
	 * link status.
	 */
	invoke_link_cb(dev);

	return 0;
}

static const struct ethphy_driver_api phy_tja1103_api = {
	.get_link = phy_tja1103_get_link_state,
	.cfg_link = phy_tja1103_cfg_link,
	.link_cb_set = phy_tja1103_link_cb_set,
	.read = phy_tja1103_reg_read,
	.write = phy_tja1103_reg_write,
};

/* Per-instance config/data/device definition from devicetree. */
#define TJA1103_INITIALIZE(n)                                                                  \
	static const struct phy_tja1103_config phy_tja1103_config_##n = {                      \
		.phy_addr = DT_INST_REG_ADDR(n),                                               \
		.mdio = DEVICE_DT_GET(DT_INST_BUS(n)),                                         \
		.gpio_interrupt = GPIO_DT_SPEC_INST_GET_OR(n, int_gpios, {0}),                 \
		.master_slave = DT_INST_ENUM_IDX(n, master_slave),                             \
	};                                                                                     \
	static struct phy_tja1103_data phy_tja1103_data_##n = {                                \
		.sem = Z_SEM_INITIALIZER(phy_tja1103_data_##n.sem, 1, 1),                      \
		.offload_sem = Z_SEM_INITIALIZER(phy_tja1103_data_##n.offload_sem, 0, 1),      \
	};                                                                                     \
	DEVICE_DT_INST_DEFINE(n, &phy_tja1103_init, NULL, &phy_tja1103_data_##n,               \
			      &phy_tja1103_config_##n, POST_KERNEL, CONFIG_PHY_INIT_PRIORITY,  \
			      &phy_tja1103_api);

DT_INST_FOREACH_STATUS_OKAY(TJA1103_INITIALIZE)
```
/content/code_sandbox/drivers/ethernet/phy/phy_tja1103.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,746
```unknown
# NXP ENET ethernet drivers configuration options

menu "NXP ENET"

# Exactly one of the two drivers is selected; the new ETH_NXP_ENET driver is
# preferred when the nxp,enet-mac DT compatible is enabled.
choice NXP_ENET_DRIVER
	prompt "NXP ENET Driver Version"
	default ETH_NXP_ENET if DT_HAS_NXP_ENET_MAC_ENABLED
	default ETH_MCUX if DT_HAS_NXP_KINETIS_ETHERNET_ENABLED
	depends on DT_HAS_NXP_ENET_MAC_ENABLED || DT_HAS_NXP_KINETIS_ETHERNET_ENABLED

config ETH_NXP_ENET
	bool "NXP ENET Ethernet driver"
	select NOCACHE_MEMORY if HAS_MCUX_CACHE && CPU_HAS_DCACHE
	select ARM_MPU if CPU_CORTEX_M7
	select MDIO if DT_HAS_NXP_ENET_MDIO_ENABLED
	select NET_POWER_MANAGEMENT if (PM_DEVICE && SOC_FAMILY_KINETIS)
	select ETH_DSA_SUPPORT
	help
	  Enable NXP ENET Ethernet driver.

config ETH_MCUX
	bool "MCUX Ethernet driver"
	select NOCACHE_MEMORY if HAS_MCUX_CACHE && CPU_HAS_DCACHE
	select ARM_MPU if CPU_CORTEX_M7
	select NET_POWER_MANAGEMENT if PM_DEVICE
	select ETH_DSA_SUPPORT
	select DEPRECATED
	help
	  Enable deprecated legacy MCUX Ethernet driver.
	  Note, this driver performs one shot PHY setup.
	  There is no support for PHY disconnect, reconnect
	  or configuration change.

endchoice

if ETH_NXP_ENET

config ETH_NXP_ENET_1G
	bool "1G mode for ENET1G instance"
	default y
	depends on DT_HAS_NXP_ENET1G_ENABLED
	help
	  Enable the use of the ENET1G ethernet instance in 1G mode.

config ETH_NXP_ENET_USE_DTCM_FOR_DMA_BUFFER
	bool "Use DTCM for hardware DMA buffers"
	default y
	help
	  Place the hardware DMA buffers into DTCM for better
	  networking performance.

config ETH_NXP_ENET_HW_ACCELERATION
	bool "Hardware acceleration"
	default y
	depends on !NET_IPV6
	help
	  Enable hardware acceleration for the following:
	  - IPv4, UDP and TCP checksum (both Rx and Tx)

config ETH_NXP_ENET_RX_BUFFERS
	int "Number of RX buffers for ethernet driver"
	default 6
	range 6 16
	help
	  Set the number of RX buffers provided to the NXP ENET driver.

config ETH_NXP_ENET_TX_BUFFERS
	int "Number of TX buffers for ethernet driver"
	default 1
	range 1 16
	help
	  Set the number of TX buffers provided to the NXP ENET driver.

config ETH_NXP_ENET_RX_THREAD_STACK_SIZE
	int "NXP ENET RX thread stack size"
	default 1600
	help
	  ENET RX thread stack size in bytes.

config ETH_NXP_ENET_RX_THREAD_PRIORITY
	int "NXP ENET driver RX cooperative thread priority"
	default 2
	help
	  ENET MAC Driver handles RX in cooperative workqueue thread.
	  This options sets the priority of that thread.

endif # ETH_NXP_ENET

# Options below apply only to the deprecated legacy driver.
if ETH_MCUX

config ETH_MCUX_PROMISCUOUS_MODE
	bool "Promiscuous mode"
	help
	  Place the Ethernet receiver in promiscuous mode. This may be useful
	  for debugging and not needed for normal work.

config ETH_MCUX_USE_DTCM_FOR_DMA_BUFFER
	bool "Use DTCM for hardware DMA buffers"
	default y
	help
	  Place the hardware DMA buffers into DTCM for better
	  networking performance.

config ETH_MCUX_HW_ACCELERATION
	bool "Hardware acceleration"
	default y
	help
	  Enable hardware acceleration for the following:
	  - IPv4, UDP and TCP checksum (both Rx and Tx)

config ETH_MCUX_RX_BUFFERS
	int "Number of RX buffers for ethernet driver"
	default 6
	range 6 16
	help
	  Set the number of RX buffers provided to the NXP ENET driver.

config ETH_MCUX_TX_BUFFERS
	int "Number of TX buffers for ethernet driver"
	default 1
	range 1 16
	help
	  Set the number of TX buffers provided to the NXP ENET driver.

menu "Legacy driver options"

config ETH_MCUX_RMII_EXT_CLK
	bool "RMII clock from external sources"
	help
	  Setting this option will configure MCUX clock block to feed RMII
	  reference clock from external source (ENET_1588_CLKIN)

config ETH_MCUX_NO_PHY_SMI
	bool "Do not use SMI for PHY communication"
	help
	  Some PHY devices, with DSA capabilities do not use SMI for
	  communication with MAC ENET controller. Other busses - like SPI
	  or I2C are used instead.

config ETH_MCUX_PHY_TICK_MS
	int "PHY poll period (ms)"
	default 1000
	range 100 30000
	help
	  Set the PHY status polling period.

config ETH_MCUX_PHY_EXTRA_DEBUG
	bool "Additional detailed PHY debug"
	help
	  Enable additional PHY related debug information related to
	  PHY status polling.

config ETH_MCUX_PHY_RESET
	bool "Reset the PHY at boot"
	help
	  Reset the ethernet PHY at boot. Requires dts properties int-gpios
	  and reset-gpios to be present.

config PTP_CLOCK_MCUX
	bool "MCUX PTP clock driver support"
	default y
	depends on PTP_CLOCK || NET_L2_PTP
	help
	  Enable MCUX PTP clock support.

if PTP_CLOCK_MCUX

config ETH_MCUX_PTP_CLOCK_SRC_HZ
	int "Frequency of the clock source for the PTP timer"
	default 50000000 if SOC_SERIES_KINETIS_K6X
	default 50000000 if SOC_SERIES_IMXRT10XX
	default 24000000 if SOC_SERIES_IMXRT11XX
	help
	  Set the frequency in Hz sourced to the PTP timer.
	  If the value is set properly, the timer will be accurate.

config ETH_MCUX_PTP_CLOCK_INIT_PRIO
	int
	default 85
	help
	  MCUX PTP Clock initialization priority level. There is
	  a dependency from the network stack that this device
	  initializes before network stack (NET_INIT_PRIO).

endif # PTP_CLOCK_MCUX

endmenu # Legacy options

endif # ETH_MCUX

endmenu # NXP ENET
```
/content/code_sandbox/drivers/ethernet/nxp_enet/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,326
```c
/* NXP ENET MAC Driver
 *
 *
 * Inspiration from eth_mcux.c, which was:
 *
 */

#define DT_DRV_COMPAT nxp_enet_mac

/* Set up logging module for this driver */
#define LOG_MODULE_NAME eth_nxp_enet_mac
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/device.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/kernel/thread_stack.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/phy.h>
#include <zephyr/net/mii.h>
#include <ethernet/eth_stats.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>
#ifdef CONFIG_PTP_CLOCK
#include <zephyr/drivers/ptp_clock.h>
#endif
#ifdef CONFIG_NET_DSA
#include <zephyr/net/dsa.h>
#endif
#if defined(CONFIG_NET_POWER_MANAGEMENT) && defined(CONFIG_PM_DEVICE)
#include <zephyr/pm/device.h>
#endif

#include "../eth.h"
#include <zephyr/drivers/ethernet/eth_nxp_enet.h>
#include <zephyr/dt-bindings/ethernet/nxp_enet.h>
#include <fsl_enet.h>

#define FREESCALE_OUI_B0 0x00
#define FREESCALE_OUI_B1 0x04
#define FREESCALE_OUI_B2 0x9f

/* Per-SoC source of a (probably) board-unique 24-bit value used to derive a
 * MAC address; 0xFFFFFF means no unique source is available on this platform.
 */
#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
#define ETH_NXP_ENET_UNIQUE_ID (OCOTP->CFG1 ^ OCOTP->CFG2)
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
#define ETH_NXP_ENET_UNIQUE_ID (OCOTP->FUSEN[40].FUSE)
#elif defined(CONFIG_SOC_SERIES_KINETIS_K6X)
#define ETH_NXP_ENET_UNIQUE_ID (SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)
#elif defined(CONFIG_SOC_SERIES_RW6XX)
#define ETH_NXP_ENET_UNIQUE_ID (OCOTP->OTP_SHADOW[46])
#else
#define ETH_NXP_ENET_UNIQUE_ID 0xFFFFFF
#endif

/* Only ring 0 is used by this driver */
#define RING_ID 0

/* Where the interface MAC address comes from (devicetree selection). */
enum mac_address_source {
	MAC_ADDR_SOURCE_LOCAL,
	MAC_ADDR_SOURCE_RANDOM,
	MAC_ADDR_SOURCE_UNIQUE,
	MAC_ADDR_SOURCE_FUSED,
	MAC_ADDR_SOURCE_INVALID,
};

struct nxp_enet_mac_config {
	const struct device *module_dev;
	const struct device *clock_dev;
	clock_control_subsys_t clock_subsys;
	enum mac_address_source mac_addr_source;
	const struct pinctrl_dev_config *pincfg;
	enet_buffer_config_t buffer_config[1];
	uint8_t phy_mode;
	void (*irq_config_func)(void);
	const struct device *phy_dev;
	const struct device *mdio;
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	const struct device *ptp_clock;
#endif
};

struct nxp_enet_mac_data {
	ENET_Type *base;
	struct net_if *iface;
	uint8_t mac_addr[6];
	enet_handle_t enet_handle;
	struct k_sem tx_buf_sem;
	struct k_work rx_work;
	const struct device *dev;
	struct k_sem rx_thread_sem;
	struct k_mutex tx_frame_buf_mutex;
	struct k_mutex rx_frame_buf_mutex;
#ifdef CONFIG_PTP_CLOCK_NXP_ENET
	struct k_sem ptp_ts_sem;
	struct k_mutex *ptp_mutex; /* created in PTP driver */
#endif
	uint8_t *tx_frame_buf;
	uint8_t *rx_frame_buf;
};

/* One shared cooperative work queue drains RX for all ENET instances */
static K_THREAD_STACK_DEFINE(enet_rx_stack, CONFIG_ETH_NXP_ENET_RX_THREAD_STACK_SIZE);
static struct k_work_q rx_work_queue;

static int rx_queue_init(void)
{
	struct k_work_queue_config cfg = {.name = "ENET_RX"};

	k_work_queue_init(&rx_work_queue);
	k_work_queue_start(&rx_work_queue, enet_rx_stack,
			   K_THREAD_STACK_SIZEOF(enet_rx_stack),
			   K_PRIO_COOP(CONFIG_ETH_NXP_ENET_RX_THREAD_PRIORITY),
			   &cfg);

	return 0;
}

SYS_INIT(rx_queue_init, POST_KERNEL, 0);

static inline struct net_if *get_iface(struct nxp_enet_mac_data *data)
{
	return data->iface;
}

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
/* Return true if pkt is a PTP frame (honouring VLAN tagging); as a side
 * effect, bump a PTP packet's priority to NET_PRIORITY_CA.
 */
static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt)
{
	struct net_eth_vlan_hdr *hdr_vlan = (struct net_eth_vlan_hdr *)NET_ETH_HDR(pkt);
	struct ethernet_context *eth_ctx = net_if_l2_data(iface);
	bool pkt_is_ptp;

	if (net_eth_is_vlan_enabled(eth_ctx, iface)) {
		pkt_is_ptp = ntohs(hdr_vlan->type) == NET_ETH_PTYPE_PTP;
	} else {
		pkt_is_ptp = ntohs(NET_ETH_HDR(pkt)->type) == NET_ETH_PTYPE_PTP;
	}

	if (pkt_is_ptp) {
		net_pkt_set_priority(pkt, NET_PRIORITY_CA);
	}

	return pkt_is_ptp;
}

/* TX-complete hook: publish the hardware TX timestamp on PTP packets and
 * release the waiter blocked in eth_wait_for_ptp_ts(); drops the packet
 * reference taken at send time.
 */
static inline void ts_register_tx_event(const struct device *dev, enet_frame_info_t *frameinfo)
{
	struct nxp_enet_mac_data *data = dev->data;
	struct net_pkt *pkt = frameinfo->context;

	if (pkt && atomic_get(&pkt->atomic_ref) > 0) {
		if (eth_get_ptp_data(net_pkt_iface(pkt), pkt) && frameinfo->isTsAvail) {
			k_mutex_lock(data->ptp_mutex, K_FOREVER);

			pkt->timestamp.nanosecond = frameinfo->timeStamp.nanosecond;
			pkt->timestamp.second = frameinfo->timeStamp.second;

			net_if_add_tx_timestamp(pkt);
			k_sem_give(&data->ptp_ts_sem);

			k_mutex_unlock(data->ptp_mutex);
		}
		net_pkt_unref(pkt);
	}
}

/* Block until ts_register_tx_event() signals the timestamp is recorded. */
static inline void eth_wait_for_ptp_ts(const struct device *dev, struct net_pkt *pkt)
{
	struct nxp_enet_mac_data *data = dev->data;

	net_pkt_ref(pkt);
	k_sem_take(&data->ptp_ts_sem, K_FOREVER);
}
#else
#define eth_get_ptp_data(...) false
#define ts_register_tx_event(...)
#define eth_wait_for_ptp_ts(...)
#endif /* CONFIG_PTP_CLOCK_NXP_ENET */

#ifdef CONFIG_PTP_CLOCK
static const struct device *eth_nxp_enet_get_ptp_clock(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;

	return config->ptp_clock;
}
#endif /* CONFIG_PTP_CLOCK */

/* Ethernet API .send: copy the packet into the driver TX frame buffer
 * (serialized by tx_frame_buf_mutex) and hand it to the ENET hardware.
 * Blocks until a TX buffer descriptor is free (tx_buf_sem).
 */
static int eth_nxp_enet_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct nxp_enet_mac_data *data = dev->data;
	uint16_t total_len = net_pkt_get_len(pkt);
	bool frame_is_timestamped;
	status_t ret;

	/* Wait for a TX buffer descriptor to be available */
	k_sem_take(&data->tx_buf_sem, K_FOREVER);

	/* Enter critical section for TX frame buffer access */
	k_mutex_lock(&data->tx_frame_buf_mutex, K_FOREVER);

	ret = net_pkt_read(pkt, data->tx_frame_buf, total_len);
	if (ret) {
		k_sem_give(&data->tx_buf_sem);
		goto exit;
	}

	frame_is_timestamped = eth_get_ptp_data(net_pkt_iface(pkt), pkt);

	ret = ENET_SendFrame(data->base, &data->enet_handle, data->tx_frame_buf,
			     total_len, RING_ID, frame_is_timestamped, pkt);

	if (ret == kStatus_Success) {
		goto exit;
	}

	/* NOTE(review): the PTP timestamp wait below sits on the
	 * ENET_SendFrame failure path (success jumps straight to exit) —
	 * confirm this ordering is intended.
	 */
	if (frame_is_timestamped) {
		eth_wait_for_ptp_ts(dev, pkt);
	} else {
		LOG_ERR("ENET_SendFrame error: %d", ret);
		ENET_ReclaimTxDescriptor(data->base, &data->enet_handle, RING_ID);
	}

exit:
	/* Leave critical section for TX frame buffer access */
	k_mutex_unlock(&data->tx_frame_buf_mutex);

	return ret;
}

/* Ethernet API iface init: bind MAC address and interface, start with the
 * carrier off (the PHY callback turns it on), then enable interrupts.
 */
static void eth_nxp_enet_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;

	net_if_set_link_addr(iface, data->mac_addr, sizeof(data->mac_addr), NET_LINK_ETHERNET);

	if (data->iface == NULL) {
		data->iface = iface;
	}

#if defined(CONFIG_NET_DSA)
	dsa_register_master_tx(iface, &eth_nxp_enet_tx);
#endif

	ethernet_init(iface);
	net_eth_carrier_off(data->iface);

	config->irq_config_func();
	nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_INTERRUPT_ENABLED, NULL);
}

/* Ethernet API .get_capabilities: capability mask assembled from Kconfig;
 * 1000BASE-T is advertised only in RGMII mode with ETH_NXP_ENET_1G.
 */
static enum ethernet_hw_caps eth_nxp_enet_get_capabilities(const struct device *dev)
{
#if defined(CONFIG_ETH_NXP_ENET_1G)
	const struct nxp_enet_mac_config *config = dev->config;
#else
	ARG_UNUSED(dev);
#endif
	enum ethernet_hw_caps caps;

	caps = ETHERNET_LINK_10BASE_T |
	       ETHERNET_HW_FILTERING |
#if defined(CONFIG_NET_VLAN)
	       ETHERNET_HW_VLAN |
#endif
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	       ETHERNET_PTP |
#endif
#if defined(CONFIG_NET_DSA)
	       ETHERNET_DSA_MASTER_PORT |
#endif
#if defined(CONFIG_ETH_NXP_ENET_HW_ACCELERATION)
	       ETHERNET_HW_TX_CHKSUM_OFFLOAD |
	       ETHERNET_HW_RX_CHKSUM_OFFLOAD |
#endif
	       ETHERNET_LINK_100BASE_T;

	if (COND_CODE_1(IS_ENABLED(CONFIG_ETH_NXP_ENET_1G),
			(config->phy_mode == NXP_ENET_RGMII_MODE), (0))) {
		caps |= ETHERNET_LINK_1000BASE_T;
	}

	return caps;
}

/* Ethernet API .set_config: runtime MAC address change and multicast
 * filter membership; other config types are rejected with -ENOTSUP.
 */
static int eth_nxp_enet_set_config(const struct device *dev,
				   enum ethernet_config_type type,
				   const struct ethernet_config *cfg)
{
	struct nxp_enet_mac_data *data = dev->data;

	switch (type) {
	case ETHERNET_CONFIG_TYPE_MAC_ADDRESS:
		memcpy(data->mac_addr, cfg->mac_address.addr, sizeof(data->mac_addr));
		ENET_SetMacAddr(data->base, data->mac_addr);
		net_if_set_link_addr(data->iface, data->mac_addr,
				     sizeof(data->mac_addr), NET_LINK_ETHERNET);
		LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name,
			data->mac_addr[0], data->mac_addr[1], data->mac_addr[2],
			data->mac_addr[3], data->mac_addr[4], data->mac_addr[5]);
		return 0;
	case ETHERNET_CONFIG_TYPE_FILTER:
		/* The ENET driver does not modify the address buffer but the API is not const */
		if (cfg->filter.set) {
			ENET_AddMulticastGroup(data->base,
					       (uint8_t *)cfg->filter.mac_address.addr);
		} else {
			ENET_LeaveMulticastGroup(data->base,
						 (uint8_t *)cfg->filter.mac_address.addr);
		}
		return 0;
	default:
		break;
	}

	return -ENOTSUP;
}

/* Pull one frame from the hardware into a net_pkt and hand it to the stack.
 * Returns 1 if a frame was consumed (caller should poll again), 0 when the
 * RX ring is empty, -EIO on error (frame is flushed / pkt freed).
 */
static int eth_nxp_enet_rx(const struct device *dev)
{
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	const struct nxp_enet_mac_config *config = dev->config;
#endif
	struct nxp_enet_mac_data *data = dev->data;
	uint32_t frame_length = 0U;
	struct net_if *iface;
	struct net_pkt *pkt = NULL;
	status_t status;
	uint32_t ts;

	status = ENET_GetRxFrameSize(&data->enet_handle, (uint32_t *)&frame_length, RING_ID);
	if (status == kStatus_ENET_RxFrameEmpty) {
		return 0;
	} else if (status == kStatus_ENET_RxFrameError) {
		enet_data_error_stats_t error_stats;

		LOG_ERR("ENET_GetRxFrameSize return: %d", (int)status);
		ENET_GetRxErrBeforeReadFrame(&data->enet_handle, &error_stats, RING_ID);
		goto flush;
	}

	if (frame_length > NET_ETH_MAX_FRAME_SIZE) {
		LOG_ERR("Frame too large (%d)", frame_length);
		goto flush;
	}

	/* Using root iface. It will be updated in net_recv_data() */
	pkt = net_pkt_rx_alloc_with_buffer(data->iface, frame_length, AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		goto flush;
	}

	k_mutex_lock(&data->rx_frame_buf_mutex, K_FOREVER);
	status = ENET_ReadFrame(data->base, &data->enet_handle, data->rx_frame_buf,
				frame_length, RING_ID, &ts);
	k_mutex_unlock(&data->rx_frame_buf_mutex);

	if (status) {
		LOG_ERR("ENET_ReadFrame failed: %d", (int)status);
		goto error;
	}

	if (net_pkt_write(pkt, data->rx_frame_buf, frame_length)) {
		LOG_ERR("Unable to write frame into the packet");
		goto error;
	}

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	k_mutex_lock(data->ptp_mutex, K_FOREVER);

	/* Invalid value by default. */
	pkt->timestamp.nanosecond = UINT32_MAX;
	pkt->timestamp.second = UINT64_MAX;

	/* Timestamp the packet using PTP clock */
	if (eth_get_ptp_data(get_iface(data), pkt)) {
		struct net_ptp_time ptp_time;

		ptp_clock_get(config->ptp_clock, &ptp_time);

		/* If latest timestamp reloads after getting from Rx BD,
		 * then second - 1 to make sure the actual Rx timestamp is accurate
		 */
		if (ptp_time.nanosecond < ts) {
			ptp_time.second--;
		}

		pkt->timestamp.nanosecond = ts;
		pkt->timestamp.second = ptp_time.second;
	}
	k_mutex_unlock(data->ptp_mutex);
#endif /* CONFIG_PTP_CLOCK_NXP_ENET */

	iface = get_iface(data);
#if defined(CONFIG_NET_DSA)
	iface = dsa_net_recv(iface, &pkt);
#endif
	if (net_recv_data(iface, pkt) < 0) {
		goto error;
	}

	return 1;

flush:
	/* Flush the current read buffer. This operation can
	 * only report failure if there is no frame to flush,
	 * which cannot happen in this context.
	 */
	status = ENET_ReadFrame(data->base, &data->enet_handle, NULL, 0, RING_ID, NULL);
	__ASSERT_NO_MSG(status == kStatus_Success);

error:
	if (pkt) {
		net_pkt_unref(pkt);
	}
	eth_stats_update_errors_rx(get_iface(data));
	return -EIO;
}

/* RX work item: wait for the ISR signal, drain the RX ring, then re-enable
 * the RX frame interrupt that the ISR disabled.
 */
static void eth_nxp_enet_rx_thread(struct k_work *work)
{
	struct nxp_enet_mac_data *data = CONTAINER_OF(work, struct nxp_enet_mac_data, rx_work);
	const struct device *dev = data->dev;
	int ret;

	if (k_sem_take(&data->rx_thread_sem, K_FOREVER)) {
		return;
	}

	do {
		ret = eth_nxp_enet_rx(dev);
	} while (ret == 1);

	ENET_EnableInterrupts(data->base, kENET_RxFrameInterrupt);
}

/* Advertise 10/100 speeds on the PHY; add 1000 when in RGMII 1G mode. */
static int nxp_enet_phy_configure(const struct device *phy, uint8_t phy_mode)
{
	enum phy_link_speed speeds = LINK_HALF_10BASE_T | LINK_FULL_10BASE_T |
				     LINK_HALF_100BASE_T | LINK_FULL_100BASE_T;

	if (COND_CODE_1(IS_ENABLED(CONFIG_ETH_NXP_ENET_1G),
			(phy_mode == NXP_ENET_RGMII_MODE), (0))) {
		speeds |= (LINK_HALF_1000BASE_T | LINK_FULL_1000BASE_T);
	}

	/* Configure the PHY */
	return phy_configure_link(phy, speeds);
}

/* PHY link callback: program MAC MII speed/duplex on link-up and toggle
 * the net_if carrier; re-arms PHY advertisement on link-down.
 */
static void nxp_enet_phy_cb(const struct device *phy, struct phy_link_state *state,
			    void *eth_dev)
{
	const struct device *dev = eth_dev;
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	enet_mii_speed_t speed;
	enet_mii_duplex_t duplex;

	if (state->is_up) {
#if defined(CONFIG_ETH_NXP_ENET_1G)
		if (PHY_LINK_IS_SPEED_1000M(state->speed)) {
			speed = kENET_MiiSpeed1000M;
		} else if (PHY_LINK_IS_SPEED_100M(state->speed)) {
#else
		if (PHY_LINK_IS_SPEED_100M(state->speed)) {
#endif
			speed = kENET_MiiSpeed100M;
		} else {
			speed = kENET_MiiSpeed10M;
		}

		if (PHY_LINK_IS_FULL_DUPLEX(state->speed)) {
			duplex = kENET_MiiFullDuplex;
		} else {
			duplex = kENET_MiiHalfDuplex;
		}

		ENET_SetMII(data->base, speed, duplex);
	}

	if (!data->iface) {
		return;
	}

	LOG_INF("Link is %s", state->is_up ? "up" : "down");

	if (!state->is_up) {
		net_eth_carrier_off(data->iface);
		nxp_enet_phy_configure(phy, config->phy_mode);
	} else {
		net_eth_carrier_on(data->iface);
	}
}

/* Configure PHY advertisement and register the link-state callback. */
static int nxp_enet_phy_init(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;
	int ret = 0;

	ret = nxp_enet_phy_configure(config->phy_dev, config->phy_mode);
	if (ret) {
		return ret;
	}

	ret = phy_link_callback_set(config->phy_dev, nxp_enet_phy_cb, (void *)dev);
	if (ret) {
		return ret;
	}

	return ret;
}

/* Dispatch MAC-originated events to the companion MDIO / PTP clock drivers. */
void nxp_enet_driver_cb(const struct device *dev, enum nxp_enet_driver dev_type,
			enum nxp_enet_callback_reason event, void *data)
{
	if (dev_type == NXP_ENET_MDIO) {
		nxp_enet_mdio_callback(dev, event, data);
	} else if (dev_type == NXP_ENET_PTP_CLOCK) {
		nxp_enet_ptp_clock_callback(dev, event, data);
	}
}

/* MCUX SDK event callback (runs in IRQ context via the SDK IRQ handlers). */
static void eth_callback(ENET_Type *base, enet_handle_t *handle,
#if FSL_FEATURE_ENET_QUEUE > 1
			 uint32_t ringId,
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */
			 enet_event_t event, enet_frame_info_t *frameinfo, void *param)
{
	const struct device *dev = param;
	struct nxp_enet_mac_data *data = dev->data;

	switch (event) {
	case kENET_RxEvent:
		k_sem_give(&data->rx_thread_sem);
		break;
	case kENET_TxEvent:
		ts_register_tx_event(dev, frameinfo);
		/* A TX buffer descriptor is free again */
		k_sem_give(&data->tx_buf_sem);
		break;
	case kENET_TimeStampEvent:
		/* Reset periodic timer to default value. */
		data->base->ATPER = NSEC_PER_SEC;
		break;
	default:
		break;
	}
}

#if FSL_FEATURE_ENET_QUEUE > 1
#define ENET_IRQ_HANDLER_ARGS(base, handle) base, handle, 0
#else
#define ENET_IRQ_HANDLER_ARGS(base, handle) base, handle
#endif /* FSL_FEATURE_ENET_QUEUE > 1 */

/* Top-level ISR: RX interrupts are masked and deferred to the RX work
 * queue; TX completion and MII events are handled inline.
 */
static void eth_nxp_enet_isr(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;
	unsigned int irq_lock_key = irq_lock();

	uint32_t eir = ENET_GetInterruptStatus(data->base);

	if (eir & (kENET_RxFrameInterrupt)) {
		ENET_ReceiveIRQHandler(ENET_IRQ_HANDLER_ARGS(data->base, &data->enet_handle));
		ENET_DisableInterrupts(data->base, kENET_RxFrameInterrupt);
		k_work_submit_to_queue(&rx_work_queue, &data->rx_work);
	}

	if (eir & kENET_TxFrameInterrupt) {
		ENET_TransmitIRQHandler(ENET_IRQ_HANDLER_ARGS(data->base, &data->enet_handle));
	}

	if (eir & ENET_EIR_MII_MASK) {
		nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_INTERRUPT, NULL);
	}

	irq_unlock(irq_lock_key);
}

static const struct device *eth_nxp_enet_get_phy(const struct device *dev)
{
	const struct nxp_enet_mac_config *config = dev->config;

	return config->phy_dev;
}

/* Note this is not universally unique, it just is probably unique on a network */
static inline void nxp_enet_unique_mac(uint8_t *mac_addr)
{
	uint32_t id = ETH_NXP_ENET_UNIQUE_ID;

	if (id == 0xFFFFFF) {
		LOG_ERR("No unique MAC can be provided in this platform");
	}

	/* Setting LAA bit because it is not guaranteed universally unique */
	mac_addr[0] = FREESCALE_OUI_B0 | 0x02;
	mac_addr[1] = FREESCALE_OUI_B1;
	mac_addr[2] = FREESCALE_OUI_B2;
	mac_addr[3] = FIELD_GET(0xFF0000, id);
	mac_addr[4] = FIELD_GET(0x00FF00, id);
	mac_addr[5] = FIELD_GET(0x0000FF, id);
}

#ifdef CONFIG_SOC_FAMILY_NXP_IMXRT
#include <fsl_ocotp.h>
#endif

/* Read the factory-fused MAC address from OCOTP (i.MX RT only; no-op on
 * other families).
 */
static inline void nxp_enet_fused_mac(uint8_t *mac_addr)
{
#ifdef CONFIG_SOC_FAMILY_NXP_IMXRT
	uint32_t mac_addr_fuse[2] = {0};

#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
	OCOTP_Init((OCOTP_Type *)OCOTP_BASE, CLOCK_GetIpgFreq());
	/* OTP bank 4, word 2: MAC0 */
	OCOTP_ReadFuseShadowRegisterExt((OCOTP_Type *)OCOTP_BASE,
					0x22, &mac_addr_fuse[0], 1);
	/* OTP bank 4, word 3: MAC1*/
	OCOTP_ReadFuseShadowRegisterExt((OCOTP_Type *)OCOTP_BASE,
					0x23, &mac_addr_fuse[1], 1);
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
	OCOTP_Init((OCOTP_Type *)OCOTP_BASE, 0);
	OCOTP_ReadFuseShadowRegisterExt((OCOTP_Type *)OCOTP_BASE,
					0x28, &mac_addr_fuse[0], 2);
#endif
	mac_addr[0] = mac_addr_fuse[0] & 0x000000FF;
	mac_addr[1] = (mac_addr_fuse[0] & 0x0000FF00) >> 8;
	mac_addr[2] = (mac_addr_fuse[0] & 0x00FF0000) >> 16;
	mac_addr[3] = (mac_addr_fuse[0] & 0xFF000000) >> 24;
	mac_addr[4] = (mac_addr_fuse[1] & 0x00FF);
	mac_addr[5] = (mac_addr_fuse[1] & 0xFF00) >> 8;
#else
	ARG_UNUSED(mac_addr);
#endif
}

/* Device init: pinmux, sync primitives, MAC address selection, ENET config
 * (promiscuous/VLAN/checksum offload, MII mode), bring-up via ENET_Up, and
 * PHY initialization.
 */
static int eth_nxp_enet_init(const struct device *dev)
{
	struct nxp_enet_mac_data *data = dev->data;
	const struct nxp_enet_mac_config *config = dev->config;
	enet_config_t enet_config;
	uint32_t enet_module_clock_rate;
	int err;

	data->base = (ENET_Type *)DEVICE_MMIO_GET(config->module_dev);

	err = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (err) {
		return err;
	}

	k_mutex_init(&data->rx_frame_buf_mutex);
	k_mutex_init(&data->tx_frame_buf_mutex);
	k_sem_init(&data->rx_thread_sem, 0, CONFIG_ETH_NXP_ENET_RX_BUFFERS);
	k_sem_init(&data->tx_buf_sem,
		   CONFIG_ETH_NXP_ENET_TX_BUFFERS, CONFIG_ETH_NXP_ENET_TX_BUFFERS);
#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	k_sem_init(&data->ptp_ts_sem, 0, 1);
#endif
	k_work_init(&data->rx_work, eth_nxp_enet_rx_thread);

	switch (config->mac_addr_source) {
	case MAC_ADDR_SOURCE_LOCAL:
		break;
	case MAC_ADDR_SOURCE_RANDOM:
		gen_random_mac(data->mac_addr,
			       FREESCALE_OUI_B0, FREESCALE_OUI_B1, FREESCALE_OUI_B2);
		break;
	case MAC_ADDR_SOURCE_UNIQUE:
		nxp_enet_unique_mac(data->mac_addr);
		break;
	case MAC_ADDR_SOURCE_FUSED:
		nxp_enet_fused_mac(data->mac_addr);
		break;
	default:
		return -ENOTSUP;
	}

	err = clock_control_get_rate(config->clock_dev, config->clock_subsys,
				     &enet_module_clock_rate);
	if (err) {
		return err;
	}

	ENET_GetDefaultConfig(&enet_config);

	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE)) {
		enet_config.macSpecialConfig |= kENET_ControlPromiscuousEnable;
	}

	if (IS_ENABLED(CONFIG_NET_VLAN)) {
		enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable;
	}

	if (IS_ENABLED(CONFIG_ETH_NXP_ENET_HW_ACCELERATION)) {
		enet_config.txAccelerConfig |=
			kENET_TxAccelIpCheckEnabled | kENET_TxAccelProtoCheckEnabled;
		enet_config.rxAccelerConfig |=
			kENET_RxAccelIpCheckEnabled | kENET_RxAccelProtoCheckEnabled;
	}

	enet_config.interrupt |= kENET_RxFrameInterrupt;
	enet_config.interrupt |= kENET_TxFrameInterrupt;

	if (config->phy_mode == NXP_ENET_MII_MODE) {
		enet_config.miiMode = kENET_MiiMode;
	} else if (config->phy_mode == NXP_ENET_RMII_MODE) {
		enet_config.miiMode = kENET_RmiiMode;
#if defined(CONFIG_ETH_NXP_ENET_1G)
	} else if (config->phy_mode == NXP_ENET_RGMII_MODE) {
		enet_config.miiMode = kENET_RgmiiMode;
#endif
	} else {
		return -EINVAL;
	}

	enet_config.callback = eth_callback;
	enet_config.userData = (void *)dev;

	ENET_Up(data->base, &data->enet_handle, &enet_config, config->buffer_config,
		data->mac_addr, enet_module_clock_rate);

	nxp_enet_driver_cb(config->mdio, NXP_ENET_MDIO, NXP_ENET_MODULE_RESET, NULL);

#if defined(CONFIG_PTP_CLOCK_NXP_ENET)
	nxp_enet_driver_cb(config->ptp_clock, NXP_ENET_PTP_CLOCK,
			   NXP_ENET_MODULE_RESET, &data->ptp_mutex);
	ENET_SetTxReclaim(&data->enet_handle, true, 0);
#endif

	ENET_ActiveRead(data->base);

	err = nxp_enet_phy_init(dev);
	if (err) {
		return err;
	}

	LOG_DBG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x", dev->name,
		data->mac_addr[0], data->mac_addr[1], data->mac_addr[2],
		data->mac_addr[3], data->mac_addr[4], data->mac_addr[5]);

	return 0;
}

#if defined(CONFIG_NET_POWER_MANAGEMENT)
/* PM action hook: suspend tears the MAC down and gates its clock; resume
 * re-runs full device init and brings the interface back.
 */
static int eth_nxp_enet_device_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct nxp_enet_mac_config *config = dev->config;
	struct nxp_enet_mac_data *data = dev->data;
	int ret;

	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}

	if (action == PM_DEVICE_ACTION_SUSPEND) {
		LOG_DBG("Suspending");

		ret = net_if_suspend(data->iface);
		if (ret) {
			return ret;
		}

		ENET_Reset(data->base);
		ENET_Down(data->base);
		clock_control_off(config->clock_dev,
				  (clock_control_subsys_t)config->clock_subsys);
	} else if (action == PM_DEVICE_ACTION_RESUME) {
		LOG_DBG("Resuming");

		clock_control_on(config->clock_dev,
				 (clock_control_subsys_t)config->clock_subsys);
		eth_nxp_enet_init(dev);
		net_if_resume(data->iface);
	} else {
		return -ENOTSUP;
	}

	return 0;
}

#define ETH_NXP_ENET_PM_DEVICE_INIT(n)	\
	PM_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_device_pm_action);
#define ETH_NXP_ENET_PM_DEVICE_GET(n) PM_DEVICE_DT_INST_GET(n)
#else
#define ETH_NXP_ENET_PM_DEVICE_INIT(n)
#define ETH_NXP_ENET_PM_DEVICE_GET(n) NULL
#endif /* CONFIG_NET_POWER_MANAGEMENT */

#ifdef CONFIG_NET_DSA
#define NXP_ENET_SEND_FUNC dsa_tx
#else
#define NXP_ENET_SEND_FUNC eth_nxp_enet_tx
#endif /* CONFIG_NET_DSA */

static const struct ethernet_api api_funcs = {
	.iface_api.init = eth_nxp_enet_iface_init,
	.get_capabilities = eth_nxp_enet_get_capabilities,
	.get_phy = eth_nxp_enet_get_phy,
	.set_config = eth_nxp_enet_set_config,
	.send = NXP_ENET_SEND_FUNC,
#if defined(CONFIG_PTP_CLOCK)
	.get_ptp_clock = eth_nxp_enet_get_ptp_clock,
#endif
};

/* Connect and enable one DT interrupt of the instance. */
#define NXP_ENET_CONNECT_IRQ(node_id, irq_names, idx)				\
	do {									\
		IRQ_CONNECT(DT_IRQ_BY_IDX(node_id, idx, irq),			\
			    DT_IRQ_BY_IDX(node_id, idx, priority),		\
			    eth_nxp_enet_isr,					\
			    DEVICE_DT_GET(node_id),				\
			    0);							\
		irq_enable(DT_IRQ_BY_IDX(node_id, idx, irq));			\
	} while (false);

#define NXP_ENET_DT_PHY_DEV(node_id, phy_phandle, idx)				\
	DEVICE_DT_GET(DT_PHANDLE_BY_IDX(node_id, phy_phandle, idx))

/* Choose where DMA descriptors and buffers live: DTCM when available and
 * enabled, otherwise nocache memory, otherwise normal RAM with explicit
 * cache maintenance.
 */
#if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) && \
	CONFIG_ETH_NXP_ENET_USE_DTCM_FOR_DMA_BUFFER
#define _nxp_enet_dma_desc_section __dtcm_bss_section
#define _nxp_enet_dma_buffer_section __dtcm_noinit_section
#define _nxp_enet_driver_buffer_section __dtcm_noinit_section
#define driver_cache_maintain false
#elif defined(CONFIG_NOCACHE_MEMORY)
#define _nxp_enet_dma_desc_section __nocache
#define _nxp_enet_dma_buffer_section __nocache
#define _nxp_enet_driver_buffer_section
#define driver_cache_maintain false
#else
#define _nxp_enet_dma_desc_section
#define _nxp_enet_dma_buffer_section
#define _nxp_enet_driver_buffer_section
#define driver_cache_maintain true
#endif

/* Use ENET_FRAME_MAX_VLANFRAMELEN for VLAN frame size
 * Use ENET_FRAME_MAX_FRAMELEN for Ethernet frame size
 */
#if defined(CONFIG_NET_VLAN)
#if !defined(ENET_FRAME_MAX_VLANFRAMELEN)
#define ENET_FRAME_MAX_VLANFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4)
#endif
#define ETH_NXP_ENET_BUFFER_SIZE \
	ROUND_UP(ENET_FRAME_MAX_VLANFRAMELEN, ENET_BUFF_ALIGNMENT)
#else
#define ETH_NXP_ENET_BUFFER_SIZE \
	ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT)
#endif /* CONFIG_NET_VLAN */

/* Map the DT phy-connection-type enum to the driver's MII mode constants. */
#define NXP_ENET_PHY_MODE(node_id)							\
	DT_ENUM_HAS_VALUE(node_id, phy_connection_type, mii) ? NXP_ENET_MII_MODE :	\
	(DT_ENUM_HAS_VALUE(node_id, phy_connection_type, rmii) ? NXP_ENET_RMII_MODE :	\
	(DT_ENUM_HAS_VALUE(node_id, phy_connection_type, rgmii) ? NXP_ENET_RGMII_MODE :	\
	NXP_ENET_INVALID_MII_MODE))

#ifdef CONFIG_PTP_CLOCK_NXP_ENET
#define NXP_ENET_PTP_DEV(n) .ptp_clock = DEVICE_DT_GET(DT_INST_PHANDLE(n, nxp_ptp_clock)),
#define NXP_ENET_FRAMEINFO_ARRAY(n)							\
	static enet_frame_info_t							\
		nxp_enet_##n##_tx_frameinfo_array[CONFIG_ETH_NXP_ENET_TX_BUFFERS];
#define NXP_ENET_FRAMEINFO(n)								\
	.txFrameInfo = nxp_enet_##n##_tx_frameinfo_array,
#else
#define NXP_ENET_PTP_DEV(n)
#define NXP_ENET_FRAMEINFO_ARRAY(n)
#define NXP_ENET_FRAMEINFO(n)								\
	.txFrameInfo = NULL
#endif

/* Build-time check: the node must provide some MAC address source. */
#define NXP_ENET_NODE_HAS_MAC_ADDR_CHECK(n)						\
	BUILD_ASSERT(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)) ||				\
		     DT_INST_PROP(n, zephyr_random_mac_address) ||			\
		     DT_INST_PROP(n, nxp_unique_mac) ||					\
		     DT_INST_PROP(n, nxp_fused_mac),					\
		     "MAC address not specified on ENET DT node");

/* Build-time check: RGMII requires the 1G-capable instance and Kconfig. */
#define NXP_ENET_NODE_PHY_MODE_CHECK(n)							\
	BUILD_ASSERT(NXP_ENET_PHY_MODE(DT_DRV_INST(n)) != NXP_ENET_RGMII_MODE ||	\
		     (IS_ENABLED(CONFIG_ETH_NXP_ENET_1G) &&				\
		      DT_NODE_HAS_COMPAT(DT_INST_PARENT(n), nxp_enet1g)),		\
		     "RGMII mode requires nxp,enet1g compatible on ENET DT node"	\
		     " and CONFIG_ETH_NXP_ENET_1G enabled");

/* Resolve the MAC address source from DT properties, in priority order. */
#define NXP_ENET_MAC_ADDR_SOURCE(n)							\
	COND_CODE_1(DT_NODE_HAS_PROP(DT_DRV_INST(n), local_mac_address),		\
		    (MAC_ADDR_SOURCE_LOCAL),						\
	(COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address),			\
		    (MAC_ADDR_SOURCE_RANDOM),						\
	(COND_CODE_1(DT_INST_PROP(n, nxp_unique_mac), (MAC_ADDR_SOURCE_UNIQUE),		\
	(COND_CODE_1(DT_INST_PROP(n, nxp_fused_mac), (MAC_ADDR_SOURCE_FUSED),		\
	(MAC_ADDR_SOURCE_INVALID))))))))

#define NXP_ENET_MAC_INIT(n)								\
	NXP_ENET_NODE_HAS_MAC_ADDR_CHECK(n)						\
											\
	NXP_ENET_NODE_PHY_MODE_CHECK(n)							\
											\
	PINCTRL_DT_INST_DEFINE(n);							\
											\
	NXP_ENET_FRAMEINFO_ARRAY(n)							\
											\
	static void nxp_enet_##n##_irq_config_func(void)				\
	{										\
		DT_INST_FOREACH_PROP_ELEM(n, interrupt_names,				\
					  NXP_ENET_CONNECT_IRQ);			\
	}										\
											\
	volatile static __aligned(ENET_BUFF_ALIGNMENT)					\
		_nxp_enet_dma_desc_section						\
		enet_rx_bd_struct_t							\
		nxp_enet_##n##_rx_buffer_desc[CONFIG_ETH_NXP_ENET_RX_BUFFERS];		\
											\
	volatile static
__aligned(ENET_BUFF_ALIGNMENT) \ _nxp_enet_dma_desc_section \ enet_tx_bd_struct_t \ nxp_enet_##n##_tx_buffer_desc[CONFIG_ETH_NXP_ENET_TX_BUFFERS]; \ \ static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \ _nxp_enet_dma_buffer_section \ nxp_enet_##n##_rx_buffer[CONFIG_ETH_NXP_ENET_RX_BUFFERS] \ [ETH_NXP_ENET_BUFFER_SIZE]; \ \ static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \ _nxp_enet_dma_buffer_section \ nxp_enet_##n##_tx_buffer[CONFIG_ETH_NXP_ENET_TX_BUFFERS] \ [ETH_NXP_ENET_BUFFER_SIZE]; \ \ const struct nxp_enet_mac_config nxp_enet_##n##_config = { \ .irq_config_func = nxp_enet_##n##_irq_config_func, \ .module_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), \ .clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))), \ .clock_subsys = (void *)DT_CLOCKS_CELL_BY_IDX( \ DT_INST_PARENT(n), 0, name), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .buffer_config = {{ \ .rxBdNumber = CONFIG_ETH_NXP_ENET_RX_BUFFERS, \ .txBdNumber = CONFIG_ETH_NXP_ENET_TX_BUFFERS, \ .rxBuffSizeAlign = ETH_NXP_ENET_BUFFER_SIZE, \ .txBuffSizeAlign = ETH_NXP_ENET_BUFFER_SIZE, \ .rxBdStartAddrAlign = nxp_enet_##n##_rx_buffer_desc, \ .txBdStartAddrAlign = nxp_enet_##n##_tx_buffer_desc, \ .rxBufferAlign = nxp_enet_##n##_rx_buffer[0], \ .txBufferAlign = nxp_enet_##n##_tx_buffer[0], \ .rxMaintainEnable = driver_cache_maintain, \ .txMaintainEnable = driver_cache_maintain, \ NXP_ENET_FRAMEINFO(n) \ }}, \ .phy_mode = NXP_ENET_PHY_MODE(DT_DRV_INST(n)), \ .phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)), \ .mdio = DEVICE_DT_GET(DT_INST_PHANDLE(n, nxp_mdio)), \ NXP_ENET_PTP_DEV(n) \ .mac_addr_source = NXP_ENET_MAC_ADDR_SOURCE(n), \ }; \ \ static _nxp_enet_driver_buffer_section uint8_t \ nxp_enet_##n##_tx_frame_buf[NET_ETH_MAX_FRAME_SIZE]; \ static _nxp_enet_driver_buffer_section uint8_t \ nxp_enet_##n##_rx_frame_buf[NET_ETH_MAX_FRAME_SIZE]; \ \ struct nxp_enet_mac_data nxp_enet_##n##_data = { \ .tx_frame_buf = nxp_enet_##n##_tx_frame_buf, \ .rx_frame_buf = nxp_enet_##n##_rx_frame_buf, \ .dev = 
DEVICE_DT_INST_GET(n), \ .mac_addr = DT_INST_PROP_OR(n, local_mac_address, {0}), \ }; \ \ ETH_NXP_ENET_PM_DEVICE_INIT(n) \ \ ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_init, \ ETH_NXP_ENET_PM_DEVICE_GET(n), \ &nxp_enet_##n##_data, &nxp_enet_##n##_config, \ CONFIG_ETH_INIT_PRIORITY, \ &api_funcs, NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_MAC_INIT) struct nxp_enet_mod_config { DEVICE_MMIO_ROM; const struct device *clock_dev; clock_control_subsys_t clock_subsys; }; struct nxp_enet_mod_data { DEVICE_MMIO_RAM; }; static int nxp_enet_mod_init(const struct device *dev) { const struct nxp_enet_mod_config *config = dev->config; int ret; ret = clock_control_on(config->clock_dev, config->clock_subsys); if (ret) { LOG_ERR("ENET module clock error"); return ret; } DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP); ENET_Reset((ENET_Type *)DEVICE_MMIO_GET(dev)); return 0; } #define NXP_ENET_INIT(n, compat) \ \ static const struct nxp_enet_mod_config nxp_enet_mod_cfg_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ .clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_DRV_INST(n))), \ .clock_subsys = (void *) DT_CLOCKS_CELL_BY_IDX( \ DT_DRV_INST(n), 0, name), \ }; \ \ static struct nxp_enet_mod_data nxp_enet_mod_data_##n; \ \ /* Init the module before any of the MAC, MDIO, or PTP clock */ \ DEVICE_DT_INST_DEFINE(n, nxp_enet_mod_init, NULL, \ &nxp_enet_mod_data_##n, &nxp_enet_mod_cfg_##n, \ POST_KERNEL, 0, NULL); #undef DT_DRV_COMPAT #define DT_DRV_COMPAT nxp_enet DT_INST_FOREACH_STATUS_OKAY_VARGS(NXP_ENET_INIT, DT_DRV_COMPAT) #define NXP_ENET1G_INIT(n, compat) \ \ static const struct nxp_enet_mod_config nxp_enet1g_mod_cfg_##n = { \ DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \ .clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_DRV_INST(n))), \ .clock_subsys = (void *) DT_CLOCKS_CELL_BY_IDX( \ DT_DRV_INST(n), 0, name), \ }; \ \ static struct nxp_enet_mod_data nxp_enet1g_mod_data_##n; \ \ /* Init the module before any of the MAC, MDIO, or PTP clock */ \ DEVICE_DT_INST_DEFINE(n, 
nxp_enet_mod_init, NULL, \ &nxp_enet1g_mod_data_##n, &nxp_enet1g_mod_cfg_##n, \ POST_KERNEL, 0, NULL); #undef DT_DRV_COMPAT #define DT_DRV_COMPAT nxp_enet1g DT_INST_FOREACH_STATUS_OKAY_VARGS(NXP_ENET1G_INIT, DT_DRV_COMPAT) ```
/content/code_sandbox/drivers/ethernet/nxp_enet/eth_nxp_enet.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,540
```c /* * */ #define DT_DRV_COMPAT nxp_enet_qos_mac #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_nxp_enet_qos_mac, CONFIG_ETHERNET_LOG_LEVEL); #include <zephyr/net/phy.h> #include <zephyr/kernel/thread_stack.h> #include <zephyr/sys_clock.h> #include <ethernet/eth_stats.h> #include "../eth.h" #include "nxp_enet_qos_priv.h" static const uint32_t rx_desc_refresh_flags = OWN_FLAG | RX_INTERRUPT_ON_COMPLETE_FLAG | BUF1_ADDR_VALID_FLAG; K_THREAD_STACK_DEFINE(enet_qos_rx_stack, CONFIG_ETH_NXP_ENET_QOS_RX_THREAD_STACK_SIZE); static struct k_work_q rx_work_queue; static int rx_queue_init(void) { struct k_work_queue_config cfg = {.name = "ENETQOS_RX"}; k_work_queue_init(&rx_work_queue); k_work_queue_start(&rx_work_queue, enet_qos_rx_stack, K_THREAD_STACK_SIZEOF(enet_qos_rx_stack), K_PRIO_COOP(CONFIG_ETH_NXP_ENET_QOS_RX_THREAD_PRIORITY), &cfg); return 0; } SYS_INIT(rx_queue_init, POST_KERNEL, 0); static void eth_nxp_enet_qos_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct nxp_enet_qos_mac_data *data = dev->data; net_if_set_link_addr(iface, data->mac_addr.addr, sizeof(((struct net_eth_addr *)NULL)->addr), NET_LINK_ETHERNET); if (data->iface == NULL) { data->iface = iface; } ethernet_init(iface); } static int eth_nxp_enet_qos_tx(const struct device *dev, struct net_pkt *pkt) { const struct nxp_enet_qos_mac_config *config = dev->config; struct nxp_enet_qos_mac_data *data = dev->data; enet_qos_t *base = config->base; volatile union nxp_enet_qos_tx_desc *tx_desc_ptr = data->tx.descriptors; volatile union nxp_enet_qos_tx_desc *last_desc_ptr; struct net_buf *fragment = pkt->frags; int frags_count = 0, total_bytes = 0; /* Only allow send of the maximum normal packet size */ while (fragment != NULL) { frags_count++; total_bytes += fragment->len; fragment = fragment->frags; } if (total_bytes > config->hw_info.max_frame_len || frags_count > NUM_TX_BUFDESC) { LOG_ERR("TX packet too large"); return -E2BIG; } /* One TX at a time in 
the current implementation */ k_sem_take(&data->tx.tx_sem, K_FOREVER); net_pkt_ref(pkt); data->tx.pkt = pkt; /* Need to save the header because the ethernet stack * otherwise discards it from the packet after this call */ data->tx.tx_header = pkt->frags; LOG_DBG("Setting up TX descriptors for packet %p", pkt); /* Reset the descriptors */ memset((void *)data->tx.descriptors, 0, sizeof(union nxp_enet_qos_tx_desc) * frags_count); /* Setting up the descriptors */ fragment = pkt->frags; tx_desc_ptr->read.control2 |= FIRST_TX_DESCRIPTOR_FLAG; for (int i = 0; i < frags_count; i++) { net_pkt_frag_ref(fragment); tx_desc_ptr->read.buf1_addr = (uint32_t)fragment->data; tx_desc_ptr->read.control1 = FIELD_PREP(0x3FFF, fragment->len); tx_desc_ptr->read.control2 |= FIELD_PREP(0x7FFF, total_bytes); fragment = fragment->frags; tx_desc_ptr++; } last_desc_ptr = tx_desc_ptr - 1; last_desc_ptr->read.control2 |= LAST_TX_DESCRIPTOR_FLAG; last_desc_ptr->read.control1 |= TX_INTERRUPT_ON_COMPLETE_FLAG; LOG_DBG("Starting TX DMA on packet %p", pkt); /* Set the DMA ownership of all the used descriptors */ for (int i = 0; i < frags_count; i++) { data->tx.descriptors[i].read.control2 |= OWN_FLAG; } /* This implementation is clearly naive and basic, it just changes the * ring length for every TX send, there is room for optimization */ base->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH = frags_count - 1; base->DMA_CH[0].DMA_CHX_TXDESC_TAIL_PTR = ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_TAIL_PTR, TDTP, ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t) tx_desc_ptr)); return 0; } static void tx_dma_done(struct k_work *work) { struct nxp_enet_qos_tx_data *tx_data = CONTAINER_OF(work, struct nxp_enet_qos_tx_data, tx_done_work); struct nxp_enet_qos_mac_data *data = CONTAINER_OF(tx_data, struct nxp_enet_qos_mac_data, tx); struct net_pkt *pkt = tx_data->pkt; struct net_buf *fragment = pkt->frags; LOG_DBG("TX DMA completed on packet %p", pkt); /* Returning the buffers and packet to the pool */ while (fragment != NULL) { 
net_pkt_frag_unref(fragment); fragment = fragment->frags; } net_pkt_frag_unref(data->tx.tx_header); net_pkt_unref(pkt); eth_stats_update_pkts_tx(data->iface); /* Allows another send */ k_sem_give(&data->tx.tx_sem); } static enum ethernet_hw_caps eth_nxp_enet_qos_get_capabilities(const struct device *dev) { return ETHERNET_LINK_100BASE_T | ETHERNET_LINK_10BASE_T; } static void eth_nxp_enet_qos_rx(struct k_work *work) { struct nxp_enet_qos_rx_data *rx_data = CONTAINER_OF(work, struct nxp_enet_qos_rx_data, rx_work); struct nxp_enet_qos_mac_data *data = CONTAINER_OF(rx_data, struct nxp_enet_qos_mac_data, rx); volatile union nxp_enet_qos_rx_desc *desc_arr = data->rx.descriptors; volatile union nxp_enet_qos_rx_desc *desc; struct net_pkt *pkt; struct net_buf *new_buf; struct net_buf *buf; size_t pkt_len; /* We are going to find all of the descriptors we own and update them */ for (int i = 0; i < NUM_RX_BUFDESC; i++) { desc = &desc_arr[i]; if (desc->write.control3 & OWN_FLAG) { /* The DMA owns the descriptor, we cannot touch it */ continue; } /* Otherwise, we found a packet that we need to process */ pkt = net_pkt_rx_alloc(K_NO_WAIT); if (!pkt) { LOG_ERR("Could not alloc RX pkt"); goto error; } LOG_DBG("Created RX pkt %p", pkt); /* We need to know if we can replace the reserved fragment in advance. * At no point can we allow the driver to have less the amount of reserved * buffers it needs to function, so we will not give up our previous buffer * unless we know we can get a new one. */ new_buf = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); if (new_buf == NULL) { /* We have no choice but to lose the previous packet, * as the buffer is more important. If we recv this packet, * we don't know what the upper layer will do to our poor buffer. 
*/ LOG_ERR("No RX buf available"); goto error; } buf = data->rx.reserved_bufs[i]; pkt_len = desc->write.control3 & DESC_RX_PKT_LEN; LOG_DBG("Receiving RX packet"); /* Finally, we have decided that it is time to wrap the buffer nicely * up within a packet, and try to send it. It's only one buffer, * thanks to ENET QOS hardware handing the fragmentation, * so the construction of the packet is very simple. */ net_buf_add(buf, pkt_len); net_pkt_frag_insert(pkt, buf); if (net_recv_data(data->iface, pkt)) { LOG_ERR("RECV failed"); /* Quite a shame. */ goto error; } LOG_DBG("Recycling RX buf"); /* Fresh meat */ data->rx.reserved_bufs[i] = new_buf; desc->read.buf1_addr = (uint32_t)new_buf->data; desc->read.control |= rx_desc_refresh_flags; /* Record our glorious victory */ eth_stats_update_pkts_rx(data->iface); } return; error: net_pkt_unref(pkt); eth_stats_update_errors_rx(data->iface); } static void eth_nxp_enet_qos_mac_isr(const struct device *dev) { const struct nxp_enet_qos_mac_config *config = dev->config; struct nxp_enet_qos_mac_data *data = dev->data; enet_qos_t *base = config->base; /* cleared on read */ volatile uint32_t mac_interrupts = base->MAC_INTERRUPT_STATUS; volatile uint32_t mac_rx_tx_status = base->MAC_RX_TX_STATUS; volatile uint32_t dma_interrupts = base->DMA_INTERRUPT_STATUS; volatile uint32_t dma_ch0_interrupts = base->DMA_CH[0].DMA_CHX_STAT; mac_interrupts; mac_rx_tx_status; base->DMA_CH[0].DMA_CHX_STAT = 0xFFFFFFFF; if (ENET_QOS_REG_GET(DMA_INTERRUPT_STATUS, DC0IS, dma_interrupts)) { if (ENET_QOS_REG_GET(DMA_CH_DMA_CHX_STAT, TI, dma_ch0_interrupts)) { k_work_submit(&data->tx.tx_done_work); } if (ENET_QOS_REG_GET(DMA_CH_DMA_CHX_STAT, RI, dma_ch0_interrupts)) { k_work_submit_to_queue(&rx_work_queue, &data->rx.rx_work); } } } static void eth_nxp_enet_qos_phy_cb(const struct device *phy, struct phy_link_state *state, void *eth_dev) { const struct device *dev = eth_dev; struct nxp_enet_qos_mac_data *data = dev->data; if (!data->iface) { return; } if 
(state->is_up) { net_eth_carrier_on(data->iface); } else { net_eth_carrier_off(data->iface); } LOG_INF("Link is %s", state->is_up ? "up" : "down"); } static inline int enet_qos_dma_reset(enet_qos_t *base) { /* Set the software reset of the DMA */ base->DMA_MODE |= ENET_QOS_REG_PREP(DMA_MODE, SWR, 0b1); if (CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME == 0) { /* spin and wait forever for the reset flag to clear */ while (ENET_QOS_REG_GET(DMA_MODE, SWR, base->DMA_MODE)) { ; } goto done; } int wait_chunk = DIV_ROUND_UP(CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME, NUM_SWR_WAIT_CHUNKS); for (int time_elapsed = 0; time_elapsed < CONFIG_ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME; time_elapsed += wait_chunk) { k_busy_wait(wait_chunk); if (!ENET_QOS_REG_GET(DMA_MODE, SWR, base->DMA_MODE)) { /* DMA cleared the bit */ goto done; } } /* all ENET QOS domain clocks must resolve to clear software reset, * if getting this error, try checking phy clock connection */ LOG_ERR("Can't clear SWR"); return -EIO; done: return 0; } static inline void enet_qos_dma_config_init(enet_qos_t *base) { base->DMA_CH[0].DMA_CHX_TX_CTRL |= ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TX_CTRL, TxPBL, 0b1); base->DMA_CH[0].DMA_CHX_RX_CTRL |= ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, RxPBL, 0b1); } static inline void enet_qos_mtl_config_init(enet_qos_t *base) { base->MTL_QUEUE[0].MTL_TXQX_OP_MODE |= /* Flush the queue */ ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, FTQ, 0b1); /* Wait for flush to finish */ while (ENET_QOS_REG_GET(MTL_QUEUE_MTL_TXQX_OP_MODE, FTQ, base->MTL_QUEUE[0].MTL_TXQX_OP_MODE)) { ; } /* Enable only Transmit Queue 0 (optimization/configuration pending) with maximum size */ base->MTL_QUEUE[0].MTL_TXQX_OP_MODE = /* Sets the size */ ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, TQS, 0b111) | /* Sets it to on */ ENET_QOS_REG_PREP(MTL_QUEUE_MTL_TXQX_OP_MODE, TXQEN, 0b10); /* Enable only Receive Queue 0 (optimization/configuration pending) with maximum size */ base->MTL_QUEUE[0].MTL_RXQX_OP_MODE |= /* Sets 
the size */ ENET_QOS_REG_PREP(MTL_QUEUE_MTL_RXQX_OP_MODE, RQS, 0b111) | /* Keep small packets */ ENET_QOS_REG_PREP(MTL_QUEUE_MTL_RXQX_OP_MODE, FUP, 0b1); } static inline void enet_qos_mac_config_init(enet_qos_t *base, struct nxp_enet_qos_mac_data *data, uint32_t clk_rate) { /* Set MAC address */ base->MAC_ADDRESS0_HIGH = ENET_QOS_REG_PREP(MAC_ADDRESS0_HIGH, ADDRHI, data->mac_addr.addr[5] << 8 | data->mac_addr.addr[4]); base->MAC_ADDRESS0_LOW = ENET_QOS_REG_PREP(MAC_ADDRESS0_LOW, ADDRLO, data->mac_addr.addr[3] << 24 | data->mac_addr.addr[2] << 16 | data->mac_addr.addr[1] << 8 | data->mac_addr.addr[0]); /* Set the reference for 1 microsecond of ENET QOS CSR clock cycles */ base->MAC_ONEUS_TIC_COUNTER = ENET_QOS_REG_PREP(MAC_ONEUS_TIC_COUNTER, TIC_1US_CNTR, (clk_rate / USEC_PER_SEC) - 1); base->MAC_CONFIGURATION |= /* For 10/100 Mbps operation */ ENET_QOS_REG_PREP(MAC_CONFIGURATION, PS, 0b1) | /* Full duplex mode */ ENET_QOS_REG_PREP(MAC_CONFIGURATION, DM, 0b1) | /* 100 Mbps mode */ ENET_QOS_REG_PREP(MAC_CONFIGURATION, FES, 0b1) | /* Don't talk unless no one else is talking */ ENET_QOS_REG_PREP(MAC_CONFIGURATION, ECRSFD, 0b1); /* Enable the MAC RX channel 0 */ base->MAC_RXQ_CTRL[0] |= ENET_QOS_REG_PREP(MAC_RXQ_CTRL, RXQ0EN, 0b1); } static inline void enet_qos_start(enet_qos_t *base) { /* Set start bits of the RX and TX DMAs */ base->DMA_CH[0].DMA_CHX_RX_CTRL |= ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, SR, 0b1); base->DMA_CH[0].DMA_CHX_TX_CTRL |= ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TX_CTRL, ST, 0b1); /* Enable interrupts */ base->DMA_CH[0].DMA_CHX_INT_EN = /* Normal interrupts (includes tx, rx) */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, NIE, 0b1) | /* Transmit interrupt */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, TIE, 0b1) | /* Receive interrupt */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_INT_EN, RIE, 0b1); base->MAC_INTERRUPT_ENABLE = /* Receive and Transmit IRQs */ ENET_QOS_REG_PREP(MAC_INTERRUPT_ENABLE, TXSTSIE, 0b1) | ENET_QOS_REG_PREP(MAC_INTERRUPT_ENABLE, RXSTSIE, 0b1); 
/* Start the TX and RX on the MAC */ base->MAC_CONFIGURATION |= ENET_QOS_REG_PREP(MAC_CONFIGURATION, TE, 0b1) | ENET_QOS_REG_PREP(MAC_CONFIGURATION, RE, 0b1); } static inline void enet_qos_tx_desc_init(enet_qos_t *base, struct nxp_enet_qos_tx_data *tx) { memset((void *)tx->descriptors, 0, sizeof(union nxp_enet_qos_tx_desc) * NUM_TX_BUFDESC); base->DMA_CH[0].DMA_CHX_TXDESC_LIST_ADDR = /* Start of tx descriptors buffer */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_LIST_ADDR, TDESLA, ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx->descriptors)); base->DMA_CH[0].DMA_CHX_TXDESC_TAIL_PTR = /* Do not move the tail pointer past the start until send is requested */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_TAIL_PTR, TDTP, ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)tx->descriptors)); base->DMA_CH[0].DMA_CHX_TXDESC_RING_LENGTH = ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_TXDESC_RING_LENGTH, TDRL, NUM_TX_BUFDESC); } static inline int enet_qos_rx_desc_init(enet_qos_t *base, struct nxp_enet_qos_rx_data *rx) { struct net_buf *buf; memset((void *)rx->descriptors, 0, sizeof(union nxp_enet_qos_rx_desc) * NUM_RX_BUFDESC); /* Here we reserve an RX buffer for each of the DMA descriptors. */ for (int i = 0; i < NUM_RX_BUFDESC; i++) { buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); if (buf == NULL) { LOG_ERR("Missing a buf"); return -ENOMEM; } rx->reserved_bufs[i] = buf; rx->descriptors[i].read.buf1_addr = (uint32_t)buf->data; rx->descriptors[i].read.control |= rx_desc_refresh_flags; } /* Set up RX descriptors on channel 0 */ base->DMA_CH[0].DMA_CHX_RXDESC_LIST_ADDR = /* Start of tx descriptors buffer */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RXDESC_LIST_ADDR, RDESLA, ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx->descriptors[0])); base->DMA_CH[0].DMA_CHX_RXDESC_TAIL_PTR = /* When the DMA reaches the tail pointer, it suspends. 
Set to last descriptor */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RXDESC_TAIL_PTR, RDTP, ENET_QOS_ALIGN_ADDR_SHIFT((uint32_t)&rx->descriptors[NUM_RX_BUFDESC])); base->DMA_CH[0].DMA_CHX_RX_CONTROL2 = /* Ring length == Buffer size. Register is this value minus one. */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CONTROL2, RDRL, NUM_RX_BUFDESC - 1); base->DMA_CH[0].DMA_CHX_RX_CTRL |= /* Set DMA receive buffer size. The low 2 bits are not entered to this field. */ ENET_QOS_REG_PREP(DMA_CH_DMA_CHX_RX_CTRL, RBSZ_13_Y, NET_ETH_MAX_FRAME_SIZE >> 2); return 0; } static int eth_nxp_enet_qos_mac_init(const struct device *dev) { const struct nxp_enet_qos_mac_config *config = dev->config; struct nxp_enet_qos_mac_data *data = dev->data; struct nxp_enet_qos_config *module_cfg = ENET_QOS_MODULE_CFG(config->enet_dev); enet_qos_t *base = module_cfg->base; uint32_t clk_rate; int ret; /* Used to configure timings of the MAC */ ret = clock_control_get_rate(module_cfg->clock_dev, module_cfg->clock_subsys, &clk_rate); if (ret) { return ret; } /* For reporting the status of the link connection */ ret = phy_link_callback_set(config->phy_dev, eth_nxp_enet_qos_phy_cb, (void *)dev); if (ret) { return ret; } /* Random mac therefore overrides local mac that may have been initialized */ if (config->random_mac) { gen_random_mac(data->mac_addr.addr, NXP_OUI_BYTE_0, NXP_OUI_BYTE_1, NXP_OUI_BYTE_2); } /* This driver cannot work without interrupts. */ if (config->irq_config_func) { config->irq_config_func(); } else { return -ENOSYS; } /* Effectively reset of the peripheral */ ret = enet_qos_dma_reset(base); if (ret) { return ret; } /* DMA is the interface presented to software for interaction by the ENET module */ enet_qos_dma_config_init(base); /* * MTL = MAC Translation Layer. * MTL is an asynchronous circuit needed because the MAC transmitter/receiver * and the DMA interface are on different clock domains, MTL compromises the two. 
*/ enet_qos_mtl_config_init(base); /* Configuration of the actual MAC hardware */ enet_qos_mac_config_init(base, data, clk_rate); /* Current use of TX descriptor in the driver is such that * one packet is sent at a time, and each descriptor is used * to collect the fragments of it from the networking stack, * and send them with a zero copy implementation. */ enet_qos_tx_desc_init(base, &data->tx); /* Current use of RX descriptor in the driver is such that * each RX descriptor corresponds to a reserved fragment, that will * hold the entirety of the contents of a packet. And these fragments * are recycled in and out of the RX pkt buf pool to achieve a zero copy implementation. */ ret = enet_qos_rx_desc_init(base, &data->rx); if (ret) { return ret; } /* Clearly, start the cogs to motion. */ enet_qos_start(base); /* The tx sem is taken during ethernet send function, * and given when DMA transmission is finished. Ie, send calls will be blocked * until the DMA is available again. This is therefore a simple but naive implementation. 
*/ k_sem_init(&data->tx.tx_sem, 1, 1); /* Work upon a reception of a packet to a buffer */ k_work_init(&data->rx.rx_work, eth_nxp_enet_qos_rx); /* Work upon a complete transmission by a channel's TX DMA */ k_work_init(&data->tx.tx_done_work, tx_dma_done); return ret; } static const struct device *eth_nxp_enet_qos_get_phy(const struct device *dev) { const struct nxp_enet_qos_mac_config *config = dev->config; return config->phy_dev; } static const struct ethernet_api api_funcs = { .iface_api.init = eth_nxp_enet_qos_iface_init, .send = eth_nxp_enet_qos_tx, .get_capabilities = eth_nxp_enet_qos_get_capabilities, .get_phy = eth_nxp_enet_qos_get_phy, }; #define NXP_ENET_QOS_NODE_HAS_MAC_ADDR_CHECK(n) \ BUILD_ASSERT(NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)) || \ DT_INST_PROP(n, zephyr_random_mac_address), \ "MAC address not specified on ENET QOS DT node"); #define NXP_ENET_QOS_CONNECT_IRQS(node_id, prop, idx) \ do { \ IRQ_CONNECT(DT_IRQN_BY_IDX(node_id, idx), \ DT_IRQ_BY_IDX(node_id, idx, priority), \ eth_nxp_enet_qos_mac_isr, \ DEVICE_DT_GET(node_id), \ 0); \ irq_enable(DT_IRQN_BY_IDX(node_id, idx)); \ } while (false); #define NXP_ENET_QOS_IRQ_CONFIG_FUNC(n) \ static void nxp_enet_qos_##n##_irq_config_func(void) \ { \ DT_FOREACH_PROP_ELEM(DT_DRV_INST(n), \ interrupt_names, \ NXP_ENET_QOS_CONNECT_IRQS) \ } #define NXP_ENET_QOS_DRIVER_STRUCTS_INIT(n) \ static const struct nxp_enet_qos_mac_config enet_qos_##n##_mac_config = { \ .enet_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), \ .phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle)), \ .base = (enet_qos_t *)DT_REG_ADDR(DT_INST_PARENT(n)), \ .hw_info = { \ .max_frame_len = ENET_QOS_MAX_NORMAL_FRAME_LEN, \ }, \ .irq_config_func = nxp_enet_qos_##n##_irq_config_func, \ .random_mac = DT_INST_PROP(n, zephyr_random_mac_address), \ }; \ \ static struct nxp_enet_qos_mac_data enet_qos_##n##_mac_data = \ { \ .mac_addr.addr = DT_INST_PROP_OR(n, local_mac_address, {0}), \ }; #define NXP_ENET_QOS_DRIVER_INIT(n) \ 
NXP_ENET_QOS_NODE_HAS_MAC_ADDR_CHECK(n) \ NXP_ENET_QOS_IRQ_CONFIG_FUNC(n) \ NXP_ENET_QOS_DRIVER_STRUCTS_INIT(n) DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_DRIVER_INIT) #define NXP_ENET_QOS_MAC_DEVICE_DEFINE(n) \ ETH_NET_DEVICE_DT_INST_DEFINE(n, eth_nxp_enet_qos_mac_init, NULL, \ &enet_qos_##n##_mac_data, &enet_qos_##n##_mac_config, \ CONFIG_ETH_INIT_PRIORITY, &api_funcs, NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_MAC_DEVICE_DEFINE) ```
/content/code_sandbox/drivers/ethernet/eth_nxp_enet_qos/eth_nxp_enet_qos_mac.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,025
```c
/* NXP ENET QOS Ethernet MAC Driver
 *
 *
 */

#define DT_DRV_COMPAT nxp_enet_qos

#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/ethernet/eth_nxp_enet_qos.h>

/*
 * Module-level init for the ENET QOS block: gate the module clock on and
 * apply the default pinctrl state. Runs at POST_KERNEL priority 0 so it
 * completes before the MAC driver (registered elsewhere) initializes.
 *
 * Returns 0 on success, or a negative errno from the clock or pinctrl call.
 */
int nxp_enet_qos_init(const struct device *dev)
{
	const struct nxp_enet_qos_config *config = dev->config;
	int ret;

	/* TODO: once NXP reset drivers are created, use that to reset
	 * until then, make sure reset is done in platform init
	 */

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret) {
		return ret;
	}

	return pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
}

/* Per-instance config + device definition generated from devicetree. */
#define NXP_ENET_QOS_INIT(n)							\
	PINCTRL_DT_INST_DEFINE(n);						\
										\
	static const struct nxp_enet_qos_config enet_qos_##n##_config = {	\
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),		\
		.clock_subsys = (void *)DT_INST_CLOCKS_CELL_BY_IDX(n, 0, name),	\
		.base = (enet_qos_t *)DT_INST_REG_ADDR(n),			\
	};									\
										\
	/* Init the module before any enet device inits so priority 0 */	\
	DEVICE_DT_INST_DEFINE(n, nxp_enet_qos_init, NULL, NULL,			\
			      &enet_qos_##n##_config, POST_KERNEL, 0, NULL);

DT_INST_FOREACH_STATUS_OKAY(NXP_ENET_QOS_INIT)
```
/content/code_sandbox/drivers/ethernet/eth_nxp_enet_qos/eth_nxp_enet_qos.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
374
```objective-c /* NXP ENET QOS Header * * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/util.h> #include <zephyr/net/ethernet.h> #include <zephyr/drivers/ethernet/eth_nxp_enet_qos.h> /* shorthands */ #define NUM_TX_BUFDESC CONFIG_ETH_NXP_ENET_QOS_TX_BUFFER_DESCRIPTORS #define NUM_RX_BUFDESC CONFIG_ETH_NXP_ENET_QOS_RX_BUFFER_DESCRIPTORS #define LAST_TX_DESC_INDEX NUM_TX_BUFDESC - 1 #define LAST_RX_DESC_INDEX NUM_RX_BUFDESC - 1 /* NXP Organizational Unique Identifier */ #define NXP_OUI_BYTE_0 0xAC #define NXP_OUI_BYTE_1 0x9A #define NXP_OUI_BYTE_2 0x22 #define FIRST_TX_DESCRIPTOR_FLAG BIT(29) #define LAST_TX_DESCRIPTOR_FLAG BIT(28) #define OWN_FLAG BIT(31) #define RX_INTERRUPT_ON_COMPLETE_FLAG BIT(30) #define TX_INTERRUPT_ON_COMPLETE_FLAG BIT(31) #define BUF1_ADDR_VALID_FLAG BIT(24) #define DESC_RX_PKT_LEN GENMASK(14, 0) #define ENET_QOS_MAX_NORMAL_FRAME_LEN 1518 #define NUM_SWR_WAIT_CHUNKS 5 struct nxp_enet_qos_tx_read_desc { union { uint32_t buf1_addr; uint32_t head_addr; }; union { uint32_t buf2_addr; uint32_t buf1_addr_alt; }; uint32_t control1; uint32_t control2; }; struct nxp_enet_qos_tx_write_desc { uint32_t timestamp_low; uint32_t timestamp_high; uint32_t reserved; uint32_t status; }; union nxp_enet_qos_tx_desc { struct nxp_enet_qos_tx_read_desc read; struct nxp_enet_qos_tx_write_desc write; }; struct nxp_enet_qos_rx_read_desc { union { uint32_t buf1_addr; uint32_t head_addr; }; uint32_t reserved; uint32_t buf2_addr; uint32_t control; }; struct nxp_enet_qos_rx_write_desc { uint32_t vlan_tag; uint32_t control1; uint32_t control2; uint32_t control3; }; union nxp_enet_qos_rx_desc { struct nxp_enet_qos_rx_read_desc read; struct nxp_enet_qos_rx_write_desc write; }; struct nxp_enet_qos_hw_info { uint16_t max_frame_len; }; struct nxp_enet_qos_mac_config { const struct device *enet_dev; const struct device *phy_dev; enet_qos_t *base; struct nxp_enet_qos_hw_info hw_info; void (*irq_config_func)(void); bool random_mac; }; struct 
nxp_enet_qos_tx_data { struct k_sem tx_sem; struct net_pkt *pkt; struct k_work tx_done_work; struct net_buf *tx_header; volatile union nxp_enet_qos_tx_desc descriptors[NUM_TX_BUFDESC]; }; struct nxp_enet_qos_rx_data { struct k_work rx_work; volatile union nxp_enet_qos_rx_desc descriptors[NUM_RX_BUFDESC]; struct net_buf *reserved_bufs[NUM_RX_BUFDESC]; }; struct nxp_enet_qos_mac_data { struct net_if *iface; struct net_eth_addr mac_addr; struct nxp_enet_qos_tx_data tx; struct nxp_enet_qos_rx_data rx; }; ```
/content/code_sandbox/drivers/ethernet/eth_nxp_enet_qos/nxp_enet_qos_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
724
```c /* MCUX Ethernet Driver * * */ #define DT_DRV_COMPAT nxp_kinetis_ethernet /* Driver Limitations: * * There is no statistics collection for either normal operation or * error behaviour. */ #define LOG_MODULE_NAME eth_mcux #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #define RING_ID 0 #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/device.h> #include <zephyr/sys/util.h> #include <zephyr/kernel.h> #include <zephyr/sys/__assert.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include <zephyr/pm/device.h> #include <zephyr/irq.h> #if defined(CONFIG_PTP_CLOCK_MCUX) #include <zephyr/drivers/ptp_clock.h> #endif #if defined(CONFIG_NET_DSA) #include <zephyr/net/dsa.h> #endif #include "fsl_enet.h" #include "fsl_phy.h" #include "fsl_phyksz8081.h" #include "fsl_enet_mdio.h" #if defined(CONFIG_NET_POWER_MANAGEMENT) #include "fsl_clock.h" #include <zephyr/drivers/clock_control.h> #endif #include <zephyr/devicetree.h> #include <zephyr/drivers/gpio.h> #if defined(CONFIG_PINCTRL) #include <zephyr/drivers/pinctrl.h> #endif #include "../eth.h" #define PHY_OMS_OVERRIDE_REG 0x16U /* The PHY Operation Mode Strap Override register. */ #define PHY_OMS_STATUS_REG 0x17U /* The PHY Operation Mode Strap Status register. */ #define PHY_OMS_NANDTREE_MASK 0x0020U /* The PHY NAND Tree Strap-In Override/Status mask. */ #define PHY_OMS_FACTORY_MODE_MASK 0x8000U /* The factory mode Override/Status mask. */ /* Defines the PHY KSZ8081 vendor defined registers. */ #define PHY_CONTROL1_REG 0x1EU /* The PHY control one register. */ #define PHY_CONTROL2_REG 0x1FU /* The PHY control two register. */ /* Defines the PHY KSZ8081 ID number. */ #define PHY_CONTROL_ID1 0x22U /* The PHY ID1 */ /* Defines the mask flag of operation mode in control registers */ #define PHY_CTL2_REMOTELOOP_MASK 0x0004U /* The PHY remote loopback mask. 
*/ #define PHY_CTL2_REFCLK_SELECT_MASK 0x0080U /* The PHY RMII reference clock select. */ #define PHY_CTL1_10HALFDUPLEX_MASK 0x0001U /* The PHY 10M half duplex mask. */ #define PHY_CTL1_100HALFDUPLEX_MASK 0x0002U /* The PHY 100M half duplex mask. */ #define PHY_CTL1_10FULLDUPLEX_MASK 0x0005U /* The PHY 10M full duplex mask. */ #define PHY_CTL1_100FULLDUPLEX_MASK 0x0006U /* The PHY 100M full duplex mask. */ #define PHY_CTL1_SPEEDUPLX_MASK 0x0007U /* The PHY speed and duplex mask. */ #define PHY_CTL1_ENERGYDETECT_MASK 0x10U /* The PHY signal present on rx differential pair. */ #define PHY_CTL1_LINKUP_MASK 0x100U /* The PHY link up. */ #define PHY_LINK_READY_MASK (PHY_CTL1_ENERGYDETECT_MASK | PHY_CTL1_LINKUP_MASK) /* Defines the timeout macro. */ #define PHY_READID_TIMEOUT_COUNT 1000U /* Define RX and TX thread stack sizes */ #define ETH_MCUX_RX_THREAD_STACK_SIZE 1600 #define ETH_MCUX_TX_THREAD_STACK_SIZE 1600 #define FREESCALE_OUI_B0 0x00 #define FREESCALE_OUI_B1 0x04 #define FREESCALE_OUI_B2 0x9f #define ETH_MCUX_FIXED_LINK_NODE \ DT_CHILD(DT_NODELABEL(enet), fixed_link) #define ETH_MCUX_FIXED_LINK \ DT_NODE_EXISTS(ETH_MCUX_FIXED_LINK_NODE) #define ETH_MCUX_FIXED_LINK_SPEED \ DT_PROP(ETH_MCUX_FIXED_LINK_NODE, speed) #define ETH_MCUX_FIXED_LINK_FULL_DUPLEX \ DT_PROP(ETH_MCUX_FIXED_LINK_NODE, full_duplex) enum eth_mcux_phy_state { eth_mcux_phy_state_initial, eth_mcux_phy_state_reset, eth_mcux_phy_state_autoneg, eth_mcux_phy_state_restart, eth_mcux_phy_state_read_status, eth_mcux_phy_state_read_duplex, eth_mcux_phy_state_wait, eth_mcux_phy_state_closing }; struct _phy_resource { mdioWrite write; mdioRead read; }; #if defined(CONFIG_NET_POWER_MANAGEMENT) extern uint32_t ENET_GetInstance(ENET_Type * base); static const clock_ip_name_t enet_clocks[] = ENET_CLOCKS; #endif static void eth_mcux_init(const struct device *dev); #if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) static const char *phy_state_name(enum eth_mcux_phy_state state) { static const char * const name[] = { 
"initial", "reset", "autoneg", "restart",
	"read-status", "read-duplex", "wait", "closing"
	};

	return name[state];
}
#endif

/* Map an ENET peripheral base address to a short instance name for logs. */
static const char *eth_name(ENET_Type *base)
{
	/* NOTE(review): casting the pointer to int would truncate on a
	 * 64-bit target; presumably fine here since these SoCs are 32-bit —
	 * confirm if this driver is ever built for a 64-bit platform.
	 */
	switch ((int)base) {
	case DT_INST_REG_ADDR(0):
		return "ETH_0";
#if DT_NODE_HAS_STATUS(DT_DRV_INST(1), okay)
	case DT_INST_REG_ADDR(1):
		return "ETH_1";
#endif
	default:
		return "unknown";
	}
}

/* Per-instance run-time state of the MCUX ENET driver. */
struct eth_context {
	ENET_Type *base;		/* ENET peripheral register base */
	void (*config_func)(void);	/* IRQ connect/enable hook set per instance */
	struct net_if *iface;		/* bound network interface (set in iface init) */
#if defined(CONFIG_NET_POWER_MANAGEMENT)
	clock_ip_name_t clock;
	const struct device *clock_dev;
#endif
	enet_handle_t enet_handle;
#if defined(CONFIG_PTP_CLOCK_MCUX)
	const struct device *ptp_clock;
	enet_ptp_config_t ptp_config;
	double clk_ratio;
	struct k_mutex ptp_mutex;	/* serializes 1588 timer access */
	struct k_sem ptp_ts_sem;	/* given when a TX timestamp is captured */
#endif
	struct k_sem tx_buf_sem;	/* counts free TX buffer descriptors */
	phy_handle_t *phy_handle;
	struct _phy_resource *phy_config;
	struct k_sem rx_thread_sem;	/* RX IRQ -> RX thread wakeup */
	enum eth_mcux_phy_state phy_state;
	bool enabled;			/* PHY state machine enabled */
	bool link_up;
	uint32_t phy_addr;
	uint32_t rx_irq_num;
	uint32_t tx_irq_num;
	phy_duplex_t phy_duplex;
	phy_speed_t phy_speed;
	uint8_t mac_addr[6];
	void (*generate_mac)(uint8_t *);	/* NULL when MAC comes from DT */
	struct k_work phy_work;
	struct k_work_delayable delayed_phy_work;
	K_KERNEL_STACK_MEMBER(rx_thread_stack, ETH_MCUX_RX_THREAD_STACK_SIZE);
	struct k_thread rx_thread;

	/* TODO: FIXME. This Ethernet frame sized buffer is used for
	 * interfacing with MCUX. How it works is that hardware uses
	 * DMA scatter buffers to receive a frame, and then public
	 * MCUX call gathers them into this buffer (there's no other
	 * public interface). All this happens only for this driver
	 * to scatter this buffer again into Zephyr fragment buffers.
	 * This is not efficient, but proper resolution of this issue
	 * depends on introduction of zero-copy networking support
	 * in Zephyr, and adding needed interface to MCUX (or
	 * bypassing it and writing a more complex driver working
	 * directly with hardware).
	 *
	 * Note that we do not copy FCS into this buffer thus the
	 * size is 1514 bytes.
*/ struct k_mutex tx_frame_buf_mutex; struct k_mutex rx_frame_buf_mutex; uint8_t *tx_frame_buf; /* Max MTU + ethernet header */ uint8_t *rx_frame_buf; /* Max MTU + ethernet header */ #if defined(CONFIG_PINCTRL) const struct pinctrl_dev_config *pincfg; #endif #if defined(CONFIG_ETH_MCUX_PHY_RESET) const struct gpio_dt_spec int_gpio; const struct gpio_dt_spec reset_gpio; #endif }; /* Use ENET_FRAME_MAX_VLANFRAMELEN for VLAN frame size * Use ENET_FRAME_MAX_FRAMELEN for Ethernet frame size */ #if defined(CONFIG_NET_VLAN) #if !defined(ENET_FRAME_MAX_VLANFRAMELEN) #define ENET_FRAME_MAX_VLANFRAMELEN (ENET_FRAME_MAX_FRAMELEN + 4) #endif #define ETH_MCUX_BUFFER_SIZE \ ROUND_UP(ENET_FRAME_MAX_VLANFRAMELEN, ENET_BUFF_ALIGNMENT) #else #define ETH_MCUX_BUFFER_SIZE \ ROUND_UP(ENET_FRAME_MAX_FRAMELEN, ENET_BUFF_ALIGNMENT) #endif /* CONFIG_NET_VLAN */ #ifdef CONFIG_SOC_FAMILY_KINETIS #if defined(CONFIG_NET_POWER_MANAGEMENT) static void eth_mcux_phy_enter_reset(struct eth_context *context); void eth_mcux_phy_stop(struct eth_context *context); static int eth_mcux_device_pm_action(const struct device *dev, enum pm_device_action action) { struct eth_context *eth_ctx = dev->data; int ret = 0; if (!device_is_ready(eth_ctx->clock_dev)) { LOG_ERR("No CLOCK dev"); ret = -EIO; goto out; } switch (action) { case PM_DEVICE_ACTION_SUSPEND: LOG_DBG("Suspending"); ret = net_if_suspend(eth_ctx->iface); if (ret == -EBUSY) { goto out; } eth_mcux_phy_enter_reset(eth_ctx); eth_mcux_phy_stop(eth_ctx); ENET_Reset(eth_ctx->base); ENET_Deinit(eth_ctx->base); clock_control_off(eth_ctx->clock_dev, (clock_control_subsys_t)eth_ctx->clock); break; case PM_DEVICE_ACTION_RESUME: LOG_DBG("Resuming"); clock_control_on(eth_ctx->clock_dev, (clock_control_subsys_t)eth_ctx->clock); eth_mcux_init(dev); net_if_resume(eth_ctx->iface); break; default: ret = -ENOTSUP; break; } out: return ret; } #endif /* CONFIG_NET_POWER_MANAGEMENT */ #endif /* CONFIG_SOC_FAMILY_KINETIS */ #if ETH_MCUX_FIXED_LINK static void 
eth_mcux_get_phy_params(phy_duplex_t *p_phy_duplex, phy_speed_t *p_phy_speed) { *p_phy_duplex = kPHY_HalfDuplex; #if ETH_MCUX_FIXED_LINK_FULL_DUPLEX *p_phy_duplex = kPHY_FullDuplex; #endif *p_phy_speed = kPHY_Speed10M; #if ETH_MCUX_FIXED_LINK_SPEED == 100 *p_phy_speed = kPHY_Speed100M; #endif } #else static void eth_mcux_decode_duplex_and_speed(uint32_t status, phy_duplex_t *p_phy_duplex, phy_speed_t *p_phy_speed) { switch (status & PHY_CTL1_SPEEDUPLX_MASK) { case PHY_CTL1_10FULLDUPLEX_MASK: *p_phy_duplex = kPHY_FullDuplex; *p_phy_speed = kPHY_Speed10M; break; case PHY_CTL1_100FULLDUPLEX_MASK: *p_phy_duplex = kPHY_FullDuplex; *p_phy_speed = kPHY_Speed100M; break; case PHY_CTL1_100HALFDUPLEX_MASK: *p_phy_duplex = kPHY_HalfDuplex; *p_phy_speed = kPHY_Speed100M; break; case PHY_CTL1_10HALFDUPLEX_MASK: *p_phy_duplex = kPHY_HalfDuplex; *p_phy_speed = kPHY_Speed10M; break; } } #endif /* ETH_MCUX_FIXED_LINK */ static inline struct net_if *get_iface(struct eth_context *ctx) { return ctx->iface; } static void eth_mcux_phy_enter_reset(struct eth_context *context) { /* Reset the PHY. */ #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) ENET_StartSMIWrite(context->base, context->phy_addr, PHY_BASICCONTROL_REG, kENET_MiiWriteValidFrame, PHY_BCTL_RESET_MASK); #endif context->phy_state = eth_mcux_phy_state_reset; #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) k_work_submit(&context->phy_work); #endif } static void eth_mcux_phy_start(struct eth_context *context) { #if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) LOG_DBG("%s phy_state=%s", eth_name(context->base), phy_state_name(context->phy_state)); #endif context->enabled = true; switch (context->phy_state) { case eth_mcux_phy_state_initial: context->phy_handle->phyAddr = context->phy_addr; ENET_ActiveRead(context->base); /* Reset the PHY. 
*/ #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) ENET_StartSMIWrite(context->base, context->phy_addr, PHY_BASICCONTROL_REG, kENET_MiiWriteValidFrame, PHY_BCTL_RESET_MASK); #else /* * With no SMI communication one needs to wait for * iface being up by the network core. */ k_work_submit(&context->phy_work); break; #endif #if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX) context->phy_state = eth_mcux_phy_state_initial; #else context->phy_state = eth_mcux_phy_state_reset; #endif break; case eth_mcux_phy_state_reset: eth_mcux_phy_enter_reset(context); break; case eth_mcux_phy_state_autoneg: case eth_mcux_phy_state_restart: case eth_mcux_phy_state_read_status: case eth_mcux_phy_state_read_duplex: case eth_mcux_phy_state_wait: case eth_mcux_phy_state_closing: break; } } void eth_mcux_phy_stop(struct eth_context *context) { #if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) LOG_DBG("%s phy_state=%s", eth_name(context->base), phy_state_name(context->phy_state)); #endif context->enabled = false; switch (context->phy_state) { case eth_mcux_phy_state_initial: case eth_mcux_phy_state_reset: case eth_mcux_phy_state_autoneg: case eth_mcux_phy_state_restart: case eth_mcux_phy_state_read_status: case eth_mcux_phy_state_read_duplex: /* Do nothing, let the current communication complete * then deal with shutdown. */ context->phy_state = eth_mcux_phy_state_closing; break; case eth_mcux_phy_state_wait: k_work_cancel_delayable(&context->delayed_phy_work); /* @todo, actually power down the PHY ? */ context->phy_state = eth_mcux_phy_state_initial; break; case eth_mcux_phy_state_closing: /* We are already going down. 
*/ break; } } static void eth_mcux_phy_event(struct eth_context *context) { #if !(defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK) uint32_t status; #endif bool link_up; #if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX) status_t res; uint16_t ctrl2; #endif phy_duplex_t phy_duplex = kPHY_FullDuplex; phy_speed_t phy_speed = kPHY_Speed100M; #if defined(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) LOG_DBG("%s phy_state=%s", eth_name(context->base), phy_state_name(context->phy_state)); #endif switch (context->phy_state) { case eth_mcux_phy_state_initial: #if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX) ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK); res = PHY_Read(context->phy_handle, PHY_CONTROL2_REG, &ctrl2); ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK); if (res != kStatus_Success) { LOG_WRN("Reading PHY reg failed (status 0x%x)", res); k_work_submit(&context->phy_work); } else { ctrl2 |= PHY_CTL2_REFCLK_SELECT_MASK; ENET_StartSMIWrite(context->base, context->phy_addr, PHY_CONTROL2_REG, kENET_MiiWriteValidFrame, ctrl2); } context->phy_state = eth_mcux_phy_state_reset; #endif #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) /* * When the iface is available proceed with the eth link setup, * otherwise reschedule the eth_mcux_phy_event and check after * 1ms */ if (context->iface) { context->phy_state = eth_mcux_phy_state_reset; } k_work_reschedule(&context->delayed_phy_work, K_MSEC(1)); #endif break; case eth_mcux_phy_state_closing: if (context->enabled) { eth_mcux_phy_enter_reset(context); } else { /* @todo, actually power down the PHY ? */ context->phy_state = eth_mcux_phy_state_initial; } break; case eth_mcux_phy_state_reset: /* Setup PHY autonegotiation. 
*/ #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) ENET_StartSMIWrite(context->base, context->phy_addr, PHY_AUTONEG_ADVERTISE_REG, kENET_MiiWriteValidFrame, (PHY_100BASETX_FULLDUPLEX_MASK | PHY_100BASETX_HALFDUPLEX_MASK | PHY_10BASETX_FULLDUPLEX_MASK | PHY_10BASETX_HALFDUPLEX_MASK | PHY_IEEE802_3_SELECTOR_MASK)); #endif context->phy_state = eth_mcux_phy_state_autoneg; #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) k_work_submit(&context->phy_work); #endif break; case eth_mcux_phy_state_autoneg: #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) /* Setup PHY autonegotiation. */ ENET_StartSMIWrite(context->base, context->phy_addr, PHY_BASICCONTROL_REG, kENET_MiiWriteValidFrame, (PHY_BCTL_AUTONEG_MASK | PHY_BCTL_RESTART_AUTONEG_MASK)); #endif context->phy_state = eth_mcux_phy_state_restart; #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) k_work_submit(&context->phy_work); #endif break; case eth_mcux_phy_state_wait: case eth_mcux_phy_state_restart: /* Start reading the PHY basic status. */ #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) ENET_StartSMIRead(context->base, context->phy_addr, PHY_BASICSTATUS_REG, kENET_MiiReadValidFrame); #endif context->phy_state = eth_mcux_phy_state_read_status; #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) k_work_submit(&context->phy_work); #endif break; case eth_mcux_phy_state_read_status: /* PHY Basic status is available. */ #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK link_up = true; #else status = ENET_ReadSMIData(context->base); link_up = status & PHY_BSTATUS_LINKSTATUS_MASK; #endif if (link_up && !context->link_up && context->iface != NULL) { /* Start reading the PHY control register. 
*/ #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) ENET_StartSMIRead(context->base, context->phy_addr, PHY_CONTROL1_REG, kENET_MiiReadValidFrame); #endif context->link_up = link_up; context->phy_state = eth_mcux_phy_state_read_duplex; net_eth_carrier_on(context->iface); k_msleep(1); #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) k_work_submit(&context->phy_work); #endif } else if (!link_up && context->link_up && context->iface != NULL) { LOG_INF("%s link down", eth_name(context->base)); context->link_up = link_up; k_work_reschedule(&context->delayed_phy_work, K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS)); context->phy_state = eth_mcux_phy_state_wait; net_eth_carrier_off(context->iface); } else { k_work_reschedule(&context->delayed_phy_work, K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS)); context->phy_state = eth_mcux_phy_state_wait; } break; case eth_mcux_phy_state_read_duplex: /* PHY control register is available. */ #if defined(CONFIG_ETH_MCUX_NO_PHY_SMI) && ETH_MCUX_FIXED_LINK eth_mcux_get_phy_params(&phy_duplex, &phy_speed); LOG_INF("%s - Fixed Link", eth_name(context->base)); #else status = ENET_ReadSMIData(context->base); eth_mcux_decode_duplex_and_speed(status, &phy_duplex, &phy_speed); #endif if (phy_speed != context->phy_speed || phy_duplex != context->phy_duplex) { context->phy_speed = phy_speed; context->phy_duplex = phy_duplex; ENET_SetMII(context->base, (enet_mii_speed_t) phy_speed, (enet_mii_duplex_t) phy_duplex); } LOG_INF("%s enabled %sM %s-duplex mode.", eth_name(context->base), (phy_speed ? "100" : "10"), (phy_duplex ? 
"full" : "half")); k_work_reschedule(&context->delayed_phy_work, K_MSEC(CONFIG_ETH_MCUX_PHY_TICK_MS)); context->phy_state = eth_mcux_phy_state_wait; break; } } static void eth_mcux_phy_work(struct k_work *item) { struct eth_context *context = CONTAINER_OF(item, struct eth_context, phy_work); eth_mcux_phy_event(context); } static void eth_mcux_delayed_phy_work(struct k_work *item) { struct k_work_delayable *dwork = k_work_delayable_from_work(item); struct eth_context *context = CONTAINER_OF(dwork, struct eth_context, delayed_phy_work); eth_mcux_phy_event(context); } static void eth_mcux_phy_setup(struct eth_context *context) { #if defined(CONFIG_SOC_SERIES_IMXRT10XX) || defined(CONFIG_SOC_SERIES_IMXRT11XX) status_t res; uint16_t oms_override; /* Disable MII interrupts to prevent triggering PHY events. */ ENET_DisableInterrupts(context->base, ENET_EIR_MII_MASK); res = PHY_Read(context->phy_handle, PHY_OMS_OVERRIDE_REG, &oms_override); if (res != kStatus_Success) { LOG_WRN("Reading PHY reg failed (status 0x%x)", res); } else { /* Based on strap-in pins the PHY can be in factory test mode. * Force normal operation. */ oms_override &= ~PHY_OMS_FACTORY_MODE_MASK; /* Prevent PHY entering NAND Tree mode override. 
*/ if (oms_override & PHY_OMS_NANDTREE_MASK) { oms_override &= ~PHY_OMS_NANDTREE_MASK; } res = PHY_Write(context->phy_handle, PHY_OMS_OVERRIDE_REG, oms_override); if (res != kStatus_Success) { LOG_WRN("Writing PHY reg failed (status 0x%x)", res); } } ENET_EnableInterrupts(context->base, ENET_EIR_MII_MASK); #endif } #if defined(CONFIG_PTP_CLOCK_MCUX) static bool eth_get_ptp_data(struct net_if *iface, struct net_pkt *pkt) { int eth_hlen; if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) { return false; } eth_hlen = sizeof(struct net_eth_hdr); net_pkt_set_priority(pkt, NET_PRIORITY_CA); return true; } #endif /* CONFIG_PTP_CLOCK_MCUX */ static int eth_tx(const struct device *dev, struct net_pkt *pkt) { struct eth_context *context = dev->data; uint16_t total_len = net_pkt_get_len(pkt); status_t status; #if defined(CONFIG_PTP_CLOCK_MCUX) bool timestamped_frame; #endif /* Wait for a TX buffer descriptor to be available */ k_sem_take(&context->tx_buf_sem, K_FOREVER); k_mutex_lock(&context->tx_frame_buf_mutex, K_FOREVER); if (net_pkt_read(pkt, context->tx_frame_buf, total_len)) { k_mutex_unlock(&context->tx_frame_buf_mutex); return -EIO; } #if defined(CONFIG_PTP_CLOCK_MCUX) timestamped_frame = eth_get_ptp_data(net_pkt_iface(pkt), pkt); if (timestamped_frame) { status = ENET_SendFrame(context->base, &context->enet_handle, context->tx_frame_buf, total_len, RING_ID, true, pkt); if (!status) { net_pkt_ref(pkt); /* * Network stack will modify the packet upon return, * so wait for the packet to be timestamped, * which will occur within the TX ISR, before * returning */ k_sem_take(&context->ptp_ts_sem, K_FOREVER); } } else #endif { status = ENET_SendFrame(context->base, &context->enet_handle, context->tx_frame_buf, total_len, RING_ID, false, NULL); } if (status) { LOG_ERR("ENET_SendFrame error: %d", (int)status); k_mutex_unlock(&context->tx_frame_buf_mutex); ENET_ReclaimTxDescriptor(context->base, &context->enet_handle, RING_ID); return -1; } 
k_mutex_unlock(&context->tx_frame_buf_mutex); return 0; } static int eth_rx(struct eth_context *context) { uint32_t frame_length = 0U; struct net_if *iface; struct net_pkt *pkt; status_t status; uint32_t ts; #if defined(CONFIG_PTP_CLOCK_MCUX) enet_ptp_time_t ptpTimeData; #endif status = ENET_GetRxFrameSize(&context->enet_handle, (uint32_t *)&frame_length, RING_ID); if (status == kStatus_ENET_RxFrameEmpty) { return 0; } else if (status == kStatus_ENET_RxFrameError) { enet_data_error_stats_t error_stats; LOG_ERR("ENET_GetRxFrameSize return: %d", (int)status); ENET_GetRxErrBeforeReadFrame(&context->enet_handle, &error_stats, RING_ID); goto flush; } if (frame_length > NET_ETH_MAX_FRAME_SIZE) { LOG_ERR("frame too large (%d)", frame_length); goto flush; } /* Using root iface. It will be updated in net_recv_data() */ pkt = net_pkt_rx_alloc_with_buffer(context->iface, frame_length, AF_UNSPEC, 0, K_NO_WAIT); if (!pkt) { goto flush; } /* in case multiply thread access * we need to protect it with mutex. 
*/ k_mutex_lock(&context->rx_frame_buf_mutex, K_FOREVER); status = ENET_ReadFrame(context->base, &context->enet_handle, context->rx_frame_buf, frame_length, RING_ID, &ts); if (status) { LOG_ERR("ENET_ReadFrame failed: %d", (int)status); net_pkt_unref(pkt); k_mutex_unlock(&context->rx_frame_buf_mutex); goto error; } if (net_pkt_write(pkt, context->rx_frame_buf, frame_length)) { LOG_ERR("Unable to write frame into the pkt"); net_pkt_unref(pkt); k_mutex_unlock(&context->rx_frame_buf_mutex); goto error; } k_mutex_unlock(&context->rx_frame_buf_mutex); /* * Use MAC timestamp */ #if defined(CONFIG_PTP_CLOCK_MCUX) k_mutex_lock(&context->ptp_mutex, K_FOREVER); if (eth_get_ptp_data(get_iface(context), pkt)) { ENET_Ptp1588GetTimer(context->base, &context->enet_handle, &ptpTimeData); /* If latest timestamp reloads after getting from Rx BD, * then second - 1 to make sure the actual Rx timestamp is * accurate */ if (ptpTimeData.nanosecond < ts) { ptpTimeData.second--; } pkt->timestamp.nanosecond = ts; pkt->timestamp.second = ptpTimeData.second; } else { /* Invalid value. */ pkt->timestamp.nanosecond = UINT32_MAX; pkt->timestamp.second = UINT64_MAX; } k_mutex_unlock(&context->ptp_mutex); #endif /* CONFIG_PTP_CLOCK_MCUX */ iface = get_iface(context); #if defined(CONFIG_NET_DSA) iface = dsa_net_recv(iface, &pkt); #endif if (net_recv_data(iface, pkt) < 0) { net_pkt_unref(pkt); goto error; } return 1; flush: /* Flush the current read buffer. This operation can * only report failure if there is no frame to flush, * which cannot happen in this context. 
*/ status = ENET_ReadFrame(context->base, &context->enet_handle, NULL, 0, RING_ID, NULL); __ASSERT_NO_MSG(status == kStatus_Success); error: eth_stats_update_errors_rx(get_iface(context)); return -EIO; } #if defined(CONFIG_PTP_CLOCK_MCUX) && defined(CONFIG_NET_L2_PTP) static inline void ts_register_tx_event(struct eth_context *context, enet_frame_info_t *frameinfo) { struct net_pkt *pkt; pkt = frameinfo->context; if (pkt && atomic_get(&pkt->atomic_ref) > 0) { if (eth_get_ptp_data(net_pkt_iface(pkt), pkt)) { if (frameinfo->isTsAvail) { k_mutex_lock(&context->ptp_mutex, K_FOREVER); pkt->timestamp.nanosecond = frameinfo->timeStamp.nanosecond; pkt->timestamp.second = frameinfo->timeStamp.second; net_if_add_tx_timestamp(pkt); k_sem_give(&context->ptp_ts_sem); k_mutex_unlock(&context->ptp_mutex); } } net_pkt_unref(pkt); } else { if (IS_ENABLED(CONFIG_ETH_MCUX_PHY_EXTRA_DEBUG) && pkt) { LOG_ERR("pkt %p already freed", pkt); } } } #endif /* CONFIG_PTP_CLOCK_MCUX && CONFIG_NET_L2_PTP */ static void eth_callback(ENET_Type *base, enet_handle_t *handle, #if FSL_FEATURE_ENET_QUEUE > 1 uint32_t ringId, #endif /* FSL_FEATURE_ENET_QUEUE > 1 */ enet_event_t event, enet_frame_info_t *frameinfo, void *param) { struct eth_context *context = param; switch (event) { case kENET_RxEvent: k_sem_give(&context->rx_thread_sem); break; case kENET_TxEvent: #if defined(CONFIG_PTP_CLOCK_MCUX) && defined(CONFIG_NET_L2_PTP) /* Register event */ ts_register_tx_event(context, frameinfo); #endif /* CONFIG_PTP_CLOCK_MCUX && CONFIG_NET_L2_PTP */ /* Free the TX buffer. */ k_sem_give(&context->tx_buf_sem); break; case kENET_ErrEvent: /* Error event: BABR/BABT/EBERR/LC/RL/UN/PLR. */ break; case kENET_WakeUpEvent: /* Wake up from sleep mode event. */ break; case kENET_TimeStampEvent: /* Time stamp event. */ /* Reset periodic timer to default value. */ context->base->ATPER = NSEC_PER_SEC; break; case kENET_TimeStampAvailEvent: /* Time stamp available event. 
*/ break; } } static void eth_rx_thread(void *arg1, void *unused1, void *unused2) { struct eth_context *context = (struct eth_context *)arg1; while (1) { if (k_sem_take(&context->rx_thread_sem, K_FOREVER) == 0) { while (eth_rx(context) == 1) { ; } /* enable the IRQ for RX */ ENET_EnableInterrupts(context->base, kENET_RxFrameInterrupt | kENET_RxBufferInterrupt); } } } #if defined(CONFIG_ETH_MCUX_PHY_RESET) static int eth_phy_reset(const struct device *dev) { int err; struct eth_context *context = dev->data; /* pull up the ENET_INT before RESET. */ err = gpio_pin_configure_dt(&context->int_gpio, GPIO_OUTPUT_ACTIVE); if (err) { return err; } return gpio_pin_configure_dt(&context->reset_gpio, GPIO_OUTPUT_INACTIVE); } static int eth_phy_init(const struct device *dev) { struct eth_context *context = dev->data; /* RESET PHY chip. */ k_busy_wait(USEC_PER_MSEC * 500); return gpio_pin_set_dt(&context->reset_gpio, 1); } #endif static void eth_mcux_init(const struct device *dev) { struct eth_context *context = dev->data; const enet_buffer_config_t *buffer_config = dev->config; enet_config_t enet_config; uint32_t sys_clock; #if defined(CONFIG_PTP_CLOCK_MCUX) uint8_t ptp_multicast[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 }; uint8_t ptp_peer_multicast[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x0E }; #endif #if defined(CONFIG_MDNS_RESPONDER) || defined(CONFIG_MDNS_RESOLVER) /* standard multicast MAC address */ uint8_t mdns_multicast[6] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB }; #endif context->phy_state = eth_mcux_phy_state_initial; context->phy_handle->ops = &phyksz8081_ops; #if defined(CONFIG_SOC_SERIES_IMXRT10XX) #if DT_NODE_HAS_STATUS(DT_NODELABEL(enet), okay) sys_clock = CLOCK_GetFreq(kCLOCK_IpgClk); #endif #if DT_NODE_HAS_STATUS(DT_NODELABEL(enet2), okay) sys_clock = CLOCK_GetFreq(kCLOCK_EnetPll1Clk); #endif #elif defined(CONFIG_SOC_SERIES_IMXRT11XX) sys_clock = CLOCK_GetRootClockFreq(kCLOCK_Root_Bus); #else sys_clock = CLOCK_GetFreq(kCLOCK_CoreSysClk); #endif 
ENET_GetDefaultConfig(&enet_config); enet_config.interrupt |= kENET_RxFrameInterrupt; enet_config.interrupt |= kENET_TxFrameInterrupt; #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) enet_config.interrupt |= kENET_MiiInterrupt; #endif enet_config.miiMode = kENET_RmiiMode; enet_config.callback = eth_callback; enet_config.userData = context; if (IS_ENABLED(CONFIG_ETH_MCUX_PROMISCUOUS_MODE)) { enet_config.macSpecialConfig |= kENET_ControlPromiscuousEnable; } if (IS_ENABLED(CONFIG_NET_VLAN)) { enet_config.macSpecialConfig |= kENET_ControlVLANTagEnable; } if (IS_ENABLED(CONFIG_ETH_MCUX_HW_ACCELERATION)) { enet_config.txAccelerConfig |= kENET_TxAccelIpCheckEnabled | kENET_TxAccelProtoCheckEnabled; enet_config.rxAccelerConfig |= kENET_RxAccelIpCheckEnabled | kENET_RxAccelProtoCheckEnabled; } ENET_Init(context->base, &context->enet_handle, &enet_config, buffer_config, context->mac_addr, sys_clock); #if defined(CONFIG_PTP_CLOCK_MCUX) ENET_AddMulticastGroup(context->base, ptp_multicast); ENET_AddMulticastGroup(context->base, ptp_peer_multicast); /* only for ERRATA_2579 */ context->ptp_config.channel = kENET_PtpTimerChannel3; context->ptp_config.ptp1588ClockSrc_Hz = CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ; context->clk_ratio = 1.0; ENET_Ptp1588SetChannelMode(context->base, kENET_PtpTimerChannel3, kENET_PtpChannelPulseHighonCompare, true); ENET_Ptp1588Configure(context->base, &context->enet_handle, &context->ptp_config); #endif #if defined(CONFIG_MDNS_RESPONDER) || defined(CONFIG_MDNS_RESOLVER) ENET_AddMulticastGroup(context->base, mdns_multicast); #endif #if !defined(CONFIG_ETH_MCUX_NO_PHY_SMI) ENET_SetSMI(context->base, sys_clock, false); #endif /* handle PHY setup after SMI initialization */ eth_mcux_phy_setup(context); #if defined(CONFIG_PTP_CLOCK_MCUX) /* Enable reclaim of tx descriptors that will have the tx timestamp */ ENET_SetTxReclaim(&context->enet_handle, true, 0); #endif eth_mcux_phy_start(context); } static int eth_init(const struct device *dev) { struct eth_context *context = 
dev->data; #if defined(CONFIG_PINCTRL) int err; err = pinctrl_apply_state(context->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } #endif /* CONFIG_PINCTRL */ #if defined(CONFIG_NET_POWER_MANAGEMENT) const uint32_t inst = ENET_GetInstance(context->base); context->clock = enet_clocks[inst]; #endif #if defined(CONFIG_ETH_MCUX_PHY_RESET) eth_phy_reset(dev); eth_phy_init(dev); #endif #if defined(CONFIG_PTP_CLOCK_MCUX) k_mutex_init(&context->ptp_mutex); k_sem_init(&context->ptp_ts_sem, 0, 1); #endif k_mutex_init(&context->rx_frame_buf_mutex); k_mutex_init(&context->tx_frame_buf_mutex); k_sem_init(&context->rx_thread_sem, 0, CONFIG_ETH_MCUX_RX_BUFFERS); k_sem_init(&context->tx_buf_sem, CONFIG_ETH_MCUX_TX_BUFFERS, CONFIG_ETH_MCUX_TX_BUFFERS); k_work_init(&context->phy_work, eth_mcux_phy_work); k_work_init_delayable(&context->delayed_phy_work, eth_mcux_delayed_phy_work); /* Start interruption-poll thread */ k_thread_create(&context->rx_thread, context->rx_thread_stack, K_KERNEL_STACK_SIZEOF(context->rx_thread_stack), eth_rx_thread, (void *) context, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_thread_name_set(&context->rx_thread, "mcux_eth_rx"); if (context->generate_mac) { context->generate_mac(context->mac_addr); } eth_mcux_init(dev); LOG_DBG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x", dev->name, context->mac_addr[0], context->mac_addr[1], context->mac_addr[2], context->mac_addr[3], context->mac_addr[4], context->mac_addr[5]); return 0; } static void eth_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_context *context = dev->data; net_if_set_link_addr(iface, context->mac_addr, sizeof(context->mac_addr), NET_LINK_ETHERNET); if (context->iface == NULL) { context->iface = iface; } #if defined(CONFIG_NET_DSA) dsa_register_master_tx(iface, &eth_tx); #endif ethernet_init(iface); net_if_carrier_off(iface); context->config_func(); } static enum ethernet_hw_caps eth_mcux_get_capabilities(const struct device *dev) { 
ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_HW_FILTERING | #if defined(CONFIG_NET_VLAN) ETHERNET_HW_VLAN | #endif #if defined(CONFIG_PTP_CLOCK_MCUX) ETHERNET_PTP | #endif #if defined(CONFIG_NET_DSA) ETHERNET_DSA_MASTER_PORT | #endif #if defined(CONFIG_ETH_MCUX_HW_ACCELERATION) ETHERNET_HW_TX_CHKSUM_OFFLOAD | ETHERNET_HW_RX_CHKSUM_OFFLOAD | #endif ETHERNET_AUTO_NEGOTIATION_SET | ETHERNET_LINK_100BASE_T; } static int eth_mcux_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct eth_context *context = dev->data; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(context->mac_addr, config->mac_address.addr, sizeof(context->mac_addr)); ENET_SetMacAddr(context->base, context->mac_addr); net_if_set_link_addr(context->iface, context->mac_addr, sizeof(context->mac_addr), NET_LINK_ETHERNET); LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name, context->mac_addr[0], context->mac_addr[1], context->mac_addr[2], context->mac_addr[3], context->mac_addr[4], context->mac_addr[5]); return 0; case ETHERNET_CONFIG_TYPE_FILTER: /* The ENET driver does not modify the address buffer but the API is not const */ if (config->filter.set) { ENET_AddMulticastGroup(context->base, (uint8_t *)config->filter.mac_address.addr); } else { ENET_LeaveMulticastGroup(context->base, (uint8_t *)config->filter.mac_address.addr); } return 0; default: break; } return -ENOTSUP; } #if defined(CONFIG_PTP_CLOCK_MCUX) static const struct device *eth_mcux_get_ptp_clock(const struct device *dev) { struct eth_context *context = dev->data; return context->ptp_clock; } #endif static const struct ethernet_api api_funcs = { .iface_api.init = eth_iface_init, #if defined(CONFIG_PTP_CLOCK_MCUX) .get_ptp_clock = eth_mcux_get_ptp_clock, #endif .get_capabilities = eth_mcux_get_capabilities, .set_config = eth_mcux_set_config, #if defined(CONFIG_NET_DSA) .send = dsa_tx, #else .send = eth_tx, #endif }; #if 
defined(CONFIG_PTP_CLOCK_MCUX) static void eth_mcux_ptp_isr(const struct device *dev) { struct eth_context *context = dev->data; unsigned int irq_lock_key = irq_lock(); enet_ptp_timer_channel_t channel; /* clear channel */ for (channel = kENET_PtpTimerChannel1; channel <= kENET_PtpTimerChannel4; channel++) { if (ENET_Ptp1588GetChannelStatus(context->base, channel)) { ENET_Ptp1588ClearChannelStatus(context->base, channel); } } ENET_TimeStampIRQHandler(context->base, &context->enet_handle); irq_unlock(irq_lock_key); } #endif #if DT_INST_IRQ_HAS_NAME(0, common) || DT_INST_IRQ_HAS_NAME(1, common) static void eth_mcux_common_isr(const struct device *dev) { struct eth_context *context = dev->data; uint32_t EIR = ENET_GetInterruptStatus(context->base); unsigned int irq_lock_key = irq_lock(); if (EIR & (kENET_RxBufferInterrupt | kENET_RxFrameInterrupt)) { /* disable the IRQ for RX */ context->rx_irq_num++; #if FSL_FEATURE_ENET_QUEUE > 1 /* Only use ring 0 in this driver */ ENET_ReceiveIRQHandler(context->base, &context->enet_handle, 0); #else ENET_ReceiveIRQHandler(context->base, &context->enet_handle); #endif ENET_DisableInterrupts(context->base, kENET_RxFrameInterrupt | kENET_RxBufferInterrupt); } if (EIR & kENET_TxFrameInterrupt) { #if FSL_FEATURE_ENET_QUEUE > 1 ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0); #else ENET_TransmitIRQHandler(context->base, &context->enet_handle); #endif } if (EIR | kENET_TxBufferInterrupt) { ENET_ClearInterruptStatus(context->base, kENET_TxBufferInterrupt); ENET_DisableInterrupts(context->base, kENET_TxBufferInterrupt); } if (EIR & ENET_EIR_MII_MASK) { k_work_submit(&context->phy_work); ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt); } #if defined(CONFIG_PTP_CLOCK_MCUX) if (EIR & ENET_TS_INTERRUPT) { ENET_TimeStampIRQHandler(context->base, &context->enet_handle); } #endif irq_unlock(irq_lock_key); } #endif #if DT_INST_IRQ_HAS_NAME(0, rx) || DT_INST_IRQ_HAS_NAME(1, rx) static void eth_mcux_rx_isr(const struct 
device *dev)
{
	struct eth_context *context = dev->data;

	/* Mask RX interrupts; the RX thread re-enables them after draining. */
	ENET_DisableInterrupts(context->base,
			       kENET_RxFrameInterrupt | kENET_RxBufferInterrupt);
	ENET_ReceiveIRQHandler(context->base, &context->enet_handle);
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, tx) || DT_INST_IRQ_HAS_NAME(1, tx)
/* Dedicated TX IRQ vector: delegate to the MCUX transmit IRQ handler. */
static void eth_mcux_tx_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
#if FSL_FEATURE_ENET_QUEUE > 1
	ENET_TransmitIRQHandler(context->base, &context->enet_handle, 0);
#else
	ENET_TransmitIRQHandler(context->base, &context->enet_handle);
#endif
}
#endif

#if DT_INST_IRQ_HAS_NAME(0, err) || DT_INST_IRQ_HAS_NAME(1, err)
/* Error IRQ vector: only MII (SMI transfer complete) events are handled
 * here, by kicking the PHY state-machine work item.
 */
static void eth_mcux_err_isr(const struct device *dev)
{
	struct eth_context *context = dev->data;
	uint32_t pending = ENET_GetInterruptStatus(context->base);

	if (pending & ENET_EIR_MII_MASK) {
		k_work_submit(&context->phy_work);
		ENET_ClearInterruptStatus(context->base, kENET_MiiInterrupt);
	}
}
#endif

/* Per-SoC source of a unique hardware ID, used to derive a stable
 * locally-administered MAC address.
 */
#if defined(CONFIG_SOC_SERIES_IMXRT10XX)
#define ETH_MCUX_UNIQUE_ID	(OCOTP->CFG1 ^ OCOTP->CFG2)
#elif defined(CONFIG_SOC_SERIES_IMXRT11XX)
#define ETH_MCUX_UNIQUE_ID	(OCOTP->FUSEN[40].FUSE)
#elif defined(CONFIG_SOC_SERIES_KINETIS_K6X)
#define ETH_MCUX_UNIQUE_ID	(SIM->UIDH ^ SIM->UIDMH ^ SIM->UIDML ^ SIM->UIDL)
#else
#error "Unsupported SOC"
#endif

/* Empty expansion used as the "else" branch of COND_CODE_1 below. */
#define ETH_MCUX_NONE

/* Connect and enable the named IRQ of driver instance n. */
#define ETH_MCUX_IRQ_INIT(n, name)					\
	do {								\
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq),		\
			    DT_INST_IRQ_BY_NAME(n, name, priority),	\
			    eth_mcux_##name##_isr,			\
			    DEVICE_DT_INST_GET(n),			\
			    0);						\
		irq_enable(DT_INST_IRQ_BY_NAME(n, name, irq));		\
	} while (false)

/* Expand to the IRQ setup only when instance n actually has that IRQ name. */
#define ETH_MCUX_IRQ(n, name)						\
	COND_CODE_1(DT_INST_IRQ_HAS_NAME(n, name),			\
		    (ETH_MCUX_IRQ_INIT(n, name)),			\
		    (ETH_MCUX_NONE))

#if defined(CONFIG_PTP_CLOCK_MCUX)
#define PTP_INST_NODEID(n) DT_INST_CHILD(n, ptp)

/* Connect and enable the IEEE 1588 timer IRQ of the PTP child node. */
#define ETH_MCUX_IRQ_PTP_INIT(n)						\
	do {									\
		IRQ_CONNECT(DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, irq),	\
			    DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, priority),	\
			    eth_mcux_ptp_isr,					\
			    DEVICE_DT_INST_GET(n),				\
			    0);							\
irq_enable(DT_IRQ_BY_NAME(PTP_INST_NODEID(n), ieee1588_tmr, irq)); \ } while (false) #define ETH_MCUX_IRQ_PTP(n) \ COND_CODE_1(DT_NODE_HAS_STATUS(PTP_INST_NODEID(n), okay), \ (ETH_MCUX_IRQ_PTP_INIT(n)), \ (ETH_MCUX_NONE)) #define ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) \ static enet_frame_info_t \ eth##n##_tx_frameinfo_array[CONFIG_ETH_MCUX_TX_BUFFERS]; #define ETH_MCUX_PTP_FRAMEINFO(n) \ .txFrameInfo = eth##n##_tx_frameinfo_array, #else #define ETH_MCUX_IRQ_PTP(n) #define ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) #define ETH_MCUX_PTP_FRAMEINFO(n) \ .txFrameInfo = NULL, #endif #define ETH_MCUX_GENERATE_MAC_RANDOM(n) \ static void generate_eth##n##_mac(uint8_t *mac_addr) \ { \ gen_random_mac(mac_addr, \ FREESCALE_OUI_B0, \ FREESCALE_OUI_B1, \ FREESCALE_OUI_B2); \ } #define ETH_MCUX_GENERATE_MAC_UNIQUE(n) \ static void generate_eth##n##_mac(uint8_t *mac_addr) \ { \ uint32_t id = ETH_MCUX_UNIQUE_ID; \ \ mac_addr[0] = FREESCALE_OUI_B0; \ mac_addr[0] |= 0x02; /* force LAA bit */ \ mac_addr[1] = FREESCALE_OUI_B1; \ mac_addr[2] = FREESCALE_OUI_B2; \ mac_addr[3] = id >> 8; \ mac_addr[4] = id >> 16; \ mac_addr[5] = id >> 0; \ mac_addr[5] += n; \ } #define ETH_MCUX_GENERATE_MAC(n) \ COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), \ (ETH_MCUX_GENERATE_MAC_RANDOM(n)), \ (ETH_MCUX_GENERATE_MAC_UNIQUE(n))) #define ETH_MCUX_MAC_ADDR_LOCAL(n) \ .mac_addr = DT_INST_PROP(n, local_mac_address), \ .generate_mac = NULL, #define ETH_MCUX_MAC_ADDR_GENERATE(n) \ .mac_addr = {0}, \ .generate_mac = generate_eth##n##_mac, #define ETH_MCUX_MAC_ADDR(n) \ COND_CODE_1(ETH_MCUX_MAC_ADDR_TO_BOOL(n), \ (ETH_MCUX_MAC_ADDR_LOCAL(n)), \ (ETH_MCUX_MAC_ADDR_GENERATE(n))) #ifdef CONFIG_SOC_FAMILY_KINETIS #define ETH_MCUX_POWER_INIT(n) \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ #define ETH_MCUX_POWER(n) \ COND_CODE_1(CONFIG_NET_POWER_MANAGEMENT, \ (ETH_MCUX_POWER_INIT(n)), \ (ETH_MCUX_NONE)) #define ETH_MCUX_PM_DEVICE_INIT(n) \ PM_DEVICE_DT_INST_DEFINE(n, eth_mcux_device_pm_action); #define 
ETH_MCUX_PM_DEVICE_GET(n) PM_DEVICE_DT_INST_GET(n) #else #define ETH_MCUX_POWER(n) #define ETH_MCUX_PM_DEVICE_INIT(n) #define ETH_MCUX_PM_DEVICE_GET(n) NULL #endif /* CONFIG_SOC_FAMILY_KINETIS */ #define ETH_MCUX_GEN_MAC(n) \ COND_CODE_0(ETH_MCUX_MAC_ADDR_TO_BOOL(n), \ (ETH_MCUX_GENERATE_MAC(n)), \ (ETH_MCUX_NONE)) /* * In the below code we explicitly define * ETH_MCUX_MAC_ADDR_TO_BOOL_0 for the '0' instance of enet driver. * * For instance N one shall add definition for ETH_MCUX_MAC_ADDR_TO_BOOL_N */ #if (NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))) == 0 #define ETH_MCUX_MAC_ADDR_TO_BOOL_0 0 #else #define ETH_MCUX_MAC_ADDR_TO_BOOL_0 1 #endif #define ETH_MCUX_MAC_ADDR_TO_BOOL(n) ETH_MCUX_MAC_ADDR_TO_BOOL_##n #if defined(CONFIG_PINCTRL) #define ETH_MCUX_PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n); #define ETH_MCUX_PINCTRL_INIT(n) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), #else #define ETH_MCUX_PINCTRL_DEFINE(n) #define ETH_MCUX_PINCTRL_INIT(n) #endif #if DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay) && \ CONFIG_ETH_MCUX_USE_DTCM_FOR_DMA_BUFFER /* Use DTCM for hardware DMA buffers */ #define _mcux_dma_desc __dtcm_bss_section #define _mcux_dma_buffer __dtcm_noinit_section #define _mcux_driver_buffer __dtcm_noinit_section #elif defined(CONFIG_NOCACHE_MEMORY) #define _mcux_dma_desc __nocache #define _mcux_dma_buffer __nocache #define _mcux_driver_buffer #else #define _mcux_dma_desc #define _mcux_dma_buffer #define _mcux_driver_buffer #endif #if defined(CONFIG_ETH_MCUX_PHY_RESET) #define ETH_MCUX_PHY_GPIOS(n) \ .int_gpio = GPIO_DT_SPEC_INST_GET(n, int_gpios), \ .reset_gpio = GPIO_DT_SPEC_INST_GET(n, reset_gpios), #else #define ETH_MCUX_PHY_GPIOS(n) #endif #define ETH_MCUX_INIT(n) \ ETH_MCUX_GEN_MAC(n) \ \ ETH_MCUX_PINCTRL_DEFINE(n) \ \ static void eth##n##_config_func(void); \ static _mcux_driver_buffer uint8_t \ tx_enet_frame_##n##_buf[NET_ETH_MAX_FRAME_SIZE]; \ static _mcux_driver_buffer uint8_t \ rx_enet_frame_##n##_buf[NET_ETH_MAX_FRAME_SIZE]; \ static status_t 
_MDIO_Write(uint8_t phyAddr, uint8_t regAddr, uint16_t data) \ { \ return ENET_MDIOWrite((ENET_Type *)DT_INST_REG_ADDR(n), phyAddr, regAddr, data);\ }; \ \ static status_t _MDIO_Read(uint8_t phyAddr, uint8_t regAddr, uint16_t *pData) \ { \ return ENET_MDIORead((ENET_Type *)DT_INST_REG_ADDR(n), phyAddr, regAddr, pData); \ }; \ \ static struct _phy_resource eth##n##_phy_resource = { \ .read = _MDIO_Read, \ .write = _MDIO_Write \ }; \ static phy_handle_t eth##n##_phy_handle = { \ .resource = (void *)&eth##n##_phy_resource \ }; \ static struct _phy_resource eth##n##_phy_config; \ \ static struct eth_context eth##n##_context = { \ .base = (ENET_Type *)DT_INST_REG_ADDR(n), \ .config_func = eth##n##_config_func, \ .phy_config = &eth##n##_phy_config, \ .phy_addr = DT_INST_PROP(n, phy_addr), \ .phy_duplex = kPHY_FullDuplex, \ .phy_speed = kPHY_Speed100M, \ .phy_handle = &eth##n##_phy_handle, \ .tx_frame_buf = tx_enet_frame_##n##_buf, \ .rx_frame_buf = rx_enet_frame_##n##_buf, \ ETH_MCUX_PINCTRL_INIT(n) \ ETH_MCUX_PHY_GPIOS(n) \ ETH_MCUX_MAC_ADDR(n) \ ETH_MCUX_POWER(n) \ }; \ \ static __aligned(ENET_BUFF_ALIGNMENT) \ _mcux_dma_desc \ enet_rx_bd_struct_t \ eth##n##_rx_buffer_desc[CONFIG_ETH_MCUX_RX_BUFFERS]; \ \ static __aligned(ENET_BUFF_ALIGNMENT) \ _mcux_dma_desc \ enet_tx_bd_struct_t \ eth##n##_tx_buffer_desc[CONFIG_ETH_MCUX_TX_BUFFERS]; \ \ static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \ _mcux_dma_buffer \ eth##n##_rx_buffer[CONFIG_ETH_MCUX_RX_BUFFERS] \ [ETH_MCUX_BUFFER_SIZE]; \ \ static uint8_t __aligned(ENET_BUFF_ALIGNMENT) \ _mcux_dma_buffer \ eth##n##_tx_buffer[CONFIG_ETH_MCUX_TX_BUFFERS] \ [ETH_MCUX_BUFFER_SIZE]; \ \ ETH_MCUX_PTP_FRAMEINFO_ARRAY(n) \ \ static const enet_buffer_config_t eth##n##_buffer_config = { \ .rxBdNumber = CONFIG_ETH_MCUX_RX_BUFFERS, \ .txBdNumber = CONFIG_ETH_MCUX_TX_BUFFERS, \ .rxBuffSizeAlign = ETH_MCUX_BUFFER_SIZE, \ .txBuffSizeAlign = ETH_MCUX_BUFFER_SIZE, \ .rxBdStartAddrAlign = eth##n##_rx_buffer_desc, \ .txBdStartAddrAlign = 
eth##n##_tx_buffer_desc, \ .rxBufferAlign = eth##n##_rx_buffer[0], \ .txBufferAlign = eth##n##_tx_buffer[0], \ .rxMaintainEnable = true, \ .txMaintainEnable = true, \ ETH_MCUX_PTP_FRAMEINFO(n) \ }; \ \ ETH_MCUX_PM_DEVICE_INIT(n) \ \ ETH_NET_DEVICE_DT_INST_DEFINE(n, \ eth_init, \ ETH_MCUX_PM_DEVICE_GET(n), \ &eth##n##_context, \ &eth##n##_buffer_config, \ CONFIG_ETH_INIT_PRIORITY, \ &api_funcs, \ NET_ETH_MTU); \ \ static void eth##n##_config_func(void) \ { \ ETH_MCUX_IRQ(n, rx); \ ETH_MCUX_IRQ(n, tx); \ ETH_MCUX_IRQ(n, err); \ ETH_MCUX_IRQ(n, common); \ ETH_MCUX_IRQ_PTP(n); \ } \ DT_INST_FOREACH_STATUS_OKAY(ETH_MCUX_INIT) #if defined(CONFIG_PTP_CLOCK_MCUX) struct ptp_context { struct eth_context *eth_context; #if defined(CONFIG_PINCTRL) const struct pinctrl_dev_config *pincfg; #endif /* CONFIG_PINCTRL */ }; #if defined(CONFIG_PINCTRL) #define ETH_MCUX_PTP_PINCTRL_DEFINE(n) PINCTRL_DT_DEFINE(n); #define ETH_MCUX_PTP_PINCTRL_INIT(n) .pincfg = PINCTRL_DT_DEV_CONFIG_GET(n), #else #define ETH_MCUX_PTP_PINCTRL_DEFINE(n) #define ETH_MCUX_PTP_PINCTRL_INIT(n) #endif /* CONFIG_PINCTRL */ ETH_MCUX_PTP_PINCTRL_DEFINE(DT_NODELABEL(ptp)) static struct ptp_context ptp_mcux_0_context = { ETH_MCUX_PTP_PINCTRL_INIT(DT_NODELABEL(ptp)) }; static int ptp_clock_mcux_set(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; struct eth_context *context = ptp_context->eth_context; enet_ptp_time_t enet_time; enet_time.second = tm->second; enet_time.nanosecond = tm->nanosecond; ENET_Ptp1588SetTimer(context->base, &context->enet_handle, &enet_time); return 0; } static int ptp_clock_mcux_get(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; struct eth_context *context = ptp_context->eth_context; enet_ptp_time_t enet_time; ENET_Ptp1588GetTimer(context->base, &context->enet_handle, &enet_time); tm->second = enet_time.second; tm->nanosecond = enet_time.nanosecond; return 0; } static int 
ptp_clock_mcux_adjust(const struct device *dev, int increment) { struct ptp_context *ptp_context = dev->data; struct eth_context *context = ptp_context->eth_context; int key, ret; ARG_UNUSED(dev); if ((increment <= (int32_t)(-NSEC_PER_SEC)) || (increment >= (int32_t)NSEC_PER_SEC)) { ret = -EINVAL; } else { key = irq_lock(); if (context->base->ATPER != NSEC_PER_SEC) { ret = -EBUSY; } else { /* Seconds counter is handled by software. Change the * period of one software second to adjust the clock. */ context->base->ATPER = NSEC_PER_SEC - increment; ret = 0; } irq_unlock(key); } return ret; } static int ptp_clock_mcux_rate_adjust(const struct device *dev, double ratio) { const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_MCUX_PTP_CLOCK_SRC_HZ; struct ptp_context *ptp_context = dev->data; struct eth_context *context = ptp_context->eth_context; int corr; int32_t mul; double val; /* No change needed. */ if ((ratio > 1.0 && ratio - 1.0 < 0.00000001) || (ratio < 1.0 && 1.0 - ratio < 0.00000001)) { return 0; } ratio *= context->clk_ratio; /* Limit possible ratio. */ if ((ratio > 1.0 + 1.0/(2 * hw_inc)) || (ratio < 1.0 - 1.0/(2 * hw_inc))) { return -EINVAL; } /* Save new ratio. */ context->clk_ratio = ratio; if (ratio < 1.0) { corr = hw_inc - 1; val = 1.0 / (hw_inc * (1.0 - ratio)); } else if (ratio > 1.0) { corr = hw_inc + 1; val = 1.0 / (hw_inc * (ratio - 1.0)); } else { val = 0; corr = hw_inc; } if (val >= INT32_MAX) { /* Value is too high. * It is not possible to adjust the rate of the clock. 
*/ mul = 0; } else { mul = val; } k_mutex_lock(&context->ptp_mutex, K_FOREVER); ENET_Ptp1588AdjustTimer(context->base, corr, mul); k_mutex_unlock(&context->ptp_mutex); return 0; } static const struct ptp_clock_driver_api api = { .set = ptp_clock_mcux_set, .get = ptp_clock_mcux_get, .adjust = ptp_clock_mcux_adjust, .rate_adjust = ptp_clock_mcux_rate_adjust, }; static int ptp_mcux_init(const struct device *port) { const struct device *const eth_dev = DEVICE_DT_GET(DT_NODELABEL(enet)); struct eth_context *context = eth_dev->data; struct ptp_context *ptp_context = port->data; #if defined(CONFIG_PINCTRL) int err; err = pinctrl_apply_state(ptp_context->pincfg, PINCTRL_STATE_DEFAULT); if (err) { return err; } #endif /* CONFIG_PINCTRL */ context->ptp_clock = port; ptp_context->eth_context = context; return 0; } DEVICE_DEFINE(mcux_ptp_clock_0, PTP_CLOCK_NAME, ptp_mcux_init, NULL, &ptp_mcux_0_context, NULL, POST_KERNEL, CONFIG_ETH_MCUX_PTP_CLOCK_INIT_PRIO, &api); #endif /* CONFIG_PTP_CLOCK_MCUX */ ```
/content/code_sandbox/drivers/ethernet/nxp_enet/eth_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
14,538
```unknown menuconfig ETH_NXP_ENET_QOS bool "NXP ENET QOS Ethernet Module Driver" default y depends on DT_HAS_NXP_ENET_QOS_ENABLED select MDIO if DT_HAS_NXP_ENET_QOS_MDIO_ENABLED help Enable NXP ENET Ethernet Module Driver. This driver handles IP module level tasks. if ETH_NXP_ENET_QOS config ETH_NXP_ENET_QOS_MAC bool "NXP ENET QOS Ethernet MAC Driver" default y depends on DT_HAS_NXP_ENET_QOS_MAC_ENABLED depends on NET_BUF_FIXED_DATA_SIZE help Enable NXP ENET QOS Ethernet MAC Driver. if ETH_NXP_ENET_QOS_MAC config ETH_NXP_ENET_QOS_TX_BUFFER_DESCRIPTORS int "Number of tx buffer descriptors" default 4 range 4 1024 help Number of TX buffer descriptors. config ETH_NXP_ENET_QOS_RX_BUFFER_DESCRIPTORS int "Number of rx buffer descriptors" default 4 range 4 1024 help Number of RX buffer descriptors. config ETH_NXP_ENET_QOS_DMA_RESET_WAIT_TIME int "Time in microseconds to wait for software reset" default 10 range 0 1000 help Time in us microseconds allow for software reset. Value of 0 will allow the driver to wait unlimited time. config ETH_NXP_ENET_QOS_RX_THREAD_STACK_SIZE int "Size of ENET QOS RX thread stack" default 1024 help Size in bytes of the ENET QOS RX thread stack. config ETH_NXP_ENET_QOS_RX_THREAD_PRIORITY int "Cooperative priority of the ENET QOS RX thread" default 3 help Cooperative priority of the ENET QOS RX thread endif # ETH_NXP_ENET_QOS_MAC endif # ETH_NXP_ENET_QOS ```
/content/code_sandbox/drivers/ethernet/eth_nxp_enet_qos/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
415
```unknown config MIPI_DSI_MCUX bool "NXP MCUX MIPI-DSI Host Controller" default y depends on DT_HAS_NXP_IMX_MIPI_DSI_ENABLED help NXP MIPI DSI controller driver config MIPI_DSI_MCUX_2L bool "NXP MCUX MIPI-DSI 2L Controller" default y depends on DT_HAS_NXP_MIPI_DSI_2L_ENABLED help NXP MIPI DSI 2L controller driver if MIPI_DSI_MCUX_2L config MIPI_DSI_MCUX_2L_SMARTDMA bool "Use smartDMA controller with MIPI DSI" default y depends on DMA_MCUX_SMARTDMA help Use SMARTDMA. This accelerator will automatically convert RGB565 input data to BGR565 (little endian to big endian), and write it to the MIPI DSI. config MIPI_DSI_MCUX_2L_SWAP16 bool "Swap 16 byte color" help Swap 16 byte color data from little to big endian format. When this Kconfig is enabled, the DSI expects RGB565 data in little endian format, which will then be byte swapped. endif # MIPI_DSI_MCUX_2L ```
/content/code_sandbox/drivers/mipi_dsi/Kconfig.mcux
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
284
```unknown config MIPI_DSI_STM32 bool "STM32 MIPI-DSI Host Controller" default y depends on DT_HAS_ST_STM32_MIPI_DSI_ENABLED depends on DT_HAS_ST_STM32_LTDC_ENABLED select USE_STM32_HAL_DSI help STM32 MIPI DSI controller driver ```
/content/code_sandbox/drivers/mipi_dsi/Kconfig.stm32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
75
```c /* * */ #include <errno.h> #include <zephyr/drivers/mipi_dsi.h> ssize_t mipi_dsi_generic_read(const struct device *dev, uint8_t channel, const void *params, size_t nparams, void *buf, size_t len) { struct mipi_dsi_msg msg = { .tx_len = nparams, .tx_buf = params, .rx_len = len, .rx_buf = buf }; switch (nparams) { case 0U: msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM; break; case 1U: msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM; break; case 2U: msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM; break; default: return -EINVAL; } return mipi_dsi_transfer(dev, channel, &msg); } ssize_t mipi_dsi_generic_write(const struct device *dev, uint8_t channel, const void *buf, size_t len) { struct mipi_dsi_msg msg = { .tx_buf = buf, .tx_len = len }; switch (len) { case 0U: msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM; break; case 1U: msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM; break; case 2U: msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM; break; default: msg.type = MIPI_DSI_GENERIC_LONG_WRITE; break; } return mipi_dsi_transfer(dev, channel, &msg); } ssize_t mipi_dsi_dcs_read(const struct device *dev, uint8_t channel, uint8_t cmd, void *buf, size_t len) { struct mipi_dsi_msg msg = { .type = MIPI_DSI_DCS_READ, .cmd = cmd, .rx_buf = buf, .rx_len = len }; return mipi_dsi_transfer(dev, channel, &msg); } ssize_t mipi_dsi_dcs_write(const struct device *dev, uint8_t channel, uint8_t cmd, const void *buf, size_t len) { struct mipi_dsi_msg msg = { .cmd = cmd, .tx_buf = buf, .tx_len = len }; switch (len) { case 0U: msg.type = MIPI_DSI_DCS_SHORT_WRITE; break; case 1U: msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM; break; default: msg.type = MIPI_DSI_DCS_LONG_WRITE; break; } return mipi_dsi_transfer(dev, channel, &msg); } ```
/content/code_sandbox/drivers/mipi_dsi/mipi_dsi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
602
```c /* * based on dsi_mcux.c * */ #define DT_DRV_COMPAT st_stm32_mipi_dsi #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/sys/printk.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/mipi_dsi.h> #include <zephyr/drivers/reset.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(dsi_stm32, CONFIG_MIPI_DSI_LOG_LEVEL); #if defined(CONFIG_STM32_LTDC_ARGB8888) #define STM32_DSI_INIT_PIXEL_FORMAT DSI_RGB888 #elif defined(CONFIG_STM32_LTDC_RGB888) #define STM32_DSI_INIT_PIXEL_FORMAT DSI_RGB888 #elif defined(CONFIG_STM32_LTDC_RGB565) #define STM32_DSI_INIT_PIXEL_FORMAT DSI_RGB565 #else #error "Invalid LTDC pixel format chosen" #endif /* CONFIG_STM32_LTDC_ARGB8888 */ #define MAX_TX_ESC_CLK_KHZ 20000 #define MAX_TX_ESC_CLK_DIV 8 struct mipi_dsi_stm32_config { const struct device *rcc; const struct reset_dt_spec reset; struct stm32_pclken dsi_clk; struct stm32_pclken ref_clk; struct stm32_pclken pix_clk; uint32_t data_lanes; uint32_t active_errors; uint32_t lp_rx_filter_freq; int test_pattern; }; struct mipi_dsi_stm32_data { DSI_HandleTypeDef hdsi; DSI_HOST_TimeoutTypeDef *host_timeouts; DSI_PHY_TimerTypeDef *phy_timings; DSI_VidCfgTypeDef vid_cfg; DSI_PLLInitTypeDef pll_init; uint32_t lane_clk_khz; uint32_t pixel_clk_khz; }; static void mipi_dsi_stm32_log_config(const struct device *dev) { const struct mipi_dsi_stm32_config *config = dev->config; struct mipi_dsi_stm32_data *data = dev->data; LOG_DBG("DISPLAY: pix %d kHz, lane %d kHz", data->pixel_clk_khz, data->lane_clk_khz); LOG_DBG("HAL_DSI_Init setup:"); LOG_DBG(" AutomaticClockLaneControl 0x%x", data->hdsi.Init.AutomaticClockLaneControl); LOG_DBG(" TXEscapeCkdiv %u", data->hdsi.Init.TXEscapeCkdiv); LOG_DBG(" NumberOfLanes %u", data->hdsi.Init.NumberOfLanes); LOG_DBG(" PLLNDIV %u", data->pll_init.PLLNDIV); LOG_DBG(" PLLIDF %u", data->pll_init.PLLIDF); LOG_DBG(" PLLODF %u", 
data->pll_init.PLLODF); LOG_DBG("HAL_DSI_ConfigVideoMode setup:"); LOG_DBG(" VirtualChannelID %u", data->vid_cfg.VirtualChannelID); LOG_DBG(" ColorCoding 0x%x", data->vid_cfg.ColorCoding); LOG_DBG(" LooselyPacked 0x%x", data->vid_cfg.LooselyPacked); LOG_DBG(" Mode 0x%x", data->vid_cfg.Mode); LOG_DBG(" PacketSize %u", data->vid_cfg.PacketSize); LOG_DBG(" NumberOfChunks %u", data->vid_cfg.NumberOfChunks); LOG_DBG(" NullPacketSize %u", data->vid_cfg.NullPacketSize); LOG_DBG(" HSPolarity 0x%x", data->vid_cfg.HSPolarity); LOG_DBG(" VSPolarity 0x%x", data->vid_cfg.VSPolarity); LOG_DBG(" DEPolarity 0x%x", data->vid_cfg.DEPolarity); LOG_DBG(" HorizontalSyncActive %u", data->vid_cfg.HorizontalSyncActive); LOG_DBG(" HorizontalBackPorch %u", data->vid_cfg.HorizontalBackPorch); LOG_DBG(" HorizontalLine %u", data->vid_cfg.HorizontalLine); LOG_DBG(" VerticalSyncActive %u", data->vid_cfg.VerticalSyncActive); LOG_DBG(" VerticalBackPorch %u", data->vid_cfg.VerticalBackPorch); LOG_DBG(" VerticalFrontPorch %u", data->vid_cfg.VerticalFrontPorch); LOG_DBG(" VerticalActive %u", data->vid_cfg.VerticalActive); LOG_DBG(" LPCommandEnable 0x%x", data->vid_cfg.LPCommandEnable); LOG_DBG(" LPLargestPacketSize %u", data->vid_cfg.LPLargestPacketSize); LOG_DBG(" LPVACTLargestPacketSize %u", data->vid_cfg.LPVACTLargestPacketSize); LOG_DBG(" LPHorizontalFrontPorchEnable 0x%x", data->vid_cfg.LPHorizontalFrontPorchEnable); LOG_DBG(" LPHorizontalBackPorchEnable 0x%x", data->vid_cfg.LPHorizontalBackPorchEnable); LOG_DBG(" LPVerticalActiveEnable 0x%x", data->vid_cfg.LPVerticalActiveEnable); LOG_DBG(" LPVerticalFrontPorchEnable 0x%x", data->vid_cfg.LPVerticalFrontPorchEnable); LOG_DBG(" LPVerticalBackPorchEnable 0x%x", data->vid_cfg.LPVerticalBackPorchEnable); LOG_DBG(" LPVerticalSyncActiveEnable 0x%x", data->vid_cfg.LPVerticalSyncActiveEnable); LOG_DBG(" FrameBTAAcknowledgeEnable 0x%x", data->vid_cfg.FrameBTAAcknowledgeEnable); if (config->active_errors) { LOG_DBG("HAL_DSI_ConfigErrorMonitor: 0x%x", 
config->active_errors); } if (config->lp_rx_filter_freq) { LOG_DBG("HAL_DSI_SetLowPowerRXFilter: %d", config->lp_rx_filter_freq); } if (data->host_timeouts) { DSI_HOST_TimeoutTypeDef *ht = data->host_timeouts; LOG_DBG("HAL_DSI_ConfigHostTimeouts:"); LOG_DBG(" TimeoutCkdiv %u", ht->TimeoutCkdiv); LOG_DBG(" HighSpeedTransmissionTimeout %u", ht->HighSpeedTransmissionTimeout); LOG_DBG(" LowPowerReceptionTimeout %u", ht->LowPowerReceptionTimeout); LOG_DBG(" HighSpeedReadTimeout %u", ht->HighSpeedReadTimeout); LOG_DBG(" LowPowerReadTimeout %u", ht->LowPowerReadTimeout); LOG_DBG(" HighSpeedWriteTimeout %u", ht->HighSpeedWriteTimeout); LOG_DBG(" HighSpeedWritePrespMode %u", ht->HighSpeedWritePrespMode); LOG_DBG(" LowPowerWriteTimeout %u", ht->LowPowerWriteTimeout); LOG_DBG(" BTATimeout %u", ht->BTATimeout); } if (data->phy_timings) { LOG_DBG("HAL_DSI_ConfigPhyTimer:"); LOG_DBG(" ClockLaneHS2LPTime %u", data->phy_timings->ClockLaneHS2LPTime); LOG_DBG(" ClockLaneLP2HSTime %u", data->phy_timings->ClockLaneLP2HSTime); LOG_DBG(" DataLaneHS2LPTime %u", data->phy_timings->DataLaneHS2LPTime); LOG_DBG(" DataLaneLP2HSTime %u", data->phy_timings->DataLaneLP2HSTime); LOG_DBG(" DataLaneMaxReadTime %u", data->phy_timings->DataLaneMaxReadTime); LOG_DBG(" StopWaitTime %u", data->phy_timings->StopWaitTime); } } static int mipi_dsi_stm32_host_init(const struct device *dev) { const struct mipi_dsi_stm32_config *config = dev->config; struct mipi_dsi_stm32_data *data = dev->data; uint32_t hse_clock; int ret; switch (config->data_lanes) { case 1: data->hdsi.Init.NumberOfLanes = DSI_ONE_DATA_LANE; break; case 2: data->hdsi.Init.NumberOfLanes = DSI_TWO_DATA_LANES; break; default: LOG_ERR("Number of DSI lanes (%d) not supported!", config->data_lanes); return -ENOTSUP; } ret = clock_control_get_rate(config->rcc, (clock_control_subsys_t)&config->pix_clk, &data->pixel_clk_khz); if (ret) { LOG_ERR("Get pixel clock failed! 
(%d)", ret); return ret; } data->pixel_clk_khz /= 1000; ret = clock_control_get_rate(config->rcc, (clock_control_subsys_t)&config->ref_clk, &hse_clock); if (ret) { LOG_ERR("Get HSE clock failed! (%d)", ret); return ret; } /* LANE_BYTE_CLOCK = CLK_IN / PLLIDF * 2 * PLLNDIV / 2 / PLLODF / 8 */ data->lane_clk_khz = hse_clock / data->pll_init.PLLIDF * 2 * data->pll_init.PLLNDIV / 2 / (1UL << data->pll_init.PLLODF) / 8 / 1000; /* stm32x_hal_dsi: The values 0 and 1 stop the TX_ESC clock generation */ data->hdsi.Init.TXEscapeCkdiv = 0; for (int i = 2; i <= MAX_TX_ESC_CLK_DIV; i++) { if ((data->lane_clk_khz / i) <= MAX_TX_ESC_CLK_KHZ) { data->hdsi.Init.TXEscapeCkdiv = i; break; } } if (data->hdsi.Init.TXEscapeCkdiv < 2) { LOG_WRN("DSI TX escape clock disabled."); } ret = HAL_DSI_Init(&data->hdsi, &data->pll_init); if (ret != HAL_OK) { LOG_ERR("DSI init failed! (%d)", ret); return -ret; } if (data->host_timeouts) { ret = HAL_DSI_ConfigHostTimeouts(&data->hdsi, data->host_timeouts); if (ret != HAL_OK) { LOG_ERR("Set DSI host timeouts failed! (%d)", ret); return -ret; } } if (data->phy_timings) { ret = HAL_DSI_ConfigPhyTimer(&data->hdsi, data->phy_timings); if (ret != HAL_OK) { LOG_ERR("Set DSI PHY timings failed! (%d)", ret); return -ret; } } ret = HAL_DSI_ConfigFlowControl(&data->hdsi, DSI_FLOW_CONTROL_BTA); if (ret != HAL_OK) { LOG_ERR("Setup DSI flow control failed! (%d)", ret); return -ret; } if (config->lp_rx_filter_freq) { ret = HAL_DSI_SetLowPowerRXFilter(&data->hdsi, config->lp_rx_filter_freq); if (ret != HAL_OK) { LOG_ERR("Setup DSI LP RX filter failed! (%d)", ret); return -ret; } } ret = HAL_DSI_ConfigErrorMonitor(&data->hdsi, config->active_errors); if (ret != HAL_OK) { LOG_ERR("Setup DSI error monitor failed! 
(%d)", ret); return -ret; } return 0; } static int mipi_dsi_stm32_attach(const struct device *dev, uint8_t channel, const struct mipi_dsi_device *mdev) { const struct mipi_dsi_stm32_config *config = dev->config; struct mipi_dsi_stm32_data *data = dev->data; DSI_VidCfgTypeDef *vcfg = &data->vid_cfg; int ret; if (!(mdev->mode_flags & MIPI_DSI_MODE_VIDEO)) { LOG_ERR("DSI host supports video mode only!"); return -ENOTSUP; } vcfg->VirtualChannelID = channel; vcfg->ColorCoding = STM32_DSI_INIT_PIXEL_FORMAT; if (mdev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { vcfg->Mode = DSI_VID_MODE_BURST; } else if (mdev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) { vcfg->Mode = DSI_VID_MODE_NB_PULSES; } else { vcfg->Mode = DSI_VID_MODE_NB_EVENTS; } vcfg->PacketSize = mdev->timings.hactive; vcfg->NumberOfChunks = 0; vcfg->NullPacketSize = 0xFFFU; vcfg->HorizontalSyncActive = (mdev->timings.hsync * data->lane_clk_khz) / data->pixel_clk_khz; vcfg->HorizontalBackPorch = (mdev->timings.hbp * data->lane_clk_khz) / data->pixel_clk_khz; vcfg->HorizontalLine = ((mdev->timings.hactive + mdev->timings.hsync + mdev->timings.hbp + mdev->timings.hfp) * data->lane_clk_khz) / data->pixel_clk_khz; vcfg->VerticalSyncActive = mdev->timings.vsync; vcfg->VerticalBackPorch = mdev->timings.vbp; vcfg->VerticalFrontPorch = mdev->timings.vfp; vcfg->VerticalActive = mdev->timings.vactive; if (mdev->mode_flags & MIPI_DSI_MODE_LPM) { vcfg->LPCommandEnable = DSI_LP_COMMAND_ENABLE; } else { vcfg->LPCommandEnable = DSI_LP_COMMAND_DISABLE; } vcfg->LPHorizontalFrontPorchEnable = DSI_LP_HFP_ENABLE; vcfg->LPHorizontalBackPorchEnable = DSI_LP_HBP_ENABLE; vcfg->LPVerticalActiveEnable = DSI_LP_VACT_ENABLE; vcfg->LPVerticalFrontPorchEnable = DSI_LP_VFP_ENABLE; vcfg->LPVerticalBackPorchEnable = DSI_LP_VBP_ENABLE; vcfg->LPVerticalSyncActiveEnable = DSI_LP_VSYNC_ENABLE; ret = HAL_DSI_ConfigVideoMode(&data->hdsi, vcfg); if (ret != HAL_OK) { LOG_ERR("Setup DSI video mode failed! 
(%d)", ret); return -ret; } if (IS_ENABLED(CONFIG_MIPI_DSI_LOG_LEVEL_DBG)) { mipi_dsi_stm32_log_config(dev); } ret = HAL_DSI_Start(&data->hdsi); if (ret != HAL_OK) { LOG_ERR("Start DSI host failed! (%d)", ret); return -ret; } if (config->test_pattern >= 0) { ret = HAL_DSI_PatternGeneratorStart(&data->hdsi, 0, config->test_pattern); if (ret != HAL_OK) { LOG_ERR("Start DSI pattern generator failed! (%d)", ret); return -ret; } } return 0; } static ssize_t mipi_dsi_stm32_transfer(const struct device *dev, uint8_t channel, struct mipi_dsi_msg *msg) { struct mipi_dsi_stm32_data *data = dev->data; uint32_t param1 = 0; uint32_t param2 = 0; ssize_t len; int ret; switch (msg->type) { case MIPI_DSI_DCS_READ: ret = HAL_DSI_Read(&data->hdsi, channel, (uint8_t *)msg->rx_buf, msg->rx_len, msg->type, msg->cmd, (uint8_t *)msg->rx_buf); len = msg->rx_len; break; case MIPI_DSI_DCS_SHORT_WRITE: case MIPI_DSI_DCS_SHORT_WRITE_PARAM: param1 = msg->cmd; if (msg->tx_len >= 1U) { param2 = ((uint8_t *)msg->tx_buf)[0]; } ret = HAL_DSI_ShortWrite(&data->hdsi, channel, msg->type, param1, param2); len = msg->tx_len; break; case MIPI_DSI_DCS_LONG_WRITE: ret = HAL_DSI_LongWrite(&data->hdsi, channel, msg->type, msg->tx_len, msg->cmd, (uint8_t *)msg->tx_buf); len = msg->tx_len; break; case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: param1 = ((uint8_t *)msg->tx_buf)[0]; if (msg->tx_len == 1U) { param2 = ((uint8_t *)msg->tx_buf)[1]; } if (msg->tx_len >= 2U) { param2 = *(uint16_t *)&((uint8_t *)msg->tx_buf)[1]; } ret = HAL_DSI_ShortWrite(&data->hdsi, channel, msg->type, param1, param2); len = msg->tx_len; break; case MIPI_DSI_GENERIC_LONG_WRITE: ret = HAL_DSI_LongWrite(&data->hdsi, channel, msg->type, msg->tx_len, ((uint8_t *)msg->tx_buf)[0], &((uint8_t *)msg->tx_buf)[1]); len = msg->tx_len; break; default: LOG_ERR("Unsupported message type (%d)", msg->type); return -ENOTSUP; } if 
(IS_ENABLED(CONFIG_MIPI_DSI_LOG_LEVEL_DBG)) { char tmp[64]; if (msg->type == MIPI_DSI_DCS_READ) { snprintk(tmp, sizeof(tmp), "RX: ch %3d, reg 0x%02x, len %2d", channel, msg->cmd, msg->rx_len); LOG_HEXDUMP_DBG(msg->rx_buf, msg->rx_len, tmp); } else { snprintk(tmp, sizeof(tmp), "TX: ch %3d, reg 0x%02x, len %2d", channel, msg->cmd, msg->tx_len); LOG_HEXDUMP_DBG(msg->tx_buf, msg->tx_len, tmp); } } if (ret != HAL_OK) { LOG_ERR("Transfer failed! (%d)", ret); return -EIO; } return len; } static struct mipi_dsi_driver_api dsi_stm32_api = { .attach = mipi_dsi_stm32_attach, .transfer = mipi_dsi_stm32_transfer, }; static int mipi_dsi_stm32_init(const struct device *dev) { const struct mipi_dsi_stm32_config *config = dev->config; int ret; if (!device_is_ready(config->rcc)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = clock_control_on(config->rcc, (clock_control_subsys_t)&config->dsi_clk); if (ret < 0) { LOG_ERR("Enable DSI peripheral clock failed! (%d)", ret); return ret; } (void)reset_line_toggle_dt(&config->reset); ret = mipi_dsi_stm32_host_init(dev); if (ret) { LOG_ERR("Setup DSI host failed! 
(%d)", ret); return ret; } return 0; } #define CHILD_GET_DATA_LANES(child) DT_PROP(child, data_lanes) #define STM32_MIPI_DSI_DEVICE(inst) \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, host_timeouts), \ (static DSI_HOST_TimeoutTypeDef host_timeouts_##inst = { \ .TimeoutCkdiv = DT_INST_PROP_BY_IDX(inst, host_timeouts, 0), \ .HighSpeedTransmissionTimeout = \ DT_INST_PROP_BY_IDX(inst, host_timeouts, 1), \ .LowPowerReceptionTimeout = \ DT_INST_PROP_BY_IDX(inst, host_timeouts, 2), \ .HighSpeedReadTimeout = DT_INST_PROP_BY_IDX(inst, host_timeouts, 3), \ .LowPowerReadTimeout = DT_INST_PROP_BY_IDX(inst, host_timeouts, 4), \ .HighSpeedWriteTimeout = DT_INST_PROP_BY_IDX(inst, host_timeouts, 5), \ .HighSpeedWritePrespMode = DT_INST_PROP_BY_IDX(inst, host_timeouts, 6), \ .LowPowerWriteTimeout = DT_INST_PROP_BY_IDX(inst, host_timeouts, 7), \ .BTATimeout = DT_INST_PROP_BY_IDX(inst, host_timeouts, 8) \ }), ()); \ COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, phy_timings), \ (static DSI_PHY_TimerTypeDef phy_timings_##inst = { \ .ClockLaneHS2LPTime = DT_INST_PROP_BY_IDX(inst, phy_timings, 0), \ .ClockLaneLP2HSTime = DT_INST_PROP_BY_IDX(inst, phy_timings, 1), \ .DataLaneHS2LPTime = DT_INST_PROP_BY_IDX(inst, phy_timings, 2), \ .DataLaneLP2HSTime = DT_INST_PROP_BY_IDX(inst, phy_timings, 3), \ .DataLaneMaxReadTime = DT_INST_PROP_BY_IDX(inst, phy_timings, 4), \ .StopWaitTime = DT_INST_PROP_BY_IDX(inst, phy_timings, 5) \ }), ()); \ /* Only child data-lanes property at index 0 is taken into account */ \ static const uint32_t data_lanes_##inst[] = { \ DT_INST_FOREACH_CHILD_STATUS_OKAY_SEP_VARGS(inst, DT_PROP_BY_IDX, (,), \ data_lanes, 0) \ }; \ static const struct mipi_dsi_stm32_config stm32_dsi_config_##inst = { \ .rcc = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE), \ .reset = RESET_DT_SPEC_INST_GET(inst), \ .dsi_clk = { \ .enr = DT_INST_CLOCKS_CELL_BY_NAME(inst, dsiclk, bits), \ .bus = DT_INST_CLOCKS_CELL_BY_NAME(inst, dsiclk, bus), \ }, \ .ref_clk = { \ .enr = DT_INST_CLOCKS_CELL_BY_NAME(inst, refclk, 
bits), \ .bus = DT_INST_CLOCKS_CELL_BY_NAME(inst, refclk, bus), \ }, \ .pix_clk = { \ .enr = DT_INST_CLOCKS_CELL_BY_NAME(inst, pixelclk, bits), \ .bus = DT_INST_CLOCKS_CELL_BY_NAME(inst, pixelclk, bus), \ }, \ /* Use only one (the first) display configuration for DSI HOST configuration */ \ .data_lanes = data_lanes_##inst[0], \ .active_errors = DT_INST_PROP_OR(inst, active_errors, HAL_DSI_ERROR_NONE), \ .lp_rx_filter_freq = DT_INST_PROP_OR(inst, lp_rx_filter, 0), \ .test_pattern = DT_INST_PROP_OR(inst, test_pattern, -1), \ }; \ static struct mipi_dsi_stm32_data stm32_dsi_data_##inst = { \ .hdsi = { \ .Instance = (DSI_TypeDef *)DT_INST_REG_ADDR(inst), \ .Init = { \ .AutomaticClockLaneControl = \ DT_INST_PROP(inst, non_continuous) ? \ DSI_AUTO_CLK_LANE_CTRL_ENABLE : \ DSI_AUTO_CLK_LANE_CTRL_DISABLE, \ }, \ }, \ .host_timeouts = COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, host_timeouts), \ (&host_timeouts_##inst), (NULL)), \ .phy_timings = COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, phy_timings), \ (&phy_timings_##inst), (NULL)), \ .vid_cfg = { \ .HSPolarity = DT_INST_PROP(inst, hs_active_high) ? \ DSI_HSYNC_ACTIVE_HIGH : DSI_HSYNC_ACTIVE_LOW, \ .VSPolarity = DT_INST_PROP(inst, vs_active_high) ? \ DSI_VSYNC_ACTIVE_HIGH : DSI_VSYNC_ACTIVE_LOW, \ .DEPolarity = DT_INST_PROP(inst, de_active_high) ? \ DSI_DATA_ENABLE_ACTIVE_HIGH : DSI_DATA_ENABLE_ACTIVE_LOW, \ .LooselyPacked = DT_INST_PROP(inst, loosely_packed) ? \ DSI_LOOSELY_PACKED_ENABLE : DSI_LOOSELY_PACKED_DISABLE, \ .LPLargestPacketSize = DT_INST_PROP_OR(inst, largest_packet_size, 4), \ .LPVACTLargestPacketSize = DT_INST_PROP_OR(inst, largest_packet_size, 4), \ .FrameBTAAcknowledgeEnable = DT_INST_PROP(inst, bta_ack_disable) ? 
\ DSI_FBTAA_DISABLE : DSI_FBTAA_ENABLE, \ }, \ .pll_init = { \ .PLLNDIV = DT_INST_PROP(inst, pll_ndiv), \ .PLLIDF = DT_INST_PROP(inst, pll_idf), \ .PLLODF = DT_INST_PROP(inst, pll_odf), \ }, \ }; \ DEVICE_DT_INST_DEFINE(inst, &mipi_dsi_stm32_init, NULL, \ &stm32_dsi_data_##inst, &stm32_dsi_config_##inst, \ POST_KERNEL, CONFIG_MIPI_DSI_INIT_PRIORITY, &dsi_stm32_api); DT_INST_FOREACH_STATUS_OKAY(STM32_MIPI_DSI_DEVICE) ```
/content/code_sandbox/drivers/mipi_dsi/dsi_stm32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,852
```unknown # MIPI-DSI host controller configuration options menuconfig MIPI_DSI bool "MIPI-DSI Host Controller drivers [EXPERIMENTAL]" select EXPERIMENTAL help Add support for MIPI-DSI host controllers if MIPI_DSI module = MIPI_DSI module-str = mipi_dsi source "subsys/logging/Kconfig.template.log_config" config MIPI_DSI_INIT_PRIORITY int "Initialization priority" default 86 help MIPI-DSI Host Controllers initialization priority. source "drivers/mipi_dsi/Kconfig.mcux" source "drivers/mipi_dsi/Kconfig.stm32" endif ```
/content/code_sandbox/drivers/mipi_dsi/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
138
```c /* * */ /* Based on dsi_mcux.c, which is (c) 2022 NXP */ #define DT_DRV_COMPAT nxp_mipi_dsi_2l #include <zephyr/kernel.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/mipi_dsi.h> #include <zephyr/drivers/mipi_dsi/mipi_dsi_mcux_2l.h> #include <zephyr/drivers/dma.h> #include <zephyr/drivers/dma/dma_mcux_smartdma.h> #include <zephyr/logging/log.h> #include <fsl_inputmux.h> #include <fsl_mipi_dsi.h> #include <fsl_clock.h> #include <soc.h> LOG_MODULE_REGISTER(dsi_mcux_host, CONFIG_MIPI_DSI_LOG_LEVEL); struct mcux_mipi_dsi_config { MIPI_DSI_HOST_Type *base; dsi_dpi_config_t dpi_config; bool auto_insert_eotp; bool noncontinuous_hs_clk; const struct device *bit_clk_dev; clock_control_subsys_t bit_clk_subsys; const struct device *esc_clk_dev; clock_control_subsys_t esc_clk_subsys; const struct device *pixel_clk_dev; clock_control_subsys_t pixel_clk_subsys; uint32_t dphy_ref_freq; #ifdef CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA const struct device *smart_dma; #else void (*irq_config_func)(const struct device *dev); #endif }; struct mcux_mipi_dsi_data { dsi_handle_t mipi_handle; struct k_sem transfer_sem; #ifdef CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA uint8_t dma_slot; #endif }; /* MAX DSI TX payload */ #define DSI_TX_MAX_PAYLOAD_BYTE (64U * 4U) #ifdef CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA /* Callback for DSI DMA transfer completion, called in ISR context */ static void dsi_mcux_dma_cb(const struct device *dma_dev, void *user_data, uint32_t channel, int status) { const struct device *dev = user_data; const struct mcux_mipi_dsi_config *config = dev->config; struct mcux_mipi_dsi_data *data = dev->data; uint32_t int_flags1, int_flags2; if (status != 0) { LOG_ERR("SMARTDMA transfer failed"); } else { /* Disable DSI interrupts at transfer completion */ DSI_DisableInterrupts(config->base, kDSI_InterruptGroup1ApbTxDone | kDSI_InterruptGroup1HtxTo, 0U); DSI_GetAndClearInterruptStatus(config->base, &int_flags1, &int_flags2); k_sem_give(&data->transfer_sem); } } /* Helper 
function to transfer DSI color (DMA based implementation) */ static int dsi_mcux_tx_color(const struct device *dev, uint8_t channel, struct mipi_dsi_msg *msg) { /* * Color streams are a special case for this DSI peripheral, because * the SMARTDMA peripheral (if enabled) can be used to accelerate * the transfer of data to the DSI. The SMARTDMA has the additional * advantage over traditional DMA of being able to automatically * byte swap color data. This is advantageous, as most graphical * frameworks store RGB data in little endian format, but many * MIPI displays expect color data in big endian format. */ const struct mcux_mipi_dsi_config *config = dev->config; struct mcux_mipi_dsi_data *data = dev->data; struct dma_config dma_cfg = {0}; struct dma_block_config block = {0}; int ret; if (channel != 0) { return -ENOTSUP; /* DMA can only transfer on virtual channel 0 */ } /* Configure smartDMA device, and run transfer */ block.source_address = (uint32_t)msg->tx_buf; block.block_size = msg->tx_len; dma_cfg.dma_callback = dsi_mcux_dma_cb; dma_cfg.user_data = (struct device *)dev; dma_cfg.head_block = &block; dma_cfg.block_count = 1; dma_cfg.dma_slot = data->dma_slot; dma_cfg.channel_direction = MEMORY_TO_PERIPHERAL; ret = dma_config(config->smart_dma, 0, &dma_cfg); if (ret < 0) { LOG_ERR("Could not configure SMARTDMA"); return ret; } /* * SMARTDMA uses DSI interrupt line as input for the DMA * transfer trigger. Therefore, we need to enable DSI TX * interrupts in order to trigger the DMA engine. 
* Note that if the MIPI IRQ is enabled in * the NVIC, it will fire on every SMARTDMA transfer */ DSI_EnableInterrupts(config->base, kDSI_InterruptGroup1ApbTxDone | kDSI_InterruptGroup1HtxTo, 0U); /* Trigger DMA engine */ ret = dma_start(config->smart_dma, 0); if (ret < 0) { LOG_ERR("Could not start SMARTDMA"); return ret; } /* Wait for TX completion */ k_sem_take(&data->transfer_sem, K_FOREVER); return msg->tx_len; } #else /* CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA is not set */ /* Callback for DSI transfer completion, called in ISR context */ static void dsi_transfer_complete(MIPI_DSI_HOST_Type *base, dsi_handle_t *handle, status_t status, void *userData) { struct mcux_mipi_dsi_data *data = userData; k_sem_give(&data->transfer_sem); } /* Helper function to transfer DSI color (Interrupt based implementation) */ static int dsi_mcux_tx_color(const struct device *dev, uint8_t channel, struct mipi_dsi_msg *msg) { const struct mcux_mipi_dsi_config *config = dev->config; struct mcux_mipi_dsi_data *data = dev->data; status_t status; dsi_transfer_t xfer = { .virtualChannel = channel, .txData = msg->tx_buf, .rxDataSize = (uint16_t)msg->rx_len, .rxData = msg->rx_buf, .sendDscCmd = true, .dscCmd = msg->cmd, .txDataType = kDSI_TxDataDcsLongWr, /* default to high speed unless told to use low power */ .flags = (msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : kDSI_TransferUseHighSpeed, }; /* * Cap transfer size. Note that we subtract six bytes here, * one for the DSC command and five to insure that * transfers are still aligned on a pixel boundary * (two or three byte pixel sizes are supported). 
*/ xfer.txDataSize = MIN(msg->tx_len, (DSI_TX_MAX_PAYLOAD_BYTE - 6)); if (IS_ENABLED(CONFIG_MIPI_DSI_MCUX_2L_SWAP16)) { /* Manually swap the 16 byte color data in software */ uint8_t *src = (uint8_t *)xfer.txData; uint8_t tmp; for (uint32_t i = 0; i < xfer.txDataSize; i += 2) { tmp = src[i]; src[i] = src[i + 1]; src[i + 1] = tmp; } } /* Send TX data using non-blocking DSI API */ status = DSI_TransferNonBlocking(config->base, &data->mipi_handle, &xfer); /* Wait for transfer completion */ k_sem_take(&data->transfer_sem, K_FOREVER); if (status != kStatus_Success) { LOG_ERR("Transmission failed"); return -EIO; } return xfer.txDataSize; } /* ISR is used for DSI interrupt based implementation, unnecessary if DMA is used */ static int mipi_dsi_isr(const struct device *dev) { const struct mcux_mipi_dsi_config *config = dev->config; struct mcux_mipi_dsi_data *data = dev->data; DSI_TransferHandleIRQ(config->base, &data->mipi_handle); return 0; } #endif static int dsi_mcux_attach(const struct device *dev, uint8_t channel, const struct mipi_dsi_device *mdev) { const struct mcux_mipi_dsi_config *config = dev->config; dsi_dphy_config_t dphy_config; dsi_config_t dsi_config; uint32_t dphy_bit_clk_freq; uint32_t dphy_esc_clk_freq; uint32_t dsi_pixel_clk_freq; uint32_t bit_width; DSI_GetDefaultConfig(&dsi_config); dsi_config.numLanes = mdev->data_lanes; dsi_config.autoInsertEoTp = config->auto_insert_eotp; dsi_config.enableNonContinuousHsClk = config->noncontinuous_hs_clk; imxrt_pre_init_display_interface(); /* Init the DSI module. */ DSI_Init(config->base, &dsi_config); #ifdef CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA /* Connect DSI IRQ line to SMARTDMA trigger via * INPUTMUX. 
*/ /* Attach INPUTMUX from MIPI to SMARTDMA */ INPUTMUX_Init(INPUTMUX); INPUTMUX_AttachSignal(INPUTMUX, 0, kINPUTMUX_MipiIrqToSmartDmaInput); /* Gate inputmux clock to save power */ INPUTMUX_Deinit(INPUTMUX); if (!device_is_ready(config->smart_dma)) { return -ENODEV; } struct mcux_mipi_dsi_data *data = dev->data; switch (mdev->pixfmt) { case MIPI_DSI_PIXFMT_RGB888: data->dma_slot = DMA_SMARTDMA_MIPI_RGB888_DMA; break; case MIPI_DSI_PIXFMT_RGB565: if (IS_ENABLED(CONFIG_MIPI_DSI_MCUX_2L_SWAP16)) { data->dma_slot = DMA_SMARTDMA_MIPI_RGB565_DMA_SWAP; } else { data->dma_slot = DMA_SMARTDMA_MIPI_RGB565_DMA; } break; default: LOG_ERR("SMARTDMA does not support pixel_format %u", mdev->pixfmt); return -ENODEV; } #else struct mcux_mipi_dsi_data *data = dev->data; /* Create transfer handle */ if (DSI_TransferCreateHandle(config->base, &data->mipi_handle, dsi_transfer_complete, data) != kStatus_Success) { return -ENODEV; } #endif /* Get the DPHY bit clock frequency */ if (clock_control_get_rate(config->bit_clk_dev, config->bit_clk_subsys, &dphy_bit_clk_freq)) { return -EINVAL; }; /* Get the DPHY ESC clock frequency */ if (clock_control_get_rate(config->esc_clk_dev, config->esc_clk_subsys, &dphy_esc_clk_freq)) { return -EINVAL; } /* Get the Pixel clock frequency */ if (clock_control_get_rate(config->pixel_clk_dev, config->pixel_clk_subsys, &dsi_pixel_clk_freq)) { return -EINVAL; } switch (config->dpi_config.pixelPacket) { case kDSI_PixelPacket16Bit: bit_width = 16; break; case kDSI_PixelPacket18Bit: __fallthrough; case kDSI_PixelPacket18BitLoosely: bit_width = 18; break; case kDSI_PixelPacket24Bit: bit_width = 24; break; default: return -EINVAL; /* Invalid bit width enum value? */ } /* Init DPHY. 
* * The DPHY bit clock must be fast enough to send out the pixels, it should be * larger than: * * (Pixel clock * bit per output pixel) / number of MIPI data lane */ if (((dsi_pixel_clk_freq * bit_width) / mdev->data_lanes) > dphy_bit_clk_freq) { return -EINVAL; } DSI_GetDphyDefaultConfig(&dphy_config, dphy_bit_clk_freq, dphy_esc_clk_freq); if (config->dphy_ref_freq != 0) { dphy_bit_clk_freq = DSI_InitDphy(config->base, &dphy_config, config->dphy_ref_freq); } else { /* DPHY PLL is not present, ref clock is unused */ DSI_InitDphy(config->base, &dphy_config, 0); } /* * If nxp,lcdif node is present, then the MIPI DSI driver will * accept input on the DPI port from the LCDIF, and convert the output * to DSI data. This is useful for video mode, where the LCDIF can * constantly refresh the MIPI panel. */ if (mdev->mode_flags & MIPI_DSI_MODE_VIDEO) { /* Init DPI interface. */ DSI_SetDpiConfig(config->base, &config->dpi_config, mdev->data_lanes, dsi_pixel_clk_freq, dphy_bit_clk_freq); } imxrt_post_init_display_interface(); return 0; } static int dsi_mcux_detach(const struct device *dev, uint8_t channel, const struct mipi_dsi_device *mdev) { const struct mcux_mipi_dsi_config *config = dev->config; /* Enable DPHY auto power down */ DSI_DeinitDphy(config->base); /* Fully power off DPHY */ config->base->PD_DPHY = 0x1; /* Deinit MIPI */ DSI_Deinit(config->base); /* Call IMX RT clock function to gate clocks and power at SOC level */ imxrt_deinit_display_interface(); return 0; } static ssize_t dsi_mcux_transfer(const struct device *dev, uint8_t channel, struct mipi_dsi_msg *msg) { const struct mcux_mipi_dsi_config *config = dev->config; dsi_transfer_t dsi_xfer = {0}; status_t status; int ret; dsi_xfer.virtualChannel = channel; dsi_xfer.txDataSize = msg->tx_len; dsi_xfer.txData = msg->tx_buf; dsi_xfer.rxDataSize = msg->rx_len; dsi_xfer.rxData = msg->rx_buf; /* default to high speed unless told to use low power */ dsi_xfer.flags = (msg->flags & MIPI_DSI_MSG_USE_LPM) ? 
0 : kDSI_TransferUseHighSpeed; switch (msg->type) { case MIPI_DSI_DCS_READ: LOG_ERR("DCS Read not yet implemented or used"); return -ENOTSUP; case MIPI_DSI_DCS_SHORT_WRITE: dsi_xfer.sendDscCmd = true; dsi_xfer.dscCmd = msg->cmd; dsi_xfer.txDataType = kDSI_TxDataDcsShortWrNoParam; break; case MIPI_DSI_DCS_SHORT_WRITE_PARAM: dsi_xfer.sendDscCmd = true; dsi_xfer.dscCmd = msg->cmd; dsi_xfer.txDataType = kDSI_TxDataDcsShortWrOneParam; break; case MIPI_DSI_DCS_LONG_WRITE: dsi_xfer.sendDscCmd = true; dsi_xfer.dscCmd = msg->cmd; dsi_xfer.txDataType = kDSI_TxDataDcsLongWr; if (msg->flags & MCUX_DSI_2L_FB_DATA) { /* * Special case- transfer framebuffer data using * SMARTDMA or non blocking DSI API. The framebuffer * will also be color swapped, if enabled. */ ret = dsi_mcux_tx_color(dev, channel, msg); if (ret < 0) { LOG_ERR("Transmission failed"); return -EIO; } return ret; } break; case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: dsi_xfer.txDataType = kDSI_TxDataGenShortWrNoParam; break; case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: dsi_xfer.txDataType = kDSI_TxDataGenShortWrOneParam; break; case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: dsi_xfer.txDataType = kDSI_TxDataGenShortWrTwoParam; break; case MIPI_DSI_GENERIC_LONG_WRITE: dsi_xfer.txDataType = kDSI_TxDataGenLongWr; break; case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: __fallthrough; case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: __fallthrough; case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: LOG_ERR("Generic Read not yet implemented or used"); return -ENOTSUP; default: LOG_ERR("Unsupported message type (%d)", msg->type); return -ENOTSUP; } status = DSI_TransferBlocking(config->base, &dsi_xfer); if (status != kStatus_Success) { LOG_ERR("Transmission failed"); return -EIO; } if (msg->rx_len != 0) { /* Return rx_len on a read */ return msg->rx_len; } /* Return tx_len on a write */ return msg->tx_len; } static struct mipi_dsi_driver_api dsi_mcux_api = { .attach = dsi_mcux_attach, .detach = dsi_mcux_detach, .transfer = dsi_mcux_transfer, }; 
static int mcux_mipi_dsi_init(const struct device *dev) { const struct mcux_mipi_dsi_config *config = dev->config; struct mcux_mipi_dsi_data *data = dev->data; #ifndef CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA /* Enable IRQ */ config->irq_config_func(dev); #endif k_sem_init(&data->transfer_sem, 0, 1); if (!device_is_ready(config->bit_clk_dev) || !device_is_ready(config->esc_clk_dev) || !device_is_ready(config->pixel_clk_dev)) { return -ENODEV; } return 0; } #define MCUX_DSI_DPI_CONFIG(id) \ IF_ENABLED(DT_NODE_HAS_PROP(DT_DRV_INST(id), nxp_lcdif), \ (.dpi_config = { \ .dpiColorCoding = DT_INST_ENUM_IDX(id, dpi_color_coding), \ .pixelPacket = DT_INST_ENUM_IDX(id, dpi_pixel_packet), \ .videoMode = DT_INST_ENUM_IDX(id, dpi_video_mode), \ .bllpMode = DT_INST_ENUM_IDX(id, dpi_bllp_mode), \ .pixelPayloadSize = DT_INST_PROP_BY_PHANDLE(id, nxp_lcdif, width), \ .panelHeight = DT_INST_PROP_BY_PHANDLE(id, nxp_lcdif, height), \ .polarityFlags = (DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), vsync_active) ? \ kDSI_DpiVsyncActiveHigh : \ kDSI_DpiVsyncActiveLow) | \ (DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hsync_active) ? 
\ kDSI_DpiHsyncActiveHigh : \ kDSI_DpiHsyncActiveLow), \ .hfp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hfront_porch), \ .hbp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hback_porch), \ .hsw = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hsync_len), \ .vfp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), vfront_porch), \ .vbp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), vback_porch), \ },)) #define MCUX_MIPI_DSI_DEVICE(id) \ COND_CODE_1(CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA, \ (), (static void mipi_dsi_##n##_irq_config_func(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(id), DT_INST_IRQ(id, priority), \ mipi_dsi_isr, DEVICE_DT_INST_GET(id), 0); \ irq_enable(DT_INST_IRQN(id)); \ })) \ \ static const struct mcux_mipi_dsi_config mipi_dsi_config_##id = { \ MCUX_DSI_DPI_CONFIG(id) \ COND_CODE_1(CONFIG_MIPI_DSI_MCUX_2L_SMARTDMA, \ (.smart_dma = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_NAME(id, smartdma)),), \ (.irq_config_func = mipi_dsi_##n##_irq_config_func,)) \ .base = (MIPI_DSI_HOST_Type *)DT_INST_REG_ADDR(id), \ .auto_insert_eotp = DT_INST_PROP(id, autoinsert_eotp), \ .noncontinuous_hs_clk = DT_INST_PROP(id, noncontinuous_hs_clk), \ .dphy_ref_freq = DT_INST_PROP_OR(id, dphy_ref_frequency, 0), \ .bit_clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR_BY_NAME(id, dphy)), \ .bit_clk_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_NAME(id, dphy, name), \ .esc_clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR_BY_NAME(id, esc)), \ .esc_clk_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_NAME(id, esc, name), \ .pixel_clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR_BY_NAME(id, pixel)), \ .pixel_clk_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL_BY_NAME(id, pixel, name), \ }; \ \ static struct mcux_mipi_dsi_data mipi_dsi_data_##id; \ DEVICE_DT_INST_DEFINE(id, \ &mcux_mipi_dsi_init, \ NULL, \ &mipi_dsi_data_##id, \ &mipi_dsi_config_##id, \ 
POST_KERNEL, \ CONFIG_MIPI_DSI_INIT_PRIORITY, \ &dsi_mcux_api); DT_INST_FOREACH_STATUS_OKAY(MCUX_MIPI_DSI_DEVICE) ```
/content/code_sandbox/drivers/mipi_dsi/dsi_mcux_2l.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,189
```unknown # Copyright (c) Commonwealth Scientific and Industrial Research Organisation (CSIRO) ABN 41 687 119 230. # # Hidden option for turning on the dummy driver for vnd,i3c devices # used in testing. config I3C_TEST def_bool DT_HAS_VND_I3C_ENABLED depends on DT_HAS_VND_I3C_ENABLED ```
/content/code_sandbox/drivers/i3c/Kconfig.test
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
70
```c /* * */ #define DT_DRV_COMPAT nxp_imx_mipi_dsi #include <zephyr/drivers/mipi_dsi.h> #include <fsl_mipi_dsi.h> #include <fsl_clock.h> #include <zephyr/logging/log.h> #include <soc.h> LOG_MODULE_REGISTER(dsi_mcux, CONFIG_MIPI_DSI_LOG_LEVEL); #define MIPI_DPHY_REF_CLK DT_INST_PROP(0, dphy_ref_frequency) /* Max output frequency of DPHY bit clock */ #define MIPI_DPHY_MAX_FREQ MHZ(800) /* PLL CN should be in the range of 1 to 32. */ #define DSI_DPHY_PLL_CN_MIN 1U #define DSI_DPHY_PLL_CN_MAX 32U /* PLL refClk / CN should be in the range of 24M to 30M. */ #define DSI_DPHY_PLL_REFCLK_CN_MIN MHZ(24) #define DSI_DPHY_PLL_REFCLK_CN_MAX MHZ(30) /* PLL CM should be in the range of 16 to 255. */ #define DSI_DPHY_PLL_CM_MIN 16U #define DSI_DPHY_PLL_CM_MAX 255U #define DSI_DPHY_PLL_CO_MIN 0 #define DSI_DPHY_PLL_CO_MAX 3 /* MAX DSI TX payload */ #define DSI_TX_MAX_PAYLOAD_BYTE (64U * 4U) struct display_mcux_mipi_dsi_config { MIPI_DSI_Type base; dsi_dpi_config_t dpi_config; bool auto_insert_eotp; uint32_t phy_clock; }; struct display_mcux_mipi_dsi_data { const struct device *dev; }; static uint32_t dsi_mcux_best_clock(uint32_t ref_clk, uint32_t target_freq) { /* * This function is intended to find the closest realizable DPHY * bit clock for a given target frequency, such that the DPHY clock * is faster than the target frequency. MCUX SDK implements a similar * function with DSI_DphyGetPllDivider, but this function will * configure the DPHY to output the closest realizable clock frequency * to the requested value. This can cause dropped pixels if * the output frequency is less than the requested one. 
*/ uint32_t co_shift, cn, cm; uint32_t cand_freq, vco_freq, refclk_cn_freq; uint32_t best_pll_freq = 0U; uint32_t best_diff = UINT32_MAX; /* * The formula for the DPHY output frequency is: * ref_clk * (CM / (CN * (1 << CO))) */ /* Test all available CO shifts (1x, 2x, 4x, 8x) */ for (co_shift = DSI_DPHY_PLL_CO_MIN; co_shift <= DSI_DPHY_PLL_CO_MAX; co_shift++) { /* Determine VCO output frequency before CO divider */ vco_freq = target_freq << co_shift; /* If desired VCO output frequency is too low, try next CO shift */ if (vco_freq < DSI_DPHY_PLL_VCO_MIN) { continue; } /* If desired VCO output frequency is too high, no point in * searching further */ if (vco_freq > DSI_DPHY_PLL_VCO_MAX) { break; } /* Search the best CN and CM values for desired VCO frequency */ for (cn = DSI_DPHY_PLL_CN_MIN; cn <= DSI_DPHY_PLL_CN_MAX; cn++) { refclk_cn_freq = ref_clk / cn; /* If the frequency after input divider is too high, * try next CN value */ if (refclk_cn_freq > DSI_DPHY_PLL_REFCLK_CN_MAX) { continue; } /* If the frequency after input divider is too low, * no point in trying higher dividers. */ if (refclk_cn_freq < DSI_DPHY_PLL_REFCLK_CN_MIN) { break; } /* Get the closest CM value for this vco frequency * and input divider. Round up, to bias towards higher * frequencies * NOTE: we differ from the SDK algorithm here, which * would round cm to the closest integer */ cm = (vco_freq + (refclk_cn_freq - 1)) / refclk_cn_freq; /* If CM was rounded up to one over valid range, * round down */ if (cm == (DSI_DPHY_PLL_CM_MAX + 1)) { cm = DSI_DPHY_PLL_CM_MAX; } /* If CM value is still out of range, CN/CO setting won't work */ if ((cm < DSI_DPHY_PLL_CM_MIN) || (cm > DSI_DPHY_PLL_CM_MAX)) { continue; } /* Calculate candidate frequency */ cand_freq = (refclk_cn_freq * cm) >> co_shift; if (cand_freq < target_freq) { /* SKIP frequencies less than target frequency. * this is where the algorithm differs from the * SDK. 
*/ continue; } else { if ((cand_freq - target_freq) < best_diff) { /* New best CN, CM, and CO found */ best_diff = (cand_freq - target_freq); best_pll_freq = cand_freq; } } if (best_diff == 0U) { /* We have found exact match for CN, CM, CO. * return now. */ return best_pll_freq; } } } return best_pll_freq; } static int dsi_mcux_attach(const struct device *dev, uint8_t channel, const struct mipi_dsi_device *mdev) { const struct display_mcux_mipi_dsi_config *config = dev->config; dsi_dphy_config_t dphy_config; dsi_config_t dsi_config; uint32_t mipi_dsi_esc_clk_hz; uint32_t mipi_dsi_tx_esc_clk_hz; uint32_t mipi_dsi_dphy_ref_clk_hz = MIPI_DPHY_REF_CLK; DSI_GetDefaultConfig(&dsi_config); dsi_config.numLanes = mdev->data_lanes; dsi_config.autoInsertEoTp = config->auto_insert_eotp; /* Init the DSI module. */ DSI_Init((MIPI_DSI_Type *)&config->base, &dsi_config); /* Init DPHY. * * The DPHY bit clock must be fast enough to send out the pixels, it should be * larger than: * * (Pixel clock * bit per output pixel) / number of MIPI data lane */ uint32_t mipi_dsi_dpi_clk_hz = CLOCK_GetRootClockFreq(kCLOCK_Root_Lcdif); /* Find the best realizable clock value for the MIPI DSI */ uint32_t mipi_dsi_dphy_bit_clk_hz = dsi_mcux_best_clock(mipi_dsi_dphy_ref_clk_hz, config->phy_clock); if (mipi_dsi_dphy_bit_clk_hz == 0) { LOG_ERR("DPHY cannot support requested PHY clock"); return -ENOTSUP; } /* Cap clock value to max frequency */ mipi_dsi_dphy_bit_clk_hz = MIN(mipi_dsi_dphy_bit_clk_hz, MIPI_DPHY_MAX_FREQ); mipi_dsi_esc_clk_hz = CLOCK_GetRootClockFreq(kCLOCK_Root_Mipi_Esc); mipi_dsi_tx_esc_clk_hz = mipi_dsi_esc_clk_hz / 3; DSI_GetDphyDefaultConfig(&dphy_config, mipi_dsi_dphy_bit_clk_hz, mipi_dsi_tx_esc_clk_hz); mipi_dsi_dphy_bit_clk_hz = DSI_InitDphy((MIPI_DSI_Type *)&config->base, &dphy_config, mipi_dsi_dphy_ref_clk_hz); LOG_DBG("DPHY clock set to %u", mipi_dsi_dphy_bit_clk_hz); /* * If nxp,lcdif node is present, then the MIPI DSI driver will * accept input on the DPI port from the 
LCDIF, and convert the output * to DSI data. This is useful for video mode, where the LCDIF can * constantly refresh the MIPI panel. */ if (mdev->mode_flags & MIPI_DSI_MODE_VIDEO) { /* Init DPI interface. */ DSI_SetDpiConfig((MIPI_DSI_Type *)&config->base, &config->dpi_config, mdev->data_lanes, mipi_dsi_dpi_clk_hz, mipi_dsi_dphy_bit_clk_hz); } imxrt_post_init_display_interface(); return 0; } static ssize_t dsi_mcux_transfer(const struct device *dev, uint8_t channel, struct mipi_dsi_msg *msg) { const struct display_mcux_mipi_dsi_config *config = dev->config; dsi_transfer_t dsi_xfer = {0}; status_t status; dsi_xfer.virtualChannel = channel; dsi_xfer.txDataSize = msg->tx_len; dsi_xfer.txData = msg->tx_buf; dsi_xfer.rxDataSize = msg->rx_len; dsi_xfer.rxData = msg->rx_buf; switch (msg->type) { case MIPI_DSI_DCS_READ: LOG_ERR("DCS Read not yet implemented or used"); return -ENOTSUP; case MIPI_DSI_DCS_SHORT_WRITE: dsi_xfer.sendDscCmd = true; dsi_xfer.dscCmd = msg->cmd; dsi_xfer.txDataType = kDSI_TxDataDcsShortWrNoParam; break; case MIPI_DSI_DCS_SHORT_WRITE_PARAM: dsi_xfer.sendDscCmd = true; dsi_xfer.dscCmd = msg->cmd; dsi_xfer.txDataType = kDSI_TxDataDcsShortWrOneParam; break; case MIPI_DSI_DCS_LONG_WRITE: dsi_xfer.sendDscCmd = true; dsi_xfer.dscCmd = msg->cmd; dsi_xfer.flags = kDSI_TransferUseHighSpeed; dsi_xfer.txDataType = kDSI_TxDataDcsLongWr; /* * Cap transfer size. Note that we subtract six bytes here, * one for the DSC command and one to insure that * transfers are still aligned on a pixel boundary * (two or three byte pixel sizes are supported). 
*/ dsi_xfer.txDataSize = MIN(dsi_xfer.txDataSize, (DSI_TX_MAX_PAYLOAD_BYTE - 6)); break; case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: dsi_xfer.txDataType = kDSI_TxDataGenShortWrNoParam; break; case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: dsi_xfer.txDataType = kDSI_TxDataGenShortWrOneParam; break; case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: dsi_xfer.txDataType = kDSI_TxDataGenShortWrTwoParam; break; case MIPI_DSI_GENERIC_LONG_WRITE: dsi_xfer.txDataType = kDSI_TxDataGenLongWr; break; case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: __fallthrough; case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: __fallthrough; case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: LOG_ERR("Generic Read not yet implemented or used"); return -ENOTSUP; default: LOG_ERR("Unsupported message type (%d)", msg->type); return -ENOTSUP; } status = DSI_TransferBlocking(&config->base, &dsi_xfer); if (status != kStatus_Success) { LOG_ERR("Transmission failed"); return -EIO; } if (msg->rx_len != 0) { /* Return rx_len on a read */ return dsi_xfer.rxDataSize; } /* Return tx_len on a write */ return dsi_xfer.txDataSize; } static struct mipi_dsi_driver_api dsi_mcux_api = { .attach = dsi_mcux_attach, .transfer = dsi_mcux_transfer, }; static int display_mcux_mipi_dsi_init(const struct device *dev) { imxrt_pre_init_display_interface(); return 0; } #define MCUX_DSI_DPI_CONFIG(id) \ IF_ENABLED(DT_NODE_HAS_PROP(DT_DRV_INST(id), nxp_lcdif), \ (.dpi_config = { \ .dpiColorCoding = DT_INST_ENUM_IDX(id, dpi_color_coding), \ .pixelPacket = DT_INST_ENUM_IDX(id, dpi_pixel_packet), \ .videoMode = DT_INST_ENUM_IDX(id, dpi_video_mode), \ .bllpMode = DT_INST_ENUM_IDX(id, dpi_bllp_mode), \ .pixelPayloadSize = DT_INST_PROP_BY_PHANDLE(id, nxp_lcdif, width), \ .panelHeight = DT_INST_PROP_BY_PHANDLE(id, nxp_lcdif, height), \ .polarityFlags = (DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hsync_active) ? 
\ kDSI_DpiHsyncActiveHigh : kDSI_DpiHsyncActiveLow) | \ (DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), vsync_active) ? \ kDSI_DpiVsyncActiveHigh : kDSI_DpiVsyncActiveLow), \ .hfp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hfront_porch), \ .hbp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hback_porch), \ .hsw = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), hsync_len), \ .vfp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), vfront_porch), \ .vbp = DT_PROP(DT_CHILD(DT_INST_PHANDLE(id, nxp_lcdif), \ display_timings), vback_porch), \ },)) #define MCUX_MIPI_DSI_DEVICE(id) \ static const struct display_mcux_mipi_dsi_config display_mcux_mipi_dsi_config_##id = { \ .base = { \ .host = (DSI_HOST_Type *)DT_INST_REG_ADDR_BY_IDX(id, 0), \ .dpi = (DSI_HOST_DPI_INTFC_Type *)DT_INST_REG_ADDR_BY_IDX(id, 1), \ .apb = (DSI_HOST_APB_PKT_IF_Type *)DT_INST_REG_ADDR_BY_IDX(id, 2), \ .dphy = (DSI_HOST_NXP_FDSOI28_DPHY_INTFC_Type *) \ DT_INST_REG_ADDR_BY_IDX(id, 3), \ }, \ MCUX_DSI_DPI_CONFIG(id) \ .auto_insert_eotp = DT_INST_PROP(id, autoinsert_eotp), \ .phy_clock = DT_INST_PROP(id, phy_clock), \ }; \ static struct display_mcux_mipi_dsi_data display_mcux_mipi_dsi_data_##id; \ DEVICE_DT_INST_DEFINE(id, \ &display_mcux_mipi_dsi_init, \ NULL, \ &display_mcux_mipi_dsi_data_##id, \ &display_mcux_mipi_dsi_config_##id, \ POST_KERNEL, \ CONFIG_MIPI_DSI_INIT_PRIORITY, \ &dsi_mcux_api); DT_INST_FOREACH_STATUS_OKAY(MCUX_MIPI_DSI_DEVICE) ```
/content/code_sandbox/drivers/mipi_dsi/dsi_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,593
```c /* * */ #include <string.h> #include <zephyr/toolchain.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/__assert.h> #include <zephyr/drivers/i3c.h> #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(i3c, CONFIG_I3C_LOG_LEVEL); int i3c_ccc_do_getbcr(struct i3c_device_desc *target, struct i3c_ccc_getbcr *bcr) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(bcr != NULL); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &bcr->bcr; ccc_tgt_payload.data_len = sizeof(bcr->bcr); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_GETBCR; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_getdcr(struct i3c_device_desc *target, struct i3c_ccc_getdcr *dcr) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(dcr != NULL); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &dcr->dcr; ccc_tgt_payload.data_len = sizeof(dcr->dcr); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_GETDCR; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_getpid(struct i3c_device_desc *target, struct i3c_ccc_getpid *pid) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(pid != NULL); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &pid->pid[0]; ccc_tgt_payload.data_len = sizeof(pid->pid); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = 
I3C_CCC_GETPID; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_rstact_all(const struct device *controller, enum i3c_ccc_rstact_defining_byte action) { struct i3c_ccc_payload ccc_payload; uint8_t def_byte; __ASSERT_NO_MSG(controller != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_RSTACT(true); def_byte = (uint8_t)action; ccc_payload.ccc.data = &def_byte; ccc_payload.ccc.data_len = 1U; return i3c_do_ccc(controller, &ccc_payload); } int i3c_ccc_do_rstdaa_all(const struct device *controller) { struct i3c_ccc_payload ccc_payload; __ASSERT_NO_MSG(controller != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_RSTDAA; return i3c_do_ccc(controller, &ccc_payload); } int i3c_ccc_do_setdasa(const struct i3c_device_desc *target) { struct i3c_driver_data *bus_data = (struct i3c_driver_data *)target->bus->data; struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t dyn_addr; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); if ((target->static_addr == 0U) || (target->dynamic_addr != 0U)) { return -EINVAL; } /* * Note that the 7-bit address needs to start at bit 1 * (aka left-justified). So shift left by 1; */ dyn_addr = (target->init_dynamic_addr ? 
target->init_dynamic_addr : target->static_addr) << 1; /* check that initial dynamic address is free before setting it */ if ((target->init_dynamic_addr != 0) && (target->init_dynamic_addr != target->static_addr)) { if (!i3c_addr_slots_is_free(&bus_data->attached_dev.addr_slots, dyn_addr >> 1)) { return -EINVAL; } } ccc_tgt_payload.addr = target->static_addr; ccc_tgt_payload.rnw = 0; ccc_tgt_payload.data = &dyn_addr; ccc_tgt_payload.data_len = 1; memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_SETDASA; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_setnewda(const struct i3c_device_desc *target, struct i3c_ccc_address new_da) { struct i3c_driver_data *bus_data = (struct i3c_driver_data *)target->bus->data; struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t new_dyn_addr; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); if (target->dynamic_addr == 0U) { return -EINVAL; } /* * Note that the 7-bit address needs to start at bit 1 * (aka left-justified). 
So shift left by 1; */ new_dyn_addr = new_da.addr << 1; /* check that initial dynamic address is free before setting it */ if (target->dynamic_addr != new_da.addr) { if (!i3c_addr_slots_is_free(&bus_data->attached_dev.addr_slots, new_da.addr)) { return -EINVAL; } } ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 0; ccc_tgt_payload.data = &new_dyn_addr; ccc_tgt_payload.data_len = 1; memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_SETNEWDA; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_events_all_set(const struct device *controller, bool enable, struct i3c_ccc_events *events) { struct i3c_ccc_payload ccc_payload; __ASSERT_NO_MSG(controller != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = enable ? I3C_CCC_ENEC(true) : I3C_CCC_DISEC(true); ccc_payload.ccc.data = &events->events; ccc_payload.ccc.data_len = sizeof(events->events); return i3c_do_ccc(controller, &ccc_payload); } int i3c_ccc_do_events_set(struct i3c_device_desc *target, bool enable, struct i3c_ccc_events *events) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); if (target->dynamic_addr == 0U) { return -EINVAL; } ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 0; ccc_tgt_payload.data = &events->events; ccc_tgt_payload.data_len = sizeof(events->events); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = enable ? 
I3C_CCC_ENEC(false) : I3C_CCC_DISEC(false); ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_setmwl_all(const struct device *controller, const struct i3c_ccc_mwl *mwl) { struct i3c_ccc_payload ccc_payload; uint8_t data[2]; __ASSERT_NO_MSG(controller != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_SETMWL(true); ccc_payload.ccc.data = &data[0]; ccc_payload.ccc.data_len = sizeof(data); /* The actual data is MSB first. So order the data. */ sys_put_be16(mwl->len, data); return i3c_do_ccc(controller, &ccc_payload); } int i3c_ccc_do_setmwl(const struct i3c_device_desc *target, const struct i3c_ccc_mwl *mwl) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t data[2]; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 0; ccc_tgt_payload.data = &data[0]; ccc_tgt_payload.data_len = sizeof(data); ccc_payload.ccc.id = I3C_CCC_SETMWL(false); ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; /* The actual length is MSB first. So order the data. 
*/ sys_put_be16(mwl->len, data); return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_getmwl(const struct i3c_device_desc *target, struct i3c_ccc_mwl *mwl) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t data[2]; int ret; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(mwl != NULL); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &data[0]; ccc_tgt_payload.data_len = sizeof(data); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_GETMWL; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; ret = i3c_do_ccc(target->bus, &ccc_payload); if (ret == 0) { /* The actual length is MSB first. So order the data. */ mwl->len = sys_get_be16(data); } return ret; } int i3c_ccc_do_setmrl_all(const struct device *controller, const struct i3c_ccc_mrl *mrl, bool has_ibi_size) { struct i3c_ccc_payload ccc_payload; uint8_t data[3]; __ASSERT_NO_MSG(controller != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_SETMRL(true); ccc_payload.ccc.data = &data[0]; ccc_payload.ccc.data_len = has_ibi_size ? 3 : 2; /* The actual length is MSB first. So order the data. */ sys_put_be16(mrl->len, data); if (has_ibi_size) { data[2] = mrl->ibi_len; } return i3c_do_ccc(controller, &ccc_payload); } int i3c_ccc_do_setmrl(const struct i3c_device_desc *target, const struct i3c_ccc_mrl *mrl) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t data[3]; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 0; ccc_tgt_payload.data = &data[0]; ccc_payload.ccc.id = I3C_CCC_SETMRL(false); ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; /* The actual length is MSB first. 
So order the data. */ sys_put_be16(mrl->len, data); if ((target->bcr & I3C_BCR_IBI_PAYLOAD_HAS_DATA_BYTE) == I3C_BCR_IBI_PAYLOAD_HAS_DATA_BYTE) { ccc_tgt_payload.data_len = 3; data[2] = mrl->ibi_len; } else { ccc_tgt_payload.data_len = 2; } return i3c_do_ccc(target->bus, &ccc_payload); } int i3c_ccc_do_getmrl(const struct i3c_device_desc *target, struct i3c_ccc_mrl *mrl) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t data[3]; bool has_ibi_sz; int ret; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(mrl != NULL); has_ibi_sz = (target->bcr & I3C_BCR_IBI_PAYLOAD_HAS_DATA_BYTE) == I3C_BCR_IBI_PAYLOAD_HAS_DATA_BYTE; ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &data[0]; ccc_tgt_payload.data_len = has_ibi_sz ? 3 : 2; memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_GETMRL; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; ret = i3c_do_ccc(target->bus, &ccc_payload); if (ret == 0) { /* The actual length is MSB first. So order the data. 
*/ mrl->len = sys_get_be16(data); if (has_ibi_sz) { mrl->ibi_len = data[2]; } } return ret; } int i3c_ccc_do_getstatus(const struct i3c_device_desc *target, union i3c_ccc_getstatus *status, enum i3c_ccc_getstatus_fmt fmt, enum i3c_ccc_getstatus_defbyte defbyte) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t defining_byte; uint8_t data[2]; int ret; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(status != NULL); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &data[0]; if (fmt == GETSTATUS_FORMAT_1) { ccc_tgt_payload.data_len = 2; } else if (fmt == GETSTATUS_FORMAT_2) { switch (defbyte) { case GETSTATUS_FORMAT_2_TGTSTAT: __fallthrough; case GETSTATUS_FORMAT_2_PRECR: ccc_tgt_payload.data_len = 2; break; default: ret = -EINVAL; goto out; } } else { ret = -EINVAL; goto out; } memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_GETSTATUS; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; if (fmt == GETSTATUS_FORMAT_2) { defining_byte = (uint8_t)defbyte; ccc_payload.ccc.data = &defining_byte; ccc_payload.ccc.data_len = 1; } ret = i3c_do_ccc(target->bus, &ccc_payload); if (ret == 0) { /* Received data is MSB first. So order the data. 
*/ if (fmt == GETSTATUS_FORMAT_1) { status->fmt1.status = sys_get_be16(data); } else if (fmt == GETSTATUS_FORMAT_2) { switch (defbyte) { case GETSTATUS_FORMAT_2_TGTSTAT: __fallthrough; case GETSTATUS_FORMAT_2_PRECR: status->fmt2.raw_u16 = sys_get_be16(data); break; default: break; } } } out: return ret; } int i3c_ccc_do_getcaps(const struct i3c_device_desc *target, union i3c_ccc_getcaps *caps, enum i3c_ccc_getcaps_fmt fmt, enum i3c_ccc_getcaps_defbyte defbyte) { struct i3c_ccc_payload ccc_payload; struct i3c_ccc_target_payload ccc_tgt_payload; uint8_t defining_byte; uint8_t data[4]; uint8_t len; int ret; __ASSERT_NO_MSG(target != NULL); __ASSERT_NO_MSG(target->bus != NULL); __ASSERT_NO_MSG(caps != NULL); ccc_tgt_payload.addr = target->dynamic_addr; ccc_tgt_payload.rnw = 1; ccc_tgt_payload.data = &data[0]; if (fmt == GETCAPS_FORMAT_1) { /* Could be 1-4 Data Bytes Returned */ ccc_tgt_payload.data_len = 4; } else if (fmt == GETCAPS_FORMAT_2) { switch (defbyte) { case GETCAPS_FORMAT_2_CRCAPS: __fallthrough; case GETCAPS_FORMAT_2_VTCAPS: /* Could be 1-2 Data Bytes Returned*/ ccc_tgt_payload.data_len = 2; break; case GETCAPS_FORMAT_2_TGTCAPS: __fallthrough; case GETCAPS_FORMAT_2_TESTPAT: /* Could be 1-4 Data Bytes Returned */ ccc_tgt_payload.data_len = 4; break; default: ret = -EINVAL; goto out; } } else { ret = -EINVAL; goto out; } memset(&ccc_payload, 0, sizeof(ccc_payload)); ccc_payload.ccc.id = I3C_CCC_GETCAPS; ccc_payload.targets.payloads = &ccc_tgt_payload; ccc_payload.targets.num_targets = 1; if (fmt == GETCAPS_FORMAT_2) { defining_byte = (uint8_t)defbyte; ccc_payload.ccc.data = &defining_byte; ccc_payload.ccc.data_len = 1; } ret = i3c_do_ccc(target->bus, &ccc_payload); if (ret == 0) { /* GETCAPS will return a variable length */ len = ccc_tgt_payload.num_xfer; if (fmt == GETCAPS_FORMAT_1) { memcpy(caps->fmt1.getcaps, data, len); /* for values not received, assume default (1'b0) */ memset(&caps->fmt1.getcaps[len], 0, sizeof(caps->fmt1.getcaps) - len); } else if 
(fmt == GETCAPS_FORMAT_2) { switch (defbyte) { case GETCAPS_FORMAT_2_CRCAPS: memcpy(caps->fmt2.crcaps, data, len); /* for values not received, assume default (1'b0) */ memset(&caps->fmt2.crcaps[len], 0, sizeof(caps->fmt2.crcaps) - len); break; case GETCAPS_FORMAT_2_VTCAPS: memcpy(caps->fmt2.vtcaps, data, len); /* for values not received, assume default (1'b0) */ memset(&caps->fmt2.vtcaps[len], 0, sizeof(caps->fmt2.vtcaps) - len); break; case GETCAPS_FORMAT_2_TGTCAPS: memcpy(caps->fmt2.tgtcaps, data, len); /* for values not received, assume default (1'b0) */ memset(&caps->fmt2.tgtcaps[len], 0, sizeof(caps->fmt2.tgtcaps) - len); break; case GETCAPS_FORMAT_2_TESTPAT: /* should always be 4 data bytes */ caps->fmt2.testpat = sys_get_be32(data); break; default: break; } } } out: return ret; } ```
/content/code_sandbox/drivers/i3c/i3c_ccc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,804
```unknown # DT_COMPAT_NXP_MCUX_I3C := nxp,mcux-i3c module = I3C_MCUX module-str = i3c-mcux source "subsys/logging/Kconfig.template.log_config" config I3C_MCUX bool "MCUX I3C driver" depends on HAS_MCUX depends on DT_HAS_NXP_MCUX_I3C_ENABLED select PINCTRL select I3C_IBI_WORKQUEUE if I3C_USE_IBI default y help Enable mcux I3C driver. ```
/content/code_sandbox/drivers/i3c/Kconfig.nxp
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
121
```c /* * */ #include <string.h> #include <zephyr/toolchain.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/slist.h> #include <zephyr/drivers/i3c.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(i3c, CONFIG_I3C_LOG_LEVEL); void i3c_dump_msgs(const char *name, const struct i3c_msg *msgs, uint8_t num_msgs, struct i3c_device_desc *target) { LOG_DBG("I3C msg: %s, addr=%x", name, target->dynamic_addr); for (unsigned int i = 0; i < num_msgs; i++) { const struct i3c_msg *msg = &msgs[i]; LOG_DBG(" %c len=%02x: ", msg->flags & I3C_MSG_READ ? 'R' : 'W', msg->len); if (!(msg->flags & I3C_MSG_READ)) { LOG_HEXDUMP_DBG(msg->buf, msg->len, "contents:"); } } } void i3c_addr_slots_set(struct i3c_addr_slots *slots, uint8_t dev_addr, enum i3c_addr_slot_status status) { int bitpos; int idx; __ASSERT_NO_MSG(slots != NULL); if (dev_addr > I3C_MAX_ADDR) { /* Invalid address. Do nothing. */ return; } bitpos = dev_addr * 2; idx = bitpos / BITS_PER_LONG; bitpos %= BITS_PER_LONG; slots->slots[idx] &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK << bitpos); slots->slots[idx] |= status << bitpos; } enum i3c_addr_slot_status i3c_addr_slots_status(struct i3c_addr_slots *slots, uint8_t dev_addr) { unsigned long status; int bitpos; int idx; __ASSERT_NO_MSG(slots != NULL); if (dev_addr > I3C_MAX_ADDR) { /* Invalid address. * Simply says it's reserved so it will not be * used for anything. 
*/ return I3C_ADDR_SLOT_STATUS_RSVD; } bitpos = dev_addr * 2; idx = bitpos / BITS_PER_LONG; bitpos %= BITS_PER_LONG; status = slots->slots[idx] >> bitpos; status &= I3C_ADDR_SLOT_STATUS_MASK; return status; } int i3c_addr_slots_init(const struct device *dev) { struct i3c_driver_data *data = (struct i3c_driver_data *)dev->data; const struct i3c_driver_config *config = (const struct i3c_driver_config *)dev->config; int i, ret = 0; struct i3c_device_desc *i3c_dev; struct i3c_i2c_device_desc *i2c_dev; __ASSERT_NO_MSG(dev != NULL); (void)memset(&data->attached_dev.addr_slots, 0, sizeof(data->attached_dev.addr_slots)); sys_slist_init(&data->attached_dev.devices.i3c); sys_slist_init(&data->attached_dev.devices.i2c); /* Address restrictions (ref 5.1.2.2.5, Specification for I3C v1.1.1) */ for (i = 0; i <= 7; i++) { /* Addresses 0 to 7 are reserved */ i3c_addr_slots_set(&data->attached_dev.addr_slots, i, I3C_ADDR_SLOT_STATUS_RSVD); /* * Addresses within a single bit error of broadcast address * are also reserved. */ i3c_addr_slots_set(&data->attached_dev.addr_slots, I3C_BROADCAST_ADDR ^ BIT(i), I3C_ADDR_SLOT_STATUS_RSVD); } /* The broadcast address is reserved */ i3c_addr_slots_set(&data->attached_dev.addr_slots, I3C_BROADCAST_ADDR, I3C_ADDR_SLOT_STATUS_RSVD); /* * Mark all I2C addresses first. */ for (i = 0; i < config->dev_list.num_i2c; i++) { i2c_dev = &config->dev_list.i2c[i]; ret = i3c_attach_i2c_device(i2c_dev); if (ret != 0) { /* Address slot is not free */ ret = -EINVAL; goto out; } } /* * If there is a static address for the I3C devices, check * if this address is free, and there is no other devices of * the same (pre-assigned) address on the bus. 
*/ for (i = 0; i < config->dev_list.num_i3c; i++) { i3c_dev = &config->dev_list.i3c[i]; ret = i3c_attach_i3c_device(i3c_dev); if (ret != 0) { /* Address slot is not free */ ret = -EINVAL; goto out; } } out: return ret; } bool i3c_addr_slots_is_free(struct i3c_addr_slots *slots, uint8_t dev_addr) { enum i3c_addr_slot_status status; __ASSERT_NO_MSG(slots != NULL); status = i3c_addr_slots_status(slots, dev_addr); return (status == I3C_ADDR_SLOT_STATUS_FREE); } uint8_t i3c_addr_slots_next_free_find(struct i3c_addr_slots *slots, uint8_t start_addr) { uint8_t addr; enum i3c_addr_slot_status status; /* Addresses 0 to 7 are reserved. So start at 8. */ for (addr = MAX(start_addr, 8); addr < I3C_MAX_ADDR; addr++) { status = i3c_addr_slots_status(slots, addr); if (status == I3C_ADDR_SLOT_STATUS_FREE) { return addr; } } return 0; } struct i3c_device_desc *i3c_dev_list_find(const struct i3c_dev_list *dev_list, const struct i3c_device_id *id) { int i; struct i3c_device_desc *ret = NULL; __ASSERT_NO_MSG(dev_list != NULL); /* this only searches known I3C PIDs */ for (i = 0; i < dev_list->num_i3c; i++) { struct i3c_device_desc *desc = &dev_list->i3c[i]; if (desc->pid == id->pid) { ret = desc; break; } } return ret; } struct i3c_device_desc *i3c_dev_list_i3c_addr_find(struct i3c_dev_attached_list *dev_list, uint8_t addr) { sys_snode_t *node; struct i3c_device_desc *ret = NULL; __ASSERT_NO_MSG(dev_list != NULL); SYS_SLIST_FOR_EACH_NODE(&dev_list->devices.i3c, node) { struct i3c_device_desc *desc = (void *)node; if (desc->dynamic_addr == addr) { ret = desc; break; } } return ret; } struct i3c_i2c_device_desc *i3c_dev_list_i2c_addr_find(struct i3c_dev_attached_list *dev_list, uint16_t addr) { sys_snode_t *node; struct i3c_i2c_device_desc *ret = NULL; __ASSERT_NO_MSG(dev_list != NULL); SYS_SLIST_FOR_EACH_NODE(&dev_list->devices.i2c, node) { struct i3c_i2c_device_desc *desc = (void *)node; if (desc->addr == addr) { ret = desc; break; } } return ret; } int 
i3c_determine_default_addr(struct i3c_device_desc *target, uint8_t *addr) { struct i3c_driver_data *data = (struct i3c_driver_data *)target->bus->data; /* If dynamic addr is set, then it assumed that it was assigned by a primary controller */ if (target->dynamic_addr == 0) { /* It is assumed that SETDASA or ENTDAA will be run after this */ if (target->init_dynamic_addr != 0U) { /* initial dynamic address is requested */ if (target->static_addr == 0) { /* SA is set to 0, so DA will be set with ENTDAA */ if (i3c_addr_slots_is_free(&data->attached_dev.addr_slots, target->init_dynamic_addr)) { /* Set DA during ENTDAA */ *addr = target->init_dynamic_addr; } else { /* address is not free, get the next one */ *addr = i3c_addr_slots_next_free_find( &data->attached_dev.addr_slots, 0); } } else { /* Use the init dynamic address as it's DA, but the RR will need to * be first set with it's SA to run SETDASA, the RR address will * need be updated after SETDASA with the request dynamic address */ if (i3c_addr_slots_is_free(&data->attached_dev.addr_slots, target->static_addr)) { *addr = target->static_addr; } else { /* static address has already been taken */ return -EINVAL; } } } else { /* no init dynamic address is requested */ if (target->static_addr != 0) { if (i3c_addr_slots_is_free(&data->attached_dev.addr_slots, target->static_addr)) { /* static exists, set DA with same SA during SETDASA*/ *addr = target->static_addr; } else { /* static address has already been taken */ return -EINVAL; } } else { /* pick a DA to use */ *addr = i3c_addr_slots_next_free_find( &data->attached_dev.addr_slots, 0); } } } else { *addr = target->dynamic_addr; } return 0; } int i3c_attach_i3c_device(struct i3c_device_desc *target) { struct i3c_driver_data *data = (struct i3c_driver_data *)target->bus->data; const struct i3c_driver_api *api = (const struct i3c_driver_api *)target->bus->api; sys_snode_t *node; uint8_t addr = 0; int status = 0; /* check to see if the device has already been attached 
*/ if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i3c, node) { if (node == &target->node) { return -EINVAL; } } } status = i3c_determine_default_addr(target, &addr); if (status != 0) { return status; } sys_slist_append(&data->attached_dev.devices.i3c, &target->node); if (api->attach_i3c_device != NULL) { status = api->attach_i3c_device(target->bus, target, addr); } i3c_addr_slots_mark_i3c(&data->attached_dev.addr_slots, addr); return status; } int i3c_reattach_i3c_device(struct i3c_device_desc *target, uint8_t old_dyn_addr) { struct i3c_driver_data *data = (struct i3c_driver_data *)target->bus->data; const struct i3c_driver_api *api = (const struct i3c_driver_api *)target->bus->api; int status = 0; if (!i3c_addr_slots_is_free(&data->attached_dev.addr_slots, target->dynamic_addr)) { return -EINVAL; } if (api->reattach_i3c_device != NULL) { status = api->reattach_i3c_device(target->bus, target, old_dyn_addr); } if (old_dyn_addr) { /* mark the old address as free */ i3c_addr_slots_mark_free(&data->attached_dev.addr_slots, old_dyn_addr); } i3c_addr_slots_mark_i3c(&data->attached_dev.addr_slots, target->dynamic_addr); return status; } int i3c_detach_i3c_device(struct i3c_device_desc *target) { struct i3c_driver_data *data = (struct i3c_driver_data *)target->bus->data; const struct i3c_driver_api *api = (const struct i3c_driver_api *)target->bus->api; int status = 0; if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { if (!sys_slist_find_and_remove(&data->attached_dev.devices.i3c, &target->node)) { return -EINVAL; } } else { return -EINVAL; } if (api->detach_i3c_device != NULL) { status = api->detach_i3c_device(target->bus, target); } i3c_addr_slots_mark_free(&data->attached_dev.addr_slots, target->dynamic_addr ? 
target->dynamic_addr : target->static_addr); return status; } int i3c_attach_i2c_device(struct i3c_i2c_device_desc *target) { struct i3c_driver_data *data = (struct i3c_driver_data *)target->bus->data; const struct i3c_driver_api *api = (const struct i3c_driver_api *)target->bus->api; sys_snode_t *node; int status = 0; /* check to see if the device has already been attached */ if (!sys_slist_is_empty(&data->attached_dev.devices.i2c)) { SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i2c, node) { if (node == &target->node) { return -EINVAL; } } } if (!i3c_addr_slots_is_free(&data->attached_dev.addr_slots, target->addr)) { return -EINVAL; } sys_slist_append(&data->attached_dev.devices.i2c, &target->node); if (api->attach_i2c_device != NULL) { status = api->attach_i2c_device(target->bus, target); } i3c_addr_slots_mark_i2c(&data->attached_dev.addr_slots, target->addr); return status; } int i3c_detach_i2c_device(struct i3c_i2c_device_desc *target) { struct i3c_driver_data *data = (struct i3c_driver_data *)target->bus->data; const struct i3c_driver_api *api = (const struct i3c_driver_api *)target->bus->api; int status = 0; if (!sys_slist_is_empty(&data->attached_dev.devices.i2c)) { if (!sys_slist_find_and_remove(&data->attached_dev.devices.i2c, &target->node)) { return -EINVAL; } } else { return -EINVAL; } if (api->detach_i2c_device != NULL) { status = api->detach_i2c_device(target->bus, target); } i3c_addr_slots_mark_free(&data->attached_dev.addr_slots, target->addr); return status; } int i3c_dev_list_daa_addr_helper(struct i3c_addr_slots *addr_slots, const struct i3c_dev_list *dev_list, uint64_t pid, bool must_match, bool assigned_okay, struct i3c_device_desc **target, uint8_t *addr) { struct i3c_device_desc *desc; const uint16_t vendor_id = (uint16_t)(pid >> 32); const uint32_t part_no = (uint32_t)(pid & 0xFFFFFFFFU); uint8_t dyn_addr = 0; int ret = 0; const struct i3c_device_id i3c_id = I3C_DEVICE_ID(pid); desc = i3c_dev_list_find(dev_list, &i3c_id); if 
(must_match && (desc == NULL)) { /* * No device descriptor matching incoming PID and * that we want an exact match. */ ret = -ENODEV; LOG_DBG("PID 0x%04x%08x is not in registered device list", vendor_id, part_no); goto out; } if (desc != NULL && desc->dynamic_addr != 0U) { if (assigned_okay) { /* Return the already assigned address if desired so. */ dyn_addr = desc->dynamic_addr; goto out; } else { /* * Bail If target already has an assigned address. * This is probably due to having the same PIDs for multiple targets * in the device tree. */ LOG_ERR("PID 0x%04x%08x already has " "dynamic address (0x%02x) assigned", vendor_id, part_no, desc->dynamic_addr); ret = -EINVAL; goto err; } } /* * Use the desired dynamic address as the new dynamic address * if the slot is free. */ if (desc != NULL && desc->init_dynamic_addr != 0U) { if (i3c_addr_slots_is_free(addr_slots, desc->init_dynamic_addr)) { dyn_addr = desc->init_dynamic_addr; goto out; } } /* * Find the next available address. */ dyn_addr = i3c_addr_slots_next_free_find(addr_slots, 0); if (dyn_addr == 0U) { /* No free addresses available */ LOG_DBG("No more free addresses available."); ret = -ENOSPC; } out: *addr = dyn_addr; *target = desc; err: return ret; } int i3c_device_basic_info_get(struct i3c_device_desc *target) { int ret; uint8_t tmp_bcr; struct i3c_ccc_getbcr bcr = {0}; struct i3c_ccc_getdcr dcr = {0}; struct i3c_ccc_mrl mrl = {0}; struct i3c_ccc_mwl mwl = {0}; union i3c_ccc_getcaps caps = {0}; /* * Since some CCC functions requires BCR to function * correctly, we save the BCR here and update the BCR * in the descriptor. If any following operations fails, * we can restore the BCR. 
*/ tmp_bcr = target->bcr; /* GETBCR */ ret = i3c_ccc_do_getbcr(target, &bcr); if (ret != 0) { goto out; } target->bcr = bcr.bcr; /* GETDCR */ ret = i3c_ccc_do_getdcr(target, &dcr); if (ret != 0) { goto out; } /* GETMRL */ if (i3c_ccc_do_getmrl(target, &mrl) != 0) { /* GETMRL may be optionally supported if no settable limit */ LOG_DBG("No settable limit for GETMRL"); } /* GETMWL */ if (i3c_ccc_do_getmwl(target, &mwl) != 0) { /* GETMWL may be optionally supported if no settable limit */ LOG_DBG("No settable limit for GETMWL"); } /* GETCAPS */ ret = i3c_ccc_do_getcaps_fmt1(target, &caps); /* * GETCAPS (GETHDRCAP) is required to be supported for I3C v1.0 targets that support HDR * modes and required if the Target's I3C version is v1.1 or later, but which the version it * supports it can't be known ahead of time. So if the BCR bit for Advanced capabilities is * set, then it is expected for GETCAPS to always be supported. Otherwise, then it's a I3C * v1.0 device without any HDR modes so do not treat as an error if no valid response. */ if (ret == 0) { memcpy(&target->getcaps, &caps, sizeof(target->getcaps)); } else if ((ret != 0) && (target->bcr & I3C_BCR_ADV_CAPABILITIES)) { goto out; } else { ret = 0; } target->dcr = dcr.dcr; target->data_length.mrl = mrl.len; target->data_length.mwl = mwl.len; target->data_length.max_ibi = mrl.ibi_len; out: if (ret != 0) { /* Restore BCR is any CCC fails. */ target->bcr = tmp_bcr; } return ret; } /** * @brief Do SETDASA to set static address as dynamic address. * * @param dev Pointer to the device driver instance. * @param[out] True if DAA is still needed. False if all registered * devices have static addresses. * * @retval 0 if successful. 
*/ static int i3c_bus_setdasa(const struct device *dev, const struct i3c_dev_list *dev_list, bool *need_daa) { int i, ret; *need_daa = false; /* Loop through the registered I3C devices */ for (i = 0; i < dev_list->num_i3c; i++) { struct i3c_device_desc *desc = &dev_list->i3c[i]; /* * A device without static address => need to do * dynamic address assignment. */ if (desc->static_addr == 0U) { *need_daa = true; continue; } LOG_DBG("SETDASA for 0x%x", desc->static_addr); ret = i3c_ccc_do_setdasa(desc); if (ret == 0) { desc->dynamic_addr = (desc->init_dynamic_addr ? desc->init_dynamic_addr : desc->static_addr); if (desc->dynamic_addr != desc->static_addr) { if (i3c_reattach_i3c_device(desc, desc->static_addr) != 0) { LOG_ERR("Failed to reattach %s (%d)", desc->dev->name, ret); } } } else { /* SETDASA failed, detach it from the controller */ if (i3c_detach_i3c_device(desc) != 0) { LOG_ERR("Failed to detach %s (%d)", desc->dev->name, ret); } LOG_ERR("SETDASA error on address 0x%x (%d)", desc->static_addr, ret); continue; } } return 0; } int i3c_bus_init(const struct device *dev, const struct i3c_dev_list *dev_list) { int i, ret = 0; bool need_daa = true; struct i3c_ccc_events i3c_events; #ifdef CONFIG_I3C_INIT_RSTACT /* * Reset all connected targets. Also reset dynamic * addresses for all devices as we have no idea what * dynamic addresses the connected devices have * (e.g. assigned during previous power cycle). * * Note that we ignore error for both RSTACT and RSTDAA * as there may not be any connected devices responding * to these CCCs. */ if (i3c_ccc_do_rstact_all(dev, I3C_CCC_RSTACT_RESET_WHOLE_TARGET) != 0) { /* * Reset Whole Target support is not required so * if there is any NACK, we want to at least reset * the I3C peripheral of targets. 
*/ LOG_DBG("Broadcast RSTACT (whole target) was NACK."); if (i3c_ccc_do_rstact_all(dev, I3C_CCC_RSTACT_PERIPHERAL_ONLY) != 0) { LOG_DBG("Broadcast RSTACT (peripehral) was NACK."); } } #endif if (i3c_ccc_do_rstdaa_all(dev) != 0) { LOG_DBG("Broadcast RSTDAA was NACK."); } /* * Disable all events from targets to avoid them * interfering with bus initialization, * especially during DAA. */ i3c_events.events = I3C_CCC_EVT_ALL; ret = i3c_ccc_do_events_all_set(dev, false, &i3c_events); if (ret != 0) { LOG_DBG("Broadcast DISEC was NACK."); } /* * Set static addresses as dynamic addresses. */ ret = i3c_bus_setdasa(dev, dev_list, &need_daa); if (ret != 0) { goto err_out; } /* * Perform Dynamic Address Assignment if needed. */ if (need_daa) { ret = i3c_do_daa(dev); if (ret != 0) { /* * Spec says to try once more * if DAA fails the first time. */ ret = i3c_do_daa(dev); if (ret != 0) { /* * Failure to finish dynamic address assignment * is not the end of world... hopefully. * Continue on so the devices already have * addresses can still function. */ LOG_ERR("DAA was not successful."); } } } /* * Loop through the registered I3C devices to retrieve * basic target information. */ for (i = 0; i < dev_list->num_i3c; i++) { struct i3c_device_desc *desc = &dev_list->i3c[i]; if (desc->dynamic_addr == 0U) { continue; } ret = i3c_device_basic_info_get(desc); if (ret != 0) { LOG_ERR("Error getting basic device info for 0x%02x", desc->static_addr); } else { LOG_DBG("Target 0x%02x, BCR 0x%02x, DCR 0x%02x, MRL %d, MWL %d, IBI %d", desc->dynamic_addr, desc->bcr, desc->dcr, desc->data_length.mrl, desc->data_length.mwl, desc->data_length.max_ibi); } } /* * Only re-enable Hot-Join from targets. * Target interrupts will be enabled when IBI is enabled. * And transferring controller role is not supported so not need to * enable the event. 
*/ i3c_events.events = I3C_CCC_EVT_HJ; ret = i3c_ccc_do_events_all_set(dev, true, &i3c_events); if (ret != 0) { LOG_DBG("Broadcast ENEC was NACK."); } err_out: return ret; } ```
/content/code_sandbox/drivers/i3c/i3c_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,996
```unknown # module = I3C_CADENCE module-str = i3c-cadence source "subsys/logging/Kconfig.template.log_config" config I3C_CADENCE bool "Cadence I3C driver" select I3C_IBI_WORKQUEUE if I3C_USE_IBI depends on DT_HAS_CDNS_I3C_ENABLED default y help Enable Cadence I3C driver. ```
/content/code_sandbox/drivers/i3c/Kconfig.cdns
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
91
```c /* * */ /* * This is not a real I3C driver. It is used to instantiate struct * devices for the "vnd,i3c" devicetree compatible used in test code. */ #define DT_DRV_COMPAT vnd_i3c #include <zephyr/drivers/i3c.h> #include <zephyr/device.h> #include <zephyr/kernel.h> static int vnd_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config) { return -ENOTSUP; } static int vnd_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config) { return -ENOTSUP; } static int vnd_i3c_recover_bus(const struct device *dev) { return -ENOTSUP; } static const struct i3c_driver_api vnd_i3c_api = { .configure = vnd_i3c_configure, .config_get = vnd_i3c_config_get, .recover_bus = vnd_i3c_recover_bus, }; #define VND_I3C_INIT(n) \ DEVICE_DT_INST_DEFINE(n, NULL, NULL, NULL, NULL, \ POST_KERNEL, \ CONFIG_KERNEL_INIT_PRIORITY_DEVICE, \ &vnd_i3c_api); DT_INST_FOREACH_STATUS_OKAY(VND_I3C_INIT) ```
/content/code_sandbox/drivers/i3c/i3c_test.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
294
```c /* * */ #include <zephyr/drivers/i3c.h> #include <zephyr/shell/shell.h> #include <stdlib.h> #include <string.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/util.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(i3c_shell, CONFIG_LOG_DEFAULT_LEVEL); #define MAX_BYTES_FOR_REGISTER_INDEX 4 #define ARGV_DEV 1 #define ARGV_TDEV 2 #define ARGV_REG 3 /* Maximum bytes we can write or read at once */ #define MAX_I3C_BYTES 16 struct i3c_ctrl { const struct device *dev; const union shell_cmd_entry *i3c_attached_dev_subcmd; const union shell_cmd_entry *i3c_list_dev_subcmd; }; #define I3C_ATTACHED_DEV_GET_FN(node_id) \ static void node_id##cmd_i3c_attached_get(size_t idx, struct shell_static_entry *entry); \ \ SHELL_DYNAMIC_CMD_CREATE(node_id##sub_i3c_attached, node_id##cmd_i3c_attached_get); \ \ static void node_id##cmd_i3c_attached_get(size_t idx, struct shell_static_entry *entry) \ { \ const struct device *dev = DEVICE_DT_GET(node_id); \ struct i3c_driver_data *data; \ sys_snode_t *node; \ size_t cnt = 0; \ \ entry->syntax = NULL; \ entry->handler = NULL; \ entry->subcmd = NULL; \ entry->help = NULL; \ \ data = (struct i3c_driver_data *)dev->data; \ if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { \ SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i3c, node) { \ if (cnt == idx) { \ struct i3c_device_desc *desc = \ CONTAINER_OF(node, struct i3c_device_desc, node); \ entry->syntax = desc->dev->name; \ return; \ } \ cnt++; \ } \ } \ } #define I3C_LIST_DEV_GET_FN(node_id) \ static void node_id##cmd_i3c_list_get(size_t idx, struct shell_static_entry *entry); \ \ SHELL_DYNAMIC_CMD_CREATE(node_id##sub_i3c_list, node_id##cmd_i3c_list_get); \ \ static void node_id##cmd_i3c_list_get(size_t idx, struct shell_static_entry *entry) \ { \ const struct device *dev = DEVICE_DT_GET(node_id); \ struct i3c_driver_config *config; \ \ entry->syntax = NULL; \ entry->handler = NULL; \ entry->subcmd = NULL; \ entry->help = NULL; \ \ config = (struct 
i3c_driver_config *)dev->config; \ if (idx < config->dev_list.num_i3c) { \ entry->syntax = config->dev_list.i3c[idx].dev->name; \ } \ } #define I3C_CTRL_FN(node_id) \ I3C_ATTACHED_DEV_GET_FN(node_id) \ I3C_LIST_DEV_GET_FN(node_id) /* zephyr-keep-sorted-start */ DT_FOREACH_STATUS_OKAY(cdns_i3c, I3C_CTRL_FN) DT_FOREACH_STATUS_OKAY(nuvoton_npcx_i3c, I3C_CTRL_FN) DT_FOREACH_STATUS_OKAY(nxp_mcux_i3c, I3C_CTRL_FN) /* zephyr-keep-sorted-stop */ #define I3C_CTRL_LIST_ENTRY(node_id) \ { \ .dev = DEVICE_DT_GET(node_id), \ .i3c_attached_dev_subcmd = &node_id##sub_i3c_attached, \ .i3c_list_dev_subcmd = &node_id##sub_i3c_list, \ }, const struct i3c_ctrl i3c_list[] = { /* zephyr-keep-sorted-start */ DT_FOREACH_STATUS_OKAY(cdns_i3c, I3C_CTRL_LIST_ENTRY) DT_FOREACH_STATUS_OKAY(nuvoton_npcx_i3c, I3C_CTRL_LIST_ENTRY) DT_FOREACH_STATUS_OKAY(nxp_mcux_i3c, I3C_CTRL_LIST_ENTRY) /* zephyr-keep-sorted-stop */ }; static int get_bytes_count_for_hex(char *arg) { int length = (strlen(arg) + 1) / 2; if (length > 1 && arg[0] == '0' && (arg[1] == 'x' || arg[1] == 'X')) { length -= 1; } return MIN(MAX_BYTES_FOR_REGISTER_INDEX, length); } static struct i3c_i2c_device_desc *get_i3c_i2c_list_desc_from_addr(const struct device *dev, uint16_t addr) { struct i3c_driver_config *config; uint8_t i; config = (struct i3c_driver_config *)dev->config; for (i = 0; i < config->dev_list.num_i2c; i++) { if (config->dev_list.i2c[i].addr == addr) { /* only look for a device with the addr */ return &config->dev_list.i2c[i]; } } return NULL; } static struct i3c_device_desc *get_i3c_list_desc_from_dev_name(const struct device *dev, const char *tdev_name) { struct i3c_driver_config *config; uint8_t i; config = (struct i3c_driver_config *)dev->config; for (i = 0; i < config->dev_list.num_i3c; i++) { if (strcmp(config->dev_list.i3c[i].dev->name, tdev_name) == 0) { /* only look for a device with the same name */ return &config->dev_list.i3c[i]; } } return NULL; } static struct i3c_device_desc 
*get_i3c_attached_desc_from_dev_name(const struct device *dev, const char *tdev_name) { struct i3c_driver_data *data; sys_snode_t *node; data = (struct i3c_driver_data *)dev->data; if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i3c, node) { struct i3c_device_desc *desc = CONTAINER_OF(node, struct i3c_device_desc, node); /* only look for a device with the same name */ if (strcmp(desc->dev->name, tdev_name) == 0) { return desc; } } } return NULL; } /* i3c info <device> [<target>] */ static int cmd_i3c_info(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_driver_data *data; sys_snode_t *node; bool found = false; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } data = (struct i3c_driver_data *)dev->data; if (argc == 3) { /* TODO: is this needed? */ tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Target Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i3c, node) { struct i3c_device_desc *desc = CONTAINER_OF(node, struct i3c_device_desc, node); /* only look for a device with the same name */ if (strcmp(desc->dev->name, tdev->name) == 0) { shell_print(shell_ctx, "name: %s\n" "\tpid: 0x%012llx\n" "\tstatic_addr: 0x%02x\n" "\tdynamic_addr: 0x%02x\n" #if defined(CONFIG_I3C_USE_GROUP_ADDR) "\tgroup_addr: 0x%02x\n" #endif "\tbcr: 0x%02x\n" "\tdcr: 0x%02x\n" "\tmaxrd: 0x%02x\n" "\tmaxwr: 0x%02x\n" "\tmax_read_turnaround: 0x%08x\n" "\tmrl: 0x%04x\n" "\tmwl: 0x%04x\n" "\tmax_ibi: 0x%02x\n" "\tgetcaps: 0x%02x; 0x%02x; 0x%02x; 0x%02x", desc->dev->name, (uint64_t)desc->pid, desc->static_addr, desc->dynamic_addr, #if defined(CONFIG_I3C_USE_GROUP_ADDR) desc->group_addr, #endif desc->bcr, desc->dcr, 
desc->data_speed.maxrd, desc->data_speed.maxwr, desc->data_speed.max_read_turnaround, desc->data_length.mrl, desc->data_length.mwl, desc->data_length.max_ibi, desc->getcaps.getcap1, desc->getcaps.getcap2, desc->getcaps.getcap3, desc->getcaps.getcap4); found = true; break; } } } else { shell_print(shell_ctx, "I3C: No devices found."); return -ENODEV; } if (found == false) { shell_error(shell_ctx, "I3C: Target device not found."); return -ENODEV; } } else if (argc == 2) { /* This gets all "currently attached" I3C and I2C devices */ if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { shell_print(shell_ctx, "I3C: Devices found:"); SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i3c, node) { struct i3c_device_desc *desc = CONTAINER_OF(node, struct i3c_device_desc, node); shell_print(shell_ctx, "name: %s\n" "\tpid: 0x%012llx\n" "\tstatic_addr: 0x%02x\n" "\tdynamic_addr: 0x%02x\n" #if defined(CONFIG_I3C_USE_GROUP_ADDR) "\tgroup_addr: 0x%02x\n" #endif "\tbcr: 0x%02x\n" "\tdcr: 0x%02x\n" "\tmaxrd: 0x%02x\n" "\tmaxwr: 0x%02x\n" "\tmax_read_turnaround: 0x%08x\n" "\tmrl: 0x%04x\n" "\tmwl: 0x%04x\n" "\tmax_ibi: 0x%02x\n" "\tgetcaps: 0x%02x; 0x%02x; 0x%02x; 0x%02x", desc->dev->name, (uint64_t)desc->pid, desc->static_addr, desc->dynamic_addr, #if defined(CONFIG_I3C_USE_GROUP_ADDR) desc->group_addr, #endif desc->bcr, desc->dcr, desc->data_speed.maxrd, desc->data_speed.maxwr, desc->data_speed.max_read_turnaround, desc->data_length.mrl, desc->data_length.mwl, desc->data_length.max_ibi, desc->getcaps.getcap1, desc->getcaps.getcap2, desc->getcaps.getcap3, desc->getcaps.getcap4); } } else { shell_print(shell_ctx, "I3C: No devices found."); } if (!sys_slist_is_empty(&data->attached_dev.devices.i2c)) { shell_print(shell_ctx, "I2C: Devices found:"); SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i2c, node) { struct i3c_i2c_device_desc *desc = CONTAINER_OF(node, struct i3c_i2c_device_desc, node); shell_print(shell_ctx, "addr: 0x%02x\n" "\tlvr: 0x%02x", desc->addr, 
desc->lvr); } } else { shell_print(shell_ctx, "I2C: No devices found."); } } else { shell_error(shell_ctx, "Invalid number of arguments."); } return 0; } /* i3c recover <device> */ static int cmd_i3c_recover(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; int err; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[1]); return -ENODEV; } err = i3c_recover_bus(dev); if (err) { shell_error(shell_ctx, "I3C: Bus recovery failed (err %d)", err); return err; } return 0; } static int i3c_write_from_buffer(const struct shell *shell_ctx, char *s_dev_name, char *s_tdev_name, char *s_reg_addr, char **data, uint8_t data_length) { /* This buffer must preserve 4 bytes for register address, as it is * filled using put_be32 function and we don't want to lower available * space when using 1 byte address. */ uint8_t buf[MAX_I3C_BYTES + MAX_BYTES_FOR_REGISTER_INDEX - 1]; const struct device *dev, *tdev; struct i3c_device_desc *desc; int reg_addr_bytes; int reg_addr; int ret; int i; dev = device_get_binding(s_dev_name); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", s_dev_name); return -ENODEV; } tdev = device_get_binding(s_tdev_name); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", s_tdev_name); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } reg_addr = strtol(s_reg_addr, NULL, 16); reg_addr_bytes = get_bytes_count_for_hex(s_reg_addr); sys_put_be32(reg_addr, buf); if (data_length + reg_addr_bytes > MAX_I3C_BYTES) { data_length = MAX_I3C_BYTES - reg_addr_bytes; shell_info(shell_ctx, "Too many bytes provided, limit is %d", MAX_I3C_BYTES - reg_addr_bytes); } for (i = 0; i < data_length; i++) { buf[MAX_BYTES_FOR_REGISTER_INDEX + i] = (uint8_t)strtol(data[i], NULL, 16); } ret = i3c_write(desc, buf + 
MAX_BYTES_FOR_REGISTER_INDEX - reg_addr_bytes, reg_addr_bytes + data_length); if (ret < 0) { shell_error(shell_ctx, "Failed to write to device: %s", tdev->name); return -EIO; } return 0; } /* i3c write <device> <dev_addr> <reg_addr> [<byte1>, ...] */ static int cmd_i3c_write(const struct shell *shell_ctx, size_t argc, char **argv) { return i3c_write_from_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_TDEV], argv[ARGV_REG], &argv[4], argc - 4); } /* i3c write_byte <device> <dev_addr> <reg_addr> <value> */ static int cmd_i3c_write_byte(const struct shell *shell_ctx, size_t argc, char **argv) { return i3c_write_from_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_TDEV], argv[ARGV_REG], &argv[4], 1); } static int i3c_read_to_buffer(const struct shell *shell_ctx, char *s_dev_name, char *s_tdev_name, char *s_reg_addr, uint8_t *buf, uint8_t buf_length) { const struct device *dev, *tdev; struct i3c_device_desc *desc; uint8_t reg_addr_buf[MAX_BYTES_FOR_REGISTER_INDEX]; int reg_addr_bytes; int reg_addr; int ret; dev = device_get_binding(s_dev_name); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", s_dev_name); return -ENODEV; } tdev = device_get_binding(s_tdev_name); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", s_dev_name); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } reg_addr = strtol(s_reg_addr, NULL, 16); reg_addr_bytes = get_bytes_count_for_hex(s_reg_addr); sys_put_be32(reg_addr, reg_addr_buf); ret = i3c_write_read(desc, reg_addr_buf + MAX_BYTES_FOR_REGISTER_INDEX - reg_addr_bytes, reg_addr_bytes, buf, buf_length); if (ret < 0) { shell_error(shell_ctx, "Failed to read from device: %s", tdev->name); return -EIO; } return 0; } /* i3c read_byte <device> <target> <reg_addr> */ static int cmd_i3c_read_byte(const struct shell *shell_ctx, size_t argc, char **argv) { uint8_t out; int ret; ret = 
i3c_read_to_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_TDEV], argv[ARGV_REG], &out, 1); if (ret == 0) { shell_print(shell_ctx, "Output: 0x%x", out); } return ret; } /* i3c read <device> <target> <reg_addr> [<numbytes>] */ static int cmd_i3c_read(const struct shell *shell_ctx, size_t argc, char **argv) { uint8_t buf[MAX_I3C_BYTES]; int num_bytes; int ret; if (argc > 4) { num_bytes = strtol(argv[4], NULL, 16); if (num_bytes > MAX_I3C_BYTES) { num_bytes = MAX_I3C_BYTES; } } else { num_bytes = MAX_I3C_BYTES; } ret = i3c_read_to_buffer(shell_ctx, argv[ARGV_DEV], argv[ARGV_TDEV], argv[ARGV_REG], buf, num_bytes); if (ret == 0) { shell_hexdump(shell_ctx, buf, num_bytes); } return ret; } /* i3c ccc rstdaa <device> */ static int cmd_i3c_ccc_rstdaa(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_driver_data *data; sys_snode_t *node; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } data = (struct i3c_driver_data *)dev->data; ret = i3c_ccc_do_rstdaa_all(dev); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC RSTDAA."); return ret; } /* reset all devices DA */ if (!sys_slist_is_empty(&data->attached_dev.devices.i3c)) { SYS_SLIST_FOR_EACH_NODE(&data->attached_dev.devices.i3c, node) { struct i3c_device_desc *desc = CONTAINER_OF(node, struct i3c_device_desc, node); desc->dynamic_addr = 0; shell_print(shell_ctx, "Reset dynamic address for device %s", desc->dev->name); } } return ret; } /* i3c ccc entdaa <device> */ static int cmd_i3c_ccc_entdaa(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } return i3c_do_daa(dev); } /* i3c ccc setdasa <device> <target> */ static int cmd_i3c_ccc_setdasa(const struct shell *shell_ctx, size_t argc, char 
**argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_ccc_do_setdasa(desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC SETDASA."); return ret; } /* update the target's dynamic address */ desc->dynamic_addr = desc->init_dynamic_addr ? desc->init_dynamic_addr : desc->static_addr; return ret; } /* i3c ccc setnewda <device> <target> <dynamic address>*/ static int cmd_i3c_ccc_setnewda(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_driver_data *data; struct i3c_ccc_address new_da; uint8_t old_da; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } data = (struct i3c_driver_data *)dev->data; new_da.addr = strtol(argv[3], NULL, 16); /* check if the addressed is free */ if (!i3c_addr_slots_is_free(&data->attached_dev.addr_slots, new_da.addr)) { shell_error(shell_ctx, "I3C: Address 0x%02x is already in use.", new_da.addr); return -EINVAL; } ret = i3c_ccc_do_setnewda(desc, new_da); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC SETDASA."); 
return ret; } /* reattach device address */ old_da = desc->dynamic_addr; desc->dynamic_addr = new_da.addr; ret = i3c_reattach_i3c_device(desc, old_da); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to reattach device"); return ret; } return ret; } /* i3c ccc getbcr <device> <target> */ static int cmd_i3c_ccc_getbcr(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_getbcr bcr; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_ccc_do_getbcr(desc, &bcr); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETBCR."); return ret; } shell_print(shell_ctx, "BCR: 0x%02x", bcr.bcr); return ret; } /* i3c ccc getdcr <device> <target> */ static int cmd_i3c_ccc_getdcr(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_getdcr dcr; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_ccc_do_getdcr(desc, &dcr); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETDCR."); return ret; } shell_print(shell_ctx, "DCR: 0x%02x", 
dcr.dcr); return ret; } /* i3c ccc getpid <device> <target> */ static int cmd_i3c_ccc_getpid(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_getpid pid; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_ccc_do_getpid(desc, &pid); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETPID."); return ret; } shell_print(shell_ctx, "PID: 0x%012llx", sys_get_be48(pid.pid)); return ret; } /* i3c ccc getmrl <device> <target> */ static int cmd_i3c_ccc_getmrl(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_mrl mrl; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_ccc_do_getmrl(desc, &mrl); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETMRL."); return ret; } if (desc->bcr & I3C_BCR_IBI_PAYLOAD_HAS_DATA_BYTE) { shell_print(shell_ctx, "MRL: 0x%04x; IBI Length:0x%02x", mrl.len, mrl.ibi_len); } else { shell_print(shell_ctx, "MRL: 0x%04x", mrl.len); } return ret; } /* i3c ccc getmwl <device> <target> */ static int 
cmd_i3c_ccc_getmwl(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_mwl mwl; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_ccc_do_getmwl(desc, &mwl); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETMWL."); return ret; } shell_print(shell_ctx, "MWL: 0x%04x", mwl.len); return ret; } /* i3c ccc setmrl <device> <target> <max read length> [<max ibi length>] */ static int cmd_i3c_ccc_setmrl(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_mrl mrl; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } /* IBI length is required if the ibi payload bit is set */ if ((desc->bcr & I3C_BCR_IBI_PAYLOAD_HAS_DATA_BYTE) && (argc < 4)) { shell_error(shell_ctx, "I3C: Missing IBI length."); return -EINVAL; } mrl.len = strtol(argv[3], NULL, 16); if (argc > 3) { mrl.ibi_len = strtol(argv[4], NULL, 16); } ret = i3c_ccc_do_setmrl(desc, &mrl); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC SETMRL."); return ret; } return ret; } /* i3c ccc 
setmwl <device> <target> <max write length> */ static int cmd_i3c_ccc_setmwl(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_mwl mwl; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } mwl.len = strtol(argv[3], NULL, 16); ret = i3c_ccc_do_setmwl(desc, &mwl); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC SETMWL."); return ret; } return ret; } /* i3c ccc setmrl_bc <device> <max read length> [<max ibi length>] */ static int cmd_i3c_ccc_setmrl_bc(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_ccc_mrl mrl; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } mrl.len = strtol(argv[2], NULL, 16); if (argc > 3) { mrl.ibi_len = strtol(argv[3], NULL, 16); } ret = i3c_ccc_do_setmrl_all(dev, &mrl, argc > 3); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC SETMRL BC."); return ret; } return ret; } /* i3c ccc setmwl_bc <device> <max write length> */ static int cmd_i3c_ccc_setmwl_bc(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_ccc_mwl mwl; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } mwl.len = strtol(argv[3], NULL, 16); ret = i3c_ccc_do_setmwl_all(dev, &mwl); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC SETMWL BC."); 
return ret; } return ret; } /* i3c ccc rstact_bc <device> <defining byte> */ static int cmd_i3c_ccc_rstact_bc(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; enum i3c_ccc_rstact_defining_byte action; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } action = strtol(argv[2], NULL, 16); ret = i3c_ccc_do_rstact_all(dev, action); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC RSTACT BC."); return ret; } return ret; } /* i3c ccc enec_bc <device> <defining byte> */ static int cmd_i3c_ccc_enec_bc(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_ccc_events events; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } events.events = strtol(argv[2], NULL, 16); ret = i3c_ccc_do_events_all_set(dev, true, &events); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC ENEC BC."); return ret; } return ret; } /* i3c ccc disec_bc <device> <defining byte> */ static int cmd_i3c_ccc_disec_bc(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_ccc_events events; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } events.events = strtol(argv[2], NULL, 16); ret = i3c_ccc_do_events_all_set(dev, false, &events); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC ENEC BC."); return ret; } return ret; } /* i3c ccc enec <device> <target> <defining byte> */ static int cmd_i3c_ccc_enec(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_events events; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, 
"I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } events.events = strtol(argv[3], NULL, 16); ret = i3c_ccc_do_events_set(desc, true, &events); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC ENEC BC."); return ret; } return ret; } /* i3c ccc disec <device> <target> <defining byte> */ static int cmd_i3c_ccc_disec(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; struct i3c_ccc_events events; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } events.events = strtol(argv[3], NULL, 16); ret = i3c_ccc_do_events_set(desc, false, &events); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC ENEC BC."); return ret; } return ret; } /* i3c ccc getstatus <device> <target> [<defining byte>] */ static int cmd_i3c_ccc_getstatus(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; union i3c_ccc_getstatus status; enum i3c_ccc_getstatus_fmt fmt; enum i3c_ccc_getstatus_defbyte defbyte = GETSTATUS_FORMAT_2_INVALID; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } 
tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } /* If there is a defining byte, then it is assumed to be Format 2*/ if (argc > 3) { fmt = GETSTATUS_FORMAT_2; defbyte = strtol(argv[3], NULL, 16); if (defbyte != GETSTATUS_FORMAT_2_TGTSTAT || defbyte != GETSTATUS_FORMAT_2_PRECR) { shell_error(shell_ctx, "Invalid defining byte."); return -EINVAL; } } else { fmt = GETSTATUS_FORMAT_1; } ret = i3c_ccc_do_getstatus(desc, &status, fmt, defbyte); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETSTATUS."); return ret; } if (fmt == GETSTATUS_FORMAT_2) { if (defbyte == GETSTATUS_FORMAT_2_TGTSTAT) { shell_print(shell_ctx, "TGTSTAT: 0x%04x", status.fmt2.tgtstat); } else if (defbyte == GETSTATUS_FORMAT_2_PRECR) { shell_print(shell_ctx, "PRECR: 0x%04x", status.fmt2.precr); } } else { shell_print(shell_ctx, "Status: 0x%04x", status.fmt1.status); } return ret; } /* i3c ccc getcaps <device> <target> [<defining byte>] */ static int cmd_i3c_ccc_getcaps(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; union i3c_ccc_getcaps caps; enum i3c_ccc_getcaps_fmt fmt; enum i3c_ccc_getcaps_defbyte defbyte = GETCAPS_FORMAT_2_INVALID; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } if (!(desc->bcr & I3C_BCR_ADV_CAPABILITIES)) 
{ shell_error(shell_ctx, "I3C: Device %s does not support advanced capabilities", desc->dev->name); return -ENOTSUP; } /* If there is a defining byte, then it is assumed to be Format 2 */ if (argc > 3) { fmt = GETCAPS_FORMAT_2; defbyte = strtol(argv[3], NULL, 16); if (defbyte != GETCAPS_FORMAT_2_TGTCAPS || defbyte != GETCAPS_FORMAT_2_TESTPAT || defbyte != GETCAPS_FORMAT_2_CRCAPS || defbyte != GETCAPS_FORMAT_2_VTCAPS || defbyte != GETCAPS_FORMAT_2_DBGCAPS) { shell_error(shell_ctx, "Invalid defining byte."); return -EINVAL; } } else { fmt = GETCAPS_FORMAT_1; } ret = i3c_ccc_do_getcaps(desc, &caps, fmt, defbyte); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to send CCC GETCAPS."); return ret; } if (fmt == GETCAPS_FORMAT_2) { if (defbyte == GETCAPS_FORMAT_2_TGTCAPS) { shell_print(shell_ctx, "TGTCAPS: 0x%02x; 0x%02x; 0x%02x; 0x%02x", caps.fmt2.tgtcaps[0], caps.fmt2.tgtcaps[1], caps.fmt2.tgtcaps[2], caps.fmt2.tgtcaps[3]); } else if (defbyte == GETCAPS_FORMAT_2_TESTPAT) { shell_print(shell_ctx, "TESTPAT: 0x%08x", caps.fmt2.testpat); } else if (defbyte == GETCAPS_FORMAT_2_CRCAPS) { shell_print(shell_ctx, "CRCAPS: 0x%02x; 0x%02x", caps.fmt2.crcaps[0], caps.fmt2.crcaps[1]); } else if (defbyte == GETCAPS_FORMAT_2_VTCAPS) { shell_print(shell_ctx, "VTCAPS: 0x%02x; 0x%02x", caps.fmt2.vtcaps[0], caps.fmt2.vtcaps[1]); } } else { shell_print(shell_ctx, "GETCAPS: 0x%02x; 0x%02x; 0x%02x; 0x%02x", caps.fmt1.getcaps[0], caps.fmt1.getcaps[1], caps.fmt1.getcaps[2], caps.fmt1.getcaps[3]); } return ret; } static int cmd_i3c_attach(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = 
get_i3c_list_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_attach_i3c_device(desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to attach device %s.", tdev->name); } return ret; } static int cmd_i3c_reattach(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; uint8_t old_dyn_addr = 0; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } if (argc > 2) { old_dyn_addr = strtol(argv[2], NULL, 16); } ret = i3c_reattach_i3c_device(desc, old_dyn_addr); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to reattach device %s.", tdev->name); } return ret; } static int cmd_i3c_detach(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev, *tdev; struct i3c_device_desc *desc; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } tdev = device_get_binding(argv[ARGV_TDEV]); if (!tdev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_TDEV]); return -ENODEV; } desc = get_i3c_attached_desc_from_dev_name(dev, tdev->name); if (!desc) { shell_error(shell_ctx, "I3C: Device %s not attached to bus.", tdev->name); return -ENODEV; } ret = i3c_detach_i3c_device(desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to detach device %s.", tdev->name); } return ret; } static int cmd_i3c_i2c_attach(const struct shell *shell_ctx, size_t argc, 
char **argv) { const struct device *dev; struct i3c_i2c_device_desc *desc; uint16_t addr = 0; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[2], NULL, 16); desc = get_i3c_i2c_list_desc_from_addr(dev, addr); if (!desc) { shell_error(shell_ctx, "I3C: I2C addr 0x%02x not listed with the bus.", addr); return -ENODEV; } ret = i3c_attach_i2c_device(desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to attach I2C addr 0x%02x.", addr); } return ret; } static int cmd_i3c_i2c_detach(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_i2c_device_desc *desc; uint16_t addr = 0; int ret; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } addr = strtol(argv[2], NULL, 16); desc = get_i3c_i2c_list_desc_from_addr(dev, addr); if (!desc) { shell_error(shell_ctx, "I3C: I2C addr 0x%02x not listed with the bus.", addr); return -ENODEV; } ret = i3c_detach_i2c_device(desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to detach I2C addr 0x%02x.", addr); } return ret; } /* * This is a workaround command to perform an I2C Scan which is not as * simple on an I3C bus as it is with the I2C Shell. * * This will print "I3" if an address is already assigned for an I3C * device and it will print "I2" if an address is already assigned for * an I2C device. It will print RS, if the address is reserved according * to section 5.1.2.2.5 I3C Target Address Restrictions in I3C v1.1.1. * * This sends I2C messages without any data (i.e. stop condition after * sending just the address). If there is an ACK for the address, it * is assumed there is a device present. 
* * WARNING: As there is no standard I2C detection command, this code * uses arbitrary SMBus commands (namely SMBus quick write and SMBus * receive byte) to probe for devices. This operation can confuse * your I2C bus, cause data loss, and is known to corrupt the Atmel * AT24RF08 EEPROM found on many IBM Thinkpad laptops. * * path_to_url */ /* i3c i2c_scan <device> */ static int cmd_i3c_i2c_scan(const struct shell *shell_ctx, size_t argc, char **argv) { const struct device *dev; struct i3c_driver_data *data; enum i3c_addr_slot_status slot; uint8_t cnt = 0, first = 0x04, last = 0x77; dev = device_get_binding(argv[ARGV_DEV]); if (!dev) { shell_error(shell_ctx, "I3C: Device driver %s not found.", argv[ARGV_DEV]); return -ENODEV; } data = (struct i3c_driver_data *)dev->data; shell_print(shell_ctx, " 0 1 2 3 4 5 6 7 8 9 a b c d e f"); for (uint8_t i = 0; i <= last; i += 16) { shell_fprintf(shell_ctx, SHELL_NORMAL, "%02x: ", i); for (uint8_t j = 0; j < 16; j++) { if (i + j < first || i + j > last) { shell_fprintf(shell_ctx, SHELL_NORMAL, " "); continue; } slot = i3c_addr_slots_status(&data->attached_dev.addr_slots, i + j); if (slot == I3C_ADDR_SLOT_STATUS_FREE) { struct i2c_msg msgs[1]; uint8_t dst; int ret; struct i3c_i2c_device_desc desc = { .bus = dev, .addr = i + j, .lvr = 0x00, }; ret = i3c_attach_i2c_device(&desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to attach I2C addr 0x%02x.", desc.addr); } /* Send the address to read from */ msgs[0].buf = &dst; msgs[0].len = 0U; msgs[0].flags = I2C_MSG_WRITE | I2C_MSG_STOP; if (i2c_transfer(dev, &msgs[0], 1, i + j) == 0) { shell_fprintf(shell_ctx, SHELL_NORMAL, "%02x ", i + j); ++cnt; } else { shell_fprintf(shell_ctx, SHELL_NORMAL, "-- "); } ret = i3c_detach_i2c_device(&desc); if (ret < 0) { shell_error(shell_ctx, "I3C: unable to detach I2C addr 0x%02x.", desc.addr); } } else if (slot == I3C_ADDR_SLOT_STATUS_I3C_DEV) { shell_fprintf(shell_ctx, SHELL_NORMAL, "I3 "); } else if (slot == I3C_ADDR_SLOT_STATUS_I2C_DEV) 
{ shell_fprintf(shell_ctx, SHELL_NORMAL, "I2 "); } else if (slot == I3C_ADDR_SLOT_STATUS_RSVD) { shell_fprintf(shell_ctx, SHELL_NORMAL, "RS "); } else { shell_fprintf(shell_ctx, SHELL_NORMAL, "-- "); } } shell_print(shell_ctx, ""); } shell_print(shell_ctx, "%u additional devices found on %s", cnt, argv[ARGV_DEV]); return 0; } static void i3c_device_list_target_name_get(size_t idx, struct shell_static_entry *entry) { if (idx < ARRAY_SIZE(i3c_list)) { entry->syntax = i3c_list[idx].dev->name; entry->handler = NULL; entry->help = NULL; entry->subcmd = i3c_list[idx].i3c_list_dev_subcmd; } else { entry->syntax = NULL; } } SHELL_DYNAMIC_CMD_CREATE(dsub_i3c_device_list_name, i3c_device_list_target_name_get); static void i3c_device_attached_target_name_get(size_t idx, struct shell_static_entry *entry) { if (idx < ARRAY_SIZE(i3c_list)) { entry->syntax = i3c_list[idx].dev->name; entry->handler = NULL; entry->help = NULL; entry->subcmd = i3c_list[idx].i3c_attached_dev_subcmd; } else { entry->syntax = NULL; } } SHELL_DYNAMIC_CMD_CREATE(dsub_i3c_device_attached_name, i3c_device_attached_target_name_get); static void i3c_device_name_get(size_t idx, struct shell_static_entry *entry) { if (idx < ARRAY_SIZE(i3c_list)) { entry->syntax = i3c_list[idx].dev->name; entry->handler = NULL; entry->help = NULL; entry->subcmd = NULL; } else { entry->syntax = NULL; } } SHELL_DYNAMIC_CMD_CREATE(dsub_i3c_device_name, i3c_device_name_get); /* L2 I3C CCC Shell Commands*/ SHELL_STATIC_SUBCMD_SET_CREATE( sub_i3c_ccc_cmds, SHELL_CMD_ARG(rstdaa, &dsub_i3c_device_name, "Send CCC RSTDAA\n" "Usage: ccc rstdaa <device>", cmd_i3c_ccc_rstdaa, 2, 0), SHELL_CMD_ARG(entdaa, &dsub_i3c_device_name, "Send CCC ENTDAA\n" "Usage: ccc entdaa <device>", cmd_i3c_ccc_entdaa, 2, 0), SHELL_CMD_ARG(setdasa, &dsub_i3c_device_attached_name, "Send CCC SETDASA\n" "Usage: ccc setdasa <device> <target>", cmd_i3c_ccc_setdasa, 3, 0), SHELL_CMD_ARG(setnewda, &dsub_i3c_device_attached_name, "Send CCC SETNEWDA\n" "Usage: ccc setnewda 
<device> <target> <dynamic address>", cmd_i3c_ccc_setnewda, 4, 0), SHELL_CMD_ARG(getbcr, &dsub_i3c_device_attached_name, "Send CCC GETBCR\n" "Usage: ccc getbcr <device> <target>", cmd_i3c_ccc_getbcr, 3, 0), SHELL_CMD_ARG(getdcr, &dsub_i3c_device_attached_name, "Send CCC GETDCR\n" "Usage: ccc getdcr <device> <target>", cmd_i3c_ccc_getdcr, 3, 0), SHELL_CMD_ARG(getpid, &dsub_i3c_device_attached_name, "Send CCC GETPID\n" "Usage: ccc getpid <device> <target>", cmd_i3c_ccc_getpid, 3, 0), SHELL_CMD_ARG(getmrl, &dsub_i3c_device_attached_name, "Send CCC GETMRL\n" "Usage: ccc getmrl <device> <target>", cmd_i3c_ccc_getmrl, 3, 0), SHELL_CMD_ARG(getmwl, &dsub_i3c_device_attached_name, "Send CCC GETMWL\n" "Usage: ccc getmwl <device> <target>", cmd_i3c_ccc_getmwl, 3, 0), SHELL_CMD_ARG(setmrl, &dsub_i3c_device_attached_name, "Send CCC SETMRL\n" "Usage: ccc setmrl <device> <target> <max read length> [<max ibi length>]", cmd_i3c_ccc_setmrl, 4, 1), SHELL_CMD_ARG(setmwl, &dsub_i3c_device_attached_name, "Send CCC SETMWL\n" "Usage: ccc setmwl <device> <target> <max write length>", cmd_i3c_ccc_setmwl, 4, 0), SHELL_CMD_ARG(setmrl_bc, &dsub_i3c_device_name, "Send CCC SETMRL BC\n" "Usage: ccc setmrl_bc <device> <max read length> [<max ibi length>]", cmd_i3c_ccc_setmrl_bc, 3, 1), SHELL_CMD_ARG(setmwl_bc, &dsub_i3c_device_name, "Send CCC SETMWL BC\n" "Usage: ccc setmwl_bc <device> <max write length>", cmd_i3c_ccc_setmwl_bc, 3, 0), SHELL_CMD_ARG(rstact_bc, &dsub_i3c_device_name, "Send CCC RSTACT BC\n" "Usage: ccc rstact_bc <device> <defining byte>", cmd_i3c_ccc_rstact_bc, 3, 0), SHELL_CMD_ARG(enec_bc, &dsub_i3c_device_name, "Send CCC ENEC BC\n" "Usage: ccc enec_bc <device> <defining byte>", cmd_i3c_ccc_enec_bc, 3, 0), SHELL_CMD_ARG(disec_bc, &dsub_i3c_device_name, "Send CCC DISEC BC\n" "Usage: ccc disec_bc <device> <defining byte>", cmd_i3c_ccc_disec_bc, 3, 0), SHELL_CMD_ARG(enec, &dsub_i3c_device_attached_name, "Send CCC ENEC\n" "Usage: ccc enec <device> <target> <defining byte>", 
cmd_i3c_ccc_enec, 4, 0), SHELL_CMD_ARG(disec, &dsub_i3c_device_attached_name, "Send CCC DISEC\n" "Usage: ccc disec <device> <target> <defining byte>", cmd_i3c_ccc_disec, 4, 0), SHELL_CMD_ARG(getstatus, &dsub_i3c_device_attached_name, "Send CCC GETSTATUS\n" "Usage: ccc getstatus <device> <target> [<defining byte>]", cmd_i3c_ccc_getstatus, 3, 1), SHELL_CMD_ARG(getcaps, &dsub_i3c_device_attached_name, "Send CCC GETCAPS\n" "Usage: ccc getcaps <device> <target> [<defining byte>]", cmd_i3c_ccc_getcaps, 3, 1), SHELL_SUBCMD_SET_END /* Array terminated. */ ); /* L1 I3C Shell Commands*/ SHELL_STATIC_SUBCMD_SET_CREATE( sub_i3c_cmds, SHELL_CMD_ARG(info, &dsub_i3c_device_attached_name, "Get I3C device info\n" "Usage: info <device> [<target>]", cmd_i3c_info, 2, 1), SHELL_CMD_ARG(recover, &dsub_i3c_device_name, "Recover I3C bus\n" "Usage: recover <device>", cmd_i3c_recover, 2, 0), SHELL_CMD_ARG(read, &dsub_i3c_device_attached_name, "Read bytes from an I3C device\n" "Usage: read <device> <target> <reg> [<bytes>]", cmd_i3c_read, 4, 1), SHELL_CMD_ARG(read_byte, &dsub_i3c_device_attached_name, "Read a byte from an I3C device\n" "Usage: read_byte <device> <target> <reg>", cmd_i3c_read_byte, 4, 0), SHELL_CMD_ARG(write, &dsub_i3c_device_attached_name, "Write bytes to an I3C device\n" "Usage: write <device> <target> <reg> [<byte1>, ...]", cmd_i3c_write, 4, MAX_I3C_BYTES), SHELL_CMD_ARG(write_byte, &dsub_i3c_device_attached_name, "Write a byte to an I3C device\n" "Usage: write_byte <device> <target> <reg> <value>", cmd_i3c_write_byte, 5, 0), SHELL_CMD_ARG(i3c_attach, &dsub_i3c_device_list_name, "Attach I3C device from the bus\n" "Usage: i3c_attach <device> <target>", cmd_i3c_attach, 3, 0), SHELL_CMD_ARG(i3c_reattach, &dsub_i3c_device_attached_name, "Reattach I3C device from the bus\n" "Usage: i3c_reattach <device> <target> [<old dynamic address>]", cmd_i3c_reattach, 3, 1), SHELL_CMD_ARG(i3c_detach, &dsub_i3c_device_attached_name, "Detach I3C device from the bus\n" "Usage: i3c_detach 
<device> <target>", cmd_i3c_detach, 3, 0), SHELL_CMD_ARG(i2c_attach, &dsub_i3c_device_name, "Attach I2C device from the bus\n" "Usage: i2c_attach <device> <addr>", cmd_i3c_i2c_attach, 3, 0), SHELL_CMD_ARG(i2c_detach, &dsub_i3c_device_name, "Detach I2C device from the bus\n" "Usage: i2c_detach <device> <addr>", cmd_i3c_i2c_detach, 3, 0), SHELL_CMD_ARG(i2c_scan, &dsub_i3c_device_name, "Scan I2C devices\n" "Usage: i2c_scan <device>", cmd_i3c_i2c_scan, 2, 0), SHELL_CMD_ARG(ccc, &sub_i3c_ccc_cmds, "Send I3C CCC\n" "Usage: ccc <sub cmd>", NULL, 3, 0), SHELL_SUBCMD_SET_END /* Array terminated. */ ); SHELL_CMD_REGISTER(i3c, &sub_i3c_cmds, "I3C commands", NULL); ```
/content/code_sandbox/drivers/i3c/i3c_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
15,628
```c
/* * */
#include <zephyr/drivers/i3c.h>
#include <string.h>
#include <zephyr/internal/syscall_handler.h>

/*
 * Userspace verification handler for i3c_do_ccc().
 *
 * Validates that the caller owns the I3C driver object and that the CCC
 * payload struct - plus any data buffers it points at - lives in memory the
 * calling thread may both read and write, before forwarding to the kernel
 * implementation.
 *
 * @param dev     I3C controller device (validated as a driver object).
 * @param payload CCC payload supplied from user mode.
 *
 * @return Result of z_impl_i3c_do_ccc(); oopses the caller on bad access.
 */
static inline int z_vrfy_i3c_do_ccc(const struct device *dev,
				    struct i3c_ccc_payload *payload)
{
	K_OOPS(K_SYSCALL_DRIVER_I3C(dev, do_ccc));
	/* The payload struct itself must be readable AND writable: the
	 * implementation reads the request and writes back results.
	 */
	K_OOPS(K_SYSCALL_MEMORY_READ(payload, sizeof(*payload)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(payload, sizeof(*payload)));

	/* Optional broadcast/common data buffer. */
	if (payload->ccc.data != NULL) {
		K_OOPS(K_SYSCALL_MEMORY_ARRAY_READ(payload->ccc.data,
						   payload->ccc.data_len,
						   sizeof(*payload->ccc.data)));
		K_OOPS(K_SYSCALL_MEMORY_ARRAY_WRITE(payload->ccc.data,
						    payload->ccc.data_len,
						    sizeof(*payload->ccc.data)));
	}

	/* Optional per-target payload array.
	 * NOTE(review): only the array of i3c_ccc_target_payload structs is
	 * validated here; the buffers those structs point at are not checked
	 * at this level - presumably validated elsewhere. TODO confirm.
	 */
	if (payload->targets.payloads != NULL) {
		K_OOPS(K_SYSCALL_MEMORY_ARRAY_READ(payload->targets.payloads,
						   payload->targets.num_targets,
						   sizeof(*payload->targets.payloads)));
		K_OOPS(K_SYSCALL_MEMORY_ARRAY_WRITE(payload->targets.payloads,
						    payload->targets.num_targets,
						    sizeof(*payload->targets.payloads)));
	}

	return z_impl_i3c_do_ccc(dev, payload);
}
#include <zephyr/syscalls/i3c_do_ccc_mrsh.c>

/*
 * Copy user-supplied messages to a kernel-stack VLA and perform the transfer.
 *
 * Working on a private copy prevents TOCTOU ("switcheroo") attacks where user
 * code mutates the message array after validation. Each message buffer is
 * then validated: read messages require the buffer to be writable.
 *
 * @param target   Target device descriptor (already validated by caller).
 * @param msgs     User message array (already validated as readable).
 * @param num_msgs Number of messages (already bounded by caller).
 *
 * @return Result of z_impl_i3c_transfer().
 */
static uint32_t copy_i3c_msgs_and_transfer(struct i3c_device_desc *target,
					   const struct i3c_msg *msgs,
					   uint8_t num_msgs)
{
	struct i3c_msg copy[num_msgs];
	uint8_t i;

	/* Use a local copy to avoid switcheroo attacks. */
	memcpy(copy, msgs, num_msgs * sizeof(*msgs));

	/* Validate the buffers in each message struct. Read options require
	 * that the target buffer be writable
	 */
	for (i = 0U; i < num_msgs; i++) {
		K_OOPS(K_SYSCALL_MEMORY(copy[i].buf, copy[i].len,
					copy[i].flags & I3C_MSG_READ));
	}

	return z_impl_i3c_transfer(target, copy, num_msgs);
}

/*
 * Userspace verification handler for i3c_transfer().
 *
 * Checks the target descriptor is readable, the bus is a driver object the
 * caller owns, bounds the message count (the copy uses a stack VLA), and
 * verifies the message array is readable before delegating to
 * copy_i3c_msgs_and_transfer().
 */
static inline int z_vrfy_i3c_transfer(struct i3c_device_desc *target,
				      struct i3c_msg *msgs, uint8_t num_msgs)
{
	K_OOPS(K_SYSCALL_MEMORY_READ(target, sizeof(*target)));
	K_OOPS(K_SYSCALL_OBJ(target->bus, K_OBJ_DRIVER_I3C));

	/* copy_msgs_and_transfer() will allocate a copy on the stack using
	 * VLA, so ensure this won't blow the stack.  Most functions defined
	 * in i3c.h use only a handful of messages, so a limit of 32 messages
	 * should be more than sufficient.
	 */
	K_OOPS(K_SYSCALL_VERIFY(num_msgs >= 1 && num_msgs < 32));

	/* We need to be able to read the overall array of messages */
	K_OOPS(K_SYSCALL_MEMORY_ARRAY_READ(msgs, num_msgs,
					   sizeof(struct i3c_msg)));

	return copy_i3c_msgs_and_transfer((struct i3c_device_desc *)target,
					  (struct i3c_msg *)msgs,
					  (uint8_t)num_msgs);
}
#include <zephyr/syscalls/i3c_transfer_mrsh.c>
```
/content/code_sandbox/drivers/i3c/i3c_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
696
```unknown # NPCX I3C driver configuration options DT_I3C_NPCX := $(dt_nodelabel_path,i3c0) config I3C_NPCX bool "Nuvoton NPCX embedded controller (EC) I3C driver" depends on DT_HAS_NUVOTON_NPCX_I3C_ENABLED select RESET select I3C_IBI_WORKQUEUE if I3C_USE_IBI default y help This option enables the I3C driver for NPCX family of processors. Say y if you wish to use I3C channels on NPCX MCU. # Expose this option when the 'reg' property includes the MDMA base address # as the second group in the phandle-array. # i.e. I3C node example in dtsi file. # i3c0: i3c@400f0000 { # .... # /* reg[0]: I3C_1 register, reg[1]: MDMA5 register */ # reg-names = "i3c1", "mdma5"; # reg = <0x400f0000 0x2000>, # <0x40011500 0x100>; # .... # } config I3C_NPCX_DMA bool "Nuvoton NPCX embedded controller (EC) I3C driver DMA support" depends on I3C_NPCX && "$(dt_node_reg_addr_hex,$(DT_I3C_NPCX),1)" != 0 default y help Enable support for npcx I3C DMA mode. ```
/content/code_sandbox/drivers/i3c/Kconfig.npcx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
333
```unknown # I3C configuration options # # menuconfig I3C bool "Improved Inter-Integrated Circuit (I3C) bus drivers" help Enable I3C Driver Configuration if I3C module = I3C module-str = i3c source "subsys/logging/Kconfig.template.log_config" config I3C_SHELL bool "I3C Shell" depends on SHELL help Enable I3C Shell. The I3C shell supports info, bus recovery, CCC, I3C read and write operations. config I3C_USE_GROUP_ADDR bool "Use Group Addresses" default y help Enable this to use group addresses if supported by the controllers and target devices. Say Y if unsure. menuconfig I3C_USE_IBI bool "Use In-Band Interrupt (IBI)" default y help Enable this to use In-Band Interrupt (IBI). Say Y if unsure. if I3C_USE_IBI config I3C_IBI_MAX_PAYLOAD_SIZE int "Maximum IBI Payload Size" default 16 help Maximum IBI payload size. menuconfig I3C_IBI_WORKQUEUE bool "Use IBI Workqueue" help Use global workqueue for processing IBI. This is enabled by driver if needed. if I3C_IBI_WORKQUEUE config I3C_IBI_WORKQUEUE_STACK_SIZE int "IBI workqueue stack size" default 1024 help Stack size for the IBI global workqueue. config I3C_IBI_WORKQUEUE_PRIORITY int "IBI workqueue thread priority" default -1 help Thread priority for the IBI global workqueue. config I3C_IBI_WORKQUEUE_LENGTH int "IBI workqueue queue length" default 8 help Define the maximum number of IBIs that can be queued in the workqueue. config I3C_IBI_WORKQUEUE_VERBOSE_DEBUG bool "Verbose debug messages for IBI workqueue" help This turns on verbose debug for the IBI workqueue when logging level is set to DEBUG, and prints the IBI payload. endif # I3C_IBI_WORKQUEUE endif # I3C_USE_IBI comment "Initialization Priority" config I3C_CONTROLLER_INIT_PRIORITY int "I3C Controller Init Priority" # Default is just after CONFIG_KERNEL_INIT_PRIORITY_DEVICE default 50 help This is for setting up I3C controller device driver instance and also to perform bus initialization (e.g. dynamic address assignment).
Note that this needs to be done before the device driver instances of the connected I2C and I3C devices start initializing those devices. This is because some devices may not be addressable until addresses are assigned by the controller. config I3C_INIT_RSTACT bool "Perform Reset Action During Bus Initialization" default y help This determines whether the bus initialization routine sends a reset action command to I3C targets. comment "Device Drivers" rsource "Kconfig.nxp" rsource "Kconfig.cdns" rsource "Kconfig.npcx" rsource "Kconfig.test" endif # I3C ```
/content/code_sandbox/drivers/i3c/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
702
```c
/* * */
#include <zephyr/kernel.h>
#include <zephyr/init.h>
#include <zephyr/drivers/i3c.h>
#include <zephyr/logging/log.h>

LOG_MODULE_DECLARE(i3c, CONFIG_I3C_LOG_LEVEL);

/* Statically allocated array of IBI work item nodes */
static struct i3c_ibi_work i3c_ibi_work_nodes[CONFIG_I3C_IBI_WORKQUEUE_LENGTH];

/* Stack for the dedicated IBI workqueue thread. */
static K_KERNEL_STACK_DEFINE(i3c_ibi_work_q_stack,
			     CONFIG_I3C_IBI_WORKQUEUE_STACK_SIZE);

/* IBI workqueue */
static struct k_work_q i3c_ibi_work_q;

/* Free list of unused work nodes; nodes move from here to the workqueue on
 * enqueue and back after the handler finishes.
 * NOTE(review): the enqueue paths and the handler both manipulate this slist
 * without an explicit lock - presumably enqueue happens in ISR context and
 * the handler runs with higher-priority preemption excluded; confirm against
 * the callers before relying on this.
 */
static sys_slist_t i3c_ibi_work_nodes_free;

/* Submit one prepared work node to the IBI workqueue. */
static inline int ibi_work_submit(struct i3c_ibi_work *ibi_node)
{
	return k_work_submit_to_queue(&i3c_ibi_work_q, &ibi_node->work);
}

/*
 * Queue a caller-built IBI work item.
 *
 * The caller's struct is copied into a node from the free list, so the
 * caller's copy may live on the stack.
 *
 * @return 0 on success, -ENOMEM if no free node is available,
 *         or a k_work_submit_to_queue() error.
 */
int i3c_ibi_work_enqueue(struct i3c_ibi_work *ibi_work)
{
	sys_snode_t *node;
	struct i3c_ibi_work *ibi_node;
	int ret;

	node = sys_slist_get(&i3c_ibi_work_nodes_free);
	if (node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* Cast assumes the slist node is at offset 0 of struct i3c_ibi_work
	 * - TODO confirm against the struct declaration in the header.
	 */
	ibi_node = (struct i3c_ibi_work *)node;

	(void)memcpy(ibi_node, ibi_work, sizeof(*ibi_node));

	ret = ibi_work_submit(ibi_node);
	if (ret >= 0) {
		/* k_work_submit_to_queue() returns >=0 on success. */
		ret = 0;
	}

out:
	return ret;
}

/*
 * Queue a target-interrupt IBI for deferred processing.
 *
 * @param target      Descriptor of the target that raised the IBI.
 * @param payload     Optional IBI payload bytes (may be NULL).
 * @param payload_len Number of payload bytes. NOTE(review): not checked
 *                    against the node's payload capacity here - callers are
 *                    expected to respect CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE;
 *                    confirm.
 *
 * @return 0 on success, -ENOMEM if no free node, or submit error.
 */
int i3c_ibi_work_enqueue_target_irq(struct i3c_device_desc *target,
				    uint8_t *payload, size_t payload_len)
{
	sys_snode_t *node;
	struct i3c_ibi_work *ibi_node;
	int ret;

	node = sys_slist_get(&i3c_ibi_work_nodes_free);
	if (node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ibi_node = (struct i3c_ibi_work *)node;

	ibi_node->type = I3C_IBI_TARGET_INTR;
	ibi_node->target = target;
	ibi_node->payload.payload_len = payload_len;

	if ((payload != NULL) && (payload_len > 0U)) {
		(void)memcpy(&ibi_node->payload.payload[0], payload, payload_len);
	}

	ret = ibi_work_submit(ibi_node);
	if (ret >= 0) {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Queue a hot-join IBI; the handler will run DAA on @p dev.
 *
 * @return 0 on success, -ENOMEM if no free node, or submit error.
 */
int i3c_ibi_work_enqueue_hotjoin(const struct device *dev)
{
	sys_snode_t *node;
	struct i3c_ibi_work *ibi_node;
	int ret;

	node = sys_slist_get(&i3c_ibi_work_nodes_free);
	if (node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ibi_node = (struct i3c_ibi_work *)node;

	ibi_node->type = I3C_IBI_HOTJOIN;
	ibi_node->controller = dev;
	ibi_node->payload.payload_len = 0;

	ret = ibi_work_submit(ibi_node);
	if (ret >= 0) {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Queue an arbitrary callback to run on the IBI workqueue thread.
 *
 * @return 0 on success, -ENOMEM if no free node, or submit error.
 */
int i3c_ibi_work_enqueue_cb(const struct device *dev, k_work_handler_t work_cb)
{
	sys_snode_t *node;
	struct i3c_ibi_work *ibi_node;
	int ret;

	node = sys_slist_get(&i3c_ibi_work_nodes_free);
	if (node == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	ibi_node = (struct i3c_ibi_work *)node;

	ibi_node->type = I3C_IBI_WORKQUEUE_CB;
	ibi_node->controller = dev;
	ibi_node->work_cb = work_cb;

	ret = ibi_work_submit(ibi_node);
	if (ret >= 0) {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Workqueue handler: dispatch one IBI node by type.
 *
 * On -EBUSY from the target callback or DAA the node is re-submitted for a
 * later retry; otherwise the node is returned to the free list.
 */
static void i3c_ibi_work_handler(struct k_work *work)
{
	struct i3c_ibi_work *ibi_node =
		CONTAINER_OF(work, struct i3c_ibi_work, work);
	struct i3c_ibi_payload *payload;
	int ret = 0;

	if (IS_ENABLED(CONFIG_I3C_IBI_WORKQUEUE_VERBOSE_DEBUG) &&
	    ((uint32_t)ibi_node->type <= I3C_IBI_TYPE_MAX)) {
		LOG_DBG("Processing IBI work %p (type %d, len %u)",
			ibi_node, (int)ibi_node->type,
			ibi_node->payload.payload_len);

		if (ibi_node->payload.payload_len > 0U) {
			LOG_HEXDUMP_DBG(&ibi_node->payload.payload[0],
					ibi_node->payload.payload_len,
					"IBI Payload");
		}
	}

	switch (ibi_node->type) {
	case I3C_IBI_TARGET_INTR:
		/* Pass NULL rather than a zero-length payload struct. */
		if (ibi_node->payload.payload_len != 0U) {
			payload = &ibi_node->payload;
		} else {
			payload = NULL;
		}

		ret = ibi_node->target->ibi_cb(ibi_node->target, payload);
		if ((ret != 0) && (ret != -EBUSY)) {
			LOG_ERR("IBI work %p cb returns %d", ibi_node, ret);
		}
		break;
	case I3C_IBI_HOTJOIN:
		/* Hot-join: assign a dynamic address to the new target. */
		ret = i3c_do_daa(ibi_node->controller);
		if ((ret != 0) && (ret != -EBUSY)) {
			LOG_ERR("i3c_do_daa returns %d", ret);
		}
		break;
	case I3C_IBI_WORKQUEUE_CB:
		if (ibi_node->work_cb != NULL) {
			ibi_node->work_cb(work);
		}
		break;
	case I3C_IBI_CONTROLLER_ROLE_REQUEST:
		/* TODO: Add support for controller role request */
		__fallthrough;
	default:
		/* Unknown IBI type: do nothing */
		LOG_DBG("Cannot process IBI type %d", (int)ibi_node->type);
		break;
	}

	if (ret == -EBUSY) {
		/* Retry if bus is busy. */
		if (ibi_work_submit(ibi_node) < 0) {
			LOG_ERR("Error re-adding IBI work %p", ibi_node);
		}
	} else {
		/* Add the now processed node back to the free list */
		sys_slist_append(&i3c_ibi_work_nodes_free,
				 (sys_snode_t *)ibi_node);
	}
}

/*
 * One-time init: populate the free list, bind the shared handler to every
 * node, and start the IBI workqueue thread.
 */
static int i3c_ibi_work_q_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "i3c_ibi_workq",
		.no_yield = true,
	};

	/* Init the linked list of work item nodes */
	sys_slist_init(&i3c_ibi_work_nodes_free);

	for (int i = 0; i < ARRAY_SIZE(i3c_ibi_work_nodes); i++) {
		i3c_ibi_work_nodes[i].work.handler = i3c_ibi_work_handler;

		sys_slist_append(&i3c_ibi_work_nodes_free,
				 (sys_snode_t *)&i3c_ibi_work_nodes[i]);
	}

	/* Start the workqueue */
	k_work_queue_start(&i3c_ibi_work_q, i3c_ibi_work_q_stack,
			   K_KERNEL_STACK_SIZEOF(i3c_ibi_work_q_stack),
			   CONFIG_I3C_IBI_WORKQUEUE_PRIORITY, &cfg);

	return 0;
}

SYS_INIT(i3c_ibi_work_q_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
```
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,648
```c /* * */ /*132*/ #define DT_DRV_COMPAT nuvoton_npcx_i3c #include <string.h> #include <zephyr/device.h> #include <zephyr/irq.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/i3c.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/reset.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(npcx_i3c, CONFIG_I3C_LOG_LEVEL); #define NPCX_I3C_CHK_TIMEOUT_US 10000 /* Timeout for checking register status */ #define I3C_SCL_PP_FREQ_MAX_MHZ 12500000 #define I3C_SCL_OD_FREQ_MAX_MHZ 4170000 #define I3C_BUS_TLOW_PP_MIN_NS 24 /* T_LOW period in push-pull mode */ #define I3C_BUS_THigh_PP_MIN_NS 24 /* T_High period in push-pull mode */ #define I3C_BUS_TLOW_OD_MIN_NS 200 /* T_LOW period in open-drain mode */ #define PPBAUD_DIV_MAX (BIT(GET_FIELD_SZ(NPCX_I3C_MCONFIG_PPBAUD)) - 1) /* PPBAUD divider max */ #define DAA_TGT_INFO_SZ 0x8 /* 8 bytes = PID(6) + BCR(1) + DCR(1) */ #define BAMATCH_DIV 0x4 /* BAMATCH = APB4_CLK divided by four */ /* Default maximum time we allow for an I3C transfer */ #define I3C_TRANS_TIMEOUT_MS K_MSEC(100) #define MCLKD_FREQ_45_MHZ MHZ(45) #define I3C_STATUS_CLR_MASK \ (BIT(NPCX_I3C_MSTATUS_MCTRLDONE) | BIT(NPCX_I3C_MSTATUS_COMPLETE) | \ BIT(NPCX_I3C_MSTATUS_IBIWON) | BIT(NPCX_I3C_MSTATUS_NOWCNTLR)) #define HDR_DDR_CMD_AND_CRC_SZ_WORD 0x2 /* 2 words = Command(1 word) + CRC(1 word) */ #define HDR_RD_CMD 0x80 /* Supported I3C MCLKD frequency */ enum npcx_i3c_speed { NPCX_I3C_BUS_SPEED_45MHZ, }; /* Operation type */ enum npcx_i3c_oper_state { NPCX_I3C_IDLE, NPCX_I3C_WR, NPCX_I3C_RD, }; /* I3C timing configuration for each i3c speed */ struct npcx_i3c_timing_cfg { uint8_t ppbaud; /* Push-Pull high period */ uint8_t pplow; /* Push-Pull low period */ uint8_t odhpp; /* Open-Drain high period */ uint8_t odbaud; /* Open-Drain low period */ }; /* Recommended I3C timing values are based on MCLKD 45 MHz */ static const struct npcx_i3c_timing_cfg npcx_def_speed_cfg[] = { 
/* PP = 12.5 mhz, OD = 4.17 Mhz */ [NPCX_I3C_BUS_SPEED_45MHZ] = {.ppbaud = 1, .pplow = 0, .odhpp = 1, .odbaud = 4}, }; struct npcx_i3c_config { /* Common I3C Driver Config */ struct i3c_driver_config common; /* Pointer to controller registers. */ struct i3c_reg *base; /* Pointer to the clock device. */ const struct device *clock_dev; /* Reset controller */ struct reset_dt_spec reset; /* Clock control subsys related struct. */ struct npcx_clk_cfg clock_subsys; /* Reference clock to determine 1 s bus available time */ struct npcx_clk_cfg ref_clk_subsys; /* Pointer to pin control device. */ const struct pinctrl_dev_config *pincfg; /* Interrupt configuration function. */ void (*irq_config_func)(const struct device *dev); struct { uint32_t i3c_pp_scl_hz; /* I3C push pull clock frequency in Hz. */ uint32_t i3c_od_scl_hz; /* I3C open drain clock frequency in Hz. */ } clocks; #ifdef CONFIG_I3C_NPCX_DMA struct npcx_clk_cfg mdma_clk_subsys; struct mdma_reg *mdma_base; #endif }; struct npcx_i3c_data { struct i3c_driver_data common; /* Common i3c driver data */ struct k_mutex lock_mutex; /* Mutex of i3c controller */ struct k_sem sync_sem; /* Semaphore used for synchronization */ struct k_sem ibi_lock_sem; /* Semaphore used for ibi */ enum npcx_i3c_oper_state oper_state; /* controller operation state */ #ifdef CONFIG_I3C_USE_IBI struct { /* List of addresses used in the MIBIRULES register. */ uint8_t addr[5]; /* Number of valid addresses in MIBIRULES. */ uint8_t num_addr; /* True if all addresses have MSB set. */ bool msb; /* * True if all target devices require mandatory byte * for IBI. 
*/ bool has_mandatory_byte; } ibi; #endif }; static void npcx_i3c_mutex_lock(const struct device *dev) { struct npcx_i3c_data *const data = dev->data; k_mutex_lock(&data->lock_mutex, K_FOREVER); } static void npcx_i3c_mutex_unlock(const struct device *dev) { struct npcx_i3c_data *const data = dev->data; k_mutex_unlock(&data->lock_mutex); } #ifdef CONFIG_I3C_NPCX_DMA static void i3c_ctrl_notify(const struct device *dev) { struct npcx_i3c_data *const data = dev->data; k_sem_give(&data->sync_sem); } static int i3c_ctrl_wait_completion(const struct device *dev) { struct npcx_i3c_data *const data = dev->data; return k_sem_take(&data->sync_sem, I3C_TRANS_TIMEOUT_MS); } static enum npcx_i3c_oper_state get_oper_state(const struct device *dev) { struct npcx_i3c_data *const data = dev->data; return data->oper_state; } #endif /* CONFIG_I3C_NPCX_DMA */ static void set_oper_state(const struct device *dev, enum npcx_i3c_oper_state state) { struct npcx_i3c_data *const data = dev->data; data->oper_state = state; } /* * brief: Wait for status bit done and clear the status * * param[in] inst Pointer to I3C register. * * return 0, success * -ETIMEDOUT: check status timeout. 
*/ static inline int npcx_i3c_status_wait_clear(struct i3c_reg *inst, uint8_t bit_offset) { if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, bit_offset), NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) { return -ETIMEDOUT; } inst->MSTATUS = BIT(bit_offset); /* W1C */ return 0; } static inline uint32_t npcx_i3c_state_get(struct i3c_reg *inst) { return GET_FIELD(inst->MSTATUS, NPCX_I3C_MSTATUS_STATE); } static inline void npcx_i3c_interrupt_all_disable(struct i3c_reg *inst) { uint32_t intmask = inst->MINTSET; inst->MINTCLR = intmask; } static inline void npcx_i3c_interrupt_enable(struct i3c_reg *inst, uint32_t mask) { inst->MINTSET = mask; } static bool npcx_i3c_has_error(struct i3c_reg *inst) { if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_ERRWARN)) { LOG_ERR("ERROR: MSTATUS 0x%08x MERRWARN 0x%08x", inst->MSTATUS, inst->MERRWARN); return true; } return false; } static inline void npcx_i3c_status_clear_all(struct i3c_reg *inst) { uint32_t mask = I3C_STATUS_CLR_MASK; inst->MSTATUS = mask; } static inline void npcx_i3c_errwarn_clear_all(struct i3c_reg *inst) { inst->MERRWARN = inst->MERRWARN; } static inline void npcx_i3c_fifo_flush(struct i3c_reg *inst) { inst->MDATACTRL |= (BIT(NPCX_I3C_MDATACTRL_FLUSHTB) | BIT(NPCX_I3C_MDATACTRL_FLUSHFB)); } /* * brief: Send request and check the request is valid * * param[in] inst Pointer to I3C register. * * return 0, success * -ETIMEDOUT check MCTRLDONE timeout. * -ENOSYS invalid use of request. 
*/ static inline int npcx_i3c_send_request(struct i3c_reg *inst, uint32_t mctrl_val) { inst->MCTRL = mctrl_val; if (npcx_i3c_status_wait_clear(inst, NPCX_I3C_MSTATUS_MCTRLDONE) != 0) { return -ETIMEDOUT; } /* Check invalid use of request */ if (IS_BIT_SET(inst->MERRWARN, NPCX_I3C_MERRWARN_INVERQ)) { LOG_ERR("%s: Invalid request, merrwarn: %#x", __func__, inst->MERRWARN); return -ENOSYS; } return 0; } /* Start DAA procedure and continue the DAA with a Repeated START */ static inline int npcx_i3c_request_daa(struct i3c_reg *inst) { uint32_t val = 0; int ret; /* Set IBI response NACK while processing DAA */ SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_NACK); /* Send DAA request */ SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_PROCESSDAA); ret = npcx_i3c_send_request(inst, val); if (ret != 0) { LOG_ERR("Request DAA error, %d", ret); return ret; } return 0; } /* Tell controller to start auto IBI */ static inline int npcx_i3c_request_auto_ibi(struct i3c_reg *inst) { uint32_t val = 0; int ret; SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_ACK); SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_AUTOIBI); ret = npcx_i3c_send_request(inst, val); if (ret != 0) { LOG_ERR("Request auto ibi error, %d", ret); return ret; } return 0; } /* * brief: Controller emit start and send address * * param[in] inst Pointer to I3C register. * param[in] addr Dyamic address for xfer or 0x7E for CCC command. * param[in] op_type Request type. * param[in] is_read Read(true) or write(false) operation. * param[in] read_sz Read size in bytes. * If op_tye is HDR-DDR, the read_sz must be the number of words. 
* * return 0, success * else, error */ static int npcx_i3c_request_emit_start(struct i3c_reg *inst, uint8_t addr, enum npcx_i3c_mctrl_type op_type, bool is_read, size_t read_sz) { uint32_t mctrl = 0; int ret; /* Set request and target address*/ SET_FIELD(mctrl, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_EMITSTARTADDR); /* Set operation type */ SET_FIELD(mctrl, NPCX_I3C_MCTRL_TYPE, op_type); /* Set IBI response NACK in emit start */ SET_FIELD(mctrl, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_NACK); /* Set dynamic address */ SET_FIELD(mctrl, NPCX_I3C_MCTRL_ADDR, addr); /* Set read(1) or write(0) */ if (is_read) { mctrl |= BIT(NPCX_I3C_MCTRL_DIR); SET_FIELD(mctrl, NPCX_I3C_MCTRL_RDTERM, read_sz); /* Set read length */ } else { mctrl &= ~BIT(NPCX_I3C_MCTRL_DIR); } ret = npcx_i3c_send_request(inst, mctrl); if (ret != 0) { LOG_ERR("Request start error, %d", ret); return ret; } /* Check NACK after MCTRLDONE is get */ if (IS_BIT_SET(inst->MERRWARN, NPCX_I3C_MERRWARN_NACK)) { LOG_DBG("Address nacked"); return -ENODEV; } return 0; } /* * brief: Controller emit STOP. * * This emits STOP when controller is in NORMACT state. * * param[in] inst Pointer to I3C register. * * return 0 success * -ECANCELED i3c state not as expected. * -ETIMEDOUT check MCTRLDONE timeout. * -ENOSYS invalid use of request. 
 */
static inline int npcx_i3c_request_emit_stop(struct i3c_reg *inst)
{
	uint32_t val = 0;
	int ret;
	uint32_t i3c_state = npcx_i3c_state_get(inst);

	/* Make sure we are in a state where we can emit STOP */
	if (i3c_state == MSTATUS_STATE_IDLE) {
		LOG_WRN("Request stop in idle state, state= %#x", i3c_state);
		return -ECANCELED;
	}

	SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_EMITSTOP);

	ret = npcx_i3c_send_request(inst, val);
	if (ret != 0) {
		LOG_ERR("Request stop error, %d", ret);
		return ret;
	}

	return 0;
}

/* Force the controller out of HDR-DDR mode (valid only in MSGDDR state). */
static inline int npcx_i3c_request_hdr_exit(struct i3c_reg *inst)
{
	uint32_t val = 0;
	uint32_t state;
	int ret;

	/* Before sending the HDR exit command, check the HDR mode */
	state = npcx_i3c_state_get(inst);
	if (state != MSTATUS_STATE_MSGDDR) {
		LOG_ERR("%s, state error: %#x", __func__, state);
		return -EPERM;
	}

	SET_FIELD(val, NPCX_I3C_MCTRL_TYPE, MCTRL_TYPE_HDR_EXIT);
	SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_FORCEEXIT);

	ret = npcx_i3c_send_request(inst, val);
	if (ret != 0) {
		LOG_ERR("Request hdr exit error %d", ret);
		return ret;
	}

	return 0;
}

/* Terminate the current transfer: STOP for SDR, HDR exit for HDR-DDR. */
static inline int npcx_i3c_xfer_stop(struct i3c_reg *inst)
{
	uint32_t state;
	int ret;

	state = npcx_i3c_state_get(inst);
	LOG_DBG("Current working state=%d", state);

	switch (state) {
	case MSTATUS_STATE_NORMACT: /* SDR */
		ret = npcx_i3c_request_emit_stop(inst);
		break;
	case MSTATUS_STATE_MSGDDR: /* HDR-DDR */
		ret = npcx_i3c_request_hdr_exit(inst);
		break;
	default: /* Not supported */
		ret = -ENOTSUP;
		LOG_WRN("xfer_stop state not supported, state:%d", state);
		break;
	}

	return ret;
}

/* Respond NACK to an in-band interrupt request. */
static inline int npcx_i3c_ibi_respond_nack(struct i3c_reg *inst)
{
	uint32_t val = 0;
	int ret;

	SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_NACK);
	SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_IBIACKNACK);

	ret = npcx_i3c_send_request(inst, val);
	if (ret != 0) {
		LOG_ERR("Request ibi_rsp nack error, %d", ret);
		return ret;
	}

	return 0;
}

/* Respond ACK to an in-band interrupt request. */
static inline int npcx_i3c_ibi_respond_ack(struct i3c_reg *inst)
{
	uint32_t val = 0;
	int ret;

	SET_FIELD(val, NPCX_I3C_MCTRL_IBIRESP, MCTRL_IBIRESP_ACK);
	SET_FIELD(val, NPCX_I3C_MCTRL_REQUEST, MCTRL_REQUEST_IBIACKNACK);

	ret = npcx_i3c_send_request(inst, val);
	if (ret != 0) {
		LOG_ERR("Request ibi_rsp ack error %d", ret);
		return ret;
	}

	return 0;
}

/*
 * brief: Find a registered I3C target device.
 *
 * This returns the I3C device descriptor of the I3C device
 * matching the incoming id.
 *
 * param[in] dev  Pointer to controller device driver instance.
 * param[in] id   Pointer to I3C device ID.
 *
 * return see i3c_device_find.
 */
static inline struct i3c_device_desc *npcx_i3c_device_find(const struct device *dev,
							   const struct i3c_device_id *id)
{
	const struct npcx_i3c_config *config = dev->config;

	return i3c_dev_list_find(&config->common.dev_list, id);
}

/*
 * brief: Perform bus recovery.
 *
 * param[in] dev  Pointer to controller device driver instance.
 *
 * return 0 success, otherwise error
 */
static int npcx_i3c_recover_bus(const struct device *dev)
{
	const struct npcx_i3c_config *config = dev->config;
	struct i3c_reg *inst = config->base;

	/*
	 * If the controller is in NORMACT state, tells it to emit STOP
	 * so it can return to IDLE, or is ready to clear any pending
	 * target initiated IBIs.
	 */
	if (npcx_i3c_state_get(inst) == MSTATUS_STATE_NORMACT) {
		npcx_i3c_request_emit_stop(inst);
	};

	/* Exhaust all target initiated IBI */
	while (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_TGTSTART)) {
		/* Tell the controller to perform auto IBI. */
		npcx_i3c_request_auto_ibi(inst);

		if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE),
			     NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) {
			break;
		}

		/* Once auto IBI is done, discard bytes in FIFO. */
		while (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_RXPEND)) {
			/* Flush FIFO as long as RXPEND is set. */
			npcx_i3c_fifo_flush(inst);
		}

		/*
		 * There might be other IBIs waiting.
		 * So pause a bit to let other targets initiates
		 * their IBIs.
		 */
		k_busy_wait(100);
	}

	/* Check IDLE state */
	if (WAIT_FOR((npcx_i3c_state_get(inst) == MSTATUS_STATE_IDLE), NPCX_I3C_CHK_TIMEOUT_US,
		     NULL) == false) {
		return -EBUSY;
	}

	return 0;
}

/* Clear all status/error flags and flush both FIFOs before a new transfer. */
static inline void npcx_i3c_xfer_reset(struct i3c_reg *inst)
{
	npcx_i3c_status_clear_all(inst);
	npcx_i3c_errwarn_clear_all(inst);
	npcx_i3c_fifo_flush(inst);
}

/*
 * brief: Perform one write transaction.
 *
 * This writes all data in buf to TX FIFO or time out
 * waiting for FIFO spaces.
 *
 * param[in] inst       Pointer to controller registers.
 * param[in] buf        Buffer containing data to be sent.
 * param[in] buf_sz     Number of bytes in buf to send.
 * param[in] no_ending  True, not including ending byte in message.
 *                      False, including ending byte in message.
 *
 * return Number of bytes written, or negative if error.
 */
static int npcx_i3c_xfer_write_fifo(struct i3c_reg *inst, uint8_t *buf, uint8_t buf_sz,
				    bool no_ending)
{
	int offset = 0;
	int remaining = buf_sz;

	while (remaining > 0) {
		/* Check tx fifo not full */
		if (WAIT_FOR(!IS_BIT_SET(inst->MDATACTRL, NPCX_I3C_MDATACTRL_TXFULL),
			     NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) {
			LOG_DBG("Check tx fifo not full timed out");
			return -ETIMEDOUT;
		}

		if ((remaining > 1) || no_ending) {
			inst->MWDATAB = (uint32_t)buf[offset];
		} else {
			inst->MWDATABE = (uint32_t)buf[offset]; /* Set last byte */
		}

		offset += 1;
		remaining -= 1;
	}

	return offset;
}

/*
 * brief: Perform read transaction.
 *
 * This reads from RX FIFO until COMPLETE bit is set in MSTATUS
 * or time out.
 *
 * param[in] inst   Pointer to controller registers.
 * param[in] buf    Buffer to store data.
 * param[in] rd_sz  Number of bytes to read.
 *
 * return Number of bytes read, or negative if error.
 */
static int npcx_i3c_xfer_read_fifo(struct i3c_reg *inst, uint8_t *buf, uint8_t rd_sz)
{
	bool is_done = false;
	int offset = 0;

	while (is_done == false) {
		/* Check message is terminated */
		if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE)) {
			is_done = true;
		}

		/* Check I3C bus error */
		if (npcx_i3c_has_error(inst)) {
			/* Check timeout */
			if (IS_BIT_SET(inst->MERRWARN, NPCX_I3C_MERRWARN_TIMEOUT)) {
				LOG_WRN("%s: ERR: timeout", __func__);
			}
			/* Write back to clear the raised warnings (W1C) */
			inst->MERRWARN = inst->MERRWARN;
			return -EIO;
		}

		/* Check rx not empty */
		if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_RXPEND)) {
			/* Receive all the data in this round.
			 * Read in a tight loop to reduce chance of losing
			 * FIFO data when the i3c speed is high.
			 */
			while (offset < rd_sz) {
				if (GET_FIELD(inst->MDATACTRL, NPCX_I3C_MDATACTRL_RXCOUNT) == 0) {
					break;
				}
				buf[offset++] = (uint8_t)inst->MRDATAB;
			}
		}
	}

	return offset;
}

#ifdef CONFIG_I3C_NPCX_DMA
/*
 * brief: Perform DMA write transaction.
 *
 * For write end, use the interrupt generated by COMPLETE bit in MSTATUS register.
 *
 * param[in] dev     Pointer to controller device driver instance.
 * param[in] buf     Buffer to store data.
 * param[in] buf_sz  Number of bytes to read.
 *
 * return Number of bytes read, or negative if error.
* */ static int npcx_i3c_xfer_write_fifo_dma(const struct device *dev, uint8_t *buf, uint8_t buf_sz) { const struct npcx_i3c_config *config = dev->config; struct i3c_reg *i3c_inst = config->base; struct mdma_reg *mdma_inst = config->mdma_base; int ret; set_oper_state(dev, NPCX_I3C_WR); /* Enable I3C MDMA write for one frame */ SET_FIELD(i3c_inst->MDMACTRL, NPCX_I3C_MDMACTRL_DMATB, MDMA_DMATB_EN_ONE_FRAME); i3c_inst->MINTSET |= BIT(NPCX_I3C_MINTCLR_COMPLETE); /* Enable I3C complete interrupt */ /* Write Operation (MDMA CH_1) */ mdma_inst->MDMA_TCNT1 = buf_sz; /* Set MDMA transfer count */ mdma_inst->MDMA_SRCB1 = (uint32_t)buf; /* Set source address */ mdma_inst->MDMA_CTL1 |= BIT(NPCX_MDMA_CTL_MDMAEN); /* Start DMA transfer */ /* Wait I3C COMPLETE */ ret = i3c_ctrl_wait_completion(dev); if (ret < 0) { LOG_DBG("Check complete time out, buf_size:%d", buf_sz); goto out_wr_fifo_dma; } /* Check and clear DMA TC after complete */ if (!IS_BIT_SET(mdma_inst->MDMA_CTL1, NPCX_MDMA_CTL_TC)) { LOG_DBG("DMA busy, TC=%d", IS_BIT_SET(mdma_inst->MDMA_CTL1, NPCX_MDMA_CTL_TC)); ret = -EBUSY; goto out_wr_fifo_dma; } mdma_inst->MDMA_CTL1 &= ~BIT(NPCX_MDMA_CTL_TC); /* Clear TC, W0C */ ret = buf_sz - mdma_inst->MDMA_CTCNT1; /* Set transferred count */ LOG_DBG("Write cnt=%d", ret); out_wr_fifo_dma: i3c_inst->MINTCLR |= BIT(NPCX_I3C_MINTCLR_COMPLETE); /* Disable I3C complete interrupt */ npcx_i3c_fifo_flush(i3c_inst); set_oper_state(dev, NPCX_I3C_IDLE); return ret; } /* * brief: Perform DMA read transaction. * (Data width used for DMA transfers is "byte") * * For read end, use the MDMA end-of-transfer interrupt(SIEN bit) * instead of using the I3CI interrupt generated by COMPLETE bit in MSTATUS register. * * param[in] dev Pointer to controller device driver instance. * param[in] buf Buffer to store data. * param[in] buf_sz Number of bytes to read. * * return Number of bytes read, or negative if error. 
* */ static int npcx_i3c_xfer_read_fifo_dma(const struct device *dev, uint8_t *buf, uint8_t buf_sz) { const struct npcx_i3c_config *config = dev->config; struct i3c_reg *i3c_inst = config->base; struct mdma_reg *mdma_inst = config->mdma_base; int ret; set_oper_state(dev, NPCX_I3C_RD); /* Enable DMA until DMA is disabled by setting DMAFB to 00 */ SET_FIELD(i3c_inst->MDMACTRL, NPCX_I3C_MDMACTRL_DMAFB, MDMA_DMAFB_EN_MANUAL); /* Read Operation (MDMA CH_0) */ mdma_inst->MDMA_TCNT0 = buf_sz; /* Set MDMA transfer count */ mdma_inst->MDMA_DSTB0 = (uint32_t)buf; /* Set destination address */ mdma_inst->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_SIEN); /* Enable stop interrupt */ mdma_inst->MDMA_CTL0 |= BIT(NPCX_MDMA_CTL_MDMAEN); /* Start DMA transfer */ /* Wait MDMA TC */ ret = i3c_ctrl_wait_completion(dev); if (ret < 0) { LOG_DBG("Check DMA done time out"); } else { ret = buf_sz - mdma_inst->MDMA_CTCNT0; /* Set transferred count */ LOG_DBG("Read cnt=%d", ret); } mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_SIEN); /* Disable stop interrupt */ /* Disable I3C MDMA read */ SET_FIELD(i3c_inst->MDMACTRL, NPCX_I3C_MDMACTRL_DMAFB, MDMA_DMAFB_DISABLE); npcx_i3c_fifo_flush(i3c_inst); set_oper_state(dev, NPCX_I3C_IDLE); return ret; } /* * brief: Perform one transfer transaction by DMA. * (Support SDR and HDR-DDR) * * param[in] inst Pointer to controller registers. * param[in] addr Target address. * param[in] op_type Request type. * param[in] buf Buffer for data to be sent or received. * param[in] buf_sz Buffer size in bytes. * param[in] is_read True if this is a read transaction, false if write. * param[in] emit_start True if START is needed before read/write. * param[in] emit_stop True if STOP is needed after read/write. * * return Number of bytes read/written, or negative if error. 
*/ static int npcx_i3c_do_one_xfer_dma(const struct device *dev, uint8_t addr, enum npcx_i3c_mctrl_type op_type, uint8_t *buf, size_t buf_sz, bool is_read, bool emit_start, bool emit_stop, uint8_t hdr_cmd) { const struct npcx_i3c_config *config = dev->config; struct i3c_reg *inst = config->base; int ret = 0; bool is_hdr_ddr = (op_type == NPCX_I3C_MCTRL_TYPE_I3C_HDR_DDR) ? true : false; size_t rd_len = buf_sz; npcx_i3c_status_clear_all(inst); npcx_i3c_errwarn_clear_all(inst); /* Check HDR-DDR moves data by words */ if (is_hdr_ddr && (buf_sz % 2 != 0)) { LOG_ERR("%s, HDR-DDR data length should be even, len=%#x", __func__, buf_sz); return -EINVAL; } /* Emit START if needed */ if (emit_start) { /* * For HDR-DDR mode read, RDTERM also includes one word (16 bits) for CRC. * For example, to read 8 bytes, set RDTERM to 6. * (1 word HDR-DDR command + 4 words data + 1 word for CRC) */ if (is_hdr_ddr) { if (is_read) { /* The unit of rd_len is "word" in DDR mode */ rd_len /= sizeof(uint16_t); /* byte to word */ rd_len += HDR_DDR_CMD_AND_CRC_SZ_WORD; hdr_cmd |= HDR_RD_CMD; } else { hdr_cmd &= ~HDR_RD_CMD; } /* Write the command code for the HDR-DDR message */ inst->MWDATAB = hdr_cmd; } ret = npcx_i3c_request_emit_start(inst, addr, op_type, is_read, rd_len); if (ret != 0) { LOG_ERR("%s: emit start fail", __func__); goto out_do_one_xfer_dma; } } /* No data to be transferred */ if ((buf == NULL) || (buf_sz == 0)) { goto out_do_one_xfer_dma; } /* Select read or write operation */ if (is_read) { ret = npcx_i3c_xfer_read_fifo_dma(dev, buf, buf_sz); } else { ret = npcx_i3c_xfer_write_fifo_dma(dev, buf, buf_sz); } if (ret < 0) { LOG_ERR("%s: %s fifo fail", __func__, is_read ? 
"read" : "write"); goto out_do_one_xfer_dma; } /* Check I3C bus error */ if (npcx_i3c_has_error(inst)) { ret = -EIO; LOG_ERR("%s: I3C bus error", __func__); } out_do_one_xfer_dma: /* Emit STOP or exit DDR if needed */ if (emit_stop) { npcx_i3c_xfer_stop(inst); } return ret; } #endif /* End of CONFIG_I3C_NPCX_DMA */ /* * brief: Perform one transfer transaction. * (Support SDR only) * * param[in] inst Pointer to controller registers. * param[in] addr Target address. * param[in] op_type Request type. * param[in] buf Buffer for data to be sent or received. * param[in] buf_sz Buffer size in bytes. * param[in] is_read True if this is a read transaction, false if write. * param[in] emit_start True if START is needed before read/write. * param[in] emit_stop True if STOP is needed after read/write. * param[in] no_ending True if not to signal end of write message. * * return Number of bytes read/written, or negative if error. */ static int npcx_i3c_do_one_xfer(struct i3c_reg *inst, uint8_t addr, enum npcx_i3c_mctrl_type op_type, uint8_t *buf, size_t buf_sz, bool is_read, bool emit_start, bool emit_stop, bool no_ending) { int ret = 0; npcx_i3c_status_clear_all(inst); npcx_i3c_errwarn_clear_all(inst); /* Emit START if needed */ if (emit_start) { ret = npcx_i3c_request_emit_start(inst, addr, op_type, is_read, buf_sz); if (ret != 0) { LOG_ERR("%s: emit start fail", __func__); goto out_do_one_xfer; } } /* No data to be transferred */ if ((buf == NULL) || (buf_sz == 0)) { goto out_do_one_xfer; } /* Select read or write operation */ if (is_read) { ret = npcx_i3c_xfer_read_fifo(inst, buf, buf_sz); } else { ret = npcx_i3c_xfer_write_fifo(inst, buf, buf_sz, no_ending); } if (ret < 0) { LOG_ERR("%s: %s fifo fail", __func__, is_read ? "read" : "write"); goto out_do_one_xfer; } /* Check message complete if is a read transaction or * ending byte of a write transaction. 
*/ if (is_read || !no_ending) { /* Wait message transfer complete */ if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE), NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) { LOG_DBG("Wait COMPLETE timed out, addr 0x%02x, buf_sz %u", addr, buf_sz); ret = -ETIMEDOUT; emit_stop = true; goto out_do_one_xfer; } inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_COMPLETE); /* W1C */ } /* Check I3C bus error */ if (npcx_i3c_has_error(inst)) { ret = -EIO; LOG_ERR("%s: I3C bus error", __func__); } out_do_one_xfer: /* Emit STOP if needed */ if (emit_stop) { npcx_i3c_request_emit_stop(inst); } return ret; } /* * brief: Transfer messages in I3C mode. * * see i3c_transfer * * param[in] dev Pointer to device driver instance. * param[in] target Pointer to target device descriptor. * param[in] msgs Pointer to I3C messages. * param[in] num_msgs Number of messages to transfers. * * return see i3c_transfer */ static int npcx_i3c_transfer(const struct device *dev, struct i3c_device_desc *target, struct i3c_msg *msgs, uint8_t num_msgs) { const struct npcx_i3c_config *config = dev->config; struct i3c_reg *inst = config->base; struct npcx_i3c_data *data = dev->data; uint32_t intmask; int xfered_len, ret = 0; bool send_broadcast = true; bool is_xfer_done = true; enum npcx_i3c_mctrl_type op_type; if (msgs == NULL) { return -EINVAL; } if (target->dynamic_addr == 0U) { return -EINVAL; } npcx_i3c_mutex_lock(dev); /* Check bus in idle state */ if (WAIT_FOR((npcx_i3c_state_get(inst) == MSTATUS_STATE_IDLE), NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) { LOG_ERR("%s: xfer state error: %d", __func__, npcx_i3c_state_get(inst)); npcx_i3c_mutex_unlock(dev); return -ETIMEDOUT; } /* Disable interrupt */ intmask = inst->MINTSET; npcx_i3c_interrupt_all_disable(inst); npcx_i3c_xfer_reset(inst); /* Iterate over all the messages */ for (int i = 0; i < num_msgs; i++) { /* * Check message is read or write operaion. * For write operation, check the last data byte of a transmit message. 
*/ bool is_read = (msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ; bool no_ending = false; /* * Emit start if this is the first message or that * the RESTART flag is set in message. */ bool emit_start = (i == 0) || ((msgs[i].flags & I3C_MSG_RESTART) == I3C_MSG_RESTART); bool emit_stop = (msgs[i].flags & I3C_MSG_STOP) == I3C_MSG_STOP; /* * The controller requires special treatment of last byte of * a write message. Since the API permits having a bunch of * write messages without RESTART in between, this is just some * logic to determine whether to treat the last byte of this * message to be the last byte of a series of write mssages. * If not, tell the write function not to treat it that way. */ if (!is_read && !emit_stop && ((i + 1) != num_msgs)) { bool next_is_write = (msgs[i + 1].flags & I3C_MSG_RW_MASK) == I3C_MSG_WRITE; bool next_is_restart = ((msgs[i + 1].flags & I3C_MSG_RESTART) == I3C_MSG_RESTART); /* Check next msg is still write operation and not including Sr */ if (next_is_write && !next_is_restart) { no_ending = true; } } #ifdef CONFIG_I3C_NPCX_DMA /* Current DMA not support multi-message write */ if (!is_read && no_ending) { LOG_ERR("I3C DMA transfer not support multi-message write"); ret = -EINVAL; break; } #endif /* Check message SDR or HDR mode */ bool is_msg_hdr = (msgs[i].flags & I3C_MSG_HDR) == I3C_MSG_HDR; /* Set emit start type SDR or HDR-DDR mode */ if (!is_msg_hdr || msgs[i].hdr_mode == 0) { op_type = NPCX_I3C_MCTRL_TYPE_I3C; /* Set operation type SDR */ /* * SDR, send boradcast header(0x7E) * * Two ways to do read/write transfer (SDR mode). * 1. [S] + [0x7E] + [address] + [data] + [Sr or P] * 2. [S] + [address] + [data] + [Sr or P] * * Send broadcast header(0x7E) on first transfer or after a STOP, * unless flag is set not to. 
*/ if (!(msgs[i].flags & I3C_MSG_NBCH) && send_broadcast) { ret = npcx_i3c_request_emit_start(inst, I3C_BROADCAST_ADDR, NPCX_I3C_MCTRL_TYPE_I3C, false, 0); if (ret < 0) { LOG_ERR("%s: emit start of broadcast addr failed, error " "(%d)", __func__, ret); break; } send_broadcast = false; } } else if ((data->common.ctrl_config.supported_hdr & I3C_MSG_HDR_DDR) && (msgs[i].hdr_mode == I3C_MSG_HDR_DDR) && is_msg_hdr) { op_type = NPCX_I3C_MCTRL_TYPE_I3C_HDR_DDR; /* Set operation type DDR */ /* Check HDR-DDR moves data by words */ if ((msgs[i].len % 2) != 0x0) { LOG_ERR("HDR-DDR data length should be number of words , xfer " "len=%d", msgs[i].num_xfer); ret = -EINVAL; break; } } else { LOG_ERR("%s: %s controller HDR Mode %#x\r\n" "msg HDR mode %#x, msg flag %#x", __func__, dev->name, data->common.ctrl_config.supported_hdr, msgs[i].hdr_mode, msgs[i].flags); ret = -ENOTSUP; break; } #ifdef CONFIG_I3C_NPCX_DMA /* Do transfer with target device */ xfered_len = npcx_i3c_do_one_xfer_dma(dev, target->dynamic_addr, op_type, msgs[i].buf, msgs[i].len, is_read, emit_start, emit_stop, msgs[i].hdr_cmd_code); #endif if (xfered_len < 0) { LOG_ERR("%s: do xfer fail", __func__); ret = xfered_len; /* Set error code to ret */ break; } /* Write back the total number of bytes transferred */ msgs[i].num_xfer = xfered_len; if (emit_stop) { /* SDR. After a STOP, send broadcast header before next msg */ send_broadcast = true; } /* Check emit stop flag including in the final msg */ if ((i == num_msgs - 1) && (emit_stop == false)) { is_xfer_done = false; } } /* Emit stop if error occurs or stop flag not in the msg */ if ((ret != 0) || (is_xfer_done == false)) { npcx_i3c_xfer_stop(inst); } npcx_i3c_errwarn_clear_all(inst); npcx_i3c_status_clear_all(inst); npcx_i3c_interrupt_enable(inst, intmask); npcx_i3c_mutex_unlock(dev); return ret; } /* * brief: Perform Dynamic Address Assignment. * * param[in] dev Pointer to controller device driver instance. * * return 0 If successful. * -EBUSY Bus is busy. 
* -EIO General input / output error. * -ENODEV If a provisioned ID does not match to any target devices * in the registered device list. * -ENOSPC No more free addresses can be assigned to target. * -ENOSYS Dynamic address assignment is not supported by * the controller driver. */ static int npcx_i3c_do_daa(const struct device *dev) { const struct npcx_i3c_config *config = dev->config; struct npcx_i3c_data *data = dev->data; struct i3c_reg *inst = config->base; int ret = 0; uint8_t rx_buf[8]; size_t rx_count; uint32_t intmask; npcx_i3c_mutex_lock(dev); memset(rx_buf, 0xff, sizeof(rx_buf)); /* Check bus in idle state */ if (WAIT_FOR((npcx_i3c_state_get(inst) == MSTATUS_STATE_IDLE), NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) { LOG_ERR("%s: DAA state error: %d", __func__, npcx_i3c_state_get(inst)); npcx_i3c_mutex_unlock(dev); return -ETIMEDOUT; } LOG_DBG("DAA: ENTDAA"); /* Disable interrupt */ intmask = inst->MINTSET; npcx_i3c_interrupt_all_disable(inst); npcx_i3c_xfer_reset(inst); /* Emit process DAA */ if (npcx_i3c_request_daa(inst) != 0) { ret = -ETIMEDOUT; LOG_ERR("Emit process DAA error"); goto out_do_daa; } /* Loop until no more responses from devices */ do { /* Check ERRWARN bit set */ if (npcx_i3c_has_error(inst)) { ret = -EIO; LOG_ERR("DAA recv error"); break; } /* Receive Provisioned ID, BCR and DCR (total 8 bytes) */ rx_count = GET_FIELD(inst->MDATACTRL, NPCX_I3C_MDATACTRL_RXCOUNT); if (rx_count == DAA_TGT_INFO_SZ) { for (int i = 0; i < rx_count; i++) { rx_buf[i] = (uint8_t)inst->MRDATAB; } } else { /* Data count not as expected, exit DAA */ ret = -EBADMSG; LOG_DBG("Rx count not as expected %d, abort DAA", rx_count); break; } /* Start assign dynamic address */ if ((npcx_i3c_state_get(inst) == MSTATUS_STATE_DAA) && IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_BETWEEN)) { struct i3c_device_desc *target; uint16_t vendor_id; uint32_t part_no; uint64_t pid; uint8_t dyn_addr = 0; /* PID[47:33] = manufacturer ID */ vendor_id = (((uint16_t)rx_buf[0] << 8U) | 
(uint16_t)rx_buf[1]) & 0xFFFEU; /* PID[31:0] = vendor fixed falue or random value */ part_no = (uint32_t)rx_buf[2] << 24U | (uint32_t)rx_buf[3] << 16U | (uint32_t)rx_buf[4] << 8U | (uint32_t)rx_buf[5]; /* Combine into one Provisioned ID */ pid = (uint64_t)vendor_id << 32U | (uint64_t)part_no; LOG_DBG("DAA: Rcvd PID 0x%04x%08x", vendor_id, part_no); /* Find a usable address during ENTDAA */ ret = i3c_dev_list_daa_addr_helper(&data->common.attached_dev.addr_slots, &config->common.dev_list, pid, false, false, &target, &dyn_addr); if (ret != 0) { LOG_ERR("%s: Assign new DA error", __func__); break; } if (target == NULL) { LOG_INF("%s: PID 0x%04x%08x is not in registered device " "list, given dynamic address 0x%02x", dev->name, vendor_id, part_no, dyn_addr); } else { /* Update target descriptor */ target->dynamic_addr = dyn_addr; target->bcr = rx_buf[6]; target->dcr = rx_buf[7]; } /* Mark the address as I3C device */ i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, dyn_addr); /* * If the device has static address, after address assignment, * the device will not respond to the static address anymore. * So free the static one from address slots if different from * newly assigned one. 
*/ if ((target != NULL) && (target->static_addr != 0U) && (dyn_addr != target->static_addr)) { i3c_addr_slots_mark_free(&data->common.attached_dev.addr_slots, dyn_addr); } /* Emit process DAA again to send the address to the device */ inst->MWDATAB = dyn_addr; ret = npcx_i3c_request_daa(inst); if (ret != 0) { LOG_ERR("%s: Assign DA timeout", __func__); break; } LOG_DBG("PID 0x%04x%08x assigned dynamic address 0x%02x", vendor_id, part_no, dyn_addr); /* Target did not accept the assigned DA, exit DAA */ if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_NACKED)) { ret = -EFAULT; LOG_DBG("TGT NACK assigned DA %#x", dyn_addr); /* Free the reserved DA */ i3c_addr_slots_mark_free(&data->common.attached_dev.addr_slots, dyn_addr); /* 0 if address has not been assigned */ if (target != NULL) { target->dynamic_addr = 0; } break; } } /* Check all targets have been assigned DA and DAA complete */ } while ((!IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE)) && npcx_i3c_state_get(inst) != MSTATUS_STATE_IDLE); out_do_daa: /* Exit DAA mode when error occurs */ if (ret != 0) { npcx_i3c_request_emit_stop(inst); } /* Clear all flags. */ npcx_i3c_errwarn_clear_all(inst); npcx_i3c_status_clear_all(inst); /* Re-Enable I3C IRQ sources. */ npcx_i3c_interrupt_enable(inst, intmask); npcx_i3c_fifo_flush(inst); npcx_i3c_mutex_unlock(dev); return ret; } /* * brief: Send Common Command Code (CCC). * * param[in] dev Pointer to controller device driver instance. * param[in] payload Pointer to CCC payload. * * return: The same as i3c_do_ccc() * 0 If successful. * -EBUSY Bus is busy. * -EIO General Input / output error. * -EINVAL Invalid valid set in the payload structure. * -ENOSYS Not implemented. 
*/ static int npcx_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload) { const struct npcx_i3c_config *config = dev->config; int ret; struct i3c_reg *inst = config->base; uint32_t intmask; int xfered_len; if (dev == NULL || payload == NULL) { return -EINVAL; } npcx_i3c_mutex_lock(dev); /* Disable interrupt */ intmask = inst->MINTSET; npcx_i3c_interrupt_all_disable(inst); /* Clear status and flush fifo */ npcx_i3c_xfer_reset(inst); LOG_DBG("CCC[0x%02x]", payload->ccc.id); /* Write emit START and broadcast address (0x7E) */ ret = npcx_i3c_request_emit_start(inst, I3C_BROADCAST_ADDR, NPCX_I3C_MCTRL_TYPE_I3C, false, 0); if (ret < 0) { LOG_ERR("CCC[0x%02x] %s START error (%d)", payload->ccc.id, i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct", ret); goto out_do_ccc; } /* Write CCC command */ npcx_i3c_status_clear_all(inst); npcx_i3c_errwarn_clear_all(inst); xfered_len = npcx_i3c_xfer_write_fifo(inst, &payload->ccc.id, 1, payload->ccc.data_len > 0); if (xfered_len < 0) { LOG_ERR("CCC[0x%02x] %s command error (%d)", payload->ccc.id, i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct", ret); ret = xfered_len; goto out_do_ccc; } /* Write data (defining byte or data bytes) for CCC if needed */ if (payload->ccc.data_len > 0) { npcx_i3c_status_clear_all(inst); npcx_i3c_errwarn_clear_all(inst); xfered_len = npcx_i3c_xfer_write_fifo(inst, payload->ccc.data, payload->ccc.data_len, false); if (xfered_len < 0) { LOG_ERR("CCC[0x%02x] %s command payload error (%d)", payload->ccc.id, i3c_ccc_is_payload_broadcast(payload) ? 
"broadcast" : "direct", ret); ret = xfered_len; goto out_do_ccc; } /* Write back the transferred bytes */ payload->ccc.num_xfer = xfered_len; } /* Wait message transfer complete */ if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE), NPCX_I3C_CHK_TIMEOUT_US, NULL) == false) { ret = -ETIMEDOUT; LOG_DBG("Check complete timeout"); goto out_do_ccc; } inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_COMPLETE); /* W1C */ /* For direct CCC */ if (!i3c_ccc_is_payload_broadcast(payload)) { /* * If there are payload(s) for each target, * RESTART and then send payload for each target. */ for (int idx = 0; idx < payload->targets.num_targets; idx++) { struct i3c_ccc_target_payload *tgt_payload = &payload->targets.payloads[idx]; bool is_read = (tgt_payload->rnw == 1U); xfered_len = npcx_i3c_do_one_xfer( inst, tgt_payload->addr, NPCX_I3C_MCTRL_TYPE_I3C, tgt_payload->data, tgt_payload->data_len, is_read, true, false, false); if (xfered_len < 0) { LOG_ERR("CCC[0x%02x] target payload error (%d)", payload->ccc.id, ret); ret = xfered_len; goto out_do_ccc; } /* Write back the total number of bytes transferred */ tgt_payload->num_xfer = xfered_len; } } out_do_ccc: npcx_i3c_request_emit_stop(inst); npcx_i3c_interrupt_enable(inst, intmask); npcx_i3c_mutex_unlock(dev); return ret; } #ifdef CONFIG_I3C_USE_IBI /* * brief Callback to service target initiated IBIs in workqueue. * * param[in] work Pointer to k_work item. 
 */
static void npcx_i3c_ibi_work(struct k_work *work)
{
	uint8_t payload[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE];
	size_t payload_sz = 0;

	struct i3c_ibi_work *i3c_ibi_work = CONTAINER_OF(work, struct i3c_ibi_work, work);
	const struct device *dev = i3c_ibi_work->controller;
	const struct npcx_i3c_config *config = dev->config;
	struct npcx_i3c_data *data = dev->data;
	struct i3c_dev_attached_list *dev_list = &data->common.attached_dev;
	struct i3c_reg *inst = config->base;
	struct i3c_device_desc *target = NULL;
	uint32_t ibitype, ibiaddr;
	int ret;

	k_sem_take(&data->ibi_lock_sem, K_FOREVER);

	if (npcx_i3c_state_get(inst) != MSTATUS_STATE_TGTREQ) {
		LOG_DBG("IBI work %p running not because of IBI", work);
		LOG_ERR("%s: IBI not in TGTREQ state, state : %#x", __func__,
			npcx_i3c_state_get(inst));
		LOG_ERR("%s: MSTATUS 0x%08x MERRWARN 0x%08x", __func__, inst->MSTATUS,
			inst->MERRWARN);
		npcx_i3c_request_emit_stop(inst);

		goto out_ibi_work;
	};

	/* Use auto IBI to service the IBI */
	npcx_i3c_request_auto_ibi(inst);

	/* Wait for target to win address arbitration (ibitype and ibiaddr) */
	if (WAIT_FOR(IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_IBIWON), NPCX_I3C_CHK_TIMEOUT_US,
		     NULL) == false) {
		LOG_ERR("IBI work, IBIWON timeout");
		LOG_ERR("%s: MSTATUS 0x%08x MERRWARN 0x%08x", __func__, inst->MSTATUS,
			inst->MERRWARN);
		npcx_i3c_request_emit_stop(inst);

		goto out_ibi_work;
	}

	ibitype = GET_FIELD(inst->MSTATUS, NPCX_I3C_MSTATUS_IBITYPE);
	ibiaddr = GET_FIELD(inst->MSTATUS, NPCX_I3C_MSTATUS_IBIADDR);

	/* First pass: acknowledge/collect the IBI on the wire */
	switch (ibitype) {
	case MSTATUS_IBITYPE_IBI:
		ret = npcx_i3c_xfer_read_fifo(inst, &payload[0], sizeof(payload));
		if (ret >= 0) {
			payload_sz = (size_t)ret;
		} else {
			LOG_ERR("Error reading IBI payload");

			npcx_i3c_request_emit_stop(inst);

			goto out_ibi_work;
		}
		break;
	case MSTATUS_IBITYPE_HJ:
		npcx_i3c_ibi_respond_ack(inst);
		npcx_i3c_request_emit_stop(inst);
		break;
	case MSTATUS_IBITYPE_CR:
		LOG_DBG("Controller role handoff not supported");
		npcx_i3c_ibi_respond_nack(inst);
		npcx_i3c_request_emit_stop(inst);
		break;
	default:
		break;
	}

	if (npcx_i3c_has_error(inst)) {
		LOG_ERR("%s: unexpected error, ibi type:%d", __func__, ibitype);
		/*
		 * If the controller detects any errors, simply
		 * emit a STOP to abort the IBI. The target will
		 * raise IBI again if so desired.
		 */
		npcx_i3c_request_emit_stop(inst);

		goto out_ibi_work;
	}

	/* Second pass: dispatch the serviced IBI to the upper layer */
	switch (ibitype) {
	case MSTATUS_IBITYPE_IBI:
		target = i3c_dev_list_i3c_addr_find(dev_list, (uint8_t)ibiaddr);
		if (target != NULL) {
			if (i3c_ibi_work_enqueue_target_irq(target, &payload[0], payload_sz) !=
			    0) {
				LOG_ERR("Error enqueue IBI IRQ work");
			}
		} else {
			LOG_ERR("IBI (MDB) target not in the list");
		}

		/* Finishing the IBI transaction */
		npcx_i3c_request_emit_stop(inst);
		break;
	case MSTATUS_IBITYPE_HJ:
		if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) {
			LOG_ERR("Error enqueue IBI HJ work");
		}
		break;
	case MSTATUS_IBITYPE_CR:
		/* Not supported, for future use. */
		break;
	default:
		break;
	}

out_ibi_work:
	npcx_i3c_xfer_reset(inst);

	k_sem_give(&data->ibi_lock_sem);

	/* Re-enable target initiated IBI interrupt. */
	inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
}

/* Set local IBI information to IBIRULES register */
static void npcx_i3c_ibi_rules_setup(struct npcx_i3c_data *data, struct i3c_reg *inst)
{
	uint32_t ibi_rules;
	int idx;

	ibi_rules = 0;

	for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
		uint32_t addr_6bit;

		/* Extract the lower 6-bit of target address */
		addr_6bit = (uint32_t)data->ibi.addr[idx] & IBIRULES_ADDR_MSK;

		/* Shift into correct place */
		addr_6bit <<= idx * IBIRULES_ADDR_SHIFT;

		/* Put into the temporary IBI Rules register */
		ibi_rules |= addr_6bit;
	}

	if (!data->ibi.msb) {
		/* The MSB0 field is 1 if MSB is 0 */
		ibi_rules |= BIT(NPCX_I3C_IBIRULES_MSB0);
	}

	if (!data->ibi.has_mandatory_byte) {
		/* The NOBYTE field is 1 if there is no mandatory byte */
		ibi_rules |= BIT(NPCX_I3C_IBIRULES_NOBYTE);
	}

	/* Update the register */
	inst->IBIRULES = ibi_rules;

	LOG_DBG("MIBIRULES 0x%08x", ibi_rules);
}

/* Enable IBI for one target: add it to the IBIRULES table and send ENEC. */
static int npcx_i3c_ibi_enable(const struct device *dev, struct i3c_device_desc *target)
{
	const struct npcx_i3c_config *config = dev->config;
	struct npcx_i3c_data *data = dev->data;
	struct i3c_reg *inst = config->base;
	struct i3c_ccc_events i3c_events;
	uint8_t idx;
	bool msb, has_mandatory_byte;
	int ret;

	/* Check target IBI request capable */
	if (!i3c_device_is_ibi_capable(target)) {
		LOG_ERR("%s: device is not ibi capable", __func__);
		return -EINVAL;
	}

	if (data->ibi.num_addr >= ARRAY_SIZE(data->ibi.addr)) {
		/* No more free entries in the IBI Rules table */
		LOG_ERR("%s: no more free space in the IBI rules table", __func__);
		return -ENOMEM;
	}

	/* Check whether the selected target is already in the list */
	for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
		if (data->ibi.addr[idx] == target->dynamic_addr) {
			LOG_ERR("%s: selected target is already in the list", __func__);
			return -EINVAL;
		}
	}

	/* Disable controller interrupt while we configure IBI rules. */
	inst->MINTCLR = BIT(NPCX_I3C_MINTCLR_TGTSTART);

	LOG_DBG("IBI enabling for 0x%02x (BCR 0x%02x)", target->dynamic_addr, target->bcr);

	/* Check address(7-bit) MSB enable */
	msb = (target->dynamic_addr & BIT(6)) == BIT(6);
	has_mandatory_byte = i3c_ibi_has_payload(target);

	/*
	 * If there are already addresses in the table, we must
	 * check if the incoming entry is compatible with
	 * the existing ones.
	 *
	 * All targets in the list should follow the same IBI rules.
	 */
	if (data->ibi.num_addr > 0) {
		/*
		 * 1. All devices in the table must all use mandatory
		 *    bytes, or do not.
		 *
		 * 2. Each address in entry only captures the lowest 6-bit.
		 *    The MSB (7th bit) is captured separated in another bit
		 *    in the register. So all addresses must have the same MSB.
		 */
		if ((has_mandatory_byte != data->ibi.has_mandatory_byte) ||
		    (msb != data->ibi.msb)) {
			ret = -EINVAL;
			LOG_ERR("%s: New IBI does not have same mandatory byte or msb"
				" as previous IBI",
				__func__);
			goto out_ibi_enable;
		}

		/* Find an empty address slot */
		for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
			if (data->ibi.addr[idx] == 0U) {
				break;
			}
		}

		if (idx >= ARRAY_SIZE(data->ibi.addr)) {
			ret = -ENOTSUP;
			LOG_ERR("Cannot support more IBIs");
			goto out_ibi_enable;
		}
	} else {
		/*
		 * If the incoming address is the first in the table,
		 * it dictates future compatibilities.
		 */
		data->ibi.has_mandatory_byte = has_mandatory_byte;
		data->ibi.msb = msb;

		idx = 0;
	}

	data->ibi.addr[idx] = target->dynamic_addr;
	data->ibi.num_addr += 1U;

	npcx_i3c_ibi_rules_setup(data, inst);

	/* Enable target IBI event by ENEC command */
	i3c_events.events = I3C_CCC_EVT_INTR;
	ret = i3c_ccc_do_events_set(target, true, &i3c_events);
	if (ret != 0) {
		LOG_ERR("Error sending IBI ENEC for 0x%02x (%d)", target->dynamic_addr, ret);
	}

out_ibi_enable:
	if (data->ibi.num_addr > 0U) {
		/*
		 * If there is more than 1 target in the list,
		 * enable controller to raise interrupt when a target
		 * initiates IBI.
		 */
		inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
	}

	return ret;
}

/* Disable IBI for one target: remove it from the IBIRULES table and send DISEC. */
static int npcx_i3c_ibi_disable(const struct device *dev, struct i3c_device_desc *target)
{
	const struct npcx_i3c_config *config = dev->config;
	struct npcx_i3c_data *data = dev->data;
	struct i3c_reg *inst = config->base;
	struct i3c_ccc_events i3c_events;
	int ret;
	int idx;

	if (!i3c_device_is_ibi_capable(target)) {
		LOG_ERR("%s: device is not ibi capable", __func__);
		return -EINVAL;
	}

	for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) {
		if (target->dynamic_addr == data->ibi.addr[idx]) {
			break;
		}
	}

	if (idx == ARRAY_SIZE(data->ibi.addr)) {
		LOG_ERR("%s: target is not in list of registered addresses", __func__);
		return -ENODEV;
	}

	/* Disable controller interrupt while we configure IBI rules. */
	inst->MINTCLR = BIT(NPCX_I3C_MINTCLR_TGTSTART);

	/* Clear the ibi rule data */
	data->ibi.addr[idx] = 0U;
	data->ibi.num_addr -= 1U;

	/* Disable target IBI */
	i3c_events.events = I3C_CCC_EVT_INTR;
	ret = i3c_ccc_do_events_set(target, false, &i3c_events);
	if (ret != 0) {
		LOG_ERR("Error sending IBI DISEC for 0x%02x (%d)", target->dynamic_addr, ret);
	}

	npcx_i3c_ibi_rules_setup(data, inst);

	if (data->ibi.num_addr > 0U) {
		/*
		 * Enable controller to raise interrupt when a target
		 * initiates IBI.
		 */
		inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
	}

	return ret;
}
#endif /* CONFIG_I3C_USE_IBI */

/* I3C controller interrupt service routine: DMA completion and IBI start events. */
static void npcx_i3c_isr(const struct device *dev)
{
	const struct npcx_i3c_config *config = dev->config;
	struct i3c_reg *inst = config->base;

#ifdef CONFIG_I3C_NPCX_DMA
	struct mdma_reg *mdma_inst = config->mdma_base;

	if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_COMPLETE)) {
		inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_COMPLETE); /* W1C */

		/* MDMA write */
		if (get_oper_state(dev) == NPCX_I3C_WR) {
			return i3c_ctrl_notify(dev);
		}
	}

	if (IS_BIT_SET(mdma_inst->MDMA_CTL0, NPCX_MDMA_CTL_TC)) {
		mdma_inst->MDMA_CTL0 &= ~BIT(NPCX_MDMA_CTL_TC); /* W0C */

		/* MDMA read */
		if (get_oper_state(dev) == NPCX_I3C_RD) {
			return i3c_ctrl_notify(dev);
		}
	}
#endif /* CONFIG_I3C_NPCX_DMA */

#ifdef CONFIG_I3C_USE_IBI
	int ret;

	/* Target start detected */
	if (IS_BIT_SET(inst->MSTATUS, NPCX_I3C_MSTATUS_TGTSTART)) {
		LOG_DBG("ISR TGTSTART !");

		/* Disable further target initiated IBI interrupt */
		inst->MINTCLR = BIT(NPCX_I3C_MINTCLR_TGTSTART);
		/* Clear TGTSTART interrupt */
		inst->MSTATUS = BIT(NPCX_I3C_MSTATUS_TGTSTART);

		/* Handle IBI in workqueue */
		ret = i3c_ibi_work_enqueue_cb(dev, npcx_i3c_ibi_work);
		if (ret < 0) {
			LOG_ERR("Enqueuing ibi work fail, ret %d", ret);
			inst->MINTSET = BIT(NPCX_I3C_MINTSET_TGTSTART);
		}
	}
#endif /* CONFIG_I3C_USE_IBI */
}

static int npcx_i3c_get_scl_config(struct npcx_i3c_timing_cfg *cfg, uint32_t i3c_src_clk,
				   uint32_t pp_baudrate_hz, uint32_t od_baudrate_hz)
{
	uint32_t i3c_div, freq;
	uint32_t ppbaud, odbaud;
	uint32_t pplow_ns, odlow_ns;

	if (cfg == NULL) {
		LOG_ERR("Freq config NULL");
		return -EINVAL;
	}

	if ((pp_baudrate_hz == 0) || (pp_baudrate_hz > I3C_SCL_PP_FREQ_MAX_MHZ) ||
	    (od_baudrate_hz == 0) || (od_baudrate_hz > I3C_SCL_OD_FREQ_MAX_MHZ)) {
		LOG_ERR("I3C PP_SCL should within 12.5 Mhz, input: %d", pp_baudrate_hz);
		LOG_ERR("I3C OD_SCL should within 4.17 Mhz, input: %d", od_baudrate_hz);
		return -EINVAL;
	}

	/* Fixed PPLOW = 0 to achieve 50% duty cycle */
	/* pp_freq = ((f_mclkd /
2) / (PPBAUD+1)) */ freq = i3c_src_clk / 2UL; i3c_div = freq / pp_baudrate_hz; i3c_div = (i3c_div == 0UL) ? 1UL : i3c_div; if (freq / i3c_div > pp_baudrate_hz) { i3c_div++; } if (i3c_div > PPBAUD_DIV_MAX) { LOG_ERR("PPBAUD out of range"); return -EINVAL; } ppbaud = i3c_div - 1UL; freq /= i3c_div; /* Check PP low period in spec (should be the same as PPHIGH) */ pplow_ns = (uint32_t)(NSEC_PER_SEC / (2UL * freq)); if (pplow_ns < I3C_BUS_TLOW_PP_MIN_NS) { LOG_ERR("PPLOW ns out of spec"); return -EINVAL; } /* Fixed odhpp = 1 configuration */ /* odFreq = (2*freq) / (ODBAUD + 2), 1 <= ODBAUD <= 255 */ i3c_div = (2UL * freq) / od_baudrate_hz; i3c_div = i3c_div < 2UL ? 2UL : i3c_div; if ((2UL * freq / i3c_div) > od_baudrate_hz) { i3c_div++; } odbaud = i3c_div - 2UL; freq = (2UL * freq) / i3c_div; /* For I2C usage in the future */ /* Check OD low period in spec */ odlow_ns = (odbaud + 1UL) * pplow_ns; if (odlow_ns < I3C_BUS_TLOW_OD_MIN_NS) { LOG_ERR("ODBAUD ns out of spec"); return -EINVAL; } cfg->pplow = 0; cfg->odhpp = 1; cfg->ppbaud = ppbaud; cfg->odbaud = odbaud; return 0; } static int npcx_i3c_freq_init(const struct device *dev) { const struct npcx_i3c_config *config = dev->config; struct npcx_i3c_data *data = dev->data; struct i3c_reg *inst = config->base; const struct device *const clk_dev = config->clock_dev; struct i3c_config_controller *ctrl_config = &data->common.ctrl_config; uint32_t scl_pp = ctrl_config->scl.i3c; uint32_t scl_od = config->clocks.i3c_od_scl_hz; struct npcx_i3c_timing_cfg timing_cfg; uint32_t mclkd; int ret; ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->clock_subsys, &mclkd); if (ret != 0x0) { LOG_ERR("Get I3C source clock fail %d", ret); return -EINVAL; } LOG_DBG("MCLKD: %d", mclkd); LOG_DBG("SCL_PP_FEQ MAX: %d", I3C_SCL_PP_FREQ_MAX_MHZ); LOG_DBG("SCL_OD_FEQ MAX: %d", I3C_SCL_OD_FREQ_MAX_MHZ); LOG_DBG("scl_pp: %d", scl_pp); LOG_DBG("scl_od: %d", scl_od); LOG_DBG("hdr: %d", ctrl_config->supported_hdr); /* MCLKD = MCLK / 
I3C_DIV(1 or 2) * MCLKD must between 40 mhz to 50 mhz. */ if (mclkd == MCLKD_FREQ_45_MHZ) { /* Set default I3C_SCL configuration */ timing_cfg = npcx_def_speed_cfg[NPCX_I3C_BUS_SPEED_45MHZ]; } else { LOG_ERR("Unsupported MCLKD freq for %s.", dev->name); return -EINVAL; } ret = npcx_i3c_get_scl_config(&timing_cfg, mclkd, scl_pp, scl_od); if (ret != 0x0) { LOG_ERR("Adjust I3C frequency fail"); return -EINVAL; } /* Apply SCL_PP and SCL_OD */ SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPBAUD, timing_cfg.ppbaud); SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPLOW, timing_cfg.pplow); SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_ODBAUD, timing_cfg.odbaud); if (timing_cfg.odhpp != 0) { inst->MCONFIG |= BIT(NPCX_I3C_MCONFIG_ODHPP); } else { inst->MCONFIG &= ~BIT(NPCX_I3C_MCONFIG_ODHPP); } LOG_DBG("ppbaud: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPBAUD)); LOG_DBG("odbaud: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_ODBAUD)); LOG_DBG("pplow: %d", GET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_PPLOW)); LOG_DBG("odhpp: %d", IS_BIT_SET(inst->MCONFIG, NPCX_I3C_MCONFIG_ODHPP)); return 0; } static int npcx_i3c_cntlr_init(const struct device *dev) { const struct npcx_i3c_config *config = dev->config; struct i3c_reg *inst = config->base; const struct device *const clk_dev = config->clock_dev; uint32_t apb4_rate; uint8_t bamatch; int ret; /* Reset I3C module */ reset_line_toggle_dt(&config->reset); /* Disable all interrupts */ npcx_i3c_interrupt_all_disable(inst); /* Initial baudrate. 
PPLOW=1, PPBAUD, ODHPP=1, ODBAUD */ if (npcx_i3c_freq_init(dev) != 0x0) { return -EINVAL; } /* Enable main controller mode */ SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_CTRENA, MCONFIG_CTRENA_ON); /* Enable external high-keeper */ SET_FIELD(inst->MCONFIG, NPCX_I3C_MCONFIG_HKEEP, MCONFIG_HKEEP_EXT_SDA_SCL); /* Enable open-drain stop */ inst->MCONFIG |= BIT(NPCX_I3C_MCONFIG_ODSTOP); /* Enable timeout */ inst->MCONFIG &= ~BIT(NPCX_I3C_MCONFIG_DISTO); /* Flush tx and tx FIFO buffer */ npcx_i3c_fifo_flush(inst); /* Set bus available match value in target register */ ret = clock_control_get_rate(clk_dev, (clock_control_subsys_t)&config->ref_clk_subsys, &apb4_rate); LOG_DBG("APB4_CLK: %d", apb4_rate); if (ret != 0x0) { LOG_ERR("Get APb4 source clock fail %d", ret); return -EINVAL; } bamatch = DIV_ROUND_UP(apb4_rate, MHZ(1)); bamatch = DIV_ROUND_UP(bamatch, BAMATCH_DIV); LOG_DBG("BAMATCH: %d", bamatch); SET_FIELD(inst->CONFIG, NPCX_I3C_CONFIG_BAMATCH, bamatch); return 0; } static int npcx_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config) { struct npcx_i3c_data *dev_data = dev->data; struct i3c_config_controller *cntlr_cfg = config; if (type == I3C_CONFIG_CONTROLLER) { /* * Check for valid configuration parameters. * Currently, must be the primary controller. 
*/ if ((cntlr_cfg->is_secondary) || (cntlr_cfg->scl.i3c == 0U)) { return -EINVAL; } /* Save requested config to dev */ (void)memcpy(&dev_data->common.ctrl_config, cntlr_cfg, sizeof(*cntlr_cfg)); /* Controller init */ return npcx_i3c_cntlr_init(dev); } LOG_ERR("Support controller mode only"); return -EINVAL; } static int npcx_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config) { struct npcx_i3c_data *data = dev->data; if ((type != I3C_CONFIG_CONTROLLER) || (config == NULL)) { return -EINVAL; } (void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config)); return 0; } static int npcx_i3c_init(const struct device *dev) { const struct npcx_i3c_config *config = dev->config; struct npcx_i3c_data *data = dev->data; struct i3c_config_controller *ctrl_config = &data->common.ctrl_config; const struct device *const clk_dev = config->clock_dev; int ret; /* Check clock device ready */ if (!device_is_ready(clk_dev)) { LOG_ERR("%s Clk device not ready", clk_dev->name); return -ENODEV; } /* Set I3C_PD operational */ ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clock_subsys); if (ret < 0) { LOG_ERR("Turn on I3C clock fail %d", ret); return ret; } #ifdef CONFIG_I3C_NPCX_DMA ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->mdma_clk_subsys); if (ret < 0) { LOG_ERR("Turn on I3C MDMA clock fail %d", ret); return ret; } #endif /* Apply pin-muxing */ ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("Apply pinctrl fail %d", ret); return ret; } k_mutex_init(&data->lock_mutex); k_sem_init(&data->sync_sem, 0, 1); k_sem_init(&data->ibi_lock_sem, 1, 1); ret = i3c_addr_slots_init(dev); if (ret != 0) { LOG_ERR("Addr slots init fail %d", ret); return ret; } ctrl_config->is_secondary = false; /* Currently can only act as primary controller. */ ctrl_config->supported_hdr = I3C_MSG_HDR_DDR; /* HDR-DDR mode is supported. 
*/ ctrl_config->scl.i3c = config->clocks.i3c_pp_scl_hz; /* Set I3C frequency */ ret = npcx_i3c_configure(dev, I3C_CONFIG_CONTROLLER, ctrl_config); if (ret != 0) { LOG_ERR("Apply i3c_configure() fail %d", ret); return ret; } /* Just in case the bus is not in idle. */ ret = npcx_i3c_recover_bus(dev); if (ret != 0) { LOG_ERR("Apply i3c_recover_bus() fail %d", ret); return ret; } /* Configure interrupt */ config->irq_config_func(dev); /* Initialize driver status machine */ set_oper_state(dev, NPCX_I3C_IDLE); /* Check I3C target device exist in device tree */ if (config->common.dev_list.num_i3c > 0) { /* Perform bus initialization */ ret = i3c_bus_init(dev, &config->common.dev_list); if (ret != 0) { LOG_ERR("Apply i3c_bus_init() fail %d", ret); return ret; } } return 0; } static const struct i3c_driver_api npcx_i3c_driver_api = { .configure = npcx_i3c_configure, .config_get = npcx_i3c_config_get, .recover_bus = npcx_i3c_recover_bus, .do_daa = npcx_i3c_do_daa, .do_ccc = npcx_i3c_do_ccc, .i3c_device_find = npcx_i3c_device_find, .i3c_xfers = npcx_i3c_transfer, #ifdef CONFIG_I3C_USE_IBI .ibi_enable = npcx_i3c_ibi_enable, .ibi_disable = npcx_i3c_ibi_disable, #endif }; #define I3C_NPCX_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static void npcx_i3c_config_func_##id(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(id), DT_INST_IRQ(id, priority), npcx_i3c_isr, \ DEVICE_DT_INST_GET(id), 0); \ irq_enable(DT_INST_IRQN(id)); \ }; \ static struct i3c_device_desc npcx_i3c_device_array_##id[] = I3C_DEVICE_ARRAY_DT_INST(id); \ static struct i3c_i2c_device_desc npcx_i3c_i2c_device_array_##id[] = \ I3C_I2C_DEVICE_ARRAY_DT_INST(id); \ static const struct npcx_i3c_config npcx_i3c_config_##id = { \ .base = (struct i3c_reg *)DT_INST_REG_ADDR(id), \ .clock_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE), \ .reset = RESET_DT_SPEC_INST_GET(id), \ .clock_subsys = NPCX_DT_CLK_CFG_ITEM_BY_NAME(id, mclkd), \ .ref_clk_subsys = NPCX_DT_CLK_CFG_ITEM_BY_NAME(id, apb4), \ .irq_config_func = 
npcx_i3c_config_func_##id, \ .common.dev_list.i3c = npcx_i3c_device_array_##id, \ .common.dev_list.num_i3c = ARRAY_SIZE(npcx_i3c_device_array_##id), \ .common.dev_list.i2c = npcx_i3c_i2c_device_array_##id, \ .common.dev_list.num_i2c = ARRAY_SIZE(npcx_i3c_i2c_device_array_##id), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ .clocks.i3c_pp_scl_hz = DT_INST_PROP_OR(id, i3c_scl_hz, 0), \ .clocks.i3c_od_scl_hz = DT_INST_PROP_OR(id, i3c_od_scl_hz, 0), \ IF_ENABLED(CONFIG_I3C_NPCX_DMA, ( \ .mdma_clk_subsys = NPCX_DT_CLK_CFG_ITEM_BY_IDX(id, 2), \ )) \ IF_ENABLED(CONFIG_I3C_NPCX_DMA, ( \ .mdma_base = (struct mdma_reg *)DT_INST_REG_ADDR_BY_IDX(id, 1), \ )) \ }; \ static struct npcx_i3c_data npcx_i3c_data_##id; \ DEVICE_DT_INST_DEFINE(id, npcx_i3c_init, NULL, &npcx_i3c_data_##id, &npcx_i3c_config_##id, \ POST_KERNEL, CONFIG_I3C_CONTROLLER_INIT_PRIORITY, \ &npcx_i3c_driver_api); DT_INST_FOREACH_STATUS_OKAY(I3C_NPCX_DEVICE) ```
/content/code_sandbox/drivers/i3c/i3c_npcx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
19,073
```c /* * */ #define DT_DRV_COMPAT nxp_mcux_i3c #include <string.h> #include <zephyr/device.h> #include <zephyr/irq.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/sys_io.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/i3c.h> #include <zephyr/drivers/pinctrl.h> /* * This is from NXP HAL which contains register bits macros * which are used in this driver. */ #include <fsl_i3c.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(i3c_mcux, CONFIG_I3C_MCUX_LOG_LEVEL); #define I3C_MCTRL_REQUEST_NONE I3C_MCTRL_REQUEST(0) #define I3C_MCTRL_REQUEST_EMIT_START_ADDR I3C_MCTRL_REQUEST(1) #define I3C_MCTRL_REQUEST_EMIT_STOP I3C_MCTRL_REQUEST(2) #define I3C_MCTRL_REQUEST_IBI_ACK_NACK I3C_MCTRL_REQUEST(3) #define I3C_MCTRL_REQUEST_PROCESS_DAA I3C_MCTRL_REQUEST(4) #define I3C_MCTRL_REQUEST_FORCE_EXIT I3C_MCTRL_REQUEST(6) #define I3C_MCTRL_REQUEST_AUTO_IBI I3C_MCTRL_REQUEST(7) #define I3C_MCTRL_IBIRESP_ACK I3C_MCTRL_IBIRESP(0) #define I3C_MCTRL_IBIRESP_ACK_AUTO I3C_MCTRL_IBIRESP(0) #define I3C_MCTRL_IBIRESP_NACK I3C_MCTRL_IBIRESP(1) #define I3C_MCTRL_IBIRESP_ACK_WITH_BYTE I3C_MCTRL_IBIRESP(2) #define I3C_MCTRL_IBIRESP_MANUAL I3C_MCTRL_IBIRESP(3) #define I3C_MCTRL_TYPE_I3C I3C_MCTRL_TYPE(0) #define I3C_MCTRL_TYPE_I2C I3C_MCTRL_TYPE(1) #define I3C_MCTRL_DIR_WRITE I3C_MCTRL_DIR(0) #define I3C_MCTRL_DIR_READ I3C_MCTRL_DIR(1) #define I3C_MSTATUS_STATE_IDLE I3C_MSTATUS_STATE(0) #define I3C_MSTATUS_STATE_SLVREQ I3C_MSTATUS_STATE(1) #define I3C_MSTATUS_STATE_MSGSDR I3C_MSTATUS_STATE(2) #define I3C_MSTATUS_STATE_NORMACT I3C_MSTATUS_STATE(3) #define I3C_MSTATUS_STATE_MSGDDR I3C_MSTATUS_STATE(4) #define I3C_MSTATUS_STATE_DAA I3C_MSTATUS_STATE(5) #define I3C_MSTATUS_STATE_IBIACK I3C_MSTATUS_STATE(6) #define I3C_MSTATUS_STATE_IBIRCV I3C_MSTATUS_STATE(7) #define I3C_MSTATUS_IBITYPE_NONE I3C_MSTATUS_IBITYPE(0) #define I3C_MSTATUS_IBITYPE_IBI I3C_MSTATUS_IBITYPE(1) #define I3C_MSTATUS_IBITYPE_MR I3C_MSTATUS_IBITYPE(2) #define I3C_MSTATUS_IBITYPE_HJ 
I3C_MSTATUS_IBITYPE(3) #define I3C_MAX_STOP_RETRIES 5 struct mcux_i3c_config { /** Common I3C Driver Config */ struct i3c_driver_config common; /** Pointer to controller registers. */ I3C_Type *base; /** Pointer to the clock device. */ const struct device *clock_dev; /** Clock control subsys related struct. */ clock_control_subsys_t clock_subsys; /** Pointer to pin control device. */ const struct pinctrl_dev_config *pincfg; /** Interrupt configuration function. */ void (*irq_config_func)(const struct device *dev); /** Disable open drain high push pull */ bool disable_open_drain_high_pp; }; struct mcux_i3c_data { /** Common I3C Driver Data */ struct i3c_driver_data common; /** Mutex to serialize access */ struct k_mutex lock; /** Condvar for waiting for bus to be in IDLE state */ struct k_condvar condvar; struct { /** * Clock divider for use when generating clock for * I3C Push-pull mode. */ uint8_t clk_div_pp; /** * Clock divider for use when generating clock for * I3C open drain mode. */ uint8_t clk_div_od; /** * Clock divider for the slow time control clock. */ uint8_t clk_div_tc; /** I3C open drain clock frequency in Hz. */ uint32_t i3c_od_scl_hz; } clocks; #ifdef CONFIG_I3C_USE_IBI struct { /** List of addresses used in the MIBIRULES register. */ uint8_t addr[5]; /** Number of valid addresses in MIBIRULES. */ uint8_t num_addr; /** True if all addresses have MSB set. */ bool msb; /** * True if all target devices require mandatory byte * for IBI. */ bool has_mandatory_byte; } ibi; #endif }; /** * @brief Read a register and test for bit matches with timeout. * * Please be aware that this uses @see k_busy_wait. * * @param reg Pointer to 32-bit Register. * @param mask Mask to the register value. * @param match Value to match for masked register value. * @param timeout_us Timeout in microsecond before bailing out. * * @retval 0 If masked register value matches before time out. * @retval -ETIMEDOUT Timedout without matching. 
*/ static int reg32_poll_timeout(volatile uint32_t *reg, uint32_t mask, uint32_t match, uint32_t timeout_us) { /* * These polling checks are typically satisfied * quickly (some sub-microseconds) so no extra * delay between checks. */ if (!WAIT_FOR((sys_read32((mm_reg_t)reg) & mask) == match, timeout_us, /*nop*/)) { return -ETIMEDOUT; } return 0; } /** * @brief Update register value. * * @param reg Pointer to 32-bit Register. * @param mask Mask to the register value. * @param update Value to be updated in register. */ static inline void reg32_update(volatile uint32_t *reg, uint32_t mask, uint32_t update) { uint32_t val = sys_read32((mem_addr_t)reg); val &= ~mask; val |= (update & mask); sys_write32(val, (mem_addr_t)reg); } /** * @brief Test if masked register value has certain value. * * @param reg Pointer to 32-bit register. * @param mask Mask to test. * @param match Value to match. * * @return True if bits in @p mask mask matches @p match, false otherwise. */ static inline bool reg32_test_match(volatile uint32_t *reg, uint32_t mask, uint32_t match) { uint32_t val = sys_read32((mem_addr_t)reg); return (val & mask) == match; } /** * @brief Test if masked register value is the same as the mask. * * @param reg Pointer to 32-bit register. * @param mask Mask to test. * * @return True if bits in @p mask are all set, false otherwise. */ static inline bool reg32_test(volatile uint32_t *reg, uint32_t mask) { return reg32_test_match(reg, mask, mask); } /** * @breif Disable all interrupts. * * @param base Pointer to controller registers. * * @return Previous enabled interrupts. */ static uint32_t mcux_i3c_interrupt_disable(I3C_Type *base) { uint32_t intmask = base->MINTSET; base->MINTCLR = intmask; return intmask; } /** * @brief Enable interrupts according to mask. * * @param base Pointer to controller registers. * @param mask Interrupts to be enabled. 
* */ static void mcux_i3c_interrupt_enable(I3C_Type *base, uint32_t mask) { base->MINTSET = mask; } /** * @brief Check if there are any errors. * * This checks if MSTATUS has ERRWARN bit set. * * @retval True if there are any errors. * @retval False if no errors. */ static bool mcux_i3c_has_error(I3C_Type *base) { uint32_t mstatus, merrwarn; mstatus = base->MSTATUS; if ((mstatus & I3C_MSTATUS_ERRWARN_MASK) == I3C_MSTATUS_ERRWARN_MASK) { merrwarn = base->MERRWARN; /* * Note that this uses LOG_DBG() for displaying * register values for debugging. In production builds, * printing any error messages should be handled in * callers of this function. */ LOG_DBG("ERROR: MSTATUS 0x%08x MERRWARN 0x%08x", mstatus, merrwarn); return true; } return false; } /** * @brief Check if there are any errors, and if one of them is time out error. * * @retval True if controller times out on operation. * @retval False if no time out error. */ static inline bool mcux_i3c_error_is_timeout(I3C_Type *base) { if (mcux_i3c_has_error(base)) { if (reg32_test(&base->MERRWARN, I3C_MERRWARN_TIMEOUT_MASK)) { return true; } } return false; } /** * @brief Check if there are any errors, and if one of them is NACK. * * NACK is generated when: * 1. Target does not ACK the last used address. * 2. All targets do not ACK on 0x7E. * * @retval True if NACK is received. * @retval False if no NACK error. */ static inline bool mcux_i3c_error_is_nack(I3C_Type *base) { if (mcux_i3c_has_error(base)) { if (reg32_test(&base->MERRWARN, I3C_MERRWARN_NACK_MASK)) { return true; } } return false; } /** * @brief Test if certain bits are set in MSTATUS. * * @param base Pointer to controller registers. * @param mask Bits to be tested. * * @retval True if @p mask bits are set. * @retval False if @p mask bits are not set. */ static inline bool mcux_i3c_status_is_set(I3C_Type *base, uint32_t mask) { return reg32_test(&base->MSTATUS, mask); } /** * @brief Spin wait for MSTATUS bit to be set. 
* * This spins forever for the bits to be set. * * @param base Pointer to controller registers. * @param mask Bits to be tested. */ static inline void mcux_i3c_status_wait(I3C_Type *base, uint32_t mask) { /* Wait for bits to be set */ while (!mcux_i3c_status_is_set(base, mask)) { k_busy_wait(1); }; } /** * @brief Wait for MSTATUS bits to be set with time out. * * @param base Pointer to controller registers. * @param mask Bits to be tested. * @param timeout_us Timeout in microsecond before bailing out. * * @retval 0 If bits are set before time out. * @retval -ETIMEDOUT */ static inline int mcux_i3c_status_wait_timeout(I3C_Type *base, uint32_t mask, uint32_t timeout_us) { return reg32_poll_timeout(&base->MSTATUS, mask, mask, timeout_us); } /** * @brief Clear the MSTATUS bits and wait for them to be cleared. * * This spins forever for the bits to be cleared; * * @param base Pointer to controller registers. * @param mask Bits to be cleared. */ static inline void mcux_i3c_status_clear(I3C_Type *base, uint32_t mask) { /* Try to clear bit until it is cleared */ while (1) { base->MSTATUS = mask; if (!mcux_i3c_status_is_set(base, mask)) { break; } k_busy_wait(1); } } /** * @brief Clear transfer and IBI related bits in MSTATUS. * * This spins forever for those bits to be cleared; * * @see I3C_MSTATUS_MCTRLDONE_MASK * @see I3C_MSTATUS_COMPLETE_MASK * @see I3C_MSTATUS_IBIWON_MASK * @see I3C_MSTATUS_ERRWARN_MASK * * @param base Pointer to controller registers. */ static inline void mcux_i3c_status_clear_all(I3C_Type *base) { uint32_t mask = I3C_MSTATUS_MCTRLDONE_MASK | I3C_MSTATUS_COMPLETE_MASK | I3C_MSTATUS_IBIWON_MASK | I3C_MSTATUS_ERRWARN_MASK; mcux_i3c_status_clear(base, mask); } /** * @brief Clear the MSTATUS bits and wait for them to be cleared with time out. * * @param base Pointer to controller registers. * @param mask Bits to be cleared. * @param timeout_us Timeout in microsecond before bailing out. * * @retval 0 If bits are cleared before time out. 
* @retval -ETIMEDOUT */ static inline int mcux_i3c_status_clear_timeout(I3C_Type *base, uint32_t mask, uint32_t timeout_us) { bool result; base->MSTATUS = mask; /* * Status should clear quickly so no extra delays between * checks. Use the delay_stmt to retry clearing the * status by writing to the MSTATUS register. */ result = WAIT_FOR(!mcux_i3c_status_is_set(base, mask), timeout_us, base->MSTATUS = mask); if (!result) { return -ETIMEDOUT; } return 0; } /** * @brief Spin wait for MSTATUS bit to be set, and clear it afterwards. * * Note that this spins forever waiting for bits to be set, and * to be cleared. * * @see mcux_i3c_status_wait * @see mcux_i3c_status_clear * * @param base Pointer to controller registers. * @param mask Bits to be set and to be cleared; */ static inline void mcux_i3c_status_wait_clear(I3C_Type *base, uint32_t mask) { mcux_i3c_status_wait(base, mask); mcux_i3c_status_clear(base, mask); } /** * @brief Wait for MSTATUS bit to be set, and clear it afterwards, with time out. * * @see mcux_i3c_status_wait_timeout * @see mcux_i3c_status_clear_timeout * * @param base Pointer to controller registers. * @param mask Bits to be set and to be cleared. * @param timeout_us Timeout in microsecond before bailing out. * * @retval 0 If masked register value matches before time out. * @retval -ETIMEDOUT Timedout without matching. */ static inline int mcux_i3c_status_wait_clear_timeout(I3C_Type *base, uint32_t mask, uint32_t timeout_us) { int ret; ret = mcux_i3c_status_wait_timeout(base, mask, timeout_us); if (ret != 0) { goto out; } ret = mcux_i3c_status_clear_timeout(base, mask, timeout_us); out: return ret; } /** * @brief Clear the MERRWARN register. * * @param base Pointer to controller registers. */ static inline void mcux_i3c_errwarn_clear_all_nowait(I3C_Type *base) { base->MERRWARN = base->MERRWARN; } /** * @brief Tell controller to start DAA process. * * @param base Pointer to controller registers. 
*/ static inline void mcux_i3c_request_daa(I3C_Type *base) { reg32_update(&base->MCTRL, I3C_MCTRL_REQUEST_MASK | I3C_MCTRL_IBIRESP_MASK | I3C_MCTRL_RDTERM_MASK, I3C_MCTRL_REQUEST_PROCESS_DAA | I3C_MCTRL_IBIRESP_NACK); } /** * @brief Tell controller to start auto IBI. * * @param base Pointer to controller registers. */ static inline void mcux_i3c_request_auto_ibi(I3C_Type *base) { reg32_update(&base->MCTRL, I3C_MCTRL_REQUEST_MASK | I3C_MCTRL_IBIRESP_MASK | I3C_MCTRL_RDTERM_MASK, I3C_MCTRL_REQUEST_AUTO_IBI | I3C_MCTRL_IBIRESP_ACK_AUTO); /* AUTO_IBI should result in IBIWON bit being set in status */ mcux_i3c_status_wait_clear(base, I3C_MSTATUS_IBIWON_MASK); } /** * @brief Get the controller state. * * @param base Pointer to controller registers. * * @retval I3C_MSTATUS_STATE_IDLE * @retval I3C_MSTATUS_STATE_SLVREQ * @retval I3C_MSTATUS_STATE_MSGSDR * @retval I3C_MSTATUS_STATE_NORMACT * @retval I3C_MSTATUS_STATE_MSGDDR * @retval I3C_MSTATUS_STATE_DAA * @retval I3C_MSTATUS_STATE_IBIACK * @retval I3C_MSTATUS_STATE_IBIRCV */ static inline uint32_t mcux_i3c_state_get(I3C_Type *base) { uint32_t mstatus = base->MSTATUS; uint32_t state; /* Make sure we are in a state where we can emit STOP */ state = (mstatus & I3C_MSTATUS_STATE_MASK) >> I3C_MSTATUS_STATE_SHIFT; return state; } /** * @brief Wait for MSTATUS state * * @param base Pointer to controller registers. * @param state MSTATUS state to wait for. * @param step_delay_us Delay in microsecond between each read of register * (cannot be 0). * @param total_delay_us Total delay in microsecond before bailing out. * * @retval 0 If masked register value matches before time out. * @retval -ETIMEDOUT Exhausted all delays without matching. 
*/ static inline int mcux_i3c_state_wait_timeout(I3C_Type *base, uint32_t state, uint32_t step_delay_us, uint32_t total_delay_us) { uint32_t delayed = 0; int ret = -ETIMEDOUT; while (delayed <= total_delay_us) { if (mcux_i3c_state_get(base) == state) { ret = 0; break; } k_busy_wait(step_delay_us); delayed += step_delay_us; } return ret; } /** * @brief Wait for MSTATUS to be IDLE * * @param base Pointer to controller registers. */ static inline void mcux_i3c_wait_idle(struct mcux_i3c_data *dev_data, I3C_Type *base) { while (mcux_i3c_state_get(base) != I3C_MSTATUS_STATE_IDLE) { k_condvar_wait(&dev_data->condvar, &dev_data->lock, K_FOREVER); } } /** * @brief Tell controller to emit START. * * @param base Pointer to controller registers. * @param addr Target address. * @param is_i2c True if this is I2C transactions, false if I3C. * @param is_read True if this is a read transaction, false if write. * @param read_sz Number of bytes to read if @p is_read is true. * * @return 0 if successful, or negative if error. */ static int mcux_i3c_request_emit_start(I3C_Type *base, uint8_t addr, bool is_i2c, bool is_read, size_t read_sz) { uint32_t mctrl; int ret = 0; mctrl = is_i2c ? I3C_MCTRL_TYPE_I2C : I3C_MCTRL_TYPE_I3C; mctrl |= I3C_MCTRL_IBIRESP_NACK; if (is_read) { mctrl |= I3C_MCTRL_DIR_READ; /* How many bytes to read */ mctrl |= I3C_MCTRL_RDTERM(read_sz); } else { mctrl |= I3C_MCTRL_DIR_WRITE; } mctrl |= I3C_MCTRL_REQUEST_EMIT_START_ADDR | I3C_MCTRL_ADDR(addr); base->MCTRL = mctrl; /* Wait for controller to say the operation is done */ ret = mcux_i3c_status_wait_clear_timeout(base, I3C_MSTATUS_MCTRLDONE_MASK, 1000); if (ret == 0) { /* Check for NACK */ if (mcux_i3c_error_is_nack(base)) { ret = -ENODEV; } } return ret; } /** * @brief Tell controller to emit STOP. * * This emits STOP and waits for controller to get out of NORMACT, * checking for errors. * * @param base Pointer to controller registers. 
* @param wait_stop True if need to wait for controller to be * no longer in NORMACT. */ static inline int mcux_i3c_do_request_emit_stop(I3C_Type *base, bool wait_stop) { reg32_update(&base->MCTRL, I3C_MCTRL_REQUEST_MASK | I3C_MCTRL_DIR_MASK | I3C_MCTRL_RDTERM_MASK, I3C_MCTRL_REQUEST_EMIT_STOP); /* * EMIT_STOP request doesn't result in MCTRLDONE being cleared * so don't wait for it. */ if (wait_stop) { /* * Note that we don't exactly wait for I3C_MSTATUS_STATE_IDLE. * If there is an incoming IBI, it will get stuck forever * as state would be I3C_MSTATUS_STATE_SLVREQ. */ while (reg32_test_match(&base->MSTATUS, I3C_MSTATUS_STATE_MASK, I3C_MSTATUS_STATE_NORMACT)) { if (mcux_i3c_has_error(base)) { /* * A timeout error has been observed on * an EMIT_STOP request. Refman doesn't say * how that could occur but clear it * and return the error. */ if (reg32_test(&base->MERRWARN, I3C_MERRWARN_TIMEOUT_MASK)) { mcux_i3c_errwarn_clear_all_nowait(base); return -ETIMEDOUT; } return -EIO; } k_busy_wait(10); } } return 0; } /** * @brief Tell controller to emit STOP. * * This emits STOP when controller is in NORMACT state as this is * the only valid state where STOP can be emitted. This also waits * for the controller to get out of NORMACT before returning and * retries if any timeout errors occur during the emit STOP. * * @param dev_data Pointer to device driver data * @param base Pointer to controller registers. * @param wait_stop True if need to wait for controller to be * no longer in NORMACT. */ static inline void mcux_i3c_request_emit_stop(struct mcux_i3c_data *dev_data, I3C_Type *base, bool wait_stop) { size_t retries; /* * Stop is usually the last part of a transfer. * Sometimes, an error occurred before. We want to clear * it so any error as a result of emitting the stop * itself doesn't get incorrectly mixed together. 
*/ if (mcux_i3c_has_error(base)) { mcux_i3c_errwarn_clear_all_nowait(base); } /* Make sure we are in a state where we can emit STOP */ if (!reg32_test_match(&base->MSTATUS, I3C_MSTATUS_STATE_MASK, I3C_MSTATUS_STATE_NORMACT)) { return; } retries = 0; while (1) { int err = mcux_i3c_do_request_emit_stop(base, wait_stop); if (err) { if ((err == -ETIMEDOUT) && (++retries <= I3C_MAX_STOP_RETRIES)) { LOG_WRN("Timeout on emit stop, retrying"); continue; } LOG_ERR("Error waiting for stop"); return; } /* * Success. If wait_stop was true, state should now * be IDLE or possibly SLVREQ. */ if (retries) { LOG_WRN("EMIT_STOP succeeded on %u retries", retries); } break; } /* Release any threads that might have been blocked waiting for IDLE */ k_condvar_broadcast(&dev_data->condvar); } /** * @brief Tell controller to NACK the incoming IBI. * * @param base Pointer to controller registers. */ static inline void mcux_i3c_ibi_respond_nack(I3C_Type *base) { reg32_update(&base->MCTRL, I3C_MCTRL_REQUEST_MASK | I3C_MCTRL_IBIRESP_MASK, I3C_MCTRL_REQUEST_IBI_ACK_NACK | I3C_MCTRL_IBIRESP_NACK); mcux_i3c_status_wait_clear(base, I3C_MSTATUS_MCTRLDONE_MASK); } /** * @brief Tell controller to ACK the incoming IBI. * * @param base Pointer to controller registers. */ static inline void mcux_i3c_ibi_respond_ack(I3C_Type *base) { reg32_update(&base->MCTRL, I3C_MCTRL_REQUEST_MASK | I3C_MCTRL_IBIRESP_MASK, I3C_MCTRL_REQUEST_IBI_ACK_NACK | I3C_MCTRL_IBIRESP_ACK); mcux_i3c_status_wait_clear(base, I3C_MSTATUS_MCTRLDONE_MASK); } /** * @brief Get the number of bytes in RX FIFO. * * This returns the number of bytes in RX FIFO which * can be read. * * @param base Pointer to controller registers. * * @return Number of bytes in RX FIFO. */ static inline int mcux_i3c_fifo_rx_count_get(I3C_Type *base) { uint32_t mdatactrl = base->MDATACTRL; return (int)((mdatactrl & I3C_MDATACTRL_RXCOUNT_MASK) >> I3C_MDATACTRL_RXCOUNT_SHIFT); } /** * @brief Tell controller to flush both TX and RX FIFOs. 
* * @param base Pointer to controller registers. */ static inline void mcux_i3c_fifo_flush(I3C_Type *base) { base->MDATACTRL = I3C_MDATACTRL_FLUSHFB_MASK | I3C_MDATACTRL_FLUSHTB_MASK; } /** * @brief Prepare the controller for transfers. * * This is simply a wrapper to clear out status bits, * and error bits. Also this tells the controller to * flush both TX and RX FIFOs. * * @param base Pointer to controller registers. */ static inline void mcux_i3c_xfer_reset(I3C_Type *base) { mcux_i3c_status_clear_all(base); mcux_i3c_errwarn_clear_all_nowait(base); mcux_i3c_fifo_flush(base); } /** * @brief Drain RX FIFO. * * @param dev Pointer to controller device driver instance. */ static void mcux_i3c_fifo_rx_drain(const struct device *dev) { const struct mcux_i3c_config *config = dev->config; I3C_Type *base = config->base; uint8_t buf; /* Read from FIFO as long as RXPEND is set. */ while (mcux_i3c_status_is_set(base, I3C_MSTATUS_RXPEND_MASK)) { buf = base->MRDATAB; } } /** * @brief Find a registered I3C target device. * * This returns the I3C device descriptor of the I3C device * matching the incoming @p id. * * @param dev Pointer to controller device driver instance. * @param id Pointer to I3C device ID. * * @return @see i3c_device_find. */ static struct i3c_device_desc *mcux_i3c_device_find(const struct device *dev, const struct i3c_device_id *id) { const struct mcux_i3c_config *config = dev->config; return i3c_dev_list_find(&config->common.dev_list, id); } /** * @brief Perform bus recovery. * * @param dev Pointer to controller device driver instance. */ static int mcux_i3c_recover_bus(const struct device *dev) { const struct mcux_i3c_config *config = dev->config; I3C_Type *base = config->base; int ret = 0; /* * If the controller is in NORMACT state, tells it to emit STOP * so it can return to IDLE, or is ready to clear any pending * target initiated IBIs. 
*/ if (mcux_i3c_state_get(base) == I3C_MSTATUS_STATE_NORMACT) { mcux_i3c_request_emit_stop(dev->data, base, true); }; /* Exhaust all target initiated IBI */ while (mcux_i3c_status_is_set(base, I3C_MSTATUS_SLVSTART_MASK)) { /* Tell the controller to perform auto IBI. */ mcux_i3c_request_auto_ibi(base); if (mcux_i3c_status_wait_clear_timeout(base, I3C_MSTATUS_COMPLETE_MASK, 1000) == -ETIMEDOUT) { break; } /* Once auto IBI is done, discard bytes in FIFO. */ mcux_i3c_fifo_rx_drain(dev); /* * There might be other IBIs waiting. * So pause a bit to let other targets initiates * their IBIs. */ k_busy_wait(100); } if (reg32_poll_timeout(&base->MSTATUS, I3C_MSTATUS_STATE_MASK, I3C_MSTATUS_STATE_IDLE, 1000) == -ETIMEDOUT) { ret = -EBUSY; } return ret; } /** * @brief Perform one read transaction. * * This reads from RX FIFO until COMPLETE bit is set in MSTATUS * or time out. * * @param base Pointer to controller registers. * @param buf Buffer to store data. * @param buf_sz Buffer size in bytes. * * @return Number of bytes read, or negative if error. */ static int mcux_i3c_do_one_xfer_read(I3C_Type *base, uint8_t *buf, uint8_t buf_sz, bool ibi) { int ret = 0; int offset = 0; while (offset < buf_sz) { /* * Transfer data from FIFO into buffer. Read * in a tight loop to reduce chance of losing * FIFO data when the i3c speed is high. */ while (offset < buf_sz) { if (mcux_i3c_fifo_rx_count_get(base) == 0) { break; } buf[offset++] = (uint8_t)base->MRDATAB; } /* * If controller says timed out, we abort the transaction. */ if (mcux_i3c_has_error(base)) { if (mcux_i3c_error_is_timeout(base)) { ret = -ETIMEDOUT; } /* clear error */ base->MERRWARN = base->MERRWARN; /* for ibi, ignore timeout err if any bytes were * read, since the code doesn't know how many * bytes will be sent by device. for regular * application read request, return err always. 
*/ if ((ret == -ETIMEDOUT) && ibi && offset) { break; } else { if (ret == -ETIMEDOUT) { LOG_ERR("Timeout error"); } goto one_xfer_read_out; } } } ret = offset; one_xfer_read_out: return ret; } /** * @brief Perform one write transaction. * * This writes all data in @p buf to TX FIFO or time out * waiting for FIFO spaces. * * @param base Pointer to controller registers. * @param buf Buffer containing data to be sent. * @param buf_sz Number of bytes in @p buf to send. * @param no_ending True if not to signal end of write message. * * @return Number of bytes written, or negative if error. */ static int mcux_i3c_do_one_xfer_write(I3C_Type *base, uint8_t *buf, uint8_t buf_sz, bool no_ending) { int offset = 0; int remaining = buf_sz; int ret = 0; while (remaining > 0) { ret = reg32_poll_timeout(&base->MDATACTRL, I3C_MDATACTRL_TXFULL_MASK, 0, 1000); if (ret == -ETIMEDOUT) { goto one_xfer_write_out; } if ((remaining > 1) || no_ending) { base->MWDATAB = (uint32_t)buf[offset]; } else { base->MWDATABE = (uint32_t)buf[offset]; } offset += 1; remaining -= 1; } ret = offset; one_xfer_write_out: return ret; } /** * @brief Perform one transfer transaction. * * @param base Pointer to controller registers. * @param data Pointer to controller device instance data. * @param addr Target address. * @param is_i2c True if this is I2C transactions, false if I3C. * @param buf Buffer for data to be sent or received. * @param buf_sz Buffer size in bytes. * @param is_read True if this is a read transaction, false if write. * @param emit_start True if START is needed before read/write. * @param emit_stop True if STOP is needed after read/write. * @param no_ending True if not to signal end of write message. * * @return Number of bytes read/written, or negative if error. 
*/ static int mcux_i3c_do_one_xfer(I3C_Type *base, struct mcux_i3c_data *data, uint8_t addr, bool is_i2c, uint8_t *buf, size_t buf_sz, bool is_read, bool emit_start, bool emit_stop, bool no_ending) { int ret = 0; mcux_i3c_status_clear_all(base); mcux_i3c_errwarn_clear_all_nowait(base); /* Emit START if so desired */ if (emit_start) { ret = mcux_i3c_request_emit_start(base, addr, is_i2c, is_read, buf_sz); if (ret != 0) { emit_stop = true; goto out_one_xfer; } } if ((buf == NULL) || (buf_sz == 0)) { goto out_one_xfer; } if (is_read) { ret = mcux_i3c_do_one_xfer_read(base, buf, buf_sz, false); } else { ret = mcux_i3c_do_one_xfer_write(base, buf, buf_sz, no_ending); } if (ret < 0) { goto out_one_xfer; } if (is_read || !no_ending) { /* * Wait for controller to say the operation is done. * Save time by not clearing the bit. */ ret = mcux_i3c_status_wait_timeout(base, I3C_MSTATUS_COMPLETE_MASK, 1000); if (ret != 0) { LOG_DBG("%s: timed out addr 0x%02x, buf_sz %u", __func__, addr, buf_sz); emit_stop = true; goto out_one_xfer; } } if (mcux_i3c_has_error(base)) { ret = -EIO; } out_one_xfer: if (emit_stop) { mcux_i3c_request_emit_stop(data, base, true); } return ret; } /** * @brief Transfer messages in I3C mode. * * @see i3c_transfer * * @param dev Pointer to device driver instance. * @param target Pointer to target device descriptor. * @param msgs Pointer to I3C messages. * @param num_msgs Number of messages to transfers. 
* * @return @see i3c_transfer */ static int mcux_i3c_transfer(const struct device *dev, struct i3c_device_desc *target, struct i3c_msg *msgs, uint8_t num_msgs) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *dev_data = dev->data; I3C_Type *base = config->base; int ret; bool send_broadcast = true; if (target->dynamic_addr == 0U) { ret = -EINVAL; goto out_xfer_i3c; } k_mutex_lock(&dev_data->lock, K_FOREVER); mcux_i3c_wait_idle(dev_data, base); mcux_i3c_xfer_reset(base); /* Iterate over all the messages */ for (int i = 0; i < num_msgs; i++) { bool is_read = (msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ; bool no_ending = false; /* * Emit start if this is the first message or that * the RESTART flag is set in message. */ bool emit_start = (i == 0) || ((msgs[i].flags & I3C_MSG_RESTART) == I3C_MSG_RESTART); bool emit_stop = (msgs[i].flags & I3C_MSG_STOP) == I3C_MSG_STOP; /* * The controller requires special treatment of last byte of * a write message. Since the API permits having a bunch of * write messages without RESTART in between, this is just some * logic to determine whether to treat the last byte of this * message to be the last byte of a series of write mssages. * If not, tell the write function not to treat it that way. */ if (!is_read && !emit_stop && ((i + 1) != num_msgs)) { bool next_is_write = (msgs[i + 1].flags & I3C_MSG_RW_MASK) == I3C_MSG_WRITE; bool next_is_restart = ((msgs[i + 1].flags & I3C_MSG_RESTART) == I3C_MSG_RESTART); if (next_is_write && !next_is_restart) { no_ending = true; } } /* * Send broadcast header on first transfer or after a STOP, * unless flag is set not to. 
*/ if (!(msgs[i].flags & I3C_MSG_NBCH) && (send_broadcast)) { while (1) { ret = mcux_i3c_request_emit_start(base, I3C_BROADCAST_ADDR, false, false, 0); if (ret == -ENODEV) { LOG_WRN("emit start of broadcast addr got NACK, maybe IBI"); /* wait for idle then try again */ mcux_i3c_wait_idle(dev_data, base); continue; } if (ret < 0) { LOG_ERR("emit start of broadcast addr failed, error (%d)", ret); goto out_xfer_i3c_stop_unlock; } break; } send_broadcast = false; } ret = mcux_i3c_do_one_xfer(base, dev_data, target->dynamic_addr, false, msgs[i].buf, msgs[i].len, is_read, emit_start, emit_stop, no_ending); if (ret < 0) { goto out_xfer_i3c_stop_unlock; } /* write back the total number of bytes transferred */ msgs[i].num_xfer = ret; if (emit_stop) { /* After a STOP, send broadcast header before next msg */ send_broadcast = true; } } ret = 0; out_xfer_i3c_stop_unlock: mcux_i3c_request_emit_stop(dev_data, base, true); mcux_i3c_errwarn_clear_all_nowait(base); mcux_i3c_status_clear_all(base); k_mutex_unlock(&dev_data->lock); out_xfer_i3c: return ret; } /** * @brief Perform Dynamic Address Assignment. * * @see i3c_do_daa * * @param dev Pointer to controller device driver instance. * * @return @see i3c_do_daa */ static int mcux_i3c_do_daa(const struct device *dev) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *data = dev->data; I3C_Type *base = config->base; int ret = 0; uint8_t rx_buf[8] = {0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU}; size_t rx_count; uint8_t rx_size = 0; uint32_t intmask; k_mutex_lock(&data->lock, K_FOREVER); ret = mcux_i3c_state_wait_timeout(base, I3C_MSTATUS_STATE_IDLE, 100, 100000); if (ret == -ETIMEDOUT) { goto out_daa_unlock; } LOG_DBG("DAA: ENTDAA"); /* Disable I3C IRQ sources while we configure stuff. 
*/ intmask = mcux_i3c_interrupt_disable(base); mcux_i3c_xfer_reset(base); /* Emit process DAA */ mcux_i3c_request_daa(base); /* Loop until no more responses from devices */ do { /* Loop to grab data from devices (Provisioned ID, BCR and DCR) */ do { if (mcux_i3c_has_error(base)) { LOG_ERR("DAA recv error"); ret = -EIO; goto out_daa; } rx_count = mcux_i3c_fifo_rx_count_get(base); while (mcux_i3c_status_is_set(base, I3C_MSTATUS_RXPEND_MASK) && (rx_count != 0U)) { rx_buf[rx_size] = (uint8_t)(base->MRDATAB & I3C_MRDATAB_VALUE_MASK); rx_size++; rx_count--; } } while (!mcux_i3c_status_is_set(base, I3C_MSTATUS_MCTRLDONE_MASK)); mcux_i3c_status_clear(base, I3C_MSTATUS_MCTRLDONE_MASK); /* Figure out what address to assign to device */ if ((mcux_i3c_state_get(base) == I3C_MSTATUS_STATE_DAA) && (mcux_i3c_status_is_set(base, I3C_MSTATUS_BETWEEN_MASK))) { struct i3c_device_desc *target; uint16_t vendor_id; uint32_t part_no; uint64_t pid; uint8_t dyn_addr; rx_size = 0; /* Vendor ID portion of Provisioned ID */ vendor_id = (((uint16_t)rx_buf[0] << 8U) | (uint16_t)rx_buf[1]) & 0xFFFEU; /* Part Number portion of Provisioned ID */ part_no = (uint32_t)rx_buf[2] << 24U | (uint32_t)rx_buf[3] << 16U | (uint32_t)rx_buf[4] << 8U | (uint32_t)rx_buf[5]; /* ... and combine into one Provisioned ID */ pid = (uint64_t)vendor_id << 32U | (uint64_t)part_no; LOG_DBG("DAA: Rcvd PID 0x%04x%08x", vendor_id, part_no); ret = i3c_dev_list_daa_addr_helper(&data->common.attached_dev.addr_slots, &config->common.dev_list, pid, false, false, &target, &dyn_addr); if (ret != 0) { goto out_daa; } /* Update target descriptor */ target->dynamic_addr = dyn_addr; target->bcr = rx_buf[6]; target->dcr = rx_buf[7]; /* Mark the address as I3C device */ i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, dyn_addr); /* * If the device has static address, after address assignment, * the device will not respond to the static address anymore. 
* So free the static one from address slots if different from * newly assigned one. */ if ((target->static_addr != 0U) && (dyn_addr != target->static_addr)) { i3c_addr_slots_mark_free(&data->common.attached_dev.addr_slots, dyn_addr); } /* Emit process DAA again to send the address to the device */ base->MWDATAB = dyn_addr; mcux_i3c_request_daa(base); LOG_DBG("PID 0x%04x%08x assigned dynamic address 0x%02x", vendor_id, part_no, dyn_addr); } } while (!mcux_i3c_status_is_set(base, I3C_MSTATUS_COMPLETE_MASK)); out_daa: /* Clear all flags. */ mcux_i3c_errwarn_clear_all_nowait(base); mcux_i3c_status_clear_all(base); /* Re-Enable I3C IRQ sources. */ mcux_i3c_interrupt_enable(base, intmask); out_daa_unlock: k_mutex_unlock(&data->lock); return ret; } /** * @brief Send Common Command Code (CCC). * * @see i3c_do_ccc * * @param dev Pointer to controller device driver instance. * @param payload Pointer to CCC payload. * * @return @see i3c_do_ccc */ static int mcux_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *data = dev->data; I3C_Type *base = config->base; int ret = 0; if (payload == NULL) { return -EINVAL; } if (config->common.dev_list.num_i3c == 0) { /* * No i3c devices in dev tree. Just return so * we don't get errors doing cmds when there * are no devices listening/responding. */ return 0; } k_mutex_lock(&data->lock, K_FOREVER); mcux_i3c_xfer_reset(base); LOG_DBG("CCC[0x%02x]", payload->ccc.id); /* Emit START */ ret = mcux_i3c_request_emit_start(base, I3C_BROADCAST_ADDR, false, false, 0); if (ret < 0) { LOG_ERR("CCC[0x%02x] %s START error (%d)", payload->ccc.id, i3c_ccc_is_payload_broadcast(payload) ? 
"broadcast" : "direct", ret); goto out_ccc_stop; } /* Write the CCC code */ mcux_i3c_status_clear_all(base); mcux_i3c_errwarn_clear_all_nowait(base); ret = mcux_i3c_do_one_xfer_write(base, &payload->ccc.id, 1, payload->ccc.data_len > 0); if (ret < 0) { LOG_ERR("CCC[0x%02x] %s command error (%d)", payload->ccc.id, i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct", ret); goto out_ccc_stop; } /* Write additional data for CCC if needed */ if (payload->ccc.data_len > 0) { mcux_i3c_status_clear_all(base); mcux_i3c_errwarn_clear_all_nowait(base); ret = mcux_i3c_do_one_xfer_write(base, payload->ccc.data, payload->ccc.data_len, false); if (ret < 0) { LOG_ERR("CCC[0x%02x] %s command payload error (%d)", payload->ccc.id, i3c_ccc_is_payload_broadcast(payload) ? "broadcast" : "direct", ret); goto out_ccc_stop; } /* write back the total number of bytes transferred */ payload->ccc.num_xfer = ret; } /* Wait for controller to say the operation is done */ ret = mcux_i3c_status_wait_clear_timeout(base, I3C_MSTATUS_COMPLETE_MASK, 1000); if (ret != 0) { goto out_ccc_stop; } if (!i3c_ccc_is_payload_broadcast(payload)) { /* * If there are payload(s) for each target, * RESTART and then send payload for each target. */ for (int idx = 0; idx < payload->targets.num_targets; idx++) { struct i3c_ccc_target_payload *tgt_payload = &payload->targets.payloads[idx]; bool is_read = tgt_payload->rnw == 1U; bool emit_start = idx == 0; ret = mcux_i3c_do_one_xfer(base, data, tgt_payload->addr, false, tgt_payload->data, tgt_payload->data_len, is_read, emit_start, false, false); if (ret < 0) { LOG_ERR("CCC[0x%02x] target payload error (%d)", payload->ccc.id, ret); goto out_ccc_stop; } /* write back the total number of bytes transferred */ tgt_payload->num_xfer = ret; } } out_ccc_stop: mcux_i3c_request_emit_stop(data, base, true); if (ret > 0) { ret = 0; } k_mutex_unlock(&data->lock); return ret; } #ifdef CONFIG_I3C_USE_IBI /** * @brief Callback to service target initiated IBIs. 
* * @param work Pointer to k_work item. */ static void mcux_i3c_ibi_work(struct k_work *work) { uint8_t payload[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE]; size_t payload_sz = 0; struct i3c_ibi_work *i3c_ibi_work = CONTAINER_OF(work, struct i3c_ibi_work, work); const struct device *dev = i3c_ibi_work->controller; const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *data = dev->data; struct i3c_dev_attached_list *dev_list = &data->common.attached_dev; I3C_Type *base = config->base; struct i3c_device_desc *target = NULL; uint32_t mstatus, ibitype, ibiaddr; int ret; k_mutex_lock(&data->lock, K_FOREVER); if (mcux_i3c_state_get(base) != I3C_MSTATUS_STATE_SLVREQ) { LOG_DBG("IBI work %p running not because of IBI", work); LOG_DBG("MSTATUS 0x%08x MERRWARN 0x%08x", base->MSTATUS, base->MERRWARN); mcux_i3c_request_emit_stop(data, base, true); goto out_ibi_work; }; /* Use auto IBI to service the IBI */ mcux_i3c_request_auto_ibi(base); mstatus = sys_read32((mem_addr_t)&base->MSTATUS); ibiaddr = (mstatus & I3C_MSTATUS_IBIADDR_MASK) >> I3C_MSTATUS_IBIADDR_SHIFT; /* * Note that the I3C_MSTATUS_IBI_TYPE_* are not shifted right. * So no need to shift here. */ ibitype = (mstatus & I3C_MSTATUS_IBITYPE_MASK); /* * Wait for COMPLETE bit to be set to indicate auto IBI * has finished for hot-join and controller role request. * For target interrupts, the IBI payload may be longer * than the RX FIFO so we won't get the COMPLETE bit set * at the first round of data read. So checking of * COMPLETE bit is deferred to the reading. 
*/ switch (ibitype) { case I3C_MSTATUS_IBITYPE_HJ: __fallthrough; case I3C_MSTATUS_IBITYPE_MR: if (mcux_i3c_status_wait_timeout(base, I3C_MSTATUS_COMPLETE_MASK, 1000) == -ETIMEDOUT) { LOG_ERR("Timeout waiting for COMPLETE"); mcux_i3c_request_emit_stop(data, base, true); goto out_ibi_work; } break; default: break; }; switch (ibitype) { case I3C_MSTATUS_IBITYPE_IBI: target = i3c_dev_list_i3c_addr_find(dev_list, (uint8_t)ibiaddr); if (target != NULL) { ret = mcux_i3c_do_one_xfer_read(base, &payload[0], sizeof(payload), true); if (ret >= 0) { payload_sz = (size_t)ret; } else { LOG_ERR("Error reading IBI payload"); mcux_i3c_request_emit_stop(data, base, true); goto out_ibi_work; } } else { LOG_ERR("IBI from unknown device addr 0x%x", ibiaddr); /* NACK IBI coming from unknown device */ mcux_i3c_ibi_respond_nack(base); } break; case I3C_MSTATUS_IBITYPE_HJ: mcux_i3c_ibi_respond_ack(base); break; case I3C_MSTATUS_IBITYPE_MR: LOG_DBG("Controller role handoff not supported"); mcux_i3c_ibi_respond_nack(base); break; default: break; } if (mcux_i3c_has_error(base)) { /* * If the controller detects any errors, simply * emit a STOP to abort the IBI. The target will * raise IBI again if so desired. */ mcux_i3c_request_emit_stop(data, base, true); goto out_ibi_work; } switch (ibitype) { case I3C_MSTATUS_IBITYPE_IBI: if (target != NULL) { if (i3c_ibi_work_enqueue_target_irq(target, &payload[0], payload_sz) != 0) { LOG_ERR("Error enqueue IBI IRQ work"); } } /* Finishing the IBI transaction */ mcux_i3c_request_emit_stop(data, base, true); break; case I3C_MSTATUS_IBITYPE_HJ: if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) { LOG_ERR("Error enqueue IBI HJ work"); } break; case I3C_MSTATUS_IBITYPE_MR: break; default: break; } out_ibi_work: k_mutex_unlock(&data->lock); /* Re-enable target initiated IBI interrupt. 
*/ base->MINTSET = I3C_MINTSET_SLVSTART_MASK; } static void mcux_i3c_ibi_rules_setup(struct mcux_i3c_data *data, I3C_Type *base) { uint32_t ibi_rules; int idx; ibi_rules = 0; for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) { uint32_t addr_6bit; /* Extract the lower 6-bit of target address */ addr_6bit = (uint32_t)data->ibi.addr[idx] & I3C_MIBIRULES_ADDR0_MASK; /* Shift into correct place */ addr_6bit <<= idx * I3C_MIBIRULES_ADDR1_SHIFT; /* Put into the temporary IBI Rules register */ ibi_rules |= addr_6bit; } if (!data->ibi.msb) { /* The MSB0 field is 1 if MSB is 0 */ ibi_rules |= I3C_MIBIRULES_MSB0_MASK; } if (!data->ibi.has_mandatory_byte) { /* The NOBYTE field is 1 if there is no mandatory byte */ ibi_rules |= I3C_MIBIRULES_NOBYTE_MASK; } /* Update the register */ base->MIBIRULES = ibi_rules; LOG_DBG("MIBIRULES 0x%08x", ibi_rules); } int mcux_i3c_ibi_enable(const struct device *dev, struct i3c_device_desc *target) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *data = dev->data; I3C_Type *base = config->base; struct i3c_ccc_events i3c_events; uint8_t idx; bool msb, has_mandatory_byte; int ret = 0; if (!i3c_device_is_ibi_capable(target)) { ret = -EINVAL; goto out1; } if (data->ibi.num_addr >= ARRAY_SIZE(data->ibi.addr)) { /* No more free entries in the IBI Rules table */ ret = -ENOMEM; goto out1; } /* Check for duplicate */ for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) { if (data->ibi.addr[idx] == target->dynamic_addr) { ret = -EINVAL; goto out1; } } /* Disable controller interrupt while we configure IBI rules. */ base->MINTCLR = I3C_MINTCLR_SLVSTART_MASK; LOG_DBG("IBI enabling for 0x%02x (BCR 0x%02x)", target->dynamic_addr, target->bcr); msb = (target->dynamic_addr & BIT(6)) == BIT(6); has_mandatory_byte = i3c_ibi_has_payload(target); /* * If there are already addresses in the table, we must * check if the incoming entry is compatible with * the existing ones. */ if (data->ibi.num_addr > 0) { /* * 1. 
All devices in the table must all use mandatory * bytes, or do not. * * 2. Each address in entry only captures the lowest 6-bit. * The MSB (7th bit) is captured separated in another bit * in the register. So all addresses must have the same MSB. */ if (has_mandatory_byte != data->ibi.has_mandatory_byte) { LOG_ERR("New IBI does not have same mandatory byte requirement" " as previous IBI"); ret = -EINVAL; goto out; } if (msb != data->ibi.msb) { LOG_ERR("New IBI does not have same msb as previous IBI"); ret = -EINVAL; goto out; } /* Find an empty address slot */ for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) { if (data->ibi.addr[idx] == 0U) { break; } } if (idx >= ARRAY_SIZE(data->ibi.addr)) { LOG_ERR("Cannot support more IBIs"); ret = -ENOTSUP; goto out; } } else { /* * If the incoming address is the first in the table, * it dictates future compatibilities. */ data->ibi.has_mandatory_byte = has_mandatory_byte; data->ibi.msb = msb; idx = 0; } data->ibi.addr[idx] = target->dynamic_addr; data->ibi.num_addr += 1U; mcux_i3c_ibi_rules_setup(data, base); /* Tell target to enable IBI */ i3c_events.events = I3C_CCC_EVT_INTR; ret = i3c_ccc_do_events_set(target, true, &i3c_events); if (ret != 0) { LOG_ERR("Error sending IBI ENEC for 0x%02x (%d)", target->dynamic_addr, ret); } out: if (data->ibi.num_addr > 0U) { /* * Enable controller to raise interrupt when a target * initiates IBI. 
*/ base->MINTSET = I3C_MINTSET_SLVSTART_MASK; } out1: return ret; } int mcux_i3c_ibi_disable(const struct device *dev, struct i3c_device_desc *target) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *data = dev->data; I3C_Type *base = config->base; struct i3c_ccc_events i3c_events; int ret = 0; int idx; if (!i3c_device_is_ibi_capable(target)) { ret = -EINVAL; goto out; } for (idx = 0; idx < ARRAY_SIZE(data->ibi.addr); idx++) { if (target->dynamic_addr == data->ibi.addr[idx]) { break; } } if (idx == ARRAY_SIZE(data->ibi.addr)) { /* Target is not in list of registered addresses. */ ret = -ENODEV; goto out; } /* Disable controller interrupt while we configure IBI rules. */ base->MINTCLR = I3C_MINTCLR_SLVSTART_MASK; data->ibi.addr[idx] = 0U; data->ibi.num_addr -= 1U; /* Tell target to disable IBI */ i3c_events.events = I3C_CCC_EVT_INTR; ret = i3c_ccc_do_events_set(target, false, &i3c_events); if (ret != 0) { LOG_ERR("Error sending IBI DISEC for 0x%02x (%d)", target->dynamic_addr, ret); } mcux_i3c_ibi_rules_setup(data, base); if (data->ibi.num_addr > 0U) { /* * Enable controller to raise interrupt when a target * initiates IBI. */ base->MINTSET = I3C_MINTSET_SLVSTART_MASK; } out: return ret; } #endif /* CONFIG_I3C_USE_IBI */ /** * @brief Interrupt Service Routine * * Currently only services interrupts when any target initiates IBIs. * * @param dev Pointer to controller device driver instance. */ static void mcux_i3c_isr(const struct device *dev) { #ifdef CONFIG_I3C_USE_IBI const struct mcux_i3c_config *config = dev->config; I3C_Type *base = config->base; /* Target initiated IBIs */ if (mcux_i3c_status_is_set(base, I3C_MSTATUS_SLVSTART_MASK)) { int err; /* Clear SLVSTART interrupt */ base->MSTATUS = I3C_MSTATUS_SLVSTART_MASK; /* * Disable further target initiated IBI interrupt * while we try to service the current one. */ base->MINTCLR = I3C_MINTCLR_SLVSTART_MASK; /* * Handle IBI in workqueue. 
*/ err = i3c_ibi_work_enqueue_cb(dev, mcux_i3c_ibi_work); if (err) { LOG_ERR("Error enqueuing ibi work, err %d", err); base->MINTSET = I3C_MINTCLR_SLVSTART_MASK; } } #else ARG_UNUSED(dev); #endif } /** * @brief Configure I3C hardware. * * @param dev Pointer to controller device driver instance. * @param type Type of configuration parameters being passed * in @p config. * @param config Pointer to the configuration parameters. * * @retval 0 If successful. * @retval -EINVAL If invalid configure parameters. * @retval -EIO General Input/Output errors. * @retval -ENOSYS If not implemented. */ static int mcux_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config) { const struct mcux_i3c_config *dev_cfg = dev->config; struct mcux_i3c_data *dev_data = dev->data; I3C_Type *base = dev_cfg->base; i3c_master_config_t master_config; struct i3c_config_controller *ctrl_cfg = config; uint32_t clock_freq; int ret = 0; if (type != I3C_CONFIG_CONTROLLER) { ret = -EINVAL; goto out_configure; } /* * Check for valid configuration parameters. * * Currently, must be the primary controller. */ if ((ctrl_cfg->is_secondary) || (ctrl_cfg->scl.i2c == 0U) || (ctrl_cfg->scl.i3c == 0U)) { ret = -EINVAL; goto out_configure; } /* Get the clock frequency */ if (clock_control_get_rate(dev_cfg->clock_dev, dev_cfg->clock_subsys, &clock_freq)) { ret = -EINVAL; goto out_configure; } /* * Save requested config so next config_get() call returns the * correct values. */ (void)memcpy(&dev_data->common.ctrl_config, ctrl_cfg, sizeof(*ctrl_cfg)); I3C_MasterGetDefaultConfig(&master_config); master_config.baudRate_Hz.i2cBaud = ctrl_cfg->scl.i2c; master_config.baudRate_Hz.i3cPushPullBaud = ctrl_cfg->scl.i3c; master_config.enableOpenDrainHigh = dev_cfg->disable_open_drain_high_pp ? 
false : true; if (dev_data->clocks.i3c_od_scl_hz) { master_config.baudRate_Hz.i3cOpenDrainBaud = dev_data->clocks.i3c_od_scl_hz; } /* Initialize hardware */ I3C_MasterInit(base, &master_config, clock_freq); out_configure: return ret; } /** * @brief Get configuration of the I3C hardware. * * This provides a way to get the current configuration of the I3C hardware. * * This can return cached config or probed hardware parameters, but it has to * be up to date with current configuration. * * @param[in] dev Pointer to controller device driver instance. * @param[in] type Type of configuration parameters being passed * in @p config. * @param[in,out] config Pointer to the configuration parameters. * * Note that if @p type is @c I3C_CONFIG_CUSTOM, @p config must contain * the ID of the parameter to be retrieved. * * @retval 0 If successful. * @retval -EIO General Input/Output errors. * @retval -ENOSYS If not implemented. */ static int mcux_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config) { struct mcux_i3c_data *data = dev->data; int ret = 0; if ((type != I3C_CONFIG_CONTROLLER) || (config == NULL)) { ret = -EINVAL; goto out_configure; } (void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config)); out_configure: return ret; } /** * @brief Initialize the hardware. * * @param dev Pointer to controller device driver instance. 
*/ static int mcux_i3c_init(const struct device *dev) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *data = dev->data; I3C_Type *base = config->base; struct i3c_config_controller *ctrl_config = &data->common.ctrl_config; i3c_master_config_t ctrl_config_hal; int ret = 0; ret = i3c_addr_slots_init(dev); if (ret != 0) { goto err_out; } CLOCK_SetClkDiv(kCLOCK_DivI3cClk, data->clocks.clk_div_pp); CLOCK_SetClkDiv(kCLOCK_DivI3cSlowClk, data->clocks.clk_div_od); CLOCK_SetClkDiv(kCLOCK_DivI3cTcClk, data->clocks.clk_div_tc); ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { goto err_out; } k_mutex_init(&data->lock); k_condvar_init(&data->condvar); I3C_MasterGetDefaultConfig(&ctrl_config_hal); /* Set default SCL clock rate (in Hz) */ if (ctrl_config->scl.i2c == 0U) { ctrl_config->scl.i2c = ctrl_config_hal.baudRate_Hz.i2cBaud; } if (ctrl_config->scl.i3c == 0U) { ctrl_config->scl.i3c = ctrl_config_hal.baudRate_Hz.i3cPushPullBaud; } /* Currently can only act as primary controller. */ ctrl_config->is_secondary = false; /* HDR mode not supported at the moment. */ ctrl_config->supported_hdr = 0U; ret = mcux_i3c_configure(dev, I3C_CONFIG_CONTROLLER, ctrl_config); if (ret != 0) { ret = -EINVAL; goto err_out; } /* Disable all interrupts */ base->MINTCLR = I3C_MINTCLR_SLVSTART_MASK | I3C_MINTCLR_MCTRLDONE_MASK | I3C_MINTCLR_COMPLETE_MASK | I3C_MINTCLR_RXPEND_MASK | I3C_MINTCLR_TXNOTFULL_MASK | I3C_MINTCLR_IBIWON_MASK | I3C_MINTCLR_ERRWARN_MASK | I3C_MINTCLR_NOWMASTER_MASK; /* Just in case the bus is not in idle. 
*/ ret = mcux_i3c_recover_bus(dev); if (ret != 0) { ret = -EIO; goto err_out; } /* Configure interrupt */ config->irq_config_func(dev); /* Perform bus initialization */ ret = i3c_bus_init(dev, &config->common.dev_list); err_out: return ret; } static int mcux_i3c_i2c_api_configure(const struct device *dev, uint32_t dev_config) { return -ENOSYS; } static int mcux_i3c_i2c_api_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { const struct mcux_i3c_config *config = dev->config; struct mcux_i3c_data *dev_data = dev->data; I3C_Type *base = config->base; int ret; k_mutex_lock(&dev_data->lock, K_FOREVER); mcux_i3c_wait_idle(dev_data, base); mcux_i3c_xfer_reset(base); /* Iterate over all the messages */ for (int i = 0; i < num_msgs; i++) { bool is_read = (msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ; bool no_ending = false; /* * Emit start if this is the first message or that * the RESTART flag is set in message. */ bool emit_start = (i == 0) || ((msgs[i].flags & I2C_MSG_RESTART) == I2C_MSG_RESTART); bool emit_stop = (msgs[i].flags & I2C_MSG_STOP) == I2C_MSG_STOP; /* * The controller requires special treatment of last byte of * a write message. Since the API permits having a bunch of * write messages without RESTART in between, this is just some * logic to determine whether to treat the last byte of this * message to be the last byte of a series of write mssages. * If not, tell the write function not to treat it that way. 
*/ if (!is_read && !emit_stop && ((i + 1) != num_msgs)) { bool next_is_write = (msgs[i + 1].flags & I2C_MSG_RW_MASK) == I2C_MSG_WRITE; bool next_is_restart = ((msgs[i + 1].flags & I2C_MSG_RESTART) == I2C_MSG_RESTART); if (next_is_write && !next_is_restart) { no_ending = true; } } ret = mcux_i3c_do_one_xfer(base, dev_data, addr, true, msgs[i].buf, msgs[i].len, is_read, emit_start, emit_stop, no_ending); if (ret < 0) { goto out_xfer_i2c_stop_unlock; } } ret = 0; out_xfer_i2c_stop_unlock: mcux_i3c_request_emit_stop(dev_data, base, true); mcux_i3c_errwarn_clear_all_nowait(base); mcux_i3c_status_clear_all(base); k_mutex_unlock(&dev_data->lock); return ret; } static const struct i3c_driver_api mcux_i3c_driver_api = { .i2c_api.configure = mcux_i3c_i2c_api_configure, .i2c_api.transfer = mcux_i3c_i2c_api_transfer, .i2c_api.recover_bus = mcux_i3c_recover_bus, .configure = mcux_i3c_configure, .config_get = mcux_i3c_config_get, .recover_bus = mcux_i3c_recover_bus, .do_daa = mcux_i3c_do_daa, .do_ccc = mcux_i3c_do_ccc, .i3c_device_find = mcux_i3c_device_find, .i3c_xfers = mcux_i3c_transfer, #ifdef CONFIG_I3C_USE_IBI .ibi_enable = mcux_i3c_ibi_enable, .ibi_disable = mcux_i3c_ibi_disable, #endif }; #define I3C_MCUX_DEVICE(id) \ PINCTRL_DT_INST_DEFINE(id); \ static void mcux_i3c_config_func_##id(const struct device *dev); \ static struct i3c_device_desc mcux_i3c_device_array_##id[] = \ I3C_DEVICE_ARRAY_DT_INST(id); \ static struct i3c_i2c_device_desc mcux_i3c_i2c_device_array_##id[] = \ I3C_I2C_DEVICE_ARRAY_DT_INST(id); \ static const struct mcux_i3c_config mcux_i3c_config_##id = { \ .base = (I3C_Type *) DT_INST_REG_ADDR(id), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(id)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(id, name), \ .irq_config_func = mcux_i3c_config_func_##id, \ .common.dev_list.i3c = mcux_i3c_device_array_##id, \ .common.dev_list.num_i3c = ARRAY_SIZE(mcux_i3c_device_array_##id), \ .common.dev_list.i2c = mcux_i3c_i2c_device_array_##id, \ 
.common.dev_list.num_i2c = ARRAY_SIZE(mcux_i3c_i2c_device_array_##id), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(id), \ .disable_open_drain_high_pp = \ DT_INST_PROP(id, disable_open_drain_high_pp), \ }; \ static struct mcux_i3c_data mcux_i3c_data_##id = { \ .clocks.i3c_od_scl_hz = DT_INST_PROP_OR(id, i3c_od_scl_hz, 0), \ .common.ctrl_config.scl.i3c = DT_INST_PROP_OR(id, i3c_scl_hz, 0), \ .common.ctrl_config.scl.i2c = DT_INST_PROP_OR(id, i2c_scl_hz, 0), \ .clocks.clk_div_pp = DT_INST_PROP(id, clk_divider), \ .clocks.clk_div_od = DT_INST_PROP(id, clk_divider_slow), \ .clocks.clk_div_tc = DT_INST_PROP(id, clk_divider_tc), \ }; \ DEVICE_DT_INST_DEFINE(id, \ mcux_i3c_init, \ NULL, \ &mcux_i3c_data_##id, \ &mcux_i3c_config_##id, \ POST_KERNEL, \ CONFIG_I3C_CONTROLLER_INIT_PRIORITY, \ &mcux_i3c_driver_api); \ static void mcux_i3c_config_func_##id(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(id), \ DT_INST_IRQ(id, priority), \ mcux_i3c_isr, \ DEVICE_DT_INST_GET(id), \ 0); \ irq_enable(DT_INST_IRQN(id)); \ }; \ DT_INST_FOREACH_STATUS_OKAY(I3C_MCUX_DEVICE) ```
/content/code_sandbox/drivers/i3c/i3c_mcux.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
17,273
```c /* * an affiliate of Cypress Semiconductor Corporation * * */ /** * @brief SDIO driver for Infineon CAT1 MCU family. * * This driver support only SDIO protocol of the SD interface for general * I/O functions. * * Refer to the SD Specifications Part 1 SDIO Specifications Version 4.10 for more * information on the SDIO protocol and specifications. * * Features * - Supports 4-bit interface * - Supports Ultra High Speed (UHS-I) mode * - Supports Default Speed (DS), High Speed (HS), SDR12, SDR25 and SDR50 speed modes * - Supports SDIO card interrupts in both 1-bit SD and 4-bit SD modes * - Supports Standard capacity (SDSC), High capacity (SDHC) and * Extended capacity (SDXC) memory * * Note (limitations): * - current version of ifx_cat1_sdio supports only following set of commands: * > GO_IDLE_STATE (CMD0) * > SEND_RELATIVE_ADDR (CMD3) * > IO_SEND_OP_COND (CMD5) * > SELECT_CARD (CMD7) * > VOLTAGE_SWITCH (CMD11) * > GO_INACTIVE_STATE (CMD15) * > IO_RW_DIRECT (CMD52) * > IO_RW_EXTENDED (CMD53) */ #define DT_DRV_COMPAT infineon_cat1_sdhc_sdio #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/sdhc.h> #include <zephyr/sd/sd_spec.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> #include <soc.h> #include <zephyr/drivers/pinctrl.h> #include <cyhal_sdhc.h> #include <cyhal_sdio.h> #include <cyhal_gpio.h> LOG_MODULE_REGISTER(ifx_cat1_sdio, CONFIG_SDHC_LOG_LEVEL); #include <zephyr/irq.h> #define IFX_CAT1_SDIO_F_MIN (SDMMC_CLOCK_400KHZ) #define IFX_CAT1_SDIO_F_MAX (SD_CLOCK_50MHZ) struct ifx_cat1_sdio_config { const struct pinctrl_dev_config *pincfg; SDHC_Type *reg_addr; uint8_t irq_priority; }; struct ifx_cat1_sdio_data { cyhal_sdio_t sdio_obj; cyhal_resource_inst_t hw_resource; cyhal_sdio_configurator_t cyhal_sdio_config; enum sdhc_clock_speed clock_speed; enum sdhc_bus_width bus_width; void *sdio_cb_user_data; sdhc_interrupt_cb_t sdio_cb; }; static uint32_t sdio_rca; static const 
cy_stc_sd_host_init_config_t host_config = {false, CY_SD_HOST_DMA_ADMA2, false}; static cy_en_sd_host_card_capacity_t sd_host_card_capacity = CY_SD_HOST_SDSC; static cy_en_sd_host_card_type_t sd_host_card_type = CY_SD_HOST_NOT_EMMC; static cy_stc_sd_host_sd_card_config_t sd_host_sd_card_config = { .lowVoltageSignaling = false, .busWidth = CY_SD_HOST_BUS_WIDTH_4_BIT, .cardType = &sd_host_card_type, .rca = &sdio_rca, .cardCapacity = &sd_host_card_capacity, }; /* List of available SDHC instances */ static SDHC_Type *const IFX_CAT1_SDHC_BASE_ADDRESSES[CY_IP_MXSDHC_INSTANCES] = { #ifdef SDHC0 SDHC0, #endif /* ifdef SDHC0 */ #ifdef SDHC1 SDHC1, #endif /* ifdef SDHC1 */ }; static int32_t _get_hw_block_num(SDHC_Type *reg_addr) { uint32_t i; for (i = 0u; i < CY_IP_MXSDHC_INSTANCES; i++) { if (IFX_CAT1_SDHC_BASE_ADDRESSES[i] == reg_addr) { return i; } } return -EINVAL; } static int ifx_cat1_sdio_reset(const struct device *dev) { struct ifx_cat1_sdio_data *dev_data = dev->data; cyhal_sdhc_software_reset((cyhal_sdhc_t *)&dev_data->sdio_obj); return 0; } static int ifx_cat1_sdio_set_io(const struct device *dev, struct sdhc_io *ios) { cy_rslt_t ret; struct ifx_cat1_sdio_data *dev_data = dev->data; cyhal_sdio_cfg_t config = {.frequencyhal_hz = ios->clock}; /* NOTE: Set bus width, set card power, set host signal voltage, * set I/O timing does not support in current version of driver */ /* Set host clock */ if ((dev_data->clock_speed != ios->clock) && (ios->clock != 0)) { if ((ios->clock > IFX_CAT1_SDIO_F_MAX) || (ios->clock < IFX_CAT1_SDIO_F_MIN)) { return -EINVAL; } ret = cyhal_sdio_configure(&dev_data->sdio_obj, &config); if (ret != CY_RSLT_SUCCESS) { return -ENOTSUP; } dev_data->clock_speed = ios->clock; } return 0; } static int ifx_cat1_sdio_card_busy(const struct device *dev) { struct ifx_cat1_sdio_data *dev_data = dev->data; return cyhal_sdio_is_busy(&dev_data->sdio_obj) ? 
1 : 0; } static int ifx_cat1_sdio_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data) { struct ifx_cat1_sdio_data *dev_data = dev->data; int ret; switch (cmd->opcode) { case CYHAL_SDIO_CMD_GO_IDLE_STATE: case CYHAL_SDIO_CMD_SEND_RELATIVE_ADDR: case CYHAL_SDIO_CMD_IO_SEND_OP_COND: case CYHAL_SDIO_CMD_SELECT_CARD: case CYHAL_SDIO_CMD_VOLTAGE_SWITCH: case CYHAL_SDIO_CMD_GO_INACTIVE_STATE: case CYHAL_SDIO_CMD_IO_RW_DIRECT: ret = cyhal_sdio_send_cmd(&dev_data->sdio_obj, CYHAL_SDIO_XFER_TYPE_READ, cmd->opcode, cmd->arg, cmd->response); if (ret != CY_RSLT_SUCCESS) { LOG_ERR("cyhal_sdio_send_cmd failed ret = %d \r\n", ret); } break; case CYHAL_SDIO_CMD_IO_RW_EXTENDED: cyhal_sdio_transfer_type_t direction; direction = (cmd->arg & BIT(SDIO_CMD_ARG_RW_SHIFT)) ? CYHAL_SDIO_XFER_TYPE_WRITE : CYHAL_SDIO_XFER_TYPE_READ; ret = cyhal_sdio_bulk_transfer(&dev_data->sdio_obj, direction, cmd->arg, data->data, data->blocks * data->block_size, cmd->response); if (ret != CY_RSLT_SUCCESS) { LOG_ERR("cyhal_sdio_bulk_transfer failed ret = %d \r\n", ret); } break; default: ret = -ENOTSUP; } return ret; } static int ifx_cat1_sdio_get_card_present(const struct device *dev) { return 1; } static int ifx_cat1_sdio_get_host_props(const struct device *dev, struct sdhc_host_props *props) { memset(props, 0, sizeof(*props)); props->f_max = IFX_CAT1_SDIO_F_MAX; props->f_min = IFX_CAT1_SDIO_F_MIN; props->host_caps.bus_4_bit_support = true; props->host_caps.high_spd_support = true; props->host_caps.sdr50_support = true; props->host_caps.sdio_async_interrupt_support = true; props->host_caps.vol_330_support = true; return 0; } static int ifx_cat1_sdio_enable_interrupt(const struct device *dev, sdhc_interrupt_cb_t callback, int sources, void *user_data) { struct ifx_cat1_sdio_data *data = dev->data; const struct ifx_cat1_sdio_config *cfg = dev->config; if (sources != SDHC_INT_SDIO) { return -ENOTSUP; } if (callback == NULL) { return -EINVAL; } /* Record SDIO callback 
parameters */ data->sdio_cb = callback; data->sdio_cb_user_data = user_data; /* Enable CARD INTERRUPT event */ cyhal_sdio_enable_event(&data->sdio_obj, CYHAL_SDIO_CARD_INTERRUPT, cfg->irq_priority, true); return 0; } static int ifx_cat1_sdio_disable_interrupt(const struct device *dev, int sources) { struct ifx_cat1_sdio_data *data = dev->data; const struct ifx_cat1_sdio_config *cfg = dev->config; if (sources != SDHC_INT_SDIO) { return -ENOTSUP; } data->sdio_cb = NULL; data->sdio_cb_user_data = NULL; /* Disable CARD INTERRUPT event */ cyhal_sdio_enable_event(&data->sdio_obj, CYHAL_SDIO_CARD_INTERRUPT, cfg->irq_priority, false); return 0; } static void ifx_cat1_sdio_event_callback(void *callback_arg, cyhal_sdio_event_t event) { const struct device *dev = callback_arg; struct ifx_cat1_sdio_data *data = dev->data; if ((event == CYHAL_SDIO_CARD_INTERRUPT) && (data->sdio_cb != NULL)) { data->sdio_cb(dev, SDHC_INT_SDIO, data->sdio_cb_user_data); } } static int ifx_cat1_sdio_init(const struct device *dev) { cy_rslt_t ret; struct ifx_cat1_sdio_data *data = dev->data; const struct ifx_cat1_sdio_config *config = dev->config; /* Configure dt provided device signals when available */ ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } /* Dedicate SDIO HW resource */ data->hw_resource.type = CYHAL_RSC_SDIODEV; data->hw_resource.block_num = _get_hw_block_num(config->reg_addr); /* Initialize the SDIO peripheral */ data->cyhal_sdio_config.resource = &data->hw_resource; data->cyhal_sdio_config.host_config = &host_config, data->cyhal_sdio_config.card_config = &sd_host_sd_card_config, ret = cyhal_sdio_init_cfg(&data->sdio_obj, &data->cyhal_sdio_config); if (ret != CY_RSLT_SUCCESS) { LOG_ERR("cyhal_sdio_init_cfg failed ret = %d \r\n", ret); return ret; } /* Register callback for SDIO events */ cyhal_sdio_register_callback(&data->sdio_obj, ifx_cat1_sdio_event_callback, (void *)dev); return 0; } static const struct sdhc_driver_api ifx_cat1_sdio_api 
= { .reset = ifx_cat1_sdio_reset, .request = ifx_cat1_sdio_request, .set_io = ifx_cat1_sdio_set_io, .get_card_present = ifx_cat1_sdio_get_card_present, .card_busy = ifx_cat1_sdio_card_busy, .get_host_props = ifx_cat1_sdio_get_host_props, .enable_interrupt = ifx_cat1_sdio_enable_interrupt, .disable_interrupt = ifx_cat1_sdio_disable_interrupt, }; #define IFX_CAT1_SDHC_INIT(n) \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static const struct ifx_cat1_sdio_config ifx_cat1_sdio_##n##_config = { \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .reg_addr = (SDHC_Type *)DT_INST_REG_ADDR(n), \ .irq_priority = DT_INST_IRQ(n, priority)}; \ \ static struct ifx_cat1_sdio_data ifx_cat1_sdio_##n##_data; \ \ DEVICE_DT_INST_DEFINE(n, &ifx_cat1_sdio_init, NULL, &ifx_cat1_sdio_##n##_data, \ &ifx_cat1_sdio_##n##_config, POST_KERNEL, CONFIG_SDHC_INIT_PRIORITY, \ &ifx_cat1_sdio_api); DT_INST_FOREACH_STATUS_OKAY(IFX_CAT1_SDHC_INIT) ```
/content/code_sandbox/drivers/sdhc/ifx_cat1_sdio.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,757
```c /*
 *
 */

#define DT_DRV_COMPAT cdns_sdhc

#include <zephyr/drivers/sdhc.h>
#include <zephyr/logging/log.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/reset.h>
#include "sdhc_cdns_ll.h"

/* Size of the DMA descriptor area, in bytes */
#define SDHC_CDNS_DESC_SIZE (1<<20)
/* Only the low 16 bits of the combo PHY physical address are passed down */
#define COMBOPHY_ADDR_MASK 0x0000FFFFU

/* Accessors for the devicetree config and runtime data of an instance */
#define DEV_CFG(_dev) ((const struct sdhc_cdns_config *)(_dev)->config)
#define DEV_DATA(_dev) ((struct sdhc_cdns_data *const)(_dev)->data)

LOG_MODULE_REGISTER(sdhc_cdns, CONFIG_SDHC_LOG_LEVEL);

/* SDMMC operations FPs are the element of structure; populated by
 * sdhc_cdns_sdmmc_init() during driver init, before any API call is made.
 */
static const struct sdhc_cdns_ops *cdns_sdmmc_ops;

struct sdhc_cdns_config {
	DEVICE_MMIO_NAMED_ROM(reg_base);
	DEVICE_MMIO_NAMED_ROM(combo_phy);
	/* Clock rate for host (0 means: query the clock controller) */
	uint32_t clk_rate;
	/* power delay prop for host */
	uint32_t power_delay_ms;
	/* run time device structure */
	const struct device *cdns_clk_dev;
	/* type to identify a clock controller sub-system */
	clock_control_subsys_t clkid;
	/* Reset controller device configuration. */
	const struct reset_dt_spec reset_sdmmc;
	const struct reset_dt_spec reset_sdmmcocp;
	const struct reset_dt_spec reset_softphy;
};

struct sdhc_cdns_data {
	DEVICE_MMIO_NAMED_RAM(reg_base);
	DEVICE_MMIO_NAMED_RAM(combo_phy);
	/* Host controller parameters */
	struct sdhc_cdns_params params;
	/* sdmmc device information for host */
	struct sdmmc_device_info info;
	/* Input/Output configuration */
	struct sdhc_io host_io;
};

/**
 * @brief Send a command (with optional data phase) to the card.
 *
 * Prepares DMA when @p data is given, sends the command through the
 * low-level ops, invalidates the data cache after read-type commands,
 * and copies the (up to 128-bit) response back into @p cmd.
 *
 * @return 0 on success, negative errno on failure.
 */
static int sdhc_cdns_request(const struct device *dev,
	struct sdhc_command *cmd,
	struct sdhc_data *data)
{
	int ret = 0;
	struct sdmmc_cmd cdns_sdmmc_cmd;

	if (cmd == NULL) {
		LOG_ERR("Wrong CMD parameter");
		return -EINVAL;
	}

	/* Initialization of command structure */
	cdns_sdmmc_cmd.cmd_idx = cmd->opcode;
	cdns_sdmmc_cmd.cmd_arg = cmd->arg;
	cdns_sdmmc_cmd.resp_type = (cmd->response_type & SDHC_NATIVE_RESPONSE_MASK);

	/* Sending command as per the data or non data */
	if (data) {
		ret = cdns_sdmmc_ops->prepare(data->block_addr, (uintptr_t)data->data, data);
		if (ret != 0) {
			LOG_ERR("DMA Prepare failed");
			return -EINVAL;
		}
	}

	ret = cdns_sdmmc_ops->send_cmd(&cdns_sdmmc_cmd, data);
	if (ret == 0) {
		/* For read-type commands, make the DMA-written buffer visible
		 * to the CPU before the caller touches it.
		 */
		if (cmd->opcode == SD_READ_SINGLE_BLOCK || cmd->opcode == SD_APP_SEND_SCR ||
			cmd->opcode == SD_READ_MULTIPLE_BLOCK) {
			if (data == NULL) {
				LOG_ERR("Invalid data parameter");
				return -ENODATA;
			}
			ret = cdns_sdmmc_ops->cache_invd(data->block_addr, (uintptr_t)data->data,
				data->block_size);
			if (ret != 0) {
				return ret;
			}
		}
	}

	/* copying all responses as per response type */
	for (int i = 0; i < 4; i++) {
		cmd->response[i] = cdns_sdmmc_cmd.resp_data[i];
	}

	return ret;
}

/* Report card presence as detected by the low-level layer */
static int sdhc_cdns_get_card_present(const struct device *dev)
{
	return cdns_sdmmc_ops->card_present();
}

/* Report card-busy status as detected by the low-level layer */
static int sdhc_cdns_card_busy(const struct device *dev)
{
	return cdns_sdmmc_ops->busy();
}

/**
 * @brief Report static host properties (clock range, power delay, 3.3V).
 */
static int sdhc_cdns_get_host_props(const struct device *dev,
	struct sdhc_host_props *props)
{
	const struct sdhc_cdns_config *sdhc_config = DEV_CFG(dev);

	memset(props, 0, sizeof(struct sdhc_host_props));

	props->f_min = SDMMC_CLOCK_400KHZ;
	/*
	 * default max speed is 25MHZ, as per SCR register
	 * it will switch accordingly
	 */
	props->f_max = SD_CLOCK_25MHZ;
	props->power_delay = sdhc_config->power_delay_ms;
	props->host_caps.vol_330_support = true;
	props->is_spi = false;

	return 0;
}

/* Reset the host controller via the low-level layer */
static int sdhc_cdns_reset(const struct device *dev)
{
	return cdns_sdmmc_ops->reset();
}

/**
 * @brief Driver init: map MMIO, resolve the clock rate, toggle the reset
 * lines (when provided in devicetree), then initialize and reset the
 * low-level controller.
 *
 * @return 0 on success, negative errno / low-level error code otherwise.
 */
static int sdhc_cdns_init(const struct device *dev)
{
	struct sdhc_cdns_data *const data = DEV_DATA(dev);
	const struct sdhc_cdns_config *sdhc_config = DEV_CFG(dev);
	int ret;

	/* SDHC reg base */
	DEVICE_MMIO_NAMED_MAP(dev, reg_base, K_MEM_CACHE_NONE);

	/* ComboPhy reg base */
	DEVICE_MMIO_NAMED_MAP(dev, combo_phy, K_MEM_CACHE_NONE);

	/* clock setting: fixed rate from DT, or queried from the controller */
	if (sdhc_config->clk_rate == 0U) {
		if (!device_is_ready(sdhc_config->cdns_clk_dev)) {
			LOG_ERR("Clock controller device is not ready");
			return -EINVAL;
		}

		ret = clock_control_get_rate(sdhc_config->cdns_clk_dev,
			sdhc_config->clkid, &data->params.clk_rate);

		if (ret != 0) {
			return ret;
		}
	} else {
		data->params.clk_rate = sdhc_config->clk_rate;
	}

	/* Setting regbase */
	data->params.reg_base = DEVICE_MMIO_NAMED_GET(dev, reg_base);
	data->params.reg_phy = DEVICE_MMIO_NAMED_GET(dev, combo_phy);
	/* The LL layer gets only the low 16 bits of the PHY physical address */
	data->params.combophy = (DEVICE_MMIO_NAMED_ROM_PTR((dev), combo_phy)->phys_addr);
	data->params.combophy = (data->params.combophy & COMBOPHY_ADDR_MASK);

	/* resetting the lines (all three specs are expected together) */
	if (sdhc_config->reset_sdmmc.dev != NULL) {
		if (!device_is_ready(sdhc_config->reset_sdmmc.dev) ||
			!device_is_ready(sdhc_config->reset_sdmmcocp.dev) ||
			!device_is_ready(sdhc_config->reset_softphy.dev)) {
			LOG_ERR("Reset device not found");
			return -ENODEV;
		}

		ret = reset_line_toggle(sdhc_config->reset_softphy.dev,
			sdhc_config->reset_softphy.id);
		if (ret != 0) {
			LOG_ERR("Softphy Reset failed");
			return ret;
		}

		ret = reset_line_toggle(sdhc_config->reset_sdmmc.dev,
			sdhc_config->reset_sdmmc.id);
		if (ret != 0) {
			LOG_ERR("sdmmc Reset failed");
			return ret;
		}

		ret = reset_line_toggle(sdhc_config->reset_sdmmcocp.dev,
			sdhc_config->reset_sdmmcocp.id);
		if (ret != 0) {
			LOG_ERR("sdmmcocp Reset failed");
			return ret;
		}
	}

	/* Init function to call lower layer file; also fills cdns_sdmmc_ops */
	sdhc_cdns_sdmmc_init(&data->params, &data->info, &cdns_sdmmc_ops);

	ret = sdhc_cdns_reset(dev);
	if (ret != 0U) {
		LOG_ERR("Card reset failed");
		return ret;
	}

	/* Init operation called for register initialisation */
	ret = cdns_sdmmc_ops->init();
	if (ret != 0U) {
		LOG_ERR("Card initialization failed");
		return ret;
	}

	return 0;
}

/**
 * @brief Apply I/O settings; forwards clock/bus-width changes to the
 * low-level layer only when they actually changed.
 */
static int sdhc_cdns_set_io(const struct device *dev, struct sdhc_io *ios)
{
	struct sdhc_cdns_data *data = dev->data;
	struct sdhc_io *host_io = &data->host_io;

	if (host_io->bus_width != ios->bus_width || host_io->clock != ios->clock) {
		host_io->bus_width = ios->bus_width;
		host_io->clock = ios->clock;
		return cdns_sdmmc_ops->set_ios(ios->clock, ios->bus_width);
	}
	return 0;
}

static const struct sdhc_driver_api sdhc_cdns_api = {
	.request = sdhc_cdns_request,
	.set_io = sdhc_cdns_set_io,
	.get_host_props = sdhc_cdns_get_host_props,
	.get_card_present = sdhc_cdns_get_card_present,
	.reset = sdhc_cdns_reset,
	.card_busy = sdhc_cdns_card_busy,
};

/* Clock source: fixed clock-frequency property, or a clock controller */
#define SDHC_CDNS_CLOCK_RATE_INIT(inst)                                      \
	COND_CODE_1(DT_INST_NODE_HAS_PROP(inst, clock_frequency),            \
		(                                                            \
			.clk_rate = DT_INST_PROP(inst, clock_frequency),     \
			.cdns_clk_dev = NULL,                                \
			.clkid = (clock_control_subsys_t)0,                  \
		),                                                           \
		(                                                            \
			.clk_rate = 0,                                       \
			.cdns_clk_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(inst)), \
			.clkid = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(inst, clkid), \
		)                                                            \
	)

/* Three reset lines, in devicetree order: sdmmc, sdmmcocp, softphy */
#define SDHC_CDNS_RESET_SPEC_INIT(inst)                                      \
	.reset_sdmmc = RESET_DT_SPEC_INST_GET_BY_IDX(inst, 0),               \
	.reset_sdmmcocp = RESET_DT_SPEC_INST_GET_BY_IDX(inst, 1),            \
	.reset_softphy = RESET_DT_SPEC_INST_GET_BY_IDX(inst, 2),

#define SDHC_CDNS_INIT(inst)                                                 \
	static struct sdhc_cdns_desc cdns_desc                               \
			[CONFIG_CDNS_DESC_COUNT];                            \
                                                                             \
	static const struct sdhc_cdns_config sdhc_cdns_config_##inst = {     \
		DEVICE_MMIO_NAMED_ROM_INIT_BY_NAME(                          \
				reg_base, DT_DRV_INST(inst)),                \
		DEVICE_MMIO_NAMED_ROM_INIT_BY_NAME(                          \
				combo_phy, DT_DRV_INST(inst)),               \
		SDHC_CDNS_CLOCK_RATE_INIT(inst)                              \
		IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, resets),              \
			(SDHC_CDNS_RESET_SPEC_INIT(inst)))                   \
		.power_delay_ms = DT_INST_PROP(inst, power_delay_ms),        \
	};                                                                   \
	static struct sdhc_cdns_data sdhc_cdns_data_##inst = {               \
		.params = {                                                  \
			.bus_width = SDHC_BUS_WIDTH1BIT,                     \
			.desc_base = (uintptr_t) &cdns_desc,                 \
			.desc_size = SDHC_CDNS_DESC_SIZE,                    \
			.flags = 0,                                          \
		},                                                           \
		.info = {                                                    \
			.cdn_sdmmc_dev_type = SD_DS,                         \
			.ocr_voltage = OCR_3_3_3_4 | OCR_3_2_3_3,            \
		},                                                           \
	};                                                                   \
	DEVICE_DT_INST_DEFINE(inst,                                          \
			&sdhc_cdns_init,                                     \
			NULL,                                                \
			&sdhc_cdns_data_##inst,                              \
			&sdhc_cdns_config_##inst,                            \
			POST_KERNEL,                                         \
			CONFIG_SDHC_INIT_PRIORITY,                           \
			&sdhc_cdns_api);

DT_INST_FOREACH_STATUS_OKAY(SDHC_CDNS_INIT) ```
/content/code_sandbox/drivers/sdhc/sdhc_cdns.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,487
```objective-c /* * */ #ifndef __RCAR_MMC_REGISTERS_H__ #define __RCAR_MMC_REGISTERS_H__ #include <zephyr/sys/util_macro.h> /* for BIT macro */ /* * The command type register is used to select the command type * and response type */ #define RCAR_MMC_CMD 0x000 /* command */ #define RCAR_MMC_CMD_NOSTOP BIT(14) /* No automatic CMD12 issue */ #define RCAR_MMC_CMD_MULTI BIT(13) /* multiple block transfer */ #define RCAR_MMC_CMD_RD BIT(12) /* 1: read, 0: write */ #define RCAR_MMC_CMD_DATA BIT(11) /* data transfer */ #define RCAR_MMC_CMD_APP BIT(6) /* ACMD preceded by CMD55 */ #define RCAR_MMC_CMD_NORMAL (0 << 8) /* auto-detect of resp-type */ #define RCAR_MMC_CMD_RSP_NONE (3 << 8) /* response: none */ #define RCAR_MMC_CMD_RSP_R1 (4 << 8) /* response: R1, R5, R6, R7 */ #define RCAR_MMC_CMD_RSP_R1B (5 << 8) /* response: R1b, R5b */ #define RCAR_MMC_CMD_RSP_R2 (6 << 8) /* response: R2 */ #define RCAR_MMC_CMD_RSP_R3 (7 << 8) /* response: R3, R4 */ /* Command arguments register for SD card */ #define RCAR_MMC_ARG 0x010 /* command argument */ /* * The data stop register is used to enable or disable block counting at * multiple block transfer, and to control the issuing of CMD12 within * command sequences. */ #define RCAR_MMC_STOP 0x020 /* stop action control */ #define RCAR_MMC_STOP_SEC BIT(8) /* use sector count */ #define RCAR_MMC_STOP_STP BIT(0) /* issue CMD12 */ /* * The block count register is used to specify the number of * transfer blocks at multiple block transfer. */ #define RCAR_MMC_SECCNT 0x028 /* sector counter */ /* The SD card response registers hold the response from the SD card */ #define RCAR_MMC_RSP10 0x030 /* response[39:8] */ #define RCAR_MMC_RSP32 0x040 /* response[71:40] */ #define RCAR_MMC_RSP54 0x050 /* response[103:72] */ #define RCAR_MMC_RSP76 0x060 /* response[127:104] */ /* * The SD card interrupt flag register 1 indicates the response end and access * end in the command sequence. This register also indicates the card * detect/write protect state. 
*/ #define RCAR_MMC_INFO1 0x070 /* IRQ status 1 */ #define RCAR_MMC_INFO1_CD BIT(5) /* state of card detect */ #define RCAR_MMC_INFO1_INSERT BIT(4) /* card inserted */ #define RCAR_MMC_INFO1_REMOVE BIT(3) /* card removed */ #define RCAR_MMC_INFO1_CMP BIT(2) /* data complete */ #define RCAR_MMC_INFO1_RSP BIT(0) /* response complete */ /* * The SD card interrupt flag register 2 indicates the access status of the * SD buffer and SD card. */ #define RCAR_MMC_INFO2 0x078 /* IRQ status 2 */ #define RCAR_MMC_INFO2_ERR_ILA BIT(15) /* illegal access err */ #define RCAR_MMC_INFO2_CBSY BIT(14) /* command busy */ #define RCAR_MMC_INFO2_SCLKDIVEN BIT(13) /* command setting reg ena */ #define RCAR_MMC_INFO2_CLEAR BIT(11) /* the write value should always be 1 */ #define RCAR_MMC_INFO2_BWE BIT(9) /* write buffer ready */ #define RCAR_MMC_INFO2_BRE BIT(8) /* read buffer ready */ #define RCAR_MMC_INFO2_DAT0 BIT(7) /* SDDAT0 */ #define RCAR_MMC_INFO2_ERR_RTO BIT(6) /* response time out */ #define RCAR_MMC_INFO2_ERR_ILR BIT(5) /* illegal read err */ #define RCAR_MMC_INFO2_ERR_ILW BIT(4) /* illegal write err */ #define RCAR_MMC_INFO2_ERR_TO BIT(3) /* time out error */ #define RCAR_MMC_INFO2_ERR_END BIT(2) /* END bit error */ #define RCAR_MMC_INFO2_ERR_CRC BIT(1) /* CRC error */ #define RCAR_MMC_INFO2_ERR_IDX BIT(0) /* cmd index error */ #define RCAR_MMC_INFO2_ERRORS \ (RCAR_MMC_INFO2_ERR_RTO | RCAR_MMC_INFO2_ERR_ILR | \ RCAR_MMC_INFO2_ERR_ILW | RCAR_MMC_INFO2_ERR_TO | \ RCAR_MMC_INFO2_ERR_END | RCAR_MMC_INFO2_ERR_CRC | \ RCAR_MMC_INFO2_ERR_IDX | RCAR_MMC_INFO2_ERR_ILA) /* * The interrupt mask 1 register is used to enable or disable * the RCAR_MMC_INFO1 interrupt. */ #define RCAR_MMC_INFO1_MASK 0x080 /* * The interrupt mask 2 register is used to enable or disable * the RCAR_MMC_INFO2 interrupt. */ #define RCAR_MMC_INFO2_MASK 0x088 /* * The SD clock control register is used to control * the SD clock output and to set the frequency. 
*/ #define RCAR_MMC_CLKCTL 0x090 #define RCAR_MMC_CLKCTL_DIV_MASK 0x104ff #define RCAR_MMC_CLKCTL_DIV512 BIT(7) /* SDCLK = CLK / 512 */ #define RCAR_MMC_CLKCTL_DIV256 BIT(6) /* SDCLK = CLK / 256 */ #define RCAR_MMC_CLKCTL_DIV128 BIT(5) /* SDCLK = CLK / 128 */ #define RCAR_MMC_CLKCTL_DIV64 BIT(4) /* SDCLK = CLK / 64 */ #define RCAR_MMC_CLKCTL_DIV32 BIT(3) /* SDCLK = CLK / 32 */ #define RCAR_MMC_CLKCTL_DIV16 BIT(2) /* SDCLK = CLK / 16 */ #define RCAR_MMC_CLKCTL_DIV8 BIT(1) /* SDCLK = CLK / 8 */ #define RCAR_MMC_CLKCTL_DIV4 BIT(0) /* SDCLK = CLK / 4 */ #define RCAR_MMC_CLKCTL_DIV2 0 /* SDCLK = CLK / 2 */ #define RCAR_MMC_CLKCTL_RCAR_DIV1 0xff /* SDCLK = CLK (RCar ver.) */ #define RCAR_MMC_CLKCTL_OFFEN BIT(9) /* stop SDCLK when unused */ #define RCAR_MMC_CLKCTL_SCLKEN BIT(8) /* SDCLK output enable */ /* * The transfer data length register is used to specify * the transfer data size. */ #define RCAR_MMC_SIZE 0x098 /* * The SD card access control option register is used to set * the bus width and timeout counter. */ #define RCAR_MMC_OPTION 0x0A0 #define RCAR_MMC_OPTION_WIDTH_MASK (5 << 13) #define RCAR_MMC_OPTION_WIDTH_1 (4 << 13) #define RCAR_MMC_OPTION_WIDTH_4 (0 << 13) #define RCAR_MMC_OPTION_WIDTH_8 (1 << 13) /* * The SD error status register 1 indicates the CRC status, CRC error, * End error, and CMD error. */ #define RCAR_MMC_ERR_STS1 0x0B0 /* The SD error status register 2 indicates the timeout state. */ #define RCAR_MMC_ERR_STS2 0x0B8 /* SD Buffer Read/Write Register */ #define RCAR_MMC_BUF0 0x0C0 /* The DMA mode enable register enables the DMA transfer. */ #define RCAR_MMC_EXTMODE 0x360 #define RCAR_MMC_EXTMODE_DMA_EN BIT(1) /* transfer 1: DMA, 0: pio */ /* The software reset register sets a software reset. */ #define RCAR_MMC_SOFT_RST 0x380 #define RCAR_MMC_SOFT_RST_RSTX BIT(0) /* reset deassert */ /* The version register indicates the version of the SD host interface. 
*/ #define RCAR_MMC_VERSION 0x388 #define RCAR_MMC_VERSION_IP 0xff /* IP version */ /* * The host interface mode setting register selects the width for access to * the data bus. */ #define RCAR_MMC_HOST_MODE 0x390 /* The SD interface mode setting register specifies HS400 mode. */ #define RCAR_MMC_IF_MODE 0x398 #define RCAR_MMC_IF_MODE_DDR BIT(0) /* DDR mode */ /* Set of DMAC registers */ #define RCAR_MMC_DMA_MODE 0x820 #define RCAR_MMC_DMA_MODE_DIR_RD BIT(16) /* 1: from device, 0: to dev */ #define RCAR_MMC_DMA_MODE_WIDTH (BIT(4) | BIT(5)) #define RCAR_MMC_DMA_MODE_ADDR_INC BIT(0) /* 1: address inc, 0: fixed */ #define RCAR_MMC_DMA_CTL 0x828 #define RCAR_MMC_DMA_CTL_START BIT(0) /* start DMA (auto cleared) */ #define RCAR_MMC_DMA_RST 0x830 #define RCAR_MMC_DMA_RST_DTRAN0 BIT(8) #define RCAR_MMC_DMA_RST_DTRAN1 BIT(9) #define RCAR_MMC_DMA_INFO1 0x840 #define RCAR_MMC_DMA_INFO1_END_RD2 BIT(20) /* DMA from device is complete (uniphier) */ #define RCAR_MMC_DMA_INFO1_END_RD BIT(17) /* DMA from device is complete (renesas) */ #define RCAR_MMC_DMA_INFO1_END_WR BIT(16) /* DMA to device is complete */ #define RCAR_MMC_DMA_INFO1_MASK 0x848 #define RCAR_MMC_DMA_INFO2 0x850 #define RCAR_MMC_DMA_INFO2_ERR_RD BIT(17) #define RCAR_MMC_DMA_INFO2_ERR_WR BIT(16) #define RCAR_MMC_DMA_INFO2_MASK 0x858 #define RCAR_MMC_DMA_ADDR_L 0x880 #define RCAR_MMC_DMA_ADDR_H 0x888 /* set of SCC registers */ /* Initial setting register */ #define RENESAS_SDHI_SCC_DTCNTL 0x1000 #define RENESAS_SDHI_SCC_DTCNTL_TAPEN BIT(0) /* Sampling clock position setting register */ #define RENESAS_SDHI_SCC_TAPSET 0x1008 #define RENESAS_SDHI_SCC_DT2FF 0x1010 /* Sampling Clock Selection Register */ #define RENESAS_SDHI_SCC_CKSEL 0x1018 #define RENESAS_SDHI_SCC_CKSEL_DTSEL BIT(0) /* Sampling Clock Position Correction Register */ #define RENESAS_SDHI_SCC_RVSCNTL 0x1020 #define RENESAS_SDHI_SCC_RVSCNTL_RVSEN BIT(0) /* Sampling Clock Position Correction Request Register */ #define RENESAS_SDHI_SCC_RVSREQ 0x1028 #define 
RENESAS_SDHI_SCC_RVSREQ_REQTAPDOWN BIT(0) #define RENESAS_SDHI_SCC_RVSREQ_REQTAPUP BIT(1) #define RENESAS_SDHI_SCC_RVSREQ_REQTAP_MASK \ (RENESAS_SDHI_SCC_RVSREQ_REQTAPDOWN | RENESAS_SDHI_SCC_RVSREQ_REQTAPUP) #define RENESAS_SDHI_SCC_RVSREQ_ERR BIT(2) /* Sampling data comparison register */ #define RENESAS_SDHI_SCC_SMPCMP 0x1030 /* Hardware Adjustment Register 2, used for configuration HS400 mode */ #define RENESAS_SDHI_SCC_TMPPORT2 0x1038 #define RENESAS_SDHI_SCC_TMPPORT2_HS400EN BIT(31) #define RENESAS_SDHI_SCC_TMPPORT2_HS400OSEL BIT(4) #endif /* __RCAR_MMC_REGISTERS_H__ */ ```
/content/code_sandbox/drivers/sdhc/rcar_mmc_registers.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,837
```c /* */ #include <stddef.h> #include "sdhc_cdns_ll.h" #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(sdhc_cdns_ll, CONFIG_SDHC_LOG_LEVEL); /* card busy and present */ #define CARD_BUSY 1 #define CARD_NOT_BUSY 0 #define CARD_PRESENT 1 /* SRS12 error mask */ #define CDNS_SRS12_ERR_MASK 0xFFFF8000U #define CDNS_CSD_BYTE_MASK 0x000000FFU /* General define */ #define SDHC_REG_MASK 0xFFFFFFFFU #define SD_HOST_BLOCK_SIZE 0x200 #define SDMMC_DMA_MAX_BUFFER_SIZE (64 * 1024) #define CDNSMMC_ADDRESS_MASK (CONFIG_SDHC_BUFFER_ALIGNMENT - 1) #define SRS10_VAL_READ (ADMA2_32 | HS_EN | DT_WIDTH) #define SRS10_VAL_SW (ADMA2_32 | DT_WIDTH) #define SRS11_VAL_GEN (READ_CLK | CDNS_SRS11_ICE | CDNS_SRS11_ICS | CDNS_SRS11_SDCE) #define SRS11_VAL_CID (CDNS_SRS11_ICE | CDNS_SRS11_ICS | CDNS_SRS11_SDCE) #define SRS15_VAL_GEN (CDNS_SRS15_BIT_AD_64 | CDNS_SRS15_HV4E | CDNS_SRS15_V18SE) #define SRS15_VAL_RD_WR (SRS15_VAL_GEN | CDNS_SRS15_SDR104 | CDNS_SRS15_PVE) #define SRS15_VAL_CID (CDNS_SRS15_HV4E | CDNS_SRS15_V18SE) #define CARD_REG_TIME_DELAY_US 100000 #define WAIT_ICS_TIME_DELAY_US 5000 #define RESET_SRS14 0x00000000 static struct sdhc_cdns_params cdns_params; static struct sdhc_cdns_combo_phy sdhc_cdns_combo_phy_reg_info; static struct sdhc_cdns_sdmmc sdhc_cdns_sdmmc_reg_info; /* Function to write general phy registers */ static int sdhc_cdns_write_phy_reg(uint32_t phy_reg_addr, uint32_t phy_reg_addr_value, uint32_t phy_reg_data, uint32_t phy_reg_data_value) { uint32_t data = 0; /* Set PHY register address, write HRS04*/ sys_write32(phy_reg_addr_value, phy_reg_addr); /* Set PHY register data, write HRS05 */ sys_write32(phy_reg_data_value, phy_reg_data); data = sys_read32(phy_reg_data); if (data != phy_reg_data_value) { LOG_ERR("PHY_REG_DATA is not set properly"); return -ENXIO; } return 0; } int sdhc_cdns_wait_ics(uint16_t timeout, uint32_t cdn_srs_res) { /* Wait status command response ready */ if (!WAIT_FOR(((sys_read32(cdn_srs_res) & CDNS_SRS11_ICS) == CDNS_SRS11_ICS), 
timeout, k_msleep(1))) { LOG_ERR("Timed out waiting for ICS response"); return -ETIMEDOUT; } return 0; } static int sdhc_cdns_busy(void) { unsigned int data; data = sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS09); return (data & CDNS_SRS09_STAT_DAT_BUSY) ? CARD_BUSY : CARD_NOT_BUSY; } static int sdhc_cdns_card_present(void) { uint32_t timeout = CARD_REG_TIME_DELAY_US; if (!WAIT_FOR((((sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS09)) & CDNS_SRS09_CI) == CDNS_SRS09_CI), timeout, k_msleep(1))) { LOG_ERR("Card detection timeout"); return -ETIMEDOUT; } return CARD_PRESENT; } static int sdhc_cdns_vol_reset(void) { /* Reset embedded card, turn off supply voltage */ sys_write32(BUS_VOLTAGE_3_3_V, (cdns_params.reg_base + SDHC_CDNS_SRS10)); /* * Turn on supply voltage * CDNS_SRS10_BVS = 7, CDNS_SRS10_BP = 1, BP2 only in UHS2 mode */ sys_write32(BUS_VOLTAGE_3_3_V | CDNS_SRS10_BP, (cdns_params.reg_base + SDHC_CDNS_SRS10)); return 0; } /* * Values are taken from IP documents and calc_setting.py script * with input value- mode sd_ds, * sdmclk 5000, * sdclk 10000, * iocell_input_delay 2500, * iocell_output_delay 2500 and * delay_element 24 */ void cdns_sdhc_set_sdmmc_params(struct sdhc_cdns_combo_phy *sdhc_cdns_combo_phy_reg, struct sdhc_cdns_sdmmc *sdhc_cdns_sdmmc_reg) { /* Values are taken by the reference of cadence IP documents */ sdhc_cdns_combo_phy_reg->cp_clk_wr_delay = 0; sdhc_cdns_combo_phy_reg->cp_clk_wrdqs_delay = 0; sdhc_cdns_combo_phy_reg->cp_data_select_oe_end = 1; sdhc_cdns_combo_phy_reg->cp_dll_bypass_mode = 1; sdhc_cdns_combo_phy_reg->cp_dll_locked_mode = 3; sdhc_cdns_combo_phy_reg->cp_dll_start_point = 4; sdhc_cdns_combo_phy_reg->cp_gate_cfg_always_on = 1; sdhc_cdns_combo_phy_reg->cp_io_mask_always_on = 0; sdhc_cdns_combo_phy_reg->cp_io_mask_end = 2; sdhc_cdns_combo_phy_reg->cp_io_mask_start = 0; sdhc_cdns_combo_phy_reg->cp_rd_del_sel = 52; sdhc_cdns_combo_phy_reg->cp_read_dqs_cmd_delay = 0; sdhc_cdns_combo_phy_reg->cp_read_dqs_delay = 0; 
sdhc_cdns_combo_phy_reg->cp_sw_half_cycle_shift = 0; sdhc_cdns_combo_phy_reg->cp_sync_method = 1; sdhc_cdns_combo_phy_reg->cp_underrun_suppress = 1; sdhc_cdns_combo_phy_reg->cp_use_ext_lpbk_dqs = 1; sdhc_cdns_combo_phy_reg->cp_use_lpbk_dqs = 1; sdhc_cdns_combo_phy_reg->cp_use_phony_dqs = 1; sdhc_cdns_combo_phy_reg->cp_use_phony_dqs_cmd = 1; sdhc_cdns_sdmmc_reg->sdhc_extended_rd_mode = 1; sdhc_cdns_sdmmc_reg->sdhc_extended_wr_mode = 1; sdhc_cdns_sdmmc_reg->sdhc_hcsdclkadj = 6; sdhc_cdns_sdmmc_reg->sdhc_idelay_val = 1; sdhc_cdns_sdmmc_reg->sdhc_rdcmd_en = 1; sdhc_cdns_sdmmc_reg->sdhc_rddata_en = 1; sdhc_cdns_sdmmc_reg->sdhc_rw_compensate = 10; sdhc_cdns_sdmmc_reg->sdhc_sdcfsh = 0; sdhc_cdns_sdmmc_reg->sdhc_sdcfsl = 1; sdhc_cdns_sdmmc_reg->sdhc_wrcmd0_dly = 1; sdhc_cdns_sdmmc_reg->sdhc_wrcmd0_sdclk_dly = 0; sdhc_cdns_sdmmc_reg->sdhc_wrcmd1_dly = 0; sdhc_cdns_sdmmc_reg->sdhc_wrcmd1_sdclk_dly = 0; sdhc_cdns_sdmmc_reg->sdhc_wrdata0_dly = 1; sdhc_cdns_sdmmc_reg->sdhc_wrdata0_sdclk_dly = 0; sdhc_cdns_sdmmc_reg->sdhc_wrdata1_dly = 0; sdhc_cdns_sdmmc_reg->sdhc_wrdata1_sdclk_dly = 0; } /* Phy register programing for phy init */ static int sdhc_cdns_program_phy_reg(struct sdhc_cdns_combo_phy *sdhc_cdns_combo_phy_reg, struct sdhc_cdns_sdmmc *sdhc_cdns_sdmmc_reg) { uint32_t value = 0; int ret = 0; /* * program PHY_DQS_TIMING_REG * This register controls the DQS related timing */ value = (CP_USE_EXT_LPBK_DQS(sdhc_cdns_combo_phy_reg->cp_use_ext_lpbk_dqs)) | (CP_USE_LPBK_DQS(sdhc_cdns_combo_phy_reg->cp_use_lpbk_dqs)) | (CP_USE_PHONY_DQS(sdhc_cdns_combo_phy_reg->cp_use_phony_dqs)) | (CP_USE_PHONY_DQS_CMD(sdhc_cdns_combo_phy_reg->cp_use_phony_dqs_cmd)); ret = sdhc_cdns_write_phy_reg(cdns_params.reg_base + SDHC_CDNS_HRS04, cdns_params.combophy + PHY_DQS_TIMING_REG, cdns_params.reg_base + SDHC_CDNS_HRS05, value); if (ret != 0U) { LOG_ERR("Error in PHY_DQS_TIMING_REG programming"); return ret; } /* * program PHY_GATE_LPBK_CTRL_REG * This register controls the gate and loopback control 
related timing. */ value = (CP_SYNC_METHOD(sdhc_cdns_combo_phy_reg->cp_sync_method)) | (CP_SW_HALF_CYCLE_SHIFT(sdhc_cdns_combo_phy_reg->cp_sw_half_cycle_shift)) | (CP_RD_DEL_SEL(sdhc_cdns_combo_phy_reg->cp_rd_del_sel)) | (CP_UNDERRUN_SUPPRESS(sdhc_cdns_combo_phy_reg->cp_underrun_suppress)) | (CP_GATE_CFG_ALWAYS_ON(sdhc_cdns_combo_phy_reg->cp_gate_cfg_always_on)); ret = sdhc_cdns_write_phy_reg(cdns_params.reg_base + SDHC_CDNS_HRS04, cdns_params.combophy + PHY_GATE_LPBK_CTRL_REG, cdns_params.reg_base + SDHC_CDNS_HRS05, value); if (ret != 0U) { LOG_ERR("Error in PHY_GATE_LPBK_CTRL_REG programming"); return -ret; } /* * program PHY_DLL_MASTER_CTRL_REG * This register holds the control for the Master DLL logic. */ value = (CP_DLL_BYPASS_MODE(sdhc_cdns_combo_phy_reg->cp_dll_bypass_mode)) | (CP_DLL_START_POINT(sdhc_cdns_combo_phy_reg->cp_dll_start_point)); ret = sdhc_cdns_write_phy_reg(cdns_params.reg_base + SDHC_CDNS_HRS04, cdns_params.combophy + PHY_DLL_MASTER_CTRL_REG, cdns_params.reg_base + SDHC_CDNS_HRS05, value); if (ret != 0U) { LOG_ERR("Error in PHY_DLL_MASTER_CTRL_REG programming"); return ret; } /* * program PHY_DLL_SLAVE_CTRL_REG * This register holds the control for the slave DLL logic. */ value = (CP_READ_DQS_CMD_DELAY(sdhc_cdns_combo_phy_reg->cp_read_dqs_cmd_delay)) | (CP_CLK_WRDQS_DELAY(sdhc_cdns_combo_phy_reg->cp_clk_wrdqs_delay)) | (CP_CLK_WR_DELAY(sdhc_cdns_combo_phy_reg->cp_clk_wr_delay)) | (CP_READ_DQS_DELAY(sdhc_cdns_combo_phy_reg->cp_read_dqs_delay)); ret = sdhc_cdns_write_phy_reg(cdns_params.reg_base + SDHC_CDNS_HRS04, cdns_params.combophy + PHY_DLL_SLAVE_CTRL_REG, cdns_params.reg_base + SDHC_CDNS_HRS05, value); if (ret != 0U) { LOG_ERR("Error in PHY_DLL_SLAVE_CTRL_REG programming"); return ret; } /* * program PHY_CTRL_REG * This register handles the global control settings for the PHY. 
	 */
	sys_write32(cdns_params.combophy + PHY_CTRL_REG,
		cdns_params.reg_base + SDHC_CDNS_HRS04);
	value = sys_read32(cdns_params.reg_base + SDHC_CDNS_HRS05);

	/* phony_dqs_timing=0 */
	value &= ~(CP_PHONY_DQS_TIMING_MASK << CP_PHONY_DQS_TIMING_SHIFT);
	sys_write32(value, cdns_params.reg_base + SDHC_CDNS_HRS05);

	/*
	 * switch off DLL_RESET
	 * NOTE(review): this poll has no timeout; it spins until the PHY
	 * reports init complete — confirm this is acceptable at init time.
	 */
	do {
		value = sys_read32(cdns_params.reg_base + SDHC_CDNS_HRS09);
		value |= CDNS_HRS09_PHY_SW_RESET;
		sys_write32(value, cdns_params.reg_base + SDHC_CDNS_HRS09);
		value = sys_read32(cdns_params.reg_base + SDHC_CDNS_HRS09);
	/* polling PHY_INIT_COMPLETE */
	} while ((value & CDNS_HRS09_PHY_INIT_COMP) != CDNS_HRS09_PHY_INIT_COMP);

	/*
	 * program PHY_DQ_TIMING_REG
	 * This register controls the DQ related timing.
	 */
	sdhc_cdns_combo_phy_reg->cp_io_mask_end = 0U;
	value = (CP_IO_MASK_ALWAYS_ON(sdhc_cdns_combo_phy_reg->cp_io_mask_always_on))
		| (CP_IO_MASK_END(sdhc_cdns_combo_phy_reg->cp_io_mask_end))
		| (CP_IO_MASK_START(sdhc_cdns_combo_phy_reg->cp_io_mask_start))
		| (CP_DATA_SELECT_OE_END(sdhc_cdns_combo_phy_reg->cp_data_select_oe_end));
	ret = sdhc_cdns_write_phy_reg(cdns_params.reg_base + SDHC_CDNS_HRS04,
			cdns_params.combophy + PHY_DQ_TIMING_REG,
			cdns_params.reg_base + SDHC_CDNS_HRS05, value);
	if (ret != 0U) {
		LOG_ERR("Error in PHY_DQ_TIMING_REG programming");
		return ret;
	}
	return 0;
}

/*
 * Invalidate the data cache over a DMA buffer after a transfer.
 * NOTE(review): 'lba' is unused here — presumably retained to match the
 * ops-table signature; confirm against the sdhc_cdns_ops definition.
 */
static int sdhc_cdns_cache_invd(int lba, uintptr_t buf, size_t size)
{
	int ret = 0;

	ret = arch_dcache_invd_range((void *)buf, size);
	if (ret != 0) {
		LOG_ERR("%s: error in invalidate dcache with ret %d", __func__, ret);
		return ret;
	}

	return 0;
}

/* DMA preparation for the read and write operation */
static int sdhc_cdns_prepare(uint32_t dma_start_addr, uintptr_t dma_buff,
	struct sdhc_data *data)
{
	struct sdhc_cdns_desc *desc;
	uint32_t desc_cnt, i;
	uintptr_t base;		/* NOTE(review): assigned below but never read */
	uint64_t desc_base;
	uint32_t size = data->blocks * data->block_size;

	/* Buffer and descriptor area must satisfy controller alignment */
	__ASSERT_NO_MSG(((dma_buff & CDNSMMC_ADDRESS_MASK) == 0) &&
		(cdns_params.desc_size > 0) &&
		((cdns_params.desc_size & MMC_BLOCK_MASK) == 0));

	/* Flush the payload so the controller sees up-to-date memory */
	arch_dcache_flush_range((void *)dma_buff, size);

	/* One ADMA descriptor per SDMMC_DMA_MAX_BUFFER_SIZE chunk (round up) */
	desc_cnt = (size + (SDMMC_DMA_MAX_BUFFER_SIZE) - 1) /
		(SDMMC_DMA_MAX_BUFFER_SIZE);
	__ASSERT_NO_MSG(desc_cnt * sizeof(struct sdhc_cdns_desc) <
		cdns_params.desc_size);

	if (desc_cnt > CONFIG_CDNS_DESC_COUNT) {
		LOG_ERR("Requested data transfer length %u greater than configured length %u",
			size, (CONFIG_CDNS_DESC_COUNT * SDMMC_DMA_MAX_BUFFER_SIZE));
		return -EINVAL;
	}

	/*
	 * Creating descriptor as per the desc_count and linked list of
	 * descriptor for contiguous desc alignment
	 */
	base = cdns_params.reg_base;
	desc = (struct sdhc_cdns_desc *)cdns_params.desc_base;
	desc_base = (uint64_t)desc;
	i = 0;

	/* All but the last descriptor carry a full 64 KB chunk */
	while ((i+1) < desc_cnt) {
		desc->attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA;
		desc->reserved = 0;
		desc->len = MAX_64KB_PAGE;
		desc->addr_lo = (dma_buff & 0xffffffff) +
			(SDMMC_DMA_MAX_BUFFER_SIZE * i);
		desc->addr_hi = (dma_buff >> 32) & 0xffffffff;
		size -= SDMMC_DMA_MAX_BUFFER_SIZE;
		desc++;
		i++;
	}

	/* Final descriptor holds the remainder and terminates the chain */
	desc->attr = ADMA_DESC_ATTR_VALID | ADMA_DESC_TRANSFER_DATA |
		ADMA_DESC_ATTR_END;
	desc->reserved = 0;
	desc->len = size;
	desc->addr_lo = (dma_buff & 0xffffffff) + (SDMMC_DMA_MAX_BUFFER_SIZE * i);
	desc->addr_hi = (dma_buff >> 32) & 0xffffffff;

	/* Publish the descriptor table base address to the controller */
	sys_write32((uint32_t)desc_base, cdns_params.reg_base + SDHC_CDNS_SRS22);
	sys_write32((uint32_t)(desc_base >> 32),
		cdns_params.reg_base + SDHC_CDNS_SRS23);
	arch_dcache_flush_range((void *)cdns_params.desc_base,
		desc_cnt * sizeof(struct sdhc_cdns_desc));

	/* Block size/count and SDMA buffer boundary */
	sys_write32((data->block_size << CDNS_SRS01_BLK_SIZE |
		data->blocks << CDNS_SRS01_BLK_COUNT_CT |
		BUFFER_BOUNDARY_512K << CDNS_SRS01_SDMA_BUF),
		cdns_params.reg_base + SDHC_CDNS_SRS01);

	return 0;
}

/*
 * Program the SD clock divider (SRS11) for the requested frequency and
 * re-run the DLL reset / extended-write-mode sequence.
 * NOTE(review): no guard against clk == 0 — callers gate on clk != 0.
 */
static int sdhc_cdns_host_set_clk(int clk)
{
	uint32_t sdclkfsval = 0;
	uint32_t dtcvval = 0xe;
	int ret = 0;

	sdclkfsval = (cdns_params.clk_rate / 2000) / clk;
	sys_write32(0, cdns_params.reg_base + SDHC_CDNS_SRS11);
	sys_write32(((dtcvval << CDNS_SRS11_DTCV) |
		(sdclkfsval << CDNS_SRS11_SDCLKFS) | CDNS_SRS11_ICE),
		cdns_params.reg_base + SDHC_CDNS_SRS11);
	ret = sdhc_cdns_wait_ics(WAIT_ICS_TIME_DELAY_US,
		cdns_params.reg_base + SDHC_CDNS_SRS11);
	if (ret != 0) {
		return ret;
	}

	/* Enable DLL reset */
	sys_clear_bit(cdns_params.reg_base + SDHC_CDNS_HRS09, 0);
	/* Set extended_wr_mode */
	sys_write32(((sys_read32(cdns_params.reg_base + SDHC_CDNS_HRS09) &
		0xFFFFFFF7) | CDNS_HRS09_EXT_WR_MODE),
		(cdns_params.reg_base + SDHC_CDNS_HRS09));
	/* Release DLL reset */
	sys_set_bits(cdns_params.reg_base + SDHC_CDNS_HRS09,
		CDNS_HRS09_RDCMD_EN_BIT | CDNS_HRS09_RDDATA_EN_BIT);
	sys_write32(((dtcvval << CDNS_SRS11_DTCV) |
		(sdclkfsval << CDNS_SRS11_SDCLKFS) | CDNS_SRS11_ICE |
		CDNS_SRS11_SDCE), cdns_params.reg_base + SDHC_CDNS_SRS11);
	sys_write32(0xFFFFFFFF, cdns_params.reg_base + SDHC_CDNS_SRS13);
	return 0;
}

/*
 * Configure bus width (SRS10) and, when the clock is not gated, the SD
 * clock frequency.
 */
static int sdhc_cdns_set_ios(unsigned int clk, unsigned int width)
{
	int ret = 0;

	switch (width) {
	case SDHC_BUS_WIDTH1BIT:
		sys_clear_bit(cdns_params.reg_base + SDHC_CDNS_SRS10, WIDTH_BIT1);
		break;
	case SDHC_BUS_WIDTH4BIT:
		sys_set_bit(cdns_params.reg_base + SDHC_CDNS_SRS10, WIDTH_BIT4);
		break;
	case SDHC_BUS_WIDTH8BIT:
		sys_set_bit(cdns_params.reg_base + SDHC_CDNS_SRS10, WIDTH_BIT8);
		break;
	default:
		__ASSERT_NO_MSG(0);
		break;
	}

	/* Perform clock configuration when SD clock is not gated */
	if (clk != 0) {
		ret = sdhc_cdns_host_set_clk(clk);
		if (ret != 0) {
			LOG_ERR("%s: Clock configuration failed", __func__);
			return ret;
		}
	}

	return 0;
}

/* Programming HRS register for initialisation */
static int sdhc_cdns_init_hrs_io(struct sdhc_cdns_combo_phy *sdhc_cdns_combo_phy_reg,
	struct sdhc_cdns_sdmmc *sdhc_cdns_sdmmc_reg)
{
	uint32_t value = 0;
	int ret = 0;

	/*
	 * program HRS09, register 42
	 * PHY Control and Status Register
	 */
	value = (CDNS_HRS09_RDDATA_EN(sdhc_cdns_sdmmc_reg->sdhc_rddata_en))
		| (CDNS_HRS09_RDCMD_EN(sdhc_cdns_sdmmc_reg->sdhc_rdcmd_en))
		| (CDNS_HRS09_EXTENDED_WR(sdhc_cdns_sdmmc_reg->sdhc_extended_wr_mode))
		| (CDNS_HRS09_EXT_RD_MODE(sdhc_cdns_sdmmc_reg->sdhc_extended_rd_mode));
	sys_write32(value, cdns_params.reg_base + SDHC_CDNS_HRS09);

	/*
	 * program HRS10, register 43
	 * Host Controller SDCLK start point adjustment
	 */
	value = (SDHC_HRS10_HCSDCLKADJ(sdhc_cdns_sdmmc_reg->sdhc_hcsdclkadj));
	sys_write32(value, cdns_params.reg_base + SDHC_CDNS_HRS10);

	/*
	 * program HRS16, register 48
	 * CMD/DAT output delay
	 */
	value = (CDNS_HRS16_WRDATA1_SDCLK_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrdata1_sdclk_dly))
		| (CDNS_HRS16_WRDATA0_SDCLK_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrdata0_sdclk_dly))
		| (CDNS_HRS16_WRCMD1_SDCLK_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrcmd1_sdclk_dly))
		| (CDNS_HRS16_WRCMD0_SDCLK_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrcmd0_sdclk_dly))
		| (CDNS_HRS16_WRDATA1_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrdata1_dly))
		| (CDNS_HRS16_WRDATA0_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrdata0_dly))
		| (CDNS_HRS16_WRCMD1_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrcmd1_dly))
		| (CDNS_HRS16_WRCMD0_DLY(sdhc_cdns_sdmmc_reg->sdhc_wrcmd0_dly));
	sys_write32(value, cdns_params.reg_base + SDHC_CDNS_HRS16);

	/*
	 * program HRS07, register 40
	 * IO Delay Information Register
	 */
	value = (CDNS_HRS07_RW_COMPENSATE(sdhc_cdns_sdmmc_reg->sdhc_rw_compensate))
		| (CDNS_HRS07_IDELAY_VAL(sdhc_cdns_sdmmc_reg->sdhc_idelay_val));
	sys_write32(value, cdns_params.reg_base + SDHC_CDNS_HRS07);

	return ret;
}

/*
 * Select the SD clock divisor from the configured device speed mode and
 * program SRS11, then run the DLL-reset/extended-write-mode sequence.
 */
static int sdhc_cdns_set_clk(struct sdhc_cdns_params *cdn_sdmmc_dev_type_params)
{
	uint32_t dtcvval, sdclkfsval;
	int ret = 0;

	dtcvval = DTC_VAL;
	sdclkfsval = 0;

	/* Condition for Default speed mode and SDR12 and SDR_BC */
	if ((cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_DS) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_UHS_SDR12) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == EMMC_SDR_BC)) {
		sdclkfsval = 4;
	} else if ((cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_HS) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_UHS_SDR25) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_UHS_DDR50) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == EMMC_SDR)) {
		sdclkfsval = 2;
	} else if ((cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_UHS_SDR50) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == EMMC_DDR) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == EMMC_HS400) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == EMMC_HS400ES)) {
		sdclkfsval = 1;
	} else if ((cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == SD_UHS_SDR104) ||
		(cdn_sdmmc_dev_type_params->cdn_sdmmc_dev_type == EMMC_HS200)) {
		sdclkfsval = 0;
	}

	/* Disabling SD clock enable */
	sys_write32(0, cdns_params.reg_base + SDHC_CDNS_SRS11);
	sys_write32((dtcvval << CDNS_SRS11_DTCV) |
		(sdclkfsval << CDNS_SRS11_SDCLKFS) | CDNS_SRS11_ICE,
		cdns_params.reg_base + SDHC_CDNS_SRS11);
	ret = sdhc_cdns_wait_ics(WAIT_ICS_TIME_DELAY_US,
		cdns_params.reg_base + SDHC_CDNS_SRS11);
	if (ret != 0) {
		return ret;
	}

	/* Enable DLL reset */
	sys_clear_bit(cdns_params.reg_base + SDHC_CDNS_HRS09, 0);
	/* Set extended_wr_mode */
	sys_write32(((sys_read32(cdns_params.reg_base + SDHC_CDNS_HRS09) &
		0xFFFFFFF7) | CDNS_HRS09_EXT_WR_MODE),
		(cdns_params.reg_base + SDHC_CDNS_HRS09));
	/* Release DLL reset */
	sys_set_bits(cdns_params.reg_base + SDHC_CDNS_HRS09,
		CDNS_HRS09_RDCMD_EN_BIT | CDNS_HRS09_RDDATA_EN_BIT);
	sys_write32((dtcvval << CDNS_SRS11_DTCV) |
		(sdclkfsval << CDNS_SRS11_SDCLKFS) | CDNS_SRS11_ICE |
		CDNS_SRS11_SDCE, cdns_params.reg_base + SDHC_CDNS_SRS11);
	sys_write32(0xFFFFFFFF, cdns_params.reg_base + SDHC_CDNS_SRS13);
	return 0;
}

/*
 * Gate the SD clock, issue a host software reset (HRS00.SWR), wait for it
 * to self-clear, then assert the PHY software reset.
 */
static int sdhc_cdns_reset(void)
{
	int32_t timeout;

	sys_clear_bits(cdns_params.reg_base + SDHC_CDNS_SRS11, 0xFFFF);

	/* Software reset */
	sys_set_bit(cdns_params.reg_base + SDHC_CDNS_HRS00, CDNS_HRS00_SWR);

	/* Wait status command response ready */
	timeout = CARD_REG_TIME_DELAY_US;
	if (!WAIT_FOR(((sys_read32(cdns_params.reg_base + SDHC_CDNS_HRS00) &
		CDNS_HRS00_SWR) == 0), timeout, k_msleep(1))) {
		LOG_ERR("Software reset is not completed...timedout");
		return -ETIMEDOUT;
	}

	/* Step 1, switch on DLL_RESET */
	sys_clear_bit(cdns_params.reg_base + SDHC_CDNS_HRS09,
		CDNS_HRS09_PHY_SW_RESET);

	return 0;
}

/*
 * Full controller bring-up: PHY registers, HRS I/O registers, card
 * detection, supply-voltage reset, and initial clock configuration.
 */
static int sdhc_cdns_init(void)
{
	int ret = 0;

	ret = sdhc_cdns_program_phy_reg(&sdhc_cdns_combo_phy_reg_info,
		&sdhc_cdns_sdmmc_reg_info);
	if (ret != 0U) {
		LOG_ERR("SoftPhy register configuration failed");
		return ret;
	}

	ret = sdhc_cdns_init_hrs_io(&sdhc_cdns_combo_phy_reg_info,
		&sdhc_cdns_sdmmc_reg_info);
	if (ret != 0U) {
		LOG_ERR("Configuration for HRS IO reg failed");
		return ret;
	}

	ret = sdhc_cdns_card_present();
	if (ret != CARD_PRESENT) {
		LOG_ERR("SD card does not detect");
		return -ETIMEDOUT;
	}

	ret = sdhc_cdns_vol_reset();
	if (ret != 0U) {
		LOG_ERR("SD/MMC card reset failed");
		return ret;
	}

	ret = sdhc_cdns_set_clk(&cdns_params);
	if (ret != 0U) {
		LOG_ERR("Host controller set clk failed");
		return ret;
	}

	return 0;
}

/*
 * Issue one SD/MMC command. Pre-configures SRS10/11/15 per command index,
 * builds the SRS03 transfer-mode/command word from the response type, then
 * (continues below) waits for completion and extracts the response.
 */
static int sdhc_cdns_send_cmd(struct sdmmc_cmd *cmd, struct sdhc_data *data)
{
	uint32_t op = 0;
	uint32_t value;
	uintptr_t base;
	int32_t timeout;
	uint32_t cmd_indx;
	uint32_t status_check = 0;

	__ASSERT(cmd, "Assert %s function call", __func__);

	base = cdns_params.reg_base;
	cmd_indx = (cmd->cmd_idx) << CDNS_SRS03_COM_IDX;

	if (data) {
		/* Data-bearing commands */
		switch (cmd->cmd_idx) {
		case SD_SWITCH:
			op = CDNS_SRS03_DATA_PRSNT;
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS10,
				SRS10_VAL_SW);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_GEN);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				SRS15_VAL_GEN);
			break;

		case SD_WRITE_SINGLE_BLOCK:
		case SD_READ_SINGLE_BLOCK:
			op = CDNS_SRS03_DATA_PRSNT;
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS10,
				SRS10_VAL_READ);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_GEN);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				SRS15_VAL_RD_WR);
			sys_write32(CDNS_SRS00_SAAR,
				cdns_params.reg_base + SDHC_CDNS_SRS00);
			break;

		case SD_WRITE_MULTIPLE_BLOCK:
		case SD_READ_MULTIPLE_BLOCK:
			op = CDNS_SRS03_DATA_PRSNT | AUTO_CMD23 |
				CDNS_SRS03_MULTI_BLK_READ;
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS10,
				SRS10_VAL_READ);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_GEN);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				SRS15_VAL_RD_WR);
			sys_write32(CDNS_SRS00_SAAR,
				cdns_params.reg_base + SDHC_CDNS_SRS00);
			break;

		case SD_APP_SEND_SCR:
			op = CDNS_SRS03_DATA_PRSNT;
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS10,
				ADMA2_32);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_GEN);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				SRS15_VAL_GEN);
			break;

		default:
			op = 0;
			break;
		}
	} else {
		/* Commands without a data phase */
		switch (cmd->cmd_idx) {
		case SD_GO_IDLE_STATE:
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_CID);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				CDNS_SRS15_HV4E);
			break;

		case SD_ALL_SEND_CID:
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_CID);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				SRS15_VAL_CID);
			break;

		case SD_SEND_IF_COND:
			op = CDNS_SRS03_CMD_IDX_CHK_EN;
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS11,
				SRS11_VAL_GEN);
			sys_set_bits(cdns_params.reg_base + SDHC_CDNS_SRS15,
				CDNS_SRS15_HV4E);
			break;

		case SD_STOP_TRANSMISSION:
			op = CMD_STOP_ABORT_CMD;
			break;

		case SD_SEND_STATUS:
			break;

		case SD_SELECT_CARD:
			op = CDNS_SRS03_MULTI_BLK_READ;
			break;

		default:
			op = 0;
			break;
		}
	}

	/* Fold response-type flags into the SRS03 command word */
	switch (cmd->resp_type) {
	case SD_RSP_TYPE_NONE:
		op |= (CDNS_SRS03_CMD_READ | CDNS_SRS03_MULTI_BLK_READ |
			CDNS_SRS03_DMA_EN | CDNS_SRS03_BLK_CNT_EN);
		break;

	case SD_RSP_TYPE_R2:
		op |= (CDNS_SRS03_CMD_READ | CDNS_SRS03_MULTI_BLK_READ |
			CDNS_SRS03_DMA_EN | CDNS_SRS03_BLK_CNT_EN |
			RES_TYPE_SEL_136 | CDNS_SRS03_RESP_CRCCE);
		break;

	case SD_RSP_TYPE_R3:
		op |= (CDNS_SRS03_CMD_READ | CDNS_SRS03_MULTI_BLK_READ |
			CDNS_SRS03_DMA_EN | CDNS_SRS03_BLK_CNT_EN |
			RES_TYPE_SEL_48);
		break;

	case SD_RSP_TYPE_R1:
		if ((cmd->cmd_idx == SD_WRITE_SINGLE_BLOCK) ||
			(cmd->cmd_idx == SD_WRITE_MULTIPLE_BLOCK)) {
			op |= (CDNS_SRS03_DMA_EN | CDNS_SRS03_BLK_CNT_EN |
				RES_TYPE_SEL_48 | CDNS_SRS03_RESP_CRCCE |
				CDNS_SRS03_CMD_IDX_CHK_EN);
		} else {
			op |= (CDNS_SRS03_DMA_EN | CDNS_SRS03_BLK_CNT_EN |
				CDNS_SRS03_CMD_READ | RES_TYPE_SEL_48 |
				CDNS_SRS03_RESP_CRCCE |
				CDNS_SRS03_CMD_IDX_CHK_EN);
		}
		break;

	default:
		op |=
(CDNS_SRS03_DMA_EN | CDNS_SRS03_BLK_CNT_EN | CDNS_SRS03_CMD_READ | CDNS_SRS03_MULTI_BLK_READ | RES_TYPE_SEL_48 | CDNS_SRS03_RESP_CRCCE | CDNS_SRS03_CMD_IDX_CHK_EN); break; } timeout = CARD_REG_TIME_DELAY_US; if (!WAIT_FOR((sdhc_cdns_busy() == 0), timeout, k_msleep(1))) { k_panic(); } sys_write32(~0, cdns_params.reg_base + SDHC_CDNS_SRS12); sys_write32(cmd->cmd_arg, cdns_params.reg_base + SDHC_CDNS_SRS02); sys_write32(RESET_SRS14, cdns_params.reg_base + SDHC_CDNS_SRS14); sys_write32(op | cmd_indx, cdns_params.reg_base + SDHC_CDNS_SRS03); timeout = CARD_REG_TIME_DELAY_US; if (!WAIT_FOR(((((sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS12)) & CDNS_SRS12_CC) == CDNS_SRS12_CC) | (((sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS12)) & CDNS_SRS12_EINT) == CDNS_SRS12_EINT)), timeout, k_msleep(1))) { LOG_ERR("Response timeout SRS12"); return -ETIMEDOUT; } value = sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS12); status_check = value & CDNS_SRS12_ERR_MASK; if (status_check != 0U) { LOG_ERR("SD host controller send command failed, SRS12 = %X", status_check); return -EIO; } if ((op & RES_TYPE_SEL_48) || (op & RES_TYPE_SEL_136)) { cmd->resp_data[0] = sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS04); if (op & RES_TYPE_SEL_136) { cmd->resp_data[1] = sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS05); cmd->resp_data[2] = sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS06); cmd->resp_data[3] = sys_read32(cdns_params.reg_base + SDHC_CDNS_SRS07); /* 136-bit: RTS=01b, Response field R[127:8] - RESP3[23:0], * RESP2[31:0], RESP1[31:0], RESP0[31:0] * Subsystem expects 128 bits response but cadence SDHC sends * 120 bits response from R[127:8]. Bits manupulation to address * the correct responses for the 136 bit response type. 
			 */
			cmd->resp_data[3] = ((cmd->resp_data[3] << 8) |
				((cmd->resp_data[2] >> 24) & CDNS_CSD_BYTE_MASK));
			cmd->resp_data[2] = ((cmd->resp_data[2] << 8) |
				((cmd->resp_data[1] >> 24) & CDNS_CSD_BYTE_MASK));
			cmd->resp_data[1] = ((cmd->resp_data[1] << 8) |
				((cmd->resp_data[0] >> 24) & CDNS_CSD_BYTE_MASK));
			cmd->resp_data[0] = (cmd->resp_data[0] << 8);
		}
	}

	return 0;
}

/* Operation table handed back to the generic SDHC driver layer */
static const struct sdhc_cdns_ops cdns_sdmmc_ops = {
	.init			= sdhc_cdns_init,
	.send_cmd		= sdhc_cdns_send_cmd,
	.card_present		= sdhc_cdns_card_present,
	.set_ios		= sdhc_cdns_set_ios,
	.prepare		= sdhc_cdns_prepare,
	.cache_invd		= sdhc_cdns_cache_invd,
	.busy			= sdhc_cdns_busy,
	.reset			= sdhc_cdns_reset,
};

/*
 * Entry point for the platform layer: validate and latch the controller
 * parameters, expose the ops table, and load the default PHY/host timing
 * values.
 */
void sdhc_cdns_sdmmc_init(struct sdhc_cdns_params *params, struct sdmmc_device_info *info,
	const struct sdhc_cdns_ops **cb_sdmmc_ops)
{
	__ASSERT_NO_MSG((params != NULL) &&
		((params->reg_base & MMC_BLOCK_MASK) == 0) &&
		((params->desc_size & MMC_BLOCK_MASK) == 0) &&
		((params->reg_phy & MMC_BLOCK_MASK) == 0) &&
		(params->desc_size > 0) &&
		(params->clk_rate > 0) &&
		((params->bus_width == MMC_BUS_WIDTH_1) ||
		(params->bus_width == MMC_BUS_WIDTH_4) ||
		(params->bus_width == MMC_BUS_WIDTH_8)));

	/* Keep a driver-local copy; callers may free/reuse 'params' */
	memcpy(&cdns_params, params, sizeof(struct sdhc_cdns_params));
	cdns_params.cdn_sdmmc_dev_type = info->cdn_sdmmc_dev_type;
	*cb_sdmmc_ops = &cdns_sdmmc_ops;

	cdns_sdhc_set_sdmmc_params(&sdhc_cdns_combo_phy_reg_info,
		&sdhc_cdns_sdmmc_reg_info);
}
```
/content/code_sandbox/drivers/sdhc/sdhc_cdns_ll.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,781
```unknown config SAM_HSMCI bool "ATMEL SAM HSMCI driver" default y depends on DT_HAS_ATMEL_SAM_HSMCI_ENABLED select SDHC_SUPPORTS_NATIVE_MODE help Enable the ATMEL SAM HSMCI MMC/SD card driver. if SAM_HSMCI config SAM_HSMCI_PDCMODE bool "Use PDC if available" default y if SOC_SERIES_SAM4E help Use peripheral DMA controller, if supported config SAM_HSMCI_PWRSAVE bool "Power save during card inactive" default y help Power-save mode reduces the clock-speed during SD card inactivity. if SAM_HSMCI_PWRSAVE config SAM_HSMCI_PWRSAVE_DIV int "Divisor value of clock when in power-save mode" default 7 help SD clock frequency is divided by 2**(N+1) where N is the divisor value. Valid values are 0 to 7. endif # SAM_HSMCI_PWRSAVE endif # SAM_HSMCI ```
/content/code_sandbox/drivers/sdhc/Kconfig.sam_hsmci
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
231
```unknown config SDHC_ESP32 bool "ESP32 SDHC Driver" default y depends on DT_HAS_ESPRESSIF_ESP32_SDHC_SLOT_ENABLED select SDHC_SUPPORTS_NATIVE_MODE select PINCTRL help Enables the ESP32 SD Host controller driver if SDHC_ESP32 # ESP32 DMA needs 32 bit aligned buffers config SDHC_BUFFER_ALIGNMENT default 4 endif ```
/content/code_sandbox/drivers/sdhc/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
90
```unknown config RCAR_MMC bool "Renesas Rcar MMC driver" default y depends on DT_HAS_RENESAS_RCAR_MMC_ENABLED select SDHC_SUPPORTS_NATIVE_MODE select REGULATOR select GPIO select SDHC_SUPPORTS_UHS if SDMMC_STACK help Renesas Rcar MMC driver. if RCAR_MMC config RCAR_MMC_DMA_SUPPORT bool "Internal DMA support for Renesas Rcar MMC driver" select CACHE_MANAGEMENT select DCACHE default y help Internal DMA support for Renesas Rcar MMC driver. config RCAR_MMC_SCC_SUPPORT bool "Support of SCC" default y help Enable support of Sampling Clock Controller for Renesas Rcar MMC driver. if RCAR_MMC_DMA_SUPPORT config SDHC_BUFFER_ALIGNMENT default 128 config RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT bool "Internal DMA IRQ driven support for Renesas Rcar MMC driver" default y endif # RCAR_MMC_DMA_SUPPORT endif # RCAR_MMC ```
/content/code_sandbox/drivers/sdhc/Kconfig.rcar
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
228
```c /* * */ #define DT_DRV_COMPAT nxp_imx_usdhc #include <zephyr/kernel.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/sdhc.h> #include <zephyr/sd/sd_spec.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> #include <soc.h> #include <zephyr/drivers/pinctrl.h> #define PINCTRL_STATE_SLOW PINCTRL_STATE_PRIV_START #define PINCTRL_STATE_MED (PINCTRL_STATE_PRIV_START + 1U) #define PINCTRL_STATE_FAST (PINCTRL_STATE_PRIV_START + 2U) #define PINCTRL_STATE_NOPULL (PINCTRL_STATE_PRIV_START + 3U) LOG_MODULE_REGISTER(usdhc, CONFIG_SDHC_LOG_LEVEL); #include <fsl_usdhc.h> #include <fsl_cache.h> #include <zephyr/irq.h> enum transfer_callback_status { TRANSFER_CMD_COMPLETE = BIT(0), TRANSFER_CMD_FAILED = BIT(1), TRANSFER_DATA_COMPLETE = BIT(2), TRANSFER_DATA_FAILED = BIT(3), }; #define TRANSFER_CMD_FLAGS (TRANSFER_CMD_COMPLETE | TRANSFER_CMD_FAILED) #define TRANSFER_DATA_FLAGS (TRANSFER_DATA_COMPLETE | TRANSFER_DATA_FAILED) /* USDHC tuning constants */ #define IMX_USDHC_STANDARD_TUNING_START (10U) #define IMX_USDHC_TUNING_STEP (2U) #define IMX_USDHC_STANDARD_TUNING_COUNTER (60U) /* Default transfer timeout in ms for tuning */ #define IMX_USDHC_DEFAULT_TIMEOUT (5000U) struct usdhc_host_transfer { usdhc_transfer_t *transfer; k_timeout_t command_timeout; k_timeout_t data_timeout; }; struct usdhc_config { USDHC_Type *base; const struct device *clock_dev; clock_control_subsys_t clock_subsys; uint8_t nusdhc; const struct gpio_dt_spec pwr_gpio; const struct gpio_dt_spec detect_gpio; bool detect_dat3; bool no_180_vol; uint32_t data_timeout; uint32_t read_watermark; uint32_t write_watermark; uint32_t max_current_330; uint32_t max_current_300; uint32_t max_current_180; uint32_t power_delay_ms; uint32_t min_bus_freq; uint32_t max_bus_freq; bool mmc_hs200_1_8v; bool mmc_hs400_1_8v; const struct pinctrl_dev_config *pincfg; void (*irq_config_func)(const struct device *dev); }; struct usdhc_data { const struct device *dev; 
struct sdhc_host_props props; bool card_present; struct k_sem transfer_sem; volatile uint32_t transfer_status; usdhc_handle_t transfer_handle; struct sdhc_io host_io; struct k_mutex access_mutex; sdhc_interrupt_cb_t sdhc_cb; struct gpio_callback cd_callback; void *sdhc_cb_user_data; uint8_t usdhc_rx_dummy[128] __aligned(32); #ifdef CONFIG_IMX_USDHC_DMA_SUPPORT uint32_t *usdhc_dma_descriptor; /* ADMA descriptor table (noncachable) */ uint32_t dma_descriptor_len; /* DMA descriptor table length in words */ #endif }; static void transfer_complete_cb(USDHC_Type *usdhc, usdhc_handle_t *handle, status_t status, void *user_data) { const struct device *dev = (const struct device *)user_data; struct usdhc_data *data = dev->data; if (status == kStatus_USDHC_TransferDataFailed) { data->transfer_status |= TRANSFER_DATA_FAILED; } else if (status == kStatus_USDHC_TransferDataComplete) { data->transfer_status |= TRANSFER_DATA_COMPLETE; } else if (status == kStatus_USDHC_SendCommandFailed) { data->transfer_status |= TRANSFER_CMD_FAILED; } else if (status == kStatus_USDHC_SendCommandSuccess) { data->transfer_status |= TRANSFER_CMD_COMPLETE; } k_sem_give(&data->transfer_sem); } static void sdio_interrupt_cb(USDHC_Type *usdhc, void *user_data) { const struct device *dev = user_data; struct usdhc_data *data = dev->data; if (data->sdhc_cb) { data->sdhc_cb(dev, SDHC_INT_SDIO, data->sdhc_cb_user_data); } } static void card_inserted_cb(USDHC_Type *usdhc, void *user_data) { const struct device *dev = user_data; struct usdhc_data *data = dev->data; if (data->sdhc_cb) { data->sdhc_cb(dev, SDHC_INT_INSERTED, data->sdhc_cb_user_data); } } static void card_removed_cb(USDHC_Type *usdhc, void *user_data) { const struct device *dev = user_data; struct usdhc_data *data = dev->data; if (data->sdhc_cb) { data->sdhc_cb(dev, SDHC_INT_REMOVED, data->sdhc_cb_user_data); } } static void card_detect_gpio_cb(const struct device *port, struct gpio_callback *cb, gpio_port_pins_t pins) { struct usdhc_data *data 
= CONTAINER_OF(cb, struct usdhc_data, cd_callback); const struct device *dev = data->dev; const struct usdhc_config *cfg = dev->config; if (data->sdhc_cb) { if (gpio_pin_get_dt(&cfg->detect_gpio)) { data->sdhc_cb(dev, SDHC_INT_INSERTED, data->sdhc_cb_user_data); } else { data->sdhc_cb(dev, SDHC_INT_REMOVED, data->sdhc_cb_user_data); } } } static void imx_usdhc_select_1_8v(USDHC_Type *base, bool enable_1_8v) { #if !(defined(FSL_FEATURE_USDHC_HAS_NO_VOLTAGE_SELECT) && \ (FSL_FEATURE_USDHC_HAS_NO_VOLTAGE_SELECT)) UDSHC_SelectVoltage(base, enable_1_8v); #endif } static int imx_usdhc_dat3_pull(const struct usdhc_config *cfg, bool pullup) { int ret = 0U; ret = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_NOPULL); if (ret) { LOG_ERR("No DAT3 floating state defined, but dat3 detect selected"); return ret; } #ifdef CONFIG_IMX_USDHC_DAT3_PWR_TOGGLE if (!pullup) { /* Power off the card to clear DAT3 legacy status */ if (cfg->pwr_gpio.port) { ret = gpio_pin_set_dt(&cfg->pwr_gpio, 0); if (ret) { return ret; } /* Delay for card power off to complete */ k_busy_wait(1000); ret = gpio_pin_set_dt(&cfg->pwr_gpio, 1); /* Delay for power on */ k_busy_wait(1000); if (ret) { return ret; } } } #endif return ret; } /* * Reset SDHC after command error */ static void imx_usdhc_error_recovery(const struct device *dev) { const struct usdhc_config *cfg = dev->config; uint32_t status = USDHC_GetPresentStatusFlags(cfg->base); if (status & kUSDHC_CommandInhibitFlag) { /* Reset command line */ USDHC_Reset(cfg->base, kUSDHC_ResetCommand, 100U); } if (((status & (uint32_t)kUSDHC_DataInhibitFlag) != 0U) || (USDHC_GetAdmaErrorStatusFlags(cfg->base) != 0U)) { /* Reset data line */ USDHC_Reset(cfg->base, kUSDHC_DataInhibitFlag, 100U); } } /* * Initialize SDHC host properties for use in get_host_props api call */ static void imx_usdhc_init_host_props(const struct device *dev) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; usdhc_capability_t caps; struct 
sdhc_host_props *props = &data->props; memset(props, 0, sizeof(struct sdhc_host_props)); props->f_max = cfg->max_bus_freq; props->f_min = cfg->min_bus_freq; props->max_current_330 = cfg->max_current_330; props->max_current_180 = cfg->max_current_180; props->power_delay = cfg->power_delay_ms; /* Read host capabilities */ USDHC_GetCapability(cfg->base, &caps); if (cfg->no_180_vol) { props->host_caps.vol_180_support = false; } else { props->host_caps.vol_180_support = (bool)(caps.flags & kUSDHC_SupportV180Flag); } props->host_caps.vol_300_support = (bool)(caps.flags & kUSDHC_SupportV300Flag); props->host_caps.vol_330_support = (bool)(caps.flags & kUSDHC_SupportV330Flag); props->host_caps.suspend_res_support = (bool)(caps.flags & kUSDHC_SupportSuspendResumeFlag); props->host_caps.sdma_support = (bool)(caps.flags & kUSDHC_SupportDmaFlag); props->host_caps.high_spd_support = (bool)(caps.flags & kUSDHC_SupportHighSpeedFlag); props->host_caps.adma_2_support = (bool)(caps.flags & kUSDHC_SupportAdmaFlag); props->host_caps.max_blk_len = (bool)(caps.maxBlockLength); props->host_caps.ddr50_support = (bool)(caps.flags & kUSDHC_SupportDDR50Flag); props->host_caps.sdr104_support = (bool)(caps.flags & kUSDHC_SupportSDR104Flag); props->host_caps.sdr50_support = (bool)(caps.flags & kUSDHC_SupportSDR50Flag); props->host_caps.bus_8_bit_support = (bool)(caps.flags & kUSDHC_Support8BitFlag); props->host_caps.bus_4_bit_support = (bool)(caps.flags & kUSDHC_Support4BitFlag); props->host_caps.hs200_support = (bool)(cfg->mmc_hs200_1_8v); props->host_caps.hs400_support = (bool)(cfg->mmc_hs400_1_8v); } /* * Reset USDHC controller */ static int imx_usdhc_reset(const struct device *dev) { const struct usdhc_config *cfg = dev->config; /* Switch to default I/O voltage of 3.3V */ imx_usdhc_select_1_8v(cfg->base, false); USDHC_EnableDDRMode(cfg->base, false, 0U); #if defined(FSL_FEATURE_USDHC_HAS_SDR50_MODE) && (FSL_FEATURE_USDHC_HAS_SDR50_MODE) USDHC_EnableStandardTuning(cfg->base, 0, 0, false); 
USDHC_EnableAutoTuning(cfg->base, false); #endif #if FSL_FEATURE_USDHC_HAS_HS400_MODE /* Disable HS400 mode */ USDHC_EnableHS400Mode(cfg->base, false); /* Disable DLL */ USDHC_EnableStrobeDLL(cfg->base, false); #endif /* Reset data/command/tuning circuit */ return USDHC_Reset(cfg->base, kUSDHC_ResetAll, 1000U) == true ? 0 : -ETIMEDOUT; } /* * Set SDHC io properties */ static int imx_usdhc_set_io(const struct device *dev, struct sdhc_io *ios) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; uint32_t src_clk_hz, bus_clk; struct sdhc_io *host_io = &data->host_io; LOG_DBG("SDHC I/O: bus width %d, clock %dHz, card power %s, voltage %s", ios->bus_width, ios->clock, ios->power_mode == SDHC_POWER_ON ? "ON" : "OFF", ios->signal_voltage == SD_VOL_1_8_V ? "1.8V" : "3.3V" ); if (clock_control_get_rate(cfg->clock_dev, cfg->clock_subsys, &src_clk_hz)) { return -EINVAL; } if (ios->clock && (ios->clock > data->props.f_max || ios->clock < data->props.f_min)) { return -EINVAL; } /* Set host clock */ if (host_io->clock != ios->clock) { if (ios->clock != 0) { /* Enable the clock output */ bus_clk = USDHC_SetSdClock(cfg->base, src_clk_hz, ios->clock); LOG_DBG("BUS CLOCK: %d", bus_clk); if (bus_clk == 0) { return -ENOTSUP; } } host_io->clock = ios->clock; } /* Set bus width */ if (host_io->bus_width != ios->bus_width) { switch (ios->bus_width) { case SDHC_BUS_WIDTH1BIT: USDHC_SetDataBusWidth(cfg->base, kUSDHC_DataBusWidth1Bit); break; case SDHC_BUS_WIDTH4BIT: USDHC_SetDataBusWidth(cfg->base, kUSDHC_DataBusWidth4Bit); break; case SDHC_BUS_WIDTH8BIT: USDHC_SetDataBusWidth(cfg->base, kUSDHC_DataBusWidth8Bit); break; default: return -ENOTSUP; } host_io->bus_width = ios->bus_width; } /* Set host signal voltage */ if (ios->signal_voltage != host_io->signal_voltage) { switch (ios->signal_voltage) { case SD_VOL_3_3_V: case SD_VOL_3_0_V: imx_usdhc_select_1_8v(cfg->base, false); break; case SD_VOL_1_8_V: /** * USDHC peripheral deviates from SD spec here. 
* The host controller specification claims * the "SD clock enable" bit can be used to gate the SD * clock by clearing it. The USDHC controller does not * provide this bit, only a way to force the SD clock * on. We will instead delay 10 ms to allow the clock * to be gated for enough time, then force it on for * 10 ms, then allow it to be gated again. */ /* Switch to 1.8V */ imx_usdhc_select_1_8v(cfg->base, true); /* Wait 10 ms- clock will be gated during this period */ k_msleep(10); /* Force the clock on */ USDHC_ForceClockOn(cfg->base, true); /* Keep the clock on for a moment, so SD will recognize it */ k_msleep(10); /* Stop forcing clock on */ USDHC_ForceClockOn(cfg->base, false); break; default: return -ENOTSUP; } /* Save new host voltage */ host_io->signal_voltage = ios->signal_voltage; } /* Set card power */ if ((host_io->power_mode != ios->power_mode) && (cfg->pwr_gpio.port)) { if (ios->power_mode == SDHC_POWER_OFF) { gpio_pin_set_dt(&cfg->pwr_gpio, 0); } else if (ios->power_mode == SDHC_POWER_ON) { gpio_pin_set_dt(&cfg->pwr_gpio, 1); } host_io->power_mode = ios->power_mode; } /* Set I/O timing */ if (host_io->timing != ios->timing) { switch (ios->timing) { case SDHC_TIMING_LEGACY: case SDHC_TIMING_HS: break; case SDHC_TIMING_DDR50: case SDHC_TIMING_DDR52: /* Enable DDR mode */ USDHC_EnableDDRMode(cfg->base, true, 0); __fallthrough; case SDHC_TIMING_SDR12: case SDHC_TIMING_SDR25: pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_SLOW); break; case SDHC_TIMING_SDR50: pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_MED); break; case SDHC_TIMING_HS400: #if FSL_FEATURE_USDHC_HAS_HS400_MODE USDHC_EnableHS400Mode(cfg->base, true); USDHC_EnableDDRMode(cfg->base, true, 0U); USDHC_ConfigStrobeDLL(cfg->base, 7U, 4U); USDHC_EnableStrobeDLL(cfg->base, true); #else LOG_ERR("HS400 not supported for this device"); return -ENOTSUP; #endif case SDHC_TIMING_SDR104: case SDHC_TIMING_HS200: pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_FAST); break; default: return -ENOTSUP; } 
host_io->timing = ios->timing; } return 0; } /* * Internal transfer function, used by tuning and request apis */ static int imx_usdhc_transfer(const struct device *dev, struct usdhc_host_transfer *request) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *dev_data = dev->data; status_t error; #ifdef CONFIG_IMX_USDHC_DMA_SUPPORT usdhc_adma_config_t dma_config = {0}; /* Configure DMA */ dma_config.admaTable = dev_data->usdhc_dma_descriptor; dma_config.admaTableWords = dev_data->dma_descriptor_len; #if !(defined(FSL_FEATURE_USDHC_HAS_NO_RW_BURST_LEN) && FSL_FEATURE_USDHC_HAS_NO_RW_BURST_LEN) dma_config.burstLen = kUSDHC_EnBurstLenForINCR; #endif dma_config.dmaMode = kUSDHC_DmaModeAdma2; #endif /* CONFIG_IMX_USDHC_DMA_SUPPORT */ /* Reset transfer status */ dev_data->transfer_status = 0U; /* Reset semaphore */ k_sem_reset(&dev_data->transfer_sem); #ifdef CONFIG_IMX_USDHC_DMA_SUPPORT error = USDHC_TransferNonBlocking(cfg->base, &dev_data->transfer_handle, &dma_config, request->transfer); #else error = USDHC_TransferNonBlocking(cfg->base, &dev_data->transfer_handle, NULL, request->transfer); #endif if (error == kStatus_USDHC_ReTuningRequest) { return -EAGAIN; } else if (error != kStatus_Success) { return -EIO; } /* Wait for event to occur */ while ((dev_data->transfer_status & (TRANSFER_CMD_FLAGS | TRANSFER_DATA_FLAGS)) == 0) { if (k_sem_take(&dev_data->transfer_sem, request->command_timeout)) { return -ETIMEDOUT; } } if (dev_data->transfer_status & TRANSFER_CMD_FAILED) { return -EIO; } /* If data was sent, wait for that to complete */ if (request->transfer->data) { while ((dev_data->transfer_status & TRANSFER_DATA_FLAGS) == 0) { if (k_sem_take(&dev_data->transfer_sem, request->data_timeout)) { return -ETIMEDOUT; } } if (dev_data->transfer_status & TRANSFER_DATA_FAILED) { return -EIO; } } return 0; } /* Stops transmission after failed command with CMD12 */ static void imx_usdhc_stop_transmission(const struct device *dev) { usdhc_command_t stop_cmd = {0}; 
struct usdhc_host_transfer request; usdhc_transfer_t transfer; /* Send CMD12 to stop transmission */ stop_cmd.index = SD_STOP_TRANSMISSION; stop_cmd.argument = 0U; stop_cmd.type = kCARD_CommandTypeAbort; stop_cmd.responseType = SD_RSP_TYPE_R1b; transfer.command = &stop_cmd; transfer.data = NULL; request.transfer = &transfer; request.command_timeout = K_MSEC(IMX_USDHC_DEFAULT_TIMEOUT); request.data_timeout = K_MSEC(IMX_USDHC_DEFAULT_TIMEOUT); imx_usdhc_transfer(dev, &request); } /* * Return 0 if card is not busy, 1 if it is */ static int imx_usdhc_card_busy(const struct device *dev) { const struct usdhc_config *cfg = dev->config; return (USDHC_GetPresentStatusFlags(cfg->base) & (kUSDHC_Data0LineLevelFlag | kUSDHC_Data1LineLevelFlag | kUSDHC_Data2LineLevelFlag | kUSDHC_Data3LineLevelFlag)) ? 0 : 1; } /* * Execute card tuning */ static int imx_usdhc_execute_tuning(const struct device *dev) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *dev_data = dev->data; usdhc_command_t cmd = {0}; usdhc_data_t data = {0}; struct usdhc_host_transfer request; usdhc_transfer_t transfer; int ret; bool retry_tuning = true; if ((dev_data->host_io.timing == SDHC_TIMING_HS200) || (dev_data->host_io.timing == SDHC_TIMING_HS400)) { /*Currently only reaches here when MMC */ cmd.index = MMC_SEND_TUNING_BLOCK; } else { cmd.index = SD_SEND_TUNING_BLOCK; } cmd.argument = 0; cmd.responseType = SD_RSP_TYPE_R1; if (dev_data->host_io.bus_width == SDHC_BUS_WIDTH8BIT) { data.blockSize = sizeof(dev_data->usdhc_rx_dummy); } else { data.blockSize = sizeof(dev_data->usdhc_rx_dummy) / 2; } data.blockCount = 1; data.rxData = (uint32_t *)dev_data->usdhc_rx_dummy; data.dataType = kUSDHC_TransferDataTuning; transfer.command = &cmd; transfer.data = &data; /* Reset tuning circuit */ USDHC_Reset(cfg->base, kUSDHC_ResetTuning, 100U); /* Disable standard tuning */ USDHC_EnableStandardTuning(cfg->base, IMX_USDHC_STANDARD_TUNING_START, IMX_USDHC_TUNING_STEP, false); USDHC_ForceClockOn(cfg->base, 
true); /* * Tuning fail found on some SOCs is caused by the different of delay * cell, so we need to increase the tuning counter to cover the * adjustable tuning window */ USDHC_SetStandardTuningCounter(cfg->base, IMX_USDHC_STANDARD_TUNING_COUNTER); /* Reenable standard tuning */ USDHC_EnableStandardTuning(cfg->base, IMX_USDHC_STANDARD_TUNING_START, IMX_USDHC_TUNING_STEP, true); request.command_timeout = K_MSEC(IMX_USDHC_DEFAULT_TIMEOUT); request.data_timeout = K_MSEC(IMX_USDHC_DEFAULT_TIMEOUT); request.transfer = &transfer; while (true) { ret = imx_usdhc_transfer(dev, &request); if (ret) { return ret; } /* Delay 1ms */ k_busy_wait(1000); /* Wait for execute tuning bit to clear */ if (USDHC_GetExecuteStdTuningStatus(cfg->base) != 0) { continue; } /* If tuning had error, retry tuning */ if ((USDHC_CheckTuningError(cfg->base) != 0U) && retry_tuning) { retry_tuning = false; /* Enable standard tuning */ USDHC_EnableStandardTuning(cfg->base, IMX_USDHC_STANDARD_TUNING_START, IMX_USDHC_TUNING_STEP, true); USDHC_SetTuningDelay(cfg->base, IMX_USDHC_STANDARD_TUNING_START, 0U, 0U); } else { break; } } /* Check tuning result */ if (USDHC_CheckStdTuningResult(cfg->base) == 0) { return -EIO; } USDHC_ForceClockOn(cfg->base, false); /* Enable auto tuning */ USDHC_EnableAutoTuning(cfg->base, true); return 0; } /* * Send CMD or CMD/DATA via SDHC */ static int imx_usdhc_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *dev_data = dev->data; usdhc_command_t host_cmd = {0}; usdhc_data_t host_data = {0}; struct usdhc_host_transfer request; usdhc_transfer_t transfer; int busy_timeout = IMX_USDHC_DEFAULT_TIMEOUT; int ret = 0; int retries = (int)cmd->retries; if (cmd->opcode == SD_GO_IDLE_STATE) { USDHC_SetCardActive(cfg->base, 0xFFFF); } host_cmd.index = cmd->opcode; host_cmd.argument = cmd->arg; /* Mask out part of response type field used for SPI commands */ host_cmd.responseType = 
(cmd->response_type & SDHC_NATIVE_RESPONSE_MASK); transfer.command = &host_cmd; if (cmd->timeout_ms == SDHC_TIMEOUT_FOREVER) { request.command_timeout = K_FOREVER; } else { request.command_timeout = K_MSEC(cmd->timeout_ms); } if (data) { host_data.blockSize = data->block_size; host_data.blockCount = data->blocks; /* * Determine type of command. Note that driver is expected to * handle CMD12 and CMD23 for reading and writing blocks */ switch (cmd->opcode) { case SD_WRITE_SINGLE_BLOCK: host_data.enableAutoCommand12 = true; host_data.txData = data->data; break; case SD_WRITE_MULTIPLE_BLOCK: if (dev_data->host_io.timing == SDHC_TIMING_SDR104) { /* Card uses UHS104, so it must support CMD23 */ host_data.enableAutoCommand23 = true; } else { /* No CMD23 support */ host_data.enableAutoCommand12 = true; } host_data.txData = data->data; break; case SD_READ_SINGLE_BLOCK: host_data.enableAutoCommand12 = true; host_data.rxData = data->data; break; case SD_READ_MULTIPLE_BLOCK: if (dev_data->host_io.timing == SDHC_TIMING_SDR104) { /* Card uses UHS104, so it must support CMD23 */ host_data.enableAutoCommand23 = true; } else { /* No CMD23 support */ host_data.enableAutoCommand12 = true; } host_data.rxData = data->data; break; case MMC_CHECK_BUS_TEST: case MMC_SEND_EXT_CSD: case SD_APP_SEND_SCR: case SD_SWITCH: case SD_APP_SEND_NUM_WRITTEN_BLK: host_data.rxData = data->data; break; case SDIO_RW_EXTENDED: /* Use R/W bit to determine data direction */ if (host_cmd.argument & BIT(SDIO_CMD_ARG_RW_SHIFT)) { host_data.txData = data->data; } else { host_data.rxData = data->data; } break; default: return -ENOTSUP; } transfer.data = &host_data; if (data->timeout_ms == SDHC_TIMEOUT_FOREVER) { request.data_timeout = K_FOREVER; } else { request.data_timeout = K_MSEC(data->timeout_ms); } } else { transfer.data = NULL; request.data_timeout = K_NO_WAIT; } request.transfer = &transfer; /* Ensure we have exclusive access to SD card before sending request */ if (k_mutex_lock(&dev_data->access_mutex, 
request.command_timeout) != 0) { return -EBUSY; } while (retries >= 0) { ret = imx_usdhc_transfer(dev, &request); if (ret && data) { /* * Disable and clear interrupts. If the data transmission * completes later, we will encounter issues because * the USDHC driver expects data to be present in the * current transmission, but CMD12 does not contain data */ USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CommandFlag | kUSDHC_DataFlag | kUSDHC_DataDMAFlag); USDHC_ClearInterruptStatusFlags(cfg->base, kUSDHC_CommandFlag | kUSDHC_DataFlag | kUSDHC_DataDMAFlag); /* Stop transmission with CMD12 in case of data error */ imx_usdhc_stop_transmission(dev); /* Wait for card to go idle */ while (busy_timeout > 0) { if (!imx_usdhc_card_busy(dev)) { break; } /* Wait 125us before polling again */ k_busy_wait(125); busy_timeout -= 125; } if (busy_timeout <= 0) { LOG_DBG("Card did not idle after CMD12"); k_mutex_unlock(&dev_data->access_mutex); return -ETIMEDOUT; } } if (ret == -EAGAIN) { /* Retry, card made a tuning request */ if (dev_data->host_io.timing == SDHC_TIMING_SDR50 || dev_data->host_io.timing == SDHC_TIMING_SDR104 || dev_data->host_io.timing == SDHC_TIMING_HS200 || dev_data->host_io.timing == SDHC_TIMING_HS400) { /* Retune card */ LOG_DBG("Card made tuning request, retune"); ret = imx_usdhc_execute_tuning(dev); if (ret) { LOG_DBG("Card failed to tune"); k_mutex_unlock(&dev_data->access_mutex); return ret; } } } if (ret) { imx_usdhc_error_recovery(dev); retries--; } else { break; } } /* Release access on card */ k_mutex_unlock(&dev_data->access_mutex); /* Record command response */ memcpy(cmd->response, host_cmd.response, sizeof(cmd->response)); if (data) { /* Record number of bytes xfered */ data->bytes_xfered = dev_data->transfer_handle.transferredWords; } return ret; } /* * Get card presence */ static int imx_usdhc_get_card_present(const struct device *dev) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; if (cfg->detect_dat3) { /* * If 
card is already present, do not retry detection. * Power line toggling would reset SD card */ if (!data->card_present) { /* Detect card presence with DAT3 line pull */ imx_usdhc_dat3_pull(cfg, false); USDHC_CardDetectByData3(cfg->base, true); /* Delay to ensure host has time to detect card */ k_busy_wait(1000); data->card_present = USDHC_DetectCardInsert(cfg->base); /* Clear card detection and pull */ imx_usdhc_dat3_pull(cfg, true); USDHC_CardDetectByData3(cfg->base, false); } } else if (cfg->detect_gpio.port) { data->card_present = gpio_pin_get_dt(&cfg->detect_gpio) > 0; } else { data->card_present = USDHC_DetectCardInsert(cfg->base); } return ((int)data->card_present); } /* * Get host properties */ static int imx_usdhc_get_host_props(const struct device *dev, struct sdhc_host_props *props) { struct usdhc_data *data = dev->data; memcpy(props, &data->props, sizeof(struct sdhc_host_props)); return 0; } /* * Enable SDHC card interrupt */ static int imx_usdhc_enable_interrupt(const struct device *dev, sdhc_interrupt_cb_t callback, int sources, void *user_data) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; int ret; /* Record SDIO callback parameters */ data->sdhc_cb = callback; data->sdhc_cb_user_data = user_data; /* Disable interrupts, then enable what the user requested */ USDHC_DisableInterruptStatus(cfg->base, kUSDHC_CardInterruptFlag); USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CardInterruptFlag); if (cfg->detect_gpio.port) { ret = gpio_pin_interrupt_configure_dt(&cfg->detect_gpio, GPIO_INT_DISABLE); if (ret) { return ret; } } else { USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CardInsertionFlag); USDHC_DisableInterruptStatus(cfg->base, kUSDHC_CardInsertionFlag); USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CardRemovalFlag); USDHC_DisableInterruptStatus(cfg->base, kUSDHC_CardRemovalFlag); } if (sources & SDHC_INT_SDIO) { /* Enable SDIO card interrupt */ USDHC_EnableInterruptStatus(cfg->base, kUSDHC_CardInterruptFlag); 
USDHC_EnableInterruptSignal(cfg->base, kUSDHC_CardInterruptFlag); } if (sources & SDHC_INT_INSERTED) { if (cfg->detect_gpio.port) { /* Use GPIO interrupt */ ret = gpio_pin_interrupt_configure_dt(&cfg->detect_gpio, GPIO_INT_EDGE_TO_ACTIVE); if (ret) { return ret; } } else { /* Enable card insertion interrupt */ USDHC_EnableInterruptStatus(cfg->base, kUSDHC_CardInsertionFlag); USDHC_EnableInterruptSignal(cfg->base, kUSDHC_CardInsertionFlag); } } if (sources & SDHC_INT_REMOVED) { if (cfg->detect_gpio.port) { /* Use GPIO interrupt */ ret = gpio_pin_interrupt_configure_dt(&cfg->detect_gpio, GPIO_INT_EDGE_TO_INACTIVE); if (ret) { return ret; } } else { /* Enable card removal interrupt */ USDHC_EnableInterruptStatus(cfg->base, kUSDHC_CardRemovalFlag); USDHC_EnableInterruptSignal(cfg->base, kUSDHC_CardRemovalFlag); } } return 0; } static int imx_usdhc_disable_interrupt(const struct device *dev, int sources) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; int ret; if (sources & SDHC_INT_SDIO) { /* Disable SDIO card interrupt */ USDHC_DisableInterruptStatus(cfg->base, kUSDHC_CardInterruptFlag); USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CardInterruptFlag); } if (sources & SDHC_INT_INSERTED) { if (cfg->detect_gpio.port) { ret = gpio_pin_interrupt_configure_dt(&cfg->detect_gpio, GPIO_INT_DISABLE); if (ret) { return ret; } } else { /* Disable card insertion interrupt */ USDHC_DisableInterruptStatus(cfg->base, kUSDHC_CardInsertionFlag); USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CardInsertionFlag); } } if (sources & SDHC_INT_REMOVED) { if (cfg->detect_gpio.port) { ret = gpio_pin_interrupt_configure_dt(&cfg->detect_gpio, GPIO_INT_DISABLE); if (ret) { return ret; } } else { /* Disable card removal interrupt */ USDHC_DisableInterruptStatus(cfg->base, kUSDHC_CardRemovalFlag); USDHC_DisableInterruptSignal(cfg->base, kUSDHC_CardRemovalFlag); } } /* If all interrupt flags are disabled, remove callback */ if 
((USDHC_GetEnabledInterruptStatusFlags(cfg->base) & (kUSDHC_CardInterruptFlag | kUSDHC_CardInsertionFlag | kUSDHC_CardRemovalFlag)) == 0) { data->sdhc_cb = NULL; data->sdhc_cb_user_data = NULL; } return 0; } static int imx_usdhc_isr(const struct device *dev) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; USDHC_TransferHandleIRQ(cfg->base, &data->transfer_handle); return 0; } /* * Perform early system init for SDHC */ static int imx_usdhc_init(const struct device *dev) { const struct usdhc_config *cfg = dev->config; struct usdhc_data *data = dev->data; usdhc_config_t host_config = {0}; int ret; const usdhc_transfer_callback_t callbacks = { .TransferComplete = transfer_complete_cb, .SdioInterrupt = sdio_interrupt_cb, .CardInserted = card_inserted_cb, .CardRemoved = card_removed_cb, }; if (!device_is_ready(cfg->clock_dev)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } USDHC_TransferCreateHandle(cfg->base, &data->transfer_handle, &callbacks, (void *)dev); cfg->irq_config_func(dev); host_config.dataTimeout = cfg->data_timeout; host_config.endianMode = kUSDHC_EndianModeLittle; host_config.readWatermarkLevel = cfg->read_watermark; host_config.writeWatermarkLevel = cfg->write_watermark; USDHC_Init(cfg->base, &host_config); /* Read host controller properties */ imx_usdhc_init_host_props(dev); /* Set power GPIO low, so card starts powered off */ if (cfg->pwr_gpio.port) { ret = gpio_pin_configure_dt(&cfg->pwr_gpio, GPIO_OUTPUT_INACTIVE); if (ret) { return ret; } } else { LOG_WRN("No power control GPIO defined. 
Without power control,\n" "the SD card may fail to communicate with the host"); } if (cfg->detect_gpio.port) { ret = gpio_pin_configure_dt(&cfg->detect_gpio, GPIO_INPUT); if (ret) { return ret; } gpio_init_callback(&data->cd_callback, card_detect_gpio_cb, BIT(cfg->detect_gpio.pin)); ret = gpio_add_callback_dt(&cfg->detect_gpio, &data->cd_callback); if (ret) { return ret; } } data->dev = dev; k_mutex_init(&data->access_mutex); /* Setup initial host IO values */ data->host_io.clock = 0; data->host_io.bus_mode = SDHC_BUSMODE_PUSHPULL; data->host_io.power_mode = SDHC_POWER_OFF; data->host_io.bus_width = SDHC_BUS_WIDTH1BIT; data->host_io.timing = SDHC_TIMING_LEGACY; data->host_io.driver_type = SD_DRIVER_TYPE_B; data->host_io.signal_voltage = SD_VOL_3_3_V; return k_sem_init(&data->transfer_sem, 0, 1); } static const struct sdhc_driver_api usdhc_api = { .reset = imx_usdhc_reset, .request = imx_usdhc_request, .set_io = imx_usdhc_set_io, .get_card_present = imx_usdhc_get_card_present, .execute_tuning = imx_usdhc_execute_tuning, .card_busy = imx_usdhc_card_busy, .get_host_props = imx_usdhc_get_host_props, .enable_interrupt = imx_usdhc_enable_interrupt, .disable_interrupt = imx_usdhc_disable_interrupt, }; #ifdef CONFIG_NOCACHE_MEMORY #define IMX_USDHC_NOCACHE_TAG __attribute__((__section__(".nocache"))); #else #define IMX_USDHC_NOCACHE_TAG #endif #ifdef CONFIG_IMX_USDHC_DMA_SUPPORT #define IMX_USDHC_DMA_BUFFER_DEFINE(n) \ static uint32_t __aligned(32) \ usdhc_##n##_dma_descriptor[CONFIG_IMX_USDHC_DMA_BUFFER_SIZE / 4]\ IMX_USDHC_NOCACHE_TAG; #define IMX_USDHC_DMA_BUFFER_INIT(n) \ .usdhc_dma_descriptor = usdhc_##n##_dma_descriptor, \ .dma_descriptor_len = CONFIG_IMX_USDHC_DMA_BUFFER_SIZE / 4, #else #define IMX_USDHC_DMA_BUFFER_DEFINE(n) #define IMX_USDHC_DMA_BUFFER_INIT(n) #endif /* CONFIG_IMX_USDHC_DMA_SUPPORT */ #define IMX_USDHC_INIT(n) \ static void usdhc_##n##_irq_config_func(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ 
imx_usdhc_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static const struct usdhc_config usdhc_##n##_config = { \ .base = (USDHC_Type *) DT_INST_REG_ADDR(n), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ .nusdhc = n, \ .pwr_gpio = GPIO_DT_SPEC_INST_GET_OR(n, pwr_gpios, {0}), \ .detect_gpio = GPIO_DT_SPEC_INST_GET_OR(n, cd_gpios, {0}), \ .data_timeout = DT_INST_PROP(n, data_timeout), \ .detect_dat3 = DT_INST_PROP(n, detect_dat3), \ .no_180_vol = DT_INST_PROP(n, no_1_8_v), \ .read_watermark = DT_INST_PROP(n, read_watermark), \ .write_watermark = DT_INST_PROP(n, write_watermark), \ .max_current_330 = DT_INST_PROP(n, max_current_330), \ .max_current_180 = DT_INST_PROP(n, max_current_180), \ .min_bus_freq = DT_INST_PROP(n, min_bus_freq), \ .max_bus_freq = DT_INST_PROP(n, max_bus_freq), \ .power_delay_ms = DT_INST_PROP(n, power_delay_ms), \ .mmc_hs200_1_8v = DT_INST_PROP(n, mmc_hs200_1_8v), \ .mmc_hs400_1_8v = DT_INST_PROP(n, mmc_hs400_1_8v), \ .irq_config_func = usdhc_##n##_irq_config_func, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ }; \ \ \ IMX_USDHC_DMA_BUFFER_DEFINE(n) \ \ static struct usdhc_data usdhc_##n##_data = { \ .card_present = false, \ IMX_USDHC_DMA_BUFFER_INIT(n) \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ &imx_usdhc_init, \ NULL, \ &usdhc_##n##_data, \ &usdhc_##n##_config, \ POST_KERNEL, \ CONFIG_SDHC_INIT_PRIORITY, \ &usdhc_api); DT_INST_FOREACH_STATUS_OKAY(IMX_USDHC_INIT) ```
/content/code_sandbox/drivers/sdhc/imx_usdhc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,888
```kconfig
config INTEL_EMMC_HOST
	bool "EMMC driver"
	select SDHC_SUPPORTS_NATIVE_MODE
	select EVENTS
	default y
	depends on DT_HAS_INTEL_EMMC_HOST_ENABLED
	help
	  EMMC driver support. Though this driver can easily be ported to any
	  hardware that is compliant with the eMMC spec, it is currently only
	  validated using Intel's EMMC host controller.

if INTEL_EMMC_HOST

config INTEL_EMMC_HOST_INTR
	bool "EMMC host controller interrupt mode"
	default y
	help
	  EMMC host controller interrupt mode support.

config INTEL_EMMC_HOST_DMA
	bool "EMMC host controller DMA mode"
	select DCACHE
	help
	  EMMC host controller DMA mode support.

config INTEL_EMMC_HOST_ADMA
	bool "EMMC host controller ADMA mode"
	depends on INTEL_EMMC_HOST_DMA
	help
	  EMMC host controller ADMA mode support.

config INTEL_EMMC_HOST_ADMA_DESC_SIZE
	int "EMMC host controller ADMA descriptor size"
	depends on INTEL_EMMC_HOST_ADMA
	default 32
	help
	  EMMC host controller ADMA descriptor size.

config INTEL_EMMC_HOST_AUTO_STOP
	bool "Auto stop command mode"
	default y
	help
	  Auto stop command mode support.

config INTEL_EMMC_HOST_BLOCK_GAP
	bool "Block gap mode"
	depends on INTEL_EMMC_HOST_DMA
	help
	  Block gap mode support.

config INTEL_EMMC_HOST_TUNING
	bool "Host tuning"
	help
	  Host tuning support.

endif
```
/content/code_sandbox/drivers/sdhc/Kconfig.intel
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
328
```c /* * */ #include <errno.h> #include <zephyr/drivers/i3c.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/logging/log.h> #include <zephyr/sys/byteorder.h> #include <zephyr/sys/sys_io.h> #include <zephyr/sys/util.h> #define DEV_ID 0x0 #define DEV_ID_I3C_MASTER 0x5034 #define CONF_STATUS0 0x4 #define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29)) #define CONF_STATUS0_ECC_CHK BIT(28) #define CONF_STATUS0_INTEG_CHK BIT(27) #define CONF_STATUS0_CSR_DAP_CHK BIT(26) #define CONF_STATUS0_TRANS_TOUT_CHK BIT(25) #define CONF_STATUS0_PROT_FAULTS_CHK BIT(24) #define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16) #define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8) #define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 7)) /* CONF_STATUS0_SUPPORTS_DDR moved to CONF_STATUS1 in rev >= 1p7 */ #define CONF_STATUS0_SUPPORTS_DDR BIT(5) #define CONF_STATUS0_SEC_MASTER BIT(4) /* And it was replaced with a Dev Role mask */ #define CONF_STATUS0_DEV_ROLE(x) ((x) & GENMASK(5, 4) >> 4) #define CONF_STATUS0_DEV_ROLE_MAIN_MASTER 0 #define CONF_STATUS0_DEV_ROLE_SEC_MASTER 1 #define CONF_STATUS0_DEV_ROLE_SLAVE 2 #define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0)) #define CONF_STATUS1 0x8 #define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1) #define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26)) #define CONF_STATUS1_SLV_DDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21)) #define CONF_STATUS1_SLV_DDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16)) #define CONF_STATUS1_SUPPORTS_DDR BIT(14) #define CONF_STATUS1_ALT_MODE BIT(13) #define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10)) #define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5)) #define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0))) #define REV_ID 0xc #define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20) #define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8) #define REV_ID_REV(id) 
((id) & GENMASK(7, 0)) #define REV_ID_VERSION(m, n) ((m << 5) | (n)) #define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 5)) >> 5) #define REV_ID_REV_MINOR(id) ((id) & GENMASK(4, 0)) #define CTRL 0x10 #define CTRL_DEV_EN BIT(31) #define CTRL_HALT_EN BIT(30) #define CTRL_MCS BIT(29) #define CTRL_MCS_EN BIT(28) #define CTRL_I3C_11_SUPP BIT(26) #define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24)) #define CTRL_TC_EN BIT(9) #define CTRL_HJ_DISEC BIT(8) #define CTRL_MST_ACK BIT(7) #define CTRL_HJ_ACK BIT(6) #define CTRL_HJ_INIT BIT(5) #define CTRL_MST_INIT BIT(4) #define CTRL_AHDR_OPT BIT(3) #define CTRL_PURE_BUS_MODE 0 #define CTRL_MIXED_FAST_BUS_MODE 2 #define CTRL_MIXED_SLOW_BUS_MODE 3 #define CTRL_BUS_MODE_MASK GENMASK(1, 0) #define THD_DELAY_MAX 3 #define PRESCL_CTRL0 0x14 #define PRESCL_CTRL0_I2C(x) ((x) << 16) #define PRESCL_CTRL0_I3C(x) (x) #define PRESCL_CTRL0_I3C_MAX GENMASK(9, 0) #define PRESCL_CTRL0_I2C_MAX GENMASK(15, 0) #define PRESCL_CTRL1 0x18 #define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8) #define PRESCL_CTRL1_PP_LOW(x) ((x) << 8) #define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0) #define PRESCL_CTRL1_OD_LOW(x) (x) #define SLV_STATUS4 0x1C #define SLV_STATUS4_BUSCON_FILL_LVL GENMASK(16, 8) #define SLV_STATUS5_BUSCON_DATA GENMASK(7, 0) #define MST_IER 0x20 #define MST_IDR 0x24 #define MST_IMR 0x28 #define MST_ICR 0x2c #define MST_ISR 0x30 #define MST_INT_HALTED BIT(18) #define MST_INT_MR_DONE BIT(17) #define MST_INT_IMM_COMP BIT(16) #define MST_INT_TX_THR BIT(15) #define MST_INT_TX_OVF BIT(14) #define MST_INT_C_REF_ROV BIT(13) #define MST_INT_IBID_THR BIT(12) #define MST_INT_IBID_UNF BIT(11) #define MST_INT_IBIR_THR BIT(10) #define MST_INT_IBIR_UNF BIT(9) #define MST_INT_IBIR_OVF BIT(8) #define MST_INT_RX_THR BIT(7) #define MST_INT_RX_UNF BIT(6) #define MST_INT_CMDD_EMP BIT(5) #define MST_INT_CMDD_THR BIT(4) #define MST_INT_CMDD_OVF BIT(3) #define MST_INT_CMDR_THR BIT(2) #define MST_INT_CMDR_UNF BIT(1) #define MST_INT_CMDR_OVF BIT(0) #define MST_INT_MASK 
GENMASK(18, 0)

/* MST_STATUS0: controller-mode status (FIFO full/empty, idle/halted flags) */
#define MST_STATUS0 0x34
#define MST_STATUS0_IDLE BIT(18)
#define MST_STATUS0_HALTED BIT(17)
#define MST_STATUS0_MASTER_MODE BIT(16)
#define MST_STATUS0_TX_FULL BIT(13)
#define MST_STATUS0_IBID_FULL BIT(12)
#define MST_STATUS0_IBIR_FULL BIT(11)
#define MST_STATUS0_RX_FULL BIT(10)
#define MST_STATUS0_CMDD_FULL BIT(9)
#define MST_STATUS0_CMDR_FULL BIT(8)
#define MST_STATUS0_TX_EMP BIT(5)
#define MST_STATUS0_IBID_EMP BIT(4)
#define MST_STATUS0_IBIR_EMP BIT(3)
#define MST_STATUS0_RX_EMP BIT(2)
#define MST_STATUS0_CMDD_EMP BIT(1)
#define MST_STATUS0_CMDR_EMP BIT(0)

/* CMDR: command response FIFO; error codes live in bits [27:24] */
#define CMDR 0x38
#define CMDR_NO_ERROR 0
#define CMDR_DDR_PREAMBLE_ERROR 1
#define CMDR_DDR_PARITY_ERROR 2
#define CMDR_DDR_RX_FIFO_OVF 3
#define CMDR_DDR_TX_FIFO_UNF 4
#define CMDR_M0_ERROR 5
#define CMDR_M1_ERROR 6
#define CMDR_M2_ERROR 7
#define CMDR_MST_ABORT 8
#define CMDR_NACK_RESP 9
#define CMDR_INVALID_DA 10
#define CMDR_DDR_DROPPED 11
#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
#define CMDR_CMDID_HJACK_DISEC 0xfe
#define CMDR_CMDID_HJACK_ENTDAA 0xff
#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))

/* IBIR: in-band-interrupt response FIFO */
#define IBIR 0x3c
#define IBIR_ACKED BIT(12)
#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
#define IBIR_SLVID_INV 0xF
#define IBIR_ERROR BIT(7)
#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
#define IBIR_TYPE_IBI 0
#define IBIR_TYPE_HJ 1
#define IBIR_TYPE_MR 2
#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))

/* Target (slave) mode interrupt enable/disable/mask/clear/status registers */
#define SLV_IER 0x40
#define SLV_IDR 0x44
#define SLV_IMR 0x48
#define SLV_ICR 0x4c
#define SLV_ISR 0x50
#define SLV_INT_CHIP_RST BIT(31)
#define SLV_INT_PERIPH_RST BIT(30)
#define SLV_INT_FLUSH_DONE BIT(29)
#define SLV_INT_RST_DAA BIT(28)
#define SLV_INT_BUSCON_UP BIT(26)
#define SLV_INT_MRL_UP BIT(25)
#define SLV_INT_MWL_UP BIT(24)
#define SLV_INT_IBI_THR BIT(23)
#define SLV_INT_IBI_DONE BIT(22)
#define SLV_INT_DEFSLVS BIT(21)
#define SLV_INT_TM BIT(20)
#define SLV_INT_ERROR BIT(19)
#define SLV_INT_EVENT_UP BIT(18)
#define SLV_INT_HJ_DONE BIT(17)
#define SLV_INT_MR_DONE BIT(16)
#define SLV_INT_DA_UPD BIT(15)
#define SLV_INT_SDR_FAIL BIT(14)
#define SLV_INT_DDR_FAIL BIT(13)
#define SLV_INT_M_RD_ABORT BIT(12)
#define SLV_INT_DDR_RX_THR BIT(11)
#define SLV_INT_DDR_TX_THR BIT(10)
#define SLV_INT_SDR_RX_THR BIT(9)
#define SLV_INT_SDR_TX_THR BIT(8)
#define SLV_INT_DDR_RX_UNF BIT(7)
#define SLV_INT_DDR_TX_OVF BIT(6)
#define SLV_INT_SDR_RX_UNF BIT(5)
#define SLV_INT_SDR_TX_OVF BIT(4)
#define SLV_INT_DDR_RD_COMP BIT(3)
#define SLV_INT_DDR_WR_COMP BIT(2)
#define SLV_INT_SDR_RD_COMP BIT(1)
#define SLV_INT_SDR_WR_COMP BIT(0)

/* SLV_STATUS0: target-mode transfer byte counters */
/* NOTE(review): trailing lowercase 's' in ..._BYTEs looks like a typo; renaming
 * would break users elsewhere in the file, so it is left as-is.
 */
#define SLV_STATUS0 0x54
#define SLV_STATUS0_IBI_XFRD_BYTEs(s) (((s) & GENMASK(31, 24)) >> 24)
#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))

/* SLV_STATUS1: target-mode bus/DA/FIFO state */
#define SLV_STATUS1 0x58
#define SLV_STATUS1_SCL_IN_RST BIT(31)
#define SLV_STATUS1_HJ_IN_USE BIT(30)
#define SLV_STATUS1_NACK_NXT_PW BIT(29)
#define SLV_STATUS1_NACK_NXT_PR BIT(28)
#define SLV_STATUS1_MR_PEND BIT(27)
#define SLV_STATUS1_HJ_PEND BIT(26)
#define SLV_STATUS1_IBI_PEND BIT(25)
#define SLV_STATUS1_IBI_DIS BIT(24)
#define SLV_STATUS1_BUS_VAR BIT(23)
#define SLV_STATUS1_TCAM0_DIS BIT(22)
#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
#define SLV_STATUS1_VEN_TM BIT(19)
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
#define SLV_STATUS1_SDR_RX_FULL BIT(3)
#define SLV_STATUS1_SDR_TX_FULL BIT(2)
#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)

/* SLV_IBI_CTRL: target-initiated IBI request control */
#define SLV_IBI_CTRL 0x5c
#define SLV_IBI_TCAM_EVNT(x) ((x) << 27)
#define SLV_IBI_PL(x) ((x) << 16)
#define SLV_IBI_TCAM0 BIT(9)
#define SLV_IBI_REQ BIT(8)
#define
SLV_IBI_AUTO_CLR_IBI 1 #define SLV_IBI_AUTO_CLR_PR 2 #define SLV_IBI_AUTO_CLR_IBI_OR_PR 3 #define SLV_IBI_CLEAR_TRIGGER(x) ((x) << 4) #define CMD0_FIFO 0x60 #define CMD0_FIFO_IS_DDR BIT(31) #define CMD0_FIFO_IS_CCC BIT(30) #define CMD0_FIFO_BCH BIT(29) #define XMIT_BURST_STATIC_SUBADDR 0 #define XMIT_SINGLE_INC_SUBADDR 1 #define XMIT_SINGLE_STATIC_SUBADDR 2 #define XMIT_BURST_WITHOUT_SUBADDR 3 #define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27) #define CMD0_FIFO_SBCA BIT(26) #define CMD0_FIFO_RSBC BIT(25) #define CMD0_FIFO_IS_10B BIT(24) #define CMD0_FIFO_PL_LEN(l) ((l) << 12) #define CMD0_FIFO_IS_DB BIT(11) #define CMD0_FIFO_PL_LEN_MAX 4095 #define CMD0_FIFO_DEV_ADDR(a) ((a) << 1) #define CMD0_FIFO_RNW BIT(0) #define CMD1_FIFO 0x64 #define CMD1_FIFO_CMDID(id) ((id) << 24) #define CMD1_FIFO_DB(db) (((db) & GENMASK(15, 8)) << 8) #define CMD1_FIFO_CSRADDR(a) (a) #define CMD1_FIFO_CCC(id) (id) #define TX_FIFO 0x68 #define TX_FIFO_STATUS 0x6C #define IMD_CMD0 0x70 #define IMD_CMD0_PL_LEN(l) ((l) << 12) #define IMD_CMD0_DEV_ADDR(a) ((a) << 1) #define IMD_CMD0_RNW BIT(0) #define IMD_CMD1 0x74 #define IMD_CMD1_CCC(id) (id) #define IMD_DATA 0x78 #define RX_FIFO 0x80 #define IBI_DATA_FIFO 0x84 #define SLV_DDR_TX_FIFO 0x88 #define SLV_DDR_RX_FIFO 0x8c #define DDR_PREAMBLE_MASK GENMASK(19, 18) #define DDR_PREAMBLE_CMD_CRC 0x1 << 18 #define DDR_PREAMBLE_DATA_ABORT 0x2 << 18 #define DDR_PREAMBLE_DATA_ABORT_ALT 0x3 << 18 #define DDR_DATA(x) (((x) & GENMASK(17, 2)) >> 2) #define DDR_EVEN_PARITY BIT(0) #define DDR_ODD_PARITY BIT(1) #define DDR_CRC_AND_HEADER_SIZE 0x4 #define DDR_CONVERT_BUF_LEN(x) (4 * (x)) #define HDR_CMD_RD BIT(15) #define HDR_CMD_CODE(c) (((c) & GENMASK(6, 0)) << 8) #define DDR_CRC_TOKEN (0xC << 14) #define DDR_CRC_TOKEN_MASK GENMASK(17, 14) #define DDR_CRC(t) (((t) & (GENMASK(13, 9))) >> 9) #define CMD_IBI_THR_CTRL 0x90 #define IBIR_THR(t) ((t) << 24) #define CMDR_THR(t) ((t) << 16) #define CMDR_THR_MASK (GENMASK(20, 16)) #define IBI_THR(t) ((t) << 8) #define 
CMD_THR(t) (t)

/* TX/RX FIFO threshold interrupt configuration */
#define TX_RX_THR_CTRL 0x94
#define RX_THR(t) ((t) << 16)
#define RX_THR_MASK (GENMASK(31, 16))
#define TX_THR(t) (t)
#define TX_THR_MASK (GENMASK(15, 0))
#define SLV_DDR_TX_RX_THR_CTRL 0x98
#define SLV_DDR_RX_THR(t) ((t) << 16)
#define SLV_DDR_TX_THR(t) (t)

/* FLUSH_CTRL: per-queue flush strobes */
#define FLUSH_CTRL 0x9c
#define FLUSH_IBI_RESP BIT(24)
#define FLUSH_CMD_RESP BIT(23)
#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
#define FLUSH_IMM_FIFO BIT(20)
#define FLUSH_IBI_FIFO BIT(19)
#define FLUSH_RX_FIFO BIT(18)
#define FLUSH_TX_FIFO BIT(17)
#define FLUSH_CMD_FIFO BIT(16)

/* Target-mode control and protocol-error reporting */
#define SLV_CTRL 0xA0
#define SLV_PROT_ERR_TYPE 0xA4
#define SLV_ERR6_IBI BIT(9)
#define SLV_ERR6_PR BIT(8)
#define SLV_ERR_GETCCC BIT(7)
#define SLV_ERR5 BIT(6)
#define SLV_ERR4 BIT(5)
#define SLV_ERR3 BIT(4)
#define SLV_ERR2_PW BIT(3)
#define SLV_ERR2_SETCCC BIT(2)
#define SLV_ERR1 BIT(1)
#define SLV_ERR0 BIT(0)
#define SLV_STATUS2 0xA8
#define SLV_STATUS3 0xAC
#define SLV_STATUS3_BC_FSM(s) (((s) & GENMASK(26, 16)) >> 16)
#define SLV_STATUS3_MWL(s) ((s) & GENMASK(15, 0))

/* Time-out prescaler controls */
#define TTO_PRESCL_CTRL0 0xb0
#define TTO_PRESCL_CTRL0_PRESCL_I2C(x) ((x) << 16)
#define TTO_PRESCL_CTRL0_PRESCL_I3C(x) (x)
#define TTO_PRESCL_CTRL1 0xb4
#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
#define TTO_PRESCL_CTRL1_DIVA(x) (x)
#define TTO_PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
#define TTO_PRESCL_CTRL1_OD_LOW(x) (x)

/* DEVS_CTRL: activate/clear device retaining-register slots */
#define DEVS_CTRL 0xb8
#define DEVS_CTRL_DEV_CLR_SHIFT 16
#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
#define MAX_DEVS 16

/* DEV_ID_RRx: per-device retaining registers (address, PID, BCR/DCR/LVR) */
#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
#define DEV_ID_RR0_HDR_CAP BIT(10)
#define DEV_ID_RR0_IS_I3C BIT(9)
#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(7, 1) | GENMASK(15, 13))
/* NOTE(review): first 'a' in (a << 1) is unparenthesized; harmless for the
 * simple-variable callers seen here, but confirm before passing expressions.
 */
#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a << 1) & GENMASK(7, 1)) | (((a) & GENMASK(9, 7)) << 13))
#define
DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | (((x) >> 6) & GENMASK(9, 7)))
#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
#define DEV_ID_RR1_PID_MSB(pid) (pid)
#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
#define DEV_ID_RR2_DCR(dcr) (dcr)
#define DEV_ID_RR2_LVR(lvr) (lvr)

/* SIR_MAP: two 16-bit per-device IBI mapping entries per 32-bit register */
#define SIR_MAP(x) (0x180 + ((x) * 4))
#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
#define DEV_ROLE_SLAVE 0
#define DEV_ROLE_MASTER 1
#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
#define SIR_MAP_DEV_SLOW BIT(13)
#define SIR_MAP_DEV_PL(l) ((l) << 8)
#define SIR_MAP_PL_MAX GENMASK(4, 0)
#define SIR_MAP_DEV_DA(a) ((a) << 1)
#define SIR_MAP_DEV_ACK BIT(0)

/* Group addressing and GPIO-style pass-through registers */
#define GRPADDR_LIST 0x198
#define GRPADDR_CS 0x19C
#define GPIR_WORD(x) (0x200 + ((x) * 4))
#define GPI_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
#define GPOR_WORD(x) (0x220 + ((x) * 4))
#define GPO_REG(val, id) (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))

/* ASF (functional-safety) fault status/control registers */
#define ASF_INT_STATUS 0x300
#define ASF_INT_RAW_STATUS 0x304
#define ASF_INT_MASK 0x308
#define ASF_INT_TEST 0x30c
#define ASF_INT_FATAL_SELECT 0x310
#define ASF_INTEGRITY_ERR BIT(6)
#define ASF_PROTOCOL_ERR BIT(5)
#define ASF_TRANS_TIMEOUT_ERR BIT(4)
#define ASF_CSR_ERR BIT(3)
#define ASF_DAP_ERR BIT(2)
#define ASF_SRAM_UNCORR_ERR BIT(1)
#define ASF_SRAM_CORR_ERR BIT(0)
#define ASF_SRAM_CORR_FAULT_STATUS 0x320
#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
#define ASF_SRAM_FAULT_STATS 0x328
#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
#define ASF_TRANS_TOUT_CTRL 0x330
#define ASF_TRANS_TOUT_EN BIT(31)
#define
ASF_TRANS_TOUT_VAL(x) (x)
#define ASF_TRANS_TOUT_FAULT_MASK 0x334
#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
#define ASF_PROTO_FAULT_MASK 0x340
#define ASF_PROTO_FAULT_STATUS 0x344
#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
#define ASF_PROTO_FAULT_M(x) BIT(x)

/*******************************************************************************
 * Local Constants Definition
 ******************************************************************************/

/* TODO: this needs to be configurable in the dts...somehow */
#define I3C_CONTROLLER_ADDR 0x08

/* Maximum i3c devices that the IP can be built with */
#define I3C_MAX_DEVS 11
#define I3C_MAX_MSGS 10
#define I3C_SIR_DEFAULT_DA 0x7F
#define I3C_MAX_IDLE_CANCEL_WAIT_RETRIES 50
/* Register prescaler multipliers per the IP clocking scheme */
#define I3C_PRESCL_REG_SCALE (4)
#define I2C_PRESCL_REG_SCALE (5)
#define I3C_WAIT_FOR_IDLE_STATE_US 100
#define I3C_IDLE_TIMEOUT_CYC \
	(I3C_WAIT_FOR_IDLE_STATE_US * (sys_clock_hw_cycles_per_sec() / USEC_PER_SEC))

/* Target T_LOW period in open-drain mode. */
#define I3C_BUS_TLOW_OD_MIN_NS 200

/*
 * MIPI I3C v1.1.1 Spec defines tsco max as 12ns, but the default for devices is 8ns
 * TODO: this should be configurable by the value with in maxRd from the CCC GETMXDS
 * for individual devices
 */
#define I3C_TSCO_DEFAULT_NS 8

/* Interrupt thresholds.
*/
/* command response fifo threshold */
#define I3C_CMDR_THR 1
/* command tx fifo threshold - unused */
#define I3C_CMDD_THR 1
/* in-band-interrupt data fifo threshold - unused */
#define I3C_IBID_THR 1
/* in-band-interrupt response queue threshold */
#define I3C_IBIR_THR 1
/* tx data threshold - unused */
#define I3C_TX_THR 1

#define LOG_MODULE_NAME I3C_CADENCE
LOG_MODULE_REGISTER(I3C_CADENCE, CONFIG_I3C_CADENCE_LOG_LEVEL);

/*******************************************************************************
 * Local Types Definition
 ******************************************************************************/

/** Describes peripheral HW configuration determined from CONFx registers. */
struct cdns_i3c_hw_config {
	/* Revision ID */
	uint32_t rev_id;
	/* The maximum command queue depth. */
	uint32_t cmd_mem_depth;
	/* The maximum command response queue depth. */
	uint32_t cmdr_mem_depth;
	/* The maximum RX FIFO depth. */
	uint32_t rx_mem_depth;
	/* The maximum TX FIFO depth. */
	uint32_t tx_mem_depth;
	/* The maximum DDR RX FIFO depth. */
	uint32_t ddr_rx_mem_depth;
	/* The maximum DDR TX FIFO depth. */
	uint32_t ddr_tx_mem_depth;
	/* The maximum IBIR FIFO depth. */
	uint32_t ibir_mem_depth;
	/* The maximum IBI FIFO depth. */
	uint32_t ibi_mem_depth;
};

/* Cadence I3C/I2C Device Private Data */
struct cdns_i3c_i2c_dev_data {
	/* Device id within the retaining registers. This is set after bus initialization by the
	 * controller.
	 */
	uint8_t id;
};

/* Single command/transfer */
struct cdns_i3c_cmd {
	uint32_t cmd0;
	uint32_t cmd1;
	uint32_t ddr_header;
	uint32_t ddr_crc;
	uint32_t len;
	uint32_t *num_xfer;
	void *buf;
	uint32_t error;
	enum i3c_data_rate hdr;
};

/* Transfer data */
struct cdns_i3c_xfer {
	struct k_sem complete;
	int ret;
	int num_cmds;
	struct cdns_i3c_cmd cmds[I3C_MAX_MSGS];
};

/* Driver config */
struct cdns_i3c_config {
	struct i3c_driver_config common;
	/** base address of the controller */
	uintptr_t base;
	/** input frequency to the I3C Cadence */
	uint32_t input_frequency;
	/** Interrupt configuration function. */
	void (*irq_config_func)(const struct device *dev);
};

/* Driver instance data */
struct cdns_i3c_data {
	struct i3c_driver_data common;
	struct cdns_i3c_hw_config hw_cfg;
	struct k_mutex bus_lock;
	struct cdns_i3c_i2c_dev_data cdns_i3c_i2c_priv_data[I3C_MAX_DEVS];
	struct cdns_i3c_xfer xfer;
	struct i3c_target_config *target_config;
	struct k_sem ibi_hj_complete;
	uint32_t free_rr_slots;
	uint16_t fifo_bytes_read;
	uint8_t max_devs;
};

/*******************************************************************************
 * Global Variables Declaration
 ******************************************************************************/

/*******************************************************************************
 * Local Functions Declaration
 ******************************************************************************/

/*******************************************************************************
 * Private Functions Code
 ******************************************************************************/

/*
 * Bitwise CRC-5 over a 16-bit word (MSB first), as used by HDR-DDR framing.
 * Feed successive words through to accumulate; initial seed is 0x1F.
 */
static uint8_t i3c_cdns_crc5(uint8_t crc5, uint16_t word)
{
	uint8_t crc0;
	int i;

	/*
	 * crc0 = next_data_bit ^ crc[4]
	 *                1         2            3        4
	 * crc[4:0] = { crc[3:2], crc[1]^crc0, crc[0], crc0 }
	 */
	for (i = 15; i >= 0; --i) {
		crc0 = ((word >> i) ^ (crc5 >> 4)) & 0x1;
		crc5 = ((crc5 << 1) & 0x1a) | (((crc5 >> 1) ^ crc0) << 2) | crc0;
	}

	return crc5 & 0x1f;
}

static uint8_t cdns_i3c_ddr_parity(uint16_t payload)
{
	uint16_t pb;
	uint8_t parity;

	/* Calculate odd parity. */
	pb = (payload >> 15) ^ (payload >> 13) ^ (payload >> 11) ^ (payload >> 9) ^
	     (payload >> 7) ^ (payload >> 5) ^ (payload >> 3) ^ (payload >> 1);
	parity = (pb & 1) << 1;

	/* Calculate even and 1 parity */
	pb = (payload >> 14) ^ (payload >> 12) ^ (payload >> 10) ^ (payload >> 8) ^
	     (payload >> 6) ^ (payload >> 4) ^ (payload >> 2) ^ payload ^ 1;
	parity |= (pb & 1);

	return parity;
}

/* Prepares the ddr word from the payload by adding on parity; this
 * does not write the preamble.
 */
static uint32_t prepare_ddr_word(uint16_t payload)
{
	return (uint32_t)payload << 2 | cdns_i3c_ddr_parity(payload);
}

/* This ensures that PA0 contains 1'b1 which allows for easier Bus Turnaround */
static uint16_t prepare_ddr_cmd_parity_adjustment_bit(uint16_t word)
{
	uint16_t pb;

	pb = (word >> 14) ^ (word >> 12) ^ (word >> 10) ^ (word >> 8) ^ (word >> 6) ^
	     (word >> 4) ^ (word >> 2);

	if (pb & 1) {
		word |= BIT(0);
	}

	return word;
}

/* Computes and sets parity */
/* Returns [7:1] 7-bit addr, [0] even/xor parity */
static uint8_t cdns_i3c_even_parity_byte(uint8_t byte)
{
	uint8_t parity = 0;
	uint8_t b = byte;

	/* Popcount-parity via Kernighan's bit-clearing loop */
	while (b) {
		parity = !parity;
		b = b & (b - 1);
	}
	b = (byte << 1) | !parity;

	return b;
}

/* Check if command response fifo is empty */
static inline bool cdns_i3c_cmd_rsp_fifo_empty(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_CMDR_EMP) ? true : false);
}

/* Check if command fifo is empty */
static inline bool cdns_i3c_cmd_fifo_empty(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_CMDD_EMP) ? true : false);
}

/* Check if command fifo is full */
static inline bool cdns_i3c_cmd_fifo_full(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_CMDD_FULL) ? true : false);
}

/* Check if ibi response fifo is empty */
static inline bool cdns_i3c_ibi_rsp_fifo_empty(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_IBIR_EMP) ? true : false);
}

/* Check if tx fifo is full */
static inline bool cdns_i3c_tx_fifo_full(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_TX_FULL) ? true : false);
}

/* Check if rx fifo is full */
static inline bool cdns_i3c_rx_fifo_full(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_RX_FULL) ? true : false);
}

/* Check if rx fifo is empty */
static inline bool cdns_i3c_rx_fifo_empty(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_RX_EMP) ? true : false);
}

/* Check if ibi fifo is empty */
static inline bool cdns_i3c_ibi_fifo_empty(const struct cdns_i3c_config *config)
{
	uint32_t mst_st = sys_read32(config->base + MST_STATUS0);

	return ((mst_st & MST_STATUS0_IBID_EMP) ? true : false);
}

/* Interrupt handling */
static inline void cdns_i3c_interrupts_disable(const struct cdns_i3c_config *config)
{
	sys_write32(MST_INT_MASK, config->base + MST_IDR);
}

static inline void cdns_i3c_interrupts_clear(const struct cdns_i3c_config *config)
{
	sys_write32(MST_INT_MASK, config->base + MST_ICR);
}

/* FIFO mgmt */

/* Push len bytes to TX_FIFO, 32 bits at a time; trailing partial word is
 * zero-padded via memcpy (also avoids a misaligned/over-read of buf).
 */
static void cdns_i3c_write_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
				   uint32_t len)
{
	const uint32_t *ptr = buf;
	uint32_t remain, val;

	for (remain = len; remain >= 4; remain -= 4) {
		val = *ptr++;
		sys_write32(val, config->base + TX_FIFO);
	}

	if (remain > 0) {
		val = 0;
		memcpy(&val, ptr, remain);
		sys_write32(val, config->base + TX_FIFO);
	}
}

/* Same as cdns_i3c_write_tx_fifo() but targets the target-mode DDR TX FIFO. */
static void cdns_i3c_write_ddr_tx_fifo(const struct cdns_i3c_config *config, const void *buf,
				       uint32_t len)
{
	const uint32_t *ptr = buf;
	uint32_t remain, val;

	for (remain = len; remain >= 4; remain -= 4) {
		val = *ptr++;
		sys_write32(val, config->base + SLV_DDR_TX_FIFO);
	}

	if (remain > 0) {
		val = 0;
		memcpy(&val, ptr, remain);
		sys_write32(val, config->base + SLV_DDR_TX_FIFO);
	}
}

#ifdef CONFIG_I3C_USE_IBI
/* Push an IBI payload into the IBI data FIFO (same packing as TX FIFO). */
static void cdns_i3c_write_ibi_fifo(const struct cdns_i3c_config *config, const void *buf,
				    uint32_t len)
{
	const uint32_t *ptr = buf;
	uint32_t remain, val;

	for (remain = len; remain >= 4; remain -= 4) {
		val = *ptr++;
		sys_write32(val, config->base + IBI_DATA_FIFO);
	}

	if (remain > 0) {
		val = 0;
		memcpy(&val, ptr, remain);
		sys_write32(val, config->base + IBI_DATA_FIFO);
	}
}
#endif /* CONFIG_I3C_USE_IBI */

/* Drain target-mode RX FIFO, delivering each received byte to the
 * application's write_received_cb.
 */
static void cdns_i3c_target_read_rx_fifo(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;

	/* Version 1p7 uses the full 32b FIFO width */
	if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
		uint16_t xferred_bytes =
			SLV_STATUS0_XFRD_BYTES(sys_read32(config->base + SLV_STATUS0));

		for (int i = data->fifo_bytes_read; i <
xferred_bytes; i += 4) {
			uint32_t rx_data = sys_read32(config->base + RX_FIFO);
			/* Call write received cb for each remaining byte */
			for (int j = 0; j < MIN(4, xferred_bytes - i); j++) {
				target_cb->write_received_cb(data->target_config,
							     (rx_data >> (8 * j)));
			}
		}
		/*
		 * store the xfer bytes as the thr interrupt may trigger again as xferred_bytes will
		 * count up to the "total" bytes received
		 */
		data->fifo_bytes_read = xferred_bytes;
	} else {
		/*
		 * Target writes only write to the first byte of the 32 bit
		 * width fifo for older version
		 */
		uint8_t rx_data = (uint8_t)sys_read32(config->base + RX_FIFO);

		target_cb->write_received_cb(data->target_config, rx_data);
	}
}

/* Copy len bytes out of the controller RX FIFO into buf.
 * Returns -EIO if the FIFO underflows before len bytes are available.
 */
static int cdns_i3c_read_rx_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
{
	uint32_t *ptr = buf;
	uint32_t remain, val;

	for (remain = len; remain >= 4; remain -= 4) {
		if (cdns_i3c_rx_fifo_empty(config)) {
			return -EIO;
		}
		val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
		*ptr++ = val;
	}

	if (remain > 0) {
		if (cdns_i3c_rx_fifo_empty(config)) {
			return -EIO;
		}
		val = sys_le32_to_cpu(sys_read32(config->base + RX_FIFO));
		memcpy(ptr, &val, remain);
	}

	return 0;
}

/* Read len HDR-DDR words from the RX FIFO into buf (big-endian 16-bit data),
 * verifying the running CRC-5 against the CRC token word. Returns -EIO on
 * FIFO underflow or CRC mismatch.
 */
static int cdns_i3c_read_rx_fifo_ddr_xfer(const struct cdns_i3c_config *config, void *buf,
					  uint32_t len, uint32_t ddr_header)
{
	uint16_t *ptr = buf;
	uint32_t val;
	uint32_t preamble;
	uint8_t crc5 = 0x1F;

	/*
	 * TODO: This function does not support threshold interrupts, it is expected that the
	 * whole packet to be within the FIFO and not split across multiple calls to this function.
	 */
	/* CRC covers the header word too */
	crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(ddr_header));

	for (int i = 0; i < len; i++) {
		if (cdns_i3c_rx_fifo_empty(config)) {
			return -EIO;
		}
		val = sys_read32(config->base + RX_FIFO);
		preamble = (val & DDR_PREAMBLE_MASK);
		if (preamble == DDR_PREAMBLE_DATA_ABORT ||
		    preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
			*ptr++ = sys_cpu_to_be16((uint16_t)DDR_DATA(val));
			crc5 = i3c_cdns_crc5(crc5, (uint16_t)DDR_DATA(val));
		} else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
			   ((val & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
			uint8_t crc = (uint8_t)DDR_CRC(val);

			if (crc5 != crc) {
				LOG_ERR("DDR RX crc error");
				return -EIO;
			}
		}
	}

	return 0;
}

/* Busy-wait (bounded by I3C_IDLE_TIMEOUT_CYC) for the controller to report
 * idle. Returns 0 when idle, -EAGAIN on timeout.
 */
static inline int cdns_i3c_wait_for_idle(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;
	uint32_t start_time = k_cycle_get_32();

	/**
	 * Spin waiting for device to go idle. It is unlikely that this will
	 * actually take any time unless if the last transaction came immediately
	 * after an error condition.
	 */
	while (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_IDLE)) {
		if (k_cycle_get_32() - start_time > I3C_IDLE_TIMEOUT_CYC) {
			return -EAGAIN;
		}
	}

	return 0;
}

/* Program SCL prescalers for the requested I3C/I2C bus speeds, then write
 * back the actually-achieved frequencies to ctrl_config.
 */
static void cdns_i3c_set_prescalers(const struct device *dev)
{
	struct cdns_i3c_data *data = dev->data;
	const struct cdns_i3c_config *config = dev->config;
	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;

	/* These formulas are from section 6.2.1 of the Cadence I3C Master User Guide.
	 */
	uint32_t prescl_i3c = DIV_ROUND_UP(config->input_frequency,
					   (ctrl_config->scl.i3c * I3C_PRESCL_REG_SCALE)) - 1;
	uint32_t prescl_i2c = DIV_ROUND_UP(config->input_frequency,
					   (ctrl_config->scl.i2c * I2C_PRESCL_REG_SCALE)) - 1;

	/* update with actual value */
	ctrl_config->scl.i3c = config->input_frequency / ((prescl_i3c + 1) * I3C_PRESCL_REG_SCALE);
	ctrl_config->scl.i2c = config->input_frequency / ((prescl_i2c + 1) * I2C_PRESCL_REG_SCALE);

	LOG_DBG("%s: I3C speed = %u, PRESCL_CTRL0.i3c = 0x%x", dev->name, ctrl_config->scl.i3c,
		prescl_i3c);
	LOG_DBG("%s: I2C speed = %u, PRESCL_CTRL0.i2c = 0x%x", dev->name, ctrl_config->scl.i2c,
		prescl_i2c);

	/* Calculate the OD_LOW value assuming a desired T_low period of 210ns. */
	uint32_t pres_step = 1000000000 / (ctrl_config->scl.i3c * 4);
	int32_t od_low = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;

	if (od_low < 0) {
		od_low = 0;
	}
	LOG_DBG("%s: PRESCL_CTRL1.od_low = 0x%x", dev->name, od_low);

	/* disable in order to update timing */
	uint32_t ctrl = sys_read32(config->base + CTRL);

	if (ctrl & CTRL_DEV_EN) {
		sys_write32(~CTRL_DEV_EN & ctrl, config->base + CTRL);
	}

	sys_write32(PRESCL_CTRL0_I3C(prescl_i3c) | PRESCL_CTRL0_I2C(prescl_i2c),
		    config->base + PRESCL_CTRL0);

	/* Sets the open drain low time relative to the push-pull.
	 */
	sys_write32(PRESCL_CTRL1_OD_LOW(od_low & PRESCL_CTRL1_OD_LOW_MASK),
		    config->base + PRESCL_CTRL1);

	/* reenable */
	if (ctrl & CTRL_DEV_EN) {
		sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL);
	}
}

/**
 * @brief Compute RR0 Value from addr
 *
 * @param addr Address of the target
 *
 * @return RR0 value
 */
static uint32_t prepare_rr0_dev_address(uint16_t addr)
{
	/* RR0[7:1] = addr[6:0] | parity^[0] */
	uint32_t ret = cdns_i3c_even_parity_byte(addr);

	if (addr & GENMASK(9, 7)) {
		/* RR0[15:13] = addr[9:7] */
		ret |= (addr & GENMASK(9, 7)) << 6;
		/* RR0[11] = 10b lvr addr */
		ret |= DEV_ID_RR0_LVR_EXT_ADDR;
	}

	return ret;
}

/**
 * @brief Program Retaining Registers with device lists
 *
 * This will program the retaining register with the controller itself
 *
 * @param dev Pointer to controller device driver instance.
 */
static void cdns_i3c_program_controller_retaining_reg(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;

	/* Set controller retaining register */
	uint8_t controller_da = I3C_CONTROLLER_ADDR;

	/* Fall back to the first free dynamic address if the default is taken */
	if (!i3c_addr_slots_is_free(&data->common.attached_dev.addr_slots, controller_da)) {
		controller_da =
			i3c_addr_slots_next_free_find(&data->common.attached_dev.addr_slots, 0);
		LOG_DBG("%s: 0x%02x DA selected for controller", dev->name, controller_da);
	}
	sys_write32(prepare_rr0_dev_address(controller_da), config->base + DEV_ID_RR0(0));

	/* Mark the address as I3C device */
	i3c_addr_slots_mark_i3c(&data->common.attached_dev.addr_slots, controller_da);
}

#ifdef CONFIG_I3C_USE_IBI
/* Enable IBIs from a target: send ENEC to the device, then program its SIR_MAP
 * slot (role, DA, max payload, ack policy). Returns 0 or a negative errno.
 */
static int cdns_i3c_controller_ibi_enable(const struct device *dev, struct i3c_device_desc *target)
{
	uint32_t sir_map;
	uint32_t sir_cfg;
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
	struct i3c_ccc_events i3c_events;
	int ret = 0;

	if (!i3c_device_is_ibi_capable(target)) {
		ret = -EINVAL;
		return ret;
	}

	/* TODO: check for duplicate in SIR */
	sir_cfg =
SIR_MAP_DEV_ROLE(I3C_BCR_DEVICE_ROLE(target->bcr)) |
		  SIR_MAP_DEV_DA(target->dynamic_addr) |
		  SIR_MAP_DEV_PL(target->data_length.max_ibi);
	if (target->ibi_cb != NULL) {
		sir_cfg |= SIR_MAP_DEV_ACK;
	}
	if (target->bcr & I3C_BCR_MAX_DATA_SPEED_LIMIT) {
		sir_cfg |= SIR_MAP_DEV_SLOW;
	}

	LOG_DBG("%s: IBI enabling for 0x%02x (BCR 0x%02x)", dev->name, target->dynamic_addr,
		target->bcr);

	/* Tell target to enable IBI */
	i3c_events.events = I3C_CCC_EVT_INTR;
	ret = i3c_ccc_do_events_set(target, true, &i3c_events);
	if (ret != 0) {
		LOG_ERR("%s: Error sending IBI ENEC for 0x%02x (%d)", dev->name,
			target->dynamic_addr, ret);
		return ret;
	}

	/* SIR_MAP slots are indexed by retaining-register id minus the
	 * controller's own slot 0.
	 */
	sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
	sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
	sir_map |= SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1, sir_cfg);

	sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));

	return ret;
}

/* Disable IBIs from a target: send DISEC, then reset its SIR_MAP slot to the
 * broadcast address (no ack). Returns 0 or a negative errno.
 */
static int cdns_i3c_controller_ibi_disable(const struct device *dev, struct i3c_device_desc *target)
{
	uint32_t sir_map;
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = target->controller_priv;
	struct i3c_ccc_events i3c_events;
	int ret = 0;

	if (!i3c_device_is_ibi_capable(target)) {
		ret = -EINVAL;
		return ret;
	}

	/* Tell target to disable IBI */
	i3c_events.events = I3C_CCC_EVT_INTR;
	ret = i3c_ccc_do_events_set(target, false, &i3c_events);
	if (ret != 0) {
		LOG_ERR("%s: Error sending IBI DISEC for 0x%02x (%d)", dev->name,
			target->dynamic_addr, ret);
		return ret;
	}

	sir_map = sys_read32(config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));
	sir_map &= ~SIR_MAP_DEV_CONF_MASK(cdns_i3c_device_data->id - 1);
	sir_map |= SIR_MAP_DEV_CONF(cdns_i3c_device_data->id - 1,
				    SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));

	sys_write32(sir_map, config->base + SIR_MAP_DEV_REG(cdns_i3c_device_data->id - 1));

	return ret;
}

/* Target-mode Hot-Join request: triggers HJ and blocks (up to 500 ms) until
 * DAA completes. Only valid for secondary controllers without a DA.
 */
static int cdns_i3c_target_ibi_raise_hj(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;

	/* HJ requests should not be done by primary controllers */
	if (!ctrl_config->is_secondary) {
		LOG_ERR("%s: controller is primary, HJ not available", dev->name);
		return -ENOTSUP;
	}

	/* Check if target already has a DA assigned to it */
	if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HAS_DA) {
		LOG_ERR("%s: HJ not available, DA already assigned", dev->name);
		return -EACCES;
	}

	/* Check if HJ requests DISEC CCC with DISHJ field set has been received */
	if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_HJ_DIS) {
		LOG_ERR("%s: HJ requests are currently disabled by DISEC", dev->name);
		return -EAGAIN;
	}

	sys_write32(CTRL_HJ_INIT | sys_read32(config->base + CTRL), config->base + CTRL);
	k_sem_reset(&data->ibi_hj_complete);
	if (k_sem_take(&data->ibi_hj_complete, K_MSEC(500)) != 0) {
		LOG_ERR("%s: timeout waiting for DAA after HJ", dev->name);
		return -ETIMEDOUT;
	}

	return 0;
}

/* Target-mode IBI TIR: queue the payload and request an in-band interrupt. */
static int cdns_i3c_target_ibi_raise_intr(const struct device *dev, struct i3c_ibi *request)
{
	const struct cdns_i3c_config *config = dev->config;
	const struct cdns_i3c_data *data = dev->data;
	uint32_t ibi_ctrl_val;

	LOG_DBG("%s: issuing IBI TIR", dev->name);

	/*
	 * Ensure data will fit within FIFO
	 *
	 * TODO: This limitation prevents burst transfers greater than the
	 * FIFO sizes and should be replaced with an implementation that
	 * utilizes the IBI data threshold interrupts.
	 */
	if (request->payload_len > data->hw_cfg.ibi_mem_depth) {
		LOG_ERR("%s: payload too large for IBI TIR", dev->name);
		return -ENOMEM;
	}

	cdns_i3c_write_ibi_fifo(config, request->payload, request->payload_len);

	/* Write Payload Length and Start Condition */
	ibi_ctrl_val = sys_read32(config->base + SLV_IBI_CTRL);
	ibi_ctrl_val |= SLV_IBI_PL(request->payload_len);
	ibi_ctrl_val |= SLV_IBI_REQ;
	sys_write32(ibi_ctrl_val, config->base + SLV_IBI_CTRL);

	return 0;
}

/* Dispatch a target-initiated IBI request by type (TIR / CR / Hot-Join). */
static int cdns_i3c_target_ibi_raise(const struct device *dev, struct i3c_ibi *request)
{
	struct cdns_i3c_data *data = dev->data;

	if (request == NULL) {
		return -EINVAL;
	}

	switch (request->ibi_type) {
	case I3C_IBI_TARGET_INTR:
		/* Check IP Revision since older versions of CDNS IP do not support IBI interrupt*/
		if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
			return cdns_i3c_target_ibi_raise_intr(dev, request);
		} else {
			return -ENOTSUP;
		}
	case I3C_IBI_CONTROLLER_ROLE_REQUEST:
		/* TODO: Cadence I3C can support CR, but not implemented yet */
		return -ENOTSUP;
	case I3C_IBI_HOTJOIN:
		return cdns_i3c_target_ibi_raise_hj(dev);
	default:
		return -EINVAL;
	}
}
#endif

/* Abort the in-flight transfer: disable the controller, wait (bounded) for
 * idle, flush all queues, then re-enable.
 */
static void cdns_i3c_cancel_transfer(const struct device *dev)
{
	struct cdns_i3c_data *data = dev->data;
	const struct cdns_i3c_config *config = dev->config;
	uint32_t val;
	uint32_t retry_count;

	/* Disable further interrupts */
	sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR);

	/* Ignore if no pending transfer */
	if (data->xfer.num_cmds == 0) {
		return;
	}

	data->xfer.num_cmds = 0;

	/* Clear main enable bit to disable further transactions */
	sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL);

	/**
	 * Spin waiting for device to go idle. It is unlikely that this will
	 * actually take any time since we only get here if a transaction didn't
	 * complete in a long time.
*/ retry_count = I3C_MAX_IDLE_CANCEL_WAIT_RETRIES; while (retry_count--) { val = sys_read32(config->base + MST_STATUS0); if (val & MST_STATUS0_IDLE) { break; } k_msleep(10); } if (retry_count == 0) { data->xfer.ret = -ETIMEDOUT; } /** * Flush all queues. */ sys_write32(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO | FLUSH_CMD_RESP, config->base + FLUSH_CTRL); /* Re-enable device */ sys_write32(CTRL_DEV_EN | sys_read32(config->base + CTRL), config->base + CTRL); } /** * @brief Start a I3C/I2C Transfer * * This is to be called from a I3C/I2C transfer function. This will write * all data to tx and cmd fifos * * @param dev Pointer to controller device driver instance. */ static void cdns_i3c_start_transfer(const struct device *dev) { struct cdns_i3c_data *data = dev->data; const struct cdns_i3c_config *config = dev->config; struct cdns_i3c_xfer *xfer = &data->xfer; /* Ensure no pending command response queue threshold interrupt */ sys_write32(MST_INT_CMDD_EMP, config->base + MST_ICR); /* Make sure RX FIFO is empty. 
*/ while (!cdns_i3c_rx_fifo_empty(config)) { (void)sys_read32(config->base + RX_FIFO); } /* Make sure CMDR FIFO is empty too */ while (!cdns_i3c_cmd_rsp_fifo_empty(config)) { (void)sys_read32(config->base + CMDR); } /* Write all tx data to fifo */ for (unsigned int i = 0; i < xfer->num_cmds; i++) { if (xfer->cmds[i].hdr == I3C_DATA_RATE_SDR) { if (!(xfer->cmds[i].cmd0 & CMD0_FIFO_RNW)) { cdns_i3c_write_tx_fifo(config, xfer->cmds[i].buf, xfer->cmds[i].len); } } else if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) { /* DDR Xfer requires sending header block*/ cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_header, DDR_CRC_AND_HEADER_SIZE); /* If not read operation need to send data + crc of data*/ if (!(DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) { uint8_t *buf = (uint8_t *)xfer->cmds[i].buf; uint32_t ddr_message = 0; uint16_t ddr_data_payload = sys_get_be16(&buf[0]); /* HDR-DDR Data Words */ ddr_message = (DDR_PREAMBLE_DATA_ABORT | prepare_ddr_word(ddr_data_payload)); cdns_i3c_write_tx_fifo(config, &ddr_message, DDR_CRC_AND_HEADER_SIZE); for (int j = 2; j < ((xfer->cmds[i].len - 2) * 2); j += 2) { ddr_data_payload = sys_get_be16(&buf[j]); ddr_message = (DDR_PREAMBLE_DATA_ABORT_ALT | prepare_ddr_word(ddr_data_payload)); cdns_i3c_write_tx_fifo(config, &ddr_message, DDR_CRC_AND_HEADER_SIZE); } /* HDR-DDR CRC Word */ cdns_i3c_write_tx_fifo(config, &xfer->cmds[i].ddr_crc, DDR_CRC_AND_HEADER_SIZE); } } else { xfer->ret = -ENOTSUP; return; } } /* Write all data to cmd fifos */ for (unsigned int i = 0; i < xfer->num_cmds; i++) { /* The command ID is just the msg index. 
*/ xfer->cmds[i].cmd1 |= CMD1_FIFO_CMDID(i); sys_write32(xfer->cmds[i].cmd1, config->base + CMD1_FIFO); sys_write32(xfer->cmds[i].cmd0, config->base + CMD0_FIFO); if (xfer->cmds[i].hdr == I3C_DATA_RATE_HDR_DDR) { sys_write32(0x00, config->base + CMD1_FIFO); if ((DDR_DATA(xfer->cmds[i].ddr_header) & HDR_CMD_RD)) { sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(1), config->base + CMD0_FIFO); } else { sys_write32(CMD0_FIFO_IS_DDR | CMD0_FIFO_PL_LEN(xfer->cmds[i].len), config->base + CMD0_FIFO); } } } /* kickoff transfer */ sys_write32(CTRL_MCS | sys_read32(config->base + CTRL), config->base + CTRL); sys_write32(MST_INT_CMDD_EMP, config->base + MST_IER); } /** * @brief Send Common Command Code (CCC). * * @see i3c_do_ccc * * @param dev Pointer to controller device driver instance. * @param payload Pointer to CCC payload. * * @return @see i3c_do_ccc */ static int cdns_i3c_do_ccc(const struct device *dev, struct i3c_ccc_payload *payload) { const struct cdns_i3c_config *config = dev->config; struct cdns_i3c_data *data = dev->data; struct cdns_i3c_cmd *cmd; int ret = 0; uint8_t num_cmds = 0; /* make sure we are currently the active controller */ if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) { return -EACCES; } if (payload == NULL) { return -EINVAL; } /* * Ensure data will fit within FIFOs. * * TODO: This limitation prevents burst transfers greater than the * FIFO sizes and should be replaced with an implementation that * utilizes the RX/TX data threshold interrupts. */ uint32_t num_msgs = 1 + ((payload->ccc.data_len > 0) ? payload->targets.num_targets : MAX(payload->targets.num_targets - 1, 0)); if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) { LOG_ERR("%s: Too many messages", dev->name); return -ENOMEM; } uint32_t rxsize = 0; /* defining byte is stored in a separate register for direct CCCs */ uint32_t txsize = i3c_ccc_is_payload_broadcast(payload) ? 
		ROUND_UP(payload->ccc.data_len, 4) : 0;

	/* Sum per-target payloads (word aligned) to validate against FIFO depths */
	for (int i = 0; i < payload->targets.num_targets; i++) {
		if (payload->targets.payloads[i].rnw) {
			rxsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
		} else {
			txsize += ROUND_UP(payload->targets.payloads[i].data_len, 4);
		}
	}
	if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
		LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
		return -ENOMEM;
	}

	LOG_DBG("%s: CCC[0x%02x]", dev->name, payload->ccc.id);

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	/* wait for idle */
	ret = cdns_i3c_wait_for_idle(dev);
	if (ret != 0) {
		goto error;
	}

	/* if this is a direct CCC */
	if (!i3c_ccc_is_payload_broadcast(payload)) {
		/* if the CCC has no data bytes, then the target payload must be in
		 * the same command buffer
		 */
		for (int i = 0; i < payload->targets.num_targets; i++) {
			cmd = &data->xfer.cmds[i];
			num_cmds++;
			cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
			cmd->cmd0 = CMD0_FIFO_IS_CCC;
			/* if there is a defining byte */
			if (payload->ccc.data_len == 1) {
				/* Only revision 1p7 supports defining byte for direct CCCs */
				if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) {
					cmd->cmd0 |= CMD0_FIFO_IS_DB;
					cmd->cmd1 |= CMD1_FIFO_DB(payload->ccc.data[0]);
				} else {
					LOG_ERR("%s: Defining Byte with Direct CCC not supported "
						"with rev %lup%lu",
						dev->name, REV_ID_REV_MAJOR(data->hw_cfg.rev_id),
						REV_ID_REV_MINOR(data->hw_cfg.rev_id));
					ret = -ENOTSUP;
					goto error;
				}
			} else if (payload->ccc.data_len > 1) {
				/* a direct CCC carries at most one defining byte */
				LOG_ERR("%s: Defining Byte length greater than 1", dev->name);
				ret = -EINVAL;
				goto error;
			}
			/* for a short CCC, i.e. where a direct ccc has multiple targets,
			 * BCH must be 0 for subsequent targets and RSBC must be 1, otherwise
			 * if there is just one target, RSBC must be 0 on the first target
			 */
			if (i == 0) {
				cmd->cmd0 |= CMD0_FIFO_BCH;
			}
			if (i < (payload->targets.num_targets - 1)) {
				cmd->cmd0 |= CMD0_FIFO_RSBC;
			}

			cmd->buf = payload->targets.payloads[i].data;
			cmd->len = payload->targets.payloads[i].data_len;
			cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(payload->targets.payloads[i].addr) |
				     CMD0_FIFO_PL_LEN(payload->targets.payloads[i].data_len);
			if (payload->targets.payloads[i].rnw) {
				cmd->cmd0 |= CMD0_FIFO_RNW;
			}
			cmd->hdr = I3C_DATA_RATE_SDR;
			/*
			 * write the address of num_xfer which is to be updated upon message
			 * completion
			 */
			cmd->num_xfer = &(payload->targets.payloads[i].num_xfer);
		}
	} else {
		/* broadcast CCC: a single command descriptor is enough */
		cmd = &data->xfer.cmds[0];
		num_cmds++;
		cmd->cmd1 = CMD1_FIFO_CCC(payload->ccc.id);
		cmd->cmd0 = CMD0_FIFO_IS_CCC | CMD0_FIFO_BCH;
		cmd->hdr = I3C_DATA_RATE_SDR;

		if (payload->ccc.data_len > 0) {
			/* Write additional data for CCC if needed */
			cmd->buf = payload->ccc.data;
			cmd->len = payload->ccc.data_len;
			cmd->cmd0 |= CMD0_FIFO_PL_LEN(payload->ccc.data_len);
			/* write the address of num_xfer which is to be updated upon message
			 * completion
			 */
			cmd->num_xfer = &(payload->ccc.num_xfer);
		} else {
			/* no data to transfer */
			cmd->len = 0;
			cmd->num_xfer = NULL;
		}
	}

	/* xfer.ret is overwritten by the ISR on completion; -ETIMEDOUT is the default */
	data->xfer.ret = -ETIMEDOUT;
	data->xfer.num_cmds = num_cmds;

	cdns_i3c_start_transfer(dev);
	if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
		cdns_i3c_cancel_transfer(dev);
	}

	if (data->xfer.ret < 0) {
		LOG_ERR("%s: CCC[0x%02x] error (%d)", dev->name, payload->ccc.id, data->xfer.ret);
	}

	ret = data->xfer.ret;
error:
	k_mutex_unlock(&data->bus_lock);

	return ret;
}

/**
 * @brief Perform Dynamic Address Assignment.
 *
 * @see i3c_do_daa
 *
 * @param dev Pointer to controller device driver instance.
 *
 * @return @see i3c_do_daa
 */
static int cdns_i3c_do_daa(const struct device *dev)
{
	struct cdns_i3c_data *data = dev->data;
	const struct cdns_i3c_config *config = dev->config;
	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;

	/* DAA should not be done by secondary controllers */
	if (ctrl_config->is_secondary) {
		return -EACCES;
	}

	/* read dev active reg */
	uint32_t olddevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
	/* ignore the controller register */
	olddevs |= BIT(0);

	/* the Cadence I3C IP will assign an address for it from the RR */
	struct i3c_ccc_payload entdaa_ccc;

	memset(&entdaa_ccc, 0, sizeof(entdaa_ccc));
	entdaa_ccc.ccc.id = I3C_CCC_ENTDAA;

	int status = cdns_i3c_do_ccc(dev, &entdaa_ccc);

	if (status != 0) {
		return status;
	}

	/* read again dev active reg */
	uint32_t newdevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;

	/* look for new bits that were set */
	newdevs &= ~olddevs;

	if (newdevs) {
		/* loop through each set bit for new devices.
		 * find_lsb_set()/find_msb_set() are 1-based, hence the
		 * rr_idx = i - 1 conversion to the 0-based slot index.
		 */
		for (uint8_t i = find_lsb_set(newdevs); i <= find_msb_set(newdevs); i++) {
			uint8_t rr_idx = i - 1;

			if (newdevs & BIT(rr_idx)) {
				/* Read RRx registers */
				uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(rr_idx));
				uint32_t dev_id_rr1 = sys_read32(config->base + DEV_ID_RR1(rr_idx));
				uint32_t dev_id_rr2 = sys_read32(config->base + DEV_ID_RR2(rr_idx));

				/* PID is split across RR1 (MSB) and the top half of RR2 */
				uint64_t pid = ((uint64_t)dev_id_rr1 << 16) + (dev_id_rr2 >> 16);
				uint8_t dyn_addr = (dev_id_rr0 & 0xFE) >> 1;
				uint8_t bcr = dev_id_rr2 >> 8;
				uint8_t dcr = dev_id_rr2 & 0xFF;

				const struct i3c_device_id i3c_id = I3C_DEVICE_ID(pid);
				struct i3c_device_desc *target = i3c_device_find(dev, &i3c_id);

				if (target == NULL) {
					/* unknown device: only reserve its address slot */
					LOG_INF("%s: PID 0x%012llx is not in registered device "
						"list, given DA 0x%02x",
						dev->name, pid, dyn_addr);
					i3c_addr_slots_mark_i3c(
						&data->common.attached_dev.addr_slots, dyn_addr);
				} else {
					target->dynamic_addr = dyn_addr;
					target->bcr = bcr;
					target->dcr = dcr;

					LOG_DBG("%s: PID 0x%012llx assigned dynamic address 0x%02x",
						dev->name, pid, dyn_addr);
				}
			}
		}
	} else {
		LOG_DBG("%s: ENTDAA: No devices found", dev->name);
	}

	/* mark slot as not free, may already be set if already attached */
	data->free_rr_slots &= ~newdevs;

	/* Unmask Hot-Join request interrupts. HJ will send DISEC HJ from the CTRL value */
	struct i3c_ccc_events i3c_events;

	i3c_events.events = I3C_CCC_EVT_HJ;
	status = i3c_ccc_do_events_all_set(dev, true, &i3c_events);
	if (status != 0) {
		LOG_DBG("%s: Broadcast ENEC was NACK", dev->name);
	}

	return 0;
}

/**
 * @brief Configure I2C hardware.
 *
 * @param dev Pointer to controller device driver instance.
 * @param config Value of the configuration parameters.
 *
 * @retval 0 If successful.
 * @retval -EINVAL If invalid configure parameters.
 * @retval -EIO General Input/Output errors.
 * @retval -ENOSYS If not implemented.
 */
static int cdns_i3c_i2c_api_configure(const struct device *dev, uint32_t config)
{
	struct cdns_i3c_data *data = dev->data;
	struct i3c_config_controller *ctrl_config = &data->common.ctrl_config;

	switch (I2C_SPEED_GET(config)) {
	case I2C_SPEED_STANDARD:
		ctrl_config->scl.i2c = 100000;
		break;
	case I2C_SPEED_FAST:
		ctrl_config->scl.i2c = 400000;
		break;
	case I2C_SPEED_FAST_PLUS:
		ctrl_config->scl.i2c = 1000000;
		break;
	case I2C_SPEED_HIGH:
		ctrl_config->scl.i2c = 3400000;
		break;
	case I2C_SPEED_ULTRA:
		ctrl_config->scl.i2c = 5000000;
		break;
	default:
		/* NOTE(review): unknown speeds silently keep the previous
		 * scl.i2c value and still return 0 — confirm this best-effort
		 * behavior is intended rather than returning -EINVAL.
		 */
		break;
	}

	cdns_i3c_set_prescalers(dev);

	return 0;
}

/**
 * @brief Configure I3C hardware.
 *
 * @param dev Pointer to controller device driver instance.
 * @param type Type of configuration parameters being passed
 *             in @p config.
 * @param config Pointer to the configuration parameters.
 *
 * @retval 0 If successful.
 * @retval -EINVAL If invalid configure parameters.
 * @retval -EIO General Input/Output errors.
 * @retval -ENOSYS If not implemented.
*/ static int cdns_i3c_configure(const struct device *dev, enum i3c_config_type type, void *config) { struct cdns_i3c_data *data = dev->data; struct i3c_config_controller *ctrl_cfg = config; if ((ctrl_cfg->scl.i2c == 0U) || (ctrl_cfg->scl.i3c == 0U)) { return -EINVAL; } data->common.ctrl_config.scl.i3c = ctrl_cfg->scl.i3c; data->common.ctrl_config.scl.i2c = ctrl_cfg->scl.i2c; cdns_i3c_set_prescalers(dev); return 0; } /** * @brief Complete a I3C/I2C Transfer * * This is to be called from an ISR when the Command Response FIFO * is Empty. This will check each Command Response reading the RX * FIFO if message was a RnW and if any message had an error. * * @param dev Pointer to controller device driver instance. */ static void cdns_i3c_complete_transfer(const struct device *dev) { struct cdns_i3c_data *data = dev->data; const struct cdns_i3c_config *config = dev->config; uint32_t cmdr; uint32_t id = 0; uint32_t xfer = 0; int ret = 0; struct cdns_i3c_cmd *cmd; bool was_full; /* Used only to determine in the case of a controller abort */ was_full = cdns_i3c_rx_fifo_full(config); /* Disable further interrupts */ sys_write32(MST_INT_CMDD_EMP, config->base + MST_IDR); /* Ignore if no pending transfer */ if (data->xfer.num_cmds == 0) { return; } /* Process all results in fifo */ for (uint32_t status0 = sys_read32(config->base + MST_STATUS0); !(status0 & MST_STATUS0_CMDR_EMP); status0 = sys_read32(config->base + MST_STATUS0)) { cmdr = sys_read32(config->base + CMDR); id = CMDR_CMDID(cmdr); if (id == CMDR_CMDID_HJACK_DISEC || id == CMDR_CMDID_HJACK_ENTDAA || id >= data->xfer.num_cmds) { continue; } cmd = &data->xfer.cmds[id]; xfer = MIN(CMDR_XFER_BYTES(cmdr), cmd->len); if (cmd->num_xfer != NULL) { *cmd->num_xfer = xfer; } /* Read any rx data into buffer */ if (cmd->cmd0 & CMD0_FIFO_RNW) { ret = cdns_i3c_read_rx_fifo(config, cmd->buf, xfer); } if ((cmd->hdr == I3C_DATA_RATE_HDR_DDR) && (DDR_DATA(cmd->ddr_header) & HDR_CMD_RD)) { ret = cdns_i3c_read_rx_fifo_ddr_xfer(config, 
cmd->buf, xfer, cmd->ddr_header); } /* Record error */ cmd->error = CMDR_ERROR(cmdr); } for (int i = 0; i < data->xfer.num_cmds; i++) { switch (data->xfer.cmds[i].error) { case CMDR_NO_ERROR: break; case CMDR_MST_ABORT: /* * A controller abort is forced if the RX FIFO fills up * There is also the case where the fifo can be full as * the len of the packet is the same length of the fifo * Check that the requested len is greater than the total * transferred to confirm that is not case. Otherwise the * abort was caused by the buffer length being meet and * the target did not give an End of Data (EoD) in the T * bit. Do not treat that condition as an error because * some targets will just auto-increment the read address * way beyond the buffer not giving an EoD. */ if ((was_full) && (data->xfer.cmds[i].len > *data->xfer.cmds[i].num_xfer)) { ret = -ENOSPC; } else { LOG_DBG("%s: Controller Abort due to buffer length excedded with " "no EoD from target", dev->name); } break; case CMDR_M0_ERROR: { uint8_t ccc = data->xfer.cmds[i].cmd1 & 0xFF; /* * The M0 is an illegally formatted CCC. i.e. the Controller * receives 1 byte instead of 2 with the GETMWL CCC. This can * be problematic for CCCs that can have variable length such * as GETMXDS and GETCAPS. Verify the number of bytes received matches * what's expected from the specification and ignore the error. The IP will * still retramsit the same CCC and theres nothing that can be done to * prevent this. It it still up to the application to read `num_xfer` to * determine the number of bytes returned. */ if (ccc == I3C_CCC_GETMXDS) { /* * Whether GETMXDS format 1 and format 2 can't be known ahead of * time which will be returned. 
*/ if ((*data->xfer.cmds[i].num_xfer != sizeof(((union i3c_ccc_getmxds *)0)->fmt1)) && (*data->xfer.cmds[i].num_xfer != sizeof(((union i3c_ccc_getmxds *)0)->fmt2))) { ret = -EIO; } } else if (ccc == I3C_CCC_GETCAPS) { /* GETCAPS can only return 1-4 bytes */ if (*data->xfer.cmds[i].num_xfer > sizeof(union i3c_ccc_getcaps)) { ret = -EIO; } } else { ret = -EIO; } break; } case CMDR_DDR_PREAMBLE_ERROR: case CMDR_DDR_PARITY_ERROR: case CMDR_M1_ERROR: case CMDR_M2_ERROR: case CMDR_NACK_RESP: case CMDR_DDR_DROPPED: ret = -EIO; break; case CMDR_DDR_RX_FIFO_OVF: case CMDR_DDR_TX_FIFO_UNF: ret = -ENOSPC; break; case CMDR_INVALID_DA: default: ret = -EINVAL; break; } } data->xfer.ret = ret; /* Indicate no transfer is pending */ data->xfer.num_cmds = 0; k_sem_give(&data->xfer.complete); } /** * @brief Transfer messages in I2C mode. * * @param dev Pointer to device driver instance. * @param target Pointer to target device descriptor. * @param msgs Pointer to I2C messages. * @param num_msgs Number of messages to transfers. * * @retval 0 If successful. * @retval -EIO General input / output error. 
 * @retval -EACCES If this controller is not currently the active controller.
 * @retval -ENOMEM If the messages do not fit in the command/data FIFOs.
 * @retval -EINVAL Address not registered
 */
static int cdns_i3c_i2c_transfer(const struct device *dev, struct i3c_i2c_device_desc *i2c_dev,
				 struct i2c_msg *msgs, uint8_t num_msgs)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	uint32_t txsize = 0;
	uint32_t rxsize = 0;
	int ret;

	/* make sure we are currently the active controller */
	if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
		return -EACCES;
	}

	if (num_msgs == 0) {
		return 0;
	}

	if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
		LOG_ERR("%s: Too many messages", dev->name);
		return -ENOMEM;
	}

	/*
	 * Ensure data will fit within FIFOs
	 */
	for (unsigned int i = 0; i < num_msgs; i++) {
		if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) {
			rxsize += ROUND_UP(msgs[i].len, 4);
		} else {
			txsize += ROUND_UP(msgs[i].len, 4);
		}
	}
	if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
		LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
		return -ENOMEM;
	}

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	/* wait for idle */
	ret = cdns_i3c_wait_for_idle(dev);
	if (ret != 0) {
		goto error;
	}

	/* Build one command descriptor per message */
	for (unsigned int i = 0; i < num_msgs; i++) {
		struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];

		cmd->len = msgs[i].len;
		cmd->buf = msgs[i].buf;
		/* not an i3c transfer, but must be set to sdr */
		cmd->hdr = I3C_DATA_RATE_SDR;

		cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
		cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(i2c_dev->addr);
		cmd->cmd0 |= CMD0_FIFO_PL_LEN(msgs[i].len);

		/* Send repeated start on all transfers except the last or those marked STOP.
*/ if ((i < (num_msgs - 1)) && ((msgs[i].flags & I2C_MSG_STOP) == 0)) { cmd->cmd0 |= CMD0_FIFO_RSBC; } if (msgs[i].flags & I2C_MSG_ADDR_10_BITS) { cmd->cmd0 |= CMD0_FIFO_IS_10B; } if ((msgs[i].flags & I2C_MSG_RW_MASK) == I2C_MSG_READ) { cmd->cmd0 |= CMD0_FIFO_RNW; } /* i2c transfers are a don't care for num_xfer */ cmd->num_xfer = NULL; } data->xfer.ret = -ETIMEDOUT; data->xfer.num_cmds = num_msgs; cdns_i3c_start_transfer(dev); if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) { cdns_i3c_cancel_transfer(dev); } ret = data->xfer.ret; error: k_mutex_unlock(&data->bus_lock); return ret; } static int cdns_i3c_master_get_rr_slot(const struct device *dev, uint8_t dyn_addr) { struct cdns_i3c_data *data = dev->data; const struct cdns_i3c_config *config = dev->config; if (dyn_addr == 0) { if (!data->free_rr_slots) { return -ENOSPC; } return find_lsb_set(data->free_rr_slots) - 1; } uint32_t activedevs = sys_read32(config->base + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK; activedevs &= ~BIT(0); /* loop through each set bit for new devices */ for (uint8_t i = find_lsb_set(activedevs); i <= find_msb_set(activedevs); i++) { if (activedevs & BIT(i)) { uint32_t rr = sys_read32(config->base + DEV_ID_RR0(i)); if (!(rr & DEV_ID_RR0_IS_I3C) || DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr) { continue; } return i; } } return -EINVAL; } static int cdns_i3c_attach_device(const struct device *dev, struct i3c_device_desc *desc, uint8_t addr) { const struct cdns_i3c_config *config = dev->config; struct cdns_i3c_data *data = dev->data; int slot = cdns_i3c_master_get_rr_slot(dev, desc->dynamic_addr); if (slot < 0) { LOG_ERR("%s: no space for i3c device: %s", dev->name, desc->dev->name); return slot; } k_mutex_lock(&data->bus_lock, K_FOREVER); data->cdns_i3c_i2c_priv_data[slot].id = slot; desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]); data->free_rr_slots &= ~BIT(slot); uint32_t dev_id_rr0 = DEV_ID_RR0_IS_I3C | prepare_rr0_dev_address(addr); uint32_t dev_id_rr1 = 
		DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
	uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF);

	/* Program the retaining registers for this slot */
	sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
	sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(slot));
	sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));

	/** Mark Devices as active, devices that will be found and marked active during DAA,
	 * it will be given the exact DA programmed in its RR if the PID matches and marked
	 * as active during ENTDAA, otherwise they get set as active here. If dynamic address
	 * is set, then it is assumed that it was already initialized by the primary controller.
	 */
	if ((desc->static_addr != 0) || (desc->dynamic_addr != 0)) {
		sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
			    config->base + DEVS_CTRL);
	}

	k_mutex_unlock(&data->bus_lock);

	return 0;
}

/* Reprogram a previously attached device's retaining registers (e.g. after
 * its dynamic address changed).
 */
static int cdns_i3c_reattach_device(const struct device *dev, struct i3c_device_desc *desc,
				    uint8_t old_dyn_addr)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;

	if (cdns_i3c_device_data == NULL) {
		LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
		return -EINVAL;
	}

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	uint32_t dev_id_rr0 = DEV_ID_RR0_IS_I3C | prepare_rr0_dev_address(desc->dynamic_addr);
	uint32_t dev_id_rr1 = DEV_ID_RR1_PID_MSB((desc->pid & 0xFFFFFFFF0000) >> 16);
	uint32_t dev_id_rr2 = DEV_ID_RR2_PID_LSB(desc->pid & 0xFFFF) | DEV_ID_RR2_BCR(desc->bcr) |
			      DEV_ID_RR2_DCR(desc->dcr);

	sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(cdns_i3c_device_data->id));
	sys_write32(dev_id_rr1, config->base + DEV_ID_RR1(cdns_i3c_device_data->id));
	sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(cdns_i3c_device_data->id));

	k_mutex_unlock(&data->bus_lock);

	return 0;
}

/* Release an I3C device's RR slot and mark it free again. */
static int cdns_i3c_detach_device(const struct device *dev, struct i3c_device_desc *desc)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	struct cdns_i3c_i2c_dev_data *cdns_i3c_device_data = desc->controller_priv;

	if (cdns_i3c_device_data == NULL) {
		LOG_ERR("%s: %s: device not attached", dev->name, desc->dev->name);
		return -EINVAL;
	}

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	sys_write32(sys_read32(config->base + DEVS_CTRL) |
			    DEVS_CTRL_DEV_CLR(cdns_i3c_device_data->id),
		    config->base + DEVS_CTRL);
	data->free_rr_slots |= BIT(cdns_i3c_device_data->id);
	desc->controller_priv = NULL;

	k_mutex_unlock(&data->bus_lock);

	return 0;
}

/* Claim a free RR slot for a legacy I2C device and program its address/LVR. */
static int cdns_i3c_i2c_attach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;

	int slot = cdns_i3c_master_get_rr_slot(dev, 0);

	if (slot < 0) {
		LOG_ERR("%s: no space for i2c device: addr 0x%02x", dev->name, desc->addr);
		return slot;
	}

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	uint32_t dev_id_rr0 = prepare_rr0_dev_address(desc->addr);
	uint32_t dev_id_rr2 = DEV_ID_RR2_LVR(desc->lvr);

	sys_write32(dev_id_rr0, config->base + DEV_ID_RR0(slot));
	sys_write32(0, config->base + DEV_ID_RR1(slot));
	sys_write32(dev_id_rr2, config->base + DEV_ID_RR2(slot));

	data->cdns_i3c_i2c_priv_data[slot].id = slot;
	desc->controller_priv = &(data->cdns_i3c_i2c_priv_data[slot]);
	data->free_rr_slots &= ~BIT(slot);

	sys_write32(sys_read32(config->base + DEVS_CTRL) | DEVS_CTRL_DEV_ACTIVE(slot),
		    config->base + DEVS_CTRL);

	k_mutex_unlock(&data->bus_lock);

	return 0;
}

/* Release an I2C device's RR slot and mark it free again. */
static int cdns_i3c_i2c_detach_device(const struct device *dev, struct i3c_i2c_device_desc *desc)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	struct cdns_i3c_i2c_dev_data *cdns_i2c_device_data = desc->controller_priv;

	if (cdns_i2c_device_data == NULL) {
		LOG_ERR("%s: device not attached", dev->name);
		return -EINVAL;
	}

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	sys_write32(sys_read32(config->base + DEVS_CTRL) |
			    DEVS_CTRL_DEV_CLR(cdns_i2c_device_data->id),
		    config->base + DEVS_CTRL);
	data->free_rr_slots |= BIT(cdns_i2c_device_data->id);
	desc->controller_priv = NULL;

	k_mutex_unlock(&data->bus_lock);

	return 0;
}

/**
 * @brief Transfer messages in I3C mode.
 *
 * @see i3c_transfer
 *
 * @param dev Pointer to device driver instance.
 * @param target Pointer to target device descriptor.
 * @param msgs Pointer to I3C messages.
 * @param num_msgs Number of messages to transfers.
 *
 * @return @see i3c_transfer
 */
static int cdns_i3c_transfer(const struct device *dev, struct i3c_device_desc *target,
			     struct i3c_msg *msgs, uint8_t num_msgs)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;
	int txsize = 0;
	int rxsize = 0;
	int ret;

	/* make sure we are currently the active controller */
	if (!(sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE)) {
		return -EACCES;
	}

	if (num_msgs == 0) {
		return 0;
	}

	if (num_msgs > data->hw_cfg.cmd_mem_depth || num_msgs > data->hw_cfg.cmdr_mem_depth) {
		LOG_ERR("%s: Too many messages", dev->name);
		return -ENOMEM;
	}

	/*
	 * Ensure data will fit within FIFOs.
	 *
	 * TODO: This limitation prevents burst transfers greater than the
	 * FIFO sizes and should be replaced with an implementation that
	 * utilizes the RX/TX data interrupts.
	 */
	for (int i = 0; i < num_msgs; i++) {
		if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
			rxsize += ROUND_UP(msgs[i].len, 4);
		} else {
			txsize += ROUND_UP(msgs[i].len, 4);
		}
	}
	if ((rxsize > data->hw_cfg.rx_mem_depth) || (txsize > data->hw_cfg.tx_mem_depth)) {
		LOG_ERR("%s: Total RX and/or TX transfer larger than FIFO", dev->name);
		return -ENOMEM;
	}

	k_mutex_lock(&data->bus_lock, K_FOREVER);

	/* wait for idle */
	ret = cdns_i3c_wait_for_idle(dev);
	if (ret != 0) {
		goto error;
	}

	/*
	 * Prepare transfer commands. Currently there is only a single transfer
	 * in-flight but it would be possible to keep a queue of transfers. If so,
	 * this preparation could be completed outside of the bus lock allowing
	 * greater parallelism.
	 */
	bool send_broadcast = true;

	for (int i = 0; i < num_msgs; i++) {
		struct cdns_i3c_cmd *cmd = &data->xfer.cmds[i];
		uint32_t pl = msgs[i].len;
		/* check hdr mode */
		if ((!(msgs[i].flags & I3C_MSG_HDR)) ||
		    ((msgs[i].flags & I3C_MSG_HDR) && (msgs[i].hdr_mode == 0))) {
			/* HDR message flag is not set or if hdr flag is set but no hdr mode is set */
			cmd->len = pl;
			cmd->buf = msgs[i].buf;

			cmd->cmd0 = CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
			cmd->cmd0 |= CMD0_FIFO_DEV_ADDR(target->dynamic_addr);

			if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
				cmd->cmd0 |= CMD0_FIFO_RNW;
				/*
				 * For I3C_XMIT_MODE_NO_ADDR reads in SDR mode,
				 * CMD0_FIFO_PL_LEN specifies the abort limit not bytes to read
				 */
				cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl + 1);
			} else {
				cmd->cmd0 |= CMD0_FIFO_PL_LEN(pl);
			}

			/* Send broadcast header on first transfer or after a STOP. */
			if (!(msgs[i].flags & I3C_MSG_NBCH) && (send_broadcast)) {
				cmd->cmd0 |= CMD0_FIFO_BCH;
				send_broadcast = false;
			}

			/*
			 * Send repeated start on all transfers except the last or those marked
			 * STOP.
			 */
			if ((i < (num_msgs - 1)) && ((msgs[i].flags & I3C_MSG_STOP) == 0)) {
				cmd->cmd0 |= CMD0_FIFO_RSBC;
			} else {
				send_broadcast = true;
			}

			/*
			 * write the address of num_xfer which is to be updated upon message
			 * completion
			 */
			cmd->num_xfer = &(msgs[i].num_xfer);
			cmd->hdr = I3C_DATA_RATE_SDR;
		} else if ((data->common.ctrl_config.supported_hdr & I3C_MSG_HDR_DDR) &&
			   (msgs[i].hdr_mode == I3C_MSG_HDR_DDR) && (msgs[i].flags & I3C_MSG_HDR)) {
			uint16_t ddr_header_payload;

			/* DDR sends data out in 16b, so len must be a multiple of 2 */
			if (!((pl % 2) == 0)) {
				ret = -EINVAL;
				goto error;
			}
			/* HDR message flag is set and hdr mode is DDR */
			cmd->buf = msgs[i].buf;

			if ((msgs[i].flags & I3C_MSG_RW_MASK) == I3C_MSG_READ) {
				/* HDR-DDR Read */
				ddr_header_payload = HDR_CMD_RD |
						     HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
						     (target->dynamic_addr << 1);
				/* Parity Adjustment Bit for Reads */
				ddr_header_payload =
					prepare_ddr_cmd_parity_adjustment_bit(ddr_header_payload);
				/* HDR-DDR Command Word */
				cmd->ddr_header =
					DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
			} else {
				/* initial value of CRC5 for HDR-DDR is 0x1F */
				uint8_t crc5 = 0x1F;
				/* HDR-DDR Write */
				ddr_header_payload = HDR_CMD_CODE(msgs[i].hdr_cmd_code) |
						     (target->dynamic_addr << 1);
				/* HDR-DDR Command Word */
				cmd->ddr_header =
					DDR_PREAMBLE_CMD_CRC | prepare_ddr_word(ddr_header_payload);
				/* calculate crc5 over the header and every 16b data word */
				crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
				for (int j = 0; j < pl; j += 2) {
					crc5 = i3c_cdns_crc5(
						crc5,
						sys_get_be16((void *)((uintptr_t)cmd->buf + j)));
				}
				cmd->ddr_crc = DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | (crc5 << 9);
			}
			/* Length of DDR Transfer is length of payload (in 16b) + header and CRC
			 * blocks
			 */
			cmd->len = ((pl / 2) + 2);

			/* prep command FIFO for ENTHDR0 */
			cmd->cmd0 = CMD0_FIFO_IS_CCC;
			cmd->cmd1 = I3C_CCC_ENTHDR0;
			/* write the address of num_xfer which is to be updated upon message
			 * completion
			 */
			cmd->num_xfer = &(msgs[i].num_xfer);
			cmd->hdr = I3C_DATA_RATE_HDR_DDR;
		} else {
			LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, msgs[i].hdr_mode);
			ret = -ENOTSUP;
			goto error;
		}
	}

	data->xfer.ret = -ETIMEDOUT;
	data->xfer.num_cmds = num_msgs;

	cdns_i3c_start_transfer(dev);
	if (k_sem_take(&data->xfer.complete, K_MSEC(1000)) != 0) {
		LOG_ERR("%s: transfer timed out", dev->name);
		cdns_i3c_cancel_transfer(dev);
	}

	ret = data->xfer.ret;

error:
	k_mutex_unlock(&data->bus_lock);

	return ret;
}

#ifdef CONFIG_I3C_USE_IBI
/* Drain @p len bytes of IBI payload from the IBI data FIFO into @p buf,
 * a word at a time. Returns -EIO if the FIFO runs dry early.
 */
static int cdns_i3c_read_ibi_fifo(const struct cdns_i3c_config *config, void *buf, uint32_t len)
{
	uint32_t *ptr = buf;
	uint32_t remain, val;

	for (remain = len; remain >= 4; remain -= 4) {
		if (cdns_i3c_ibi_fifo_empty(config)) {
			return -EIO;
		}
		val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
		*ptr++ = val;
	}

	/* trailing partial word */
	if (remain > 0) {
		if (cdns_i3c_ibi_fifo_empty(config)) {
			return -EIO;
		}
		val = sys_le32_to_cpu(sys_read32(config->base + IBI_DATA_FIFO));
		memcpy(ptr, &val, remain);
	}

	return 0;
}

static void cdns_i3c_handle_ibi(const struct device *dev, uint32_t ibir)
{
	const struct cdns_i3c_config *config = dev->config;
	struct cdns_i3c_data *data = dev->data;

	uint8_t ibi_data[CONFIG_I3C_IBI_MAX_PAYLOAD_SIZE];

	/* The slave ID returned here is the device ID in the SIR map NOT the device ID
	 * in the RR map.
	 */
	uint8_t slave_id = IBIR_SLVID(ibir);

	if (slave_id == IBIR_SLVID_INV) {
		/* DA does not match any value among SIR map */
		return;
	}

	/* SIR map index 0 corresponds to RR slot 1, hence the +1 */
	uint32_t dev_id_rr0 = sys_read32(config->base + DEV_ID_RR0(slave_id + 1));
	uint8_t dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(dev_id_rr0);
	struct i3c_device_desc *desc =
		i3c_dev_list_i3c_addr_find(&data->common.attached_dev, dyn_addr);

	/*
	 * Check for NAK or error conditions.
	 *
	 * Note: The logging is for debugging only so will be compiled out in most cases.
	 * However, if the log level for this module is DEBUG and log mode is IMMEDIATE or MINIMAL,
	 * this option is also set this may cause problems due to being inside an ISR.
	 */
	if (!(IBIR_ACKED & ibir)) {
		LOG_DBG("%s: NAK for slave ID %u", dev->name, (unsigned int)slave_id);
		return;
	}
	if (ibir & IBIR_ERROR) {
		LOG_ERR("%s: Data overflow", dev->name);
		return;
	}

	/* Read out any payload bytes */
	uint8_t ibi_len = IBIR_XFER_BYTES(ibir);

	if (ibi_len > 0) {
		if (cdns_i3c_read_ibi_fifo(config, ibi_data, ibi_len) < 0) {
			LOG_ERR("%s: Failed to get payload", dev->name);
		}
	}

	/* defer target IRQ processing to the IBI workqueue */
	if (i3c_ibi_work_enqueue_target_irq(desc, ibi_data, ibi_len) != 0) {
		LOG_ERR("%s: Error enqueue IBI IRQ work", dev->name);
	}
}

/* Handle a Hot-Join IBI response by deferring to the IBI workqueue. */
static void cdns_i3c_handle_hj(const struct device *dev, uint32_t ibir)
{
	if (!(IBIR_ACKED & ibir)) {
		LOG_DBG("%s: NAK for HJ", dev->name);
		return;
	}

	if (i3c_ibi_work_enqueue_hotjoin(dev) != 0) {
		LOG_ERR("%s: Error enqueue IBI HJ work", dev->name);
	}
}

/* Drain the IBI response FIFO and dispatch each entry by type. */
static void cnds_i3c_master_demux_ibis(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;

	for (uint32_t status0 = sys_read32(config->base + MST_STATUS0);
	     !(status0 & MST_STATUS0_IBIR_EMP);
	     status0 = sys_read32(config->base + MST_STATUS0)) {
		uint32_t ibir = sys_read32(config->base + IBIR);

		switch (IBIR_TYPE(ibir)) {
		case IBIR_TYPE_IBI:
			cdns_i3c_handle_ibi(dev, ibir);
			break;
		case IBIR_TYPE_HJ:
			cdns_i3c_handle_hj(dev, ibir);
			break;
		case IBIR_TYPE_MR:
			/* not implemented */
			break;
		default:
			break;
		}
	}
}

/* Signal that this target's Hot-Join sequence has completed. */
static void cdns_i3c_target_ibi_hj_complete(const struct device *dev)
{
	struct cdns_i3c_data *data = dev->data;

	k_sem_give(&data->ibi_hj_complete);
}
#endif

static void cdns_i3c_irq_handler(const struct device *dev)
{
	const struct cdns_i3c_config *config = dev->config;

	if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) {
		uint32_t int_st = sys_read32(config->base + MST_ISR);

		/* Core halted after two consecutive read aborts */
		if (int_st & MST_INT_HALTED) {
			LOG_WRN("Core Halted, 2 read aborts");
			sys_write32(MST_INT_HALTED, config->base + MST_ICR);
		}

		/* Command queue empty */
		if (int_st & MST_INT_CMDD_EMP) {
			cdns_i3c_complete_transfer(dev);
			sys_write32(MST_INT_CMDD_EMP, config->base + MST_ICR);
		}

		/* Command queue threshold */
		if (int_st & MST_INT_CMDD_THR) {
			sys_write32(MST_INT_CMDD_THR, config->base + MST_ICR);
		}

		/* Command response threshold hit */
		if (int_st & MST_INT_CMDR_THR) {
			sys_write32(MST_INT_CMDR_THR, config->base + MST_ICR);
		}

		/* RX data ready */
		if (int_st & MST_INT_RX_THR) {
			sys_write32(MST_INT_RX_THR, config->base + MST_ICR);
		}

		/* In-band interrupt response threshold */
		if (int_st & MST_INT_IBIR_THR) {
			sys_write32(MST_INT_IBIR_THR, config->base + MST_ICR);
#ifdef CONFIG_I3C_USE_IBI
			cnds_i3c_master_demux_ibis(dev);
#else
			LOG_ERR("%s: IBI received - Kconfig for using IBIs is not enabled",
				dev->name);
#endif
		}

		/* TX FIFO overflow */
		if (int_st & MST_INT_TX_OVF) {
			sys_write32(MST_INT_TX_OVF, config->base + MST_ICR);
			LOG_ERR("%s: controller tx buffer overflow,", dev->name);
		}

		/* RX FIFO underflow */
		if (int_st & MST_INT_RX_UNF) {
			sys_write32(MST_INT_RX_UNF, config->base + MST_ICR);
			LOG_ERR("%s: controller rx buffer underflow,", dev->name);
		}

		/* In-band interrupt data threshold */
		if (int_st & MST_INT_IBID_THR) {
			sys_write32(MST_INT_IBID_THR, config->base + MST_ICR);
		}
	} else {
		/* target (secondary controller) mode interrupts */
		uint32_t int_sl = sys_read32(config->base + SLV_ISR);
		struct cdns_i3c_data *data = dev->data;
		const struct i3c_target_callbacks *target_cb = data->target_config->callbacks;

		/* Clear interrupts */
		sys_write32(int_sl, config->base + SLV_ICR);

		/* SLV SDR rx fifo threshold */
		if (int_sl & SLV_INT_SDR_RX_THR) {
			/* while rx fifo is not empty */
			while (!(sys_read32(config->base + SLV_STATUS1) &
				 SLV_STATUS1_SDR_RX_EMPTY)) {
				if (target_cb != NULL && target_cb->write_received_cb != NULL) {
					cdns_i3c_target_read_rx_fifo(dev);
				}
			}
		}

		/* SLV SDR tx fifo threshold */
		if (int_sl & SLV_INT_SDR_TX_THR) {
			int status = 0;

			if (target_cb != NULL && target_cb->read_processed_cb) {
				/* while tx fifo is not full and there is still data available */
				while ((!(sys_read32(config->base + SLV_STATUS1) &
					  SLV_STATUS1_SDR_TX_FULL)) &&
				       (status == 0)) {
					/* call function pointer for read */
					uint8_t byte;
					/* will return negative if no data left to transmit and 0 if
					 * data available
					 */
					status = target_cb->read_processed_cb(data->target_config,
									      &byte);
					if (status == 0) {
						cdns_i3c_write_tx_fifo(config, &byte, sizeof(byte));
					}
				}
			}
		}

		/* SLV SDR rx complete */
		if (int_sl & SLV_INT_SDR_RD_COMP) {
			/* a read needs to be done on slv_status 0 else a NACK will happen */
			(void)sys_read32(config->base + SLV_STATUS0);
			/* call stop function pointer */
			if (target_cb != NULL && target_cb->stop_cb) {
				target_cb->stop_cb(data->target_config);
			}
		}

		/* SLV SDR tx complete */
		if (int_sl & SLV_INT_SDR_WR_COMP) {
			/* a read needs to be done on slv_status 0 else a NACK will happen */
			(void)sys_read32(config->base + SLV_STATUS0);
			/* clear bytes read parameter */
			data->fifo_bytes_read = 0;
			/* call stop function pointer */
			if (target_cb != NULL && target_cb->stop_cb) {
				target_cb->stop_cb(data->target_config);
			}
		}

		/* DA has been updated */
		if (int_sl & SLV_INT_DA_UPD) {
			LOG_INF("%s: DA updated to 0x%02lx", dev->name,
				SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1)));
			/* HJ could send a DISEC which would trigger the SLV_INT_EVENT_UP bit,
			 * but it's still expected to eventually send a DAA
			 */
#ifdef CONFIG_I3C_USE_IBI
			cdns_i3c_target_ibi_hj_complete(dev);
#endif
		}

		/* HJ complete and DA has been assigned */
		if (int_sl & SLV_INT_HJ_DONE) {
		}

		/* Controllership has been given */
		if (int_sl & SLV_INT_MR_DONE) {
			/* TODO: implement support for controllership handoff */
		}

		/* ENEC or DISEC has been received */
		if (int_sl & SLV_INT_EVENT_UP) {
		}

		/* sdr transfer aborted by controller */
		if (int_sl & SLV_INT_M_RD_ABORT) {
			/* TODO: consider flushing tx buffer? */
		}

		/* SLV SDR rx fifo underflow */
		if (int_sl & SLV_INT_SDR_RX_UNF) {
			LOG_ERR("%s: slave sdr rx buffer underflow", dev->name);
		}

		/* SLV SDR tx fifo overflow */
		if (int_sl & SLV_INT_SDR_TX_OVF) {
			LOG_ERR("%s: slave sdr tx buffer overflow,", dev->name);
		}

		if (int_sl & SLV_INT_DDR_RX_THR) {
		}

		/* SLV DDR WR COMPLETE */
		if (int_sl & SLV_INT_DDR_WR_COMP) {
			/* initial value of CRC5 for HDR-DDR is 0x1F */
			uint8_t crc5 = 0x1F;

			while (!(sys_read32(config->base + SLV_STATUS1) &
				 SLV_STATUS1_DDR_RX_EMPTY)) {
				uint32_t ddr_rx_data = sys_read32(config->base + SLV_DDR_RX_FIFO);
				uint32_t preamble = (ddr_rx_data & DDR_PREAMBLE_MASK);

				if (preamble == DDR_PREAMBLE_DATA_ABORT ||
				    preamble == DDR_PREAMBLE_DATA_ABORT_ALT) {
					uint16_t ddr_payload = DDR_DATA(ddr_rx_data);

					if (cdns_i3c_ddr_parity(ddr_payload) !=
					    (ddr_rx_data & (DDR_ODD_PARITY | DDR_EVEN_PARITY))) {
						LOG_ERR("%s: Received incorrect DDR Parity",
							dev->name);
					}
					/* calculate a running crc */
					crc5 = i3c_cdns_crc5(crc5, ddr_payload);

					if (target_cb != NULL &&
					    target_cb->write_received_cb != NULL) {
						/* DDR receives 2B for each payload */
						target_cb->write_received_cb(
							data->target_config,
							(uint8_t)((ddr_payload >> 8) & 0xFF));
						target_cb->write_received_cb(
							data->target_config,
							(uint8_t)(ddr_payload));
					}

				} else if ((preamble == DDR_PREAMBLE_CMD_CRC) &&
					   ((ddr_rx_data & DDR_CRC_TOKEN_MASK) == DDR_CRC_TOKEN)) {
					/* should come through here last */
					if (crc5 != DDR_CRC(ddr_rx_data)) {
						LOG_ERR("%s: Received incorrect DDR CRC5",
							dev->name);
					}
				} else if (preamble == DDR_PREAMBLE_CMD_CRC) {
					/* should come through here first */
					uint16_t ddr_header_payload = DDR_DATA(ddr_rx_data);

					crc5 = i3c_cdns_crc5(crc5, ddr_header_payload);
				}
			}

			if (target_cb != NULL && target_cb->stop_cb != NULL) {
				target_cb->stop_cb(data->target_config);
			}
		}

		/* SLV DDR read complete */
		if (int_sl & SLV_INT_DDR_RD_COMP) {
			/* a read needs to be done on slv_status 0 else a NACK will happen */
			(void)sys_read32(config->base + SLV_STATUS0);
			/* call stop function pointer */
			if
(target_cb != NULL && target_cb->stop_cb) { target_cb->stop_cb(data->target_config); } } /*SLV DDR TX THR*/ if (int_sl & SLV_INT_DDR_TX_THR) { int status = 0; if (target_cb != NULL && target_cb->read_processed_cb) { while ((!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL)) && (status == 0)) { /* call function pointer for read */ uint8_t byte; /* will return negative if no data left to transmit * and 0 if data available */ status = target_cb->read_processed_cb(data->target_config, &byte); if (status == 0) { cdns_i3c_write_ddr_tx_fifo(config, &byte, sizeof(byte)); } } } } } } static void cdns_i3c_read_hw_cfg(const struct device *dev) { const struct cdns_i3c_config *config = dev->config; struct cdns_i3c_data *data = dev->data; uint32_t devid = sys_read32(config->base + DEV_ID); uint32_t revid = sys_read32(config->base + REV_ID); LOG_DBG("%s: Device info:\r\n" " vid: 0x%03lX, pid: 0x%03lX\r\n" " revision: major = %lu, minor = %lu\r\n" " device ID: 0x%04X", dev->name, REV_ID_VID(revid), REV_ID_PID(revid), REV_ID_REV_MAJOR(revid), REV_ID_REV_MINOR(revid), devid); /* * Depths are specified as number of words (32bit), convert to bytes */ uint32_t cfg0 = sys_read32(config->base + CONF_STATUS0); uint32_t cfg1 = sys_read32(config->base + CONF_STATUS1); data->hw_cfg.rev_id = revid; data->hw_cfg.cmdr_mem_depth = CONF_STATUS0_CMDR_DEPTH(cfg0) * 4; data->hw_cfg.cmd_mem_depth = CONF_STATUS1_CMD_DEPTH(cfg1) * 4; data->hw_cfg.rx_mem_depth = CONF_STATUS1_RX_DEPTH(cfg1) * 4; data->hw_cfg.tx_mem_depth = CONF_STATUS1_TX_DEPTH(cfg1) * 4; data->hw_cfg.ddr_rx_mem_depth = CONF_STATUS1_SLV_DDR_RX_DEPTH(cfg1) * 4; data->hw_cfg.ddr_tx_mem_depth = CONF_STATUS1_SLV_DDR_TX_DEPTH(cfg1) * 4; data->hw_cfg.ibir_mem_depth = CONF_STATUS0_IBIR_DEPTH(cfg0) * 4; data->hw_cfg.ibi_mem_depth = CONF_STATUS1_IBI_DEPTH(cfg0) * 4; LOG_DBG("%s: FIFO info:\r\n" " cmd_mem_depth = %u\r\n" " cmdr_mem_depth = %u\r\n" " rx_mem_depth = %u\r\n" " tx_mem_depth = %u\r\n" " ddr_rx_mem_depth = %u\r\n" " 
ddr_tx_mem_depth = %u\r\n" " ibi_mem_depth = %u\r\n" " ibir_mem_depth = %u", dev->name, data->hw_cfg.cmd_mem_depth, data->hw_cfg.cmdr_mem_depth, data->hw_cfg.rx_mem_depth, data->hw_cfg.tx_mem_depth, data->hw_cfg.ddr_rx_mem_depth, data->hw_cfg.ddr_tx_mem_depth, data->hw_cfg.ibi_mem_depth, data->hw_cfg.ibir_mem_depth); /* Regardless of the cmd depth size we are limited by our cmd array length. */ data->hw_cfg.cmd_mem_depth = MIN(data->hw_cfg.cmd_mem_depth, ARRAY_SIZE(data->xfer.cmds)); } /** * @brief Get configuration of the I3C hardware. * * This provides a way to get the current configuration of the I3C hardware. * * This can return cached config or probed hardware parameters, but it has to * be up to date with current configuration. * * @param[in] dev Pointer to controller device driver instance. * @param[in] type Type of configuration parameters being passed * in @p config. * @param[in,out] config Pointer to the configuration parameters. * * Note that if @p type is @c I3C_CONFIG_CUSTOM, @p config must contain * the ID of the parameter to be retrieved. * * @retval 0 If successful. * @retval -EIO General Input/Output errors. * @retval -ENOSYS If not implemented. 
*/ static int cdns_i3c_config_get(const struct device *dev, enum i3c_config_type type, void *config) { struct cdns_i3c_data *data = dev->data; int ret = 0; if (config == NULL) { ret = -EINVAL; goto out_configure; } (void)memcpy(config, &data->common.ctrl_config, sizeof(data->common.ctrl_config)); out_configure: return ret; } static int cdns_i3c_target_tx_ddr_write(const struct device *dev, uint8_t *buf, uint16_t len) { const struct cdns_i3c_config *config = dev->config; struct cdns_i3c_data *data = dev->data; uint32_t i, preamble; uint32_t data_word; uint8_t crc5 = 0x1F; /* check if there is space available in the tx fifo */ if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL) { return -ENOSPC; } /* DDR sends data out in 16b, so len must be a multiple of 2 */ if (!((len % 2) == 0)) { return -EINVAL; } /* Header shall be known in advanced to calculate crc5 */ uint8_t slave_da = SLV_STATUS1_DA(sys_read32(config->base + SLV_STATUS1)); uint16_t ddr_payload_header = HDR_CMD_RD | (slave_da << 1); ddr_payload_header = prepare_ddr_cmd_parity_adjustment_bit(ddr_payload_header); crc5 = i3c_cdns_crc5(crc5, ddr_payload_header); /* write as much as you can to the fifo */ for (i = 0; i < len && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL)); i += 2) { /* Use ALT with other than first packets */ preamble = (i > 0) ? 
DDR_PREAMBLE_DATA_ABORT_ALT : DDR_PREAMBLE_DATA_ABORT; data_word = (preamble | prepare_ddr_word(sys_get_be16(&buf[i]))); crc5 = i3c_cdns_crc5(crc5, sys_get_be16(&buf[i])); sys_write32(data_word, config->base + SLV_DDR_TX_FIFO); } /* end of data buffer, write crc packet (if we are still not full) */ if ((i == len) && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_DDR_TX_FULL))) { sys_write32(DDR_PREAMBLE_CMD_CRC | DDR_CRC_TOKEN | crc5 << 9, config->base + SLV_DDR_TX_FIFO); } /* setup THR interrupt */ uint32_t thr_ctrl = sys_read32(config->base + SLV_DDR_TX_RX_THR_CTRL); /* * Interrupt at half of the data or FIFO depth to give it enough time to be * processed. The ISR will then callback to the function pointer * `read_processed_cb` to collect more data to transmit */ thr_ctrl &= ~TX_THR_MASK; thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2)); sys_write32(thr_ctrl, config->base + SLV_DDR_TX_RX_THR_CTRL); /* return total bytes written */ return i; } /** * @brief Writes to the Target's TX FIFO * * The Cadence I3C will then ACK read requests to it's TX FIFO from a * Controller * * @param dev Pointer to the device structure for an I3C controller * driver configured in target mode. 
* @param buf Pointer to the buffer * @param len Length of the buffer * * @retval Total number of bytes written * @retval -EACCES Not in Target Mode * @retval -ENOSPC No space in Tx FIFO */ static int cdns_i3c_target_tx_write(const struct device *dev, uint8_t *buf, uint16_t len, uint8_t hdr_mode) { const struct cdns_i3c_config *config = dev->config; struct cdns_i3c_data *data = dev->data; struct i3c_config_controller *ctrl_config = &data->common.ctrl_config; const uint32_t *buf_32 = (uint32_t *)buf; uint32_t i = 0; uint32_t val = 0; uint16_t remain = len; /* check if we are currently a target */ if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) { return -EACCES; } /* check if there is space available in the tx fifo */ if (sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL) { return -ENOSPC; } k_mutex_lock(&data->bus_lock, K_FOREVER); /* rev 1p7 requires the length be written to the SLV_CTRL reg */ if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) { sys_write32(len, config->base + SLV_CTRL); } if (hdr_mode == I3C_MSG_HDR_DDR) { if (ctrl_config->supported_hdr & I3C_MSG_HDR_DDR) { i = cdns_i3c_target_tx_ddr_write(dev, buf, len); /* TODO: DDR THR interrupt support not implemented yet*/ } else { LOG_ERR("%s: HDR-DDR not supported", dev->name); i = -ENOTSUP; } } else if (hdr_mode == 0) { /* write as much as you can to the fifo */ while (i < len && (!(sys_read32(config->base + SLV_STATUS1) & SLV_STATUS1_SDR_TX_FULL))) { /* with rev 1p7, while as a target, the fifos are using the full word, * otherwise only the first byte is used */ if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) { remain = len - i; if (remain >= 4) { val = *buf_32++; } else if (remain > 0) { val = 0; memcpy(&val, buf_32, remain); } sys_write32(val, config->base + TX_FIFO); i += 4; } else { sys_write32((uint32_t)buf[i], config->base + TX_FIFO); i++; } } /* setup THR interrupt */ uint32_t thr_ctrl = sys_read32(config->base + TX_RX_THR_CTRL); /* * 
Interrupt at half of the data or FIFO depth to give it enough time to be * processed. The ISR will then callback to the function pointer * `read_processed_cb` to collect more data to transmit */ thr_ctrl &= ~TX_THR_MASK; thr_ctrl |= TX_THR(MIN((data->hw_cfg.tx_mem_depth / 4) / 2, len / 2)); sys_write32(thr_ctrl, config->base + TX_RX_THR_CTRL); } else { LOG_ERR("%s: Unsupported HDR Mode %d", dev->name, hdr_mode); i = -ENOTSUP; } k_mutex_unlock(&data->bus_lock); /* return total bytes written */ return i; } /** * @brief Instructs the I3C Target device to register itself to the I3C Controller * * This routine instructs the I3C Target device to register itself to the I3C * Controller via its parent controller's i3c_target_register() API. * * @param dev Pointer to target device driver instance. * @param cfg Config struct with functions and parameters used by the I3C driver * to send bus events * * @return @see i3c_device_find. */ static int cdns_i3c_target_register(const struct device *dev, struct i3c_target_config *cfg) { struct cdns_i3c_data *data = dev->data; data->target_config = cfg; return 0; } /** * @brief Unregisters the provided config as Target device * * This routine disables I3C target mode for the 'dev' I3C bus driver using * the provided 'config' struct containing the functions and parameters * to send bus events. * * @param dev Pointer to target device driver instance. * @param cfg Config struct with functions and parameters used by the I3C driver * to send bus events * * @return @see i3c_device_find. */ static int cdns_i3c_target_unregister(const struct device *dev, struct i3c_target_config *cfg) { /* no way to disable? maybe write DA to 0? */ return 0; } /** * @brief Find a registered I3C target device. * * This returns the I3C device descriptor of the I3C device * matching the incoming @p id. * * @param dev Pointer to controller device driver instance. * @param id Pointer to I3C device ID. * * @return @see i3c_device_find. 
*/ static struct i3c_device_desc *cdns_i3c_device_find(const struct device *dev, const struct i3c_device_id *id) { const struct cdns_i3c_config *config = dev->config; return i3c_dev_list_find(&config->common.dev_list, id); } /** * Find a registered I2C target device. * * Controller only API. * * This returns the I2C device descriptor of the I2C device * matching the device address @p addr. * * @param dev Pointer to controller device driver instance. * @param id I2C target device address. * * @return @see i3c_i2c_device_find. */ static struct i3c_i2c_device_desc *cdns_i3c_i2c_device_find(const struct device *dev, uint16_t addr) { struct cdns_i3c_data *data = dev->data; return i3c_dev_list_i2c_addr_find(&data->common.attached_dev, addr); } /** * @brief Transfer messages in I2C mode. * * @see i2c_transfer * * @param dev Pointer to device driver instance. * @param target Pointer to target device descriptor. * @param msgs Pointer to I2C messages. * @param num_msgs Number of messages to transfers. * * @return @see i2c_transfer */ static int cdns_i3c_i2c_api_transfer(const struct device *dev, struct i2c_msg *msgs, uint8_t num_msgs, uint16_t addr) { struct i3c_i2c_device_desc *i2c_dev = cdns_i3c_i2c_device_find(dev, addr); int ret; if (i2c_dev == NULL) { ret = -ENODEV; } else { ret = cdns_i3c_i2c_transfer(dev, i2c_dev, msgs, num_msgs); } return ret; } /** * Determine I3C bus mode from the i2c devices on the bus * * Reads the LVR of all I2C devices and returns the I3C bus * Mode * * @param dev_list Pointer to device list * * @return @see enum i3c_bus_mode. 
*/ static enum i3c_bus_mode i3c_bus_mode(const struct i3c_dev_list *dev_list) { enum i3c_bus_mode mode = I3C_BUS_MODE_PURE; for (int i = 0; i < dev_list->num_i2c; i++) { switch (I3C_LVR_I2C_DEV_IDX(dev_list->i2c[i].lvr)) { case I3C_LVR_I2C_DEV_IDX_0: if (mode < I3C_BUS_MODE_MIXED_FAST) { mode = I3C_BUS_MODE_MIXED_FAST; } break; case I3C_LVR_I2C_DEV_IDX_1: if (mode < I3C_BUS_MODE_MIXED_LIMITED) { mode = I3C_BUS_MODE_MIXED_LIMITED; } break; case I3C_LVR_I2C_DEV_IDX_2: if (mode < I3C_BUS_MODE_MIXED_SLOW) { mode = I3C_BUS_MODE_MIXED_SLOW; } break; default: mode = I3C_BUS_MODE_INVALID; break; } } return mode; } /** * Determine THD_DEL value for CTRL register * * @param dev Pointer to device driver instance. * * @return Value to be written to THD_DEL */ static uint8_t cdns_i3c_clk_to_data_turnaround(const struct device *dev) { const struct cdns_i3c_config *config = dev->config; uint32_t input_clock_frequency = config->input_frequency; uint8_t thd_delay = DIV_ROUND_UP(I3C_TSCO_DEFAULT_NS, (NSEC_PER_SEC / input_clock_frequency)); if (thd_delay > THD_DELAY_MAX) { thd_delay = THD_DELAY_MAX; } return (THD_DELAY_MAX - thd_delay); } /** * @brief Initialize the hardware. * * @param dev Pointer to controller device driver instance. */ static int cdns_i3c_bus_init(const struct device *dev) { struct cdns_i3c_data *data = dev->data; const struct cdns_i3c_config *config = dev->config; struct i3c_config_controller *ctrl_config = &data->common.ctrl_config; cdns_i3c_read_hw_cfg(dev); /* Clear all retaining regs */ sys_write32(DEVS_CTRL_DEV_CLR_ALL, config->base + DEVS_CTRL); uint32_t conf0 = sys_read32(config->base + CONF_STATUS0); uint32_t conf1 = sys_read32(config->base + CONF_STATUS1); data->max_devs = CONF_STATUS0_DEVS_NUM(conf0); data->free_rr_slots = GENMASK(data->max_devs, 1); /* DDR supported bit moved in 1p7 revision along with dev role added */ if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 7)) { ctrl_config->supported_hdr = (conf1 & CONF_STATUS1_SUPPORTS_DDR) ? 
I3C_MSG_HDR_DDR : 0; ctrl_config->is_secondary = (CONF_STATUS0_DEV_ROLE(conf0) == CONF_STATUS0_DEV_ROLE_SEC_MASTER) ? true : false; } else { ctrl_config->supported_hdr = (conf0 & CONF_STATUS0_SUPPORTS_DDR) ? I3C_MSG_HDR_DDR : 0; ctrl_config->is_secondary = (conf0 & CONF_STATUS0_SEC_MASTER) ? true : false; } k_mutex_init(&data->bus_lock); k_sem_init(&data->xfer.complete, 0, 1); k_sem_init(&data->ibi_hj_complete, 0, 1); cdns_i3c_interrupts_disable(config); cdns_i3c_interrupts_clear(config); config->irq_config_func(dev); /* Ensure the bus is disabled. */ sys_write32(~CTRL_DEV_EN & sys_read32(config->base + CTRL), config->base + CTRL); /* determine prescaler timings for i3c and i2c scl */ cdns_i3c_set_prescalers(dev); enum i3c_bus_mode mode = i3c_bus_mode(&config->common.dev_list); LOG_DBG("%s: i3c bus mode %d", dev->name, mode); int cdns_mode; switch (mode) { case I3C_BUS_MODE_PURE: cdns_mode = CTRL_PURE_BUS_MODE; break; case I3C_BUS_MODE_MIXED_FAST: cdns_mode = CTRL_MIXED_FAST_BUS_MODE; break; case I3C_BUS_MODE_MIXED_LIMITED: case I3C_BUS_MODE_MIXED_SLOW: cdns_mode = CTRL_MIXED_SLOW_BUS_MODE; break; default: return -EINVAL; } /* * When a Hot-Join request happens, disable all events coming from this device. * We will issue ENTDAA afterwards from the threaded IRQ handler. * Set HJ ACK later after bus init to prevent targets from indirect DAA enforcement. * * Set the I3C Bus Mode based on the LVR of the I2C devices */ uint32_t ctrl = CTRL_HJ_DISEC | CTRL_MCS_EN | (CTRL_BUS_MODE_MASK & cdns_mode); /* Disable Controllership requests as it is not supported yet by the driver */ ctrl &= ~CTRL_MST_ACK; /* * Cadence I3C release r104v1p0 and above support configuration of the clock to data * turnaround time. 
*/ if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 4)) { ctrl |= CTRL_THD_DELAY(cdns_i3c_clk_to_data_turnaround(dev)); } /* * Cadence I3C release r105v1p0 and above support I3C v1.1 timing change * for tCASHr_min = tCAS_min / 2, otherwise tCASr_min = tCAS_min (as * per MIPI spec v1.0) */ if (REV_ID_REV(data->hw_cfg.rev_id) >= REV_ID_VERSION(1, 5)) { ctrl |= CTRL_I3C_11_SUPP; } /* write ctrl register value */ sys_write32(ctrl, config->base + CTRL); /* enable Core */ sys_write32(CTRL_DEV_EN | ctrl, config->base + CTRL); /* Set fifo thresholds. */ sys_write32(CMD_THR(I3C_CMDD_THR) | IBI_THR(I3C_IBID_THR) | CMDR_THR(I3C_CMDR_THR) | IBIR_THR(I3C_IBIR_THR), config->base + CMD_IBI_THR_CTRL); /* Set TX/RX interrupt thresholds. */ if (sys_read32(config->base + MST_STATUS0) & MST_STATUS0_MASTER_MODE) { sys_write32(TX_THR(I3C_TX_THR) | RX_THR(data->hw_cfg.rx_mem_depth), config->base + TX_RX_THR_CTRL); } else { sys_write32(TX_THR(1) | RX_THR(1), config->base + TX_RX_THR_CTRL); sys_write32(SLV_DDR_TX_THR(0) | SLV_DDR_RX_THR(1), config->base + SLV_DDR_TX_RX_THR_CTRL); } /* enable target interrupts */ sys_write32(SLV_INT_DA_UPD | SLV_INT_SDR_RD_COMP | SLV_INT_SDR_WR_COMP | SLV_INT_SDR_RX_THR | SLV_INT_SDR_TX_THR | SLV_INT_SDR_RX_UNF | SLV_INT_SDR_TX_OVF | SLV_INT_HJ_DONE | SLV_INT_DDR_WR_COMP | SLV_INT_DDR_RD_COMP | SLV_INT_DDR_RX_THR | SLV_INT_DDR_TX_THR, config->base + SLV_IER); /* Enable IBI interrupts. */ sys_write32(MST_INT_IBIR_THR | MST_INT_RX_UNF | MST_INT_HALTED | MST_INT_TX_OVF, config->base + MST_IER); int ret = i3c_addr_slots_init(dev); if (ret != 0) { return ret; } /* Program retaining regs. */ cdns_i3c_program_controller_retaining_reg(dev); /* only primary controllers are responsible for initializing the bus */ if (!ctrl_config->is_secondary) { /* Sleep to wait for bus idle. 
*/ k_busy_wait(201); /* Perform bus initialization */ ret = i3c_bus_init(dev, &config->common.dev_list); #ifdef CONFIG_I3C_USE_IBI /* Bus Initialization Complete, allow HJ ACKs */ sys_write32(CTRL_HJ_ACK | sys_read32(config->base + CTRL), config->base + CTRL); #endif } return 0; } static struct i3c_driver_api api = { .i2c_api.configure = cdns_i3c_i2c_api_configure, .i2c_api.transfer = cdns_i3c_i2c_api_transfer, .configure = cdns_i3c_configure, .config_get = cdns_i3c_config_get, .attach_i3c_device = cdns_i3c_attach_device, .reattach_i3c_device = cdns_i3c_reattach_device, .detach_i3c_device = cdns_i3c_detach_device, .attach_i2c_device = cdns_i3c_i2c_attach_device, .detach_i2c_device = cdns_i3c_i2c_detach_device, .do_daa = cdns_i3c_do_daa, .do_ccc = cdns_i3c_do_ccc, .i3c_device_find = cdns_i3c_device_find, .i3c_xfers = cdns_i3c_transfer, .target_tx_write = cdns_i3c_target_tx_write, .target_register = cdns_i3c_target_register, .target_unregister = cdns_i3c_target_unregister, #ifdef CONFIG_I3C_USE_IBI .ibi_enable = cdns_i3c_controller_ibi_enable, .ibi_disable = cdns_i3c_controller_ibi_disable, .ibi_raise = cdns_i3c_target_ibi_raise, #endif }; #define CADENCE_I3C_INSTANTIATE(n) \ static void cdns_i3c_config_func_##n(const struct device *dev); \ static struct i3c_device_desc cdns_i3c_device_array_##n[] = I3C_DEVICE_ARRAY_DT_INST(n); \ static struct i3c_i2c_device_desc cdns_i3c_i2c_device_array_##n[] = \ I3C_I2C_DEVICE_ARRAY_DT_INST(n); \ static const struct cdns_i3c_config i3c_config_##n = { \ .base = DT_INST_REG_ADDR(n), \ .input_frequency = DT_INST_PROP(n, input_clock_frequency), \ .irq_config_func = cdns_i3c_config_func_##n, \ .common.dev_list.i3c = cdns_i3c_device_array_##n, \ .common.dev_list.num_i3c = ARRAY_SIZE(cdns_i3c_device_array_##n), \ .common.dev_list.i2c = cdns_i3c_i2c_device_array_##n, \ .common.dev_list.num_i2c = ARRAY_SIZE(cdns_i3c_i2c_device_array_##n), \ }; \ static struct cdns_i3c_data i3c_data_##n = { \ .common.ctrl_config.scl.i3c = DT_INST_PROP_OR(n, 
i3c_scl_hz, 0), \ .common.ctrl_config.scl.i2c = DT_INST_PROP_OR(n, i2c_scl_hz, 0), \ }; \ DEVICE_DT_INST_DEFINE(n, cdns_i3c_bus_init, NULL, &i3c_data_##n, &i3c_config_##n, \ POST_KERNEL, CONFIG_I3C_CONTROLLER_INIT_PRIORITY, &api); \ static void cdns_i3c_config_func_##n(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), cdns_i3c_irq_handler, \ DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ }; #define DT_DRV_COMPAT cdns_i3c DT_INST_FOREACH_STATUS_OKAY(CADENCE_I3C_INSTANTIATE) ```
/content/code_sandbox/drivers/i3c/i3c_cdns.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
31,568
```unknown config CDNS_SDHC bool "CDNS SDHC" default y depends on DT_HAS_CDNS_SDHC_ENABLED select SDHC_SUPPORTS_NATIVE_MODE help Enable Cadence SDMMC Host Controller. if CDNS_SDHC # Cadence SDHC DMA needs 64 bit aligned buffers config SDHC_BUFFER_ALIGNMENT default 8 config CDNS_DESC_COUNT int "Allocate number of descriptors" default 8 help SD host controllers require DMA preparation for read and write operation. Creates static descriptors which can be used by ADMA. Devices should configure this flag if they require to transfer more than 8*64Kb of data. endif # CDNS_SDHC ```
/content/code_sandbox/drivers/sdhc/Kconfig.sdhc_cdns
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
156
```unknown config SPI_SDHC bool "SD protocol over SPI bus" default y depends on DT_HAS_ZEPHYR_SDHC_SPI_SLOT_ENABLED select SPI select CRC select SDHC_SUPPORTS_SPI_MODE help Enable the SPI SD host controller driver ```
/content/code_sandbox/drivers/sdhc/Kconfig.spi
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
58
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_DISK_EMMC_HOST_H_ #define ZEPHYR_DRIVERS_DISK_EMMC_HOST_H_ /* Bit map for command Register */ #define EMMC_HOST_CMD_RESP_TYPE_LOC 0 #define EMMC_HOST_CMD_CRC_CHECK_EN_LOC 3 #define EMMC_HOST_CMD_IDX_CHECK_EN_LOC 4 #define EMMC_HOST_CMD_DATA_PRESENT_LOC 5 #define EMMC_HOST_CMD_TYPE_LOC 6 #define EMMC_HOST_CMD_INDEX_LOC 8 /* Bit map for Transfer Mode Register */ #define EMMC_HOST_XFER_DMA_EN_LOC 0 #define EMMC_HOST_XFER_BLOCK_CNT_EN_LOC 1 #define EMMC_HOST_XFER_AUTO_CMD_EN_LOC 2 #define EMMC_HOST_XFER_DATA_DIR_LOC 4 #define EMMC_HOST_XFER_MULTI_BLOCK_SEL_LOC 5 #define EMMC_HOST_XFER_DMA_EN_MASK 0x01 #define EMMC_HOST_XFER_BLOCK_CNT_EN_MASK 0x01 #define EMMC_HOST_XFER_AUTO_CMD_EN_MASK 0x03 #define EMMC_HOST_XFER_DATA_DIR_MASK 0x01 #define EMMC_HOST_XFER_MULTI_BLOCK_SEL_MASK 0x01 /* Bit map for Block Size and GAP Register */ #define EMMC_HOST_BLOCK_SIZE_LOC 0 #define EMMC_HOST_BLOCK_SIZE_MASK 0xFFF #define EMMC_HOST_DMA_BUF_SIZE_LOC 12 #define EMMC_HOST_DMA_BUF_SIZE_MASK 0x07 #define EMMC_HOST_BLOCK_GAP_LOC 3 #define EMMC_HOST_BLOCK_GAP_MASK 0x01 #define EMMC_HOST_ADMA_BUFF_ADD_LOC 32 #define EMMC_HOST_ADMA_BUFF_LEN_LOC 16 #define EMMC_HOST_ADMA_BUFF_LINK_NEXT (0x3 << 4) #define EMMC_HOST_ADMA_BUFF_LINK_LAST (0x2 << 4) #define EMMC_HOST_ADMA_INTR_EN BIT(2) #define EMMC_HOST_ADMA_BUFF_LAST BIT(1) #define EMMC_HOST_ADMA_BUFF_VALID BIT(0) /* Bit Map and length details for Clock Control Register */ #define EMMC_HOST_CLK_SDCLCK_FREQ_SEL_LOC 8 #define EMMC_HOST_CLK_SDCLCK_FREQ_SEL_UPPER_LOC 6 #define EMMC_HOST_CLK_SDCLCK_FREQ_SEL_MASK 0xFF #define EMMC_HOST_CLK_SDCLCK_FREQ_SEL_UPPER_MASK 0x03 /* Bit Map for Host Control 1 Register */ #define EMMC_HOST_CTRL1_DAT_WIDTH_LOC 1 #define EMMC_HOST_CTRL1_DMA_SEL_LOC 3 #define EMMC_HOST_CTRL1_EXT_DAT_WIDTH_LOC 5 #define EMMC_HOST_CTRL1_DMA_SEL_MASK 0x03 #define EMMC_HOST_CTRL1_EXT_DAT_WIDTH_MASK 0x01 #define EMMC_HOST_CTRL1_DAT_WIDTH_MASK 0x01 /** Constants Software Reset register */ 
#define EMMC_HOST_SW_RESET_REG_ALL BIT(0) #define EMMC_HOST_SW_RESET_REG_CMD BIT(1) #define EMMC_HOST_SW_RESET_REG_DATA BIT(2) #define EMMC_HOST_RESPONSE_SIZE 4 #define EMMC_HOST_OCR_BUSY_BIT BIT(31) #define EMMC_HOST_OCR_CAPACITY_MASK 0x40000000U #define EMMC_HOST_DUAL_VOLTAGE_RANGE 0x40FF8080U #define EMMC_HOST_BLOCK_SIZE 512 #define EMMC_HOST_RCA_SHIFT 16 #define EMMC_HOST_EXTCSD_SEC_COUNT 53 #define EMMC_HOST_EXTCSD_GENERIC_CMD6_TIME 62 #define EMMC_HOST_EXTCSD_BUS_WIDTH_ADDR 0xB7 #define EMMC_HOST_EXTCSD_HS_TIMING_ADDR 0xB9 #define EMMC_HOST_BUS_SPEED_HIGHSPEED 1 #define EMMC_HOST_CMD_COMPLETE_RETRY 10000 #define EMMC_HOST_XFR_COMPLETE_RETRY 2000000 #define EMMC_HOST_CMD1_RETRY_TIMEOUT 1000 #define EMMC_HOST_CMD6_TIMEOUT_MULT 10 #define EMMC_HOST_NORMAL_INTR_MASK 0x3f #define EMMC_HOST_ERROR_INTR_MASK 0x13ff #define EMMC_HOST_NORMAL_INTR_MASK_CLR 0x60ff #define EMMC_HOST_POWER_CTRL_SD_BUS_POWER 0x1 #define EMMC_HOST_POWER_CTRL_SD_BUS_VOLT_SEL 0x5 #define EMMC_HOST_UHSMODE_SDR12 0x0 #define EMMC_HOST_UHSMODE_SDR25 0x1 #define EMMC_HOST_UHSMODE_SDR50 0x2 #define EMMC_HOST_UHSMODE_SDR104 0x3 #define EMMC_HOST_UHSMODE_DDR50 0x4 #define EMMC_HOST_UHSMODE_HS400 0x5 #define EMMC_HOST_CTRL2_1P8V_SIG_EN 1 #define EMMC_HOST_CTRL2_1P8V_SIG_LOC 3 #define EMMC_HOST_CTRL2_UHS_MODE_SEL_LOC 0 #define EMMC_HOST_CTRL2_UHS_MODE_SEL_MASK 0x07 /* Event/command status */ #define EMMC_HOST_CMD_COMPLETE BIT(0) #define EMMC_HOST_XFER_COMPLETE BIT(1) #define EMMC_HOST_BLOCK_GAP_INTR BIT(2) #define EMMC_HOST_DMA_INTR BIT(3) #define EMMC_HOST_BUF_WR_READY BIT(4) #define EMMC_HOST_BUF_RD_READY BIT(5) #define EMMC_HOST_CMD_TIMEOUT_ERR BIT(0) #define EMMC_HOST_CMD_CRC_ERR BIT(1) #define EMMC_HOST_CMD_END_BIT_ERR BIT(2) #define EMMC_HOST_CMD_IDX_ERR BIT(3) #define EMMC_HOST_DATA_TIMEOUT_ERR BIT(4) #define EMMC_HOST_DATA_CRC_ERR BIT(5) #define EMMC_HOST_DATA_END_BIT_ERR BIT(6) #define EMMC_HOST_CUR_LMT_ERR BIT(7) #define EMMC_HOST_DMA_TXFR_ERR BIT(12) #define EMMC_HOST_ERR_STATUS 0xFFF /** 
PState register bits */ #define EMMC_HOST_PSTATE_CMD_INHIBIT BIT(0) #define EMMC_HOST_PSTATE_DAT_INHIBIT BIT(1) #define EMMC_HOST_PSTATE_DAT_LINE_ACTIVE BIT(2) #define EMMC_HOST_PSTATE_WR_DMA_ACTIVE BIT(8) #define EMMC_HOST_PSTATE_RD_DMA_ACTIVE BIT(9) #define EMMC_HOST_PSTATE_BUF_READ_EN BIT(11) #define EMMC_HOST_PSTATE_BUF_WRITE_EN BIT(10) #define EMMC_HOST_PSTATE_CARD_INSERTED BIT(16) #define EMMC_HOST_MAX_TIMEOUT 0xe #define EMMC_HOST_MSEC_DELAY 1000 /** Constants for Clock Control register */ #define EMMC_HOST_INTERNAL_CLOCK_EN BIT(0) #define EMMC_HOST_INTERNAL_CLOCK_STABLE BIT(1) #define EMMC_HOST_SD_CLOCK_EN BIT(2) /** Clock frequency */ #define EMMC_HOST_CLK_FREQ_400K 0.4 #define EMMC_HOST_CLK_FREQ_25M 25 #define EMMC_HOST_CLK_FREQ_50M 50 #define EMMC_HOST_CLK_FREQ_100M 100 #define EMMC_HOST_CLK_FREQ_200M 200 #define EMMC_HOST_TUNING_SUCCESS BIT(7) #define EMMC_HOST_START_TUNING BIT(6) #define EMMC_HOST_VOL_3_3_V_SUPPORT BIT(24) #define EMMC_HOST_VOL_3_3_V_SELECT (7 << 1) #define EMMC_HOST_VOL_3_0_V_SUPPORT BIT(25) #define EMMC_HOST_VOL_3_0_V_SELECT (6 << 1) #define EMMC_HOST_VOL_1_8_V_SUPPORT BIT(26) #define EMMC_HOST_VOL_1_8_V_SELECT (5 << 1) #define EMMC_HOST_CMD_WAIT_TIMEOUT_US 3000 #define EMMC_HOST_CMD_CMPLETE_TIMEOUT_US 9000 #define EMMC_HOST_XFR_CMPLETE_TIMEOUT_US 1000 #define EMMC_HOST_SDMA_BOUNDARY 0x0 #define EMMC_HOST_RCA_ADDRESS 0x2 #define EMMC_HOST_RESP_MASK (0xFF000000U) #define EMMC_HOST_SET_RESP(resp0, resp1) (resp0 >> 1) | ((resp1 & 1) << 30) #define SET_BITS(reg, pos, bit_width, val) \ reg &= ~(bit_width << pos); \ reg |= ((val & bit_width) << pos) /* get value from certain bit */ #define GET_BITS(reg_name, start, width) ((reg_name) & (((1 << (width)) - 1) << (start))) #define ERR_INTR_STATUS_EVENT(reg_bits) reg_bits << 16 #define ADDRESS_32BIT_MASK 0xFFFFFFFF struct emmc_reg { volatile uint32_t sdma_sysaddr; /**< SDMA System Address */ volatile uint16_t block_size; /**< Block Size */ volatile uint16_t block_count; /**< Block Count */ 
volatile uint32_t argument; /**< Argument */ volatile uint16_t transfer_mode; /**< Transfer Mode */ volatile uint16_t cmd; /**< Command */ volatile uint32_t resp_01; /**< Response Register 0 & 1 */ volatile uint16_t resp_2; /**< Response Register 2*/ volatile uint16_t resp_3; /**< Response Register 3 */ volatile uint16_t resp_4; /**< Response Register 4 */ volatile uint16_t resp_5; /**< Response Register 5 */ volatile uint16_t resp_6; /**< Response Register 6 */ volatile uint16_t resp_7; /**< Response Register 7 */ volatile uint32_t data_port; /**< Buffer Data Port */ volatile uint32_t present_state; /**< Present State */ volatile uint8_t host_ctrl1; /**< Host Control 1 */ volatile uint8_t power_ctrl; /**< Power Control */ volatile uint8_t block_gap_ctrl; /**< Block Gap Control */ volatile uint8_t wake_up_ctrl; /**< Wakeup Control */ volatile uint16_t clock_ctrl; /**< Clock Control */ volatile uint8_t timeout_ctrl; /**< Timeout Control */ volatile uint8_t sw_reset; /**< Software Reset */ volatile uint16_t normal_int_stat; /**< Normal Interrupt Status */ volatile uint16_t err_int_stat; /**< Error Interrupt Status */ volatile uint16_t normal_int_stat_en; /**< Normal Interrupt Status Enable */ volatile uint16_t err_int_stat_en; /**< Error Interrupt Status Enable */ volatile uint16_t normal_int_signal_en; /**< Normal Interrupt Signal Enable */ volatile uint16_t err_int_signal_en; /**< Error Interrupt Signal Enable */ volatile uint16_t auto_cmd_err_stat; /**< Auto CMD Error Status */ volatile uint16_t host_ctrl2; /**< Host Control 2 */ volatile uint64_t capabilities; /**< Capabilities */ volatile uint64_t max_current_cap; /**< Max Current Capabilities */ volatile uint16_t force_err_autocmd_stat; /**< Force Event for Auto CMD Error Status*/ volatile uint16_t force_err_int_stat; /**< Force Event for Error Interrupt Status */ volatile uint8_t adma_err_stat; /**< ADMA Error Status */ volatile uint8_t reserved[3]; volatile uint32_t adma_sys_addr1; /**< ADMA System Address1 
*/ volatile uint32_t adma_sys_addr2; /**< ADMA System Address2 */ volatile uint16_t preset_val_0; /**< Preset Value 0 */ volatile uint16_t preset_val_1; /**< Preset Value 1 */ volatile uint16_t preset_val_2; /**< Preset Value 2 */ volatile uint16_t preset_val_3; /**< Preset Value 3 */ volatile uint16_t preset_val_4; /**< Preset Value 4 */ volatile uint16_t preset_val_5; /**< Preset Value 5 */ volatile uint16_t preset_val_6; /**< Preset Value 6 */ volatile uint16_t preset_val_7; /**< Preset Value 7 */ volatile uint32_t boot_timeout; /**< Boot Timeout */ volatile uint16_t preset_val_8; /**< Preset Value 8 */ volatile uint16_t reserved3; volatile uint16_t vendor_reg; /**< Vendor Enhanced strobe */ volatile uint16_t reserved4[56]; volatile uint32_t reserved5[4]; volatile uint16_t slot_intr_stat; /**< Slot Interrupt Status */ volatile uint16_t host_cntrl_version; /**< Host Controller Version */ volatile uint32_t reserved6[64]; volatile uint32_t cq_ver; /**< Command Queue Version */ volatile uint32_t cq_cap; /**< Command Queue Capabilities */ volatile uint32_t cq_cfg; /**< Command Queue Configuration */ volatile uint32_t cq_ctrl; /**< Command Queue Control */ volatile uint32_t cq_intr_stat; /**< Command Queue Interrupt Status */ volatile uint32_t cq_intr_stat_en; /**< Command Queue Interrupt Status Enable */ volatile uint32_t cq_intr_sig_en; /**< Command Queue Interrupt Signal Enable */ volatile uint32_t cq_intr_coalesc; /**< Command Queue Interrupt Coalescing */ volatile uint32_t cq_tdlba; /**< Command Queue Task Desc List Base Addr */ volatile uint32_t cq_tdlba_upr; /**< Command Queue Task Desc List Base Addr Upr */ volatile uint32_t cq_task_db; /**< Command Queue Task DoorBell */ volatile uint32_t cq_task_db_notify; /**< Command Queue Task DoorBell Notify */ volatile uint32_t cq_dev_qstat; /**< Command Queue Device queue status */ volatile uint32_t cq_dev_pend_task; /**< Command Queue Device pending tasks */ volatile uint32_t cq_task_clr; /**< Command Queue Task Clr 
*/ volatile uint32_t reserved7; volatile uint32_t cq_ssc1; /**< Command Queue Send Status Configuration 1 */ volatile uint32_t cq_ssc2; /**< Command Queue Send Status Configuration 2 */ volatile uint32_t cq_crdct; /**< Command response for direct command */ volatile uint32_t reserved8; volatile uint32_t cq_rmem; /**< Command response mode error mask */ volatile uint32_t cq_terri; /**< Command Queue Task Error Information */ volatile uint32_t cq_cri; /**< Command Queue Command response index */ volatile uint32_t cq_cra; /**< Command Queue Command response argument */ volatile uint32_t reserved9[425]; }; enum emmc_sw_reset { EMMC_HOST_SW_RESET_DATA_LINE = 0, EMMC_HOST_SW_RESET_CMD_LINE, EMMC_HOST_SW_RESET_ALL }; enum emmc_cmd_type { EMMC_HOST_CMD_NORMAL = 0, EMMC_HOST_CMD_SUSPEND, EMMC_HOST_CMD_RESUME, EMMC_HOST_CMD_ABORT, }; enum emmc_response_type { EMMC_HOST_RESP_NONE = 0, EMMC_HOST_RESP_LEN_136, EMMC_HOST_RESP_LEN_48, EMMC_HOST_RESP_LEN_48B, EMMC_HOST_INVAL_HOST_RESP_LEN, }; struct emmc_cmd_config { struct sdhc_command *sdhc_cmd; uint32_t cmd_idx; enum emmc_cmd_type cmd_type; bool data_present; bool idx_check_en; bool crc_check_en; }; struct resp { uint64_t resp_48bit; }; #endif /* _EMMC_HOST_HC_H_ */ ```
/content/code_sandbox/drivers/sdhc/intel_emmc_host.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,231
```c /* * */ #define DT_DRV_COMPAT atmel_sam_hsmci #include <zephyr/drivers/sdhc.h> #include <zephyr/drivers/gpio.h> #include <zephyr/devicetree.h> #include <zephyr/logging/log.h> #include <zephyr/kernel.h> #include <soc.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> LOG_MODULE_REGISTER(hsmci, CONFIG_SDHC_LOG_LEVEL); #ifdef HSMCI_MR_PDCMODE #ifdef CONFIG_SAM_HSMCI_PDCMODE #define _HSMCI_PDCMODE #endif #endif #ifdef CONFIG_SAM_HSMCI_PWRSAVE #if (CONFIG_SAM_HSMCI_PWRSAVE_DIV < 0) || (CONFIG_SAM_HSMCI_PWRSAVE_DIV > 7) #error "CONFIG_SAM_HSMCI_PWRSAVE_DIV must be 0 to 7" #endif #endif #define _HSMCI_DEFAULT_TIMEOUT 5000 #define _HSMCI_MAX_FREQ (SOC_ATMEL_SAM_MCK_FREQ_HZ >> 1) #define _HSMCI_MIN_FREQ (_HSMCI_MAX_FREQ / 0x200) #define _MSMCI_MAX_DIVISOR 0x1FF #define _HSMCI_SR_ERR (HSMCI_SR_RINDE \ | HSMCI_SR_RDIRE \ | HSMCI_SR_RCRCE \ | HSMCI_SR_RENDE \ | HSMCI_SR_RTOE \ | HSMCI_SR_DCRCE \ | HSMCI_SR_DTOE \ | HSMCI_SR_CSTOE \ | HSMCI_SR_OVRE \ | HSMCI_SR_UNRE) static const uint8_t _resp2size[] = { [SD_RSP_TYPE_NONE] = HSMCI_CMDR_RSPTYP_NORESP, [SD_RSP_TYPE_R1] = HSMCI_CMDR_RSPTYP_48_BIT, [SD_RSP_TYPE_R1b] = HSMCI_CMDR_RSPTYP_R1B, [SD_RSP_TYPE_R2] = HSMCI_CMDR_RSPTYP_136_BIT, [SD_RSP_TYPE_R3] = HSMCI_CMDR_RSPTYP_48_BIT, [SD_RSP_TYPE_R4] = HSMCI_CMDR_RSPTYP_48_BIT, [SD_RSP_TYPE_R5] = 0 /* SDIO not supported */, [SD_RSP_TYPE_R5b] = 0 /* SDIO not supported */, [SD_RSP_TYPE_R6] = HSMCI_CMDR_RSPTYP_48_BIT, [SD_RSP_TYPE_R7] = HSMCI_CMDR_RSPTYP_48_BIT, }; /* timeout multiplier shift (actual value is 1 << _mul_shift[*]) */ static const uint8_t _mul_shift[] = {0, 4, 7, 8, 10, 12, 16, 20}; static const uint8_t _mul_shift_size = 8; struct sam_hsmci_config { Hsmci *base; const struct atmel_sam_pmc_config clock_cfg; const struct pinctrl_dev_config *pincfg; struct gpio_dt_spec carrier_detect; }; struct sam_hsmci_data { bool open_drain; uint8_t cmd_in_progress; struct k_mutex mtx; }; static int 
sam_hsmci_reset(const struct device *dev) { const struct sam_hsmci_config *config = dev->config; Hsmci *hsmci = config->base; uint32_t mr = hsmci->HSMCI_MR; uint32_t dtor = hsmci->HSMCI_DTOR; uint32_t sdcr = hsmci->HSMCI_SDCR; uint32_t cstor = hsmci->HSMCI_CSTOR; uint32_t cfg = hsmci->HSMCI_CFG; hsmci->HSMCI_CR = HSMCI_CR_SWRST; hsmci->HSMCI_MR = mr; hsmci->HSMCI_DTOR = dtor; hsmci->HSMCI_SDCR = sdcr; hsmci->HSMCI_CSTOR = cstor; hsmci->HSMCI_CFG = cfg; hsmci->HSMCI_CR = HSMCI_CR_PWSEN | HSMCI_CR_MCIEN; return 0; } static int sam_hsmci_get_host_props(const struct device *dev, struct sdhc_host_props *props) { memset(props, 0, sizeof(*props)); props->f_max = _HSMCI_MAX_FREQ; props->f_min = _HSMCI_MIN_FREQ; /* high-speed not working yet due to limitations of the SDHC sm */ props->host_caps.high_spd_support = false; props->power_delay = 500; props->is_spi = false; props->max_current_330 = 4; return 0; } static int sam_hsmci_set_io(const struct device *dev, struct sdhc_io *ios) { const struct sam_hsmci_config *config = dev->config; struct sam_hsmci_data *data = dev->data; Hsmci *hsmci = config->base; uint32_t frequency; uint32_t div_val; int ret; LOG_DBG("%s(clock=%d, bus_width=%d, timing=%d, mode=%d)", __func__, ios->clock, ios->bus_width, ios->timing, ios->bus_mode); if (ios->clock > 0) { if (ios->clock > _HSMCI_MAX_FREQ) { return -ENOTSUP; } ret = clock_control_get_rate(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&config->clock_cfg, &frequency); if (ret < 0) { LOG_ERR("Failed to get clock rate, err=%d", ret); return ret; } div_val = frequency / ios->clock - 2; if (div_val < 0) { div_val = 0; } if (div_val > _MSMCI_MAX_DIVISOR) { div_val = _MSMCI_MAX_DIVISOR; } LOG_DBG("divider: %d (freq=%d)", div_val, frequency / (div_val + 2)); hsmci->HSMCI_MR &= ~HSMCI_MR_CLKDIV_Msk; hsmci->HSMCI_MR |= ((div_val & 1) ? 
HSMCI_MR_CLKODD : 0) | HSMCI_MR_CLKDIV(div_val >> 1); } if (ios->bus_width) { hsmci->HSMCI_SDCR &= ~HSMCI_SDCR_SDCBUS_Msk; switch (ios->bus_width) { case SDHC_BUS_WIDTH1BIT: hsmci->HSMCI_SDCR = HSMCI_SDCR_SDCBUS_1; break; case SDHC_BUS_WIDTH4BIT: hsmci->HSMCI_SDCR = HSMCI_SDCR_SDCBUS_4; break; default: return -ENOTSUP; } } data->open_drain = (ios->bus_mode == SDHC_BUSMODE_OPENDRAIN); if (ios->timing) { switch (ios->timing) { case SDHC_TIMING_LEGACY: hsmci->HSMCI_CFG &= ~HSMCI_CFG_HSMODE; break; case SDHC_TIMING_HS: hsmci->HSMCI_CFG |= HSMCI_CFG_HSMODE; break; default: return -ENOTSUP; } } return 0; } static int sam_hsmci_init(const struct device *dev) { const struct sam_hsmci_config *config = dev->config; int ret; /* Connect pins to the peripheral */ ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("pinctrl_apply_state() => %d", ret); return ret; } /* Enable module's clock */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&config->clock_cfg); /* init carrier detect (if set) */ if (config->carrier_detect.port != NULL) { if (!gpio_is_ready_dt(&config->carrier_detect)) { LOG_ERR("GPIO port for carrier-detect pin is not ready"); return -ENODEV; } ret = gpio_pin_configure_dt(&config->carrier_detect, GPIO_INPUT); if (ret < 0) { LOG_ERR("Couldn't configure carrier-detect pin; (%d)", ret); return ret; } } Hsmci *hsmci = config->base; /* reset the device */ hsmci->HSMCI_CR = HSMCI_CR_SWRST; hsmci->HSMCI_CR = HSMCI_CR_PWSDIS; hsmci->HSMCI_CR = HSMCI_CR_MCIEN; #ifdef CONFIG_SAM_HSMCI_PWRSAVE hsmci->HSMCI_MR = HSMCI_MR_RDPROOF | HSMCI_MR_WRPROOF | HSMCI_MR_PWSDIV(CONFIG_SAM_HSMCI_PWRSAVE_DIV); hsmci->HSMCI_CR = HSMCI_CR_PWSEN; #else hsmci->HSMCI_MR = HSMCI_MR_RDPROOF | HSMCI_MR_WRPROOF; #endif return 0; } static int sam_hsmci_get_card_present(const struct device *dev) { const struct sam_hsmci_config *config = dev->config; if (config->carrier_detect.port == NULL) { return 1; } return 
gpio_pin_get_dt(&config->carrier_detect); } static int sam_hsmci_card_busy(const struct device *dev) { const struct sam_hsmci_config *config = dev->config; Hsmci *hsmci = config->base; return (hsmci->HSMCI_SR & HSMCI_SR_NOTBUSY) == 0; } static void sam_hsmci_send_clocks(Hsmci *hsmci) { hsmci->HSMCI_MR &= ~(HSMCI_MR_WRPROOF | HSMCI_MR_RDPROOF | HSMCI_MR_FBYTE); hsmci->HSMCI_ARGR = 0; hsmci->HSMCI_CMDR = HSMCI_CMDR_RSPTYP_NORESP | HSMCI_CMDR_SPCMD_INIT | HSMCI_CMDR_OPDCMD_OPENDRAIN; while (!(hsmci->HSMCI_SR & HSMCI_SR_CMDRDY)) { ; } hsmci->HSMCI_MR |= HSMCI_MR_WRPROOF | HSMCI_MR_RDPROOF; } static int sam_hsmci_send_cmd(Hsmci *hsmci, struct sdhc_command *cmd, uint32_t cmdr, struct sam_hsmci_data *data) { uint32_t sr; hsmci->HSMCI_ARGR = cmd->arg; cmdr |= HSMCI_CMDR_CMDNB(cmd->opcode) | HSMCI_CMDR_MAXLAT_64; if (data->open_drain) { cmdr |= HSMCI_CMDR_OPDCMD_OPENDRAIN; } uint8_t nrt = cmd->response_type & SDHC_NATIVE_RESPONSE_MASK; if (nrt > SD_RSP_TYPE_R7) { return -ENOTSUP; } cmdr |= _resp2size[nrt]; hsmci->HSMCI_CMDR = cmdr; do { sr = hsmci->HSMCI_SR; /* special case ,ignore CRC status if response is R3 to clear it */ if (nrt == SD_RSP_TYPE_R3 || nrt == SD_RSP_TYPE_NONE) { sr &= ~HSMCI_SR_RCRCE; } if ((sr & _HSMCI_SR_ERR) != 0) { LOG_DBG("Status register error bits: %08x", sr & _HSMCI_SR_ERR); return -EIO; } } while (!(sr & HSMCI_SR_CMDRDY)); if (nrt == SD_RSP_TYPE_R1b) { do { sr = hsmci->HSMCI_SR; } while (!((sr & HSMCI_SR_NOTBUSY) && ((sr & HSMCI_SR_DTIP) == 0))); } /* RSPR is just a FIFO, index is of no consequence */ cmd->response[3] = hsmci->HSMCI_RSPR[0]; cmd->response[2] = hsmci->HSMCI_RSPR[0]; cmd->response[1] = hsmci->HSMCI_RSPR[0]; cmd->response[0] = hsmci->HSMCI_RSPR[0]; return 0; } static int sam_hsmci_wait_write_end(Hsmci *hsmci) { uint32_t sr = 0; #ifdef _HSMCI_PDCMODE /* Timeout is included in HSMCI, see DTOE bit, not required explicitly. 
*/ do { sr = hsmci->HSMCI_SR; if (sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)) { LOG_DBG("PDC sr 0x%08x error", sr); return -EIO; } } while (!(sr & HSMCI_SR_TXBUFE)); #endif do { sr = hsmci->HSMCI_SR; if (sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)) { LOG_DBG("PDC sr 0x%08x last transfer error", sr); return -EIO; } } while (!(sr & HSMCI_SR_NOTBUSY)); if (!(hsmci->HSMCI_SR & HSMCI_SR_FIFOEMPTY)) { return -EIO; } return 0; } static int sam_hsmci_wait_read_end(Hsmci *hsmci) { uint32_t sr; #ifdef _HSMCI_PDCMODE do { sr = hsmci->HSMCI_SR; if (sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)) { LOG_DBG("PDC sr 0x%08x error", sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)); return -EIO; } } while (!(sr & HSMCI_SR_RXBUFF)); #endif do { sr = hsmci->HSMCI_SR; if (sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)) { return -EIO; } } while (!(sr & HSMCI_SR_XFRDONE)); return 0; } static int sam_hsmci_write_timeout(Hsmci *hsmci, int timeout_ms) { /* convert to clocks (coarsely) */ int clocks = ATMEL_SAM_DT_CPU_CLK_FREQ_HZ / 1000 * timeout_ms; int mul, max_clock; for (int i = 0; i < _mul_shift_size; i++) { mul = 1 << _mul_shift[i]; max_clock = 15 * mul; if (max_clock > clocks) { hsmci->HSMCI_DTOR = ((i << HSMCI_DTOR_DTOMUL_Pos) & HSMCI_DTOR_DTOMUL_Msk) | HSMCI_DTOR_DTOCYC((clocks + mul - 1) / mul); return 0; } } /* * So, if it is > maximum timeout... we'll just put it on the maximum the driver supports * its not nice.. but it should work.. what else is there to do? 
*/ hsmci->HSMCI_DTOR = HSMCI_DTOR_DTOMUL_Msk | HSMCI_DTOR_DTOCYC_Msk; return 0; } static inline int wait_write_transfer_done(Hsmci *hsmci) { int sr; do { sr = hsmci->HSMCI_SR; if (sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)) { return -EIO; } } while (!(sr & HSMCI_SR_TXRDY)); return 0; } static inline int wait_read_transfer_done(Hsmci *hsmci) { int sr; do { sr = HSMCI->HSMCI_SR; if (sr & (HSMCI_SR_UNRE | HSMCI_SR_OVRE | HSMCI_SR_DTOE | HSMCI_SR_DCRCE)) { return -EIO; } } while (!(sr & HSMCI_SR_RXRDY)); return 0; } #ifndef _HSMCI_PDCMODE static int hsmci_do_manual_transfer(Hsmci *hsmci, bool byte_mode, bool is_write, void *data, int transfer_count) { int ret; if (is_write) { if (byte_mode) { const uint8_t *ptr = data; while (transfer_count-- > 0) { ret = wait_write_transfer_done(hsmci); if (ret != 0) { return ret; } hsmci->HSMCI_TDR = *ptr; ptr++; } } else { const uint32_t *ptr = data; while (transfer_count-- > 0) { ret = wait_write_transfer_done(hsmci); if (ret != 0) { return ret; } hsmci->HSMCI_TDR = *ptr; ptr++; } } ret = sam_hsmci_wait_write_end(hsmci); } else { if (byte_mode) { uint8_t *ptr = data; while (transfer_count-- > 0) { ret = wait_read_transfer_done(hsmci); if (ret != 0) { return ret; } *ptr = hsmci->HSMCI_RDR; ptr++; } } else { uint32_t *ptr = data; while (transfer_count-- > 0) { ret = wait_read_transfer_done(hsmci); if (ret != 0) { return ret; } *ptr = hsmci->HSMCI_RDR; ptr++; } } ret = sam_hsmci_wait_read_end(hsmci); } return ret; } #endif /* !_HSMCI_PDCMODE */ static int sam_hsmci_request_inner(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *sd_data) { const struct sam_hsmci_config *config = dev->config; struct sam_hsmci_data *data = dev->data; Hsmci *hsmci = config->base; uint32_t sr; uint32_t size; uint32_t transfer_count; uint32_t cmdr = 0; int ret; bool is_write, byte_mode; LOG_DBG("%s(opcode=%d, arg=%08x, data=%08x, rsptype=%d)", __func__, cmd->opcode, cmd->arg, (uint32_t)sd_data, 
cmd->response_type & SDHC_NATIVE_RESPONSE_MASK); if (cmd->opcode == SD_GO_IDLE_STATE) { /* send 74 clocks, as required by SD spec */ sam_hsmci_send_clocks(hsmci); } if (sd_data) { cmdr |= HSMCI_CMDR_TRCMD_START_DATA; ret = sam_hsmci_write_timeout(hsmci, cmd->timeout_ms); if (ret != 0) { return ret; } switch (cmd->opcode) { case SD_WRITE_SINGLE_BLOCK: cmdr |= HSMCI_CMDR_TRTYP_SINGLE; cmdr |= HSMCI_CMDR_TRDIR_WRITE; is_write = true; break; case SD_WRITE_MULTIPLE_BLOCK: is_write = true; cmdr |= HSMCI_CMDR_TRTYP_MULTIPLE; cmdr |= HSMCI_CMDR_TRDIR_WRITE; break; case SD_APP_SEND_SCR: case SD_SWITCH: case SD_READ_SINGLE_BLOCK: is_write = false; cmdr |= HSMCI_CMDR_TRTYP_SINGLE; cmdr |= HSMCI_CMDR_TRDIR_READ; break; case SD_READ_MULTIPLE_BLOCK: is_write = false; cmdr |= HSMCI_CMDR_TRTYP_MULTIPLE; cmdr |= HSMCI_CMDR_TRDIR_READ; break; case SD_APP_SEND_NUM_WRITTEN_BLK: is_write = false; break; default: return -ENOTSUP; } if ((sd_data->block_size & 0x3) == 0 && (((uint32_t)sd_data->data) & 0x3) == 0) { size = (sd_data->block_size + 3) >> 2; hsmci->HSMCI_MR &= ~HSMCI_MR_FBYTE; byte_mode = true; } else { size = sd_data->block_size; hsmci->HSMCI_MR |= HSMCI_MR_FBYTE; byte_mode = false; } hsmci->HSMCI_BLKR = HSMCI_BLKR_BLKLEN(sd_data->block_size) | HSMCI_BLKR_BCNT(sd_data->blocks); transfer_count = size * sd_data->blocks; #ifdef _HSMCI_PDCMODE hsmci->HSMCI_MR |= HSMCI_MR_PDCMODE; hsmci->HSMCI_RNCR = 0; if (is_write) { hsmci->HSMCI_TCR = transfer_count; hsmci->HSMCI_TPR = (uint32_t)sd_data->data; } else { hsmci->HSMCI_RCR = transfer_count; hsmci->HSMCI_RPR = (uint32_t)sd_data->data; hsmci->HSMCI_PTCR = HSMCI_PTCR_RXTEN; } } else { hsmci->HSMCI_MR &= ~HSMCI_MR_PDCMODE; #endif /* _HSMCI_PDCMODE */ } ret = sam_hsmci_send_cmd(hsmci, cmd, cmdr, data); if (sd_data) { #ifdef _HSMCI_PDCMODE if (ret == 0) { if (is_write) { hsmci->HSMCI_PTCR = HSMCI_PTCR_TXTEN; ret = sam_hsmci_wait_write_end(hsmci); } else { ret = sam_hsmci_wait_read_end(hsmci); } } hsmci->HSMCI_PTCR = HSMCI_PTCR_TXTDIS | 
HSMCI_PTCR_RXTDIS; hsmci->HSMCI_MR &= ~HSMCI_MR_PDCMODE; #else /* !_HSMCI_PDCMODE */ if (ret == 0) { ret = hsmci_do_manual_transfer(hsmci, byte_mode, is_write, sd_data->data, transfer_count); } #endif /* _HSMCI_PDCMODE */ } sr = hsmci->HSMCI_SR; LOG_DBG("RSP0=%08x, RPS1=%08x, RPS2=%08x,RSP3=%08x, SR=%08x", cmd->response[0], cmd->response[1], cmd->response[2], cmd->response[3], sr); return ret; } static void sam_hsmci_abort(const struct device *dev) { #ifdef _HSMCI_PDCMODE const struct sam_hsmci_config *config = dev->config; Hsmci *hsmci = config->base; hsmci->HSMCI_PTCR = HSMCI_PTCR_RXTDIS | HSMCI_PTCR_TXTDIS; #endif /* _HSMCI_PDCMODE */ struct sdhc_command cmd = { .opcode = SD_STOP_TRANSMISSION, .arg = 0, .response_type = SD_RSP_TYPE_NONE}; sam_hsmci_request_inner(dev, &cmd, NULL); } static int sam_hsmci_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *sd_data) { struct sam_hsmci_data *dev_data = dev->data; int busy_timeout = _HSMCI_DEFAULT_TIMEOUT; int ret; ret = k_mutex_lock(&dev_data->mtx, K_MSEC(cmd->timeout_ms)); if (ret) { LOG_ERR("Could not access card"); return -EBUSY; } #ifdef CONFIG_SAM_HSMCI_PWRSAVE const struct sam_hsmci_config *config = dev->config; Hsmci *hsmci = config->base; hsmci->HSMCI_CR = HSMCI_CR_PWSDIS; #endif /* CONFIG_SAM_HSMCI_PWRSAVE */ do { ret = sam_hsmci_request_inner(dev, cmd, sd_data); if (sd_data && (ret || sd_data->blocks > 1)) { sam_hsmci_abort(dev); while (busy_timeout > 0) { if (!sam_hsmci_card_busy(dev)) { break; } k_busy_wait(125); busy_timeout -= 125; } if (busy_timeout <= 0) { LOG_ERR("Card did not idle after CMD12"); ret = -ETIMEDOUT; } } } while (ret != 0 && (cmd->retries-- > 0)); #ifdef CONFIG_SAM_HSMCI_PWRSAVE hsmci->HSMCI_CR = HSMCI_CR_PWSEN; #endif /* CONFIG_SAM_HSMCI_PWRSAVE */ k_mutex_unlock(&dev_data->mtx); return ret; } static const struct sdhc_driver_api hsmci_api = { .reset = sam_hsmci_reset, .get_host_props = sam_hsmci_get_host_props, .set_io = sam_hsmci_set_io, .get_card_present = 
sam_hsmci_get_card_present, .request = sam_hsmci_request, .card_busy = sam_hsmci_card_busy, }; #define SAM_HSMCI_INIT(N) \ PINCTRL_DT_INST_DEFINE(N); \ static const struct sam_hsmci_config hsmci_##N##_config = { \ .base = (Hsmci *)DT_INST_REG_ADDR(N), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(N), \ .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(N), \ .carrier_detect = GPIO_DT_SPEC_INST_GET_OR(N, cd_gpios, {0})}; \ static struct sam_hsmci_data hsmci_##N##_data = {}; \ DEVICE_DT_INST_DEFINE(N, &sam_hsmci_init, NULL, &hsmci_##N##_data, &hsmci_##N##_config, \ POST_KERNEL, CONFIG_SDHC_INIT_PRIORITY, &hsmci_api); DT_INST_FOREACH_STATUS_OKAY(SAM_HSMCI_INIT) ```
/content/code_sandbox/drivers/sdhc/sam_hsmci.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,160
```c /* * */ #define DT_DRV_COMPAT zephyr_sdhc_spi_slot #include <zephyr/drivers/sdhc.h> #include <zephyr/drivers/gpio.h> #include <zephyr/logging/log.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/spi.h> #include <zephyr/sys/crc.h> LOG_MODULE_REGISTER(sdhc_spi, CONFIG_SDHC_LOG_LEVEL); #define MAX_CMD_READ 21 #define SPI_R1B_TIMEOUT_MS 3000 #define SD_SPI_SKIP_RETRIES 1000000 #define _INST_REQUIRES_EXPLICIT_FF(inst) (SPI_MOSI_OVERRUN_DT(DT_INST_BUS(inst)) != 0xFF) || /* The SD protocol requires sending ones while reading but the Zephyr * SPI API defers the choice of default values to the drivers. * * For drivers that we know will send ones we can avoid allocating a * 512 byte array of ones and remove the limit on the number of bytes * that can be read in a single transaction. */ #define ANY_INST_REQUIRES_EXPLICIT_FF DT_INST_FOREACH_STATUS_OKAY(_INST_REQUIRES_EXPLICIT_FF) 0 #if ANY_INST_REQUIRES_EXPLICIT_FF static const uint8_t sdhc_ones[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; BUILD_ASSERT(sizeof(sdhc_ones) == 512, "0xFF array for SDHC must be 512 bytes"); #endif /* ANY_INST_REQUIRES_EXPLICIT_FF */ struct sdhc_spi_config { const struct device *spi_dev; const struct gpio_dt_spec pwr_gpio; const uint32_t spi_max_freq; uint32_t power_delay_ms; }; struct sdhc_spi_data { enum sdhc_power power_mode; struct spi_config *spi_cfg; struct spi_config cfg_a; struct spi_config cfg_b; uint8_t scratch[MAX_CMD_READ]; }; /* Receives a block of bytes */ static int sdhc_spi_rx(const struct device *spi_dev, struct spi_config *spi_cfg, uint8_t *buf, int len) { #if ANY_INST_REQUIRES_EXPLICIT_FF struct spi_buf tx_bufs[] = { { .buf = (uint8_t *)sdhc_ones, .len = len } }; const struct spi_buf_set tx = { .buffers = tx_bufs, .count = 1, }; const struct spi_buf_set *tx_ptr = &tx; #else const struct spi_buf_set *tx_ptr = NULL; #endif /* ANY_INST_REQUIRES_EXPLICIT_FF */ struct spi_buf rx_bufs[] = { { .buf = buf, .len = len } }; const struct spi_buf_set rx = { .buffers = rx_bufs, .count = 1, }; return spi_transceive(spi_dev, spi_cfg, tx_ptr, &rx); } static int sdhc_spi_init_card(const struct device *dev) { /* SD spec requires at least 74 clocks be send to SD to start it. 
* for SPI protocol, this will be performed by sending 10 0xff values * to the card (this should result in 80 SCK cycles) */ const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *data = dev->data; struct spi_config *spi_cfg = data->spi_cfg; int ret; if (spi_cfg->frequency == 0) { /* Use default 400KHZ frequency */ spi_cfg->frequency = SDMMC_CLOCK_400KHZ; } /* the initial 74 clocks must be sent while CS is high */ spi_cfg->operation |= SPI_CS_ACTIVE_HIGH; ret = sdhc_spi_rx(config->spi_dev, spi_cfg, data->scratch, 10); if (ret != 0) { spi_release(config->spi_dev, spi_cfg); spi_cfg->operation &= ~SPI_CS_ACTIVE_HIGH; return ret; } /* Release lock on SPI bus */ ret = spi_release(config->spi_dev, spi_cfg); spi_cfg->operation &= ~SPI_CS_ACTIVE_HIGH; return ret; } /* Checks if SPI SD card is sending busy signal */ static int sdhc_spi_card_busy(const struct device *dev) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *data = dev->data; int ret; uint8_t response; ret = sdhc_spi_rx(config->spi_dev, data->spi_cfg, &response, 1); if (ret) { return -EIO; } if (response == 0xFF) { return 0; } else { return 1; } } /* Waits for SPI SD card to stop sending busy signal */ static int sdhc_spi_wait_unbusy(const struct device *dev, int timeout_ms, int interval_ticks) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *data = dev->data; int ret; uint8_t response; while (timeout_ms > 0) { ret = sdhc_spi_rx(config->spi_dev, data->spi_cfg, &response, 1); if (ret) { return ret; } if (response == 0xFF) { return 0; } k_msleep(k_ticks_to_ms_floor32(interval_ticks)); timeout_ms -= k_ticks_to_ms_floor32(interval_ticks); } return -ETIMEDOUT; } /* Read SD command from SPI response */ static int sdhc_spi_response_get(const struct device *dev, struct sdhc_command *cmd, int rx_len) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *dev_data = dev->data; uint8_t *response = dev_data->scratch; uint8_t *end = 
response + rx_len; int ret, timeout = cmd->timeout_ms; uint8_t value, i; /* First step is finding the first valid byte of the response. * All SPI responses start with R1, which will have MSB of zero. * we know we can ignore the first 7 bytes, which hold the command and * initial "card ready" byte. */ response += 8; while (response < end && ((*response & SD_SPI_START) == SD_SPI_START)) { response++; } if (response == end) { /* Some cards are slow, and need more time to respond. Continue * with single byte reads until the card responds. */ response = dev_data->scratch; end = response + 1; while (timeout > 0) { ret = sdhc_spi_rx(config->spi_dev, dev_data->spi_cfg, response, 1); if (ret < 0) { return ret; } if (*response != 0xff) { break; } /* Delay for a bit, and poll the card again */ k_msleep(10); timeout -= 10; } if (*response == 0xff) { return -ETIMEDOUT; } } /* Record R1 response */ cmd->response[0] = *response++; /* Check response for error */ if (cmd->response[0] != 0) { if (cmd->response[0] & (SD_SPI_R1PARAMETER_ERR | SD_SPI_R1ADDRESS_ERR)) { return -EFAULT; /* Bad address */ } else if (cmd->response[0] & (SD_SPI_R1ILLEGAL_CMD_ERR)) { return -EINVAL; /* Invalid command */ } else if (cmd->response[0] & (SD_SPI_R1CMD_CRC_ERR)) { return -EILSEQ; /* Illegal byte sequence */ } else if (cmd->response[0] & (SD_SPI_R1ERASE_SEQ_ERR | SD_SPI_R1ERASE_RESET)) { return -EIO; } /* else IDLE_STATE bit is set, which is not an error, card is just resetting */ } switch ((cmd->response_type & SDHC_SPI_RESPONSE_TYPE_MASK)) { case SD_SPI_RSP_TYPE_R1: /* R1 response - one byte*/ break; case SD_SPI_RSP_TYPE_R1b: /* R1b response - one byte plus busy signal */ /* Read remaining bytes to see if card is still busy. * card will be ready when it stops driving data out * low. 
*/ while (response < end && (*response == 0x0)) { response++; } if (response == end) { value = cmd->timeout_ms; response--; /* Periodically check busy line */ ret = sdhc_spi_wait_unbusy(dev, SPI_R1B_TIMEOUT_MS, 1000); } break; case SD_SPI_RSP_TYPE_R2: case SD_SPI_RSP_TYPE_R5: /* R2/R5 response - R1 response + 1 byte*/ if (response == end) { response = dev_data->scratch; end = response + 1; /* Read the next byte */ ret = sdhc_spi_rx(config->spi_dev, dev_data->spi_cfg, response, 1); if (ret) { return ret; } } cmd->response[0] = (*response) << 8; break; case SD_SPI_RSP_TYPE_R3: case SD_SPI_RSP_TYPE_R4: case SD_SPI_RSP_TYPE_R7: /* R3/R4/R7 response - R1 response + 4 bytes */ cmd->response[1] = 0; for (i = 0; i < 4; i++) { cmd->response[1] <<= 8; /* Read bytes of response */ if (response == end) { response = dev_data->scratch; end = response + 1; /* Read the next byte */ ret = sdhc_spi_rx(config->spi_dev, dev_data->spi_cfg, response, 1); if (ret) { return ret; } } cmd->response[1] |= *response++; } break; default: /* Other RSP types not supported */ return -ENOTSUP; } return 0; } /* Send SD command using SPI */ static int sdhc_spi_send_cmd(const struct device *dev, struct sdhc_command *cmd, bool data_present) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *dev_data = dev->data; int err; uint8_t *cmd_buf; /* To reduce overhead, we will send entire command in one SPI * transaction. The packet takes the following format: * - all ones byte to ensure card is ready * - opcode byte (which includes start and transmission bits) * - 4 bytes for argument * - crc7 byte (with end bit) * The SD card can take up to 8 bytes worth of SCLK cycles to respond. * therefore, we provide 8 bytes of all ones, to read data from the card. * the maximum spi response length is 5 bytes, so we provide an * additional 5 bytes of data, leaving us with 13 bytes of 0xff. 
* Finally, we send a padding byte of all 0xff, to ensure that * the card recives at least one 0xff byte before next command. */ /* Note: we can discard CMD data as we send it, * so resuse the TX buf as RX */ struct spi_buf bufs[] = { { .buf = dev_data->scratch, .len = sizeof(dev_data->scratch), }, }; const struct spi_buf_set buf_set = { .buffers = bufs, .count = 1, }; if (data_present) { /* We cannot send extra SCLK cycles with our command, * since we'll miss the data the card responds with. We * send one 0xff byte, six command bytes, two additional 0xff * bytes, since the min value of NCR (see SD SPI timing * diagrams) is one, and we know there will be an R1 response. */ bufs[0].len = SD_SPI_CMD_SIZE + 3; } memset(dev_data->scratch, 0xFF, sizeof(dev_data->scratch)); cmd_buf = dev_data->scratch + 1; /* Command packet holds the following bits: * [47]: start bit, 0b0 * [46]: transmission bit, 0b1 * [45-40]: command index * [39-8]: argument * [7-1]: CRC * [0]: end bit, 0b1 * Note that packets are sent MSB first. */ /* Add start bit, tx bit, and cmd opcode */ cmd_buf[0] = (cmd->opcode & SD_SPI_CMD); cmd_buf[0] = ((cmd_buf[0] | SD_SPI_TX) & ~SD_SPI_START); /* Add argument */ sys_put_be32(cmd->arg, &cmd_buf[1]); /* Add CRC, and set LSB as the end bit */ cmd_buf[SD_SPI_CMD_BODY_SIZE] = crc7_be(0, cmd_buf, SD_SPI_CMD_BODY_SIZE) | 0x1; LOG_DBG("cmd%d arg 0x%x", cmd->opcode, cmd->arg); /* Set data, will lock SPI bus */ err = spi_transceive(config->spi_dev, dev_data->spi_cfg, &buf_set, &buf_set); if (err != 0) { return err; } /* Read command response */ return sdhc_spi_response_get(dev, cmd, bufs[0].len); } /* Skips bytes in SDHC data stream. 
*/ static int sdhc_skip(const struct device *dev, uint8_t skip_val) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *data = dev->data; uint8_t buf; int ret; uint32_t retries = SD_SPI_SKIP_RETRIES; do { ret = sdhc_spi_rx(config->spi_dev, data->spi_cfg, &buf, sizeof(buf)); if (ret) { return ret; } } while (buf == skip_val && retries--); if (retries == 0) { return -ETIMEDOUT; } /* Return first non-skipped value */ return buf; } /* Handles reading data from SD SPI device */ static int sdhc_spi_read_data(const struct device *dev, struct sdhc_data *data) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *dev_data = dev->data; uint8_t *read_location = data->data; uint32_t remaining = data->blocks; int ret; uint8_t crc[SD_SPI_CRC16_SIZE + 1]; #if ANY_INST_REQUIRES_EXPLICIT_FF /* If the driver requires explicit 0xFF bytes on receive, we * are limited to receiving the size of the sdhc_ones buffer */ if (data->block_size > sizeof(sdhc_ones)) { return -ENOTSUP; } const struct spi_buf tx_bufs[] = { { .buf = (uint8_t *)sdhc_ones, .len = data->block_size, }, }; const struct spi_buf_set tx = { .buffers = tx_bufs, .count = 1, }; const struct spi_buf_set *tx_ptr = &tx; #else const struct spi_buf_set *tx_ptr = NULL; #endif /* ANY_INST_REQUIRES_EXPLICIT_FF */ struct spi_buf rx_bufs[] = { { .buf = read_location, .len = data->block_size, } }; const struct spi_buf_set rx = { .buffers = rx_bufs, .count = 1, }; /* Read bytes until data stream starts. 
SD will send 0xff until * data is available */ ret = sdhc_skip(dev, 0xff); if (ret < 0) { return ret; } /* Check token */ if (ret != SD_SPI_TOKEN_SINGLE) { return -EIO; } /* Read blocks until we are out of data */ while (remaining--) { ret = spi_transceive(config->spi_dev, dev_data->spi_cfg, tx_ptr, &rx); if (ret) { LOG_ERR("Data write failed"); return ret; } /* Read CRC16 plus one end byte */ ret = sdhc_spi_rx(config->spi_dev, dev_data->spi_cfg, crc, sizeof(crc)); if (crc16_itu_t(0, read_location, data->block_size) != sys_get_be16(crc)) { /* Bad CRC */ LOG_ERR("Bad data CRC"); return -EILSEQ; } /* Advance read location */ read_location += data->block_size; rx_bufs[0].buf = read_location; if (remaining) { /* Check next data token */ ret = sdhc_skip(dev, 0xff); if (ret != SD_SPI_TOKEN_SINGLE) { LOG_ERR("Bad token"); return -EIO; } } } return ret; } /* Handles writing data to SD SPI device */ static int sdhc_spi_write_data(const struct device *dev, struct sdhc_data *data) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *dev_data = dev->data; int ret; uint8_t token, resp; uint8_t *write_location = data->data, crc[SD_SPI_CRC16_SIZE]; uint32_t remaining = data->blocks; struct spi_buf tx_bufs[] = { { .buf = &token, .len = sizeof(uint8_t), }, { .buf = write_location, .len = data->block_size, }, { .buf = crc, .len = sizeof(crc), }, }; struct spi_buf_set tx = { .buffers = tx_bufs, .count = 3, }; /* Set the token- single block reads use different token * than multibock */ if (remaining > 1) { token = SD_SPI_TOKEN_MULTI_WRITE; } else { token = SD_SPI_TOKEN_SINGLE; } while (remaining--) { /* Build the CRC for this data block */ sys_put_be16(crc16_itu_t(0, write_location, data->block_size), crc); ret = spi_write(config->spi_dev, dev_data->spi_cfg, &tx); if (ret) { return ret; } /* Read back the data response token from the card */ ret = sdhc_spi_rx(config->spi_dev, dev_data->spi_cfg, &resp, sizeof(resp)); if (ret) { return ret; } /* Check response 
token */ if ((resp & 0xF) != SD_SPI_RESPONSE_ACCEPTED) { if ((resp & 0xF) == SD_SPI_RESPONSE_CRC_ERR) { return -EILSEQ; } else if ((resp & 0xF) == SD_SPI_RESPONSE_WRITE_ERR) { return -EIO; } LOG_DBG("Unknown write response token 0x%x", resp); return -EIO; } /* Advance write location */ write_location += data->block_size; tx_bufs[1].buf = write_location; /* Wait for card to stop being busy */ ret = sdhc_spi_wait_unbusy(dev, data->timeout_ms, 0); if (ret) { return ret; } } if (data->blocks > 1) { /* Write stop transfer token to card */ token = SD_SPI_TOKEN_STOP_TRAN; tx.count = 1; ret = spi_write(config->spi_dev, dev_data->spi_cfg, &tx); if (ret) { return ret; } /* Wait for card to stop being busy */ ret = sdhc_spi_wait_unbusy(dev, data->timeout_ms, 0); if (ret) { return ret; } } return 0; } static int sdhc_spi_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data) { const struct sdhc_spi_config *config = dev->config; struct sdhc_spi_data *dev_data = dev->data; int ret, stop_ret, retries = cmd->retries; const struct sdhc_command stop_cmd = { .opcode = SD_STOP_TRANSMISSION, .arg = 0, .response_type = SD_SPI_RSP_TYPE_R1b, .timeout_ms = 1000, .retries = 1, }; if (data == NULL) { do { ret = sdhc_spi_send_cmd(dev, cmd, false); } while ((ret != 0) && (retries-- > 0)); } else { do { retries--; ret = sdhc_spi_send_cmd(dev, cmd, true); if (ret) { continue; } if ((cmd->opcode == SD_WRITE_SINGLE_BLOCK) || (cmd->opcode == SD_WRITE_MULTIPLE_BLOCK)) { ret = sdhc_spi_write_data(dev, data); } else { ret = sdhc_spi_read_data(dev, data); } if (ret || (cmd->opcode == SD_READ_MULTIPLE_BLOCK)) { int stop_retries = cmd->retries; /* CMD12 is required after multiple read, or * to retry failed transfer */ stop_ret = sdhc_spi_send_cmd(dev, (struct sdhc_command *)&stop_cmd, false); while ((stop_ret != 0) && (stop_retries > 0)) { /* Retry stop command */ ret = stop_ret = sdhc_spi_send_cmd(dev, (struct sdhc_command *)&stop_cmd, false); stop_retries--; } } } while 
((ret != 0) && (retries > 0)); } if (ret) { /* Release SPI bus */ spi_release(config->spi_dev, dev_data->spi_cfg); return ret; } /* Release SPI bus */ return spi_release(config->spi_dev, dev_data->spi_cfg); } static int sdhc_spi_set_io(const struct device *dev, struct sdhc_io *ios) { const struct sdhc_spi_config *cfg = dev->config; struct sdhc_spi_data *data = dev->data; if (ios->clock != data->spi_cfg->frequency) { if (ios->clock > cfg->spi_max_freq) { return -ENOTSUP; } /* Because pointer comparision is used, we have to * swap to a new configuration structure to reconfigure SPI. */ if (ios->clock != 0) { if (data->spi_cfg == &data->cfg_a) { data->cfg_a.frequency = ios->clock; memcpy(&data->cfg_b, &data->cfg_a, sizeof(struct spi_config)); data->spi_cfg = &data->cfg_b; } else { data->cfg_b.frequency = ios->clock; memcpy(&data->cfg_a, &data->cfg_b, sizeof(struct spi_config)); data->spi_cfg = &data->cfg_a; } } } if (ios->bus_mode != SDHC_BUSMODE_PUSHPULL) { /* SPI mode supports push pull */ return -ENOTSUP; } if (data->power_mode != ios->power_mode) { if (ios->power_mode == SDHC_POWER_ON) { /* Send 74 clock cycles to start card */ if (sdhc_spi_init_card(dev) != 0) { LOG_ERR("Card SCLK init sequence failed"); return -EIO; } } if (cfg->pwr_gpio.port) { /* If power control GPIO is defined, toggle SD power */ if (ios->power_mode == SDHC_POWER_ON) { if (gpio_pin_set_dt(&cfg->pwr_gpio, 1)) { return -EIO; } LOG_INF("Powered up"); } else { if (gpio_pin_set_dt(&cfg->pwr_gpio, 0)) { return -EIO; } LOG_INF("Powered down"); } } data->power_mode = ios->power_mode; } if (ios->bus_width != SDHC_BUS_WIDTH1BIT) { /* SPI mode supports 1 bit bus */ return -ENOTSUP; } if (ios->signal_voltage != SD_VOL_3_3_V) { /* SPI mode does not support UHS voltages */ return -ENOTSUP; } return 0; } static int sdhc_spi_get_card_present(const struct device *dev) { /* SPI has no card presence method, assume card is in slot */ return 1; } static int sdhc_spi_get_host_props(const struct device *dev, 
struct sdhc_host_props *props) { const struct sdhc_spi_config *cfg = dev->config; memset(props, 0, sizeof(struct sdhc_host_props)); props->f_min = SDMMC_CLOCK_400KHZ; props->f_max = cfg->spi_max_freq; props->power_delay = cfg->power_delay_ms; props->host_caps.vol_330_support = true; props->is_spi = true; return 0; } static int sdhc_spi_reset(const struct device *dev) { struct sdhc_spi_data *data = dev->data; /* Reset host I/O */ data->spi_cfg->frequency = SDMMC_CLOCK_400KHZ; return 0; } static int sdhc_spi_init(const struct device *dev) { const struct sdhc_spi_config *cfg = dev->config; struct sdhc_spi_data *data = dev->data; int ret = 0; if (!device_is_ready(cfg->spi_dev)) { return -ENODEV; } if (cfg->pwr_gpio.port) { if (!gpio_is_ready_dt(&cfg->pwr_gpio)) { return -ENODEV; } ret = gpio_pin_configure_dt(&cfg->pwr_gpio, GPIO_OUTPUT_INACTIVE); if (ret != 0) { LOG_ERR("Could not configure power gpio (%d)", ret); return ret; } } data->power_mode = SDHC_POWER_OFF; data->spi_cfg = &data->cfg_a; data->spi_cfg->frequency = 0; return ret; } static const struct sdhc_driver_api sdhc_spi_api = { .request = sdhc_spi_request, .set_io = sdhc_spi_set_io, .get_host_props = sdhc_spi_get_host_props, .get_card_present = sdhc_spi_get_card_present, .reset = sdhc_spi_reset, .card_busy = sdhc_spi_card_busy, }; #define SDHC_SPI_INIT(n) \ const struct sdhc_spi_config sdhc_spi_config_##n = { \ .spi_dev = DEVICE_DT_GET(DT_INST_PARENT(n)), \ .pwr_gpio = GPIO_DT_SPEC_INST_GET_OR(n, pwr_gpios, {0}), \ .spi_max_freq = DT_INST_PROP(n, spi_max_frequency), \ .power_delay_ms = DT_INST_PROP(n, power_delay_ms), \ }; \ \ struct sdhc_spi_data sdhc_spi_data_##n = { \ .cfg_a = SPI_CONFIG_DT_INST(n, \ (SPI_LOCK_ON | SPI_HOLD_ON_CS | SPI_WORD_SET(8) \ | (DT_INST_PROP(n, spi_clock_mode_cpol) ? SPI_MODE_CPOL : 0) \ | (DT_INST_PROP(n, spi_clock_mode_cpha) ? 
SPI_MODE_CPHA : 0) \ ),\ 0), \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ &sdhc_spi_init, \ NULL, \ &sdhc_spi_data_##n, \ &sdhc_spi_config_##n, \ POST_KERNEL, \ CONFIG_SDHC_INIT_PRIORITY, \ &sdhc_spi_api); DT_INST_FOREACH_STATUS_OKAY(SDHC_SPI_INIT) ```
/content/code_sandbox/drivers/sdhc/sdhc_spi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
8,313
```unknown
# Kconfig options for the NXP MCUX SDIF SD host controller driver.

config MCUX_SDIF
	bool "NXP MCUX SDIF Driver"
	default y
	depends on DT_HAS_NXP_LPC_SDIF_ENABLED
	select SDHC_SUPPORTS_NATIVE_MODE
	select PINCTRL
	help
	  Enable the NXP SDIF Host controller driver

if MCUX_SDIF

config MCUX_SDIF_DMA_SUPPORT
	bool "DMA support for MCUX SDIF driver"
	default y
	help
	  Enable DMA support for MCUX SDIF driver. May be disabled to
	  reduce footprint of driver.

if MCUX_SDIF_DMA_SUPPORT

# SDIF DMA needs 32 bit aligned buffers
config SDHC_BUFFER_ALIGNMENT
	default 4

config MCUX_SDIF_DMA_BUFFER_SIZE
	int "Size of DMA descriptor buffer in bytes"
	default 256
	help
	  Size of MCUX SDIF DMA descriptor buffer in bytes

endif #MCUX_SDIF_DMA_SUPPORT

endif #MCUX_SDIF
```
/content/code_sandbox/drivers/sdhc/Kconfig.mcux_sdif
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
194
```objective-c
/*
 *
 */

/* Private definitions shared by the ESP32 SDMMC host driver
 * (sdhc_esp32.c): clock frequencies, interrupt masks, SPI/SD command
 * flags, and the driver's internal transfer/event structures.
 */

#define SDMMC_FREQ_DEFAULT 20000   /*!< SD/MMC Default speed (limited by clock divider) */
#define SDMMC_FREQ_HIGHSPEED 40000 /*!< SD High speed (limited by clock divider) */
#define SDMMC_FREQ_PROBING 400     /*!< SD/MMC probing speed */
#define SDMMC_FREQ_52M 52000       /*!< MMC 52MHz speed */
#define SDMMC_FREQ_26M 26000       /*!< MMC 26MHz speed */

/* Groups of SDMMC interrupt status bits treated together by the ISR */
#define SDMMC_DATA_ERR_MASK                                                                        \
	(uint32_t)(SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC | SDMMC_INTMASK_HTO |                    \
		   SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)

#define SDMMC_DMA_DONE_MASK                                                                        \
	(uint32_t)(SDMMC_IDMAC_INTMASK_RI | SDMMC_IDMAC_INTMASK_TI | SDMMC_IDMAC_INTMASK_NI)

#define SDMMC_CMD_ERR_MASK                                                                         \
	(uint32_t)(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC | SDMMC_INTMASK_RESP_ERR)

/* States of the request/transfer state machine in sdhc_esp32.c */
enum sdmmc_req_state {
	SDMMC_IDLE,
	SDMMC_SENDING_CMD,
	SDMMC_SENDING_DATA,
	SDMMC_BUSY,
};

/* SDHC command flags */
#define SCF_ITSDONE 0x0001 /*!< command is complete */
#define SCF_CMD(flags) ((flags) & 0x00f0)
#define SCF_CMD_AC 0x0000
#define SCF_CMD_ADTC 0x0010
#define SCF_CMD_BC 0x0020
#define SCF_CMD_BCR 0x0030
#define SCF_CMD_READ 0x0040 /*!< read command (data expected) */
#define SCF_RSP_BSY 0x0100
#define SCF_RSP_136 0x0200
#define SCF_RSP_CRC 0x0400
#define SCF_RSP_IDX 0x0800
#define SCF_RSP_PRESENT 0x1000

/* response types */
#define SCF_RSP_R0 0 /*!< none */
#define SCF_RSP_R1 (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_IDX)
#define SCF_RSP_R1B (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_IDX | SCF_RSP_BSY)
#define SCF_RSP_R2 (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_136)
#define SCF_RSP_R3 (SCF_RSP_PRESENT)
#define SCF_RSP_R4 (SCF_RSP_PRESENT)
#define SCF_RSP_R5 (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_IDX)
#define SCF_RSP_R5B (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_IDX | SCF_RSP_BSY)
#define SCF_RSP_R6 (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_IDX)
#define SCF_RSP_R7 (SCF_RSP_PRESENT | SCF_RSP_CRC | SCF_RSP_IDX)
/* special flags */
#define SCF_WAIT_BUSY 0x2000 /*!< Wait for completion of card busy signal before returning */

#define SD_OCR_SDHC_CAP (1 << 30)

#define SD_OCR_VOL_MASK 0xFF8000 /* bits 23:15 */

/* For debug only */
static const char *const timingStr[] = {"UNKNOWN", "LEGACY", "HS", "SDR12", "SDR25", "SDR50",
					"SDR104", "DDR50", "DDR52", "HS200", "HS400"};

/* Progress of the current DMA transfer (ptr advances as descriptors
 * are refilled; desc_remaining counts outstanding descriptors)
 */
struct sdmmc_transfer_state {
	uint8_t *ptr;
	size_t size_remaining;
	size_t next_desc;
	size_t desc_remaining;
};

/* Event passed from the ISR to the transaction thread via message queue */
struct sdmmc_event {
	uint32_t header_DUMMY;  /* Reserved for system use (Zephyr message queue) */
	uint32_t sdmmc_status;  /* masked SDMMC interrupt status */
	uint32_t dma_status;    /* masked DMA interrupt status */
};

/**
 * Host contexts
 */
struct host_ctx {
	intr_handle_t intr_handle;
	struct k_msgq *event_queue;
};

/**
 * SD/MMC command information
 */
struct sdmmc_command {
	uint32_t opcode;       /*!< SD or MMC command index */
	uint32_t arg;          /*!< SD/MMC command argument */
	uint32_t response[4];  /*!< response buffer */
	void *data;            /*!< buffer to send or read into */
	size_t datalen;        /*!< length of data in the buffer */
	size_t buflen;         /*!< length of the buffer */
	size_t blklen;         /*!< block length */
	int flags;             /*!< see below */
	esp_err_t error;       /*!< error returned from transfer */
	uint32_t timeout_ms;   /*!< response timeout, in milliseconds */
};

/**
 * @brief Convert ESP to Zephyr error codes
 *
 * @param ret_esp ESP return value
 *
 * @return Zephyr error code
 */
static __attribute__((always_inline)) inline int err_esp2zep(int ret_esp)
{
	int ret;

	switch (ret_esp) {
	/* Treating the error codes most relevant to be individuated */
	case ESP_ERR_INVALID_ARG:
		ret = -EINVAL;
		break;
	case ESP_ERR_TIMEOUT:
		ret = -ETIMEDOUT;
		break;
	case ESP_ERR_NOT_FOUND:
		ret = -ENODEV; /* SD card not inserted (requires CD signal) */
		break;
	case ESP_ERR_INVALID_STATE:
		ret = -EACCES; /* SD card write-protected (requires WP signal) */
		break;
	default:
		ret = -EIO;
		break;
	}

	return ret;
}
```
/content/code_sandbox/drivers/sdhc/sdhc_esp32.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,197
```c
/*
 *
 */

/* ESP32 SDMMC host controller driver for the Zephyr SDHC API.
 * Command/data transactions are driven through an ISR-fed event queue
 * and a four-descriptor IDMAC chain (see process_events() state machine).
 * NOTE: this chunk is truncated mid-way through
 * sdmmc_host_do_transaction(); the remainder lies outside this view.
 */

#define DT_DRV_COMPAT espressif_esp32_sdhc_slot

#include <zephyr/kernel.h>
#include <zephyr/drivers/sdhc.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/logging/log.h>
#include <soc.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/clock_control.h>

/* ESP32 includes */
#include <esp_clk_tree.h>
#include <hal/sdmmc_ll.h>
#include <esp_intr_alloc.h>
#include <esp_timer.h>
#include <hal/gpio_hal.h>
#include <hal/rtc_io_hal.h>
#include <soc/sdmmc_reg.h>
#include <esp_memory_utils.h>

#include "sdhc_esp32.h"

LOG_MODULE_REGISTER(sdhc, CONFIG_SDHC_LOG_LEVEL);

#define SDMMC_SLOT_WIDTH_DEFAULT 1

/* Timeouts in microseconds for the host busy-wait loops below */
#define SDMMC_HOST_CLOCK_UPDATE_CMD_TIMEOUT_US 1000 * 1000
#define SDMMC_HOST_RESET_TIMEOUT_US 5000 * 1000
#define SDMMC_HOST_START_CMD_TIMEOUT_US 1000 * 1000
#define SDMMC_HOST_WAIT_EVENT_TIMEOUT_US 1000 * 1000

#define SDMMC_EVENT_QUEUE_LENGTH 32

#define SDMMC_TIMEOUT_MAX 0xFFFFFFFF

/* Number of DMA descriptors used for transfer.
 * Increasing this value above 4 doesn't improve performance for the usual case
 * of SD memory cards (most data transfers are multiples of 512 bytes).
 */
#define SDMMC_DMA_DESC_CNT 4

/* mask for card current state */
#define MMC_R1_CURRENT_STATE(resp) (((resp)[0] >> 9) & 0xf)

/* Per-instance configuration, populated from devicetree */
struct sdhc_esp32_config {
	int slot;
	const sdmmc_dev_t *sdio_hw;
	const struct device *clock_dev;
	const clock_control_subsys_t clock_subsys;
	const struct pinctrl_dev_config *pcfg;
	const struct gpio_dt_spec pwr_gpio;
	/*
	 * Pins below are only defined for ESP32. For SoC's with GPIO matrix feature
	 * please use pinctrl for pin configuration.
	 */
	const int clk_pin;
	const int cmd_pin;
	const int d0_pin;
	const int d1_pin;
	const int d2_pin;
	const int d3_pin;
	int irq_source;
	uint8_t bus_width_cfg;
	struct sdhc_host_props props;
};

/* Per-instance runtime state */
struct sdhc_esp32_data {
	uint8_t bus_width;  /* Bus width used by the slot (can change during execution) */
	uint32_t bus_clock; /* Value in Hz. ESP-IDF functions use kHz instead */
	enum sdhc_power power_mode;
	enum sdhc_timing_mode timing;
	struct host_ctx s_host_ctx;
	struct k_mutex s_request_mutex;
	bool s_is_app_cmd;
	sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
	struct sdmmc_transfer_state s_cur_transfer;
};

/**********************************************************************
 * ESP32 low level functions
 **********************************************************************/

/* We have two clock divider stages:
 * - one is the clock generator which drives SDMMC peripheral,
 *   it can be configured using sdio_hw->clock register. It can generate
 *   frequencies 160MHz/(N + 1), where 0 < N < 16, I.e. from 10 to 80 MHz.
 * - 4 clock dividers inside SDMMC peripheral, which can divide clock
 *   from the first stage by 2 * M, where 0 < M < 255
 *   (they can also be bypassed).
 *
 * For cards which aren't UHS-1 or UHS-2 cards, which we don't support,
 * maximum bus frequency in high speed (HS) mode is 50 MHz.
 * Note: for non-UHS-1 cards, HS mode is optional.
 * Default speed (DS) mode is mandatory, it works up to 25 MHz.
 * Whether the card supports HS or not can be determined using TRAN_SPEED
 * field of card's CSD register.
 *
 * 50 MHz can not be obtained exactly, closest we can get is 53 MHz.
 *
 * The first stage divider is set to the highest possible value for the given
 * frequency, and the second stage dividers are used if division factor
 * is >16.
 *
 * Of the second stage dividers, div0 is used for card 0, and div1 is used
 * for card 1.
 */
static int sdmmc_host_set_clk_div(sdmmc_dev_t *sdio_hw, int div)
{
	/* Valid range for the first-stage divider per the comment above */
	if (!((div > 1) && (div <= 16))) {
		LOG_ERR("Invalid parameter 'div'");
		return ESP_ERR_INVALID_ARG;
	}

	sdmmc_ll_set_clock_div(sdio_hw, div);
	sdmmc_ll_select_clk_source(sdio_hw, SDMMC_CLK_SRC_DEFAULT);
	sdmmc_ll_init_phase_delay(sdio_hw);

	/* Wait for the clock to propagate */
	esp_rom_delay_us(10);

	return 0;
}

/* Enables the internal DMA engine and its completion interrupts */
static void sdmmc_host_dma_init(sdmmc_dev_t *sdio_hw)
{
	sdio_hw->ctrl.dma_enable = 1;
	sdio_hw->bmod.val = 0;
	sdio_hw->bmod.sw_reset = 1;
	sdio_hw->idinten.ni = 1;
	sdio_hw->idinten.ri = 1;
	sdio_hw->idinten.ti = 1;
}

/* Halts the internal DMA engine (used on error/abort paths) */
static void sdmmc_host_dma_stop(sdmmc_dev_t *sdio_hw)
{
	sdio_hw->ctrl.use_internal_dma = 0;
	sdio_hw->ctrl.dma_reset = 1;
	sdio_hw->bmod.fb = 0;
	sdio_hw->bmod.enable = 0;
}

/* One-time init of the per-instance transaction lock/state */
static int sdmmc_host_transaction_handler_init(struct sdhc_esp32_data *data)
{
	k_mutex_init(&data->s_request_mutex);
	data->s_is_app_cmd = false;

	return 0;
}

/* Blocks up to timeout_ms for the next ISR-posted event */
static int sdmmc_host_wait_for_event(struct sdhc_esp32_data *data, int timeout_ms,
				     struct sdmmc_event *out_event)
{
	if (!out_event) {
		return ESP_ERR_INVALID_ARG;
	}

	if (!data->s_host_ctx.event_queue) {
		return ESP_ERR_INVALID_STATE;
	}

	int ret = k_msgq_get(data->s_host_ctx.event_queue, out_event, K_MSEC(timeout_ms));

	return ret;
}

static int handle_idle_state_events(struct sdhc_esp32_data *data)
{
	/* Handle any events which have happened in between transfers.
	 * Under current assumptions (no SDIO support) only card detect events
	 * can happen in the idle state.
	 */
	struct sdmmc_event evt;
	int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
	int64_t t0 = esp_timer_get_time();
	int64_t t1 = 0;

	while (sdmmc_host_wait_for_event(data, 0, &evt) == 0) {

		if (evt.sdmmc_status & SDMMC_INTMASK_CD) {
			LOG_DBG("card detect event");
			evt.sdmmc_status &= ~SDMMC_INTMASK_CD;
		}

		if (evt.sdmmc_status != 0 || evt.dma_status != 0) {
			LOG_DBG("%s unhandled: %08" PRIx32 " %08" PRIx32, __func__,
				evt.sdmmc_status, evt.dma_status);
		}

		/* Loop timeout */
		t1 = esp_timer_get_time();

		if (t1 - t0 > SDMMC_HOST_WAIT_EVENT_TIMEOUT_US) {
			return ESP_ERR_TIMEOUT;
		}

		/* Exponential backoff before yielding to other threads */
		if (t1 - t0 > yield_delay_us) {
			yield_delay_us *= 2;
			k_sleep(K_MSEC(1));
		}
	}

	return 0;
}

/* Fills up to num_desc IDMAC descriptors from the current transfer.
 * Stops early when there is no data left or when the next descriptor is
 * still owned by the DMA engine.
 */
static void fill_dma_descriptors(struct sdhc_esp32_data *data, size_t num_desc)
{
	for (size_t i = 0; i < num_desc; ++i) {
		if (data->s_cur_transfer.size_remaining == 0) {
			return;
		}

		const size_t next = data->s_cur_transfer.next_desc;
		sdmmc_desc_t *desc = &data->s_dma_desc[next];

		if (desc->owned_by_idmac) {
			return;
		}

		size_t size_to_fill = (data->s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN)
					      ? data->s_cur_transfer.size_remaining
					      : SDMMC_DMA_MAX_BUF_LEN;
		bool last = size_to_fill == data->s_cur_transfer.size_remaining;

		desc->last_descriptor = last;
		desc->second_address_chained = 1;
		desc->owned_by_idmac = 1;
		desc->buffer1_ptr = data->s_cur_transfer.ptr;
		desc->next_desc_ptr =
			(last) ? NULL : &data->s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];

		/* Buffer length must be a multiple of 4 (or < 4); bail out
		 * silently otherwise — callers validate sizes beforehand.
		 */
		if (!((size_to_fill < 4) || ((size_to_fill % 4) == 0))) {
			return;
		}

		desc->buffer1_size = (size_to_fill + 3) & (~3);

		data->s_cur_transfer.size_remaining -= size_to_fill;
		data->s_cur_transfer.ptr += size_to_fill;
		data->s_cur_transfer.next_desc =
			(data->s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;

		LOG_DBG("fill %d desc=%d rem=%d next=%d last=%d sz=%d", num_desc, next,
			data->s_cur_transfer.size_remaining, data->s_cur_transfer.next_desc,
			desc->last_descriptor, desc->buffer1_size);
	}
}

/* Pokes the IDMAC to re-scan the descriptor chain */
static void sdmmc_host_dma_resume(sdmmc_dev_t *sdio_hw)
{
	sdmmc_ll_poll_demand(sdio_hw);
}

static void sdmmc_host_dma_prepare(sdmmc_dev_t *sdio_hw, sdmmc_desc_t *desc, size_t block_size,
				   size_t data_size)
{
	/* Set size of data and DMA descriptor pointer */
	sdmmc_ll_set_data_transfer_len(sdio_hw, data_size);
	sdmmc_ll_set_block_size(sdio_hw, block_size);
	sdmmc_ll_set_desc_addr(sdio_hw, (uint32_t)desc);

	/* Enable everything needed to use DMA */
	sdmmc_ll_enable_dma(sdio_hw, true);
	sdmmc_host_dma_resume(sdio_hw);
}

/* Writes a command to the controller after validating slot, card
 * presence and write protection; waits for any in-flight command to
 * clear first.
 */
static int sdmmc_host_start_command(sdmmc_dev_t *sdio_hw, int slot, sdmmc_hw_cmd_t cmd,
				    uint32_t arg)
{
	if (!(slot == 0 || slot == 1)) {
		return ESP_ERR_INVALID_ARG;
	}
	if (!sdmmc_ll_is_card_detected(sdio_hw, slot)) {
		return ESP_ERR_NOT_FOUND;
	}
	if (cmd.data_expected && cmd.rw && sdmmc_ll_is_card_write_protected(sdio_hw, slot)) {
		return ESP_ERR_INVALID_STATE;
	}
	/* Outputs should be synchronized to cclk_out */
	cmd.use_hold_reg = 1;

	int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
	int64_t t0 = esp_timer_get_time();
	int64_t t1 = 0;

	while (sdio_hw->cmd.start_command == 1) {
		t1 = esp_timer_get_time();

		if (t1 - t0 > SDMMC_HOST_START_CMD_TIMEOUT_US) {
			return ESP_ERR_TIMEOUT;
		}
		if (t1 - t0 > yield_delay_us) {
			yield_delay_us *= 2;
			k_sleep(K_MSEC(1));
		}
	}

	sdio_hw->cmdarg = arg;
	cmd.card_num = slot;
	cmd.start_command = 1;
	sdio_hw->cmd = cmd;

	return ESP_OK;
}

/* Copies the response registers into cmd->response and translates the
 * command-phase interrupt status into cmd->error.
 */
static void process_command_response(sdmmc_dev_t *sdio_hw, uint32_t status,
				     struct sdmmc_command *cmd)
{
	if (cmd->flags & SCF_RSP_PRESENT) {
		if (cmd->flags & SCF_RSP_136) {
			/* Destination is 4-byte aligned, can memcopy from peripheral registers */
			memcpy(cmd->response, (uint32_t *)sdio_hw->resp, 4 * sizeof(uint32_t));
		} else {
			cmd->response[0] = sdio_hw->resp[0];
			cmd->response[1] = 0;
			cmd->response[2] = 0;
			cmd->response[3] = 0;
		}
	}

	int err = ESP_OK;

	if (status & SDMMC_INTMASK_RTO) {
		/* response timeout is only possible when response is expected */
		if (!(cmd->flags & SCF_RSP_PRESENT)) {
			return;
		}

		err = ESP_ERR_TIMEOUT;
	} else if ((cmd->flags & SCF_RSP_CRC) && (status & SDMMC_INTMASK_RCRC)) {
		err = ESP_ERR_INVALID_CRC;
	} else if (status & SDMMC_INTMASK_RESP_ERR) {
		err = ESP_ERR_INVALID_RESPONSE;
	}

	if (err != ESP_OK) {
		cmd->error = err;
		if (cmd->data) {
			sdmmc_host_dma_stop(sdio_hw);
		}
		LOG_DBG("%s: error 0x%x  (status=%08" PRIx32 ")", __func__, err, status);
	}
}

/* Translates the data-phase interrupt status into cmd->error and resets
 * the FIFO/DMA on failure.
 */
static void process_data_status(sdmmc_dev_t *sdio_hw, uint32_t status, struct sdmmc_command *cmd)
{
	if (status & SDMMC_DATA_ERR_MASK) {
		if (status & SDMMC_INTMASK_DTO) {
			cmd->error = ESP_ERR_TIMEOUT;
		} else if (status & SDMMC_INTMASK_DCRC) {
			cmd->error = ESP_ERR_INVALID_CRC;
		} else if ((status & SDMMC_INTMASK_EBE) && (cmd->flags & SCF_CMD_READ) == 0) {
			cmd->error = ESP_ERR_TIMEOUT;
		} else {
			cmd->error = ESP_FAIL;
		}
		sdio_hw->ctrl.fifo_reset = 1;
	}

	if (cmd->error != 0) {
		if (cmd->data) {
			sdmmc_host_dma_stop(sdio_hw);
		}
		LOG_DBG("%s: error 0x%x (status=%08" PRIx32 ")", __func__, cmd->error, status);
	}
}

/* Tests *state against mask and clears those bits; returns whether any
 * were set.
 */
static inline bool mask_check_and_clear(uint32_t *state, uint32_t mask)
{
	bool ret = ((*state) & mask) != 0;

	*state &= ~mask;

	return ret;
}

static size_t get_free_descriptors_count(struct sdhc_esp32_data *data)
{
	const size_t next = data->s_cur_transfer.next_desc;
	size_t count = 0;

	/* Starting with the current DMA descriptor, count the number of
	 * descriptors which have 'owned_by_idmac' set to 0. These are the
	 * descriptors already processed by the DMA engine.
	 */
	for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
		sdmmc_desc_t *desc = &data->s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];

		if (desc->owned_by_idmac) {
			break;
		}
		++count;
		if (desc->next_desc_ptr == NULL) {
			/* final descriptor in the chain */
			break;
		}
	}

	return count;
}

/* Advances the request state machine (IDLE / SENDING_CMD / SENDING_DATA
 * / BUSY) using one batch of interrupt events. Bits consumed are cleared
 * from evt; whatever remains is handed back via unhandled_events.
 */
static int process_events(const struct device *dev, struct sdmmc_event evt,
			  struct sdmmc_command *cmd, enum sdmmc_req_state *pstate,
			  struct sdmmc_event *unhandled_events)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	const char *const s_state_names[] __attribute__((unused)) = {"IDLE", "SENDING_CMD",
								     "SENDIND_DATA", "BUSY"};
	struct sdmmc_event orig_evt = evt;

	LOG_DBG("%s: state=%s evt=%" PRIx32 " dma=%" PRIx32, __func__, s_state_names[*pstate],
		evt.sdmmc_status, evt.dma_status);

	enum sdmmc_req_state next_state = *pstate;
	enum sdmmc_req_state state = (enum sdmmc_req_state) -1;

	/* Iterate until the state stops changing, so one event batch can
	 * drive several transitions (e.g. CMD done -> data phase).
	 */
	while (next_state != state) {
		state = next_state;
		switch (state) {
		case SDMMC_IDLE:
			break;
		case SDMMC_SENDING_CMD:
			if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_CMD_ERR_MASK)) {
				process_command_response(sdio_hw, orig_evt.sdmmc_status, cmd);
				/*
				 * In addition to the error interrupt, CMD_DONE will also be
				 * reported. It may occur immediately (in the same sdmmc_event_t) or
				 * be delayed until the next interrupt
				 */
			}
			if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_CMD_DONE)) {
				process_command_response(sdio_hw, orig_evt.sdmmc_status, cmd);

				if (cmd->error != ESP_OK) {
					next_state = SDMMC_IDLE;
					break;
				}

				if (cmd->data == NULL) {
					next_state = SDMMC_IDLE;
				} else {
					next_state = SDMMC_SENDING_DATA;
				}
			}
			break;
		case SDMMC_SENDING_DATA:
			if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_DATA_ERR_MASK)) {
				process_data_status(sdio_hw, orig_evt.sdmmc_status, cmd);
				sdmmc_host_dma_stop(sdio_hw);
			}
			if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {

				data->s_cur_transfer.desc_remaining--;

				if (data->s_cur_transfer.size_remaining) {

					int desc_to_fill = get_free_descriptors_count(data);

					fill_dma_descriptors(data, desc_to_fill);
					sdmmc_host_dma_resume(sdio_hw);
				}
				if (data->s_cur_transfer.desc_remaining == 0) {
					next_state = SDMMC_BUSY;
				}
			}
			if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
				/* On start bit error, DATA_DONE interrupt will not be generated */
				next_state = SDMMC_IDLE;
				break;
			}
			break;
		case SDMMC_BUSY:
			if (!mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_DATA_OVER)) {
				break;
			}
			process_data_status(sdio_hw, orig_evt.sdmmc_status, cmd);
			next_state = SDMMC_IDLE;
			break;
		}
		LOG_DBG("%s state=%s next_state=%s", __func__, s_state_names[state],
			s_state_names[next_state]);
	}

	*pstate = state;
	*unhandled_events = evt;

	return ESP_OK;
}

/* Waits for one ISR event, merges in previously-unhandled event bits,
 * and feeds the result into process_events().
 */
static int handle_event(const struct device *dev, struct sdmmc_command *cmd,
			enum sdmmc_req_state *state, struct sdmmc_event *unhandled_events)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	struct sdmmc_event event;

	int err = sdmmc_host_wait_for_event(data, cmd->timeout_ms, &event);

	if (err != 0) {
		LOG_ERR("sdmmc_handle_event: sdmmc_host_wait_for_event returned 0x%x, timeout %d "
			"ms",
			err, cmd->timeout_ms);
		if (err == -EAGAIN) {
			sdmmc_host_dma_stop(sdio_hw);
		}
		return err;
	}

	LOG_DBG("sdmmc_handle_event: event %08" PRIx32 " %08" PRIx32 ", unhandled %08" PRIx32
		" %08" PRIx32,
		event.sdmmc_status, event.dma_status, unhandled_events->sdmmc_status,
		unhandled_events->dma_status);

	event.sdmmc_status |= unhandled_events->sdmmc_status;
	event.dma_status |= unhandled_events->dma_status;

	process_events(dev, event, cmd, state, unhandled_events);

	LOG_DBG("sdmmc_handle_event: events unhandled: %08" PRIx32 " %08" PRIx32,
		unhandled_events->sdmmc_status, unhandled_events->dma_status);

	return ESP_OK;
}

static bool wait_for_busy_cleared(const sdmmc_dev_t *sdio_hw, uint32_t timeout_ms)
{
	if (timeout_ms == 0) {
		return !(sdio_hw->status.data_busy == 1);
	}

	/* It would have been nice to do this without polling, however the peripheral
	 * can only generate Busy Clear Interrupt for data write commands, and waiting
	 * for busy clear is mostly needed for other commands such as MMC_SWITCH.
	 */
	uint32_t timeout_ticks = k_ms_to_ticks_ceil32(timeout_ms);

	while (timeout_ticks-- > 0) {
		if (!(sdio_hw->status.data_busy == 1)) {
			return true;
		}
		k_sleep(K_MSEC(1));
	}

	return false;
}

static bool cmd_needs_auto_stop(const struct sdmmc_command *cmd)
{
	/* SDMMC host needs an "auto stop" flag for the following commands: */
	return cmd->datalen > 0 && (cmd->opcode == SD_WRITE_MULTIPLE_BLOCK ||
				    cmd->opcode == SD_READ_MULTIPLE_BLOCK);
}

/* Translates an sdmmc_command into the controller's hardware command
 * register layout.
 */
static sdmmc_hw_cmd_t make_hw_cmd(struct sdmmc_command *cmd)
{
	sdmmc_hw_cmd_t res = {0};

	res.cmd_index = cmd->opcode;
	if (cmd->opcode == SD_STOP_TRANSMISSION) {
		res.stop_abort_cmd = 1;
	} else if (cmd->opcode == SD_GO_IDLE_STATE) {
		res.send_init = 1;
	} else {
		res.wait_complete = 1;
	}
	if (cmd->opcode == SD_GO_IDLE_STATE) {
		res.send_init = 1;
	}
	if (cmd->flags & SCF_RSP_PRESENT) {
		res.response_expect = 1;
		if (cmd->flags & SCF_RSP_136) {
			res.response_long = 1;
		}
	}
	if (cmd->flags & SCF_RSP_CRC) {
		res.check_response_crc = 1;
	}
	if (cmd->data) {
		res.data_expected = 1;
		if ((cmd->flags & SCF_CMD_READ) == 0) {
			res.rw = 1;
		}
		if ((cmd->datalen % cmd->blklen) != 0) {
			return res; /* Error situation, data will be invalid */
		}
		res.send_auto_stop = cmd_needs_auto_stop(cmd) ? 1 : 0;
	}
	LOG_DBG("%s: opcode=%d, rexp=%d, crc=%d, auto_stop=%d", __func__, res.cmd_index,
		res.response_expect, res.check_response_crc, res.send_auto_stop);
	return res;
}

/* Runs one full command/data transaction under the request mutex.
 * NOTE: truncated in this view — the function continues past the end of
 * the visible chunk.
 */
static int sdmmc_host_do_transaction(const struct device *dev, int slot,
				     struct sdmmc_command *cmdinfo)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	int ret;

	if (k_mutex_lock(&data->s_request_mutex, K_FOREVER) != 0) {
		return ESP_ERR_NO_MEM;
	}

	/* dispose of any events which happened asynchronously */
	handle_idle_state_events(data);

	/* convert cmdinfo to hardware register value */
	sdmmc_hw_cmd_t hw_cmd = make_hw_cmd(cmdinfo);

	if (cmdinfo->data) {
		/* Length should be either <4 or >=4 and =0 (mod 4) */
		if ((cmdinfo->datalen >= 4) && (cmdinfo->datalen % 4) != 0) {
			LOG_DBG("%s: invalid size: total=%d", __func__, cmdinfo->datalen);
			ret = ESP_ERR_INVALID_SIZE;
			goto out;
		}

		if ((((intptr_t)cmdinfo->data % 4) != 0) || !esp_ptr_dma_capable(cmdinfo->data)) {
			LOG_DBG("%s: buffer %p can not be used for DMA", __func__, cmdinfo->data);
			ret = ESP_ERR_INVALID_ARG;
			goto out;
		}

		/* this clears "owned by IDMAC" bits */
		memset(data->s_dma_desc, 0, sizeof(data->s_dma_desc));

		/* initialize first descriptor */
		data->s_dma_desc[0].first_descriptor = 1;

		/* save transfer info */
		data->s_cur_transfer.ptr = (uint8_t *)cmdinfo->data;
		data->s_cur_transfer.size_remaining = cmdinfo->datalen;
		data->s_cur_transfer.next_desc = 0;
		data->s_cur_transfer.desc_remaining =
			(cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;

		/* prepare descriptors */
		fill_dma_descriptors(data, SDMMC_DMA_DESC_CNT);

		/* write transfer info into hardware */
		sdmmc_host_dma_prepare(sdio_hw, &data->s_dma_desc[0], cmdinfo->blklen,
				       cmdinfo->datalen);
	}

	/* write command into hardware, this
also sends the command to the card */
	ret = sdmmc_host_start_command(sdio_hw, slot, hw_cmd, cmdinfo->arg);
	if (ret != ESP_OK) {
		goto out;
	}
	/* process events until transfer is complete */
	cmdinfo->error = ESP_OK;

	enum sdmmc_req_state state = SDMMC_SENDING_CMD;
	struct sdmmc_event unhandled_events = {0};

	while (state != SDMMC_IDLE) {
		ret = handle_event(dev, cmdinfo, &state, &unhandled_events);
		if (ret != 0) {
			break;
		}
	}
	/* Optionally wait for the card to release the DAT busy line */
	if (ret == 0 && (cmdinfo->flags & SCF_WAIT_BUSY)) {
		if (!wait_for_busy_cleared(sdio_hw, cmdinfo->timeout_ms)) {
			ret = ESP_ERR_TIMEOUT;
		}
	}
	/* Remember whether the next command is an application command (ACMD) */
	data->s_is_app_cmd = (ret == ESP_OK && cmdinfo->opcode == SD_APP_CMD);
out:
	k_mutex_unlock(&data->s_request_mutex);
	return ret;
}

/*
 * Issue a CIU clock-update command and wait for the controller to accept it.
 * Retries on HLE (hardware locked) errors as required by the peripheral.
 */
static int sdmmc_host_clock_update_command(sdmmc_dev_t *sdio_hw, int slot)
{
	int ret;
	bool repeat = true;
	/* Clock update command (not a real command; just updates CIU registers) */
	sdmmc_hw_cmd_t cmd_val = {.card_num = slot, .update_clk_reg = 1, .wait_complete = 1};

	while (repeat) {
		ret = sdmmc_host_start_command(sdio_hw, slot, cmd_val, 0);
		if (ret != 0) {
			return ret;
		}

		int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
		int64_t t0 = esp_timer_get_time();
		int64_t t1 = 0;

		while (true) {
			t1 = esp_timer_get_time();
			if (t1 - t0 > SDMMC_HOST_CLOCK_UPDATE_CMD_TIMEOUT_US) {
				return ESP_ERR_TIMEOUT;
			}
			/* Sending clock update command to the CIU can generate HLE error */
			/* According to the manual, this is okay and we must retry the command */
			if (sdio_hw->rintsts.hle) {
				sdio_hw->rintsts.hle = 1; /* write-1-to-clear */
				repeat = true;
				break;
			}
			/* When the command is accepted by CIU, start_command bit will be */
			/* cleared in sdio_hw->cmd register */
			if (sdio_hw->cmd.start_command == 0) {
				repeat = false;
				break;
			}
			if (t1 - t0 > yield_delay_us) {
				yield_delay_us *= 2;
				k_sleep(K_MSEC(1));
			}
		}
	}
	return 0;
}

/*
 * Compute host and card clock dividers for a requested card frequency (kHz).
 * Assumes a fixed 160 MHz source clock (asserted below).
 */
void sdmmc_host_get_clk_dividers(uint32_t freq_khz, int *host_div, int *card_div)
{
	uint32_t clk_src_freq_hz = 0;

	esp_clk_tree_src_get_freq_hz(SDMMC_CLK_SRC_DEFAULT, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED,
				     &clk_src_freq_hz);
	assert(clk_src_freq_hz == (160 * 1000 * 1000));

	/* Calculate new dividers */
	if (freq_khz >= SDMMC_FREQ_HIGHSPEED) {
		*host_div = 4; /* 160 MHz / 4 = 40 MHz */
		*card_div = 0;
	} else if (freq_khz == SDMMC_FREQ_DEFAULT) {
		*host_div = 8; /* 160 MHz / 8 = 20 MHz */
		*card_div = 0;
	} else if (freq_khz == SDMMC_FREQ_PROBING) {
		*host_div = 10; /* 160 MHz / 10 / (20 * 2) = 400 kHz */
		*card_div = 20;
	} else {
		/*
		 * for custom frequencies use maximum range of host divider (1-16), find the closest
		 * <= div. combination if exceeded, combine with the card divider to keep reasonable
		 * precision (applies mainly to low frequencies) effective frequency range: 400 kHz
		 * - 32 MHz (32.1 - 39.9 MHz cannot be covered with given divider scheme)
		 */
		*host_div = (clk_src_freq_hz) / (freq_khz * 1000);
		if (*host_div > 15) {
			*host_div = 2;
			*card_div = (clk_src_freq_hz / 2) / (2 * freq_khz * 1000);
			/* Round the card divider up so we never exceed the target */
			if (((clk_src_freq_hz / 2) % (2 * freq_khz * 1000)) > 0) {
				(*card_div)++;
			}
		} else if ((clk_src_freq_hz % (freq_khz * 1000)) > 0) {
			(*host_div)++;
		}
	}
}

/* Back-compute the real card frequency (kHz) produced by the given dividers */
static int sdmmc_host_calc_freq(const int host_div, const int card_div)
{
	uint32_t clk_src_freq_hz = 0;

	esp_clk_tree_src_get_freq_hz(SDMMC_CLK_SRC_DEFAULT, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED,
				     &clk_src_freq_hz);
	assert(clk_src_freq_hz == (160 * 1000 * 1000));
	return clk_src_freq_hz / host_div / ((card_div == 0) ? 1 : card_div * 2) / 1000;
}

/*
 * Set the card clock for a slot: disable clock, program dividers, re-enable,
 * issuing a CIU clock-update command after each step as the hardware requires.
 * Also programs the data/response timeouts derived from the new frequency.
 */
int sdmmc_host_set_card_clk(sdmmc_dev_t *sdio_hw, int slot, uint32_t freq_khz)
{
	if (!(slot == 0 || slot == 1)) {
		return ESP_ERR_INVALID_ARG;
	}

	/* Disable clock first */
	sdmmc_ll_enable_card_clock(sdio_hw, slot, false);

	int err = sdmmc_host_clock_update_command(sdio_hw, slot);

	if (err != 0) {
		LOG_ERR("disabling clk failed");
		LOG_ERR("%s: sdmmc_host_clock_update_command returned 0x%x", __func__, err);
		return err;
	}

	int host_div = 0; /* clock divider of the host (sdio_hw->clock) */
	int card_div = 0; /* 1/2 of card clock divider (sdio_hw->clkdiv) */

	sdmmc_host_get_clk_dividers(freq_khz, &host_div, &card_div);

	int real_freq = sdmmc_host_calc_freq(host_div, card_div);

	LOG_DBG("slot=%d host_div=%d card_div=%d freq=%dkHz (max %" PRIu32 "kHz)", slot, host_div,
		card_div, real_freq, freq_khz);

	/* Program card clock settings, send them to the CIU */
	sdmmc_ll_set_card_clock_div(sdio_hw, slot, card_div);
	err = sdmmc_host_set_clk_div(sdio_hw, host_div);
	if (err != 0) {
		return err;
	}
	err = sdmmc_host_clock_update_command(sdio_hw, slot);
	if (err != 0) {
		LOG_ERR("setting clk div failed");
		LOG_ERR("%s: sdmmc_host_clock_update_command returned 0x%x", __func__, err);
		return err;
	}

	/* Re-enable clocks */
	sdmmc_ll_enable_card_clock(sdio_hw, slot, true);
	sdmmc_ll_enable_card_clock_low_power(sdio_hw, slot, true);
	err = sdmmc_host_clock_update_command(sdio_hw, slot);
	if (err != 0) {
		LOG_ERR("re-enabling clk failed");
		LOG_ERR("%s: sdmmc_host_clock_update_command returned 0x%x", __func__, err);
		return err;
	}

	/* set data timeout */
	const uint32_t data_timeout_ms = 100;
	uint32_t data_timeout_cycles = data_timeout_ms * freq_khz;

	sdmmc_ll_set_data_timeout(sdio_hw, data_timeout_cycles);
	/* always set response timeout to highest value, it's small enough anyway */
	sdmmc_ll_set_response_timeout(sdio_hw, 255);
	return 0;
}

/* Program the data bus width (1 or 4 bits) for a slot */
int sdmmc_host_set_bus_width(sdmmc_dev_t *sdio_hw, int slot, size_t width)
{
	if (!(slot == 0 || slot == 1)) {
		return ESP_ERR_INVALID_ARG;
	}

	const
uint16_t mask = BIT(slot);

	if (width == 1) {
		sdio_hw->ctype.card_width_8 &= ~mask;
		sdio_hw->ctype.card_width &= ~mask;
	} else if (width == 4) {
		sdio_hw->ctype.card_width_8 &= ~mask;
		sdio_hw->ctype.card_width |= mask;
	} else {
		/* 8-bit is not supported by this driver */
		return ESP_ERR_INVALID_ARG;
	}
	LOG_DBG("slot=%d width=%d", slot, width);
	return ESP_OK;
}

/*
 * Route one SD pin through the IOMUX (ESP32 dedicated pins): enable pull-up,
 * disable pull-down, select the SDMMC function and set drive strength.
 */
static void configure_pin_iomux(int gpio_num)
{
	const int sdmmc_func = SDMMC_LL_IOMUX_FUNC;
	const int drive_strength = 3;

	if (gpio_num == GPIO_NUM_NC) {
		return; /* parameter check*/
	}

	int rtc_num = rtc_io_num_map[gpio_num];

	rtcio_hal_pulldown_disable(rtc_num);
	rtcio_hal_pullup_enable(rtc_num);

	uint32_t reg = GPIO_PIN_MUX_REG[gpio_num];

	PIN_INPUT_ENABLE(reg);
	gpio_hal_iomux_func_sel(reg, sdmmc_func);
	PIN_SET_DRV(reg, drive_strength);
}

/**********************************************************************
 * Zephyr API
 **********************************************************************/

/*
 * Reset USDHC controller
 */
static int sdhc_esp32_reset(const struct device *dev)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;

	/* Set reset bits */
	sdio_hw->ctrl.controller_reset = 1;
	sdio_hw->ctrl.dma_reset = 1;
	sdio_hw->ctrl.fifo_reset = 1;

	/* Wait for the reset bits to be cleared by hardware */
	int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
	int64_t t0 = esp_timer_get_time();
	int64_t t1 = 0;

	while (sdio_hw->ctrl.controller_reset || sdio_hw->ctrl.fifo_reset ||
	       sdio_hw->ctrl.dma_reset) {
		t1 = esp_timer_get_time();
		if (t1 - t0 > SDMMC_HOST_RESET_TIMEOUT_US) {
			return -ETIMEDOUT;
		}
		if (t1 - t0 > yield_delay_us) {
			yield_delay_us *= 2;
			k_busy_wait(1);
		}
	}

	/* Reset carried out successfully */
	return 0;
}

/*
 * Set SDHC io properties
 * Applies clock frequency, bus width, card power and timing changes,
 * skipping any setting that is already in effect (cached in dev data).
 */
static int sdhc_esp32_set_io(const struct device *dev, struct sdhc_io *ios)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	struct sdhc_esp32_data *data = dev->data;
	uint8_t bus_width;
	int ret = 0;

	LOG_INF("SDHC I/O: slot: %d, bus width %d, clock %dHz, card power %s, voltage %s",
		cfg->slot, ios->bus_width, ios->clock,
		ios->power_mode == SDHC_POWER_ON ? "ON" : "OFF",
		ios->signal_voltage == SD_VOL_1_8_V ? "1.8V" : "3.3V");

	if (ios->clock) {
		/* Check for frequency boundaries supported by host */
		if (ios->clock > cfg->props.f_max || ios->clock < cfg->props.f_min) {
			LOG_ERR("Proposed clock outside supported host range");
			return -EINVAL;
		}

		if (data->bus_clock != (uint32_t)ios->clock) {
			/* Try setting new clock */
			ret = sdmmc_host_set_card_clk(sdio_hw, cfg->slot, (ios->clock / 1000));
			if (ret == 0) {
				LOG_INF("Bus clock successfully set to %d kHz", ios->clock / 1000);
			} else {
				LOG_ERR("Error configuring card clock");
				return err_esp2zep(ret);
			}
			data->bus_clock = (uint32_t)ios->clock;
		}
	}

	if (ios->bus_width > 0) {
		/* Set bus width */
		switch (ios->bus_width) {
		case SDHC_BUS_WIDTH1BIT:
			bus_width = 1;
			break;
		case SDHC_BUS_WIDTH4BIT:
			bus_width = 4;
			break;
		default:
			return -ENOTSUP;
		}

		if (data->bus_width != bus_width) {
			ret = sdmmc_host_set_bus_width(sdio_hw, cfg->slot, bus_width);
			if (ret == 0) {
				LOG_INF("Bus width set successfully to %d bit", bus_width);
			} else {
				LOG_ERR("Error configuring bus width");
				return err_esp2zep(ret);
			}
			data->bus_width = bus_width;
		}
	}

	/* Toggle card power supply */
	if ((data->power_mode != ios->power_mode) && (cfg->pwr_gpio.port)) {
		if (ios->power_mode == SDHC_POWER_OFF) {
			gpio_pin_set_dt(&cfg->pwr_gpio, 0);
		} else if (ios->power_mode == SDHC_POWER_ON) {
			gpio_pin_set_dt(&cfg->pwr_gpio, 1);
		}
		data->power_mode = ios->power_mode;
	}

	if (ios->timing > 0) {
		/* Set I/O timing */
		if (data->timing != ios->timing) {
			switch (ios->timing) {
			case SDHC_TIMING_LEGACY:
			case SDHC_TIMING_HS:
				sdmmc_ll_enable_ddr_mode(sdio_hw, cfg->slot, false);
				break;
			case SDHC_TIMING_DDR50:
			case SDHC_TIMING_DDR52:
				/* Enable DDR mode */
				sdmmc_ll_enable_ddr_mode(sdio_hw, cfg->slot, true);
				LOG_INF("DDR mode enabled");
				break;
			case SDHC_TIMING_SDR12:
			case SDHC_TIMING_SDR25:
				sdmmc_ll_enable_ddr_mode(sdio_hw, cfg->slot, false);
				break;
			case SDHC_TIMING_SDR50:
			case SDHC_TIMING_HS400:
			case SDHC_TIMING_SDR104:
			case SDHC_TIMING_HS200:
			default:
				LOG_ERR("Timing mode not supported for this device");
				ret = -ENOTSUP;
				break;
			}
			LOG_INF("Bus timing successfully changed to %s", timingStr[ios->timing]);
			data->timing = ios->timing;
		}
	}

	return ret;
}

/*
 * Return 0 if card is not busy, 1 if it is
 */
static int sdhc_esp32_card_busy(const struct device *dev)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	const sdmmc_dev_t *sdio_hw = cfg->sdio_hw;

	return (sdio_hw->status.data_busy == 1);
}

/*
 * Send CMD or CMD/DATA via SDHC
 * Converts the Zephyr sdhc_command/sdhc_data pair into the ESP sdmmc_command
 * layout (flags/response type per opcode) and runs the transaction, retrying
 * up to cmd->retries additional times on failure.
 */
static int sdhc_esp32_request(const struct device *dev, struct sdhc_command *cmd,
			      struct sdhc_data *data)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	const sdmmc_dev_t *sdio_hw = cfg->sdio_hw;
	int retries = (int)(cmd->retries + 1); /* first try plus retries */
	uint32_t timeout_cfg;
	int ret_esp;
	int ret = 0;

	/* convert command structures Zephyr vs ESP */
	struct sdmmc_command esp_cmd = {
		.opcode = cmd->opcode,
		.arg = cmd->arg,
	};

	if (data) {
		esp_cmd.data = data->data;
		esp_cmd.blklen = data->block_size;
		esp_cmd.datalen = (data->blocks * data->block_size);
		esp_cmd.buflen = esp_cmd.datalen;
		timeout_cfg = data->timeout_ms;
	} else {
		timeout_cfg = cmd->timeout_ms;
	}

	/* setting timeout according to command type */
	if (cmd->timeout_ms == SDHC_TIMEOUT_FOREVER) {
		esp_cmd.timeout_ms = SDMMC_TIMEOUT_MAX;
	} else {
		esp_cmd.timeout_ms = timeout_cfg;
	}

	/*
	 * Handle flags and arguments with ESP32 specifics
	 */
	switch (cmd->opcode) {
	case SD_GO_IDLE_STATE:
		esp_cmd.flags = SCF_CMD_BC | SCF_RSP_R0;
		break;
	case SD_APP_CMD:
	case SD_SEND_STATUS:
	case SD_SET_BLOCK_SIZE:
		esp_cmd.flags = SCF_CMD_AC | SCF_RSP_R1;
		break;
	case SD_SEND_IF_COND:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R7;
		break;
	case SD_APP_SEND_OP_COND:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R3;
		esp_cmd.arg = SD_OCR_SDHC_CAP | SD_OCR_VOL_MASK;
		break;
	case SDIO_RW_DIRECT:
		esp_cmd.flags =
SCF_CMD_AC | SCF_RSP_R5;
		break;
	case SDIO_SEND_OP_COND:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R4;
		break;
	case SD_ALL_SEND_CID:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R2;
		break;
	case SD_SEND_RELATIVE_ADDR:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R6;
		break;
	case SD_SEND_CSD:
		esp_cmd.flags = SCF_CMD_AC | SCF_RSP_R2;
		esp_cmd.datalen = 0;
		break;
	case SD_SELECT_CARD:
		/* Don't expect to see a response when de-selecting a card */
		esp_cmd.flags = SCF_CMD_AC | (cmd->arg > 0 ? SCF_RSP_R1 : 0);
		break;
	case SD_APP_SEND_SCR:
	case SD_SWITCH:
	case SD_READ_SINGLE_BLOCK:
	case SD_READ_MULTIPLE_BLOCK:
	case SD_APP_SEND_NUM_WRITTEN_BLK:
		esp_cmd.flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
		break;
	case SD_WRITE_SINGLE_BLOCK:
	case SD_WRITE_MULTIPLE_BLOCK:
		esp_cmd.flags = SCF_CMD_ADTC | SCF_RSP_R1;
		break;
	default:
		LOG_INF("SDHC driver: command %u not supported", cmd->opcode);
		return -ENOTSUP;
	}

	/* Run the transaction, retrying on error until the budget is spent */
	while (retries > 0) {
		ret_esp = sdmmc_host_do_transaction(dev, cfg->slot, &esp_cmd);
		if (ret_esp) {
			retries--; /* error, retry */
		} else {
			break;
		}
	}

	if ((ret_esp != 0) || esp_cmd.error) {
		LOG_DBG("Error command: %u arg %08x ret_esp = 0x%x error = 0x%x\n", cmd->opcode,
			cmd->arg, ret_esp, esp_cmd.error);
		/* Prefer the transaction-level error; fall back to the command error */
		ret_esp = (ret_esp > 0) ? ret_esp : esp_cmd.error;
		ret = err_esp2zep(ret_esp);
	} else {
		/* fill response buffer */
		memcpy(cmd->response, esp_cmd.response, sizeof(cmd->response));

		int state = MMC_R1_CURRENT_STATE(esp_cmd.response);

		LOG_DBG("cmd %u arg %08x response %08x %08x %08x %08x err=0x%x state=%d",
			esp_cmd.opcode, esp_cmd.arg, esp_cmd.response[0], esp_cmd.response[1],
			esp_cmd.response[2], esp_cmd.response[3], esp_cmd.error, state);

		if (data) {
			/* Record number of bytes xfered */
			data->bytes_xfered = esp_cmd.datalen;
		}
	}

	return ret;
}

/*
 * Get card presence
 */
static int sdhc_esp32_get_card_present(const struct device *dev)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;

	return sdmmc_ll_is_card_detected(sdio_hw, cfg->slot);
}

/*
 * Get host properties
 */
static int sdhc_esp32_get_host_props(const struct device *dev, struct sdhc_host_props *props)
{
	const struct sdhc_esp32_config *cfg = dev->config;

	memcpy(props, &cfg->props, sizeof(struct sdhc_host_props));
	return 0;
}

/**
 * @brief SDMMC interrupt handler
 *
 * All communication in SD protocol is driven by the master, and the hardware
 * handles things like stop commands automatically.
 * So the interrupt handler doesn't need to do much, we just push interrupt
 * status into a queue, clear interrupt flags, and let the task currently
 * doing communication figure out what to do next.
 *
 * Card detect interrupts pose a small issue though, because if a card is
 * plugged in and out a few times, while there is no task to process
 * the events, event queue can become full and some card detect events
 * may be dropped. We ignore this problem for now, since the there are no other
 * interesting events which can get lost due to this.
 */
static void IRAM_ATTR sdio_esp32_isr(void *arg)
{
	const struct device *dev = (const struct device *)arg;
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	struct sdmmc_event event;
	struct k_msgq *queue = data->s_host_ctx.event_queue;
	/* Read, then acknowledge (write-1-to-clear) the SDMMC interrupt status */
	uint32_t pending = sdmmc_ll_get_intr_status(sdio_hw) & 0xFFFF;

	sdio_hw->rintsts.val = pending;
	event.sdmmc_status = pending;

	/* Same for the IDMAC (DMA) interrupt status */
	uint32_t dma_pending = sdio_hw->idsts.val;

	sdio_hw->idsts.val = dma_pending;
	event.dma_status = dma_pending & 0x1f;

	if ((pending != 0) || (dma_pending != 0)) {
		/* Best-effort: drop the event if the queue is full (see note above) */
		k_msgq_put(queue, &event, K_NO_WAIT);
	}
}

/*
 * Perform early system init for SDHC
 * Configures pins, enables the peripheral clock, resets the controller,
 * installs the ISR, enables interrupts/DMA, and applies the default
 * bus clock and width from the driver data.
 */
static int sdhc_esp32_init(const struct device *dev)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	int ret;

	/* Pin configuration */

	/* Set power GPIO high, so card starts powered */
	if (cfg->pwr_gpio.port) {
		ret = gpio_pin_configure_dt(&cfg->pwr_gpio, GPIO_OUTPUT_ACTIVE);
		if (ret) {
			return -EIO;
		}
	}

	/*
	 * Pins below are only defined for ESP32. For SoC's with GPIO matrix feature
	 * please use pinctrl for pin configuration.
	 */
	configure_pin_iomux(cfg->clk_pin);
	configure_pin_iomux(cfg->cmd_pin);
	configure_pin_iomux(cfg->d0_pin);
	configure_pin_iomux(cfg->d1_pin);
	configure_pin_iomux(cfg->d2_pin);
	configure_pin_iomux(cfg->d3_pin);

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("Failed to configure SDHC pins");
		return -EINVAL;
	}

	if (!device_is_ready(cfg->clock_dev)) {
		return -ENODEV;
	}

	ret = clock_control_on(cfg->clock_dev, cfg->clock_subsys);
	if (ret != 0) {
		LOG_ERR("Error enabling SDHC clock");
		return ret;
	}

	/* Enable clock to peripheral. Use smallest divider first */
	ret = sdmmc_host_set_clk_div(sdio_hw, 2);
	if (ret != 0) {
		return err_esp2zep(ret);
	}

	/* Reset controller */
	sdhc_esp32_reset(dev);

	/* Clear interrupt status and set interrupt mask to known state */
	sdio_hw->rintsts.val = 0xffffffff;
	sdio_hw->intmask.val = 0;
	sdio_hw->ctrl.int_enable = 0;

	/* Attach interrupt handler */
	ret = esp_intr_alloc(cfg->irq_source, 0, &sdio_esp32_isr, (void *)dev,
			     &data->s_host_ctx.intr_handle);
	if (ret != 0) {
		k_msgq_purge(data->s_host_ctx.event_queue);
		return -EFAULT;
	}

	/* Enable interrupts */
	sdio_hw->intmask.val = SDMMC_INTMASK_CD | SDMMC_INTMASK_CMD_DONE | SDMMC_INTMASK_DATA_OVER |
			       SDMMC_INTMASK_RCRC | SDMMC_INTMASK_DCRC | SDMMC_INTMASK_RTO |
			       SDMMC_INTMASK_DTO | SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
			       SDMMC_INTMASK_EBE | SDMMC_INTMASK_RESP_ERR | SDMMC_INTMASK_HLE;
	/* sdio is enabled only when use */
	sdio_hw->ctrl.int_enable = 1;

	/* Disable generation of Busy Clear Interrupt */
	sdio_hw->cardthrctl.busy_clr_int_en = 0;

	/* Enable DMA */
	sdmmc_host_dma_init(sdio_hw);

	/* Initialize transaction handler */
	ret = sdmmc_host_transaction_handler_init(data);
	if (ret != 0) {
		k_msgq_purge(data->s_host_ctx.event_queue);
		esp_intr_free(data->s_host_ctx.intr_handle);
		data->s_host_ctx.intr_handle = NULL;
		return ret;
	}

	/* post init settings */
	ret = sdmmc_host_set_card_clk(sdio_hw, cfg->slot, data->bus_clock / 1000);
	if (ret != 0) {
		LOG_ERR("Error configuring card clock");
		return err_esp2zep(ret);
	}

	ret = sdmmc_host_set_bus_width(sdio_hw, cfg->slot, data->bus_width);
	if (ret != 0) {
		LOG_ERR("Error configuring bus width");
		return err_esp2zep(ret);
	}

	return 0;
}

/* Zephyr SDHC driver API vtable */
static const struct sdhc_driver_api sdhc_api = {.reset = sdhc_esp32_reset,
						.request = sdhc_esp32_request,
						.set_io = sdhc_esp32_set_io,
						.get_card_present = sdhc_esp32_get_card_present,
						.card_busy = sdhc_esp32_card_busy,
						.get_host_props = sdhc_esp32_get_host_props};

/* Per-instance devicetree glue: config, data, event queue and device define */
#define SDHC_ESP32_INIT(n)                                                                         \
                                                                                                   \
	PINCTRL_DT_DEFINE(DT_DRV_INST(n));                                                         \
	K_MSGQ_DEFINE(sdhc##n##_queue, sizeof(struct sdmmc_event), SDMMC_EVENT_QUEUE_LENGTH, 1);   \
                                                                                                   \
	static const struct sdhc_esp32_config sdhc_esp32_##n##_config = {                          \
		.sdio_hw = (const sdmmc_dev_t *)DT_REG_ADDR(DT_INST_PARENT(n)),                    \
		.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))),                     \
		.clock_subsys = (clock_control_subsys_t)DT_CLOCKS_CELL(DT_INST_PARENT(n), offset), \
		.irq_source = DT_IRQN(DT_INST_PARENT(n)),                                          \
		.slot = DT_REG_ADDR(DT_DRV_INST(n)),                                               \
		.bus_width_cfg = DT_INST_PROP(n, bus_width),                                       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(DT_DRV_INST(n)),                                 \
		.pwr_gpio = GPIO_DT_SPEC_INST_GET_OR(n, pwr_gpios, {0}),                           \
		.clk_pin = DT_INST_PROP_OR(n, clk_pin, GPIO_NUM_NC),                               \
		.cmd_pin = DT_INST_PROP_OR(n, cmd_pin, GPIO_NUM_NC),                               \
		.d0_pin = DT_INST_PROP_OR(n, d0_pin, GPIO_NUM_NC),                                 \
		.d1_pin = DT_INST_PROP_OR(n, d1_pin, GPIO_NUM_NC),                                 \
		.d2_pin = DT_INST_PROP_OR(n, d2_pin, GPIO_NUM_NC),                                 \
		.d3_pin = DT_INST_PROP_OR(n, d3_pin, GPIO_NUM_NC),                                 \
		.props = {.is_spi = false,                                                         \
			  .f_max = DT_INST_PROP(n, max_bus_freq),                                  \
			  .f_min = DT_INST_PROP(n, min_bus_freq),                                  \
			  .max_current_330 = DT_INST_PROP(n, max_current_330),                     \
			  .max_current_180 = DT_INST_PROP(n, max_current_180),                     \
			  .power_delay = DT_INST_PROP_OR(n, power_delay_ms, 0),                    \
			  .host_caps = {.vol_180_support = false,                                  \
					.vol_300_support = false,                                  \
					.vol_330_support = true,                                   \
					.suspend_res_support = false,                              \
					.sdma_support = true,                                      \
					.high_spd_support =                                        \
						(DT_INST_PROP(n, bus_width) == 4) ? true : false,  \
					.adma_2_support = false,                                   \
					.max_blk_len = 0,                                          \
					.ddr50_support = false,                                    \
					.sdr104_support = false,                                   \
					.sdr50_support = false,                                    \
					.bus_8_bit_support = false,                                \
					.bus_4_bit_support =                                       \
						(DT_INST_PROP(n, bus_width) == 4) ? true : false,  \
					.hs200_support = false,                                    \
					.hs400_support = false}}};                                 \
                                                                                                   \
	static struct sdhc_esp32_data sdhc_esp32_##n##_data = {                                    \
		.bus_width = SDMMC_SLOT_WIDTH_DEFAULT,                                             \
		.bus_clock = (SDMMC_FREQ_PROBING * 1000),                                          \
		.power_mode = SDHC_POWER_ON,                                                       \
		.timing = SDHC_TIMING_LEGACY,                                                      \
		.s_host_ctx = {.event_queue = &sdhc##n##_queue}};                                  \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, &sdhc_esp32_init, NULL, &sdhc_esp32_##n##_data,                   \
			      &sdhc_esp32_##n##_config, POST_KERNEL, CONFIG_SDHC_INIT_PRIORITY,    \
			      &sdhc_api);

DT_INST_FOREACH_STATUS_OKAY(SDHC_ESP32_INIT)

BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1,
	     "Currently, only one espressif,esp32-sdhc-slot compatible node is supported");
```
/content/code_sandbox/drivers/sdhc/sdhc_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
12,208
```objective-c /* */ #include <zephyr/cache.h> #include <zephyr/drivers/sdhc.h> /* HRS09 */ #define CDNS_HRS09_PHY_SW_RESET BIT(0) #define CDNS_HRS09_PHY_INIT_COMP BIT(1) #define CDNS_HRS09_EXT_WR_MODE BIT(3) #define CDNS_HRS09_RDCMD_EN_BIT BIT(15) #define CDNS_HRS09_RDDATA_EN_BIT BIT(16) #define CDNS_HRS09_EXT_RD_MODE(x) ((x) << 2) #define CDNS_HRS09_EXTENDED_WR(x) ((x) << 3) #define CDNS_HRS09_RDCMD_EN(x) ((x) << 15) #define CDNS_HRS09_RDDATA_EN(x) ((x) << 16) /* HRS00 */ #define CDNS_HRS00_SWR BIT(0) /* CMD_DATA_OUTPUT */ #define SDHC_CDNS_HRS16 0x40 /* SRS09 - Present State Register */ #define CDNS_SRS09_STAT_DAT_BUSY BIT(2) #define CDNS_SRS09_CI BIT(16) /* SRS10 - Host Control 1 (General / Power / Block-Gap / Wake-Up) */ #define LEDC BIT(0) #define DT_WIDTH BIT(1) #define HS_EN BIT(2) #define CDNS_SRS10_DTW 1 #define CDNS_SRS10_EDTW 5 #define CDNS_SRS10_BP BIT(8) #define CDNS_SRS10_BVS 9 #define BUS_VOLTAGE_1_8_V (5 << CDNS_SRS10_BVS) #define BUS_VOLTAGE_3_0_V (6 << CDNS_SRS10_BVS) #define BUS_VOLTAGE_3_3_V (7 << CDNS_SRS10_BVS) /* data bus width */ #define WIDTH_BIT1 CDNS_SRS10_DTW #define WIDTH_BIT4 CDNS_SRS10_DTW #define WIDTH_BIT8 CDNS_SRS10_EDTW /* SRS11 */ #define CDNS_SRS11_ICE BIT(0) #define CDNS_SRS11_ICS BIT(1) #define CDNS_SRS11_SDCE BIT(2) #define CDNS_SRS11_USDCLKFS 6 #define CDNS_SRS11_SDCLKFS 8 #define CDNS_SRS11_DTCV 16 #define CDNS_SRS11_SRFA BIT(24) #define CDNS_SRS11_SRCMD BIT(25) #define CDNS_SRS11_SRDAT BIT(26) /* * This value determines the interval by which DAT line timeouts are detected * The interval can be computed as below: * 1111b - Reserved * 1110b - t_sdmclk*2(27+2) * 1101b - t_sdmclk*2(26+2) */ #define DTC_VAL 0xE #define READ_CLK (0xa << CDNS_SRS11_DTCV) #define WRITE_CLK (0xe << CDNS_SRS11_DTCV) /* SRS12 */ #define CDNS_SRS12_CC BIT(0) #define CDNS_SRS12_TC BIT(1) #define CDNS_SRS12_EINT BIT(15) /* SDMA Buffer Boundary */ #define BUFFER_BOUNDARY_4K 0U #define BUFFER_BOUNDARY_8K 1U #define BUFFER_BOUNDARY_16K 2U #define 
BUFFER_BOUNDARY_32K 3U #define BUFFER_BOUNDARY_64K 4U #define BUFFER_BOUNDARY_128K 5U #define BUFFER_BOUNDARY_256K 6U #define BUFFER_BOUNDARY_512K 7U /* SRS01 */ #define CDNS_SRS01_BLK_SIZE 0U #define CDNS_SRS01_SDMA_BUF 12 #define CDNS_SRS01_BLK_COUNT_CT 16 /* SRS15 Registers */ #define CDNS_SRS15_UMS 16 #define CDNS_SRS15_SDR12 (0 << CDNS_SRS15_UMS) #define CDNS_SRS15_SDR25 (1 << CDNS_SRS15_UMS) #define CDNS_SRS15_SDR50 (2 << CDNS_SRS15_UMS) #define CDNS_SRS15_SDR104 (3 << CDNS_SRS15_UMS) #define CDNS_SRS15_DDR50 (4 << CDNS_SRS15_UMS) /* V18SE is 0 for DS and HS, 1 for UHS-I */ #define CDNS_SRS15_V18SE BIT(19) #define CDNS_SRS15_CMD23_EN BIT(27) /* HC4E is 0 means version 3.0 and 1 means v 4.0 */ #define CDNS_SRS15_HV4E BIT(28) #define CDNS_SRS15_BIT_AD_32 0U #define CDNS_SRS15_BIT_AD_64 BIT(29) #define CDNS_SRS15_PVE BIT(31) /* Combo PHY */ #define PHY_DQ_TIMING_REG 0x0 #define PHY_DQS_TIMING_REG 0x04 #define PHY_GATE_LPBK_CTRL_REG 0x08 #define PHY_DLL_MASTER_CTRL_REG 0x0C #define PHY_DLL_SLAVE_CTRL_REG 0x10 #define PHY_CTRL_REG 0x80 #define PERIPHERAL_SDMMC_MASK 0x60 #define PERIPHERAL_SDMMC_OFFSET 6 #define DFI_INTF_MASK 0x1 /* PHY_DQS_TIMING_REG */ #define CP_USE_EXT_LPBK_DQS(x) (x << 22) #define CP_USE_LPBK_DQS(x) (x << 21) #define CP_USE_PHONY_DQS(x) (x << 20) #define CP_USE_PHONY_DQS_CMD(x) (x << 19) /* PHY_GATE_LPBK_CTRL_REG */ #define CP_SYNC_METHOD(x) ((x) << 31) #define CP_SW_HALF_CYCLE_SHIFT(x) ((x) << 28) #define CP_RD_DEL_SEL(x) ((x) << 19) #define CP_UNDERRUN_SUPPRESS(x) ((x) << 18) #define CP_GATE_CFG_ALWAYS_ON(x) ((x) << 6) /* PHY_DLL_MASTER_CTRL_REG */ #define CP_DLL_BYPASS_MODE(x) ((x) << 23) #define CP_DLL_START_POINT(x) ((x) << 0) /* PHY_DLL_SLAVE_CTRL_REG */ #define CP_READ_DQS_CMD_DELAY(x) ((x) << 24) #define CP_CLK_WRDQS_DELAY(x) ((x) << 16) #define CP_CLK_WR_DELAY(x) ((x) << 8) #define CP_READ_DQS_DELAY(x) (x) /* PHY_DQ_TIMING_REG */ #define CP_IO_MASK_ALWAYS_ON(x) ((x) << 31) #define CP_IO_MASK_END(x) ((x) << 27) #define 
CP_IO_MASK_START(x) ((x) << 24) #define CP_DATA_SELECT_OE_END(x) (x) /* SW RESET REG */ #define SDHC_CDNS_HRS00 (0x00) #define CDNS_HRS00_SWR BIT(0) /* PHY access port */ #define SDHC_CDNS_HRS04 0x10 #define CDNS_HRS04_ADDR GENMASK(5, 0) /* PHY data access port */ #define SDHC_CDNS_HRS05 0x14 /* eMMC control registers */ #define SDHC_CDNS_HRS06 0x18 /* PHY_CTRL_REG */ #define CP_PHONY_DQS_TIMING_MASK 0x3F #define CP_PHONY_DQS_TIMING_SHIFT 4 /* SRS */ #define SDHC_CDNS_SRS00 0x200 #define SDHC_CDNS_SRS01 0x204 #define SDHC_CDNS_SRS02 0x208 #define SDHC_CDNS_SRS03 0x20c #define SDHC_CDNS_SRS04 0x210 #define SDHC_CDNS_SRS05 0x214 #define SDHC_CDNS_SRS06 0x218 #define SDHC_CDNS_SRS07 0x21C #define SDHC_CDNS_SRS08 0x220 #define SDHC_CDNS_SRS09 0x224 #define SDHC_CDNS_SRS10 0x228 #define SDHC_CDNS_SRS11 0x22C #define SDHC_CDNS_SRS12 0x230 #define SDHC_CDNS_SRS13 0x234 #define SDHC_CDNS_SRS14 0x238 #define SDHC_CDNS_SRS15 0x23c #define SDHC_CDNS_SRS21 0x254 #define SDHC_CDNS_SRS22 0x258 #define SDHC_CDNS_SRS23 0x25c /* SRS00 */ #define CDNS_SRS00_SAAR 1 /* SRS03 */ #define CDNS_SRS03_CMD_START BIT(31) #define CDNS_SRS03_CMD_USE_HOLD_REG BIT(29) #define CDNS_SRS03_COM_IDX 24 /* Command type */ #define CDNS_SRS03_CMD_TYPE 22 #define CMD_STOP_ABORT_CMD (3 << CDNS_SRS03_CMD_TYPE) #define CMD_RESUME_CMD (2 << CDNS_SRS03_CMD_TYPE) #define CMD_SUSPEND_CMD (1 << CDNS_SRS03_CMD_TYPE) #define CDNS_SRS03_DATA_PRSNT BIT(21) #define CDNS_SRS03_CMD_IDX_CHK_EN BIT(20) #define CDNS_SRS03_RESP_CRCCE BIT(19) #define CDNS_SRS03_RESP_ERR BIT(7) #define CDNS_SRS03_MULTI_BLK_READ BIT(5) #define CDNS_SRS03_CMD_READ BIT(4) /* Response type select */ #define CDNS_SRS03_RES_TYPE_SEL 16 #define RES_TYPE_SEL_NO (0 << CDNS_SRS03_RES_TYPE_SEL) #define RES_TYPE_SEL_136 (1 << CDNS_SRS03_RES_TYPE_SEL) #define RES_TYPE_SEL_48 (2 << CDNS_SRS03_RES_TYPE_SEL) #define RES_TYPE_SEL_48_B (3 << CDNS_SRS03_RES_TYPE_SEL) /* Auto CMD Enable */ #define CDNS_SRS03_ACE 2 #define NO_AUTO_COMMAND (0 << CDNS_SRS03_ACE) 
#define AUTO_CMD12 (1 << CDNS_SRS03_ACE) #define AUTO_CMD23 (2 << CDNS_SRS03_ACE) #define AUTO_CMD_AUTO (3 << CDNS_SRS03_ACE) #define CDNS_SRS03_DMA_EN BIT(0) #define CDNS_SRS03_BLK_CNT_EN BIT(1) /* HRS07 - IO Delay Information Register */ #define SDHC_CDNS_HRS07 0x1c #define CDNS_HRS07_IDELAY_VAL(x) (x) #define CDNS_HRS07_RW_COMPENSATE(x) ((x) << 16) /* HRS09 - PHY Control and Status Register */ #define SDHC_CDNS_HRS09 0x24 /* HRS10 - Host Controller SDCLK start point adjustment */ #define SDHC_CDNS_HRS10 0x28 /* HCSDCLKADJ DATA; DDR Mode */ #define SDHC_HRS10_HCSDCLKADJ(x) ((x) << 16) /* HRS16 */ #define CDNS_HRS16_WRCMD0_DLY(x) (x) #define CDNS_HRS16_WRCMD1_DLY(x) ((x) << 4) #define CDNS_HRS16_WRDATA0_DLY(x) ((x) << 8) #define CDNS_HRS16_WRDATA1_DLY(x) ((x) << 12) #define CDNS_HRS16_WRCMD0_SDCLK_DLY(x) ((x) << 16) #define CDNS_HRS16_WRCMD1_SDCLK_DLY(x) ((x) << 20) #define CDNS_HRS16_WRDATA0_SDCLK_DLY(x) ((x) << 24) #define CDNS_HRS16_WRDATA1_SDCLK_DLY(x) ((x) << 28) /* Shared Macros */ #define SDMMC_CDN(_reg) (SDMMC_CDN_REG_BASE + \ (SDMMC_CDN_##_reg)) /* MMC Peripheral Definition */ #define MMC_BLOCK_SIZE 512U #define MMC_BLOCK_MASK (MMC_BLOCK_SIZE - 1) #define MMC_BOOT_CLK_RATE (400 * 1000) #define OCR_POWERUP BIT(31) #define OCR_HCS BIT(30) #define OCR_3_5_3_6 BIT(23) #define OCR_3_4_3_5 BIT(22) #define OCR_3_3_3_4 BIT(21) #define OCR_3_2_3_3 BIT(20) #define OCR_3_1_3_2 BIT(19) #define OCR_3_0_3_1 BIT(18) #define OCR_2_9_3_0 BIT(17) #define OCR_2_8_2_9 BIT(16) #define OCR_2_7_2_8 BIT(15) #define OCR_VDD_MIN_2V7 GENMASK(23, 15) #define OCR_VDD_MIN_2V0 GENMASK(14, 8) #define OCR_VDD_MIN_1V7 BIT(7) #define MMC_RSP_48 BIT(0) #define MMC_RSP_136 BIT(1) /* 136 bit response */ #define MMC_RSP_CRC BIT(2) /* expect valid crc */ #define MMC_RSP_CMD_IDX BIT(3) /* response contains cmd idx */ #define MMC_RSP_BUSY BIT(4) /* device may be busy */ /* JEDEC 4.51 chapter 6.12 */ #define MMC_RESPONSE_R1 (MMC_RSP_48 | MMC_RSP_CMD_IDX | MMC_RSP_CRC) #define MMC_RESPONSE_R1B 
(MMC_RESPONSE_R1 | MMC_RSP_BUSY) #define MMC_RESPONSE_R2 (MMC_RSP_48 | MMC_RSP_136 | MMC_RSP_CRC) #define MMC_RESPONSE_R3 (MMC_RSP_48) #define MMC_RESPONSE_R4 (MMC_RSP_48) #define MMC_RESPONSE_R5 (MMC_RSP_48 | MMC_RSP_CRC | MMC_RSP_CMD_IDX) #define MMC_RESPONSE_R6 (MMC_RSP_CRC | MMC_RSP_CMD_IDX) #define MMC_RESPONSE_R7 (MMC_RSP_48 | MMC_RSP_CRC) #define MMC_RESPONSE_NONE 0 /* Value randomly chosen for eMMC RCA, it should be > 1 */ #define MMC_FIX_RCA 6 #define RCA_SHIFT_OFFSET 16 #define CMD_EXTCSD_PARTITION_CONFIG 179 #define CMD_EXTCSD_BUS_WIDTH 183 #define CMD_EXTCSD_HS_TIMING 185 #define CMD_EXTCSD_SEC_CNT 212 #define PART_CFG_BOOT_PARTITION1_ENABLE BIT(3) #define PART_CFG_PARTITION1_ACCESS 1 /* Values in EXT CSD register */ #define MMC_BUS_WIDTH_1 0 #define MMC_BUS_WIDTH_4 1 #define MMC_BUS_WIDTH_8 2 #define MMC_BUS_WIDTH_DDR_4 5 #define MMC_BUS_WIDTH_DDR_8 6 #define MMC_BOOT_MODE_BACKWARD 0 #define MMC_BOOT_MODE_HS_TIMING BIT(3) #define MMC_BOOT_MODE_DDR (2 << 3) #define EXTCSD_SET_CMD 0 #define EXTCSD_SET_BITS BIT(24) #define EXTCSD_CLR_BITS (2 << 24) #define EXTCSD_WRITE_BYTES (3 << 24) #define EXTCSD_CMD(x) (((x) & 0xff) << 16) #define EXTCSD_VALUE(x) (((x) & 0xff) << 8) #define EXTCSD_CMD_SET_NORMAL 1 #define CSD_TRAN_SPEED_UNIT_MASK GENMASK(2, 0) #define CSD_TRAN_SPEED_MULT_MASK GENMASK(6, 3) #define CSD_TRAN_SPEED_MULT_SHIFT 3 #define STATUS_CURRENT_STATE(x) (((x) & 0xf) << 9) #define STATUS_READY_FOR_DATA BIT(8) #define STATUS_SWITCH_ERROR BIT(7) #define MMC_GET_STATE(x) (((x) >> 9) & 0xf) #define MMC_STATE_IDLE 0 #define MMC_STATE_READY 1 #define MMC_STATE_IDENT 2 #define MMC_STATE_STBY 3 #define MMC_STATE_TRAN 4 #define MMC_STATE_DATA 5 #define MMC_STATE_RCV 6 #define MMC_STATE_PRG 7 #define MMC_STATE_DIS 8 #define MMC_STATE_BTST 9 #define MMC_STATE_SLP 10 #define MMC_FLAG_CMD23 1 #define CMD8_CHECK_PATTERN 0xAA #define VHS_2_7_3_6_V BIT(8) #define SD_SCR_BUS_WIDTH_1 BIT(8) #define SD_SCR_BUS_WIDTH_4 BIT(10) /* ADMA table component */ #define 
ADMA_DESC_ATTR_VALID BIT(0) #define ADMA_DESC_ATTR_END BIT(1) #define ADMA_DESC_ATTR_INT BIT(2) #define ADMA_DESC_ATTR_ACT1 BIT(4) #define ADMA_DESC_ATTR_ACT2 BIT(5) #define ADMA_DESC_TRANSFER_DATA ADMA_DESC_ATTR_ACT2 /* Conf depends on SRS15.HV4E */ #define SDMA 0 #define ADMA2_32 (2 << 3) #define ADMA2_64 (3 << 3) /* here 0 defines the 64 Kb size */ #define MAX_64KB_PAGE 0 struct sdmmc_cmd { unsigned int cmd_idx; unsigned int cmd_arg; unsigned int resp_type; unsigned int resp_data[4]; }; struct sdhc_cdns_ops { /* init function for card */ int (*init)(void); /* busy check function for card */ int (*busy)(void); /* card_present function check for card */ int (*card_present)(void); /* reset the card */ int (*reset)(void); /* send command and respective argument */ int (*send_cmd)(struct sdmmc_cmd *cmd, struct sdhc_data *data); /* io set up for card */ int (*set_ios)(unsigned int clk, unsigned int width); /* prepare dma descriptors */ int (*prepare)(uint32_t lba, uintptr_t buf, struct sdhc_data *data); /* cache invd api */ int (*cache_invd)(int lba, uintptr_t buf, size_t size); }; /* Combo Phy reg */ struct sdhc_cdns_combo_phy { uint32_t cp_clk_wr_delay; uint32_t cp_clk_wrdqs_delay; uint32_t cp_data_select_oe_end; uint32_t cp_dll_bypass_mode; uint32_t cp_dll_locked_mode; uint32_t cp_dll_start_point; uint32_t cp_gate_cfg_always_on; uint32_t cp_io_mask_always_on; uint32_t cp_io_mask_end; uint32_t cp_io_mask_start; uint32_t cp_rd_del_sel; uint32_t cp_read_dqs_cmd_delay; uint32_t cp_read_dqs_delay; uint32_t cp_sw_half_cycle_shift; uint32_t cp_sync_method; uint32_t cp_underrun_suppress; uint32_t cp_use_ext_lpbk_dqs; uint32_t cp_use_lpbk_dqs; uint32_t cp_use_phony_dqs; uint32_t cp_use_phony_dqs_cmd; }; /* sdmmc reg */ struct sdhc_cdns_sdmmc { uint32_t sdhc_extended_rd_mode; uint32_t sdhc_extended_wr_mode; uint32_t sdhc_hcsdclkadj; uint32_t sdhc_idelay_val; uint32_t sdhc_rdcmd_en; uint32_t sdhc_rddata_en; uint32_t sdhc_rw_compensate; uint32_t sdhc_sdcfsh; uint32_t 
sdhc_sdcfsl; uint32_t sdhc_wrcmd0_dly; uint32_t sdhc_wrcmd0_sdclk_dly; uint32_t sdhc_wrcmd1_dly; uint32_t sdhc_wrcmd1_sdclk_dly; uint32_t sdhc_wrdata0_dly; uint32_t sdhc_wrdata0_sdclk_dly; uint32_t sdhc_wrdata1_dly; uint32_t sdhc_wrdata1_sdclk_dly; }; enum sdmmc_device_mode { /* Identification */ SD_DS_ID, /* Default speed */ SD_DS, /* High speed */ SD_HS, /* Ultra high speed SDR12 */ SD_UHS_SDR12, /* Ultra high speed SDR25 */ SD_UHS_SDR25, /* Ultra high speed SDR`50 */ SD_UHS_SDR50, /* Ultra high speed SDR104 */ SD_UHS_SDR104, /* Ultra high speed DDR50 */ SD_UHS_DDR50, /* SDR backward compatible */ EMMC_SDR_BC, /* SDR */ EMMC_SDR, /* DDR */ EMMC_DDR, /* High speed 200Mhz in SDR */ EMMC_HS200, /* High speed 200Mhz in DDR */ EMMC_HS400, /* High speed 200Mhz in SDR with enhanced strobe */ EMMC_HS400ES, }; struct sdhc_cdns_params { uintptr_t reg_base; uintptr_t reg_phy; uintptr_t desc_base; size_t desc_size; int clk_rate; int bus_width; unsigned int flags; enum sdmmc_device_mode cdn_sdmmc_dev_type; uint32_t combophy; }; struct sdmmc_device_info { /* Size of device in bytes */ unsigned long long device_size; /* Block size in bytes */ unsigned int block_size; /* Max bus freq in Hz */ unsigned int max_bus_freq; /* OCR voltage */ unsigned int ocr_voltage; /* Type of MMC */ enum sdmmc_device_mode cdn_sdmmc_dev_type; }; /*descriptor structure with 8 byte alignment*/ struct sdhc_cdns_desc { /* 8 bit attribute */ uint8_t attr; /* reserved bits in desc */ uint8_t reserved; /* page length for the descriptor */ uint16_t len; /* lower 32 bits for buffer (64 bit addressing) */ uint32_t addr_lo; /* higher 32 bits for buffer (64 bit addressing) */ uint32_t addr_hi; } __aligned(8); void sdhc_cdns_sdmmc_init(struct sdhc_cdns_params *params, struct sdmmc_device_info *info, const struct sdhc_cdns_ops **cb_sdmmc_ops); ```
/content/code_sandbox/drivers/sdhc/sdhc_cdns_ll.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,070
```c /* * */ #define DT_DRV_COMPAT nxp_lpc_sdif #include <zephyr/drivers/sdhc.h> #include <zephyr/devicetree.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/logging/log.h> #include <zephyr/irq.h> #include <zephyr/kernel.h> #include <fsl_sdif.h> LOG_MODULE_REGISTER(sdif, CONFIG_SDHC_LOG_LEVEL); enum mcux_sdif_callback_status { TRANSFER_CMD_COMPLETE = BIT(0), TRANSFER_CMD_FAILED = BIT(1), TRANSFER_DATA_COMPLETE = BIT(2), TRANSFER_DATA_FAILED = BIT(3), }; #define TRANSFER_CMD_FLAGS (TRANSFER_CMD_COMPLETE | TRANSFER_CMD_FAILED) #define TRANSFER_DATA_FLAGS (TRANSFER_DATA_COMPLETE | TRANSFER_DATA_FAILED) #define MCUX_SDIF_RESET_TIMEOUT_VALUE (1000000U) #define MCUX_SDIF_DEFAULT_TIMEOUT (5000U) #define MCUX_SDIF_F_MAX MHZ(50) #define MCUX_SDIF_F_MIN KHZ(400) struct mcux_sdif_config { SDIF_Type *base; const struct pinctrl_dev_config *pincfg; uint32_t response_timeout; uint32_t cd_debounce_clocks; uint32_t data_timeout; const struct device *clock_dev; clock_control_subsys_t clock_subsys; void (*irq_config_func)(const struct device *dev); }; struct mcux_sdif_data { volatile uint32_t transfer_status; sdif_handle_t transfer_handle; struct k_sem transfer_sem; struct k_mutex access_mutex; #ifdef CONFIG_MCUX_SDIF_DMA_SUPPORT uint32_t *sdif_dma_descriptor; #endif /* CONFIG_MCUX_SDIF_DMA_SUPPORT */ }; static void mcux_sdif_transfer_complete(SDIF_Type *base, void *handle, status_t status, void *user_data) { const struct device *dev = (const struct device *)user_data; struct mcux_sdif_data *data = dev->data; if (status == kStatus_SDIF_DataTransferFail) { data->transfer_status |= TRANSFER_DATA_FAILED; } else if (status == kStatus_SDIF_DataTransferSuccess) { data->transfer_status |= TRANSFER_DATA_COMPLETE; } else if (status == kStatus_SDIF_SendCmdFail) { data->transfer_status |= TRANSFER_CMD_FAILED; } else if (status == kStatus_SDIF_SendCmdSuccess) { data->transfer_status |= TRANSFER_CMD_COMPLETE; } else { __ASSERT(false, "Unknown 
status code from SD interrupt"); } k_sem_give(&data->transfer_sem); } /* SDIF IRQ handler not exposed in SDK header, so declare it here */ extern void SDIO_DriverIRQHandler(void); /* * MCUX SDIF interrupt service routine */ static int mcux_sdif_isr(const struct device *dev) { SDIO_DriverIRQHandler(); return 0; } static int mcux_sdif_reset(const struct device *dev) { const struct mcux_sdif_config *config = dev->config; struct mcux_sdif_data *data = dev->data; k_mutex_lock(&data->access_mutex, K_FOREVER); /* Disable all interrupts */ SDIF_DisableInterrupt(config->base, kSDIF_AllInterruptStatus); /* Release all bus lines */ (void)SDIF_Reset(config->base, kSDIF_ResetAll, MCUX_SDIF_RESET_TIMEOUT_VALUE); /* clear all interrupt/DMA status */ SDIF_ClearInterruptStatus(config->base, kSDIF_AllInterruptStatus); SDIF_ClearInternalDMAStatus(config->base, kSDIF_DMAAllStatus); k_mutex_unlock(&data->access_mutex); return 0; } static int mcux_sdif_get_host_props(const struct device *dev, struct sdhc_host_props *props) { memset(props, 0, sizeof(*props)); props->f_max = MCUX_SDIF_F_MAX; props->f_min = MCUX_SDIF_F_MIN; props->power_delay = 500; props->host_caps.high_spd_support = true; props->host_caps.suspend_res_support = true; props->host_caps.vol_330_support = true; props->host_caps.bus_8_bit_support = true; props->max_current_330 = 1024; return 0; } static int mcux_sdif_set_io(const struct device *dev, struct sdhc_io *ios) { const struct mcux_sdif_config *config = dev->config; uint32_t src_clk_hz, bus_clk_hz; if (clock_control_get_rate(config->clock_dev, config->clock_subsys, &src_clk_hz)) { return -EINVAL; } /* If clock is set to zero, we should gate clock */ if (ios->clock != 0 && (ios->clock <= MCUX_SDIF_F_MAX) && (ios->clock >= MCUX_SDIF_F_MIN)) { bus_clk_hz = SDIF_SetCardClock(config->base, src_clk_hz, ios->clock); if (bus_clk_hz == 0) { return -ENOTSUP; } LOG_DBG("SDIF clock set to %d", bus_clk_hz); } else if (ios->clock != 0) { /* Invalid clock setting */ return -ENOTSUP; 
} if (ios->bus_mode != SDHC_BUSMODE_PUSHPULL) { return -ENOTSUP; } SDIF_EnableCardPower(config->base, ios->power_mode == SDHC_POWER_ON); switch (ios->bus_width) { case SDHC_BUS_WIDTH1BIT: SDIF_SetCardBusWidth(config->base, kSDIF_Bus1BitWidth); break; case SDHC_BUS_WIDTH4BIT: SDIF_SetCardBusWidth(config->base, kSDIF_Bus4BitWidth); break; case SDHC_BUS_WIDTH8BIT: SDIF_SetCardBusWidth(config->base, kSDIF_Bus8BitWidth); break; default: return -ENOTSUP; } if (ios->signal_voltage != SD_VOL_3_3_V) { return -ENOTSUP; } return 0; } /* * Early system init for SDHC */ static int mcux_sdif_init(const struct device *dev) { const struct mcux_sdif_config *config = dev->config; struct mcux_sdif_data *data = dev->data; sdif_transfer_callback_t sdif_cb = { .TransferComplete = mcux_sdif_transfer_complete, }; int ret; sdif_config_t host_config = {0}; ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } host_config.responseTimeout = config->response_timeout; host_config.cardDetDebounce_Clock = config->cd_debounce_clocks; host_config.dataTimeout = config->data_timeout; SDIF_Init(config->base, &host_config); SDIF_TransferCreateHandle(config->base, &data->transfer_handle, &sdif_cb, (void *)dev); config->irq_config_func(dev); k_mutex_init(&data->access_mutex); k_sem_init(&data->transfer_sem, 0, 1); return 0; } static int mcux_sdif_get_card_present(const struct device *dev) { const struct mcux_sdif_config *config = dev->config; return SDIF_DetectCardInsert(config->base, false); } static int mcux_sdif_transfer(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data) { const struct mcux_sdif_config *config = dev->config; struct mcux_sdif_data *dev_data = dev->data; status_t error; sdif_transfer_t transfer = {0}; sdif_command_t sdif_cmd = {0}; sdif_data_t sdif_data; #ifdef CONFIG_MCUX_SDIF_DMA_SUPPORT sdif_dma_config_t dma_config = { .enableFixBurstLen = false, .mode = kSDIF_DualDMAMode, .dmaDesBufferStartAddr = 
dev_data->sdif_dma_descriptor, .dmaDesBufferLen = (CONFIG_MCUX_SDIF_DMA_BUFFER_SIZE / 4), .dmaDesSkipLen = 0 }; #endif /* CONFIG_MCUX_SDIF_DMA_SUPPORT */ if (cmd->opcode == SD_GO_IDLE_STATE) { /* * Special handling for CMD0- we want to initialize the card * with 80 clocks, so we will use the SDIF_SendCardActive api * to ensure that CMD0 is sent while the SEND_INITIALIZATION * bit is set in the CMD register. */ if (!SDIF_SendCardActive(config->base, MCUX_SDIF_DEFAULT_TIMEOUT)) { LOG_ERR("Card clock init failed"); return -EIO; } return 0; } /* Copy Zephyr data fields to SDIF struct */ sdif_cmd.index = cmd->opcode; sdif_cmd.argument = cmd->arg; /* Lower 4 bits hold native SD response type */ sdif_cmd.responseType = (cmd->response_type & SDHC_NATIVE_RESPONSE_MASK); transfer.command = &sdif_cmd; if (data) { transfer.data = &sdif_data; memset(&sdif_data, 0, sizeof(sdif_data)); sdif_data.blockSize = data->block_size; sdif_data.blockCount = data->blocks; /* * Determine command type. Note that the driver is expected * to handle CMD12 and CMD23 for multiblock I/O. 
*/ switch (cmd->opcode) { case SD_WRITE_SINGLE_BLOCK: case SD_WRITE_MULTIPLE_BLOCK: sdif_data.enableAutoCommand12 = true; sdif_data.txData = data->data; break; case SD_READ_SINGLE_BLOCK: case SD_READ_MULTIPLE_BLOCK: sdif_data.enableAutoCommand12 = true; sdif_data.rxData = data->data; break; case SD_APP_SEND_SCR: case SD_SWITCH: case SD_APP_SEND_NUM_WRITTEN_BLK: sdif_data.rxData = data->data; break; default: return -ENOTSUP; } } dev_data->transfer_status = 0U; k_sem_reset(&dev_data->transfer_sem); do { #ifdef CONFIG_MCUX_SDIF_DMA_SUPPORT error = SDIF_TransferNonBlocking(config->base, &dev_data->transfer_handle, &dma_config, &transfer); #else error = SDIF_TransferNonBlocking(config->base, &dev_data->transfer_handle, NULL, &transfer); #endif /* CONFIG_MCUX_SDIF_DMA_SUPPORT */ } while (error == kStatus_SDIF_SyncCmdTimeout && cmd->timeout_ms--); if (error != kStatus_Success) { return -EIO; } /* Wait for the command to complete */ while ((dev_data->transfer_status & TRANSFER_CMD_FLAGS) == 0U) { if (k_sem_take(&dev_data->transfer_sem, K_MSEC(cmd->timeout_ms))) { return -ETIMEDOUT; } } if (dev_data->transfer_status & TRANSFER_CMD_FAILED) { return -EIO; } /* If data was sent, wait for that to complete */ if (data) { while ((dev_data->transfer_status & TRANSFER_DATA_FLAGS) == 0) { if (k_sem_take(&dev_data->transfer_sem, K_MSEC(data->timeout_ms))) { return -ETIMEDOUT; } } if (dev_data->transfer_status & TRANSFER_DATA_FAILED) { return -EIO; } } /* Record command response */ memcpy(cmd->response, sdif_cmd.response, sizeof(cmd->response)); if (data) { /* Record bytes transferred */ data->bytes_xfered = dev_data->transfer_handle.transferredWords; } return 0; } static int mcux_sdif_card_busy(const struct device *dev) { const struct mcux_sdif_config *config = dev->config; return (SDIF_GetControllerStatus(config->base) & SDIF_STATUS_DATA_BUSY_MASK) ? 
1 : 0; } /* Stops transmission of data using CMD12, after failed command */ static void mcux_sdif_stop_transmission(const struct device *dev) { const struct mcux_sdif_config *config = dev->config; struct mcux_sdif_data *data = dev->data; sdif_command_t cmd = {0}; sdif_transfer_t transfer = { .command = &cmd, .data = NULL, }; cmd.index = SD_STOP_TRANSMISSION; cmd.argument = 0; cmd.type = kCARD_CommandTypeAbort; cmd.responseType = SD_RSP_TYPE_R1b; /* Disable transmit interrupt, since we are using blocking transfer */ SDIF_DisableInterrupt(config->base, kSDIF_AllInterruptStatus); SDIF_ClearInterruptStatus(config->base, kSDIF_AllInterruptStatus); LOG_WRN("Transfer failed, sending CMD12"); SDIF_TransferNonBlocking(config->base, &data->transfer_handle, NULL, &transfer); } static int mcux_sdif_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data) { int ret; int busy_timeout = MCUX_SDIF_DEFAULT_TIMEOUT; struct mcux_sdif_data *dev_data = dev->data; ret = k_mutex_lock(&dev_data->access_mutex, K_MSEC(cmd->timeout_ms)); if (ret) { LOG_ERR("Could not access card"); return -EBUSY; } do { ret = mcux_sdif_transfer(dev, cmd, data); if (data && ret) { /* Send CMD12 to stop transmission after error */ mcux_sdif_stop_transmission(dev); while (busy_timeout > 0) { if (!mcux_sdif_card_busy(dev)) { break; } /* Wait 125us before polling again */ k_busy_wait(125); busy_timeout -= 125; } if (busy_timeout <= 0) { LOG_DBG("Card did not idle after CMD12"); k_mutex_unlock(&dev_data->access_mutex); return -ETIMEDOUT; } } } while (ret != 0 && (cmd->retries-- > 0)); k_mutex_unlock(&dev_data->access_mutex); return ret; } static const struct sdhc_driver_api sdif_api = { .reset = mcux_sdif_reset, .get_host_props = mcux_sdif_get_host_props, .set_io = mcux_sdif_set_io, .get_card_present = mcux_sdif_get_card_present, .request = mcux_sdif_request, .card_busy = mcux_sdif_card_busy, }; #ifdef CONFIG_MCUX_SDIF_DMA_SUPPORT #define MCUX_SDIF_DMA_DESCRIPTOR_DEFINE(n) \ static 
uint32_t mcux_sdif_dma_descriptor_##n \ [CONFIG_MCUX_SDIF_DMA_BUFFER_SIZE / 4] __aligned(4); #define MCUX_SDIF_DMA_DESCRIPTOR_INIT(n) \ .sdif_dma_descriptor = mcux_sdif_dma_descriptor_##n, #else #define MCUX_SDIF_DMA_DESCRIPTOR_DEFINE(n) #define MCUX_SDIF_DMA_DESCRIPTOR_INIT(n) #endif /* CONFIG_MCUX_SDIF_DMA_SUPPORT */ #define MCUX_SDIF_INIT(n) \ static void sdif_##n##_irq_config_func(const struct device *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \ mcux_sdif_isr, DEVICE_DT_INST_GET(n), 0); \ irq_enable(DT_INST_IRQN(n)); \ } \ \ PINCTRL_DT_INST_DEFINE(n); \ \ static const struct mcux_sdif_config sdif_##n##_config = { \ .base = (SDIF_Type *) DT_INST_REG_ADDR(n), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .response_timeout = DT_INST_PROP(n, response_timeout), \ .cd_debounce_clocks = DT_INST_PROP(n, cd_debounce_clocks), \ .data_timeout = DT_INST_PROP(n, data_timeout), \ .clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \ .clock_subsys = \ (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \ .irq_config_func = sdif_##n##_irq_config_func, \ }; \ \ MCUX_SDIF_DMA_DESCRIPTOR_DEFINE(n); \ \ static struct mcux_sdif_data sdif_##n##_data = { \ MCUX_SDIF_DMA_DESCRIPTOR_INIT(n) \ }; \ \ DEVICE_DT_INST_DEFINE(n, \ &mcux_sdif_init, \ NULL, \ &sdif_##n##_data, \ &sdif_##n##_config, \ POST_KERNEL, \ CONFIG_SDHC_INIT_PRIORITY, \ &sdif_api); DT_INST_FOREACH_STATUS_OKAY(MCUX_SDIF_INIT) ```
/content/code_sandbox/drivers/sdhc/mcux_sdif.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,776
```c /* * */ #define DT_DRV_COMPAT renesas_rcar_mmc #include <zephyr/devicetree.h> #include <zephyr/drivers/disk.h> #include <zephyr/drivers/sdhc.h> #include <zephyr/drivers/clock_control/renesas_cpg_mssr.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/cache.h> #include <zephyr/drivers/regulator.h> #include "rcar_mmc_registers.h" #define PINCTRL_STATE_UHS PINCTRL_STATE_PRIV_START /** * @note we don't need any locks here, because SDHC subsystem cares about it */ LOG_MODULE_REGISTER(rcar_mmc, CONFIG_LOG_DEFAULT_LEVEL); #define MMC_POLL_FLAGS_TIMEOUT_US 100000 #define MMC_POLL_FLAGS_ONE_CYCLE_TIMEOUT_US 1 #define MMC_BUS_CLOCK_FREQ 800000000 #ifdef CONFIG_RCAR_MMC_DMA_SUPPORT #define ALIGN_BUF_DMA __aligned(CONFIG_SDHC_BUFFER_ALIGNMENT) #else #define ALIGN_BUF_DMA #endif /** * @brief Renesas MMC host controller driver data * */ struct mmc_rcar_data { DEVICE_MMIO_RAM; /* Must be first */ struct sdhc_io host_io; struct sdhc_host_props props; #ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT struct k_sem irq_xref_fin; #endif uint8_t ver; /* in bytes, possible values are 2, 4 or 8 */ uint8_t width_access_sd_buf0; uint8_t ddr_mode; uint8_t restore_cfg_after_reset; uint8_t is_last_cmd_app_cmd; /* ACMD55 */ #ifdef CONFIG_RCAR_MMC_SCC_SUPPORT uint8_t manual_retuning; uint8_t tuning_buf[128] ALIGN_BUF_DMA; #endif /* CONFIG_RCAR_MMC_SCC_SUPPORT */ uint8_t can_retune; }; /** * @brief Renesas MMC host controller driver configuration */ struct mmc_rcar_cfg { DEVICE_MMIO_ROM; /* Must be first */ struct rcar_cpg_clk cpg_clk; struct rcar_cpg_clk bus_clk; const struct device *cpg_dev; const struct pinctrl_dev_config *pcfg; const struct device *regulator_vqmmc; const struct device *regulator_vmmc; uint32_t max_frequency; #ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT void (*irq_config_func)(const struct device *dev); #endif uint8_t non_removable; uint8_t uhs_support; uint8_t mmc_hs200_1_8v; uint8_t mmc_hs400_1_8v; uint8_t bus_width; uint8_t 
mmc_sdr104_support; }; #ifdef CONFIG_RCAR_MMC_SCC_SUPPORT static int rcar_mmc_execute_tuning(const struct device *dev); static int rcar_mmc_retune_if_needed(const struct device *dev, bool request_retune); #endif static int rcar_mmc_disable_scc(const struct device *dev); static uint32_t rcar_mmc_read_reg32(const struct device *dev, uint32_t reg) { return sys_read32(DEVICE_MMIO_GET(dev) + reg); } static void rcar_mmc_write_reg32(const struct device *dev, uint32_t reg, uint32_t val) { sys_write32(val, DEVICE_MMIO_GET(dev) + reg); } /* cleanup SD card interrupt flag register and mask their interrupts */ static inline void rcar_mmc_reset_and_mask_irqs(const struct device *dev) { struct mmc_rcar_data *data = dev->data; rcar_mmc_write_reg32(dev, RCAR_MMC_INFO1, 0); rcar_mmc_write_reg32(dev, RCAR_MMC_INFO1_MASK, ~0); rcar_mmc_write_reg32(dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_CLEAR); rcar_mmc_write_reg32(dev, RCAR_MMC_INFO2_MASK, ~0); #ifdef CONFIG_RCAR_MMC_DMA_SUPPORT /* default value of Seq suspend should be 0 */ rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO1_MASK, 0xfffffeff); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO1, 0x0); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO2_MASK, 0xffffffff); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO2, 0x0); #ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT k_sem_reset(&data->irq_xref_fin); #endif #endif /* CONFIG_RCAR_MMC_DMA_SUPPORT */ } /** * @brief check if MMC is busy * * This check should generally be implemented as checking the controller * state. No MMC commands need to be sent. * * @param dev MMC device * @retval 0 card is not busy * @retval 1 card is busy * @retval -EINVAL: the dev pointer is NULL */ static int rcar_mmc_card_busy(const struct device *dev) { uint32_t reg; if (!dev) { return -EINVAL; } reg = rcar_mmc_read_reg32(dev, RCAR_MMC_INFO2); return (reg & RCAR_MMC_INFO2_DAT0) ? 
0 : 1; } /** * @brief Check error flags inside INFO2 MMC register * * @note in/out parameters should be checked by a caller function * * @param dev MMC device * * @retval 0 INFO2 register hasn't errors * @retval -ETIMEDOUT: timed out while tx/rx * @retval -EIO: I/O error * @retval -EILSEQ: communication out of sync */ static int rcar_mmc_check_errors(const struct device *dev) { uint32_t info2 = rcar_mmc_read_reg32(dev, RCAR_MMC_INFO2); if (info2 & (RCAR_MMC_INFO2_ERR_TO | RCAR_MMC_INFO2_ERR_RTO)) { LOG_DBG("timeout error 0x%08x", info2); return -ETIMEDOUT; } if (info2 & (RCAR_MMC_INFO2_ERR_END | RCAR_MMC_INFO2_ERR_CRC | RCAR_MMC_INFO2_ERR_IDX)) { LOG_DBG("communication out of sync 0x%08x", info2); return -EILSEQ; } if (info2 & (RCAR_MMC_INFO2_ERR_ILA | RCAR_MMC_INFO2_ERR_ILR | RCAR_MMC_INFO2_ERR_ILW)) { LOG_DBG("illegal access 0x%08x", info2); return -EIO; } return 0; } /** * @brief Poll flag(s) in MMC register and check errors * * @note in/out parameters should be checked by a caller function * * @param dev MMC device * @param reg register offset relative to the base address * @param flag polling flag(s) * @param state state of flag(s) when we should stop polling * @param check_errors call @ref rcar_mmc_check_errors function or not * @param check_dma_errors check if there are DMA errors inside info2 * @param timeout_us timeout in microseconds how long we should poll flag(s) * * @retval 0 poll of flag(s) was successful * @retval -ETIMEDOUT: timed out while tx/rx * @retval -EIO: I/O error * @retval -EILSEQ: communication out of sync */ static int rcar_mmc_poll_reg_flags_check_err(const struct device *dev, unsigned int reg, uint32_t flag, uint32_t state, bool check_errors, bool check_dma_errors, int64_t timeout_us) { int ret; while ((rcar_mmc_read_reg32(dev, reg) & flag) != state) { if (timeout_us < 0) { LOG_DBG("timeout error during polling flag(s) 0x%08x in reg 0x%08x", flag, reg); return -ETIMEDOUT; } if (check_errors) { ret = rcar_mmc_check_errors(dev); if (ret) 
{ return ret; } } if (check_dma_errors && rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_INFO2)) { LOG_DBG("%s: an error occurs on the DMAC channel #%u", dev->name, (reg & RCAR_MMC_DMA_INFO2_ERR_RD) ? 1U : 0U); return -EIO; } k_usleep(MMC_POLL_FLAGS_ONE_CYCLE_TIMEOUT_US); timeout_us -= MMC_POLL_FLAGS_ONE_CYCLE_TIMEOUT_US; } return 0; } /* reset DMA MMC controller */ static inline void rcar_mmc_reset_dma(const struct device *dev) { uint32_t reg = RCAR_MMC_DMA_RST_DTRAN0 | RCAR_MMC_DMA_RST_DTRAN1; rcar_mmc_write_reg32(dev, RCAR_MMC_EXTMODE, 0); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_RST, ~reg); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_RST, ~0); rcar_mmc_write_reg32(dev, RCAR_MMC_EXTMODE, 1); } /** * @brief reset MMC controller state * * Used when the MMC has encountered an error. Resetting the MMC controller * should clear all errors on the MMC, but does not necessarily reset I/O * settings to boot (this can be done with @ref sdhc_set_io) * * @note during reset the clock input is disabled, also this call changes rate * * @param dev MMC controller device * @retval 0 reset succeeded * @retval -ETIMEDOUT: controller reset timed out * @retval -EINVAL: the dev pointer is NULL * @retval -EILSEQ: communication out of sync * @retval -ENOTSUP: controller does not support I/O * * @details List of affected registers and their bits during the soft reset trigger: * * RCAR_MMC_STOP all bits reset to default (0x0); * * RCAR_MMC_INFO1 affected bits: * * RCAR_MMC_INFO1_CMP default state 0; * * RCAR_MMC_INFO1_RSP default state 0; * * HPIRES Response Reception Completion (16), default state 0; * * RCAR_MMC_INFO2 all bits reset 0, except the next: * * RCAR_MMC_INFO2_DAT0 state unknown after reset; * * RCAR_MMC_INFO2_SCLKDIVEN default state 1; * * RCAR_MMC_CLKCTL affected bit(s): * * RCAR_MMC_CLKCTL_SCLKEN default state 0; * * RCAR_MMC_OPTION affected bits: * * WIDTH (15) and WIDTH8 (13) set to 0, which equal to 4-bits bus; * * Timeout Mode Select (EXTOP - 9) is set to 0; * * Timeout Mask (TOUTMASK 
- 8) is set to 0; * * Timeout Counter (TOP27-TOP24 bits 7-4) is equal to 0b1110; * * Card Detect Time Counter (CTOP24-CTOP21 bits 3-0) is equal to 0b1110; * * RCAR_MMC_ERR_STS1 all bits after reset 0, except the next: * * E13 default state 1 (E12-E14 it is CRC status 0b010); * * RCAR_MMC_ERR_STS2 all bits after reset 0; * * IO_INFO1 all bits after reset 0; * * RCAR_MMC_IF_MODE all bits after reset 0. */ static int rcar_mmc_reset(const struct device *dev) { int ret = 0; uint32_t reg; struct mmc_rcar_data *data; uint8_t can_retune; if (!dev) { return -EINVAL; } data = dev->data; /* * soft reset of the host */ reg = rcar_mmc_read_reg32(dev, RCAR_MMC_SOFT_RST); reg &= ~RCAR_MMC_SOFT_RST_RSTX; rcar_mmc_write_reg32(dev, RCAR_MMC_SOFT_RST, reg); reg |= RCAR_MMC_SOFT_RST_RSTX; rcar_mmc_write_reg32(dev, RCAR_MMC_SOFT_RST, reg); rcar_mmc_reset_and_mask_irqs(dev); /* * note: DMA reset can be triggered only in case of error in * DMA Info2 otherwise the SDIP will not accurately operate */ #ifdef CONFIG_RCAR_MMC_DMA_SUPPORT rcar_mmc_reset_dma(dev); #endif can_retune = data->can_retune; if (can_retune) { rcar_mmc_disable_scc(dev); } /* note: be careful soft reset stops SDCLK */ if (data->restore_cfg_after_reset) { struct sdhc_io ios; memcpy(&ios, &data->host_io, sizeof(ios)); memset(&data->host_io, 0, sizeof(ios)); data->host_io.power_mode = ios.power_mode; ret = sdhc_set_io(dev, &ios); rcar_mmc_write_reg32(dev, RCAR_MMC_STOP, RCAR_MMC_STOP_SEC); #ifdef CONFIG_RCAR_MMC_SCC_SUPPORT /* tune if this reset isn't invoked during tuning */ if (can_retune && (ios.timing == SDHC_TIMING_SDR50 || ios.timing == SDHC_TIMING_SDR104 || ios.timing == SDHC_TIMING_HS200)) { ret = rcar_mmc_execute_tuning(dev); } #endif return ret; } data->ddr_mode = 0; data->host_io.bus_width = SDHC_BUS_WIDTH4BIT; data->host_io.timing = SDHC_TIMING_LEGACY; data->is_last_cmd_app_cmd = 0; return 0; } /** * @brief SD Clock (SD_CLK) Output Control Enable * * @note in/out parameters should be checked by a caller 
function. * * @param dev MMC device * @param enable * false: SD_CLK output is disabled. The SD_CLK signal is fixed 0. * true: SD_CLK output is enabled. * * @retval 0 I/O was configured correctly * @retval -ETIMEDOUT: card busy flag is set during long time */ static int rcar_mmc_enable_clock(const struct device *dev, bool enable) { int ret; uint32_t mmc_clk_ctl = rcar_mmc_read_reg32(dev, RCAR_MMC_CLKCTL); if (enable == true) { mmc_clk_ctl &= ~RCAR_MMC_CLKCTL_OFFEN; mmc_clk_ctl |= RCAR_MMC_CLKCTL_SCLKEN; } else { mmc_clk_ctl |= RCAR_MMC_CLKCTL_OFFEN; mmc_clk_ctl &= ~RCAR_MMC_CLKCTL_SCLKEN; } /* * Do not change the values of these bits * when the CBSY bit in SD_INFO2 is 1 */ ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_CBSY, 0, false, false, MMC_POLL_FLAGS_TIMEOUT_US); if (ret) { return -ETIMEDOUT; } rcar_mmc_write_reg32(dev, RCAR_MMC_CLKCTL, mmc_clk_ctl); /* SD spec recommends at least 1 ms of delay */ k_msleep(1); return 0; } /** * @brief Convert SDHC response to Renesas MMC response * * Function performs a conversion from SDHC response to Renesas MMC * CMD register response. * * @note in/out parameters should be checked by a caller function. 
* * @param response_type SDHC response type without SPI flags * * @retval positiv number (partial configuration of CMD register) on * success, negative errno code otherwise */ static int32_t rcar_mmc_convert_sd_to_mmc_resp(uint32_t response_type) { uint32_t mmc_resp = 0U; switch (response_type) { case SD_RSP_TYPE_NONE: mmc_resp = RCAR_MMC_CMD_RSP_NONE; break; case SD_RSP_TYPE_R1: case SD_RSP_TYPE_R5: case SD_RSP_TYPE_R6: case SD_RSP_TYPE_R7: mmc_resp = RCAR_MMC_CMD_RSP_R1; break; case SD_RSP_TYPE_R1b: case SD_RSP_TYPE_R5b: mmc_resp = RCAR_MMC_CMD_RSP_R1B; break; case SD_RSP_TYPE_R2: mmc_resp = RCAR_MMC_CMD_RSP_R2; break; case SD_RSP_TYPE_R3: case SD_RSP_TYPE_R4: mmc_resp = RCAR_MMC_CMD_RSP_R3; break; default: LOG_ERR("unknown response type 0x%08x", response_type); return -EINVAL; } __ASSERT((int32_t)mmc_resp >= 0, "%s: converted response shouldn't be negative", __func__); return mmc_resp; } /** * @brief Convert response from Renesas MMC to SDHC * * Function writes a response to response array of @ref sdhc_command structure * * @note in/out parameters should be checked by a caller function. 
 *
 * @param dev MMC device
 * @param cmd MMC command
 * @param response_type SDHC response type without SPI flags
 *
 * @retval none
 */
static void rcar_mmc_extract_resp(const struct device *dev, struct sdhc_command *cmd,
				  uint32_t response_type)
{
	if (response_type == SD_RSP_TYPE_R2) {
		uint32_t rsp_127_104 = rcar_mmc_read_reg32(dev, RCAR_MMC_RSP76);
		uint32_t rsp_103_72 = rcar_mmc_read_reg32(dev, RCAR_MMC_RSP54);
		uint32_t rsp_71_40 = rcar_mmc_read_reg32(dev, RCAR_MMC_RSP32);
		uint32_t rsp_39_8 = rcar_mmc_read_reg32(dev, RCAR_MMC_RSP10);

		/*
		 * Reassemble the long (R2) response: each destination word takes
		 * 24 bits from one RSP register plus the top 8 bits of the
		 * previous one, shifted left by 8.
		 */
		cmd->response[0] = (rsp_39_8 & 0xffffff) << 8;
		cmd->response[1] = ((rsp_71_40 & 0x00ffffff) << 8) | ((rsp_39_8 & 0xff000000) >> 24);
		cmd->response[2] =
			((rsp_103_72 & 0x00ffffff) << 8) | ((rsp_71_40 & 0xff000000) >> 24);
		cmd->response[3] =
			((rsp_127_104 & 0x00ffffff) << 8) | ((rsp_103_72 & 0xff000000) >> 24);

		LOG_DBG("Response 2\n\t[0]: 0x%08x\n\t[1]: 0x%08x"
			"\n\t[2]: 0x%08x\n\t[3]: 0x%08x",
			cmd->response[0], cmd->response[1], cmd->response[2], cmd->response[3]);
	} else {
		cmd->response[0] = rcar_mmc_read_reg32(dev, RCAR_MMC_RSP10);
		LOG_DBG("Response %u\n\t[0]: 0x%08x", response_type, cmd->response[0]);
	}
}

/*
 * configure CMD register for tx/rx data:
 * sets the DATA flag, the read direction for known read opcodes and
 * the MULTI flag for multi-block transfers
 */
static uint32_t rcar_mmc_gen_data_cmd(struct sdhc_command *cmd, struct sdhc_data *data)
{
	uint32_t cmd_reg = RCAR_MMC_CMD_DATA;

	switch (cmd->opcode) {
	case MMC_SEND_EXT_CSD:
	case SD_READ_SINGLE_BLOCK:
	case MMC_SEND_TUNING_BLOCK:
	case SD_SEND_TUNING_BLOCK:
	case SD_SWITCH:
	case SD_APP_SEND_NUM_WRITTEN_BLK:
	case SD_APP_SEND_SCR:
		cmd_reg |= RCAR_MMC_CMD_RD;
		break;
	case SD_READ_MULTIPLE_BLOCK:
		cmd_reg |= RCAR_MMC_CMD_RD;
		cmd_reg |= RCAR_MMC_CMD_MULTI;
		break;
	case SD_WRITE_MULTIPLE_BLOCK:
		cmd_reg |= RCAR_MMC_CMD_MULTI;
		break;
	case SD_WRITE_SINGLE_BLOCK:
		/* fall through */
	default:
		break;
	}

	/* any transfer of more than one block needs the MULTI flag */
	if (data->blocks > 1) {
		cmd_reg |= RCAR_MMC_CMD_MULTI;
	}

	return cmd_reg;
}

/**
 * @brief Transmit/Receive data to/from MMC using DMA
 *
 * Sends/Receives data to/from the MMC controller.
* * @note in/out parameters should be checked by a caller function. * * @param dev MMC device * @param data MMC data buffer for tx/rx * @param is_read it is read or write operation * * @retval 0 tx/rx was successful * @retval -ENOTSUP: cache flush/invalidate aren't supported * @retval -ETIMEDOUT: timed out while tx/rx * @retval -EIO: I/O error * @retval -EILSEQ: communication out of sync */ static int rcar_mmc_dma_rx_tx_data(const struct device *dev, struct sdhc_data *data, bool is_read) { uintptr_t dma_addr; uint32_t reg; int ret = 0; uint32_t dma_info1_poll_flag; #ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT struct mmc_rcar_data *dev_data = dev->data; #endif ret = sys_cache_data_flush_range(data->data, data->blocks * data->block_size); if (ret < 0) { LOG_ERR("%s: can't invalidate data cache before write", dev->name); return ret; } reg = rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_MODE); if (is_read) { dma_info1_poll_flag = RCAR_MMC_DMA_INFO1_END_RD2; reg |= RCAR_MMC_DMA_MODE_DIR_RD; } else { dma_info1_poll_flag = RCAR_MMC_DMA_INFO1_END_WR; reg &= ~RCAR_MMC_DMA_MODE_DIR_RD; } rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_MODE, reg); reg = rcar_mmc_read_reg32(dev, RCAR_MMC_EXTMODE); reg |= RCAR_MMC_EXTMODE_DMA_EN; rcar_mmc_write_reg32(dev, RCAR_MMC_EXTMODE, reg); dma_addr = k_mem_phys_addr(data->data); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_ADDR_L, dma_addr); rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_ADDR_H, 0); #ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT rcar_mmc_write_reg32( dev, RCAR_MMC_DMA_INFO2_MASK, (uint32_t)(is_read ? 
			   (~RCAR_MMC_DMA_INFO2_ERR_RD) : (~RCAR_MMC_DMA_INFO2_ERR_WR)));
	/* unmask the DMA-completion interrupt for this direction */
	reg = rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_INFO1_MASK);
	reg &= ~dma_info1_poll_flag;
	rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO1_MASK, reg);

	rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_CTL, RCAR_MMC_DMA_CTL_START);
	/* wait for the ISR to signal DMA completion (or an error) */
	ret = k_sem_take(&dev_data->irq_xref_fin, K_MSEC(data->timeout_ms));
	if (ret < 0) {
		/* NOTE(review): k_sem_take() reports timeout as -EAGAIN, not -ETIMEDOUT */
		LOG_ERR("%s: interrupt signal timeout error %d", dev->name, ret);
	}

	/* any bit left in DMA_INFO2 indicates a DMAC error */
	reg = rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_INFO2);
	if (reg) {
		LOG_ERR("%s: an error occurs on the DMAC channel #%u", dev->name,
			(reg & RCAR_MMC_DMA_INFO2_ERR_RD) ? 1U : 0U);
		ret = -EIO;
	}
#else
	/* polled variant: start the DMAC and busy-wait for the end flag */
	rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_CTL, RCAR_MMC_DMA_CTL_START);
	ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_DMA_INFO1, dma_info1_poll_flag,
						dma_info1_poll_flag, false, true,
						data->timeout_ms * 1000LL);
#endif

	if (is_read) {
		/* drop stale cache lines so the CPU sees the DMA-written data */
		if (sys_cache_data_invd_range(data->data, data->blocks * data->block_size) < 0) {
			LOG_ERR("%s: can't invalidate data cache after read", dev->name);
		}
	}

	/* in case when we get to here and there wasn't IRQ trigger */
	rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO1_MASK, 0xfffffeff);
	rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO2_MASK, ~0);

	if (ret == -EIO) {
		rcar_mmc_reset_dma(dev);
	}

	/* back to SD BUF (PIO) mode */
	reg = rcar_mmc_read_reg32(dev, RCAR_MMC_EXTMODE);
	reg &= ~RCAR_MMC_EXTMODE_DMA_EN;
	rcar_mmc_write_reg32(dev, RCAR_MMC_EXTMODE, reg);

	return ret;
}

/*
 * read from SD/MMC controller buf0 register using the access width
 * configured in width_access_sd_buf0 (8/4/2 bytes)
 */
static inline uint64_t rcar_mmc_read_buf0(const struct device *dev)
{
	uint64_t buf0 = 0ULL;
	struct mmc_rcar_data *dev_data = dev->data;
	uint8_t sd_buf0_size = dev_data->width_access_sd_buf0;
	mm_reg_t buf0_addr = DEVICE_MMIO_GET(dev) + RCAR_MMC_BUF0;

	switch (sd_buf0_size) {
	case 8:
		buf0 = sys_read64(buf0_addr);
		break;
	case 4:
		buf0 = sys_read32(buf0_addr);
		break;
	case 2:
		buf0 = sys_read16(buf0_addr);
		break;
	default:
		/* unreachable with a correctly configured driver */
		k_panic();
		break;
	}

	return buf0;
}

/* write to SD/MMC controller buf0 register */
static inline void rcar_mmc_write_buf0(const struct device
 *dev, uint64_t val)
{
	struct mmc_rcar_data *dev_data = dev->data;
	uint8_t sd_buf0_size = dev_data->width_access_sd_buf0;
	mm_reg_t buf0_addr = DEVICE_MMIO_GET(dev) + RCAR_MMC_BUF0;

	/* access width must match the configured SD BUF0 width (8/4/2 bytes) */
	switch (sd_buf0_size) {
	case 8:
		sys_write64(val, buf0_addr);
		break;
	case 4:
		sys_write32(val, buf0_addr);
		break;
	case 2:
		sys_write16(val, buf0_addr);
		break;
	default:
		/* unreachable with a correctly configured driver */
		k_panic();
		break;
	}
}

/**
 * @brief Transmit/Receive data to/from MMC without DMA
 *
 * Sends/Receives data to/from the MMC controller.
 *
 * @note in/out parameters should be checked by a caller function.
 *
 * @param dev MMC device
 * @param data MMC data buffer for tx/rx
 * @param is_read it is read or write operation
 *
 * @retval 0 tx/rx was successful
 * @retval -EINVAL: invalid block size
 * @retval -ETIMEDOUT: timed out while tx/rx
 * @retval -EIO: I/O error
 * @retval -EILSEQ: communication out of sync
 */
static int rcar_mmc_sd_buf_rx_tx_data(const struct device *dev, struct sdhc_data *data,
				      bool is_read)
{
	struct mmc_rcar_data *dev_data = dev->data;
	uint32_t block;
	int ret = 0;
	uint32_t info2_poll_flag = is_read ? RCAR_MMC_INFO2_BRE : RCAR_MMC_INFO2_BWE;
	uint8_t sd_buf0_size = dev_data->width_access_sd_buf0;
	uint16_t aligned_block_size = ROUND_UP(data->block_size, sd_buf0_size);
	uint32_t cmd_reg = 0;
	int64_t remaining_timeout_us = data->timeout_ms * 1000LL;

	/*
	 * note: below code should work for all possible block sizes, but
	 * we need below check, because code isn't tested with smaller
	 * block sizes.
	 */
	if ((data->block_size % dev_data->width_access_sd_buf0) ||
	    (data->block_size < dev_data->width_access_sd_buf0)) {
		LOG_ERR("%s: block size (%u) less or not align on SD BUF0 access width (%hhu)",
			dev->name, data->block_size, dev_data->width_access_sd_buf0);
		return -EINVAL;
	}

	/*
	 * JEDEC Standard No. 84-B51
	 * 6.6.24 Dual Data Rate mode operation:
	 * Therefore, all single or multiple block data transfer read or write will operate on
	 * a fixed block size of 512 bytes while the Device remains in dual data rate.
* * Physical Layer Specification Version 3.01 * 4.12.6 Timing Changes in DDR50 Mode * 4.12.6.2 Protocol Principles * * Read and Write data block length size is always 512 bytes (same as SDHC). */ if (dev_data->ddr_mode && data->block_size != 512) { LOG_ERR("%s: block size (%u) isn't equal to 512 in DDR mode", dev->name, data->block_size); return -EINVAL; } /* * note: the next restrictions we have according to description of * transfer data length register from R-Car S4 series User's Manual */ if (data->block_size > 512 || data->block_size == 0) { LOG_ERR("%s: block size (%u) must not be bigger than 512 bytes and equal to zero", dev->name, data->block_size); return -EINVAL; } cmd_reg = rcar_mmc_read_reg32(dev, RCAR_MMC_CMD); if (cmd_reg & RCAR_MMC_CMD_MULTI) { /* CMD12 is automatically issued at multiple block transfer */ if (!(cmd_reg & RCAR_MMC_CMD_NOSTOP) && data->block_size != 512) { LOG_ERR("%s: illegal block size (%u) for multi-block xref with CMD12", dev->name, data->block_size); return -EINVAL; } switch (data->block_size) { case 32: case 64: case 128: case 256: case 512: break; default: LOG_ERR("%s: illegal block size (%u) for multi-block xref without CMD12", dev->name, data->block_size); return -EINVAL; } } if (data->block_size == 1 && dev_data->host_io.bus_width == SDHC_BUS_WIDTH8BIT) { LOG_ERR("%s: block size can't be equal to 1 with 8-bits bus width", dev->name); return -EINVAL; } for (block = 0; block < data->blocks; block++) { uint8_t *buf = (uint8_t *)data->data + (block * data->block_size); uint32_t info2_reg; uint16_t w_off; /* word offset in a block */ uint64_t start_block_xref_us = k_ticks_to_us_ceil64(k_uptime_ticks()); /* wait until the buffer is filled with data */ ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO2, info2_poll_flag, info2_poll_flag, true, false, remaining_timeout_us); if (ret) { return ret; } /* clear write/read buffer ready flag */ info2_reg = rcar_mmc_read_reg32(dev, RCAR_MMC_INFO2); info2_reg &= ~info2_poll_flag; 
		rcar_mmc_write_reg32(dev, RCAR_MMC_INFO2, info2_reg);

		/* copy the block word by word through the BUF0 register */
		for (w_off = 0; w_off < aligned_block_size; w_off += sd_buf0_size) {
			uint64_t buf0 = 0ULL;
			uint8_t copy_size = MIN(sd_buf0_size, data->block_size - w_off);

			if (is_read) {
				buf0 = rcar_mmc_read_buf0(dev);
				memcpy(buf + w_off, &buf0, copy_size);
			} else {
				memcpy(&buf0, buf + w_off, copy_size);
				rcar_mmc_write_buf0(dev, buf0);
			}
		}

		/* budget the overall timeout across all blocks */
		remaining_timeout_us -= k_ticks_to_us_ceil64(k_uptime_ticks()) - start_block_xref_us;
		if (remaining_timeout_us < 0) {
			return -ETIMEDOUT;
		}
	}

	return ret;
}

/**
 * @brief Transmit/Receive data to/from MMC
 *
 * Sends/Receives data to/from the MMC controller.
 *
 * @note in/out parameters should be checked by a caller function.
 *
 * @param dev MMC device
 * @param data MMC data buffer for tx/rx
 * @param is_read it is read or write operation
 *
 * @retval 0 tx/rx was successful
 * @retval -EINVAL: invalid block size
 * @retval -ETIMEDOUT: timed out while tx/rx
 * @retval -EIO: I/O error
 * @retval -EILSEQ: communication out of sync
 */
static int rcar_mmc_rx_tx_data(const struct device *dev, struct sdhc_data *data, bool is_read)
{
	uint32_t info1_reg;
	int ret = 0;

#ifdef CONFIG_RCAR_MMC_DMA_SUPPORT
	/*
	 * DMA_ADDR_H is always written as 0, so DMA is only usable for
	 * buffers whose physical address fits in 32 bits; fall back to
	 * PIO (SD BUF) otherwise.
	 */
	if (!(k_mem_phys_addr(data->data) >> 32)) {
		ret = rcar_mmc_dma_rx_tx_data(dev, data, is_read);
	} else
#endif
	{
		ret = rcar_mmc_sd_buf_rx_tx_data(dev, data, is_read);
	}

	if (ret < 0) {
		return ret;
	}

	/* wait for the access-end (CMP) flag */
	ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO1, RCAR_MMC_INFO1_CMP,
						RCAR_MMC_INFO1_CMP, true, false,
						MMC_POLL_FLAGS_TIMEOUT_US);
	if (ret) {
		return ret;
	}

	/* clear access end flag */
	info1_reg = rcar_mmc_read_reg32(dev, RCAR_MMC_INFO1);
	info1_reg &= ~RCAR_MMC_INFO1_CMP;
	rcar_mmc_write_reg32(dev, RCAR_MMC_INFO1, info1_reg);

	return ret;
}

/**
 * @brief Send command to MMC
 *
 * Sends a command to the MMC controller.
 *
 * @param dev MMC device
 * @param cmd MMC command
 * @param data MMC data. Leave NULL to send SD command without data.
 *
 * @retval 0 command was sent successfully
 * @retval -ETIMEDOUT: command timed out while sending
 * @retval -ENOTSUP: host controller does not support command
 * @retval -EIO: I/O error
 * @retval -EILSEQ: communication out of sync
 */
static int rcar_mmc_request(const struct device *dev, struct sdhc_command *cmd,
			    struct sdhc_data *data)
{
	int ret = -ENOTSUP;
	uint32_t reg;
	uint32_t response_type;
	bool is_read = true;
	int attempts;
	struct mmc_rcar_data *dev_data;

	if (!dev || !cmd) {
		return -EINVAL;
	}

	dev_data = dev->data;
	response_type = cmd->response_type & SDHC_NATIVE_RESPONSE_MASK;
	attempts = cmd->retries + 1;

	while (ret && attempts-- > 0) {
		/*
		 * ret is initialized to -ENOTSUP, so the controller reset is
		 * skipped on the first iteration and only runs on retries.
		 */
		if (ret != -ENOTSUP) {
			rcar_mmc_reset(dev);
#ifdef CONFIG_RCAR_MMC_SCC_SUPPORT
			rcar_mmc_retune_if_needed(dev, true);
#endif
		}

		/* wait while the command/data lines are busy (CBSY) */
		ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_CBSY,
							0, false, false, MMC_POLL_FLAGS_TIMEOUT_US);
		if (ret) {
			ret = -EBUSY;
			continue;
		}

		rcar_mmc_reset_and_mask_irqs(dev);

		rcar_mmc_write_reg32(dev, RCAR_MMC_ARG, cmd->arg);

		reg = cmd->opcode;

		if (data) {
			rcar_mmc_write_reg32(dev, RCAR_MMC_SIZE, data->block_size);
			rcar_mmc_write_reg32(dev, RCAR_MMC_SECCNT, data->blocks);
			reg |= rcar_mmc_gen_data_cmd(cmd, data);
			is_read = (reg & RCAR_MMC_CMD_RD) ?
true : false;
		}

		/* CMD55 is always sent before an ACMD */
		if (dev_data->is_last_cmd_app_cmd) {
			reg |= RCAR_MMC_CMD_APP;
		}

		ret = rcar_mmc_convert_sd_to_mmc_resp(response_type);
		if (ret < 0) {
			/* don't need to retry we will always have the same result */
			return -EINVAL;
		}

		reg |= ret;

		LOG_DBG("(SD_CMD=%08x, SD_ARG=%08x)", cmd->opcode, cmd->arg);
		rcar_mmc_write_reg32(dev, RCAR_MMC_CMD, reg);

		/* wait until response end flag is set or errors occur */
		ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO1, RCAR_MMC_INFO1_RSP,
							RCAR_MMC_INFO1_RSP, true, false,
							cmd->timeout_ms * 1000LL);
		if (ret) {
			continue;
		}

		/* clear response end flag */
		reg = rcar_mmc_read_reg32(dev, RCAR_MMC_INFO1);
		reg &= ~RCAR_MMC_INFO1_RSP;
		rcar_mmc_write_reg32(dev, RCAR_MMC_INFO1, reg);

		rcar_mmc_extract_resp(dev, cmd, response_type);

		if (data) {
			ret = rcar_mmc_rx_tx_data(dev, data, is_read);
			if (ret) {
				continue;
			}
		}

		/* wait until the SD bus (CMD, DAT) is free or errors occur */
		ret = rcar_mmc_poll_reg_flags_check_err(
			dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_SCLKDIVEN, RCAR_MMC_INFO2_SCLKDIVEN,
			true, false, MMC_POLL_FLAGS_TIMEOUT_US);
	}

	/* all retries failed: bring the controller back to a clean state */
	if (ret) {
		rcar_mmc_reset(dev);
#ifdef CONFIG_RCAR_MMC_SCC_SUPPORT
		rcar_mmc_retune_if_needed(dev, true);
#endif
	}

	/* remember whether this command was CMD55 (the next one is then an ACMD) */
	dev_data->is_last_cmd_app_cmd = (cmd->opcode == SD_APP_CMD);

	return ret;
}

/* convert sd_voltage to string */
static inline const char *const rcar_mmc_get_signal_voltage_str(enum sd_voltage voltage)
{
	static const char *const sig_vol_str[] = {
		[0] = "Unset",
		[SD_VOL_3_3_V] = "3.3V",
		[SD_VOL_3_0_V] = "3.0V",
		[SD_VOL_1_8_V] = "1.8V",
		[SD_VOL_1_2_V] = "1.2V",
	};

	if (voltage >= 0 && voltage < ARRAY_SIZE(sig_vol_str)) {
		return sig_vol_str[voltage];
	} else {
		return "Unknown";
	}
}

/* convert sdhc_timing_mode to string */
static inline const char *const rcar_mmc_get_timing_str(enum sdhc_timing_mode timing)
{
	static const char *const timing_str[] = {
		[0] = "Unset",
		[SDHC_TIMING_LEGACY] = "LEGACY",
		[SDHC_TIMING_HS] = "HS",
		[SDHC_TIMING_SDR12] = "SDR12",
		[SDHC_TIMING_SDR25] = "SDR25",
		[SDHC_TIMING_SDR50] = "SDR50",
		[SDHC_TIMING_SDR104] = "SDR104",
		[SDHC_TIMING_DDR50] = "DDR50",
		[SDHC_TIMING_DDR52] = "DDR52",
		[SDHC_TIMING_HS200] = "HS200",
		[SDHC_TIMING_HS400] = "HS400",
	};

	if (timing >= 0 && timing < ARRAY_SIZE(timing_str)) {
		return timing_str[timing];
	} else {
		return "Unknown";
	}
}

/* change voltage of MMC */
static int rcar_mmc_change_voltage(const struct mmc_rcar_cfg *cfg, struct sdhc_io *host_io,
				   struct sdhc_io *ios)
{
	int ret = 0;

	/* Set host signal voltage */
	if (!ios->signal_voltage || ios->signal_voltage == host_io->signal_voltage) {
		return 0;
	}

	switch (ios->signal_voltage) {
	case SD_VOL_3_3_V:
		/* -ENOSYS means the regulator has no voltage control — not an error here */
		ret = regulator_set_voltage(cfg->regulator_vqmmc, 3300000, 3300000);
		if (ret && ret != -ENOSYS) {
			break;
		}
		ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
		break;
	case SD_VOL_1_8_V:
		ret = regulator_set_voltage(cfg->regulator_vqmmc, 1800000, 1800000);
		if (ret && ret != -ENOSYS) {
			break;
		}
		ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_UHS);
		break;
	case SD_VOL_3_0_V:
	case SD_VOL_1_2_V:
		/* fall through */
	default:
		ret = -ENOTSUP;
		return ret;
	}

	if (!ret) {
		host_io->signal_voltage = ios->signal_voltage;
	}

	return ret;
}

/* note: for zero val function returns zero */
static inline uint32_t round_up_next_pwr_of_2(uint32_t val)
{
	__ASSERT(val, "Zero val passed to %s", __func__);

	/* classic bit-smearing trick: round up to the next power of two */
	val--;
	val |= val >> 1;
	val |= val >> 2;
	val |= val >> 4;
	val |= val >> 8;
	val |= val >> 16;
	return ++val;
}

/**
 * @brief configure clock divider on MMC controller
 *
 * @note In/out parameters should be checked by a caller function.
 * @note In the case of data transfer in HS400 mode (HS400 bit in
 * SDIF_MODE = 1), do not set this width equal to 1.
 * @note In the case of writing of one-byte block, 8-bit width cannot
 * be specified for the bus width. Change the bus width to 4 bits
 * or 1 bit before writing one-byte block.
 *
 * @param dev MMC device
 * @param io I/O properties
 *
 * @retval 0 I/O was configured correctly
 * @retval -ENOTSUP: controller does not support these I/O settings
 * @retval -ETIMEDOUT: card busy flag is set during long time
 */
static int rcar_mmc_set_clk_rate(const struct device *dev, struct sdhc_io *ios)
{
	int ret = 0;
	uint32_t divisor;
	uint32_t mmc_clk_ctl;
	struct mmc_rcar_data *data = dev->data;
	const struct mmc_rcar_cfg *cfg = dev->config;
	struct sdhc_io *host_io = &data->host_io;

	if (host_io->clock == ios->clock) {
		return 0;
	}

	/* a requested clock of 0 means "switch the SD clock off" */
	if (ios->clock == 0) {
		host_io->clock = 0;
		return rcar_mmc_enable_clock(dev, false);
	}

	if (ios->clock > data->props.f_max || ios->clock < data->props.f_min) {
		LOG_ERR("SDHC I/O: clock (%d) isn't in range %d - %d Hz", ios->clock,
			data->props.f_min, data->props.f_max);
		return -EINVAL;
	}

	divisor = DIV_ROUND_UP(cfg->max_frequency, ios->clock);

	/* Do not set divider to 0xff in DDR mode */
	if (data->ddr_mode && (divisor == 1)) {
		divisor = 2;
	}

	/* map the divisor onto the CLKCTL DIV field encoding */
	divisor = round_up_next_pwr_of_2(divisor);
	if (divisor == 1) {
		divisor = RCAR_MMC_CLKCTL_RCAR_DIV1;
	} else {
		divisor >>= 2;
	}

	/*
	 * Stop the clock before changing its rate
	 * to avoid a glitch signal
	 */
	ret = rcar_mmc_enable_clock(dev, false);
	if (ret) {
		return ret;
	}

	mmc_clk_ctl = rcar_mmc_read_reg32(dev, RCAR_MMC_CLKCTL);
	if ((mmc_clk_ctl & RCAR_MMC_CLKCTL_SCLKEN) &&
	    (mmc_clk_ctl & RCAR_MMC_CLKCTL_DIV_MASK) == divisor) {
		/*
		 * NOTE(review): this "already configured" path returns with the
		 * clock left disabled (enable == false) right after stopping it
		 * above — confirm it isn't meant to re-enable the clock instead.
		 */
		host_io->clock = ios->clock;
		return rcar_mmc_enable_clock(dev, false);
	}

	/*
	 * Do not change the values of these bits
	 * when the CBSY bit in SD_INFO2 is 1
	 */
	ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_CBSY, 0, false,
						false, MMC_POLL_FLAGS_TIMEOUT_US);
	if (ret) {
		return -ETIMEDOUT;
	}

	mmc_clk_ctl &= ~RCAR_MMC_CLKCTL_DIV_MASK;
	mmc_clk_ctl |= divisor;

	rcar_mmc_write_reg32(dev, RCAR_MMC_CLKCTL, mmc_clk_ctl);
	ret = rcar_mmc_enable_clock(dev, true);
	if (ret) {
		return ret;
	}

	host_io->clock = ios->clock;

	LOG_DBG("%s: set clock rate to %d", dev->name, ios->clock);
	return 0;
}

/**
 * @brief set bus width of MMC
 *
 * @note In/out parameters should be checked by a caller function.
 * @note In the case of data transfer in HS400 mode (HS400 bit in
 * SDIF_MODE = 1), do not set this width equal to 1.
 * @note In the case of writing of one-byte block, 8-bit width cannot
 * be specified for the bus width. Change the bus width to 4 bits
 * or 1 bit before writing one-byte block.
 *
 * @param dev MMC device
 * @param io I/O properties
 *
 * @retval 0 I/O was configured correctly
 * @retval -ENOTSUP: controller does not support these I/O settings
 * @retval -ETIMEDOUT: card busy flag is set during long time
 */
static int rcar_mmc_set_bus_width(const struct device *dev, struct sdhc_io *ios)
{
	int ret = 0;
	uint32_t mmc_option_reg;
	uint32_t reg_width;
	struct mmc_rcar_data *data = dev->data;
	struct sdhc_io *host_io = &data->host_io;

	/* Set bus width */
	if (host_io->bus_width == ios->bus_width) {
		return 0;
	}

	/* zero means "leave the bus width unchanged" */
	if (!ios->bus_width) {
		return 0;
	}

	switch (ios->bus_width) {
	case SDHC_BUS_WIDTH1BIT:
		reg_width = RCAR_MMC_OPTION_WIDTH_1;
		break;
	case SDHC_BUS_WIDTH4BIT:
		if (data->props.host_caps.bus_4_bit_support) {
			reg_width = RCAR_MMC_OPTION_WIDTH_4;
		} else {
			LOG_ERR("SDHC I/O: 4-bits bus width isn't supported");
			return -ENOTSUP;
		}
		break;
	case SDHC_BUS_WIDTH8BIT:
		if (data->props.host_caps.bus_8_bit_support) {
			reg_width = RCAR_MMC_OPTION_WIDTH_8;
		} else {
			LOG_ERR("SDHC I/O: 8-bits bus width isn't supported");
			return -ENOTSUP;
		}
		break;
	default:
		return -ENOTSUP;
	}

	/*
	 * Do not change the values of these bits
	 * when the CBSY bit in SD_INFO2 is 1
	 */
	ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_CBSY, 0, false,
						false, MMC_POLL_FLAGS_TIMEOUT_US);
	if (ret) {
		return -ETIMEDOUT;
	}

	mmc_option_reg = rcar_mmc_read_reg32(dev, RCAR_MMC_OPTION);
	mmc_option_reg &= ~RCAR_MMC_OPTION_WIDTH_MASK;
	mmc_option_reg |= reg_width;
	rcar_mmc_write_reg32(dev, RCAR_MMC_OPTION, mmc_option_reg);

	host_io->bus_width = ios->bus_width;

	LOG_DBG("%s: set bus-width to %d",
		dev->name, host_io->bus_width);
	return 0;
}

/**
 * set DDR mode on MMC controller according to value inside
 * ddr_mode field from @ref mmc_rcar_data structure.
 */
static int rcar_mmc_set_ddr_mode(const struct device *dev)
{
	int ret = 0;
	uint32_t if_mode_reg;
	struct mmc_rcar_data *data = dev->data;

	/*
	 * Do not change the values of these bits
	 * when the CBSY bit in SD_INFO2 is 1
	 */
	ret = rcar_mmc_poll_reg_flags_check_err(dev, RCAR_MMC_INFO2, RCAR_MMC_INFO2_CBSY, 0, false,
						false, MMC_POLL_FLAGS_TIMEOUT_US);
	if (ret) {
		return -ETIMEDOUT;
	}

	if_mode_reg = rcar_mmc_read_reg32(dev, RCAR_MMC_IF_MODE);

	if (data->ddr_mode) {
		/* HS400 mode (DDR mode) */
		if_mode_reg |= RCAR_MMC_IF_MODE_DDR;
	} else {
		/* Normal mode (default, high speed, or SDR) */
		if_mode_reg &= ~RCAR_MMC_IF_MODE_DDR;
	}

	rcar_mmc_write_reg32(dev, RCAR_MMC_IF_MODE, if_mode_reg);

	return 0;
}

/**
 * @brief set timing property of MMC
 *
 * For now function only can enable DDR mode and call the function for
 * changing voltage. It is expectable that we change clock using another
 * I/O option.
 * @note In/out parameters should be checked by a caller function.
* * @param dev MMC device * @param io I/O properties * * @retval 0 I/O was configured correctly * @retval -ENOTSUP: controller does not support these I/O settings * @retval -ETIMEDOUT: card busy flag is set during long time */ static int rcar_mmc_set_timings(const struct device *dev, struct sdhc_io *ios) { int ret; struct mmc_rcar_data *data = dev->data; struct sdhc_io *host_io = &data->host_io; enum sd_voltage new_voltage = host_io->signal_voltage; if (host_io->timing == ios->timing) { return 0; } if (!host_io->timing) { return 0; } data->ddr_mode = 0; switch (ios->timing) { case SDHC_TIMING_LEGACY: break; case SDHC_TIMING_HS: if (!data->props.host_caps.high_spd_support) { LOG_ERR("SDHC I/O: HS timing isn't supported"); return -ENOTSUP; } break; case SDHC_TIMING_SDR12: case SDHC_TIMING_SDR25: case SDHC_TIMING_SDR50: break; case SDHC_TIMING_SDR104: if (!data->props.host_caps.sdr104_support) { LOG_ERR("SDHC I/O: SDR104 timing isn't supported"); return -ENOTSUP; } break; case SDHC_TIMING_HS400: if (!data->props.host_caps.hs400_support) { LOG_ERR("SDHC I/O: HS400 timing isn't supported"); return -ENOTSUP; } new_voltage = SD_VOL_1_8_V; data->ddr_mode = 1; break; case SDHC_TIMING_DDR50: case SDHC_TIMING_DDR52: if (!data->props.host_caps.ddr50_support) { LOG_ERR("SDHC I/O: DDR50/DDR52 timing isn't supported"); return -ENOTSUP; } data->ddr_mode = 1; break; case SDHC_TIMING_HS200: if (!data->props.host_caps.hs200_support) { LOG_ERR("SDHC I/O: HS200 timing isn't supported"); return -ENOTSUP; } new_voltage = SD_VOL_1_8_V; break; default: return -ENOTSUP; } ios->signal_voltage = new_voltage; if (rcar_mmc_change_voltage(dev->config, host_io, ios)) { return -ENOTSUP; } ret = rcar_mmc_set_ddr_mode(dev); if (ret) { return ret; } host_io->timing = ios->timing; return 0; } /** * @brief set I/O properties of MMC * * I/O properties should be reconfigured when the card has been sent a command * to change its own MMC settings. 
 * This function can also be used to toggle
 * power to the SD card.
 *
 * @param dev MMC device
 * @param io I/O properties
 *
 * @retval 0 I/O was configured correctly
 * @retval -ENOTSUP: controller does not support these I/O settings
 * @retval -EINVAL: some of pointers provided to the function are NULL
 * @retval -ETIMEDOUT: card busy flag is set during long time
 */
static int rcar_mmc_set_io(const struct device *dev, struct sdhc_io *ios)
{
	int ret = 0;
	struct mmc_rcar_data *data;
	struct sdhc_io *host_io;

	if (!dev || !ios || !dev->data || !dev->config) {
		return -EINVAL;
	}

	data = dev->data;
	host_io = &data->host_io;

	LOG_DBG("SDHC I/O: bus width %d, clock %dHz, card power %s, "
		"timing %s, voltage %s",
		ios->bus_width, ios->clock, ios->power_mode == SDHC_POWER_ON ? "ON" : "OFF",
		rcar_mmc_get_timing_str(ios->timing),
		rcar_mmc_get_signal_voltage_str(ios->signal_voltage));

	/* Set host clock */
	ret = rcar_mmc_set_clk_rate(dev, ios);
	if (ret) {
		LOG_ERR("SDHC I/O: can't change clock rate error %d old %d new %d", ret,
			host_io->clock, ios->clock);
		return ret;
	}

	/*
	 * Set card bus mode
	 *
	 * SD Specifications Part 1 Physical Layer Simplified Specification Version 9.00
	 * 4.7.1 Command Types: "... there is no Open Drain mode in SD Memory Card"
	 *
	 * The use of open-drain mode is not possible in SD memory cards because the SD bus uses
	 * push-pull signaling, where both the host and the card can actively drive the data lines
	 * high or low.
	 * In an SD card, the command and response signaling needs to be bidirectional, and each
	 * signal line needs to be actively driven high or low. The use of open-drain mode in this
	 * scenario would not allow for the necessary bidirectional signaling and could result in
	 * communication errors.
	 *
	 * JEDEC Standard No. 84-B51, 10 The eMMC bus:
	 * "The eMMC bus has eleven communication lines:
	 *  - CMD: Command is a bidirectional signal. The host and Device drivers are operating in
	 *    two modes, open drain and push/pull.
	 *  - DAT0-7: Data lines are bidirectional signals. Host and Device drivers are operating
	 *    in push-pull mode.
	 *  - CLK: Clock is a host to Device signal. CLK operates in push-pull mode.
	 *  - Data Strobe: Data Strobe is a Device to host signal. Data Strobe operates in
	 *    push-pull mode."
	 *
	 * So, open-drain mode signaling is supported in eMMC as one of the signaling modes for
	 * the CMD line. But Gen3 and Gen4 boards has MMC/SD controller which is a specialized
	 * component designed specifically for managing communication with MMC/SD devices. It
	 * handles low-level operations such as protocol handling, data transfer, and error
	 * checking and should take care of the low-level details of communicating with the
	 * MMC/SD card, including setting the bus mode. Moreover, we can use only MMIO mode, the
	 * processor communicates with the MMC/SD controller through memory read and write
	 * operations, rather than through dedicated I/O instructions or specialized data transfer
	 * protocols like SPI or SDIO. Finally, R-Car Gen3 and Gen4 "Users manuals: Hardware"
	 * don't have direct configurations for open-drain mode for both PFC and GPIO and Zephyr
	 * SDHC subsystem doesn't support any bus mode except push-pull.
	 */
	if (ios->bus_mode != SDHC_BUSMODE_PUSHPULL) {
		LOG_ERR("SDHC I/O: not supported bus mode %d", ios->bus_mode);
		return -ENOTSUP;
	}
	host_io->bus_mode = ios->bus_mode;

	/* Set card power */
	if (ios->power_mode && host_io->power_mode != ios->power_mode) {
		const struct mmc_rcar_cfg *cfg = dev->config;

		switch (ios->power_mode) {
		case SDHC_POWER_ON:
			/* supply (vmmc) first, then I/O rail (vqmmc), then the clock */
			ret = regulator_enable(cfg->regulator_vmmc);
			if (ret) {
				break;
			}
			k_msleep(data->props.power_delay);

			ret = regulator_enable(cfg->regulator_vqmmc);
			if (ret) {
				break;
			}
			k_msleep(data->props.power_delay);

			ret = rcar_mmc_enable_clock(dev, true);
			break;
		case SDHC_POWER_OFF:
			/* reverse order: I/O rail first, then the supply, then the clock */
			if (regulator_is_enabled(cfg->regulator_vqmmc)) {
				ret = regulator_disable(cfg->regulator_vqmmc);
				if (ret) {
					break;
				}
			}
			if (regulator_is_enabled(cfg->regulator_vmmc)) {
				ret = regulator_disable(cfg->regulator_vmmc);
				if (ret) {
					break;
				}
			}
			ret = rcar_mmc_enable_clock(dev, false);
			break;
		default:
			LOG_ERR("SDHC I/O: not supported power mode %d", ios->power_mode);
			return -ENOTSUP;
		}

		if (ret) {
			return ret;
		}
		host_io->power_mode = ios->power_mode;
	}

	ret = rcar_mmc_set_bus_width(dev, ios);
	if (ret) {
		LOG_ERR("SDHC I/O: can't change bus width error %d old %d new %d", ret,
			host_io->bus_width, ios->bus_width);
		return ret;
	}

	ret = rcar_mmc_set_timings(dev, ios);
	if (ret) {
		LOG_ERR("SDHC I/O: can't change timing error %d old %d new %d", ret,
			host_io->timing, ios->timing);
		return ret;
	}

	ret = rcar_mmc_change_voltage(dev->config, host_io, ios);
	if (ret) {
		LOG_ERR("SDHC I/O: can't change voltage! error %d old %d new %d", ret,
			host_io->signal_voltage, ios->signal_voltage);
		return ret;
	}

	return 0;
}

/**
 * @brief check for MMC card presence
 *
 * Checks if card is present on the bus.
 *
 * @param dev MMC device
 *
 * @retval 1 card is present
 * @retval 0 card is not present
 * @retval -EINVAL: some of pointers provided to the function are NULL
 */
static int rcar_mmc_get_card_present(const struct device *dev)
{
	const struct mmc_rcar_cfg *cfg;

	if (!dev || !dev->config) {
		return -EINVAL;
	}

	cfg = dev->config;
	/* non-removable devices (e.g. soldered-down eMMC) are always present */
	if (cfg->non_removable) {
		return 1;
	}

	return !!(rcar_mmc_read_reg32(dev, RCAR_MMC_INFO1) & RCAR_MMC_INFO1_CD);
}

#ifdef CONFIG_RCAR_MMC_SCC_SUPPORT
/* JESD84-B51, 6.6.5.1 Sampling Tuning Sequence for HS200 */
static const uint8_t tun_block_8_bits_bus[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee,
	0xee, 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff,
	0xff, 0xff, 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, 0x77, 0x77, 0xff, 0x77,
	0xbb, 0xdd, 0xee, 0xff, 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, 0xff, 0xff,
	0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, 0xff,
	0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

/*
 * In 4 bit mode the same pattern is used as shown above,
 * but only first 4 bits least significant from every byte is used, example:
 * 8-bits pattern: 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00 ...
 *                    f     f     0     f     f     f     0     0 ...
 * 4-bits pattern: 0xff 0x0f 0xff 0x00 ...
 */
static const uint8_t tun_block_4_bits_bus[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe,
	0xef, 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7,
	0xbd, 0xef, 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, 0xcc, 0x33, 0xcc, 0xcf, 0xff,
	0xef, 0xff, 0xee, 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, 0xbb, 0xff, 0xf7, 0xff,
	0xf7, 0x7f, 0x7b, 0xde,
};

/* number of SCC sampling-clock taps (see SCC_DTCNTL/SCC_TAPSET usage below) */
#define RENESAS_TAPNUM 8

/**
 * @brief run MMC tuning
 *
 * MMC cards require signal tuning for UHS modes SDR104, HS200 or HS400.
 * This function allows an application to request the SD host controller
 * to tune the card.
 *
 * @param dev MMC device
 *
 * @retval 0 tuning succeeded (card is ready for commands), otherwise negative number is returned
 */
static int rcar_mmc_execute_tuning(const struct device *dev)
{
	int ret = -ENOTSUP;
	const uint8_t *tun_block_ptr;
	uint8_t tap_idx;
	uint8_t is_mmc_cmd = false;
	struct sdhc_command cmd = {0};
	struct sdhc_data data = {0};
	struct mmc_rcar_data *dev_data;
	uint16_t valid_taps = 0;
	uint16_t smpcmp_bitmask = 0;

	/* the bitmasks below must hold two full rounds of taps */
	BUILD_ASSERT(sizeof(valid_taps) * 8 >= 2 * RENESAS_TAPNUM);
	BUILD_ASSERT(sizeof(smpcmp_bitmask) * 8 >= 2 * RENESAS_TAPNUM);

	if (!dev) {
		return -EINVAL;
	}

	dev_data = dev->data;
	dev_data->can_retune = 0;

	/* pick the tuning opcode: CMD21 for eMMC HS200, CMD19 otherwise */
	if (dev_data->host_io.timing == SDHC_TIMING_HS200) {
		cmd.opcode = MMC_SEND_TUNING_BLOCK;
		is_mmc_cmd = true;
	} else if (dev_data->host_io.timing != SDHC_TIMING_HS400) {
		cmd.opcode = SD_SEND_TUNING_BLOCK;
	} else {
		LOG_ERR("%s: tuning isn't possible in HS400 mode, it should be done in HS200",
			dev->name);
		return -EINVAL;
	}

	cmd.response_type = SD_RSP_TYPE_R1;
	cmd.timeout_ms = CONFIG_SD_CMD_TIMEOUT;

	data.blocks = 1;
	data.data = dev_data->tuning_buf;
	data.timeout_ms = CONFIG_SD_DATA_TIMEOUT;
	/* reference pattern and block size depend on the current bus width */
	if (dev_data->host_io.bus_width == SDHC_BUS_WIDTH4BIT) {
		data.block_size = sizeof(tun_block_4_bits_bus);
		tun_block_ptr = tun_block_4_bits_bus;
	} else if (dev_data->host_io.bus_width == SDHC_BUS_WIDTH8BIT) {
data.block_size = sizeof(tun_block_8_bits_bus); tun_block_ptr = tun_block_8_bits_bus; } else { LOG_ERR("%s: don't support tuning for 1-bit bus width", dev->name); return -EINVAL; } ret = rcar_mmc_enable_clock(dev, false); if (ret) { return ret; } /* enable modes SDR104/HS200/HS400 */ rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_DT2FF, 0x300); /* SCC sampling clock operation is enabled */ rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_DTCNTL, RENESAS_SDHI_SCC_DTCNTL_TAPEN | RENESAS_TAPNUM << 16); /* SCC sampling clock is used */ rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_CKSEL, RENESAS_SDHI_SCC_CKSEL_DTSEL); /* SCC sampling clock position correction is disabled */ rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSCNTL, 0); /* cleanup errors */ rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSREQ, 0); ret = rcar_mmc_enable_clock(dev, true); if (ret) { return ret; } /* * two runs is better for detecting TAP ok cases like next: * - one burn: 0b10000011 * - two burns: 0b1000001110000011 * it is more easly to detect 3 OK taps in a row */ for (tap_idx = 0; tap_idx < 2 * RENESAS_TAPNUM; tap_idx++) { /* clear flags */ rcar_mmc_reset_and_mask_irqs(dev); rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_TAPSET, tap_idx % RENESAS_TAPNUM); memset(dev_data->tuning_buf, 0, data.block_size); ret = rcar_mmc_request(dev, &cmd, &data); if (ret) { LOG_DBG("%s: received an error (%d) during tuning request", dev->name, ret); if (is_mmc_cmd) { struct sdhc_command stop_cmd = { .opcode = SD_STOP_TRANSMISSION, .response_type = SD_RSP_TYPE_R1b, .timeout_ms = CONFIG_SD_CMD_TIMEOUT, }; rcar_mmc_request(dev, &stop_cmd, NULL); } continue; } smpcmp_bitmask |= !rcar_mmc_read_reg32(dev, RENESAS_SDHI_SCC_SMPCMP) << tap_idx; if (memcmp(tun_block_ptr, dev_data->tuning_buf, data.block_size)) { LOG_DBG("%s: received tuning block doesn't equal to pattert TAP index %u", dev->name, tap_idx); continue; } valid_taps |= BIT(tap_idx); LOG_DBG("%s: smpcmp_bitmask[%u] 0x%08x", dev->name, tap_idx, smpcmp_bitmask); } /* both parts of 
bitmasks have to be the same */ valid_taps &= (valid_taps >> RENESAS_TAPNUM); valid_taps |= (valid_taps << RENESAS_TAPNUM); smpcmp_bitmask &= (smpcmp_bitmask >> RENESAS_TAPNUM); smpcmp_bitmask |= (smpcmp_bitmask << RENESAS_TAPNUM); rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSREQ, 0); if (!valid_taps) { LOG_ERR("%s: there isn't any valid tap during tuning", dev->name); goto reset_scc; } /* * If all of the taps[i] is OK, the sampling clock position is selected by identifying * the change point of data. Change point of the data can be found in the value of * SCC_SMPCMP register */ if ((valid_taps >> RENESAS_TAPNUM) == (1 << RENESAS_TAPNUM) - 1) { valid_taps = smpcmp_bitmask; } /* do we have 3 set bits in a row at least */ if (valid_taps & (valid_taps >> 1) & (valid_taps >> 2)) { uint32_t max_len_range_pos = 0; uint32_t max_bits_in_range = 0; uint32_t pos_of_lsb_set = 0; /* all bits are set */ if ((valid_taps >> RENESAS_TAPNUM) == (1 << RENESAS_TAPNUM) - 1) { rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_TAPSET, 0); if (!dev_data->manual_retuning) { rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSCNTL, 1); } dev_data->can_retune = 1; return 0; } /* searching the longest range of set bits */ while (valid_taps) { uint32_t num_bits_in_range; uint32_t rsh = 0; rsh = find_lsb_set(valid_taps) - 1; pos_of_lsb_set += rsh; /* shift all leading zeros */ valid_taps >>= rsh; num_bits_in_range = find_lsb_set(~valid_taps) - 1; /* shift all leading ones */ valid_taps >>= num_bits_in_range; if (max_bits_in_range < num_bits_in_range) { max_bits_in_range = num_bits_in_range; max_len_range_pos = pos_of_lsb_set; } pos_of_lsb_set += num_bits_in_range; } tap_idx = (max_len_range_pos + max_bits_in_range / 2) % RENESAS_TAPNUM; rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_TAPSET, tap_idx); LOG_DBG("%s: valid_taps %08x smpcmp_bitmask %08x tap_idx %u", dev->name, valid_taps, smpcmp_bitmask, tap_idx); if (!dev_data->manual_retuning) { rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSCNTL, 1); } 
dev_data->can_retune = 1; return 0; } reset_scc: rcar_mmc_disable_scc(dev); return ret; } /* retune SCC in case of error during xref */ static int rcar_mmc_retune_if_needed(const struct device *dev, bool request_retune) { struct mmc_rcar_data *dev_data = dev->data; int ret = 0; uint32_t reg; bool scc_pos_err = false; uint8_t scc_tapset; if (!dev_data->can_retune) { return 0; } reg = rcar_mmc_read_reg32(dev, RENESAS_SDHI_SCC_RVSREQ); if (reg & RENESAS_SDHI_SCC_RVSREQ_ERR) { scc_pos_err = true; } scc_tapset = rcar_mmc_read_reg32(dev, RENESAS_SDHI_SCC_TAPSET); LOG_DBG("%s: scc_tapset %08x scc_rvsreq %08x request %d is manual tuning %d", dev->name, scc_tapset, reg, request_retune, dev_data->manual_retuning); if (request_retune || (scc_pos_err && !dev_data->manual_retuning)) { return rcar_mmc_execute_tuning(dev); } rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSREQ, 0); switch (reg & RENESAS_SDHI_SCC_RVSREQ_REQTAP_MASK) { case RENESAS_SDHI_SCC_RVSREQ_REQTAPDOWN: scc_tapset = (scc_tapset - 1) % RENESAS_TAPNUM; break; case RENESAS_SDHI_SCC_RVSREQ_REQTAPUP: scc_tapset = (scc_tapset + 1) % RENESAS_TAPNUM; break; default: ret = -EINVAL; LOG_ERR("%s: can't perform manual tuning SCC_RVSREQ %08x", dev->name, reg); break; } if (!ret) { rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_TAPSET, scc_tapset); } return ret; } #endif /* CONFIG_RCAR_MMC_SCC_SUPPORT */ /** * @brief Get MMC controller properties * * Gets host properties from the host controller. Host controller should * initialize all values in the @ref sdhc_host_props structure provided. * * @param dev Renesas MMC device * @param props property structure to be filled by MMC driver * * @retval 0 function succeeded. 
* @retval -EINVAL: some of pointers provided to the function are NULL
 */
static int rcar_mmc_get_host_props(const struct device *dev, struct sdhc_host_props *props)
{
	struct mmc_rcar_data *data;

	if (!props || !dev || !dev->data) {
		return -EINVAL;
	}

	data = dev->data;
	/* Properties were pre-computed at init time; just hand out a copy */
	memcpy(props, &data->props, sizeof(*props));
	return 0;
}

static const struct sdhc_driver_api rcar_sdhc_api = {
	.card_busy = rcar_mmc_card_busy,
#ifdef CONFIG_RCAR_MMC_SCC_SUPPORT
	.execute_tuning = rcar_mmc_execute_tuning,
#endif
	.get_card_present = rcar_mmc_get_card_present,
	.get_host_props = rcar_mmc_get_host_props,
	.request = rcar_mmc_request,
	.reset = rcar_mmc_reset,
	.set_io = rcar_mmc_set_io,
};

/* start SD-IF clock at max frequency configured in dts */
static int rcar_mmc_init_start_clk(const struct mmc_rcar_cfg *cfg)
{
	int ret = 0;
	const struct device *cpg_dev = cfg->cpg_dev;
	uintptr_t rate = cfg->max_frequency;

	ret = clock_control_on(cpg_dev, (clock_control_subsys_t *)&cfg->bus_clk);
	if (ret < 0) {
		return ret;
	}

	ret = clock_control_on(cpg_dev, (clock_control_subsys_t *)&cfg->cpg_clk);
	if (ret < 0) {
		return ret;
	}

	ret = clock_control_set_rate(cpg_dev, (clock_control_subsys_t *)&cfg->cpg_clk,
				     (clock_control_subsys_rate_t)rate);
	if (ret < 0) {
		/* roll back the core clock enable on failure */
		clock_control_off(cpg_dev, (clock_control_subsys_t *)&cfg->cpg_clk);
	}

	rate = MMC_BUS_CLOCK_FREQ;
	ret = clock_control_set_rate(cpg_dev, (clock_control_subsys_t *)&cfg->bus_clk,
				     (clock_control_subsys_rate_t)rate);

	/* SD spec recommends at least 1 ms of delay after start of clock */
	k_msleep(1);

	return ret;
}

/* fill data->props (capabilities advertised to the SDHC subsystem) from dts config */
static void rcar_mmc_init_host_props(const struct device *dev)
{
	struct mmc_rcar_data *data = dev->data;
	const struct mmc_rcar_cfg *cfg = dev->config;
	struct sdhc_host_props *props = &data->props;
	struct sdhc_host_caps *host_caps = &props->host_caps;

	memset(props, 0, sizeof(*props));

	/* Note: init only properties that are used for mmc/sdhc */
	props->f_max = cfg->max_frequency;
	/*
	 * note: actually, it's possible to get lower frequency
	 * if we use divider from cpg too
	 */
	props->f_min = (cfg->max_frequency >> 9);
	props->power_delay = 100; /* ms */
	props->is_spi = 0;

	switch (cfg->bus_width) {
	case SDHC_BUS_WIDTH8BIT:
		host_caps->bus_8_bit_support = 1;
		/* fallthrough — an 8-bit capable bus also supports 4-bit */
	case SDHC_BUS_WIDTH4BIT:
		host_caps->bus_4_bit_support = 1;
	default:
		break;
	}

	host_caps->high_spd_support = 1;
#ifdef CONFIG_RCAR_MMC_SCC_SUPPORT
	host_caps->sdr104_support = cfg->mmc_sdr104_support;
	host_caps->sdr50_support = cfg->uhs_support;
	/* neither Linux nor U-boot support DDR50 mode, that's why we don't support it too */
	host_caps->ddr50_support = 0;
	host_caps->hs200_support = cfg->mmc_hs200_1_8v;
	/* TODO: add support */
	host_caps->hs400_support = 0;
#endif

	/* advertise only the voltages the vqmmc regulator can actually deliver */
	host_caps->vol_330_support =
		regulator_is_supported_voltage(cfg->regulator_vqmmc, 3300000, 3300000);
	host_caps->vol_300_support =
		regulator_is_supported_voltage(cfg->regulator_vqmmc, 3000000, 3000000);
	host_caps->vol_180_support =
		regulator_is_supported_voltage(cfg->regulator_vqmmc, 1800000, 1800000);
}

/* reset sampling clock controller registers */
static int rcar_mmc_disable_scc(const struct device *dev)
{
	int ret;
	uint32_t reg;
	struct mmc_rcar_data *data = dev->data;
	uint32_t mmc_clk_ctl = rcar_mmc_read_reg32(dev, RCAR_MMC_CLKCTL);

	/* just to be to be sure that the SD clock is disabled */
	ret = rcar_mmc_enable_clock(dev, false);
	if (ret) {
		return ret;
	}

	/*
	 * Reset SCC registers, need to disable and enable clock
	 * before and after reset
	 */

	/* Disable SCC sampling clock */
	reg = rcar_mmc_read_reg32(dev, RENESAS_SDHI_SCC_CKSEL);
	reg &= ~RENESAS_SDHI_SCC_CKSEL_DTSEL;
	rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_CKSEL, reg);

	/* disable hs400 mode & data output timing */
	reg = rcar_mmc_read_reg32(dev, RENESAS_SDHI_SCC_TMPPORT2);
	reg &= ~(RENESAS_SDHI_SCC_TMPPORT2_HS400EN | RENESAS_SDHI_SCC_TMPPORT2_HS400OSEL);
	rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_TMPPORT2, reg);

	/* restore the clock gate to the state it had on entry (OFFEN bit) */
	ret = rcar_mmc_enable_clock(dev, (mmc_clk_ctl & RCAR_MMC_CLKCTL_OFFEN) ? false : true);
	if (ret) {
		return ret;
	}

	/* disable SCC sampling clock position correction */
	reg = rcar_mmc_read_reg32(dev, RENESAS_SDHI_SCC_RVSCNTL);
	reg &= ~RENESAS_SDHI_SCC_RVSCNTL_RVSEN;
	rcar_mmc_write_reg32(dev, RENESAS_SDHI_SCC_RVSCNTL, reg);

	data->can_retune = 0;

	return 0;
}

/* initialize and configure the Renesas MMC controller registers */
static int rcar_mmc_init_controller_regs(const struct device *dev)
{
	int ret = 0;
	uint32_t reg;
	struct mmc_rcar_data *data = dev->data;
	struct sdhc_io ios = {0};

	rcar_mmc_reset(dev);

	/* Disable SD clock (SD_CLK) output */
	ret = rcar_mmc_enable_clock(dev, false);
	if (ret) {
		return ret;
	}

	/* set transfer data length to 0 */
	rcar_mmc_write_reg32(dev, RCAR_MMC_SIZE, 0);

	/* disable the SD_BUF read/write DMA transfer */
	reg = rcar_mmc_read_reg32(dev, RCAR_MMC_EXTMODE);
	reg &= ~RCAR_MMC_EXTMODE_DMA_EN;
	rcar_mmc_write_reg32(dev, RCAR_MMC_EXTMODE, reg);
	/* mask DMA irqs and clear dma irq flags */
	rcar_mmc_reset_and_mask_irqs(dev);
	/* set system address increment mode selector & 64-bit bus width */
	reg = rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_MODE);
	reg |= RCAR_MMC_DMA_MODE_ADDR_INC | RCAR_MMC_DMA_MODE_WIDTH;
	rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_MODE, reg);

	/* store version of introductory IP */
	data->ver = rcar_mmc_read_reg32(dev, RCAR_MMC_VERSION);
	data->ver &= RCAR_MMC_VERSION_IP;

	/*
	 * set bus width to 1
	 * timeout counter: SDCLK * 2^27
	 * card detect time counter: SD * 2^24
	 */
	reg = rcar_mmc_read_reg32(dev, RCAR_MMC_OPTION);
	reg |= RCAR_MMC_OPTION_WIDTH_MASK | 0xEE;
	rcar_mmc_write_reg32(dev, RCAR_MMC_OPTION, reg);

	/* block count enable */
	rcar_mmc_write_reg32(dev, RCAR_MMC_STOP, RCAR_MMC_STOP_SEC);
	/* number of transfer blocks */
	rcar_mmc_write_reg32(dev, RCAR_MMC_SECCNT, 0);

	/*
	 * SD_BUF0 data swap disabled.
	 * Read/write access to SD_BUF0 can be performed with the 64-bit access.
	 *
	 * Note: when using the DMA, the bus width should be fixed at 64 bits.
*/
	rcar_mmc_write_reg32(dev, RCAR_MMC_HOST_MODE, 0);
	data->width_access_sd_buf0 = 8;

	/* disable sampling clock controller, it is used for uhs/sdr104, hs200 and hs400 */
	ret = rcar_mmc_disable_scc(dev);
	if (ret) {
		return ret;
	}

	/*
	 * configure divider inside MMC controller
	 * set maximum possible divider
	 */
	ios.clock = data->props.f_min;
	rcar_mmc_set_clk_rate(dev, &ios);

	data->restore_cfg_after_reset = 1;

	return 0;
}

#ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT
/* ISR: acknowledge DMA completion irqs and wake the waiting transfer thread */
static void rcar_mmc_irq_handler(const void *arg)
{
	const struct device *dev = arg;
	uint32_t dma_info1 = rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_INFO1);
	uint32_t dma_info2 = rcar_mmc_read_reg32(dev, RCAR_MMC_DMA_INFO2);

	if (dma_info1 || dma_info2) {
		struct mmc_rcar_data *data = dev->data;

		/* mask the DMA irq sources again until the next transfer re-arms them */
		rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO1_MASK, 0xfffffeff);
		rcar_mmc_write_reg32(dev, RCAR_MMC_DMA_INFO2_MASK, ~0);
		k_sem_give(&data->irq_xref_fin);
	} else {
		LOG_WRN("%s: warning: non-dma event triggers irq", dev->name);
	}
}
#endif /* CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT */

/* initialize and configure the Renesas MMC driver */
static int rcar_mmc_init(const struct device *dev)
{
	int ret = 0;
	struct mmc_rcar_data *data = dev->data;
	const struct mmc_rcar_cfg *cfg = dev->config;

#ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT
	ret = k_sem_init(&data->irq_xref_fin, 0, 1);
	if (ret) {
		LOG_ERR("%s: can't init semaphore", dev->name);
		return ret;
	}
#endif

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

	/* Configure dt provided device signals when available */
	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("%s: error can't apply pinctrl state", dev->name);
		goto exit_unmap;
	}

	if (!device_is_ready(cfg->cpg_dev)) {
		LOG_ERR("%s: error cpg_dev isn't ready", dev->name);
		ret = -ENODEV;
		goto exit_unmap;
	}

	ret = rcar_mmc_init_start_clk(cfg);
	if (ret < 0) {
		LOG_ERR("%s: error can't turn on the cpg", dev->name);
		goto exit_unmap;
	}

	/* it's needed for SDHC */
	rcar_mmc_init_host_props(dev);

	ret = rcar_mmc_init_controller_regs(dev);
	if (ret) {
		goto exit_disable_clk;
	}

#ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT
	cfg->irq_config_func(dev);
#endif /* CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT */

	LOG_INF("%s: initialize driver, MMC version 0x%hhx", dev->name, data->ver);

	return 0;

exit_disable_clk:
	clock_control_off(cfg->cpg_dev, (clock_control_subsys_t *)&cfg->cpg_clk);

exit_unmap:
#if defined(DEVICE_MMIO_IS_IN_RAM) && defined(CONFIG_MMU)
	k_mem_unmap_phys_bare((uint8_t *)DEVICE_MMIO_GET(dev), DEVICE_MMIO_ROM_PTR(dev)->size);
#endif
	return ret;
}

#ifdef CONFIG_RCAR_MMC_DMA_IRQ_DRIVEN_SUPPORT
/* per-instance irq connect/enable helper, referenced from the config struct */
#define RCAR_MMC_CONFIG_FUNC(n) \
	static void irq_config_func_##n(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), rcar_mmc_irq_handler, \
			    DEVICE_DT_INST_GET(n), DT_INST_IRQ(n, flags)); \
		irq_enable(DT_INST_IRQN(n)); \
	}
#define RCAR_MMC_IRQ_CFG_FUNC_INIT(n) .irq_config_func = irq_config_func_##n,
#else
#define RCAR_MMC_IRQ_CFG_FUNC_INIT(n)
#define RCAR_MMC_CONFIG_FUNC(n)
#endif

/* instantiate data/config/device objects for each enabled dts node */
#define RCAR_MMC_INIT(n) \
	static struct mmc_rcar_data mmc_rcar_data_##n; \
	PINCTRL_DT_INST_DEFINE(n); \
	RCAR_MMC_CONFIG_FUNC(n); \
	static const struct mmc_rcar_cfg mmc_rcar_cfg_##n = { \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), \
		.cpg_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
		.cpg_clk.module = DT_INST_CLOCKS_CELL_BY_IDX(n, 0, module), \
		.cpg_clk.domain = DT_INST_CLOCKS_CELL_BY_IDX(n, 0, domain), \
		.bus_clk.module = DT_INST_CLOCKS_CELL_BY_IDX(n, 1, module), \
		.bus_clk.domain = DT_INST_CLOCKS_CELL_BY_IDX(n, 1, domain), \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
		.regulator_vqmmc = DEVICE_DT_GET(DT_PHANDLE(DT_DRV_INST(n), vqmmc_supply)), \
		.regulator_vmmc = DEVICE_DT_GET(DT_PHANDLE(DT_DRV_INST(n), vmmc_supply)), \
		.max_frequency = DT_INST_PROP(n, max_bus_freq), \
		.non_removable = DT_INST_PROP(n, non_removable), \
		.mmc_hs200_1_8v = DT_INST_PROP(n, mmc_hs200_1_8v), \
		.mmc_hs400_1_8v = DT_INST_PROP(n, mmc_hs400_1_8v), \
		.mmc_sdr104_support = DT_INST_PROP(n, mmc_sdr104_support), \
		.uhs_support = 1, \
		.bus_width = DT_INST_PROP(n, bus_width), \
		RCAR_MMC_IRQ_CFG_FUNC_INIT(n)}; \
	DEVICE_DT_INST_DEFINE(n, rcar_mmc_init, NULL, &mmc_rcar_data_##n, &mmc_rcar_cfg_##n, \
			      POST_KERNEL, CONFIG_SDHC_INIT_PRIORITY, &rcar_sdhc_api);

DT_INST_FOREACH_STATUS_OKAY(RCAR_MMC_INIT)
```
/content/code_sandbox/drivers/sdhc/rcar_mmc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
20,145
```unknown config IMX_USDHC bool "NXP IMX USDHC Driver" default y depends on DT_HAS_NXP_IMX_USDHC_ENABLED select SDHC_SUPPORTS_UHS select SDHC_SUPPORTS_NATIVE_MODE select PINCTRL help Enable the NXP IMX SD Host controller driver if IMX_USDHC config IMX_USDHC_DAT3_PWR_TOGGLE bool "Toggle power when pulling DAT3 line low" default y help Toggle power to SD card to clear DAT3 pull when pulling line low config IMX_USDHC_DMA_SUPPORT bool "DMA support for USDHC" default y select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT help Enable DMA support for USDHC if IMX_USDHC_DMA_SUPPORT # USDHC DMA needs 32 bit aligned buffers config SDHC_BUFFER_ALIGNMENT default 4 config IMX_USDHC_DMA_BUFFER_SIZE int "Size of DMA descriptor buffer in bytes" default 128 help Size of USDHC ADMA descriptor buffer in bytes endif # IMX_USDHC_DMA_SUPPORT endif ```
/content/code_sandbox/drivers/sdhc/Kconfig.imx
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
243
```unknown menuconfig SDHC bool "Secure Digital High Capacity (SDHC) drivers" help Include drivers for SD host controller if SDHC source "drivers/sdhc/Kconfig.ifx_cat1" source "drivers/sdhc/Kconfig.imx" source "drivers/sdhc/Kconfig.spi" source "drivers/sdhc/Kconfig.mcux_sdif" source "drivers/sdhc/Kconfig.rcar" source "drivers/sdhc/Kconfig.sam_hsmci" source "drivers/sdhc/Kconfig.intel" source "drivers/sdhc/Kconfig.sdhc_cdns" source "drivers/sdhc/Kconfig.esp32" config SDHC_INIT_PRIORITY int "SDHC driver init priority" default 85 help SDHC driver system init priority config SDHC_BUFFER_ALIGNMENT int default 1 help Some SD host controllers require alignment of their data buffers in order to DMA to work correctly. Devices should change default of this value if they require alignment. This represents the alignment of buffers required in bytes config SDHC_SUPPORTS_UHS bool help Selected by host controller driver if UHS support is present. required to enable UHS portion of protocol stack. config SDHC_SUPPORTS_SPI_MODE bool help Selected by host controller driver if SPI mode support is required. Enables SPI protocol in SD protocol stack config SDHC_SUPPORTS_NATIVE_MODE bool help Selected by host controller driver if native SD mode support is required. Enables native protocol in SD protocol stack. module = SDHC module-str = sdhc source "subsys/logging/Kconfig.template.log_config" endif # SDHC ```
/content/code_sandbox/drivers/sdhc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
363
```unknown # Infineon CAT1 SDHC configuration options # an affiliate of Cypress Semiconductor Corporation # config SDHC_INFINEON_CAT1 bool "Infineon CAT1 SDHC driver" default y depends on DT_HAS_INFINEON_CAT1_SDHC_SDIO_ENABLED select USE_INFINEON_SDIO select SDHC_SUPPORTS_NATIVE_MODE help This option enables the SDHC driver for Infineon CAT1 family. if SDHC_INFINEON_CAT1 config SDHC_INIT_PRIORITY default 70 endif ```
/content/code_sandbox/drivers/sdhc/Kconfig.ifx_cat1
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
115
```c
/*
 *
 */

#define DT_DRV_COMPAT intel_emmc_host

#include <zephyr/kernel.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/sdhc.h>
#include <zephyr/sd/sd_spec.h>
#include <zephyr/cache.h>
#include "intel_emmc_host.h"

#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
BUILD_ASSERT(IS_ENABLED(CONFIG_PCIE), "DT need CONFIG_PCIE");
#include <zephyr/drivers/pcie/pcie.h>
#endif

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(emmc_hc, CONFIG_SDHC_LOG_LEVEL);

typedef void (*emmc_isr_cb_t)(const struct device *dev);

#ifdef CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE
#define ADMA_DESC_SIZE CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE
#else
#define ADMA_DESC_SIZE 0
#endif

/* Static (devicetree-derived) configuration of one eMMC host instance */
struct emmc_config {
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
	struct pcie_dev *pcie;
#else
	DEVICE_MMIO_ROM;
#endif
	emmc_isr_cb_t config_func;
	uint32_t max_bus_freq;
	uint32_t min_bus_freq;
	uint32_t power_delay_ms;
	uint8_t hs200_mode: 1;
	uint8_t hs400_mode: 1;
	uint8_t dw_4bit: 1;
	uint8_t dw_8bit: 1;
};

/* Mutable runtime state of one eMMC host instance */
struct emmc_data {
	DEVICE_MMIO_RAM;
	uint32_t rca;
	struct sdhc_io host_io;
	struct k_sem lock;
	struct k_event irq_event;
	uint64_t desc_table[ADMA_DESC_SIZE];
	struct sdhc_host_props props;
	bool card_present;
};

/* Unmask status reporting AND interrupt generation for all normal/error sources */
static void enable_interrupts(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	regs->normal_int_stat_en = EMMC_HOST_NORMAL_INTR_MASK;
	regs->err_int_stat_en = EMMC_HOST_ERROR_INTR_MASK;
	regs->normal_int_signal_en = EMMC_HOST_NORMAL_INTR_MASK;
	regs->err_int_signal_en = EMMC_HOST_ERROR_INTR_MASK;
	regs->timeout_ctrl = EMMC_HOST_MAX_TIMEOUT;
}

/* Stop interrupt generation but keep the status registers updating (for polling) */
static void disable_interrupts(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	/* Keep enable interrupt status register to update */
	regs->normal_int_stat_en = EMMC_HOST_NORMAL_INTR_MASK;
	regs->err_int_stat_en = EMMC_HOST_ERROR_INTR_MASK;
	/* Disable only interrupt generation */
	regs->normal_int_signal_en &= 0;
	regs->err_int_signal_en &= 0;
	regs->timeout_ctrl = EMMC_HOST_MAX_TIMEOUT;
}

/* Write-1-to-clear all pending normal and error interrupt status bits */
static void clear_interrupts(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	regs->normal_int_stat = EMMC_HOST_NORMAL_INTR_MASK_CLR;
	regs->err_int_stat = EMMC_HOST_ERROR_INTR_MASK;
}

/*
 * Select the bus signalling voltage, checking the capabilities register first.
 * Bus power is temporarily dropped around the change if it was on.
 * Returns 0, -ENOTSUP if the host lacks the voltage, -EINVAL for unknown values.
 */
static int emmc_set_voltage(const struct device *dev, enum sd_voltage signal_voltage)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	bool power_state = regs->power_ctrl & EMMC_HOST_POWER_CTRL_SD_BUS_POWER ? true : false;
	int ret = 0;

	if (power_state) {
		/* Turn OFF Bus Power before config clock */
		regs->power_ctrl &= ~EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	}

	switch (signal_voltage) {
	case SD_VOL_3_3_V:
		if (regs->capabilities & EMMC_HOST_VOL_3_3_V_SUPPORT) {
			regs->host_ctrl2 &=
				~(EMMC_HOST_CTRL2_1P8V_SIG_EN << EMMC_HOST_CTRL2_1P8V_SIG_LOC);

			/* 3.3v voltage select */
			regs->power_ctrl = EMMC_HOST_VOL_3_3_V_SELECT;
			LOG_DBG("3.3V Selected for MMC Card");
		} else {
			LOG_ERR("3.3V not supported by MMC Host");
			ret = -ENOTSUP;
		}
		break;

	case SD_VOL_3_0_V:
		if (regs->capabilities & EMMC_HOST_VOL_3_0_V_SUPPORT) {
			regs->host_ctrl2 &=
				~(EMMC_HOST_CTRL2_1P8V_SIG_EN << EMMC_HOST_CTRL2_1P8V_SIG_LOC);

			/* 3.0v voltage select */
			regs->power_ctrl = EMMC_HOST_VOL_3_0_V_SELECT;
			LOG_DBG("3.0V Selected for MMC Card");
		} else {
			LOG_ERR("3.0V not supported by MMC Host");
			ret = -ENOTSUP;
		}
		break;

	case SD_VOL_1_8_V:
		if (regs->capabilities & EMMC_HOST_VOL_1_8_V_SUPPORT) {
			regs->host_ctrl2 |= EMMC_HOST_CTRL2_1P8V_SIG_EN
					    << EMMC_HOST_CTRL2_1P8V_SIG_LOC;

			/* 1.8v voltage select */
			regs->power_ctrl = EMMC_HOST_VOL_1_8_V_SELECT;
			LOG_DBG("1.8V Selected for MMC Card");
		} else {
			LOG_ERR("1.8V not supported by MMC Host");
			ret = -ENOTSUP;
		}
		break;

	default:
		ret = -EINVAL;
	}

	if (power_state) {
		/* Turn ON Bus Power */
		regs->power_ctrl |= EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	}

	return ret;
}

/* Switch SD bus power on or off, then allow it to settle */
static int emmc_set_power(const struct device *dev, enum sdhc_power state)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (state == SDHC_POWER_ON) {
		/* Turn ON Bus Power */
		regs->power_ctrl |= EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	} else {
		/* Turn OFF Bus Power */
		regs->power_ctrl &= ~EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	}

	k_msleep(10u);

	return 0;
}

/*
 * Gate the SD clock; refuses (returns false) while a command or data
 * transaction is still in flight (CMD/DAT inhibit bits set).
 */
static bool emmc_disable_clock(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (regs->present_state & EMMC_HOST_PSTATE_CMD_INHIBIT) {
		LOG_ERR("present_state:%x", regs->present_state);
		return false;
	}

	if (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT) {
		LOG_ERR("present_state:%x", regs->present_state);
		return false;
	}

	regs->clock_ctrl &= ~EMMC_HOST_INTERNAL_CLOCK_EN;
	regs->clock_ctrl &= ~EMMC_HOST_SD_CLOCK_EN;

	/* NOTE(review): busy-waits with no timeout — relies on the hw always clearing */
	while ((regs->clock_ctrl & EMMC_HOST_SD_CLOCK_EN) != 0) {
		;
	}

	return true;
}

/* Ungate the internal clock, wait for stability, then enable the SD clock */
static bool emmc_enable_clock(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	regs->clock_ctrl |= EMMC_HOST_INTERNAL_CLOCK_EN;
	/* Wait for the stable Internal Clock */
	while ((regs->clock_ctrl & EMMC_HOST_INTERNAL_CLOCK_STABLE) == 0) {
		;
	}

	/* Enable SD Clock */
	regs->clock_ctrl |= EMMC_HOST_SD_CLOCK_EN;
	while ((regs->clock_ctrl & EMMC_HOST_SD_CLOCK_EN) == 0) {
		;
	}

	return true;
}

/*
 * Program the SD clock divider for the requested bus speed.
 * The base frequency is read from the capabilities register; returns false
 * for unsupported speeds or when the clock cannot be safely disabled.
 */
static bool emmc_clock_set(const struct device *dev, enum sdhc_clock_speed speed)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint8_t base_freq;
	uint32_t clock_divider;
	float freq;
	bool ret;

	switch (speed) {
	case SDMMC_CLOCK_400KHZ:
		freq = EMMC_HOST_CLK_FREQ_400K;
		break;

	case SD_CLOCK_25MHZ:
	case MMC_CLOCK_26MHZ:
		freq = EMMC_HOST_CLK_FREQ_25M;
		break;

	case SD_CLOCK_50MHZ:
	case MMC_CLOCK_52MHZ:
		freq = EMMC_HOST_CLK_FREQ_50M;
		break;

	case SD_CLOCK_100MHZ:
		freq = EMMC_HOST_CLK_FREQ_100M;
		break;

	case MMC_CLOCK_HS200:
		freq = EMMC_HOST_CLK_FREQ_200M;
		break;

	case SD_CLOCK_208MHZ:
	default:
		return false;
	}

	ret = emmc_disable_clock(dev);
	if (!ret) {
		return false;
	}

	base_freq = regs->capabilities >> 8;
	clock_divider = (int)(base_freq / (freq * 2));

	LOG_DBG("Clock divider for MMC Clk: %d Hz is %d", speed, clock_divider);

	/* 10-bit divider is split across lower and upper fields of clock_ctrl */
	SET_BITS(regs->clock_ctrl, EMMC_HOST_CLK_SDCLCK_FREQ_SEL_LOC,
		 EMMC_HOST_CLK_SDCLCK_FREQ_SEL_MASK, clock_divider);
	SET_BITS(regs->clock_ctrl, EMMC_HOST_CLK_SDCLCK_FREQ_SEL_UPPER_LOC,
		 EMMC_HOST_CLK_SDCLCK_FREQ_SEL_UPPER_MASK, clock_divider >> 8);

	emmc_enable_clock(dev);

	return true;
}

/* Map an SDHC timing mode onto the host UHS mode field and program it */
static int set_timing(const struct device *dev, enum sdhc_timing_mode timing)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	int ret = 0;
	uint8_t mode;

	LOG_DBG("UHS Mode: %d", timing);

	switch (timing) {
	case SDHC_TIMING_LEGACY:
	case SDHC_TIMING_HS:
	case SDHC_TIMING_SDR12:
		mode = EMMC_HOST_UHSMODE_SDR12;
		break;

	case SDHC_TIMING_SDR25:
		mode = EMMC_HOST_UHSMODE_SDR25;
		break;

	case SDHC_TIMING_SDR50:
		mode = EMMC_HOST_UHSMODE_SDR50;
		break;

	case SDHC_TIMING_SDR104:
		mode = EMMC_HOST_UHSMODE_SDR104;
		break;

	case SDHC_TIMING_DDR50:
	case SDHC_TIMING_DDR52:
		mode = EMMC_HOST_UHSMODE_DDR50;
		break;

	case SDHC_TIMING_HS400:
	case SDHC_TIMING_HS200:
		mode = EMMC_HOST_UHSMODE_HS400;
		break;

	default:
		ret = -ENOTSUP;
	}

	if (!ret) {
		if (!emmc_disable_clock(dev)) {
			LOG_ERR("Disable clk failed");
			return -EIO;
		}
		/* UHS modes require 1.8 V signalling */
		regs->host_ctrl2 |= EMMC_HOST_CTRL2_1P8V_SIG_EN << EMMC_HOST_CTRL2_1P8V_SIG_LOC;
		SET_BITS(regs->host_ctrl2, EMMC_HOST_CTRL2_UHS_MODE_SEL_LOC,
			 EMMC_HOST_CTRL2_UHS_MODE_SEL_MASK, mode);

		emmc_enable_clock(dev);
	}

	return ret;
}

/*
 * Sleep until the ISR signals command completion (or an error event).
 * Returns 0 on completion, -EIO on an error event, -EAGAIN on timeout.
 */
static int wait_for_cmd_complete(struct emmc_data *emmc, uint32_t time_out)
{
	int ret;
	k_timeout_t wait_time;
	uint32_t events;

	if (time_out == SDHC_TIMEOUT_FOREVER) {
		wait_time = K_FOREVER;
	} else {
		wait_time = K_MSEC(time_out);
	}

	events = k_event_wait(&emmc->irq_event,
			      EMMC_HOST_CMD_COMPLETE | ERR_INTR_STATUS_EVENT(EMMC_HOST_ERR_STATUS),
			      false, wait_time);

	if (events & EMMC_HOST_CMD_COMPLETE) {
		ret = 0;
	} else if (events & ERR_INTR_STATUS_EVENT(EMMC_HOST_ERR_STATUS)) {
		LOG_ERR("wait for cmd complete error: %x", events);
		ret = -EIO;
	} else {
		LOG_ERR("wait 
for cmd complete timeout");
		ret = -EAGAIN;
	}

	return ret;
}

/*
 * Busy-poll for command completion instead of sleeping on the irq event.
 * `time_out` is interpreted as a count of 1 ms polling intervals.
 * Returns 0 on completion, -EIO on any error status, -EAGAIN on timeout.
 */
static int poll_cmd_complete(const struct device *dev, uint32_t time_out)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	int ret = -EAGAIN;
	int32_t retry = time_out;

	while (retry > 0) {
		if (regs->normal_int_stat & EMMC_HOST_CMD_COMPLETE) {
			regs->normal_int_stat = EMMC_HOST_CMD_COMPLETE;
			ret = 0;
			break;
		}

		k_busy_wait(1000u);
		retry--;
	}

	if (regs->err_int_stat) {
		LOG_ERR("err_int_stat:%x", regs->err_int_stat);
		/* write-1-to-clear: acknowledge exactly the bits that are set */
		regs->err_int_stat &= regs->err_int_stat;
		ret = -EIO;
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA)) {
		if (regs->adma_err_stat) {
			LOG_ERR("adma error: %x", regs->adma_err_stat);
			ret = -EIO;
		}
	}

	return ret;
}

/* Issue a software reset of the data line, command line, or whole host */
void emmc_host_sw_reset(const struct device *dev, enum emmc_sw_reset reset)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (reset == EMMC_HOST_SW_RESET_DATA_LINE) {
		regs->sw_reset = EMMC_HOST_SW_RESET_REG_DATA;
	} else if (reset == EMMC_HOST_SW_RESET_CMD_LINE) {
		regs->sw_reset = EMMC_HOST_SW_RESET_REG_CMD;
	} else if (reset == EMMC_HOST_SW_RESET_ALL) {
		regs->sw_reset = EMMC_HOST_SW_RESET_REG_ALL;
	}

	/* hardware clears the reset bits when the reset has finished */
	while (regs->sw_reset != 0) {
		;
	}

	k_sleep(K_MSEC(100u));
}

/*
 * Prepare the DMA engine for a transfer: build the ADMA2 descriptor table
 * (one descriptor per block) or program the single SDMA system address.
 * For writes with data cache enabled, the source buffer is flushed first.
 */
static int emmc_dma_init(const struct device *dev, struct sdhc_data *data, bool read)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (IS_ENABLED(CONFIG_DCACHE) && !read) {
		sys_cache_data_flush_range(data->data, (data->blocks * data->block_size));
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA)) {
		uint8_t *buff = data->data;

		/* Setup DMA transfer using ADMA2 */
		memset(emmc->desc_table, 0, sizeof(emmc->desc_table));

#if defined(CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE)
		__ASSERT_NO_MSG(data->blocks < CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE);
#endif

		for (int i = 0; i < data->blocks; i++) {
			emmc->desc_table[i] = ((uint64_t)buff) << EMMC_HOST_ADMA_BUFF_ADD_LOC;
			emmc->desc_table[i] |= data->block_size << EMMC_HOST_ADMA_BUFF_LEN_LOC;

			if (i == (data->blocks - 1u)) {
				/* terminate the descriptor chain and request an irq */
				emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_LINK_LAST;
				emmc->desc_table[i] |= EMMC_HOST_ADMA_INTR_EN;
				emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_LAST;
			} else {
				emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_LINK_NEXT;
			}
			emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_VALID;
			buff += data->block_size;
			LOG_DBG("desc_table:%llx", emmc->desc_table[i]);
		}

		regs->adma_sys_addr1 = (uint32_t)((uintptr_t)emmc->desc_table & ADDRESS_32BIT_MASK);
		regs->adma_sys_addr2 =
			(uint32_t)(((uintptr_t)emmc->desc_table >> 32) & ADDRESS_32BIT_MASK);
		LOG_DBG("adma: %llx %x %p", emmc->desc_table[0], regs->adma_sys_addr1,
			emmc->desc_table);
	} else {
		/* Setup DMA transfer using SDMA */
		regs->sdma_sysaddr = (uint32_t)((uintptr_t)data->data);
		LOG_DBG("sdma_sysaddr: %x", regs->sdma_sysaddr);
	}

	return 0;
}

/*
 * Configure the transfer-mode, block-size/count, DMA selection and block-gap
 * registers for an upcoming data transfer. `read` selects the data direction.
 */
static int emmc_init_xfr(const struct device *dev, struct sdhc_data *data, bool read)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint16_t multi_block = 0u;

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) {
		emmc_dma_init(dev, data, read);
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA)) {
		SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_DMA_SEL_LOC,
			 EMMC_HOST_CTRL1_DMA_SEL_MASK, 2u);
	} else {
		SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_DMA_SEL_LOC,
			 EMMC_HOST_CTRL1_DMA_SEL_MASK, 0u);
	}

	/* Set Block Size Register */
	SET_BITS(regs->block_size, EMMC_HOST_DMA_BUF_SIZE_LOC, EMMC_HOST_DMA_BUF_SIZE_MASK,
		 EMMC_HOST_SDMA_BOUNDARY);
	SET_BITS(regs->block_size, EMMC_HOST_BLOCK_SIZE_LOC, EMMC_HOST_BLOCK_SIZE_MASK,
		 data->block_size);

	if (data->blocks > 1) {
		multi_block = 1u;
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_AUTO_STOP)) {
		if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA) &&
		    emmc->host_io.timing == SDHC_TIMING_SDR104) {
			/* Auto cmd23 only applicable for ADMA */
			SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_AUTO_CMD_EN_LOC,
				 EMMC_HOST_XFER_AUTO_CMD_EN_MASK, multi_block ? 2 : 0);
		} else {
			SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_AUTO_CMD_EN_LOC,
				 EMMC_HOST_XFER_AUTO_CMD_EN_MASK, multi_block ? 1 : 0);
		}
	} else {
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_AUTO_CMD_EN_LOC,
			 EMMC_HOST_XFER_AUTO_CMD_EN_MASK, 0);
	}

	if (!IS_ENABLED(CONFIG_INTEL_EMMC_HOST_AUTO_STOP)) {
		/* Set block count register to 0 for infinite transfer mode */
		regs->block_count = 0;
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_BLOCK_CNT_EN_LOC,
			 EMMC_HOST_XFER_BLOCK_CNT_EN_MASK, 0);
	} else {
		regs->block_count = (uint16_t)data->blocks;
		/* Enable block count in transfer register */
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_BLOCK_CNT_EN_LOC,
			 EMMC_HOST_XFER_BLOCK_CNT_EN_MASK, multi_block ? 1 : 0);
	}

	SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_MULTI_BLOCK_SEL_LOC,
		 EMMC_HOST_XFER_MULTI_BLOCK_SEL_MASK, multi_block);

	/* Set data transfer direction, Read = 1, Write = 0 */
	SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_DATA_DIR_LOC, EMMC_HOST_XFER_DATA_DIR_MASK,
		 read ? 1u : 0u);

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) {
		/* Enable DMA or not */
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_DMA_EN_LOC,
			 EMMC_HOST_XFER_DMA_EN_MASK, 1u);
	} else {
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_DMA_EN_LOC,
			 EMMC_HOST_XFER_DMA_EN_MASK, 0u);
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_BLOCK_GAP)) {
		/* Set an interrupt at the block gap */
		SET_BITS(regs->block_gap_ctrl, EMMC_HOST_BLOCK_GAP_LOC, EMMC_HOST_BLOCK_GAP_MASK,
			 1u);
	} else {
		SET_BITS(regs->block_gap_ctrl, EMMC_HOST_BLOCK_GAP_LOC, EMMC_HOST_BLOCK_GAP_MASK,
			 0u);
	}

	/* Set data timeout time */
	regs->timeout_ctrl = data->timeout_ms;

	return 0;
}

/* Sleep until the ISR signals transfer completion (or a DMA transfer error) */
static int wait_xfr_intr_complete(const struct device *dev, uint32_t time_out)
{
	struct emmc_data *emmc = dev->data;
	uint32_t events;
	int ret;
	k_timeout_t wait_time;

	LOG_DBG("");

	if (time_out == SDHC_TIMEOUT_FOREVER) {
		wait_time = K_FOREVER;
	} else {
		wait_time = K_MSEC(time_out);
	}

	events = k_event_wait(&emmc->irq_event,
			      EMMC_HOST_XFER_COMPLETE |
				      ERR_INTR_STATUS_EVENT(EMMC_HOST_DMA_TXFR_ERR),
			      false, wait_time);

	if (events & EMMC_HOST_XFER_COMPLETE) {
		ret = 0;
	} else if (events & ERR_INTR_STATUS_EVENT(0xFFFF)) {
		LOG_ERR("wait for xfer complete error: %x", events);
		ret = -EIO;
	} else {
		LOG_ERR("wait for xfer complete timeout");
		ret = -EAGAIN;
	}

	return ret;
}

/* Busy-poll for transfer completion; `time_out` counts polling intervals */
static int wait_xfr_poll_complete(const struct device *dev, uint32_t time_out)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	int ret = -EAGAIN;
	int32_t retry = time_out;

	LOG_DBG("");

	while (retry > 0) {
		if (regs->normal_int_stat & EMMC_HOST_XFER_COMPLETE) {
			regs->normal_int_stat |= EMMC_HOST_XFER_COMPLETE;
			ret = 0;
			break;
		}

		k_busy_wait(EMMC_HOST_MSEC_DELAY);
		retry--;
	}

	return ret;
}

/* Dispatch to the irq-driven or polled wait depending on Kconfig */
static int wait_xfr_complete(const struct device *dev, uint32_t time_out)
{
	int ret;

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
		ret = wait_xfr_intr_complete(dev, time_out);
	} else {
		ret = wait_xfr_poll_complete(dev, time_out);
	}

	return ret;
}

/* Translate an SD response type into the host controller response-length code */
static enum emmc_response_type emmc_decode_resp_type(enum sd_rsp_type type)
{
	enum emmc_response_type resp_type;

	switch (type & 0xF) {
	case SD_RSP_TYPE_NONE:
		resp_type = EMMC_HOST_RESP_NONE;
		break;
	case SD_RSP_TYPE_R1:
	case SD_RSP_TYPE_R3:
	case SD_RSP_TYPE_R4:
	case SD_RSP_TYPE_R5:
		resp_type = EMMC_HOST_RESP_LEN_48;
		break;
	case SD_RSP_TYPE_R1b:
		resp_type = EMMC_HOST_RESP_LEN_48B;
		break;
	case SD_RSP_TYPE_R2:
		resp_type = EMMC_HOST_RESP_LEN_136;
		break;
	case SD_RSP_TYPE_R5b:
	case SD_RSP_TYPE_R6:
	case SD_RSP_TYPE_R7:
	default:
		resp_type = EMMC_HOST_INVAL_HOST_RESP_LEN;
	}

	return resp_type;
}

/*
 * Copy the response registers into sdhc_cmd->response.
 * R2 (136-bit) responses are reassembled from the 16-bit halves and stored
 * most-significant word first.
 */
static void update_cmd_response(const struct device *dev, struct sdhc_command *sdhc_cmd)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint32_t resp0, resp1, resp2, resp3;

	if (sdhc_cmd->response_type == SD_RSP_TYPE_NONE) {
		return;
	}

	resp0 = regs->resp_01;

	if (sdhc_cmd->response_type == SD_RSP_TYPE_R2) {
		resp1 = regs->resp_2 | (regs->resp_3 << 16u);
		resp2 = regs->resp_4 | (regs->resp_5 << 16u);
		resp3 = regs->resp_6 | (regs->resp_7 << 16u);
		LOG_DBG("cmd resp: %x %x %x %x", resp0, resp1, resp2, resp3);
		sdhc_cmd->response[0u] = resp3;
		sdhc_cmd->response[1U] = resp2;
		sdhc_cmd->response[2U] = resp1;
		sdhc_cmd->response[3U] = resp0;
	} else {
		LOG_DBG("cmd resp: %x", resp0);
		sdhc_cmd->response[0u] = resp0;
	}
}

/*
 * Issue one command to the card: check CMD/DAT inhibit, write the argument
 * and command registers, then wait (irq or poll) for completion and capture
 * the response. Returns 0, or -EBUSY/-EINVAL/-EIO/-EAGAIN on failure.
 */
static int emmc_host_send_cmd(const struct device *dev, const struct emmc_cmd_config *config)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	struct emmc_data *emmc = dev->data;
	struct sdhc_command *sdhc_cmd = config->sdhc_cmd;
	enum emmc_response_type resp_type = emmc_decode_resp_type(sdhc_cmd->response_type);
	uint16_t cmd_reg;
	int ret;

	LOG_DBG("");

	/* Check if CMD line is available */
	if (regs->present_state & EMMC_HOST_PSTATE_CMD_INHIBIT) {
		LOG_ERR("CMD line is not available");
		return -EBUSY;
	}

	if (config->data_present && (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT)) {
		LOG_ERR("Data line is not available");
		return -EBUSY;
	}

	if (resp_type == EMMC_HOST_INVAL_HOST_RESP_LEN) {
		LOG_ERR("Invalid eMMC resp type:%d", resp_type);
		return -EINVAL;
	}

	k_event_clear(&emmc->irq_event, EMMC_HOST_CMD_COMPLETE);

	regs->argument = sdhc_cmd->arg;

	/* writing the command register starts the transaction */
	cmd_reg = config->cmd_idx << EMMC_HOST_CMD_INDEX_LOC |
		  config->cmd_type << EMMC_HOST_CMD_TYPE_LOC |
		  config->data_present << EMMC_HOST_CMD_DATA_PRESENT_LOC |
		  config->idx_check_en << EMMC_HOST_CMD_IDX_CHECK_EN_LOC |
		  config->crc_check_en << EMMC_HOST_CMD_CRC_CHECK_EN_LOC |
		  resp_type << EMMC_HOST_CMD_RESP_TYPE_LOC;
	regs->cmd = cmd_reg;
	LOG_DBG("CMD REG:%x %x", cmd_reg, regs->cmd);

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
		ret = wait_for_cmd_complete(emmc, sdhc_cmd->timeout_ms);
	} else {
		ret = poll_cmd_complete(dev, sdhc_cmd->timeout_ms);
	}
	if (ret) {
		LOG_ERR("Error on send cmd: %d, status:%d", config->cmd_idx, ret);
		return ret;
	}

	update_cmd_response(dev, sdhc_cmd);

	return 0;
}

static int emmc_stop_transfer(const struct device *dev)
{
	struct emmc_data *emmc = dev->data;
	struct sdhc_command hdc_cmd = {0};
	struct emmc_cmd_config cmd;

	hdc_cmd.arg 
= emmc->rca << EMMC_HOST_RCA_SHIFT; hdc_cmd.response_type = SD_RSP_TYPE_R1; hdc_cmd.timeout_ms = 1000; cmd.sdhc_cmd = &hdc_cmd; cmd.cmd_idx = SD_STOP_TRANSMISSION; cmd.cmd_type = EMMC_HOST_CMD_NORMAL; cmd.data_present = false; cmd.idx_check_en = false; cmd.crc_check_en = false; return emmc_host_send_cmd(dev, &cmd); } static int emmc_reset(const struct device *dev) { volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); LOG_DBG(""); if (!(regs->present_state & EMMC_HOST_PSTATE_CARD_INSERTED)) { LOG_ERR("No EMMC card found"); return -ENODEV; } /* Reset device to idle state */ emmc_host_sw_reset(dev, EMMC_HOST_SW_RESET_ALL); clear_interrupts(dev); if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) { enable_interrupts(dev); } else { disable_interrupts(dev); } return 0; } static int read_data_port(const struct device *dev, struct sdhc_data *sdhc) { struct emmc_data *emmc = dev->data; volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); uint32_t block_size = sdhc->block_size; uint32_t i, block_cnt = sdhc->blocks; uint32_t *data = (uint32_t *)sdhc->data; k_timeout_t wait_time; if (sdhc->timeout_ms == SDHC_TIMEOUT_FOREVER) { wait_time = K_FOREVER; } else { wait_time = K_MSEC(sdhc->timeout_ms); } LOG_DBG(""); while (block_cnt--) { if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) { uint32_t events; events = k_event_wait(&emmc->irq_event, EMMC_HOST_BUF_RD_READY, false, wait_time); k_event_clear(&emmc->irq_event, EMMC_HOST_BUF_RD_READY); if (!(events & EMMC_HOST_BUF_RD_READY)) { LOG_ERR("time out on EMMC_HOST_BUF_RD_READY:%d", (sdhc->blocks - block_cnt)); return -EIO; } } else { while ((regs->present_state & EMMC_HOST_PSTATE_BUF_READ_EN) == 0) { ; } } if (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT) { for (i = block_size >> 2u; i != 0u; i--) { *data = regs->data_port; data++; } } } return wait_xfr_complete(dev, sdhc->timeout_ms); } static int write_data_port(const struct device *dev, struct sdhc_data *sdhc) { struct emmc_data *emmc = 
dev->data; volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); uint32_t block_size = sdhc->block_size; uint32_t i, block_cnt = sdhc->blocks; uint32_t *data = (uint32_t *)sdhc->data; k_timeout_t wait_time; if (sdhc->timeout_ms == SDHC_TIMEOUT_FOREVER) { wait_time = K_FOREVER; } else { wait_time = K_MSEC(sdhc->timeout_ms); } LOG_DBG(""); while ((regs->present_state & EMMC_HOST_PSTATE_BUF_WRITE_EN) == 0) { ; } while (1) { uint32_t events; if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) { k_event_clear(&emmc->irq_event, EMMC_HOST_BUF_WR_READY); } if (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT) { for (i = block_size >> 2u; i != 0u; i--) { regs->data_port = *data; data++; } } LOG_DBG("EMMC_HOST_BUF_WR_READY"); if (!(--block_cnt)) { break; } if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) { events = k_event_wait(&emmc->irq_event, EMMC_HOST_BUF_WR_READY, false, wait_time); k_event_clear(&emmc->irq_event, EMMC_HOST_BUF_WR_READY); if (!(events & EMMC_HOST_BUF_WR_READY)) { LOG_ERR("time out on EMMC_HOST_BUF_WR_READY"); return -EIO; } } else { while ((regs->present_state & EMMC_HOST_PSTATE_BUF_WRITE_EN) == 0) { ; } } } return wait_xfr_complete(dev, sdhc->timeout_ms); } static int emmc_send_cmd_no_data(const struct device *dev, uint32_t cmd_idx, struct sdhc_command *cmd) { struct emmc_cmd_config emmc_cmd; emmc_cmd.sdhc_cmd = cmd; emmc_cmd.cmd_idx = cmd_idx; emmc_cmd.cmd_type = EMMC_HOST_CMD_NORMAL; emmc_cmd.data_present = false; emmc_cmd.idx_check_en = false; emmc_cmd.crc_check_en = false; return emmc_host_send_cmd(dev, &emmc_cmd); } static int emmc_send_cmd_data(const struct device *dev, uint32_t cmd_idx, struct sdhc_command *cmd, struct sdhc_data *data, bool read) { struct emmc_cmd_config emmc_cmd; int ret; emmc_cmd.sdhc_cmd = cmd; emmc_cmd.cmd_idx = cmd_idx; emmc_cmd.cmd_type = EMMC_HOST_CMD_NORMAL; emmc_cmd.data_present = true; emmc_cmd.idx_check_en = true; emmc_cmd.crc_check_en = true; ret = emmc_init_xfr(dev, data, read); if (ret) { LOG_ERR("Error 
on init xfr"); return ret; } ret = emmc_host_send_cmd(dev, &emmc_cmd); if (ret) { return ret; } if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) { ret = wait_xfr_complete(dev, data->timeout_ms); } else { if (read) { ret = read_data_port(dev, data); } else { ret = write_data_port(dev, data); } } return ret; } static int emmc_xfr(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data, bool read) { struct emmc_data *emmc = dev->data; int ret; struct emmc_cmd_config emmc_cmd; ret = emmc_init_xfr(dev, data, read); if (ret) { LOG_ERR("error emmc init xfr"); return ret; } emmc_cmd.sdhc_cmd = cmd; emmc_cmd.cmd_type = EMMC_HOST_CMD_NORMAL; emmc_cmd.data_present = true; emmc_cmd.idx_check_en = true; emmc_cmd.crc_check_en = true; k_event_clear(&emmc->irq_event, EMMC_HOST_XFER_COMPLETE); k_event_clear(&emmc->irq_event, read ? EMMC_HOST_BUF_RD_READY : EMMC_HOST_BUF_WR_READY); if (data->blocks > 1) { emmc_cmd.cmd_idx = read ? SD_READ_MULTIPLE_BLOCK : SD_WRITE_MULTIPLE_BLOCK; ret = emmc_host_send_cmd(dev, &emmc_cmd); } else { emmc_cmd.cmd_idx = read ? 
SD_READ_SINGLE_BLOCK : SD_WRITE_SINGLE_BLOCK; ret = emmc_host_send_cmd(dev, &emmc_cmd); } if (ret) { return ret; } if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) { ret = wait_xfr_complete(dev, data->timeout_ms); } else { if (read) { ret = read_data_port(dev, data); } else { ret = write_data_port(dev, data); } } if (!IS_ENABLED(CONFIG_INTEL_EMMC_HOST_AUTO_STOP)) { emmc_stop_transfer(dev); } return ret; } static int emmc_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data) { int ret; LOG_DBG(""); if (data) { switch (cmd->opcode) { case SD_WRITE_SINGLE_BLOCK: case SD_WRITE_MULTIPLE_BLOCK: LOG_DBG("SD_WRITE_SINGLE_BLOCK"); ret = emmc_xfr(dev, cmd, data, false); break; case SD_READ_SINGLE_BLOCK: case SD_READ_MULTIPLE_BLOCK: LOG_DBG("SD_READ_SINGLE_BLOCK"); ret = emmc_xfr(dev, cmd, data, true); break; case MMC_SEND_EXT_CSD: LOG_DBG("EMMC_HOST_SEND_EXT_CSD"); ret = emmc_send_cmd_data(dev, MMC_SEND_EXT_CSD, cmd, data, true); break; default: ret = emmc_send_cmd_data(dev, cmd->opcode, cmd, data, true); } } else { ret = emmc_send_cmd_no_data(dev, cmd->opcode, cmd); } return ret; } static int emmc_set_io(const struct device *dev, struct sdhc_io *ios) { struct emmc_data *emmc = dev->data; volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); struct sdhc_io *host_io = &emmc->host_io; int ret; LOG_DBG("emmc I/O: DW %d, Clk %d Hz, card power state %s, voltage %s", ios->bus_width, ios->clock, ios->power_mode == SDHC_POWER_ON ? "ON" : "OFF", ios->signal_voltage == SD_VOL_1_8_V ? 
"1.8V" : "3.3V"); if (ios->clock && (ios->clock > emmc->props.f_max || ios->clock < emmc->props.f_min)) { LOG_ERR("Invalid argument for clock freq: %d Support max:%d and Min:%d", ios->clock, emmc->props.f_max, emmc->props.f_min); return -EINVAL; } /* Set HC clock */ if (host_io->clock != ios->clock) { LOG_DBG("Clock: %d", host_io->clock); if (ios->clock != 0) { /* Enable clock */ LOG_DBG("CLOCK: %d", ios->clock); if (!emmc_clock_set(dev, ios->clock)) { return -ENOTSUP; } } else { emmc_disable_clock(dev); } host_io->clock = ios->clock; } /* Set data width */ if (host_io->bus_width != ios->bus_width) { LOG_DBG("bus_width: %d", host_io->bus_width); if (ios->bus_width == SDHC_BUS_WIDTH4BIT) { SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_EXT_DAT_WIDTH_LOC, EMMC_HOST_CTRL1_EXT_DAT_WIDTH_MASK, ios->bus_width == SDHC_BUS_WIDTH8BIT ? 1 : 0); } else { SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_DAT_WIDTH_LOC, EMMC_HOST_CTRL1_DAT_WIDTH_MASK, ios->bus_width == SDHC_BUS_WIDTH4BIT ? 1 : 0); } host_io->bus_width = ios->bus_width; } /* Set HC signal voltage */ if (ios->signal_voltage != host_io->signal_voltage) { LOG_DBG("signal_voltage: %d", ios->signal_voltage); ret = emmc_set_voltage(dev, ios->signal_voltage); if (ret) { LOG_ERR("Set signal volatge failed:%d", ret); return ret; } host_io->signal_voltage = ios->signal_voltage; } /* Set card power */ if (host_io->power_mode != ios->power_mode) { LOG_DBG("power_mode: %d", ios->power_mode); ret = emmc_set_power(dev, ios->power_mode); if (ret) { LOG_ERR("Set Bus power failed:%d", ret); return ret; } host_io->power_mode = ios->power_mode; } /* Set I/O timing */ if (host_io->timing != ios->timing) { LOG_DBG("timing: %d", ios->timing); ret = set_timing(dev, ios->timing); if (ret) { LOG_ERR("Set timing failed:%d", ret); return ret; } host_io->timing = ios->timing; } return 0; } static int emmc_get_card_present(const struct device *dev) { struct emmc_data *emmc = dev->data; volatile struct emmc_reg *regs = (struct emmc_reg 
*)DEVICE_MMIO_GET(dev); LOG_DBG(""); emmc->card_present = (bool)((regs->present_state >> 16u) & 1u); if (!emmc->card_present) { LOG_ERR("No MMC device detected"); } return ((int)emmc->card_present); } static int emmc_execute_tuning(const struct device *dev) { if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_TUNING)) { volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); LOG_DBG("Tuning starting..."); regs->host_ctrl2 |= EMMC_HOST_START_TUNING; while (!(regs->host_ctrl2 & EMMC_HOST_START_TUNING)) { ; } if (regs->host_ctrl2 & EMMC_HOST_TUNING_SUCCESS) { LOG_DBG("Tuning Completed success"); } else { LOG_ERR("Tuning failed"); return -EIO; } } return 0; } static int emmc_card_busy(const struct device *dev) { volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); LOG_DBG(""); if (regs->present_state & 7u) { return 1; } return 0; } static int emmc_get_host_props(const struct device *dev, struct sdhc_host_props *props) { struct emmc_data *emmc = dev->data; const struct emmc_config *config = dev->config; volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); uint64_t cap = regs->capabilities; LOG_DBG(""); memset(props, 0, sizeof(struct sdhc_host_props)); props->f_max = config->max_bus_freq; props->f_min = config->min_bus_freq; props->power_delay = config->power_delay_ms; props->host_caps.vol_180_support = (bool)(cap & BIT(26u)); props->host_caps.vol_300_support = (bool)(cap & BIT(25u)); props->host_caps.vol_330_support = (bool)(bool)(cap & BIT(24u)); props->host_caps.suspend_res_support = false; props->host_caps.sdma_support = (bool)(cap & BIT(22u)); props->host_caps.high_spd_support = (bool)(cap & BIT(21u)); props->host_caps.adma_2_support = (bool)(cap & BIT(19u)); props->host_caps.max_blk_len = (cap >> 16u) & 0x3u; props->host_caps.ddr50_support = (bool)(cap & BIT(34u)); props->host_caps.sdr104_support = (bool)(cap & BIT(33u)); props->host_caps.sdr50_support = (bool)(cap & BIT(32u)); props->host_caps.bus_8_bit_support = 
true; props->host_caps.bus_4_bit_support = true; props->host_caps.hs200_support = (bool)config->hs200_mode; props->host_caps.hs400_support = (bool)config->hs400_mode; emmc->props = *props; return 0; } static void emmc_isr(const struct device *dev) { struct emmc_data *emmc = dev->data; volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev); if (regs->normal_int_stat & EMMC_HOST_CMD_COMPLETE) { regs->normal_int_stat |= EMMC_HOST_CMD_COMPLETE; k_event_post(&emmc->irq_event, EMMC_HOST_CMD_COMPLETE); } if (regs->normal_int_stat & EMMC_HOST_XFER_COMPLETE) { regs->normal_int_stat |= EMMC_HOST_XFER_COMPLETE; k_event_post(&emmc->irq_event, EMMC_HOST_XFER_COMPLETE); } if (regs->normal_int_stat & EMMC_HOST_DMA_INTR) { regs->normal_int_stat |= EMMC_HOST_DMA_INTR; k_event_post(&emmc->irq_event, EMMC_HOST_DMA_INTR); } if (regs->normal_int_stat & EMMC_HOST_BUF_WR_READY) { regs->normal_int_stat |= EMMC_HOST_BUF_WR_READY; k_event_post(&emmc->irq_event, EMMC_HOST_BUF_WR_READY); } if (regs->normal_int_stat & EMMC_HOST_BUF_RD_READY) { regs->normal_int_stat |= EMMC_HOST_BUF_RD_READY; k_event_post(&emmc->irq_event, EMMC_HOST_BUF_RD_READY); } if (regs->err_int_stat) { LOG_ERR("err int:%x", regs->err_int_stat); k_event_post(&emmc->irq_event, ERR_INTR_STATUS_EVENT(regs->err_int_stat)); if (regs->err_int_stat & EMMC_HOST_DMA_TXFR_ERR) { regs->err_int_stat |= EMMC_HOST_DMA_TXFR_ERR; } else { regs->err_int_stat |= regs->err_int_stat; } } if (regs->normal_int_stat) { k_event_post(&emmc->irq_event, regs->normal_int_stat); regs->normal_int_stat |= regs->normal_int_stat; } if (regs->adma_err_stat) { LOG_ERR("adma err:%x", regs->adma_err_stat); } } static int emmc_init(const struct device *dev) { struct emmc_data *emmc = dev->data; const struct emmc_config *config = dev->config; k_sem_init(&emmc->lock, 1, 1); k_event_init(&emmc->irq_event); #if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie) if (config->pcie) { struct pcie_bar mbar; if (config->pcie->bdf == PCIE_BDF_NONE) { LOG_ERR("Cannot 
probe eMMC PCI device: %x", config->pcie->id); return -ENODEV; } if (!pcie_probe_mbar(config->pcie->bdf, 0, &mbar)) { LOG_ERR("eMMC MBAR not found"); return -EINVAL; } pcie_get_mbar(config->pcie->bdf, 0, &mbar); pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MEM, true); device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MASTER, true); } else #endif { DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE); } LOG_DBG("MMC Device MMIO: %p", (void *)(struct emmc_reg *)DEVICE_MMIO_GET(dev)); if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) { config->config_func(dev); } return emmc_reset(dev); } static const struct sdhc_driver_api emmc_api = { .reset = emmc_reset, .request = emmc_request, .set_io = emmc_set_io, .get_card_present = emmc_get_card_present, .execute_tuning = emmc_execute_tuning, .card_busy = emmc_card_busy, .get_host_props = emmc_get_host_props, }; #define EMMC_HOST_IRQ_FLAGS_SENSE0(n) 0 #define EMMC_HOST_IRQ_FLAGS_SENSE1(n) DT_INST_IRQ(n, sense) #define EMMC_HOST_IRQ_FLAGS(n)\ _CONCAT(EMMC_HOST_IRQ_FLAGS_SENSE, DT_INST_IRQ_HAS_CELL(n, sense))(n) /* Not PCI(e) */ #define EMMC_HOST_IRQ_CONFIG_PCIE0(n) \ static void emmc_config_##n(const struct device *port) \ { \ ARG_UNUSED(port); \ IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), emmc_isr, \ DEVICE_DT_INST_GET(n), EMMC_HOST_IRQ_FLAGS(n)); \ irq_enable(DT_INST_IRQN(n)); \ } /* PCI(e) with auto IRQ detection */ #define EMMC_HOST_IRQ_CONFIG_PCIE1(n) \ static void emmc_config_##n(const struct device *port) \ { \ BUILD_ASSERT(DT_INST_IRQN(n) == PCIE_IRQ_DETECT, \ "Only runtime IRQ configuration is supported"); \ BUILD_ASSERT(IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS), \ "eMMC PCI device needs CONFIG_DYNAMIC_INTERRUPTS"); \ const struct emmc_config *const dev_cfg = port->config; \ unsigned int irq = pcie_alloc_irq(dev_cfg->pcie->bdf); \ \ if (irq == PCIE_CONF_INTR_IRQ_NONE) { \ return; \ } \ pcie_connect_dynamic_irq(dev_cfg->pcie->bdf, irq, 
DT_INST_IRQ(n, priority), \ (void (*)(const void *))emmc_isr, DEVICE_DT_INST_GET(n), \ EMMC_HOST_IRQ_FLAGS(n)); \ pcie_irq_enable(dev_cfg->pcie->bdf, irq); \ } #define EMMC_HOST_IRQ_CONFIG(n) _CONCAT(EMMC_HOST_IRQ_CONFIG_PCIE, DT_INST_ON_BUS(n, pcie))(n) #define INIT_PCIE0(n) #define INIT_PCIE1(n) DEVICE_PCIE_INST_INIT(n, pcie), #define INIT_PCIE(n) _CONCAT(INIT_PCIE, DT_INST_ON_BUS(n, pcie))(n) #define REG_INIT_PCIE0(n) DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)), #define REG_INIT_PCIE1(n) #define REG_INIT(n) _CONCAT(REG_INIT_PCIE, DT_INST_ON_BUS(n, pcie))(n) #define DEFINE_PCIE0(n) #define DEFINE_PCIE1(n) DEVICE_PCIE_INST_DECLARE(n) #define EMMC_HOST_PCIE_DEFINE(n) _CONCAT(DEFINE_PCIE, DT_INST_ON_BUS(n, pcie))(n) #define EMMC_HOST_DEV_CFG(n) \ EMMC_HOST_PCIE_DEFINE(n); \ EMMC_HOST_IRQ_CONFIG(n); \ static const struct emmc_config emmc_config_data_##n = { \ REG_INIT(n) INIT_PCIE(n).config_func = emmc_config_##n, \ .hs200_mode = DT_INST_PROP_OR(n, mmc_hs200_1_8v, 0), \ .hs400_mode = DT_INST_PROP_OR(n, mmc_hs400_1_8v, 0), \ .dw_4bit = DT_INST_ENUM_HAS_VALUE(n, bus_width, 4), \ .dw_8bit = DT_INST_ENUM_HAS_VALUE(n, bus_width, 8), \ .max_bus_freq = DT_INST_PROP_OR(n, max_bus_freq, 40000), \ .min_bus_freq = DT_INST_PROP_OR(n, min_bus_freq, 40000), \ .power_delay_ms = DT_INST_PROP_OR(n, power_delay_ms, 500), \ }; \ \ static struct emmc_data emmc_priv_data_##n; \ \ DEVICE_DT_INST_DEFINE(n, emmc_init, NULL, &emmc_priv_data_##n, &emmc_config_data_##n, \ POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &emmc_api); DT_INST_FOREACH_STATUS_OKAY(EMMC_HOST_DEV_CFG) ```
/content/code_sandbox/drivers/sdhc/intel_emmc_host.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
11,056
```c /* * */ /** * @file * @brief Module for tracking page use within memory banks * * The memory management drivers may use the routines within this module * to track page use within their memory banks. This information in turn * could be leveraged by them to determine when to power them on or off to * better conserve energy. */ #include <zephyr/kernel.h> #include <zephyr/drivers/mm/mm_drv_bank.h> #include <zephyr/sys/mem_stats.h> void sys_mm_drv_bank_init(struct sys_mm_drv_bank *bank, uint32_t bank_pages) { bank->unmapped_pages = 0; bank->mapped_pages = bank_pages; bank->max_mapped_pages = bank_pages; } uint32_t sys_mm_drv_bank_page_mapped(struct sys_mm_drv_bank *bank) { bank->unmapped_pages--; bank->mapped_pages++; if (bank->mapped_pages > bank->max_mapped_pages) { bank->max_mapped_pages = bank->mapped_pages; } return bank->mapped_pages; } uint32_t sys_mm_drv_bank_page_unmapped(struct sys_mm_drv_bank *bank) { bank->unmapped_pages++; bank->mapped_pages--; return bank->unmapped_pages; } void sys_mm_drv_bank_stats_get(struct sys_mm_drv_bank *bank, struct sys_memory_stats *stats) { stats->free_bytes = bank->unmapped_pages * CONFIG_MM_DRV_PAGE_SIZE; stats->allocated_bytes = bank->mapped_pages * CONFIG_MM_DRV_PAGE_SIZE; stats->max_allocated_bytes = bank->max_mapped_pages * CONFIG_MM_DRV_PAGE_SIZE; } void sys_mm_drv_bank_stats_reset_max(struct sys_mm_drv_bank *bank) { bank->max_mapped_pages = bank->mapped_pages; } ```
/content/code_sandbox/drivers/mm/mm_drv_bank.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
374
```objective-c /* * */ /** * @file * @brief Header for agregating all defines for mm * */ #ifndef ZEPHYR_DRIVERS_SYSTEM_MM_DRV_INTEL_MTL_ #define ZEPHYR_DRIVERS_SYSTEM_MM_DRV_INTEL_MTL_ #define DT_DRV_COMPAT intel_adsp_mtl_tlb #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/spinlock.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/check.h> #include <zephyr/kernel/mm.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/mm/system_mm.h> #include <zephyr/sys/mem_blocks.h> #include <soc.h> #include <adsp_memory.h> #include <adsp_memory_regions.h> #include "mm_drv_common.h" #define TLB_BASE (mm_reg_t)DT_REG_ADDR(DT_NODELABEL(tlb)) /* * Number of significant bits in the page index (defines the size of * the table) */ #define TLB_PADDR_SIZE DT_INST_PROP(0, paddr_size) #define TLB_EXEC_BIT BIT(DT_INST_PROP(0, exec_bit_idx)) #define TLB_WRITE_BIT BIT(DT_INST_PROP(0, write_bit_idx)) #define TLB_ENTRY_NUM (1 << TLB_PADDR_SIZE) #define TLB_PADDR_MASK ((1 << TLB_PADDR_SIZE) - 1) #define TLB_ENABLE_BIT BIT(TLB_PADDR_SIZE) /* This is used to translate from TLB entry back to physical address. */ /* base address of TLB table */ #define TLB_PHYS_BASE \ (((L2_SRAM_BASE / CONFIG_MM_DRV_PAGE_SIZE) & ~TLB_PADDR_MASK) * CONFIG_MM_DRV_PAGE_SIZE) #define HPSRAM_SEGMENTS(hpsram_ebb_quantity) \ ((ROUND_DOWN((hpsram_ebb_quantity) + 31u, 32u) / 32u) - 1u) #define L2_SRAM_PAGES_NUM (L2_SRAM_SIZE / CONFIG_MM_DRV_PAGE_SIZE) #define MAX_EBB_BANKS_IN_SEGMENT 32 #define SRAM_BANK_SIZE (128 * 1024) #define L2_SRAM_BANK_NUM (L2_SRAM_SIZE / SRAM_BANK_SIZE) #define IS_BIT_SET(value, idx) ((value) & (1 << (idx))) /** * Calculate TLB entry based on physical address. * * @param pa Page-aligned virutal address. * @return TLB entry value. */ static inline uint16_t pa_to_tlb_entry(uintptr_t pa) { return (((pa) / CONFIG_MM_DRV_PAGE_SIZE) & TLB_PADDR_MASK); } /** * Calculate physical address based on TLB entry. * * @param tlb_entry TLB entry value. 
* @return physcial address pointer. */ static inline uintptr_t tlb_entry_to_pa(uint16_t tlb_entry) { return ((((tlb_entry) & TLB_PADDR_MASK) * CONFIG_MM_DRV_PAGE_SIZE) + TLB_PHYS_BASE); } /** * Calculate virtual memory regions allocation based on * info from linker script. * * @param End address of staticaly allocated memory. * @return Error Code. */ int calculate_memory_regions(uintptr_t static_alloc_end_ptr); #endif /* ZEPHYR_DRIVERS_SYSTEM_MM_DRV_INTEL_MTL_ */ ```
/content/code_sandbox/drivers/mm/mm_drv_intel_adsp.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
714
```c /* * */ /** * @file * @brief Driver to utilize TLB on Intel Audio DSP * * TLB (Translation Lookup Buffer) table is used to map between * physical and virtual memory. This is global to all cores * on the DSP, as changes to the TLB table are visible to * all cores. * * Note that all passed in addresses should be in cached range * (aka cached addresses). Due to the need to calculate TLB * indexes, virtual addresses will be converted internally to * cached one via sys_cache_cached_ptr_get(). However, physical addresses * are untouched. */ #include "mm_drv_intel_adsp.h" #include <soc_util.h> #include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h> #include <zephyr/drivers/mm/mm_drv_bank.h> #include <zephyr/debug/sparse.h> #include <zephyr/cache.h> #include <kernel_arch_interface.h> #define SRAM_BANK_PAGE_NUM (SRAM_BANK_SIZE / CONFIG_MM_DRV_PAGE_SIZE) static struct k_spinlock tlb_lock; extern struct k_spinlock sys_mm_drv_common_lock; static struct sys_mm_drv_bank hpsram_bank[L2_SRAM_BANK_NUM]; #ifdef CONFIG_SOC_INTEL_COMM_WIDGET #include <adsp_comm_widget.h> static uint32_t used_pages; /* PMC uses 32 KB banks */ static uint32_t used_pmc_banks_reported; #endif /* Define a marker which is placed by the linker script just after * last explicitly defined section. All .text, .data, .bss and .heap * sections should be placed before this marker in the memory. * This driver is using the location of the marker to * unmap the unused L2 memory and power off corresponding memory banks. */ __attribute__((__section__(".unused_ram_start_marker"))) static int unused_l2_sram_start_marker = 0xba0babce; #define UNUSED_L2_START_ALIGNED ROUND_UP(POINTER_TO_UINT(&unused_l2_sram_start_marker), \ CONFIG_MM_DRV_PAGE_SIZE) /* declare L2 physical memory block */ SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF( L2_PHYS_SRAM_REGION, CONFIG_MM_DRV_PAGE_SIZE, L2_SRAM_PAGES_NUM, (uint8_t *) L2_SRAM_BASE); /** * Calculate the index to the TLB table. * * @param vaddr Page-aligned virutal address. 
* @return Index to the TLB table. */ static uint32_t get_tlb_entry_idx(uintptr_t vaddr) { return (POINTER_TO_UINT(vaddr) - CONFIG_KERNEL_VM_BASE) / CONFIG_MM_DRV_PAGE_SIZE; } /** * Calculate the index of the HPSRAM bank. * * @param pa physical address. * @return Index of the HPSRAM bank. */ static uint32_t get_hpsram_bank_idx(uintptr_t pa) { uint32_t phys_offset = pa - L2_SRAM_BASE; return (phys_offset / SRAM_BANK_SIZE); } /** * Convert the SYS_MM_MEM_PERM_* flags into TLB entry permission bits. * * @param flags Access flags (SYS_MM_MEM_PERM_*) * @return TLB entry permission bits */ static uint16_t flags_to_tlb_perms(uint32_t flags) { #if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE) uint16_t perms = 0; if ((flags & SYS_MM_MEM_PERM_RW) == SYS_MM_MEM_PERM_RW) { perms |= TLB_WRITE_BIT; } if ((flags & SYS_MM_MEM_PERM_EXEC) == SYS_MM_MEM_PERM_EXEC) { perms |= TLB_EXEC_BIT; } return perms; #else return 0; #endif } #if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE) /** * Convert TLB entry permission bits to the SYS_MM_MEM_PERM_* flags. 
* * @param perms TLB entry permission bits * @return Access flags (SYS_MM_MEM_PERM_*) */ static uint16_t tlb_perms_to_flags(uint16_t perms) { uint32_t flags = 0; if ((perms & TLB_WRITE_BIT) == TLB_WRITE_BIT) { flags |= SYS_MM_MEM_PERM_RW; } if ((perms & TLB_EXEC_BIT) == TLB_EXEC_BIT) { flags |= SYS_MM_MEM_PERM_EXEC; } return flags; } #endif static int sys_mm_drv_hpsram_pwr(uint32_t bank_idx, bool enable, bool non_blocking) { #if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE) if (bank_idx > ace_hpsram_get_bank_count()) { return -1; } HPSRAM_REGS(bank_idx)->HSxPGCTL = !enable; if (!non_blocking) { while (HPSRAM_REGS(bank_idx)->HSxPGISTS == enable) { k_busy_wait(1); } } #endif return 0; } #ifdef CONFIG_SOC_INTEL_COMM_WIDGET static void sys_mm_drv_report_page_usage(void) { /* PMC uses 32 KB banks */ uint32_t pmc_banks = DIV_ROUND_UP(used_pages, KB(32) / CONFIG_MM_DRV_PAGE_SIZE); if (used_pmc_banks_reported != pmc_banks) { if (!adsp_comm_widget_pmc_send_ipc(pmc_banks)) { /* Store reported value if message was sent successfully. */ used_pmc_banks_reported = pmc_banks; } } } #endif int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags) { k_spinlock_key_t key; uint32_t entry_idx, bank_idx; uint16_t entry; volatile uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); int ret = 0; void *phys_block_ptr; /* * Cached addresses for both physical and virtual. * * As the main memory is in cached address ranges, * the cached physical address is needed to perform * bound check. 
*/ uintptr_t pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(UINT_TO_POINTER(phys))); uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); ARG_UNUSED(flags); /* Make sure VA is page-aligned */ CHECKIF(!sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } /* Check bounds of virtual address space */ CHECKIF((va < UNUSED_L2_START_ALIGNED) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } /* * When the provided physical address is NULL * then it is a signal to the Intel ADSP TLB driver to * select the first available free physical address * autonomously within the driver. */ if (UINT_TO_POINTER(phys) == NULL) { ret = sys_mem_blocks_alloc_contiguous(&L2_PHYS_SRAM_REGION, 1, &phys_block_ptr); if (ret != 0) { __ASSERT(false, "unable to assign free phys page %d\n", ret); goto out; } pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(phys_block_ptr)); } /* Check bounds of physical address space */ CHECKIF((pa < L2_SRAM_BASE) || (pa >= (L2_SRAM_BASE + L2_SRAM_SIZE))) { ret = -EINVAL; goto out; } /* Make sure PA is page-aligned */ CHECKIF(!sys_mm_drv_is_addr_aligned(pa)) { ret = -EINVAL; goto out; } key = k_spin_lock(&tlb_lock); entry_idx = get_tlb_entry_idx(va); #ifdef CONFIG_SOC_INTEL_COMM_WIDGET used_pages++; sys_mm_drv_report_page_usage(); #endif bank_idx = get_hpsram_bank_idx(pa); if (sys_mm_drv_bank_page_mapped(&hpsram_bank[bank_idx]) == 1) { sys_mm_drv_hpsram_pwr(bank_idx, true, false); } /* * The address part of the TLB entry takes the lowest * TLB_PADDR_SIZE bits of the physical page number, * and discards the highest bits. This is due to the * architecture design where the same physical page * can be accessed via two addresses. One address goes * through the cache, and the other one accesses * memory directly (without cache). The difference * between these two addresses are in the higher bits, * and the lower bits are the same. And this is why * TLB only cares about the lower part of the physical * address. 
*/ entry = pa_to_tlb_entry(pa); /* Enable the translation in the TLB entry */ entry |= TLB_ENABLE_BIT; /* Set permissions for this entry */ entry |= flags_to_tlb_perms(flags); tlb_entries[entry_idx] = entry; #ifdef CONFIG_MMU arch_mem_map(virt, va, CONFIG_MM_DRV_PAGE_SIZE, flags); #endif /* * Invalid the cache of the newly mapped virtual page to * avoid stale data. */ sys_cache_data_invd_range(virt, CONFIG_MM_DRV_PAGE_SIZE); k_spin_unlock(&tlb_lock, key); out: return ret; } int sys_mm_drv_map_region(void *virt, uintptr_t phys, size_t size, uint32_t flags) { k_spinlock_key_t key; int ret = 0; size_t offset; uintptr_t pa; uint8_t *va; CHECKIF(!sys_mm_drv_is_addr_aligned(phys) || !sys_mm_drv_is_virt_addr_aligned(virt) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } va = (__sparse_force uint8_t *)sys_cache_cached_ptr_get(virt); pa = phys; key = k_spin_lock(&sys_mm_drv_common_lock); for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { int ret2 = sys_mm_drv_map_page(va, pa, flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va); ret = ret2; } va += CONFIG_MM_DRV_PAGE_SIZE; if (phys != 0) { pa += CONFIG_MM_DRV_PAGE_SIZE; } } k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } int sys_mm_drv_map_array(void *virt, uintptr_t *phys, size_t cnt, uint32_t flags) { void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt); return sys_mm_drv_simple_map_array(va, phys, cnt, flags); } static int sys_mm_drv_unmap_page_wflush(void *virt, bool flush_data) { k_spinlock_key_t key; uint32_t entry_idx, bank_idx; uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); uintptr_t pa; int ret = 0; /* Use cached virtual address */ uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); /* Check bounds of virtual address space */ CHECKIF((va < UNUSED_L2_START_ALIGNED) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } /* Make sure inputs are page-aligned */ 
CHECKIF(!sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } key = k_spin_lock(&tlb_lock); /* * Flush the cache to make sure the backing physical page * has the latest data. * No flush when called from sys_mm_drv_mm_init(). */ if (flush_data) { sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE); #ifdef CONFIG_MMU arch_mem_unmap(virt, CONFIG_MM_DRV_PAGE_SIZE); #endif } entry_idx = get_tlb_entry_idx(va); pa = tlb_entry_to_pa(tlb_entries[entry_idx]); /* Restore default entry settings with cleared the enable bit. */ tlb_entries[entry_idx] = 0; /* Check bounds of physical address space. * Initial TLB mappings could point to non existing physical pages. */ if ((pa >= L2_SRAM_BASE) && (pa < (L2_SRAM_BASE + L2_SRAM_SIZE))) { sys_mem_blocks_free_contiguous(&L2_PHYS_SRAM_REGION, UINT_TO_POINTER(pa), 1); bank_idx = get_hpsram_bank_idx(pa); #ifdef CONFIG_SOC_INTEL_COMM_WIDGET used_pages--; sys_mm_drv_report_page_usage(); #endif if (sys_mm_drv_bank_page_unmapped(&hpsram_bank[bank_idx]) == SRAM_BANK_PAGE_NUM) { sys_mm_drv_hpsram_pwr(bank_idx, false, false); } } k_spin_unlock(&tlb_lock, key); out: return ret; } int sys_mm_drv_unmap_page(void *virt) { return sys_mm_drv_unmap_page_wflush(virt, true); } int sys_mm_drv_unmap_region(void *virt, size_t size) { void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt); return sys_mm_drv_simple_unmap_region(va, size); } int sys_mm_drv_update_page_flags(void *virt, uint32_t flags) { k_spinlock_key_t key; uint32_t entry_idx; uint16_t entry; uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); int ret = 0; /* Use cached virtual address */ uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); /* Make sure inputs are page-aligned and check bounds of virtual address space */ CHECKIF(!sys_mm_drv_is_addr_aligned(va) || (va < UNUSED_L2_START_ALIGNED) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { return -EINVAL; } key = k_spin_lock(&tlb_lock); entry_idx = get_tlb_entry_idx(va); entry = 
tlb_entries[entry_idx]; /* Check entry is already mapped */ if (!(entry & TLB_ENABLE_BIT)) { ret = -EFAULT; goto out; } /* Clear the access flags */ entry &= ~(TLB_EXEC_BIT | TLB_WRITE_BIT); /* Set new permissions for this entry */ entry |= flags_to_tlb_perms(flags); tlb_entries[entry_idx] = entry; out: k_spin_unlock(&tlb_lock, key); return ret; } #ifdef CONFIG_MM_DRV_INTEL_ADSP_TLB_REMAP_UNUSED_RAM static int sys_mm_drv_unmap_region_initial(void *virt_in, size_t size) { void *virt = (__sparse_force void *)sys_cache_cached_ptr_get(virt_in); k_spinlock_key_t key; int ret = 0; size_t offset; CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va = (uint8_t *)virt + offset; int ret2 = sys_mm_drv_unmap_page_wflush(va, false); if (ret2 != 0) { __ASSERT(false, "cannot unmap %p\n", va); ret = ret2; } } k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } #endif int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys) { uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); uintptr_t ent; int ret = 0; /* Use cached address */ uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); CHECKIF(!sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } /* Check bounds of virtual address space */ CHECKIF((va < CONFIG_KERNEL_VM_BASE) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } ent = tlb_entries[get_tlb_entry_idx(va)]; if ((ent & TLB_ENABLE_BIT) != TLB_ENABLE_BIT) { ret = -EFAULT; } else { if (phys != NULL) { *phys = (ent & TLB_PADDR_MASK) * CONFIG_MM_DRV_PAGE_SIZE + TLB_PHYS_BASE; } ret = 0; } out: return ret; } int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags) { ARG_UNUSED(virt); int ret = 0; #if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE) uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); uint16_t ent; /* Use cached address */ 
uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); CHECKIF(!sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } /* Check bounds of virtual address space */ CHECKIF((va < CONFIG_KERNEL_VM_BASE) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } ent = tlb_entries[get_tlb_entry_idx(va)]; if ((ent & TLB_ENABLE_BIT) != TLB_ENABLE_BIT) { ret = -EFAULT; } else { *flags = tlb_perms_to_flags(ent); } out: #else /* * There are no caching mode, or R/W, or eXecution (etc.) bits. * So just return 0. */ *flags = 0U; #endif return ret; } int sys_mm_drv_remap_region(void *virt_old, size_t size, void *virt_new) { void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new); void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old); return sys_mm_drv_simple_remap_region(va_old, size, va_new); } int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new, uintptr_t phys_new) { k_spinlock_key_t key; size_t offset; int ret = 0; virt_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new); virt_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old); CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) || !sys_mm_drv_is_virt_addr_aligned(virt_new) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) && (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { ret = -EINVAL; /* overlaps */ goto out; } /* * The function's behavior has been updated to accept * phys_new == NULL and get the physical addresses from * the actual TLB instead of from the caller. 
*/ if (phys_new != POINTER_TO_UINT(NULL) && !sys_mm_drv_is_addr_aligned(phys_new)) { ret = -EINVAL; goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { ret = -EINVAL; goto unlock_out; } for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va_old = (uint8_t *)virt_old + offset; uint8_t *va_new = (uint8_t *)virt_new + offset; uintptr_t pa; uint32_t flags; int ret2; ret2 = sys_mm_drv_page_flag_get(va_old, &flags); if (ret2 != 0) { __ASSERT(false, "cannot query page flags %p\n", va_old); ret = ret2; goto unlock_out; } ret2 = sys_mm_drv_page_phys_get(va_old, &pa); if (ret2 != 0) { __ASSERT(false, "cannot query page paddr %p\n", va_old); ret = ret2; goto unlock_out; } /* * Only map the new page when we can retrieve * flags and phys addr of the old mapped page as We don't * want to map with unknown random flags. */ ret2 = sys_mm_drv_map_page(va_new, pa, flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new); ret = ret2; } ret2 = sys_mm_drv_unmap_page(va_old); if (ret2 != 0) { __ASSERT(false, "cannot unmap %p\n", va_old); ret = ret2; } } unlock_out: k_spin_unlock(&sys_mm_drv_common_lock, key); out: /* * Since move is done in virtual space, need to * flush the cache to make sure the backing physical * pages have the new data. */ sys_cache_data_flush_range(virt_new, size); sys_cache_data_flush_and_invd_range(virt_old, size); return ret; } int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new, uintptr_t *phys_new, size_t phys_cnt) { int ret; void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new); void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old); ret = sys_mm_drv_simple_move_array(va_old, size, va_new, phys_new, phys_cnt); /* * Since memcpy() is done in virtual space, need to * flush the cache to make sure the backing physical * pages have the new data. 
*/ sys_cache_data_flush_range(va_new, size); return ret; } static int sys_mm_drv_mm_init(const struct device *dev) { int ret; ARG_UNUSED(dev); /* * Change size of avalible physical memory according to fw register information * in runtime. */ uint32_t avalible_memory_size = ace_hpsram_get_bank_count() * SRAM_BANK_SIZE; L2_PHYS_SRAM_REGION.info.num_blocks = avalible_memory_size / CONFIG_MM_DRV_PAGE_SIZE; ret = calculate_memory_regions(UNUSED_L2_START_ALIGNED); CHECKIF(ret != 0) { return ret; } /* * Initialize memblocks that will store physical * page usage. Initially all physical pages are * mapped in linear way to virtual address space * so mark all pages as allocated. */ ret = sys_mem_blocks_get(&L2_PHYS_SRAM_REGION, (void *) L2_SRAM_BASE, L2_SRAM_PAGES_NUM); CHECKIF(ret != 0) { return ret; } /* * Initialize refcounts for all HPSRAM banks * as fully used because entire HPSRAM is powered on * at system boot. Set reference count to a number * of pages within single memory bank. */ for (int i = 0; i < L2_SRAM_BANK_NUM; i++) { sys_mm_drv_bank_init(&hpsram_bank[i], SRAM_BANK_PAGE_NUM); } #ifdef CONFIG_SOC_INTEL_COMM_WIDGET used_pages = L2_SRAM_BANK_NUM * SRAM_BANK_SIZE / CONFIG_MM_DRV_PAGE_SIZE; #endif #ifdef CONFIG_MM_DRV_INTEL_ADSP_TLB_REMAP_UNUSED_RAM /* * find virtual address range which are unused * in the system */ if (L2_SRAM_BASE + L2_SRAM_SIZE < UNUSED_L2_START_ALIGNED || L2_SRAM_BASE > UNUSED_L2_START_ALIGNED) { __ASSERT(false, "unused l2 pointer is outside of l2 sram range %p\n", (void *)UNUSED_L2_START_ALIGNED); return -EFAULT; } /* * Unmap all unused physical pages from the entire * virtual address space to save power */ size_t unused_size = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE - UNUSED_L2_START_ALIGNED; ret = sys_mm_drv_unmap_region_initial(UINT_TO_POINTER(UNUSED_L2_START_ALIGNED), unused_size); /* Need to reset max pages statistics after unmap */ for (int i = 0; i < L2_SRAM_BANK_NUM; i++) { sys_mm_drv_bank_stats_reset_max(&hpsram_bank[i]); } 
#endif /* * Notify PMC about used HP-SRAM pages. */ #ifdef CONFIG_SOC_INTEL_COMM_WIDGET sys_mm_drv_report_page_usage(); #endif return 0; } static void adsp_mm_save_context(void *storage_buffer) { uint16_t entry; uint32_t entry_idx; int page_idx; uint32_t phys_addr; volatile uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); uint8_t *location = (uint8_t *) storage_buffer; /* first, store the existing TLB */ memcpy(location, UINT_TO_POINTER(TLB_BASE), TLB_SIZE); location += TLB_SIZE; /* save context of all the pages */ for (page_idx = 0; page_idx < L2_SRAM_PAGES_NUM; page_idx++) { phys_addr = POINTER_TO_UINT(L2_SRAM_BASE) + CONFIG_MM_DRV_PAGE_SIZE * page_idx; if (sys_mem_blocks_is_region_free( &L2_PHYS_SRAM_REGION, UINT_TO_POINTER(phys_addr), 1)) { /* skip a free page */ continue; } /* map the physical addr 1:1 to virtual address */ entry_idx = get_tlb_entry_idx(phys_addr); entry = pa_to_tlb_entry(phys_addr); if (((tlb_entries[entry_idx] & TLB_PADDR_MASK) != entry) || ((tlb_entries[entry_idx] & TLB_ENABLE_BIT) != TLB_ENABLE_BIT)) { /* this page needs remapping, invalidate cache to avoid stalled data * all cache data has been flushed before * do this for pages to remap only */ sys_cache_data_invd_range(UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE); /* Enable the translation in the TLB entry */ entry |= TLB_ENABLE_BIT; /* map the page 1:1 virtual to physical */ tlb_entries[entry_idx] = entry; } /* save physical address */ *((uint32_t *) location) = phys_addr; location += sizeof(uint32_t); /* save the page */ memcpy(location, UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE); location += CONFIG_MM_DRV_PAGE_SIZE; } /* write end marker - a null address */ *((uint32_t *) location) = 0; location += sizeof(uint32_t); sys_cache_data_flush_range( storage_buffer, (uint32_t)location - (uint32_t)storage_buffer); /* system state is frozen, ready to poweroff, no further changes will be stored */ } __imr void adsp_mm_restore_context(void *storage_buffer) { /* at this point 
system must be in a startup state * TLB must be set to initial state * Note! the stack must NOT be in the area being restored */ uint32_t phys_addr; uint8_t *location; /* restore context of all the pages */ location = (uint8_t *) storage_buffer + TLB_SIZE; phys_addr = *((uint32_t *) location); while (phys_addr != 0) { uint32_t phys_addr_uncached = POINTER_TO_UINT(sys_cache_uncached_ptr_get( (void __sparse_cache *)UINT_TO_POINTER(phys_addr))); uint32_t phys_offset = phys_addr - L2_SRAM_BASE; uint32_t bank_idx = (phys_offset / SRAM_BANK_SIZE); location += sizeof(uint32_t); /* turn on memory bank power, wait till the power is on */ __ASSERT_NO_MSG(bank_idx <= ace_hpsram_get_bank_count()); HPSRAM_REGS(bank_idx)->HSxPGCTL = 0; while (HPSRAM_REGS(bank_idx)->HSxPGISTS == 1) { /* k_busy_wait cannot be used here - not available */ } /* copy data to uncached alias and invalidate cache */ bmemcpy(UINT_TO_POINTER(phys_addr_uncached), location, CONFIG_MM_DRV_PAGE_SIZE); sys_cache_data_invd_range(UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE); location += CONFIG_MM_DRV_PAGE_SIZE; phys_addr = *((uint32_t *) location); } /* restore original TLB table */ bmemcpy(UINT_TO_POINTER(TLB_BASE), storage_buffer, TLB_SIZE); /* HPSRAM memory is restored */ } static uint32_t adsp_mm_get_storage_size(void) { /* * FIXME - currently the function returns a maximum possible size of the buffer * as L3 memory is generally a huge area its OK (and fast) * in future the function may go through the mapping and calculate a required size */ return L2_SRAM_SIZE + TLB_SIZE + (L2_SRAM_PAGES_NUM * sizeof(void *)) + sizeof(void *); } static const struct intel_adsp_tlb_api adsp_tlb_api_func = { .save_context = adsp_mm_save_context, .get_storage_size = adsp_mm_get_storage_size }; DEVICE_DT_DEFINE(DT_INST(0, intel_adsp_mtl_tlb), sys_mm_drv_mm_init, NULL, NULL, NULL, POST_KERNEL, 0, &adsp_tlb_api_func); ```
/content/code_sandbox/drivers/mm/mm_drv_intel_adsp_mtl_tlb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,570
```c /* * */ /** * @file * @brief Driver to utilize TLB on Intel Audio DSP * * TLB (Translation Lookup Buffer) table is used to map between * physical and virtual memory. This is global to all cores * on the DSP, as changes to the TLB table are visible to * all cores. * * Note that all passed in addresses should be in cached range * (aka cached addresses). Due to the need to calculate TLB * indexes, virtual addresses will be converted internally to * cached one via sys_cache_cached_ptr_get(). However, physical addresses * are untouched. */ #define DT_DRV_COMPAT intel_adsp_tlb #include <zephyr/device.h> #include <zephyr/kernel.h> #include <zephyr/spinlock.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/check.h> #include <zephyr/kernel/mm.h> #include <zephyr/sys/util.h> #include <zephyr/debug/sparse.h> #include <zephyr/cache.h> #include <adsp_memory.h> #include <zephyr/drivers/mm/system_mm.h> #include "mm_drv_common.h" DEVICE_MMIO_TOPLEVEL_STATIC(tlb_regs, DT_DRV_INST(0)); #define TLB_BASE \ ((mm_reg_t)DEVICE_MMIO_TOPLEVEL_GET(tlb_regs)) /* * Number of significant bits in the page index (defines the size of * the table) */ #define TLB_PADDR_SIZE DT_INST_PROP(0, paddr_size) #define TLB_PADDR_MASK ((1 << TLB_PADDR_SIZE) - 1) #define TLB_ENABLE_BIT BIT(TLB_PADDR_SIZE) static struct k_spinlock tlb_lock; /** * Calculate the index to the TLB table. * * @param vaddr Page-aligned virtual address. * @return Index to the TLB table. */ static uint32_t get_tlb_entry_idx(uintptr_t vaddr) { return (POINTER_TO_UINT(vaddr) - CONFIG_KERNEL_VM_BASE) / CONFIG_MM_DRV_PAGE_SIZE; } int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags) { k_spinlock_key_t key; uint32_t entry_idx; uint16_t entry; uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); int ret = 0; /* * Cached addresses for both physical and virtual. * * As the main memory is in cached address ranges, * the cached physical address is needed to perform * bound check. 
*/ uintptr_t pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(UINT_TO_POINTER(phys))); uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); ARG_UNUSED(flags); /* Make sure inputs are page-aligned */ CHECKIF(!sys_mm_drv_is_addr_aligned(pa) || !sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } /* Check bounds of physical address space */ CHECKIF((pa < L2_SRAM_BASE) || (pa >= (L2_SRAM_BASE + L2_SRAM_SIZE))) { ret = -EINVAL; goto out; } /* Check bounds of virtual address space */ CHECKIF((va < CONFIG_KERNEL_VM_BASE) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } key = k_spin_lock(&tlb_lock); entry_idx = get_tlb_entry_idx(va); /* * The address part of the TLB entry takes the lowest * TLB_PADDR_SIZE bits of the physical page number, * and discards the highest bits. This is due to the * architecture design where the same physical page * can be accessed via two addresses. One address goes * through the cache, and the other one accesses * memory directly (without cache). The difference * between these two addresses are in the higher bits, * and the lower bits are the same. And this is why * TLB only cares about the lower part of the physical * address. */ entry = ((pa / CONFIG_MM_DRV_PAGE_SIZE) & TLB_PADDR_MASK); /* Enable the translation in the TLB entry */ entry |= TLB_ENABLE_BIT; tlb_entries[entry_idx] = entry; /* * Invalid the cache of the newly mapped virtual page to * avoid stale data. 
*/ sys_cache_data_invd_range(virt, CONFIG_MM_DRV_PAGE_SIZE); k_spin_unlock(&tlb_lock, key); out: return ret; } int sys_mm_drv_map_region(void *virt, uintptr_t phys, size_t size, uint32_t flags) { void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt); return sys_mm_drv_simple_map_region(va, phys, size, flags); } int sys_mm_drv_map_array(void *virt, uintptr_t *phys, size_t cnt, uint32_t flags) { void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt); return sys_mm_drv_simple_map_array(va, phys, cnt, flags); } int sys_mm_drv_unmap_page(void *virt) { k_spinlock_key_t key; uint32_t entry_idx; uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); int ret = 0; /* Use cached virtual address */ uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); /* Check bounds of virtual address space */ CHECKIF((va < CONFIG_KERNEL_VM_BASE) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } /* Make sure inputs are page-aligned */ CHECKIF(!sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } key = k_spin_lock(&tlb_lock); /* * Flush the cache to make sure the backing physical page * has the latest data. 
*/ sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE); entry_idx = get_tlb_entry_idx(va); /* Simply clear the enable bit */ tlb_entries[entry_idx] &= ~TLB_ENABLE_BIT; k_spin_unlock(&tlb_lock, key); out: return ret; } int sys_mm_drv_unmap_region(void *virt, size_t size) { void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt); return sys_mm_drv_simple_unmap_region(va, size); } int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys) { uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE); uintptr_t ent; int ret = 0; /* Use cached address */ uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt)); CHECKIF(!sys_mm_drv_is_addr_aligned(va)) { ret = -EINVAL; goto out; } /* Check bounds of virtual address space */ CHECKIF((va < CONFIG_KERNEL_VM_BASE) || (va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) { ret = -EINVAL; goto out; } ent = tlb_entries[get_tlb_entry_idx(va)]; if ((ent & TLB_ENABLE_BIT) != TLB_ENABLE_BIT) { ret = -EFAULT; } else { if (phys != NULL) { *phys = (ent & TLB_PADDR_MASK) * CONFIG_MM_DRV_PAGE_SIZE + L2_SRAM_BASE; } ret = 0; } out: return ret; } int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags) { ARG_UNUSED(virt); /* * There are no caching mode, or R/W, or eXecution (etc.) bits. * So just return 0. */ *flags = 0U; return 0; } int sys_mm_drv_update_page_flags(void *virt, uint32_t flags) { ARG_UNUSED(virt); ARG_UNUSED(flags); /* * There are no caching mode, or R/W, or eXecution (etc.) bits. * So just return 0. 
*/ return 0; } int sys_mm_drv_update_region_flags(void *virt, size_t size, uint32_t flags) { void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt); return sys_mm_drv_simple_update_region_flags(va, size, flags); } int sys_mm_drv_remap_region(void *virt_old, size_t size, void *virt_new) { void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new); void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old); return sys_mm_drv_simple_remap_region(va_old, size, va_new); } int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new, uintptr_t phys_new) { int ret; void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new); void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old); ret = sys_mm_drv_simple_move_region(va_old, size, va_new, phys_new); /* * Since memcpy() is done in virtual space, need to * flush the cache to make sure the backing physical * pages have the new data. */ sys_cache_data_flush_range(va_new, size); return ret; } int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new, uintptr_t *phys_new, size_t phys_cnt) { int ret; void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new); void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old); ret = sys_mm_drv_simple_move_array(va_old, size, va_new, phys_new, phys_cnt); /* * Since memcpy() is done in virtual space, need to * flush the cache to make sure the backing physical * pages have the new data. */ sys_cache_data_flush_range(va_new, size); return ret; } ```
/content/code_sandbox/drivers/mm/mm_drv_intel_adsp_tlb.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,176
```c /* * */ /** * @file * @brief Driver handling memory regions related * functions */ #include "mm_drv_intel_adsp.h" struct sys_mm_drv_region virtual_memory_regions[CONFIG_MP_MAX_NUM_CPUS + VIRTUAL_REGION_COUNT] = { {0} }; const struct sys_mm_drv_region *sys_mm_drv_query_memory_regions(void) { return (const struct sys_mm_drv_region *) virtual_memory_regions; } static inline void append_region(void *address, uint32_t mem_size, uint32_t attributes, uint32_t position, uint32_t *total_size) { virtual_memory_regions[position].addr = address; virtual_memory_regions[position].size = mem_size; virtual_memory_regions[position].attr = attributes; total_size += mem_size; } int calculate_memory_regions(uintptr_t static_alloc_end_ptr) { int i, total_size = 0; for (i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) { append_region((void *)(static_alloc_end_ptr + i * CORE_HEAP_SIZE), CORE_HEAP_SIZE, MEM_REG_ATTR_CORE_HEAP, i, &total_size); } append_region((void *)((uintptr_t)virtual_memory_regions[i - 1].addr + virtual_memory_regions[i - 1].size), CORE_HEAP_SIZE, MEM_REG_ATTR_SHARED_HEAP, i, &total_size); i++; append_region((void *)((uintptr_t)virtual_memory_regions[i - 1].addr + virtual_memory_regions[i - 1].size), OPPORTUNISTIC_REGION_SIZE, MEM_REG_ATTR_OPPORTUNISTIC_MEMORY, i, &total_size); i++; /* Apending last region as 0 so iterators know where table is over * check is for size = 0; */ append_region(NULL, 0, 0, i, &total_size); if (total_size > L2_VIRTUAL_SRAM_SIZE) { return -EINVAL; } return 0; } ```
/content/code_sandbox/drivers/mm/mm_drv_intel_adsp_regions.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
409
```c /* * */ /** * @file * @brief Driver handling Region based Address Translation (RAT) * related functions * * RAT is a module that is used by certain Texas Instruments SoCs * to allow some cores with a 32 bit address space to access * the full 48 bit SoC address space. This is required for the * core to be able to use peripherals. * * The driver uses the sys_mm_drv_page_phys_get() API to access * the address space. */ #include <zephyr/kernel.h> #include <zephyr/drivers/mm/rat.h> #include <zephyr/drivers/mm/system_mm.h> #include <zephyr/sys/__assert.h> static struct address_trans_params translate_config; /** * @brief Set registers for the address regions being used * * @param addr_translate_config Pointer to config struct for the RAT module * @param region_num Number of regions being initialised * @param enable Region status */ static void address_trans_set_region(struct address_trans_params *addr_translate_config, uint16_t region_num, uint32_t enable) { uint32_t rat_base_addr = addr_translate_config->rat_base_addr; uint64_t system_addr = addr_translate_config->region_config[region_num].system_addr; uint32_t local_addr = addr_translate_config->region_config[region_num].local_addr; uint32_t size = addr_translate_config->region_config[region_num].size; uint32_t system_addrL, system_addrH; if (size > address_trans_region_size_4G) { size = address_trans_region_size_4G; } system_addrL = (uint32_t)(system_addr & ~((uint32_t)((BIT64_MASK(size))))); system_addrH = (uint32_t)((system_addr >> 32) & 0xFFFF); local_addr = local_addr & ~((uint32_t)(BIT64_MASK(size))); sys_write32(0, RAT_CTRL(rat_base_addr, region_num)); sys_write32(local_addr, RAT_BASE(rat_base_addr, region_num)); sys_write32(system_addrL, RAT_TRANS_L(rat_base_addr, region_num)); sys_write32(system_addrH, RAT_TRANS_H(rat_base_addr, region_num)); sys_write32(RAT_CTRL_W(enable, size), RAT_CTRL(rat_base_addr, region_num)); } static void address_trans_init(struct address_trans_params *params) { uint32_t i; if 
(params != NULL) { translate_config = *params; } __ASSERT(translate_config.num_regions < ADDR_TRANSLATE_MAX_REGIONS, "Exceeding maximum number of regions"); for (i = 0; i < translate_config.num_regions; i++) { __ASSERT(translate_config.rat_base_addr != 0, "RAT base address cannot be 0"); __ASSERT(translate_config.region_config != NULL, "RAT region config cannot be NULL"); /* enable regions setup by user */ address_trans_set_region(&translate_config, i, 1); } } /** * @brief Initialise RAT module * * @param region_config Pointer to config struct for the regions * @param rat_base_addr Base address for the RAT module * @param translate_regions Number of regions being initialised */ void sys_mm_drv_ti_rat_init(void *region_config, uint64_t rat_base_addr, uint8_t translate_regions) { translate_config.num_regions = translate_regions; translate_config.rat_base_addr = rat_base_addr; translate_config.region_config = (struct address_trans_region_config *)region_config; address_trans_init(&translate_config); } int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys) { if (virt == NULL) { return -EINVAL; } uintptr_t pa = (uintptr_t) virt; uintptr_t *va = phys; uint32_t found, regionId; __ASSERT(translate_config.num_regions < ADDR_TRANSLATE_MAX_REGIONS, "Exceeding maximum number of regions"); found = 0; for (regionId = 0; regionId < translate_config.num_regions; regionId++) { uint64_t start_addr, end_addr; uint32_t size_mask; size_mask = ((uint32_t)((BIT64_MASK(translate_config.region_config[regionId].size)))); start_addr = translate_config.region_config[regionId].system_addr; end_addr = start_addr + size_mask; if (pa >= start_addr && pa <= end_addr) { found = 1; break; } } if (found) { /* translate input address to output address */ uint32_t offset = pa - translate_config.region_config[regionId].system_addr; *va = (translate_config.region_config[regionId].local_addr + offset); } else { /* no mapping found, set output = input with 32b truncation */ *va = pa; } if (va == NULL) { 
return -EFAULT; } return 0; } ```
/content/code_sandbox/drivers/mm/mm_drv_ti_rat.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,066
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ #define ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ #include <zephyr/kernel.h> #include <zephyr/toolchain.h> #include <zephyr/drivers/mm/system_mm.h> extern struct k_spinlock sys_mm_drv_common_lock; /** * @brief Get the flags of mapped virtual address. * * The function queries the translation tables to find the flags of * a mapped virtual address. This is used internally for remapping. * * Behavior when providing unaligned address is undefined, this * is assumed to be page aligned. * * @param virt Page-aligned virtual address * @param[out] flags flags of mapped virtual address * * @retval 0 if mapping is found and valid * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if virtual address is not mapped */ int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags); /** * @brief Test if address is page-aligned * * @param addr address to be tested * * @retval true if page-aligned * @retval false if not page-aligned */ static inline bool sys_mm_drv_is_addr_aligned(uintptr_t addr) { return ((addr & (CONFIG_MM_DRV_PAGE_SIZE - 1)) == 0U); } /** * @brief Test if address is page-aligned * * @param addr address to be tested * * @retval true if page-aligned * @retval false if not page-aligned */ static inline bool sys_mm_drv_is_virt_addr_aligned(void *virt) { return sys_mm_drv_is_addr_aligned(POINTER_TO_UINT(virt)); } /** * @brief Test if size is page-aligned * * @param addr size to be tested * * @retval true if page-aligned * @retval false if not page-aligned */ static inline bool sys_mm_drv_is_size_aligned(size_t size) { if ((size & (CONFIG_MM_DRV_PAGE_SIZE - 1)) == 0U) { return true; } else { return false; } } /** * @brief Test if all physical addresses in array are page-aligned * * @param addr Array of physical addresses * @param cnt Number of elements in the array * * @retval true if all are page-aligned * @retval false if at least one is not page-aligned */ bool 
sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt); /** * @brief Test if the virtual memory region is mapped * * @param virt Page-aligned base virtual address * @param size Size of the virtual memory region * * @retval true if all pages in the region are mapped * @retval false if at least one page is not mapped */ bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size); /** * @brief Test if the virtual memory region is unmapped * * @param virt Page-aligned base virtual address * @param size Size of the virtual memory region * * @retval true if all pages in the region are unmapped * @retval false if at least one page is mapped */ bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size); /** * @brief Simple implementation of sys_mm_drv_map_region() * * This provides a simple implementation for sys_mm_drv_map_region() * which is marked as a weak alias to sys_mm_drv_map_region(). * * Drivers do not have to implement their own sys_mm_drv_map_region() * if this works for them. Or they can override sys_mm_drv_map_region() * and call sys_mm_drv_simple_map_region() with some pre-processing done. * Or the drivers can implement their own sys_mm_drv_map_region(), then * this function will not be used. * * @see sys_mm_drv_map_region * * @param virt Page-aligned destination virtual address to map * @param phys Page-aligned source physical address to map * @param size Page-aligned size of the mapped memory region in bytes * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if any virtual addresses have already been mapped */ int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys, size_t size, uint32_t flags); /** * @brief Simple implementation of sys_mm_drv_map_array() * * This provides a simple implementation for sys_mm_drv_map_array() * which is marked as a weak alias to sys_mm_drv_map_array(). 
* * Drivers do not have to implement their own sys_mm_drv_map_array() * if this works for them. Or they can override sys_mm_drv_map_array() * and call sys_mm_drv_simple_map_array() with some pre-processing done. * Or the drivers can implement their own sys_mm_drv_map_array(), then * this function will not be used. * * @see sys_mm_drv_map_array * * @param virt Page-aligned destination virtual address to map * @param phys Array of pge-aligned source physical address to map * @param cnt Number of elements in the physical page array * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if any virtual addresses have already been mapped */ int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys, size_t cnt, uint32_t flags); /** * @brief Simple implementation of sys_mm_drv_unmap_region() * * This provides a simple implementation for sys_mm_drv_unmap_region() * which is marked as a weak alias to sys_mm_drv_unmap_region(). * * Drivers do not have to implement their own sys_mm_drv_unmap_region() * if this works for them. Or they can override sys_mm_drv_unmap_region() * and call sys_mm_drv_simple_unmap_region() with some pre-processing done. * Or the drivers can implement their own sys_mm_drv_unmap_region(), then * this function will not be used. * * @see sys_mm_drv_unmap_region * * @param virt Page-aligned base virtual address to un-map * @param size Page-aligned region size * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if virtual addresses have already been mapped */ int sys_mm_drv_simple_unmap_region(void *virt, size_t size); /** * @brief Simple implementation of sys_mm_drv_remap_region() * * This provides a simple implementation for sys_mm_drv_remap_region() * which is marked as a weak alias to sys_mm_drv_remap_region(). 
* * Drivers do not have to implement their own sys_mm_drv_remap_region() * if this works for them. Or they can override sys_mm_drv_remap_region() * and call sys_mm_drv_simple_remap_region() with some pre-processing done. * Or the drivers can implement their own sys_mm_drv_remap_region(), then * this function will not be used. * * @see sys_mm_drv_remap_region * * @param virt_old Page-aligned base virtual address of existing memory * @param size Page-aligned size of the mapped memory region in bytes * @param virt_new Page-aligned base virtual address to which to remap * the memory * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if old virtual addresses are not all mapped or * new virtual addresses are not all unmapped */ int sys_mm_drv_simple_remap_region(void *virt_old, size_t size, void *virt_new); /** * @brief Simple implementation of sys_mm_drv_move_region() * * This provides a simple implementation for sys_mm_drv_move_region() * which is marked as a weak alias to sys_mm_drv_move_region(). * * Drivers do not have to implement their own sys_mm_drv_move_region() * if this works for them. Or they can override sys_mm_drv_move_region() * and call sys_mm_drv_simple_move_region() with some pre-processing done. * Or the drivers can implement their own sys_mm_drv_move_region(), then * this function will not be used. 
* * @see sys_mm_drv_move_region * * @param virt_old Page-aligned base virtual address of existing memory * @param size Page-aligned size of the mapped memory region in bytes * @param virt_new Page-aligned base virtual address to which to map * new physical pages * @param phys_new Page-aligned base physical address to contain * the moved memory * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if old virtual addresses are not all mapped or * new virtual addresses are not all unmapped */ int sys_mm_drv_simple_move_region(void *virt_old, size_t size, void *virt_new, uintptr_t phys_new); /** * @brief Simple implementation of sys_mm_drv_move_array() * * This provides a simple implementation for sys_mm_drv_move_array() * which is marked as a weak alias to sys_mm_drv_move_array(). * * Drivers do not have to implement their own sys_mm_drv_move_array() * if this works for them. Or they can override sys_mm_drv_move_array() * and call sys_mm_drv_simple_move_array() with some pre-processing done. * Or the drivers can implement their own sys_mm_drv_move_array(), then * this function will not be used. 
* * @see sys_mm_drv_move_array * * @param virt_old Page-aligned base virtual address of existing memory * @param size Page-aligned size of the mapped memory region in bytes * @param virt_new Page-aligned base virtual address to which to map * new physical pages * @param phys_new Array of page-aligned physical address to contain * the moved memory * @param phys_cnt Number of elements in the physical page array * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if old virtual addresses are not all mapped or * new virtual addresses are not all unmapped */ int sys_mm_drv_simple_move_array(void *virt_old, size_t size, void *virt_new, uintptr_t *phys_new, size_t phys_cnt); /** * @brief Update memory region flags * * This changes the attributes of physical memory which is already * mapped to a virtual address. This is useful when use case of * specific memory region changes. * E.g. when the library/module code is copied to the memory then * it needs to be read-write and after it has already * been copied and library/module code is ready to be executed then * attributes need to be changed to read-only/executable. * Calling this API must not cause losing memory contents. * * @param virt Page-aligned virtual address to be updated * @param size Page-aligned size of the mapped memory region in bytes * @param flags Caching, access and control flags, see SYS_MM_MEM_* macros * * @retval 0 if successful * @retval -EINVAL if invalid arguments are provided * @retval -EFAULT if virtual addresses is not mapped */ int sys_mm_drv_simple_update_region_flags(void *virt, size_t size, uint32_t flags); #endif /* ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ */ ```
/content/code_sandbox/drivers/mm/mm_drv_common.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,421
```unknown # menuconfig MM_DRV bool "Memory Management drivers [EXPERIMENTAL]" select EXPERIMENTAL select KERNEL_VM_SUPPORT help Include Memory Management drivers in system config if MM_DRV config MM_DRV_PAGE_SIZE hex "Memory Page Size" default 0x1000 help Size of memory pages. config MM_DRV_INTEL_ADSP_TLB_REMAP_UNUSED_RAM bool "Power off unused RAM" help Allows TLB driver to remap unused RAM - unused being defined as memory ranging from linker script defined "unused_l2_sram_start_marker" to end of RAM. Note that unused memory will be powered off by default. Disable this option if dynamically managing memory, such as by using a heap allocator. config MM_DRV_INTEL_ADSP_MTL_TLB bool "Intel Audio DSP TLB Driver for Meteor Lake" default y depends on DT_HAS_INTEL_ADSP_MTL_TLB_ENABLED imply SYS_MEM_BLOCKS help Driver for the translation lookup buffer on Intel Audio DSP hardware (Meteor Lake). config MM_DRV_INTEL_ADSP_TLB bool "Intel Audio DSP TLB Driver" default y depends on DT_HAS_INTEL_ADSP_TLB_ENABLED help Driver for the translation lookup buffer on Intel Audio DSP hardware. config EXTERNAL_ADDRESS_TRANSLATION bool "Support for external address translation modules" depends on !MMU help This config is intended to support an external address translation module if required for an SoC. Uses the sys_mm_drv_page_phys_get() function from the system_mm API. if EXTERNAL_ADDRESS_TRANSLATION config MM_TI_RAT bool "Texas Instruments RAT module" depends on EXTERNAL_ADDRESS_TRANSLATION help Enables Region based address translation support functions specific to TI SoCs. endif # EXTERNAL_ADDRESS_TRANSLATION endif # MM_DRV ```
/content/code_sandbox/drivers/mm/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
410
```unknown # config CACHE_ASPEED bool "ASPEED external cache driver" depends on SOC_SERIES_AST10X0 depends on SYSCON select CACHE_HAS_DRIVER help This option enables the CACHE driver for ASPEED AST10X0 series SOC. ```
/content/code_sandbox/drivers/cache/Kconfig.aspeed
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
57
```c /* * */ #include <zephyr/cache.h> #include <zephyr/internal/syscall_handler.h> static inline int z_vrfy_sys_cache_data_flush_range(void *addr, size_t size) { K_OOPS(K_SYSCALL_MEMORY_WRITE(addr, size)); return z_impl_sys_cache_data_flush_range(addr, size); } #include <zephyr/syscalls/sys_cache_data_flush_range_mrsh.c> static inline int z_vrfy_sys_cache_data_invd_range(void *addr, size_t size) { K_OOPS(K_SYSCALL_MEMORY_WRITE(addr, size)); return z_impl_sys_cache_data_invd_range(addr, size); } #include <zephyr/syscalls/sys_cache_data_invd_range_mrsh.c> static inline int z_vrfy_sys_cache_data_flush_and_invd_range(void *addr, size_t size) { K_OOPS(K_SYSCALL_MEMORY_WRITE(addr, size)); return z_impl_sys_cache_data_flush_and_invd_range(addr, size); } #include <zephyr/syscalls/sys_cache_data_flush_and_invd_range_mrsh.c> ```
/content/code_sandbox/drivers/cache/cache_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
221
```c /* * */ /** * @file * @brief Common Memory Management Driver Code * * This file provides common implementation of memory management driver * functions, for example, sys_mm_drv_map_region() can use * sys_mm_drv_map_page() to map page by page for the whole region. * This avoids duplicate implementations of same functionality in * different drivers. The implementations here are marked as * weak functions so they can be overridden by the driver. */ #include <zephyr/kernel.h> #include <string.h> #include <zephyr/toolchain.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/check.h> #include <zephyr/sys/util.h> #include <zephyr/drivers/mm/system_mm.h> #include "mm_drv_common.h" struct k_spinlock sys_mm_drv_common_lock; bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt) { size_t idx; bool ret = true; for (idx = 0; idx < cnt; idx++) { if (!sys_mm_drv_is_addr_aligned(addr[idx])) { ret = false; break; } } return ret; } bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size) { size_t offset; bool ret = true; for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va = (uint8_t *)virt + offset; if (sys_mm_drv_page_phys_get(va, NULL) != 0) { ret = false; break; } } return ret; } bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size) { size_t offset; bool ret = true; for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va = (uint8_t *)virt + offset; if (sys_mm_drv_page_phys_get(va, NULL) != -EFAULT) { ret = false; break; } } return ret; } int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys, size_t size, uint32_t flags) { k_spinlock_key_t key; int ret = 0; size_t offset; CHECKIF(!sys_mm_drv_is_addr_aligned(phys) || !sys_mm_drv_is_virt_addr_aligned(virt) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va = (uint8_t *)virt + offset; uintptr_t pa = 
phys + offset; int ret2 = sys_mm_drv_map_page(va, pa, flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va); ret = ret2; } } k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_map_region, sys_mm_drv_map_region, int); int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys, size_t cnt, uint32_t flags) { k_spinlock_key_t key; int ret = 0; size_t idx, offset; CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys, cnt) || !sys_mm_drv_is_virt_addr_aligned(virt)) { ret = -EINVAL; goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); offset = 0; idx = 0; while (idx < cnt) { uint8_t *va = (uint8_t *)virt + offset; int ret2 = sys_mm_drv_map_page(va, phys[idx], flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", phys[idx], va); ret = ret2; } offset += CONFIG_MM_DRV_PAGE_SIZE; idx++; } k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_map_array, sys_mm_drv_map_array, int); int sys_mm_drv_simple_unmap_region(void *virt, size_t size) { k_spinlock_key_t key; int ret = 0; size_t offset; CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va = (uint8_t *)virt + offset; int ret2 = sys_mm_drv_unmap_page(va); if (ret2 != 0) { __ASSERT(false, "cannot unmap %p\n", va); ret = ret2; } } k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_unmap_region, sys_mm_drv_unmap_region, int); int sys_mm_drv_simple_remap_region(void *virt_old, size_t size, void *virt_new) { k_spinlock_key_t key; size_t offset; int ret = 0; CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) || !sys_mm_drv_is_virt_addr_aligned(virt_new) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } if ((POINTER_TO_UINT(virt_new) >= 
POINTER_TO_UINT(virt_old)) && (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { ret = -EINVAL; /* overlaps */ goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { ret = -EINVAL; goto unlock_out; } for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va_old = (uint8_t *)virt_old + offset; uint8_t *va_new = (uint8_t *)virt_new + offset; uintptr_t pa; uint32_t flags; int ret2; bool to_map; /* * va_old is mapped as checked above, so no need * to check for return value here. */ (void)sys_mm_drv_page_phys_get(va_old, &pa); to_map = true; ret2 = sys_mm_drv_page_flag_get(va_old, &flags); if (ret2 != 0) { __ASSERT(false, "cannot query page %p\n", va_old); ret = ret2; to_map = false; } ret2 = sys_mm_drv_unmap_page(va_old); if (ret2 != 0) { __ASSERT(false, "cannot unmap %p\n", va_old); ret = ret2; } if (!to_map) { /* * Cannot retrieve flags of mapped virtual memory. * Skip mapping this page as we don't want to map * with unknown random flags. 
*/ continue; } ret2 = sys_mm_drv_map_page(va_new, pa, flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new); ret = ret2; } } unlock_out: k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_remap_region, sys_mm_drv_remap_region, int); int sys_mm_drv_simple_move_region(void *virt_old, size_t size, void *virt_new, uintptr_t phys_new) { k_spinlock_key_t key; size_t offset; int ret = 0; CHECKIF(!sys_mm_drv_is_addr_aligned(phys_new) || !sys_mm_drv_is_virt_addr_aligned(virt_old) || !sys_mm_drv_is_virt_addr_aligned(virt_new) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) && (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { ret = -EINVAL; /* overlaps */ goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { ret = -EINVAL; goto unlock_out; } for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va_old = (uint8_t *)virt_old + offset; uint8_t *va_new = (uint8_t *)virt_new + offset; uintptr_t pa = phys_new + offset; uint32_t flags; int ret2; ret2 = sys_mm_drv_page_flag_get(va_old, &flags); if (ret2 != 0) { __ASSERT(false, "cannot query page %p\n", va_old); ret = ret2; } else { /* * Only map the new page when we can retrieve * flags of the old mapped page as We don't * want to map with unknown random flags. 
*/ ret2 = sys_mm_drv_map_page(va_new, pa, flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new); ret = ret2; } else { (void)memcpy(va_new, va_old, CONFIG_MM_DRV_PAGE_SIZE); } } ret2 = sys_mm_drv_unmap_page(va_old); if (ret2 != 0) { __ASSERT(false, "cannot unmap %p\n", va_old); ret = ret2; } } unlock_out: k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_move_region, sys_mm_drv_move_region, int); int sys_mm_drv_simple_move_array(void *virt_old, size_t size, void *virt_new, uintptr_t *phys_new, size_t phys_cnt) { k_spinlock_key_t key; size_t idx, offset; int ret = 0; CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys_new, phys_cnt) || !sys_mm_drv_is_virt_addr_aligned(virt_old) || !sys_mm_drv_is_virt_addr_aligned(virt_new) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) && (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) { ret = -EINVAL; /* overlaps */ goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) || !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) { ret = -EINVAL; goto unlock_out; } offset = 0; idx = 0; while (idx < phys_cnt) { uint8_t *va_old = (uint8_t *)virt_old + offset; uint8_t *va_new = (uint8_t *)virt_new + offset; uint32_t flags; int ret2; ret2 = sys_mm_drv_page_flag_get(va_old, &flags); if (ret2 != 0) { __ASSERT(false, "cannot query page %p\n", va_old); ret = ret2; } else { /* * Only map the new page when we can retrieve * flags of the old mapped page as We don't * want to map with unknown random flags. 
*/ ret2 = sys_mm_drv_map_page(va_new, phys_new[idx], flags); if (ret2 != 0) { __ASSERT(false, "cannot map 0x%lx to %p\n", phys_new[idx], va_new); ret = ret2; } else { (void)memcpy(va_new, va_old, CONFIG_MM_DRV_PAGE_SIZE); } } ret2 = sys_mm_drv_unmap_page(va_old); if (ret2 != 0) { __ASSERT(false, "cannot unmap %p\n", va_old); ret = ret2; } offset += CONFIG_MM_DRV_PAGE_SIZE; idx++; } unlock_out: k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_move_array, sys_mm_drv_move_array, int); int sys_mm_drv_simple_update_region_flags(void *virt, size_t size, uint32_t flags) { k_spinlock_key_t key; int ret = 0; size_t offset; CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) || !sys_mm_drv_is_size_aligned(size)) { ret = -EINVAL; goto out; } key = k_spin_lock(&sys_mm_drv_common_lock); for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) { uint8_t *va = (uint8_t *)virt + offset; int ret2 = sys_mm_drv_update_page_flags(va, flags); if (ret2 != 0) { __ASSERT(false, "cannot update flags %p\n", va); ret = ret2; } } k_spin_unlock(&sys_mm_drv_common_lock, key); out: return ret; } __weak FUNC_ALIAS(sys_mm_drv_simple_update_region_flags, sys_mm_drv_update_region_flags, int); const struct sys_mm_drv_region *sys_mm_drv_simple_query_memory_regions(void) { const static struct sys_mm_drv_region empty[] = { { } }; return empty; } __weak FUNC_ALIAS(sys_mm_drv_simple_query_memory_regions, sys_mm_drv_query_memory_regions, const struct sys_mm_drv_region *); void sys_mm_drv_simple_query_memory_regions_free(const struct sys_mm_drv_region *regions) { ARG_UNUSED(regions); } __weak FUNC_ALIAS(sys_mm_drv_simple_query_memory_regions_free, sys_mm_drv_query_memory_regions_free, void); ```
/content/code_sandbox/drivers/mm/mm_drv_common.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,268
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_CACHE_CACHE_ANDES_L2_H_ #define ZEPHYR_DRIVERS_CACHE_CACHE_ANDES_L2_H_ #include <zephyr/kernel.h> #include <zephyr/arch/cpu.h> #include <zephyr/drivers/syscon.h> #define L2C_BASE DT_REG_ADDR_BY_IDX(DT_INST(0, andestech_l2c), 0) /* L2 cache Register Offset */ #define L2C_CONFIG (L2C_BASE + 0x00) #define L2C_CTRL (L2C_BASE + 0x08) #define L2C_CCTLCMD(hart_id) \ (L2C_BASE + 0x40 + (hart_id * l2_cache_cfg.cmd_offset)) #define L2C_CCTLACC(hart_id) \ (L2C_BASE + 0x48 + (hart_id * l2_cache_cfg.cmd_offset)) #define L2C_CCTLST(hart_id) \ (L2C_BASE + 0x80 + (hart_id * l2_cache_cfg.status_offset)) /* L2 cache config registers bitfields */ #define L2C_CONFIG_SIZE_SHIFT 7 #define L2C_CONFIG_MAP BIT(20) #define L2C_CONFIG_VERSION_SHIFT 24 /* L2 cache control registers bitfields */ #define L2C_CTRL_CEN BIT(0) #define L2C_CTRL_IPFDPT_3 GENMASK(4, 3) #define L2C_CTRL_DPFDPT_8 GENMASK(7, 6) /* L2 cache CCTL Access Line registers bitfields */ #define L2C_CCTLACC_WAY_SHIFT 28 /* L2 CCTL Command */ #define CCTL_L2_IX_INVAL 0x00 #define CCTL_L2_IX_WB 0x01 #define CCTL_L2_PA_INVAL 0x08 #define CCTL_L2_PA_WB 0x09 #define CCTL_L2_PA_WBINVAL 0x0a #define CCTL_L2_WBINVAL_ALL 0x12 #define K_CACHE_WB BIT(0) #define K_CACHE_INVD BIT(1) #define K_CACHE_WB_INVD (K_CACHE_WB | K_CACHE_INVD) struct nds_l2_cache_config { uint32_t size; uint32_t cmd_offset; uint32_t status_offset; uint16_t status_shift; uint8_t version; }; static struct nds_l2_cache_config l2_cache_cfg; static ALWAYS_INLINE int nds_l2_cache_is_inclusive(void) { return IS_ENABLED(CONFIG_L2C_INCLUSIVE_POLICY) && (l2_cache_cfg.version > 15); } static ALWAYS_INLINE void nds_l2_cache_wait_status(uint8_t hart_id) { uint32_t status; do { status = sys_read32(L2C_CCTLST(hart_id)); status >>= hart_id * l2_cache_cfg.status_shift; status &= BIT_MASK(4); } while (status == 1); } static ALWAYS_INLINE int nds_l2_cache_all(int op) { /* L2 cache fixed to 64 byte cache line size and 16 way */ const unsigned 
long line_size = 64, ways = 16; unsigned long sets, index, cmd; uint8_t hart_id; unsigned long status = csr_read(mstatus); if (!l2_cache_cfg.size) { return -ENOTSUP; } if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) { if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) { if (!nds_l2_cache_is_inclusive()) { return -ENOTSUP; } } } switch (op) { case K_CACHE_WB: cmd = CCTL_L2_IX_WB; break; case K_CACHE_INVD: cmd = CCTL_L2_IX_INVAL; break; case K_CACHE_WB_INVD: cmd = CCTL_L2_WBINVAL_ALL; break; default: return -ENOTSUP; } hart_id = arch_proc_id(); if (op == K_CACHE_WB_INVD) { sys_write32(CCTL_L2_WBINVAL_ALL, L2C_CCTLCMD(hart_id)); /* Wait L2 CCTL Commands finished */ nds_l2_cache_wait_status(hart_id); } else { sets = l2_cache_cfg.size / (ways * line_size); /* Invalidate all cache line by each way and each set */ for (int j = 0; j < ways; j++) { /* Index of way */ index = j << L2C_CCTLACC_WAY_SHIFT; for (int i = 0; i < sets; i++) { /* Index of set */ index += line_size; /* Invalidate each cache line */ sys_write32(index, L2C_CCTLACC(hart_id)); sys_write32(cmd, L2C_CCTLCMD(hart_id)); /* Wait L2 CCTL Commands finished */ nds_l2_cache_wait_status(hart_id); } } } return 0; } static ALWAYS_INLINE int nds_l2_cache_range(void *addr, size_t size, int op) { const unsigned long line_size = 64; unsigned long last_byte, align_addr, cmd; uint8_t hart_id; if (!l2_cache_cfg.size) { return -ENOTSUP; } switch (op) { case K_CACHE_WB: cmd = CCTL_L2_PA_WB; break; case K_CACHE_INVD: cmd = CCTL_L2_PA_INVAL; break; case K_CACHE_WB_INVD: cmd = CCTL_L2_PA_WBINVAL; break; default: return -ENOTSUP; } last_byte = (unsigned long)addr + size - 1; align_addr = ROUND_DOWN(addr, line_size); hart_id = arch_proc_id(); while (align_addr <= last_byte) { sys_write32(align_addr, L2C_CCTLACC(hart_id)); sys_write32(cmd, L2C_CCTLCMD(hart_id)); align_addr += line_size; /* Wait L2 CCTL Commands finished */ nds_l2_cache_wait_status(hart_id); } return 0; } static ALWAYS_INLINE void nds_l2_cache_enable(void) { if 
(l2_cache_cfg.size) { uint32_t l2c_ctrl = sys_read32(L2C_CTRL); if (!(l2c_ctrl & L2C_CTRL_CEN)) { WRITE_BIT(l2c_ctrl, 0, true); sys_write32(l2c_ctrl, L2C_CTRL); } } } static ALWAYS_INLINE void nds_l2_cache_disable(void) { if (l2_cache_cfg.size) { uint32_t l2c_ctrl = sys_read32(L2C_CTRL); if (l2c_ctrl & L2C_CTRL_CEN) { WRITE_BIT(l2c_ctrl, 0, false); sys_write32(l2c_ctrl, L2C_CTRL); } } } static ALWAYS_INLINE int nds_l2_cache_init(void) { unsigned long line_size; #if defined(CONFIG_SYSCON) #if DT_NODE_HAS_COMPAT_STATUS(DT_NODELABEL(syscon), andestech_atcsmu100, okay) uint32_t system_cfg; const struct device *syscon_dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); if (device_is_ready(syscon_dev)) { /* Check L2 cache feature from SMU */ syscon_read_reg(syscon_dev, 0x08, &system_cfg); /* Platform doesn't support L2 cache controller */ if (!(system_cfg & BIT(8))) { l2_cache_cfg.size = 0; return 0; } } else { LOG_ERR("Andes cache driver should be initialized after " "syscon driver initialization"); return 0; } #endif /* andestech_atcsmu100 dts node status okay */ #endif /* defined(CONFIG_SYSCON) */ uint32_t l2c_ctrl; line_size = (sys_read32(L2C_CONFIG) >> L2C_CONFIG_SIZE_SHIFT) & BIT_MASK(7); l2_cache_cfg.size = line_size * 128 * 1024; if (sys_read32(L2C_CONFIG) & L2C_CONFIG_MAP) { l2_cache_cfg.cmd_offset = 0x10; l2_cache_cfg.status_offset = 0; l2_cache_cfg.status_shift = 4; } else { l2_cache_cfg.cmd_offset = 0x1000; l2_cache_cfg.status_offset = 0x1000; l2_cache_cfg.status_shift = 0; } l2_cache_cfg.version = (sys_read32(L2C_CONFIG) >> L2C_CONFIG_VERSION_SHIFT) & BIT_MASK(8); /* Initializing L2 cache instruction, data prefetch depth */ l2c_ctrl = sys_read32(L2C_CTRL); l2c_ctrl |= (L2C_CTRL_IPFDPT_3 | L2C_CTRL_DPFDPT_8); /* Writeback and invalidate all I/D-Cache before setting L2C */ __asm__ volatile ("fence.i"); sys_write32(l2c_ctrl, L2C_CTRL); if (IS_ENABLED(CONFIG_SMP)) { if (l2_cache_cfg.size) { l2c_ctrl = sys_read32(L2C_CTRL); if (!(l2c_ctrl & L2C_CTRL_CEN)) { 
WRITE_BIT(l2c_ctrl, 0, true); sys_write32(l2c_ctrl, L2C_CTRL); } } } return l2_cache_cfg.size; } #endif /* ZEPHYR_DRIVERS_CACHE_CACHE_ANDES_L2_H_ */ ```
/content/code_sandbox/drivers/cache/cache_andes_l2.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,158
```unknown # DT_COMPAT_ANDESTECH_L2C := andestech,l2c config CACHE_ANDES bool "ANDES external cache driver" default y depends on SOC_FAMILY_ANDES_V5 select CACHE_HAS_DRIVER imply DCACHE_LINE_SIZE_DETECT imply ICACHE_LINE_SIZE_DETECT help This option enables the CACHE driver for ANDES V5 series SOC. if CACHE_ANDES config L2C_INCLUSIVE_POLICY bool depends on $(dt_compat_enabled,$(DT_COMPAT_ANDESTECH_L2C)) help When L2 cache is inclusive of L1, CPU only needs to perform operations on L2 cache, instead of on both L1 and L2 caches. config CACHE_ANDES_INIT_PRIORITY int "Andes cache driver init priority" default 60 help This option controls the priority of the cache initialization. Lower values indicate earlier initialization. endif # CACHE_ANDES ```
/content/code_sandbox/drivers/cache/Kconfig.andes
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
200
```unknown config CACHE_NRF_CACHE bool "nRF cache driver" select CACHE_HAS_DRIVER depends on HAS_NRFX && CACHE_MANAGEMENT help Enable support for the nRF cache driver. config CACHE_NRF_PATCH_LINEADDR bool "Patch lineaddr" default y if SOC_NRF54H20 help Manually set 28th bit in the LINEADDR in Trustzone Secure build. ```
/content/code_sandbox/drivers/cache/Kconfig.nrf
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
87
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/syscon.h> #include <zephyr/sys/barrier.h> /* * cache area control: each bit controls 32KB cache area * 1: cacheable * 0: no-cache * * bit[0]: 1st 32KB from 0x0000_0000 to 0x0000_7fff * bit[1]: 2nd 32KB from 0x0000_8000 to 0x0000_ffff * ... * bit[22]: 23th 32KB from 0x000a_8000 to 0x000a_ffff * bit[23]: 24th 32KB from 0x000b_0000 to 0x000b_ffff */ #define CACHE_AREA_CTRL_REG 0xa50 #define CACHE_INVALID_REG 0xa54 #define CACHE_FUNC_CTRL_REG 0xa58 #define CACHED_SRAM_ADDR CONFIG_SRAM_BASE_ADDRESS #define CACHED_SRAM_SIZE KB(CONFIG_SRAM_SIZE) #define CACHED_SRAM_END (CACHED_SRAM_ADDR + CACHED_SRAM_SIZE - 1) #define CACHE_AREA_SIZE_LOG2 15 #define CACHE_AREA_SIZE (1 << CACHE_AREA_SIZE_LOG2) #define DCACHE_INVALID(addr) (BIT(31) | ((addr & GENMASK(10, 0)) << 16)) #define ICACHE_INVALID(addr) (BIT(15) | ((addr & GENMASK(10, 0)) << 0)) #define ICACHE_CLEAN BIT(2) #define DCACHE_CLEAN BIT(1) #define CACHE_ENABLE BIT(0) /* cache size = 32B * 128 = 4KB */ #define CACHE_LINE_SIZE_LOG2 5 #define CACHE_LINE_SIZE (1 << CACHE_LINE_SIZE_LOG2) #define N_CACHE_LINE 128 #define CACHE_ALIGNED_ADDR(addr) \ ((addr >> CACHE_LINE_SIZE_LOG2) << CACHE_LINE_SIZE_LOG2) /* prefetch buffer */ #define PREFETCH_BUF_SIZE CACHE_LINE_SIZE static void aspeed_cache_init(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); uint32_t start_bit, end_bit, max_bit; /* set all cache areas to no-cache by default */ syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, 0); /* calculate how many areas need to be set */ max_bit = 8 * sizeof(uint32_t) - 1; start_bit = MIN(max_bit, CACHED_SRAM_ADDR >> CACHE_AREA_SIZE_LOG2); end_bit = MIN(max_bit, CACHED_SRAM_END >> CACHE_AREA_SIZE_LOG2); syscon_write_reg(dev, CACHE_AREA_CTRL_REG, GENMASK(end_bit, start_bit)); /* enable cache */ syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, CACHE_ENABLE); } /** * @brief get aligned address and the number of cachline to be invalied * @param [IN] addr - start 
address to be invalidated * @param [IN] size - size in byte * @param [OUT] p_aligned_addr - pointer to the cacheline aligned address variable * @return number of cacheline to be invalidated * * * addr * |--------size-------------| * |-----|-----|-----|-----|-----| * \ \ * head tail * * example 1: * addr = 0x100 (cacheline aligned), size = 64 * then head = 0x100, number of cache line to be invalidated = 64 / 32 = 2 * which means range [0x100, 0x140) will be invalidated * * example 2: * addr = 0x104 (cacheline unaligned), size = 64 * then head = 0x100, number of cache line to be invalidated = 1 + 64 / 32 = 3 * which means range [0x100, 0x160) will be invalidated */ static uint32_t get_n_cacheline(uint32_t addr, uint32_t size, uint32_t *p_head) { uint32_t n = 0; uint32_t tail; /* head */ *p_head = CACHE_ALIGNED_ADDR(addr); /* roundup the tail address */ tail = addr + size + (CACHE_LINE_SIZE - 1); tail = CACHE_ALIGNED_ADDR(tail); n = (tail - *p_head) >> CACHE_LINE_SIZE_LOG2; return n; } void cache_data_enable(void) { aspeed_cache_init(); } void cache_data_disable(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, 0); } void cache_instr_enable(void) { aspeed_cache_init(); } void cache_instr_disable(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, 0); } int cache_data_invd_all(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); uint32_t ctrl; unsigned int key = 0; syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl); /* enter critical section */ if (!k_is_in_isr()) { key = irq_lock(); } ctrl &= ~DCACHE_CLEAN; syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl); barrier_dsync_fence_full(); ctrl |= DCACHE_CLEAN; syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl); barrier_dsync_fence_full(); /* exit critical section */ if (!k_is_in_isr()) { irq_unlock(key); } return 0; } int cache_data_invd_range(void *addr, size_t 
size) { uint32_t aligned_addr, i, n; const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); unsigned int key = 0; if (((uint32_t)addr < CACHED_SRAM_ADDR) || ((uint32_t)addr > CACHED_SRAM_END)) { return 0; } /* enter critical section */ if (!k_is_in_isr()) { key = irq_lock(); } n = get_n_cacheline((uint32_t)addr, size, &aligned_addr); for (i = 0; i < n; i++) { syscon_write_reg(dev, CACHE_INVALID_REG, 0); syscon_write_reg(dev, CACHE_INVALID_REG, DCACHE_INVALID(aligned_addr)); aligned_addr += CACHE_LINE_SIZE; } barrier_dsync_fence_full(); /* exit critical section */ if (!k_is_in_isr()) { irq_unlock(key); } return 0; } int cache_instr_invd_all(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); uint32_t ctrl; unsigned int key = 0; syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl); /* enter critical section */ if (!k_is_in_isr()) { key = irq_lock(); } ctrl &= ~ICACHE_CLEAN; syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl); barrier_isync_fence_full(); ctrl |= ICACHE_CLEAN; syscon_write_reg(dev, CACHE_FUNC_CTRL_REG, ctrl); barrier_isync_fence_full(); /* exit critical section */ if (!k_is_in_isr()) { irq_unlock(key); } return 0; } int cache_instr_invd_range(void *addr, size_t size) { uint32_t aligned_addr, i, n; const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); unsigned int key = 0; if (((uint32_t)addr < CACHED_SRAM_ADDR) || ((uint32_t)addr > CACHED_SRAM_END)) { return 0; } n = get_n_cacheline((uint32_t)addr, size, &aligned_addr); /* enter critical section */ if (!k_is_in_isr()) { key = irq_lock(); } for (i = 0; i < n; i++) { syscon_write_reg(dev, CACHE_INVALID_REG, 0); syscon_write_reg(dev, CACHE_INVALID_REG, ICACHE_INVALID(aligned_addr)); aligned_addr += CACHE_LINE_SIZE; } barrier_dsync_fence_full(); /* exit critical section */ if (!k_is_in_isr()) { irq_unlock(key); } return 0; } int cache_data_flush_all(void) { return -ENOTSUP; } int cache_data_flush_and_invd_all(void) { return -ENOTSUP; } int 
cache_data_flush_range(void *addr, size_t size) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } int cache_data_flush_and_invd_range(void *addr, size_t size) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } int cache_instr_flush_all(void) { return -ENOTSUP; } int cache_instr_flush_and_invd_all(void) { return -ENOTSUP; } int cache_instr_flush_range(void *addr, size_t size) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } int cache_instr_flush_and_invd_range(void *addr, size_t size) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } #ifdef CONFIG_DCACHE_LINE_SIZE_DETECT size_t cache_data_line_size_get(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); uint32_t ctrl; syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl); return (ctrl & CACHE_ENABLE) ? CACHE_LINE_SIZE : 0; } #endif /* CONFIG_DCACHE_LINE_SIZE_DETECT */ #ifdef CONFIG_ICACHE_LINE_SIZE_DETECT size_t cache_instr_line_size_get(void) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(syscon)); uint32_t ctrl; syscon_read_reg(dev, CACHE_FUNC_CTRL_REG, &ctrl); return (ctrl & CACHE_ENABLE) ? CACHE_LINE_SIZE : 0; } #endif /* CONFIG_ICACHE_LINE_SIZE_DETECT */ ```
/content/code_sandbox/drivers/cache/cache_aspeed.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,181
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/drivers/cache.h> #include <hal/nrf_cache.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(cache_nrfx, CONFIG_CACHE_LOG_LEVEL); #if !defined(NRF_ICACHE) && defined(NRF_CACHE) #define NRF_ICACHE NRF_CACHE #endif #define CACHE_BUSY_RETRY_INTERVAL_US 10 enum k_nrf_cache_op { /* * Sequentially loop through all dirty lines and write those data units to * memory. * * This is FLUSH in Zephyr nomenclature. */ K_NRF_CACHE_CLEAN, /* * Mark all lines as invalid, ignoring any dirty data. * * This is INVALIDATE in Zephyr nomenclature. */ K_NRF_CACHE_INVD, /* * Clean followed by invalidate * * This is FLUSH_AND_INVALIDATE in Zephyr nomenclature. */ K_NRF_CACHE_FLUSH, }; static inline bool is_cache_busy(NRF_CACHE_Type *cache) { #if NRF_CACHE_HAS_STATUS return nrf_cache_busy_check(cache); #else return false; #endif } static inline void wait_for_cache(NRF_CACHE_Type *cache) { while (is_cache_busy(cache)) { } } static inline int _cache_all(NRF_CACHE_Type *cache, enum k_nrf_cache_op op) { /* * We really do not want to invalidate the whole cache. 
*/ if (op == K_NRF_CACHE_INVD) { return -ENOTSUP; } wait_for_cache(cache); switch (op) { #if NRF_CACHE_HAS_TASK_CLEAN case K_NRF_CACHE_CLEAN: nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANCACHE); break; #endif case K_NRF_CACHE_INVD: nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATECACHE); break; #if NRF_CACHE_HAS_TASK_FLUSH case K_NRF_CACHE_FLUSH: nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHCACHE); break; #endif default: break; } wait_for_cache(cache); return 0; } static inline void _cache_line(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, uintptr_t line_addr) { do { wait_for_cache(cache); nrf_cache_lineaddr_set(cache, line_addr); switch (op) { #if NRF_CACHE_HAS_TASK_CLEAN case K_NRF_CACHE_CLEAN: nrf_cache_task_trigger(cache, NRF_CACHE_TASK_CLEANLINE); break; #endif case K_NRF_CACHE_INVD: nrf_cache_task_trigger(cache, NRF_CACHE_TASK_INVALIDATELINE); break; #if NRF_CACHE_HAS_TASK_FLUSH case K_NRF_CACHE_FLUSH: nrf_cache_task_trigger(cache, NRF_CACHE_TASK_FLUSHLINE); break; #endif default: break; } } while (nrf_cache_lineaddr_get(cache) != line_addr); } static inline int _cache_range(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr, size_t size) { uintptr_t line_addr = (uintptr_t)addr; uintptr_t end_addr; /* Some SOCs has a bug that requires to set 28th bit in the address on * Trustzone secure builds. 
*/ if (IS_ENABLED(CONFIG_CACHE_NRF_PATCH_LINEADDR) && !IS_ENABLED(CONFIG_TRUSTED_EXECUTION_NONSECURE)) { line_addr |= BIT(28); } end_addr = line_addr + size; /* * Align address to line size */ line_addr &= ~(CONFIG_DCACHE_LINE_SIZE - 1); do { _cache_line(cache, op, line_addr); line_addr += CONFIG_DCACHE_LINE_SIZE; } while (line_addr < end_addr); wait_for_cache(cache); return 0; } static inline int _cache_checks(NRF_CACHE_Type *cache, enum k_nrf_cache_op op, void *addr, size_t size, bool is_range) { /* Check if the cache is enabled */ if (!(cache->ENABLE & CACHE_ENABLE_ENABLE_Enabled)) { return -EAGAIN; } if (!is_range) { return _cache_all(cache, op); } /* Check for invalid address or size */ if ((!addr) || (!size)) { return -EINVAL; } return _cache_range(cache, op, addr, size); } #if defined(NRF_DCACHE) && NRF_CACHE_HAS_TASKS void cache_data_enable(void) { nrf_cache_enable(NRF_DCACHE); } int cache_data_flush_all(void) { #if NRF_CACHE_HAS_TASK_CLEAN return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, NULL, 0, false); #else return -ENOTSUP; #endif } void cache_data_disable(void) { if (nrf_cache_enable_check(NRF_DCACHE)) { (void)cache_data_flush_all(); } nrf_cache_disable(NRF_DCACHE); } int cache_data_invd_all(void) { return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, NULL, 0, false); } int cache_data_flush_and_invd_all(void) { #if NRF_CACHE_HAS_TASK_FLUSH return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, NULL, 0, false); #else return -ENOTSUP; #endif } int cache_data_flush_range(void *addr, size_t size) { #if NRF_CACHE_HAS_TASK_CLEAN return _cache_checks(NRF_DCACHE, K_NRF_CACHE_CLEAN, addr, size, true); #else return -ENOTSUP; #endif } int cache_data_invd_range(void *addr, size_t size) { return _cache_checks(NRF_DCACHE, K_NRF_CACHE_INVD, addr, size, true); } int cache_data_flush_and_invd_range(void *addr, size_t size) { #if NRF_CACHE_HAS_TASK_FLUSH return _cache_checks(NRF_DCACHE, K_NRF_CACHE_FLUSH, addr, size, true); #else return -ENOTSUP; #endif } #else void 
cache_data_enable(void) { /* Nothing */ } void cache_data_disable(void) { /* Nothing */ } int cache_data_flush_all(void) { return -ENOTSUP; } int cache_data_invd_all(void) { return -ENOTSUP; } int cache_data_flush_and_invd_all(void) { return -ENOTSUP; } int cache_data_flush_range(void *addr, size_t size) { return -ENOTSUP; } int cache_data_invd_range(void *addr, size_t size) { return -ENOTSUP; } int cache_data_flush_and_invd_range(void *addr, size_t size) { return -ENOTSUP; } #endif /* NRF_DCACHE */ #if defined(NRF_ICACHE) && NRF_CACHE_HAS_TASKS void cache_instr_enable(void) { nrf_cache_enable(NRF_ICACHE); } void cache_instr_disable(void) { nrf_cache_disable(NRF_ICACHE); } int cache_instr_flush_all(void) { #if NRF_CACHE_HAS_TASK_CLEAN return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, NULL, 0, false); #else return -ENOTSUP; #endif } int cache_instr_invd_all(void) { return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, NULL, 0, false); } int cache_instr_flush_and_invd_all(void) { #if NRF_CACHE_HAS_TASK_FLUSH return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, NULL, 0, false); #else return -ENOTSUP; #endif } int cache_instr_flush_range(void *addr, size_t size) { #if NRF_CACHE_HAS_TASK_CLEAN return _cache_checks(NRF_ICACHE, K_NRF_CACHE_CLEAN, addr, size, true); #else return -ENOTSUP; #endif } int cache_instr_invd_range(void *addr, size_t size) { return _cache_checks(NRF_ICACHE, K_NRF_CACHE_INVD, addr, size, true); } int cache_instr_flush_and_invd_range(void *addr, size_t size) { #if NRF_CACHE_HAS_TASK_FLUSH return _cache_checks(NRF_ICACHE, K_NRF_CACHE_FLUSH, addr, size, true); #else return -ENOTSUP; #endif } #else void cache_instr_enable(void) { /* Nothing */ } void cache_instr_disable(void) { /* Nothing */ } int cache_instr_flush_all(void) { return -ENOTSUP; } int cache_instr_invd_all(void) { return -ENOTSUP; } int cache_instr_flush_and_invd_all(void) { return -ENOTSUP; } int cache_instr_flush_range(void *addr, size_t size) { return -ENOTSUP; } int 
cache_instr_invd_range(void *addr, size_t size) { return -ENOTSUP; } int cache_instr_flush_and_invd_range(void *addr, size_t size) { return -ENOTSUP; } #endif /* NRF_ICACHE */ ```
/content/code_sandbox/drivers/cache/cache_nrf.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,916
```unknown menuconfig CACHE bool "External cache controller drivers" default y if CACHE_MANAGEMENT help Enable support for external cache controllers drivers if CACHE config CACHE_HAS_DRIVER bool module = CACHE module-str = cache source "subsys/logging/Kconfig.template.log_config" comment "Device Drivers" source "drivers/cache/Kconfig.aspeed" source "drivers/cache/Kconfig.nrf" source "drivers/cache/Kconfig.andes" endif # CACHE ```
/content/code_sandbox/drivers/cache/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
97
```c /* * */ #include <zephyr/shell/shell.h> #include <stdlib.h> #include <zephyr/drivers/virtualization/ivshmem.h> static const struct device *ivshmem = DEVICE_DT_GET_ONE(qemu_ivshmem); #ifdef CONFIG_IVSHMEM_DOORBELL #define STACK_SIZE 512 static struct k_poll_signal doorbell_sig = K_POLL_SIGNAL_INITIALIZER(doorbell_sig); static struct k_poll_event doorbell_evt = K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &doorbell_sig); K_THREAD_STACK_DEFINE(doorbell_stack, STACK_SIZE); static bool doorbell_started; static struct k_thread doorbell_thread; static void doorbell_notification_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct shell *sh = p1; while (1) { unsigned int signaled; int vector; k_poll(&doorbell_evt, 1, K_FOREVER); k_poll_signal_check(&doorbell_sig, &signaled, &vector); if (signaled == 0) { continue; } shell_fprintf(sh, SHELL_NORMAL, "Received a notification on vector %u\n", (unsigned int)vector); k_poll_signal_init(&doorbell_sig); } } #endif /* CONFIG_IVSHMEM_DOORBELL */ static bool get_ivshmem(const struct shell *sh) { if (!device_is_ready(ivshmem)) { shell_error(sh, "IVshmem device is not ready"); return false; } return true; } static int cmd_ivshmem_shmem(const struct shell *sh, size_t argc, char **argv) { uintptr_t mem; size_t size; uint32_t id; uint16_t vectors; if (!get_ivshmem(sh)) { return 0; } size = ivshmem_get_mem(ivshmem, &mem); id = ivshmem_get_id(ivshmem); vectors = ivshmem_get_vectors(ivshmem); shell_fprintf(sh, SHELL_NORMAL, "IVshmem up and running: \n" "\tShared memory: 0x%lx of size %lu bytes\n" "\tPeer id: %u\n" "\tNotification vectors: %u\n", mem, size, id, vectors); return 0; } static int cmd_ivshmem_dump(const struct shell *sh, size_t argc, char **argv) { uintptr_t dump_pos; size_t dump_size; uintptr_t mem; size_t size; if (!get_ivshmem(sh)) { return 0; } dump_pos = strtol(argv[1], NULL, 10); dump_size = strtol(argv[2], NULL, 10); size = ivshmem_get_mem(ivshmem, &mem); if 
(dump_size > size) { shell_error(sh, "Size is too big"); } else if (dump_pos > size) { shell_error(sh, "Position is out of the shared memory"); } else if ((mem + dump_pos + dump_size) > (mem + size)) { shell_error(sh, "Position and size overflow"); } else { shell_hexdump(sh, (const uint8_t *)mem+dump_pos, dump_size); } return 0; } static int cmd_ivshmem_int(const struct shell *sh, size_t argc, char **argv) { int peer_id; int vector; int ret; if (!IS_ENABLED(CONFIG_IVSHMEM_DOORBELL)) { shell_error(sh, "CONFIG_IVSHMEM_DOORBELL is not enabled"); return 0; } if (!get_ivshmem(sh)) { return 0; } peer_id = strtol(argv[1], NULL, 10); vector = strtol(argv[2], NULL, 10); ret = ivshmem_int_peer(ivshmem, (uint16_t)peer_id, (uint16_t)vector); if (ret != 0) { shell_error(sh, "Could not notify peer %u on %u. status %d", peer_id, vector, ret); return -EIO; } shell_fprintf(sh, SHELL_NORMAL, "Notification sent to peer %u on vector %u\n", peer_id, vector); return 0; } static int cmd_ivshmem_get_notified(const struct shell *sh, size_t argc, char **argv) { #ifdef CONFIG_IVSHMEM_DOORBELL int vector; if (!get_ivshmem(sh)) { return 0; } vector = strtol(argv[1], NULL, 10); if (ivshmem_register_handler(ivshmem, &doorbell_sig, (uint16_t)vector)) { shell_error(sh, "Could not get notifications on vector %u", vector); return -EIO; } shell_fprintf(sh, SHELL_NORMAL, "Notifications enabled for vector %u\n", vector); if (!doorbell_started) { k_tid_t tid; tid = k_thread_create( &doorbell_thread, doorbell_stack, STACK_SIZE, doorbell_notification_thread, (void *)sh, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); if (!tid) { shell_error(sh, "Cannot start notification thread"); return -ENOEXEC; } k_thread_name_set(tid, "notification_thread"); k_thread_start(tid); doorbell_started = true; } #else shell_error(sh, "CONFIG_IVSHMEM_DOORBELL is not enabled"); #endif return 0; } SHELL_STATIC_SUBCMD_SET_CREATE(sub_ivshmem_cmds, SHELL_CMD(shmem, NULL, "Show shared memory info", cmd_ivshmem_shmem), 
SHELL_CMD_ARG(dump, NULL, "Dump shared memory content", cmd_ivshmem_dump, 3, 0), SHELL_CMD_ARG(int_peer, NULL, "Notify a vector on a peer", cmd_ivshmem_int, 3, 0), SHELL_CMD_ARG(get_notified, NULL, "Get notification on vector", cmd_ivshmem_get_notified, 2, 0), SHELL_SUBCMD_SET_END ); SHELL_CMD_REGISTER(ivshmem, &sub_ivshmem_cmds, "IVshmem information", cmd_ivshmem_shmem); ```
/content/code_sandbox/drivers/virtualization/virt_ivshmem_shell.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,411
```c /* * */ #include "soc_v5.h" #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/arch/riscv/csr.h> #include <zephyr/drivers/cache.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(cache_andes, CONFIG_CACHE_LOG_LEVEL); /* L1 CCTL Command */ #define CCTL_L1D_VA_INVAL 0 #define CCTL_L1D_VA_WB 1 #define CCTL_L1D_VA_WBINVAL 2 #define CCTL_L1D_WBINVAL_ALL 6 #define CCTL_L1D_WB_ALL 7 #define CCTL_L1I_VA_INVAL 8 #define CCTL_L1D_INVAL_ALL 23 #define CCTL_L1I_IX_INVAL 24 /* mcache_ctl bitfield */ #define MCACHE_CTL_IC_EN BIT(0) #define MCACHE_CTL_DC_EN BIT(1) #define MCACHE_CTL_CCTL_SUEN BIT(8) #define MCACHE_CTL_DC_COHEN BIT(19) #define MCACHE_CTL_DC_COHSTA BIT(20) /* micm_cfg bitfield */ #define MICM_CFG_ISET BIT_MASK(3) #define MICM_CFG_IWAY_SHIFT 3 #define MICM_CFG_ISZ_SHIFT 6 /* mdcm_cfg bitfield */ #define MDCM_CFG_DSZ_SHIFT 6 /* mmsc_cfg bitfield */ #define MMSC_CFG_CCTLCSR BIT(16) #define MMSC_CFG_VCCTL_2 BIT(19) #define MMSC_CFG_MSC_EXT BIT(31) #define MMSC_CFG_RVARCH BIT64(52) /* mmsc_cfg2 bitfield */ #define MMSC_CFG2_RVARCH BIT(20) /* mrvarch_cfg bitfield */ #define MRVARCH_CFG_SMEPMP BIT(4) #define K_CACHE_WB BIT(0) #define K_CACHE_INVD BIT(1) #define K_CACHE_WB_INVD (K_CACHE_WB | K_CACHE_INVD) struct cache_config { uint32_t instr_line_size; uint32_t data_line_size; uint32_t l2_cache_size; uint32_t l2_cache_inclusive; }; static struct cache_config cache_cfg; static struct k_spinlock lock; #if DT_NODE_HAS_COMPAT_STATUS(DT_INST(0, andestech_l2c), andestech_l2c, okay) #include "cache_andes_l2.h" #else static ALWAYS_INLINE void nds_l2_cache_enable(void) { } static ALWAYS_INLINE void nds_l2_cache_disable(void) { } static ALWAYS_INLINE int nds_l2_cache_range(void *addr, size_t size, int op) { return 0; } static ALWAYS_INLINE int nds_l2_cache_all(int op) { return 0; } static ALWAYS_INLINE int nds_l2_cache_is_inclusive(void) { return 0; } static ALWAYS_INLINE int nds_l2_cache_init(void) { return 0; } #endif /* 
DT_NODE_HAS_COMPAT_STATUS(DT_INST(0, andestech_l2c), andestech_l2c, okay) */ static ALWAYS_INLINE int nds_cctl_range_operations(void *addr, size_t size, int line_size, int cmd) { unsigned long last_byte, align_addr; unsigned long status = csr_read(mstatus); last_byte = (unsigned long)addr + size - 1; align_addr = ROUND_DOWN(addr, line_size); /* * In memory access privilige U mode, applications should use ucctl CSRs * for VA type commands. */ if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) { while (align_addr <= last_byte) { csr_write(NDS_UCCTLBEGINADDR, align_addr); csr_write(NDS_UCCTLCOMMAND, cmd); align_addr += line_size; } } else { while (align_addr <= last_byte) { csr_write(NDS_MCCTLBEGINADDR, align_addr); csr_write(NDS_MCCTLCOMMAND, cmd); align_addr += line_size; } } return 0; } static ALWAYS_INLINE int nds_l1i_cache_all(int op) { unsigned long sets, ways, end; unsigned long status = csr_read(mstatus); if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) { /* * In memory access privilige U mode, applications can only use * VA type commands for specific range. */ if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) { return -ENOTSUP; } } if (op == K_CACHE_INVD) { sets = 0x40 << (csr_read(NDS_MICM_CFG) & MICM_CFG_ISET); ways = ((csr_read(NDS_MICM_CFG) >> MICM_CFG_IWAY_SHIFT) & BIT_MASK(3)) + 1; end = ways * sets * cache_cfg.instr_line_size; for (int i = 0; i < end; i += cache_cfg.instr_line_size) { csr_write(NDS_MCCTLBEGINADDR, i); csr_write(NDS_MCCTLCOMMAND, CCTL_L1I_IX_INVAL); } } return 0; } static ALWAYS_INLINE int nds_l1d_cache_all(int op) { unsigned long status = csr_read(mstatus); if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) { /* * In memory access privilige U mode, applications can only use * VA type commands for specific range. 
*/ if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) { return -ENOTSUP; } } switch (op) { case K_CACHE_WB: csr_write(NDS_MCCTLCOMMAND, CCTL_L1D_WB_ALL); break; case K_CACHE_INVD: csr_write(NDS_MCCTLCOMMAND, CCTL_L1D_INVAL_ALL); break; case K_CACHE_WB_INVD: csr_write(NDS_MCCTLCOMMAND, CCTL_L1D_WBINVAL_ALL); break; default: return -ENOTSUP; } return 0; } static ALWAYS_INLINE int nds_l1i_cache_range(void *addr, size_t size, int op) { unsigned long cmd; if (op == K_CACHE_INVD) { cmd = CCTL_L1I_VA_INVAL; nds_cctl_range_operations(addr, size, cache_cfg.instr_line_size, cmd); } return 0; } static ALWAYS_INLINE int nds_l1d_cache_range(void *addr, size_t size, int op) { unsigned long cmd; switch (op) { case K_CACHE_WB: cmd = CCTL_L1D_VA_WB; break; case K_CACHE_INVD: cmd = CCTL_L1D_VA_INVAL; break; case K_CACHE_WB_INVD: cmd = CCTL_L1D_VA_WBINVAL; break; default: return -ENOTSUP; } nds_cctl_range_operations(addr, size, cache_cfg.data_line_size, cmd); return 0; } void cache_data_enable(void) { if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) { return; } K_SPINLOCK(&lock) { nds_l2_cache_enable(); /* Enable D-cache coherence management */ csr_set(NDS_MCACHE_CTL, MCACHE_CTL_DC_COHEN); /* Check if CPU support CM or not. 
*/ if (csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHEN) { /* Wait for cache coherence enabling completed */ while (!(csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHSTA)) { ; } } /* Enable D-cache */ csr_set(NDS_MCACHE_CTL, MCACHE_CTL_DC_EN); } } void cache_data_disable(void) { unsigned long status = csr_read(mstatus); if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) { return; } if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) { if ((status & MSTATUS_MPRV) && !(status & MSTATUS_MPP)) { if (!cache_cfg.l2_cache_inclusive) { return; } } } K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_inclusive) { nds_l2_cache_all(K_CACHE_WB_INVD); } else { nds_l1d_cache_all(K_CACHE_WB_INVD); nds_l2_cache_all(K_CACHE_WB_INVD); } csr_clear(NDS_MCACHE_CTL, MCACHE_CTL_DC_EN); /* Check if CPU support CM or not. */ if (csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHSTA) { csr_clear(NDS_MCACHE_CTL, MCACHE_CTL_DC_COHEN); /* Wait for cache coherence disabling completed */ while (csr_read(NDS_MCACHE_CTL) & MCACHE_CTL_DC_COHSTA) { ; } } nds_l2_cache_disable(); } } void cache_instr_enable(void) { if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) { return; } csr_set(NDS_MCACHE_CTL, MCACHE_CTL_IC_EN); } void cache_instr_disable(void) { if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) { return; } csr_clear(NDS_MCACHE_CTL, MCACHE_CTL_IC_EN); } int cache_data_invd_all(void) { unsigned long ret = 0; K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_inclusive) { ret |= nds_l2_cache_all(K_CACHE_WB); ret |= nds_l2_cache_all(K_CACHE_INVD); } else { ret |= nds_l1d_cache_all(K_CACHE_WB); ret |= nds_l2_cache_all(K_CACHE_WB); ret |= nds_l2_cache_all(K_CACHE_INVD); ret |= nds_l1d_cache_all(K_CACHE_INVD); } } return ret; } int cache_data_invd_range(void *addr, size_t size) { unsigned long ret = 0; K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_inclusive) { ret |= nds_l2_cache_range(addr, size, K_CACHE_INVD); } else { ret |= nds_l2_cache_range(addr, size, K_CACHE_INVD); ret |= nds_l1d_cache_range(addr, 
size, K_CACHE_INVD); } } return ret; } int cache_instr_invd_all(void) { unsigned long ret = 0; if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) { return -ENOTSUP; } if (IS_ENABLED(CONFIG_RISCV_PMP)) { /* CCTL IX type command is not to RISC-V Smepmp */ if (IS_ENABLED(CONFIG_64BIT)) { if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_RVARCH) { if (csr_read(NDS_MRVARCH_CFG) & MRVARCH_CFG_SMEPMP) { return -ENOTSUP; } } } else { if ((csr_read(NDS_MMSC_CFG) & MMSC_CFG_MSC_EXT) && (csr_read(NDS_MMSC_CFG2) & MMSC_CFG2_RVARCH)) { if (csr_read(NDS_MRVARCH_CFG) & MRVARCH_CFG_SMEPMP) { return -ENOTSUP; } } } } K_SPINLOCK(&lock) { ret |= nds_l1i_cache_all(K_CACHE_INVD); } return ret; } int cache_instr_invd_range(void *addr, size_t size) { unsigned long ret = 0; if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } K_SPINLOCK(&lock) { ret |= nds_l1i_cache_range(addr, size, K_CACHE_INVD); } return ret; } int cache_data_flush_all(void) { unsigned long ret = 0; K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_inclusive) { ret |= nds_l2_cache_all(K_CACHE_WB); } else { ret |= nds_l1d_cache_all(K_CACHE_WB); ret |= nds_l2_cache_all(K_CACHE_WB); } } return ret; } int cache_data_flush_range(void *addr, size_t size) { unsigned long ret = 0; K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_inclusive) { ret |= nds_l2_cache_range(addr, size, K_CACHE_WB); } else { ret |= nds_l1d_cache_range(addr, size, K_CACHE_WB); ret |= nds_l2_cache_range(addr, size, K_CACHE_WB); } } return ret; } int cache_data_flush_and_invd_all(void) { unsigned long ret = 0; K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_size) { if (cache_cfg.l2_cache_inclusive) { ret |= nds_l2_cache_all(K_CACHE_WB_INVD); } else { ret |= nds_l1d_cache_all(K_CACHE_WB); ret |= nds_l2_cache_all(K_CACHE_WB_INVD); ret |= nds_l1d_cache_all(K_CACHE_INVD); } } else { ret |= nds_l1d_cache_all(K_CACHE_WB_INVD); } } return ret; } int cache_data_flush_and_invd_range(void *addr, size_t size) { unsigned 
long ret = 0; K_SPINLOCK(&lock) { if (cache_cfg.l2_cache_size) { if (cache_cfg.l2_cache_inclusive) { ret |= nds_l2_cache_range(addr, size, K_CACHE_WB_INVD); } else { ret |= nds_l1d_cache_range(addr, size, K_CACHE_WB); ret |= nds_l2_cache_range(addr, size, K_CACHE_WB_INVD); ret |= nds_l1d_cache_range(addr, size, K_CACHE_INVD); } } else { ret |= nds_l1d_cache_range(addr, size, K_CACHE_WB_INVD); } } return ret; } int cache_instr_flush_all(void) { return -ENOTSUP; } int cache_instr_flush_and_invd_all(void) { return -ENOTSUP; } int cache_instr_flush_range(void *addr, size_t size) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } int cache_instr_flush_and_invd_range(void *addr, size_t size) { ARG_UNUSED(addr); ARG_UNUSED(size); return -ENOTSUP; } #if defined(CONFIG_DCACHE_LINE_SIZE_DETECT) size_t cache_data_line_size_get(void) { return cache_cfg.data_line_size; } #endif /* defined(CONFIG_DCACHE_LINE_SIZE_DETECT) */ #if defined(CONFIG_ICACHE_LINE_SIZE_DETECT) size_t cache_instr_line_size_get(void) { return cache_cfg.instr_line_size; } #endif /* defined(CONFIG_ICACHE_LINE_SIZE_DETECT) */ static int andes_cache_init(void) { unsigned long line_size; if (IS_ENABLED(CONFIG_ICACHE)) { line_size = (csr_read(NDS_MICM_CFG) >> MICM_CFG_ISZ_SHIFT) & BIT_MASK(3); if (line_size == 0) { LOG_ERR("Platform doesn't support I-cache, " "please disable CONFIG_ICACHE"); } #if defined(CONFIG_ICACHE_LINE_SIZE_DETECT) /* Icache line size */ if (line_size <= 5) { cache_cfg.instr_line_size = 1 << (line_size + 2); } else { LOG_ERR("Unknown line size of I-cache"); } #elif (CONFIG_ICACHE_LINE_SIZE != 0) cache_cfg.instr_line_size = CONFIG_ICACHE_LINE_SIZE; #elif DT_NODE_HAS_PROP(DT_PATH(cpus, cpu_0), i_cache_line_size) cache_cfg.instr_line_size = DT_PROP(DT_PATH(cpus, cpu_0), "i_cache_line_size"); #else LOG_ERR("Please specific the i-cache-line-size " "CPU0 property of the DT"); #endif /* defined(CONFIG_ICACHE_LINE_SIZE_DETECT) */ } if (IS_ENABLED(CONFIG_DCACHE)) { line_size = 
(csr_read(NDS_MDCM_CFG) >> MDCM_CFG_DSZ_SHIFT) & BIT_MASK(3); if (line_size == 0) { LOG_ERR("Platform doesn't support D-cache, " "please disable CONFIG_DCACHE"); } #if defined(CONFIG_DCACHE_LINE_SIZE_DETECT) /* Dcache line size */ if (line_size <= 5) { cache_cfg.data_line_size = 1 << (line_size + 2); } else { LOG_ERR("Unknown line size of D-cache"); } #elif (CONFIG_DCACHE_LINE_SIZE != 0) cache_cfg.data_line_size = CONFIG_DCACHE_LINE_SIZE; #elif DT_NODE_HAS_PROP(DT_PATH(cpus, cpu_0), d_cache_line_size) cache_cfg.data_line_size = DT_PROP(DT_PATH(cpus, cpu_0), "d_cache_line_size"); #else LOG_ERR("Please specific the d-cache-line-size " "CPU0 property of the DT"); #endif /* defined(CONFIG_DCACHE_LINE_SIZE_DETECT) */ } if (!(csr_read(NDS_MMSC_CFG) & MMSC_CFG_CCTLCSR)) { LOG_ERR("Platform doesn't support I/D cache operation"); } if (csr_read(NDS_MMSC_CFG) & MMSC_CFG_VCCTL_2) { if (IS_ENABLED(CONFIG_PMP_STACK_GUARD)) { csr_set(NDS_MCACHE_CTL, MCACHE_CTL_CCTL_SUEN); } } cache_cfg.l2_cache_size = nds_l2_cache_init(); cache_cfg.l2_cache_inclusive = nds_l2_cache_is_inclusive(); return 0; } SYS_INIT(andes_cache_init, PRE_KERNEL_1, CONFIG_CACHE_ANDES_INIT_PRIORITY); ```
/content/code_sandbox/drivers/cache/cache_andes.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,107
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_VIRTUALIZATION_VIRT_IVSHMEM_H_ #define ZEPHYR_DRIVERS_VIRTUALIZATION_VIRT_IVSHMEM_H_ #include <zephyr/drivers/pcie/pcie.h> #ifdef CONFIG_IVSHMEM_DOORBELL #include <zephyr/drivers/pcie/msi.h> #endif #define PCIE_CONF_CMDSTAT_INTX_DISABLE 0x0400 #define PCIE_CONF_INTR_PIN(x) (((x) >> 8) & 0xFFu) #define IVSHMEM_CFG_ID 0x00 #define IVSHMEM_CFG_NEXT_CAP 0x01 #define IVSHMEM_CFG_LENGTH 0x02 #define IVSHMEM_CFG_PRIV_CNTL 0x03 #define IVSHMEM_PRIV_CNTL_ONESHOT_INT BIT(0) #define IVSHMEM_CFG_STATE_TAB_SZ 0x04 #define IVSHMEM_CFG_RW_SECTION_SZ 0x08 #define IVSHMEM_CFG_OUTPUT_SECTION_SZ 0x10 #define IVSHMEM_CFG_ADDRESS 0x18 #define IVSHMEM_INT_ENABLE BIT(0) #define IVSHMEM_PCIE_REG_BAR_IDX 0 #define IVSHMEM_PCIE_MSI_X_BAR_IDX 1 #define IVSHMEM_PCIE_SHMEM_BAR_IDX 2 #define PCIE_INTX_PIN_MIN 1 #define PCIE_INTX_PIN_MAX 4 #define INTX_IRQ_UNUSED UINT32_MAX struct ivshmem_param { const struct device *dev; struct k_poll_signal *signal; uint8_t vector; }; struct ivshmem { DEVICE_MMIO_RAM; struct pcie_dev *pcie; uintptr_t shmem; size_t size; #ifdef CONFIG_IVSHMEM_DOORBELL msi_vector_t vectors[CONFIG_IVSHMEM_MSI_X_VECTORS]; struct ivshmem_param params[CONFIG_IVSHMEM_MSI_X_VECTORS]; uint16_t n_vectors; #endif #ifdef CONFIG_IVSHMEM_V2 bool ivshmem_v2; uint32_t max_peers; size_t rw_section_size; size_t output_section_size; uintptr_t state_table_shmem; uintptr_t rw_section_shmem; uintptr_t output_section_shmem[CONFIG_IVSHMEM_V2_MAX_PEERS]; #endif }; struct ivshmem_reg { uint32_t int_mask; uint32_t int_status; uint32_t iv_position; uint32_t doorbell; }; #ifdef CONFIG_IVSHMEM_V2 struct ivshmem_v2_reg { uint32_t id; uint32_t max_peers; uint32_t int_control; uint32_t doorbell; uint32_t state; }; struct ivshmem_cfg { struct intx_info { uint32_t irq; uint32_t priority; uint32_t flags; } intx_info[PCIE_INTX_PIN_MAX]; }; #endif /* CONFIG_IVSHMEM_V2 */ #define IVSHMEM_GEN_DOORBELL(i, v) ((i << 16) | (v & 0xFFFF)) #endif /* 
ZEPHYR_DRIVERS_VIRTUALIZATION_VIRT_IVSHMEM_H_ */ ```
/content/code_sandbox/drivers/virtualization/virt_ivshmem.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
656
```c /* * */ #include <zephyr/drivers/virtualization/ivshmem.h> #include <zephyr/internal/syscall_handler.h> #include <string.h> static inline size_t z_vrfy_ivshmem_get_mem(const struct device *dev, uintptr_t *memmap) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_mem)); K_OOPS(K_SYSCALL_MEMORY_WRITE(memmap, sizeof(uintptr_t))); return z_impl_ivshmem_get_mem(dev, memmap); } #include <zephyr/syscalls/ivshmem_get_mem_mrsh.c> static inline uint32_t z_vrfy_ivshmem_get_id(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_id)); return z_impl_ivshmem_get_id(dev); } #include <zephyr/syscalls/ivshmem_get_id_mrsh.c> static inline uint16_t z_vrfy_ivshmem_get_vectors(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_vectors)); return z_impl_ivshmem_get_vectors(dev); } #include <zephyr/syscalls/ivshmem_get_vectors_mrsh.c> static inline int z_vrfy_ivshmem_int_peer(const struct device *dev, uint32_t peer_id, uint16_t vector) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, int_peer)); return z_impl_ivshmem_int_peer(dev, peer_id, vector); } #include <zephyr/syscalls/ivshmem_int_peer_mrsh.c> static inline int z_vrfy_ivshmem_register_handler(const struct device *dev, struct k_poll_signal *signal, uint16_t vector) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, register_handler)); K_OOPS(K_SYSCALL_OBJ(signal, K_OBJ_POLL_SIGNAL)); return z_impl_ivshmem_register_handler(dev, signal, vector); } #include <zephyr/syscalls/ivshmem_register_handler_mrsh.c> #ifdef CONFIG_IVSHMEM_V2 static inline size_t z_vrfy_ivshmem_get_rw_mem_section(const struct device *dev, uintptr_t *memmap) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_rw_mem_section)); K_OOPS(K_SYSCALL_MEMORY_WRITE(memmap, sizeof(uintptr_t))); return z_impl_ivshmem_get_rw_mem_section(dev, memmap); } #include <zephyr/syscalls/ivshmem_get_rw_mem_section_mrsh.c> static inline size_t z_vrfy_ivshmem_get_output_mem_section(const struct device *dev, uint32_t peer_id, uintptr_t *memmap) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, 
get_output_mem_section)); K_OOPS(K_SYSCALL_MEMORY_WRITE(memmap, sizeof(uintptr_t))); return z_impl_ivshmem_get_output_mem_section(dev, peer_id, memmap); } #include <zephyr/syscalls/ivshmem_get_output_mem_section_mrsh.c> static inline uint32_t z_vrfy_ivshmem_get_state(const struct device *dev, uint32_t peer_id) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_state)); return z_impl_ivshmem_get_state(dev, peer_id); } #include <zephyr/syscalls/ivshmem_get_state_mrsh.c> static inline int z_vrfy_ivshmem_set_state(const struct device *dev, uint32_t state) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, set_state)); return z_impl_ivshmem_set_state(dev, state); } #include <zephyr/syscalls/ivshmem_set_state_mrsh.c> static inline uint32_t z_vrfy_ivshmem_get_max_peers(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_max_peers)); return z_impl_ivshmem_get_max_peers(dev); } #include <zephyr/syscalls/ivshmem_get_max_peers_mrsh.c> static inline uint16_t z_vrfy_ivshmem_get_protocol(const struct device *dev) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, get_protocol)); return z_impl_ivshmem_get_protocol(dev); } #include <zephyr/syscalls/ivshmem_get_protocol_mrsh.c> static inline int z_vrfy_ivshmem_enable_interrupts(const struct device *dev, bool enable) { K_OOPS(K_SYSCALL_DRIVER_IVSHMEM(dev, enable_interrupts)); return z_impl_ivshmem_enable_interrupts(dev, enable); } #include <zephyr/syscalls/ivshmem_enable_interrupts_mrsh.c> #endif /* CONFIG_IVSHMEM_V2 */ ```
/content/code_sandbox/drivers/virtualization/virt_ivshmem_handlers.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
999
```unknown # Virtualization drivers configuration options menuconfig VIRTUALIZATION bool "Virtualization guest drivers" help This contains various drivers meant to support and expose features when Zephyr is running as a guest in a virtualized or emulated environment. if VIRTUALIZATION config IVSHMEM bool "Inter-VM shared memory device (ivshmem)" depends on PCIE help This will enable support of qemu's ivshmem device, which is also present in ACRN hypervisor, and lets VM sharing memory with each other. if IVSHMEM module = IVSHMEM module-str = ivshmem source "subsys/logging/Kconfig.template.log_config" config IVSHMEM_DOORBELL bool "Support interrupt based ivshmem (doorbell version)" help This will enable support of ivshmem-doorbell, i.e. the interrupt based ivshmem. For ivshmem-v2 INTx interrupts are also supported. config IVSHMEM_MSI_X_VECTORS int "How many notification vectors should be pre-allocated?" default 2 depends on IVSHMEM_DOORBELL help MSI-X vector holders must be pre-allocated. One can pre-allocate more or less than necessary. Depends on how many VMs will connect with each other. These are know to be the notification vectors in ivshmem. config IVSHMEM_INT_PRIORITY int "MSI-X interrupt priority" default 2 depends on IVSHMEM_DOORBELL help Interrupt priority used for the MSI-X generated interrupts. INTx interrupt priority is configured in the device tree. config IVSHMEM_SHELL bool "IVshmem shell module" depends on SHELL help This is mostly a module to help getting info the ivshmem and/or quickly testing your ivshmem client application (from another VM). endif # IVSHMEM config IVSHMEM_V2 bool "Inter-VM shared memory v2 (ivshmem-v2)" select IVSHMEM help Enable ivshmem-v2 support. ivshmem-v2 is primarily used for IPC in the Jailhouse hypervisor. config IVSHMEM_V2_MAX_PEERS int "Maximum number of ivshmem-v2 peers" depends on IVSHMEM_V2 default 2 range 2 65536 endif # VIRTUALIZATION ```
/content/code_sandbox/drivers/virtualization/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
524
```unknown # SAM CAN configuration options config CAN_SAM bool "Atmel SAM CAN driver" default y depends on DT_HAS_ATMEL_SAM_CAN_ENABLED select CAN_MCAN ```
/content/code_sandbox/drivers/can/Kconfig.sam
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
41