text
stringlengths
9
39.2M
dir
stringlengths
25
226
lang
stringclasses
163 values
created_date
timestamp[s]
updated_date
timestamp[s]
repo_name
stringclasses
751 values
repo_full_name
stringclasses
752 values
star
int64
1.01k
183k
len_tokens
int64
1
18.5M
```c /** * */ #define LOG_LEVEL CONFIG_WIFI_LOG_LEVEL #include "eswifi_log.h" LOG_MODULE_DECLARE(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include "eswifi.h" static int eswifi_off_bind(struct net_context *context, const struct sockaddr *addr, socklen_t addrlen) { struct eswifi_off_socket *socket = context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(context->iface); int err; LOG_DBG(""); eswifi_lock(eswifi); err = __eswifi_bind(eswifi, socket, addr, addrlen); eswifi_unlock(eswifi); return err; } static int eswifi_off_listen(struct net_context *context, int backlog) { struct eswifi_off_socket *socket = context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(context->iface); int err; LOG_DBG("Listening backlog=%d", backlog); eswifi_lock(eswifi); __select_socket(eswifi, socket->index); /* Set backlog */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P8=%d\r", backlog); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to start set listen backlog"); err = -EIO; } socket->is_server = true; eswifi_unlock(eswifi); return err; } static void eswifi_off_connect_work(struct k_work *work) { struct eswifi_off_socket *socket; net_context_connect_cb_t cb; struct net_context *context; struct eswifi_dev *eswifi; void *user_data; int err; socket = CONTAINER_OF(work, struct eswifi_off_socket, connect_work); eswifi = eswifi_socket_to_dev(socket); eswifi_lock(eswifi); cb = socket->conn_cb; context = socket->context; user_data = socket->conn_data; err = __eswifi_off_start_client(eswifi, socket); if (!err) { socket->state = ESWIFI_SOCKET_STATE_CONNECTED; net_context_set_state(socket->context, NET_CONTEXT_CONNECTED); } else { socket->state = ESWIFI_SOCKET_STATE_NONE; } eswifi_unlock(eswifi); if (cb) { cb(context, err, user_data); } } static int 
eswifi_off_connect(struct net_context *context, const struct sockaddr *addr, socklen_t addrlen, net_context_connect_cb_t cb, int32_t timeout, void *user_data) { struct eswifi_off_socket *socket = context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(context->iface); int err; LOG_DBG("timeout=%d", timeout); if (addr->sa_family != AF_INET) { LOG_ERR("Only AF_INET is supported!"); return -EPFNOSUPPORT; } eswifi_lock(eswifi); if (socket->state != ESWIFI_SOCKET_STATE_NONE) { eswifi_unlock(eswifi); return -EBUSY; } socket->peer_addr = *addr; socket->conn_data = user_data; socket->conn_cb = cb; socket->state = ESWIFI_SOCKET_STATE_CONNECTING; if (timeout == 0) { /* async */ k_work_submit_to_queue(&eswifi->work_q, &socket->connect_work); eswifi_unlock(eswifi); return 0; } err = __eswifi_off_start_client(eswifi, socket); if (!err) { socket->state = ESWIFI_SOCKET_STATE_CONNECTED; } else { socket->state = ESWIFI_SOCKET_STATE_NONE; } eswifi_unlock(eswifi); if (cb) { cb(context, err, user_data); } return err; } static int eswifi_off_accept(struct net_context *context, net_tcp_accept_cb_t cb, int32_t timeout, void *user_data) { struct eswifi_off_socket *socket = context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(context->iface); int ret; eswifi_lock(eswifi); ret = __eswifi_accept(eswifi, socket); if (ret < 0) { eswifi_unlock(eswifi); return ret; } socket->accept_cb = cb; socket->accept_data = user_data; k_sem_reset(&socket->accept_sem); eswifi_unlock(eswifi); if (timeout == 0) { return 0; } return k_sem_take(&socket->accept_sem, K_MSEC(timeout)); } static int __eswifi_off_send_pkt(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket) { struct net_pkt *pkt = socket->tx_pkt; unsigned int bytes; int err, offset; LOG_DBG(""); if (!pkt) { return -EINVAL; } bytes = net_pkt_get_len(pkt); __select_socket(eswifi, socket->index); /* header */ snprintk(eswifi->buf, sizeof(eswifi->buf), "S3=%u\r", bytes); offset = strlen(eswifi->buf); /* copy 
payload */ if (net_pkt_read(pkt, &eswifi->buf[offset], bytes)) { return -ENOBUFS; } offset += bytes; err = eswifi_request(eswifi, eswifi->buf, offset + 1, eswifi->buf, sizeof(eswifi->buf)); if (err < 0) { LOG_ERR("Unable to send data"); return -EIO; } net_pkt_unref(pkt); return 0; } static void eswifi_off_send_work(struct k_work *work) { struct eswifi_off_socket *socket; net_context_send_cb_t cb; struct net_context *context; struct eswifi_dev *eswifi; void *user_data; int err; socket = CONTAINER_OF(work, struct eswifi_off_socket, send_work); eswifi = eswifi_socket_to_dev(socket); eswifi_lock(eswifi); user_data = socket->send_data; cb = socket->send_cb; context = socket->context; err = __eswifi_off_send_pkt(eswifi, socket); socket->tx_pkt = NULL; eswifi_unlock(eswifi); if (cb) { cb(context, err, user_data); } } static int eswifi_off_send(struct net_pkt *pkt, net_context_send_cb_t cb, int32_t timeout, void *user_data) { struct eswifi_off_socket *socket = pkt->context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(socket->context->iface); int err; LOG_DBG("timeout=%d", timeout); eswifi_lock(eswifi); if (socket->state != ESWIFI_SOCKET_STATE_CONNECTED) { eswifi_unlock(eswifi); return -ENOTCONN; } if (socket->tx_pkt) { eswifi_unlock(eswifi); return -EBUSY; } socket->tx_pkt = pkt; if (timeout == 0) { socket->send_data = user_data; socket->send_cb = cb; k_work_submit_to_queue(&eswifi->work_q, &socket->send_work); eswifi_unlock(eswifi); return 0; } err = __eswifi_off_send_pkt(eswifi, socket); socket->tx_pkt = NULL; eswifi_unlock(eswifi); if (cb) { cb(socket->context, err, user_data); } return err; } static int eswifi_off_sendto(struct net_pkt *pkt, const struct sockaddr *dst_addr, socklen_t addrlen, net_context_send_cb_t cb, int32_t timeout, void *user_data) { struct eswifi_off_socket *socket = pkt->context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(socket->context->iface); int err; LOG_DBG("timeout=%d", timeout); 
eswifi_lock(eswifi); if (socket->tx_pkt) { eswifi_unlock(eswifi); return -EBUSY; } socket->tx_pkt = pkt; if (socket->state != ESWIFI_SOCKET_STATE_CONNECTED) { socket->peer_addr = *dst_addr; err = __eswifi_off_start_client(eswifi, socket); if (err < 0) { eswifi_unlock(eswifi); return err; } socket->state = ESWIFI_SOCKET_STATE_CONNECTED; } if (timeout == 0) { socket->send_data = user_data; socket->send_cb = cb; k_work_submit_to_queue(&eswifi->work_q, &socket->send_work); eswifi_unlock(eswifi); return 0; } err = __eswifi_off_send_pkt(eswifi, socket); socket->tx_pkt = NULL; eswifi_unlock(eswifi); if (cb) { cb(socket->context, err, user_data); } return err; } static int eswifi_off_recv(struct net_context *context, net_context_recv_cb_t cb, int32_t timeout, void *user_data) { struct eswifi_off_socket *socket = context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(context->iface); int err; LOG_DBG(""); eswifi_lock(eswifi); socket->recv_cb = cb; socket->recv_data = user_data; k_sem_reset(&socket->read_sem); eswifi_unlock(eswifi); if (timeout == 0) { return 0; } err = k_sem_take(&socket->read_sem, K_MSEC(timeout)); /* Unregister callback */ eswifi_lock(eswifi); socket->recv_cb = NULL; eswifi_unlock(eswifi); return err; } static int eswifi_off_put(struct net_context *context) { struct eswifi_off_socket *socket = context->offload_context; struct eswifi_dev *eswifi = eswifi_by_iface_idx(context->iface); int ret; LOG_DBG(""); eswifi_lock(eswifi); ret = __eswifi_socket_free(eswifi, socket); if (ret) { goto done; } if (--socket->usage <= 0) { socket->context = NULL; } done: eswifi_unlock(eswifi); return ret; } static int eswifi_off_get(sa_family_t family, enum net_sock_type type, enum net_ip_protocol ip_proto, struct net_context **context) { struct eswifi_dev *eswifi = eswifi_by_iface_idx((*context)->iface); struct eswifi_off_socket *socket = NULL; int idx; LOG_DBG(""); eswifi_lock(eswifi); idx = __eswifi_socket_new(eswifi, family, type, ip_proto, *context); 
if (idx < 0) { goto unlock; } socket = &eswifi->socket[idx]; (*context)->offload_context = socket; LOG_DBG("Socket index %d", socket->index); k_work_init(&socket->connect_work, eswifi_off_connect_work); k_work_init(&socket->send_work, eswifi_off_send_work); k_sem_init(&socket->read_sem, 1, 1); k_sem_init(&socket->accept_sem, 1, 1); k_work_reschedule_for_queue(&eswifi->work_q, &socket->read_work, K_MSEC(500)); unlock: eswifi_unlock(eswifi); return idx; } void eswifi_offload_async_msg(struct eswifi_dev *eswifi, char *msg, size_t len) { static const char msg_tcp_accept[] = "[TCP SVR] Accepted "; if (!strncmp(msg, msg_tcp_accept, sizeof(msg_tcp_accept) - 1)) { struct eswifi_off_socket *socket = NULL; struct in_addr *sin_addr; uint8_t ip[4]; uint16_t port = 0; char *str; int i = 0; /* extract client ip/port e.g. 192.168.1.1:8080 */ /* TODO: use net_ipaddr_parse */ str = msg + sizeof(msg_tcp_accept) - 1; while (*str) { if (i < 4) { ip[i++] = atoi(str); } else if (i < 5) { port = atoi(str); break; } while (*str && (*str != '.') && (*str != ':')) { str++; } str++; } for (i = 0; i < ESWIFI_OFFLOAD_MAX_SOCKETS; i++) { struct eswifi_off_socket *s = &eswifi->socket[i]; if (s->context && s->port == port && s->state == ESWIFI_SOCKET_STATE_ACCEPTING) { socket = s; break; } } if (!socket) { LOG_ERR("No listening socket"); return; } struct sockaddr_in *peer = net_sin(&socket->peer_addr); sin_addr = &peer->sin_addr; memcpy(&sin_addr->s4_addr, ip, 4); peer->sin_port = htons(port); peer->sin_family = AF_INET; socket->state = ESWIFI_SOCKET_STATE_CONNECTED; socket->usage++; /* Save information about remote. 
*/ socket->context->flags |= NET_CONTEXT_REMOTE_ADDR_SET; memcpy(&socket->context->remote, &socket->peer_addr, sizeof(struct sockaddr)); LOG_DBG("%u.%u.%u.%u connected to port %u", ip[0], ip[1], ip[2], ip[3], port); if (socket->accept_cb) { socket->accept_cb(socket->context, &socket->peer_addr, sizeof(struct sockaddr_in), 0, socket->accept_data); } k_sem_give(&socket->accept_sem); k_yield(); } } static struct net_offload eswifi_offload = { .get = eswifi_off_get, .bind = eswifi_off_bind, .listen = eswifi_off_listen, .connect = eswifi_off_connect, .accept = eswifi_off_accept, .send = eswifi_off_send, .sendto = eswifi_off_sendto, .recv = eswifi_off_recv, .put = eswifi_off_put, }; static int eswifi_off_enable_dhcp(struct eswifi_dev *eswifi) { char cmd[] = "C4=1\r"; int err; LOG_DBG(""); eswifi_lock(eswifi); err = eswifi_at_cmd(eswifi, cmd); eswifi_unlock(eswifi); return 0; } static int eswifi_off_disable_bypass(struct eswifi_dev *eswifi) { char cmd[] = "PR=0\r"; int err; LOG_DBG(""); eswifi_lock(eswifi); err = eswifi_at_cmd(eswifi, cmd); eswifi_unlock(eswifi); return err; } int eswifi_offload_init(struct eswifi_dev *eswifi) { eswifi->iface->if_dev->offload = &eswifi_offload; int err; err = eswifi_off_enable_dhcp(eswifi); if (err < 0) { LOG_ERR("Unable to configure dhcp"); return err; } err = eswifi_off_disable_bypass(eswifi); if (err < 0) { LOG_ERR("Unable to disable bypass mode"); return err; } return 0; } ```
/content/code_sandbox/drivers/wifi/eswifi/eswifi_offload.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,325
```c /** * */ #define DT_DRV_COMPAT inventek_eswifi_uart #include "eswifi_log.h" LOG_MODULE_DECLARE(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <errno.h> #include <zephyr/sys/ring_buffer.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/uart.h> #include "eswifi.h" #define ESWIFI_RING_BUF_SIZE 2048 enum eswifi_uart_fsm { ESWIFI_UART_FSM_WAIT_CR, ESWIFI_UART_FSM_WAIT_LF, ESWIFI_UART_FSM_WAIT_MARK, ESWIFI_UART_FSM_WAIT_SPACE, ESWIFI_UART_FSM_END, }; struct eswifi_uart_data { const struct device *dev; enum eswifi_uart_fsm fsm; size_t rx_count; size_t rx_buf_size; char *rx_buf; /* RX Ring Buf */ uint8_t iface_rb_buf[ESWIFI_RING_BUF_SIZE]; struct ring_buf rx_rb; }; static struct eswifi_uart_data eswifi_uart0; /* Static instance */ static void eswifi_iface_uart_flush(struct eswifi_uart_data *uart) { uint8_t c; while (uart_fifo_read(uart->dev, &c, 1) > 0) { continue; } } static void eswifi_iface_uart_isr(const struct device *uart_dev, void *user_data) { struct eswifi_uart_data *uart = &eswifi_uart0; /* Static instance */ int rx = 0; uint8_t *dst; uint32_t partial_size = 0; uint32_t total_size = 0; ARG_UNUSED(user_data); while (uart_irq_update(uart->dev) && uart_irq_rx_ready(uart->dev)) { if (!partial_size) { partial_size = ring_buf_put_claim(&uart->rx_rb, &dst, UINT32_MAX); } if (!partial_size) { LOG_ERR("Rx buffer doesn't have enough space"); eswifi_iface_uart_flush(uart); break; } rx = uart_fifo_read(uart->dev, dst, partial_size); if (rx <= 0) { continue; } dst += rx; total_size += rx; partial_size -= rx; } ring_buf_put_finish(&uart->rx_rb, total_size); } static char get_fsm_char(int fsm) { switch (fsm) { case ESWIFI_UART_FSM_WAIT_CR: return('C'); case ESWIFI_UART_FSM_WAIT_LF: return('L'); case ESWIFI_UART_FSM_WAIT_MARK: return('M'); case ESWIFI_UART_FSM_WAIT_SPACE: return('S'); case ESWIFI_UART_FSM_END: return('E'); } return('?'); } static int eswifi_uart_get_resp(struct eswifi_uart_data *uart) { 
uint8_t c; while (ring_buf_get(&uart->rx_rb, &c, 1) > 0) { LOG_DBG("FSM: %c, RX: 0x%02x : %c", get_fsm_char(uart->fsm), c, c); if (uart->rx_buf_size > 0) { uart->rx_buf[uart->rx_count++] = c; if (uart->rx_count == uart->rx_buf_size) { return -ENOMEM; } } switch (uart->fsm) { case ESWIFI_UART_FSM_WAIT_CR: if (c == '\r') { uart->fsm = ESWIFI_UART_FSM_WAIT_LF; } break; case ESWIFI_UART_FSM_WAIT_LF: if (c == '\n') { uart->fsm = ESWIFI_UART_FSM_WAIT_MARK; } else if (c != '\r') { uart->fsm = ESWIFI_UART_FSM_WAIT_CR; } break; case ESWIFI_UART_FSM_WAIT_MARK: if (c == '>') { uart->fsm = ESWIFI_UART_FSM_WAIT_SPACE; } else if (c == '\r') { uart->fsm = ESWIFI_UART_FSM_WAIT_LF; } else { uart->fsm = ESWIFI_UART_FSM_WAIT_CR; } break; case ESWIFI_UART_FSM_WAIT_SPACE: if (c == ' ') { uart->fsm = ESWIFI_UART_FSM_END; } else if (c == '\r') { uart->fsm = ESWIFI_UART_FSM_WAIT_LF; } else { uart->fsm = ESWIFI_UART_FSM_WAIT_CR; } break; default: break; } } return 0; } static int eswifi_uart_wait_prompt(struct eswifi_uart_data *uart) { unsigned int max_retries = 60 * 1000; /* 1 minute */ int err; while (--max_retries) { err = eswifi_uart_get_resp(uart); if (err) { LOG_DBG("Err: 0x%08x - %d", err, err); return err; } if (uart->fsm == ESWIFI_UART_FSM_END) { LOG_DBG("Success!"); return uart->rx_count; } /* allow other threads to be scheduled */ k_sleep(K_MSEC(1)); } LOG_DBG("Timeout"); return -ETIMEDOUT; } static int eswifi_uart_request(struct eswifi_dev *eswifi, char *cmd, size_t clen, char *rsp, size_t rlen) { struct eswifi_uart_data *uart = eswifi->bus_data; int count; int err; LOG_DBG("cmd=%p (%u byte), rsp=%p (%u byte)", cmd, clen, rsp, rlen); /* Send CMD */ for (count = 0; count < clen; count++) { uart_poll_out(uart->dev, cmd[count]); } uart->fsm = ESWIFI_UART_FSM_WAIT_CR; uart->rx_count = 0; uart->rx_buf = rsp; uart->rx_buf_size = rlen; err = eswifi_uart_wait_prompt(uart); if (err > 0) { LOG_HEXDUMP_DBG(uart->rx_buf, uart->rx_count, "Stream"); } return err; } int 
eswifi_uart_init(struct eswifi_dev *eswifi) { struct eswifi_uart_data *uart = &eswifi_uart0; /* Static instance */ uart->dev = DEVICE_DT_GET(DT_INST_BUS(0)); if (!device_is_ready(uart->dev)) { LOG_ERR("Bus device is not ready"); return -ENODEV; } eswifi->bus_data = uart; uart_irq_rx_disable(uart->dev); uart_irq_tx_disable(uart->dev); eswifi_iface_uart_flush(uart); uart_irq_callback_set(uart->dev, eswifi_iface_uart_isr); uart_irq_rx_enable(uart->dev); ring_buf_init(&uart->rx_rb, sizeof(uart->iface_rb_buf), uart->iface_rb_buf); LOG_DBG("success"); return 0; } static struct eswifi_bus_ops eswifi_bus_ops_uart = { .init = eswifi_uart_init, .request = eswifi_uart_request, }; struct eswifi_bus_ops *eswifi_get_bus(void) { return &eswifi_bus_ops_uart; } ```
/content/code_sandbox/drivers/wifi/eswifi/eswifi_bus_uart.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,642
```c /* * */ #include "eswifi_log.h" LOG_MODULE_DECLARE(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <errno.h> #include "eswifi.h" #include <zephyr/net/net_pkt.h> int eswifi_socket_type_from_zephyr(int proto, enum eswifi_transport_type *type) { if (IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS) && proto >= IPPROTO_TLS_1_0 && proto <= IPPROTO_TLS_1_2) { *type = ESWIFI_TRANSPORT_TCP_SSL; } else if (proto == IPPROTO_TCP) { *type = ESWIFI_TRANSPORT_TCP; } else if (proto == IPPROTO_UDP) { *type = ESWIFI_TRANSPORT_UDP; } else { return -EPFNOSUPPORT; } return 0; } static int __stop_socket(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket) { char cmd_srv[] = "P5=0\r"; char cmd_cli[] = "P6=0\r"; LOG_DBG("Stopping socket %d", socket->index); if (socket->state != ESWIFI_SOCKET_STATE_CONNECTED) { return 0; } socket->state = ESWIFI_SOCKET_STATE_NONE; return eswifi_at_cmd(eswifi, socket->is_server ? cmd_srv : cmd_cli); } static int __read_data(struct eswifi_dev *eswifi, size_t len, char **data) { char cmd[] = "R0\r"; char size[] = "R1=9999\r"; char timeout[] = "R2=30000\r"; int ret; /* Set max read size */ snprintk(size, sizeof(size), "R1=%u\r", len); ret = eswifi_at_cmd(eswifi, size); if (ret < 0) { LOG_ERR("Unable to set read size"); return -EIO; } /* Set timeout */ snprintk(timeout, sizeof(timeout), "R2=%u\r", 30); /* 30 ms */ ret = eswifi_at_cmd(eswifi, timeout); if (ret < 0) { LOG_ERR("Unable to set timeout"); return -EIO; } return eswifi_at_cmd_rsp(eswifi, cmd, data); } int __eswifi_bind(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket, const struct sockaddr *addr, socklen_t addrlen) { int err; if (addr->sa_family != AF_INET) { LOG_ERR("Only AF_INET is supported!"); return -EPFNOSUPPORT; } __select_socket(eswifi, socket->index); socket->port = sys_be16_to_cpu(net_sin(addr)->sin_port); /* Set Local Port */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P2=%d\r", 
socket->port); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set local port"); return -EIO; } if (socket->type == ESWIFI_TRANSPORT_UDP) { /* No listen or accept, so start UDP server now */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P5=1\r"); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to start UDP server"); return -EIO; } } return 0; } static void eswifi_off_read_work(struct k_work *work) { struct eswifi_off_socket *socket; struct eswifi_dev *eswifi; struct net_pkt *pkt = NULL; int next_timeout_ms = 100; int err, len; char *data; struct k_work_delayable *dwork = k_work_delayable_from_work(work); LOG_DBG(""); socket = CONTAINER_OF(dwork, struct eswifi_off_socket, read_work); eswifi = eswifi_socket_to_dev(socket); eswifi_lock(eswifi); if ((socket->type == ESWIFI_TRANSPORT_TCP || socket->type == ESWIFI_TRANSPORT_TCP_SSL) && socket->state != ESWIFI_SOCKET_STATE_CONNECTED) { goto done; } __select_socket(eswifi, socket->index); /* Verify if we can allocate a rx packet before reading data to prevent leaks */ pkt = net_pkt_rx_alloc_with_buffer(eswifi->iface, 1460, AF_UNSPEC, 0, K_NO_WAIT); if (!pkt) { LOG_ERR("Cannot allocate rx packet"); goto done; } len = __read_data(eswifi, 1460, &data); /* 1460 is max size */ if (len < 0) { __stop_socket(eswifi, socket); if (socket->recv_cb) { /* send EOF (null pkt) */ net_pkt_unref(pkt); pkt = NULL; goto do_recv_cb; } } if (!len || !socket->recv_cb) { net_pkt_unref(pkt); goto done; } LOG_DBG("payload sz = %d", len); if (net_pkt_write(pkt, data, len) < 0) { LOG_WRN("Incomplete buffer copy"); } /* Resize the packet */ net_pkt_trim_buffer(pkt); net_pkt_cursor_init(pkt); do_recv_cb: socket->recv_cb(socket->context, pkt, NULL, NULL, 0, socket->recv_data); if (!socket->context) { /* something destroyed the socket in the recv path */ eswifi_unlock(eswifi); return; } k_sem_give(&socket->read_sem); next_timeout_ms = 0; done: err = k_work_reschedule_for_queue(&eswifi->work_q, 
&socket->read_work, K_MSEC(next_timeout_ms)); if (err < 0) { LOG_ERR("Rescheduling socket read error"); } eswifi_unlock(eswifi); } int __eswifi_off_start_client(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket) { struct sockaddr *addr = &socket->peer_addr; struct in_addr *sin_addr = &net_sin(addr)->sin_addr; int err; LOG_DBG(""); __select_socket(eswifi, socket->index); /* Stop any running client */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P6=0\r"); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to stop running client"); return -EIO; } /* Stop any running server */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P5=0\r"); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to stop running client"); return -EIO; } /* Clear local port */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P2=0\r"); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to stop running client"); return -EIO; } /* Set Remote IP */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P3=%u.%u.%u.%u\r", sin_addr->s4_addr[0], sin_addr->s4_addr[1], sin_addr->s4_addr[2], sin_addr->s4_addr[3]); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set remote ip"); return -EIO; } /* Set Remote Port */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P4=%d\r", (uint16_t)sys_be16_to_cpu(net_sin(addr)->sin_port)); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set remote port"); return -EIO; } /* Start TCP/UDP client */ snprintk(eswifi->buf, sizeof(eswifi->buf), "P6=1\r"); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to start TCP/UDP client"); return -EIO; } #if !defined(CONFIG_NET_SOCKETS_OFFLOAD) net_context_set_state(socket->context, NET_CONTEXT_CONNECTED); #endif return 0; } int __eswifi_listen(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket, int backlog) { int err; __select_socket(eswifi, socket->index); /* Set backlog */ snprintk(eswifi->buf, 
sizeof(eswifi->buf), "P8=%d\r", backlog); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to start set listen backlog"); err = -EIO; } socket->is_server = true; return 0; } int __eswifi_accept(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket) { char cmd[] = "P5=1\r"; if (socket->state != ESWIFI_SOCKET_STATE_NONE) { /* we can only handle one connection at a time */ return -EBUSY; } __select_socket(eswifi, socket->index); /* Start TCP Server */ if (eswifi_at_cmd(eswifi, cmd) < 0) { LOG_ERR("Unable to start TCP server"); return -EIO; } LOG_DBG("TCP Server started"); socket->state = ESWIFI_SOCKET_STATE_ACCEPTING; return 0; } int __eswifi_socket_free(struct eswifi_dev *eswifi, struct eswifi_off_socket *socket) { __select_socket(eswifi, socket->index); k_work_cancel_delayable(&socket->read_work); __select_socket(eswifi, socket->index); __stop_socket(eswifi, socket); return 0; } int __eswifi_socket_new(struct eswifi_dev *eswifi, int family, int type, int proto, void *context) { struct eswifi_off_socket *socket = NULL; int err, i; LOG_DBG(""); if (family != AF_INET) { LOG_ERR("Only AF_INET is supported!"); return -EPFNOSUPPORT; } /* pickup available socket */ for (i = 0; i < ESWIFI_OFFLOAD_MAX_SOCKETS; i++) { if (!eswifi->socket[i].context) { socket = &eswifi->socket[i]; socket->index = i; socket->context = context; break; } } if (!socket) { LOG_ERR("No socket resource available"); return -ENOMEM; } err = eswifi_socket_type_from_zephyr(proto, &socket->type); if (err) { LOG_ERR("Only TCP & UDP is supported"); return err; } err = __select_socket(eswifi, socket->index); if (err < 0) { LOG_ERR("Unable to select socket %u", socket->index); return -EIO; } snprintk(eswifi->buf, sizeof(eswifi->buf), "P1=%d\r", socket->type); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set transport protocol"); return -EIO; } k_work_init_delayable(&socket->read_work, eswifi_off_read_work); socket->usage = 1; LOG_DBG("Socket index 
%d", socket->index); return socket->index; } ```
/content/code_sandbox/drivers/wifi/eswifi/eswifi_socket.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,564
```c /** * */ #define DT_DRV_COMPAT inventek_eswifi #include "eswifi_log.h" LOG_MODULE_DECLARE(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include "eswifi.h" #define ESWIFI_SPI_THREAD_STACK_SIZE 1024 K_KERNEL_STACK_MEMBER(eswifi_spi_poll_stack, ESWIFI_SPI_THREAD_STACK_SIZE); #define SPI_READ_CHUNK_SIZE 32 struct eswifi_spi_config { struct gpio_dt_spec dr; struct spi_dt_spec bus; }; struct eswifi_spi_data { const struct eswifi_spi_config *cfg; struct k_thread poll_thread; }; static const struct eswifi_spi_config eswifi_config_spi0 = { .dr = GPIO_DT_SPEC_INST_GET(0, data_gpios), .bus = SPI_DT_SPEC_INST_GET(0, SPI_OP_MODE_MASTER | SPI_TRANSFER_MSB | SPI_WORD_SET(16) | SPI_HOLD_ON_CS | SPI_LOCK_ON, 1000U), }; static struct eswifi_spi_data eswifi_spi0; static bool eswifi_spi_cmddata_ready(struct eswifi_spi_data *spi) { return gpio_pin_get_dt(&spi->cfg->dr) > 0; } static int eswifi_spi_wait_cmddata_ready(struct eswifi_spi_data *spi) { unsigned int max_retries = 60 * 1000; /* 1 minute */ do { /* allow other threads to be scheduled */ k_sleep(K_MSEC(1)); } while (!eswifi_spi_cmddata_ready(spi) && --max_retries); return max_retries ? 
0 : -ETIMEDOUT; } static int eswifi_spi_write(struct eswifi_dev *eswifi, char *data, size_t dlen) { struct eswifi_spi_data *spi = eswifi->bus_data; struct spi_buf spi_tx_buf[1]; struct spi_buf_set spi_tx; int status; spi_tx_buf[0].buf = data; spi_tx_buf[0].len = dlen; spi_tx.buffers = spi_tx_buf; spi_tx.count = ARRAY_SIZE(spi_tx_buf); status = spi_write_dt(&spi->cfg->bus, &spi_tx); if (status) { LOG_ERR("SPI write error %d", status); } else { status = dlen; } return status; } static int eswifi_spi_read(struct eswifi_dev *eswifi, char *data, size_t dlen) { struct eswifi_spi_data *spi = eswifi->bus_data; struct spi_buf spi_rx_buf[1]; struct spi_buf_set spi_rx; int status; spi_rx_buf[0].buf = data; spi_rx_buf[0].len = dlen; spi_rx.buffers = spi_rx_buf; spi_rx.count = ARRAY_SIZE(spi_rx_buf); status = spi_read_dt(&spi->cfg->bus, &spi_rx); if (status) { LOG_ERR("SPI read error %d", status); } else { status = dlen; } return status; } static int eswifi_spi_request(struct eswifi_dev *eswifi, char *cmd, size_t clen, char *rsp, size_t rlen) { struct eswifi_spi_data *spi = eswifi->bus_data; unsigned int offset = 0U, to_read = SPI_READ_CHUNK_SIZE; char tmp[2]; int err; LOG_DBG("cmd=%p (%u byte), rsp=%p (%u byte)", cmd, clen, rsp, rlen); /* * CMD/DATA protocol: * 1. Module raises data-ready when ready for **command phase** * 2. Host announces command start by lowering chip-select * 3. Host write the command (possibly several spi transfers) * 4. Host announces end of command by raising chip-select * 5. Module lowers data-ready signal * 6. Module raises data-ready to signal start of the **data phase** * 7. Host lowers chip-select * 8. Host fetch data as long as data-ready pin is up * 9. Module lowers data-ready to signal the end of the data Phase * 10. Host raises chip-select * * Note: * All commands to the eS-WiFi module must be post-padded with * 0x0A (Line Feed) to an even number of bytes. 
* All data from eS-WiFi module are post-padded with 0x15(NAK) to an * even number of bytes. */ if (!cmd) { goto data; } /* CMD/DATA READY signals the Command Phase */ err = eswifi_spi_wait_cmddata_ready(spi); if (err) { LOG_ERR("CMD ready timeout\n"); return err; } if (clen % 2) { /* Add post-padding if necessary */ /* cmd is a string so cmd[clen] is 0x00 */ cmd[clen] = 0x0a; clen++; } eswifi_spi_write(eswifi, cmd, clen); /* Our device is flagged with SPI_HOLD_ON_CS|SPI_LOCK_ON, release */ spi_release_dt(&spi->cfg->bus); data: /* CMD/DATA READY signals the Data Phase */ err = eswifi_spi_wait_cmddata_ready(spi); if (err) { LOG_ERR("DATA ready timeout\n"); return err; } while (eswifi_spi_cmddata_ready(spi) && to_read) { to_read = MIN(rlen - offset, to_read); memset(rsp + offset, 0, to_read); eswifi_spi_read(eswifi, rsp + offset, to_read); offset += to_read; k_yield(); } /* Flush remaining data if receiving buffer not large enough */ while (eswifi_spi_cmddata_ready(spi)) { eswifi_spi_read(eswifi, tmp, 2); k_sleep(K_MSEC(1)); } /* Our device is flagged with SPI_HOLD_ON_CS|SPI_LOCK_ON, release */ spi_release_dt(&spi->cfg->bus); LOG_DBG("success"); return offset; } static void eswifi_spi_read_msg(struct eswifi_dev *eswifi) { const char startstr[] = "[SOMA]"; const char endstr[] = "[EOMA]"; char cmd[] = "MR\r"; size_t msg_len; char *rsp; int ret; LOG_DBG(""); eswifi_lock(eswifi); ret = eswifi_at_cmd_rsp(eswifi, cmd, &rsp); if (ret < 0) { LOG_ERR("Unable to read msg %d", ret); eswifi_unlock(eswifi); return; } if (strncmp(rsp, startstr, sizeof(endstr) - 1)) { LOG_ERR("Malformed async msg"); eswifi_unlock(eswifi); return; } /* \r\n[SOMA]...[EOMA]\r\nOK\r\n> */ msg_len = ret - (sizeof(startstr) - 1) - (sizeof(endstr) - 1); if (msg_len > 0) { eswifi_async_msg(eswifi, rsp + sizeof(endstr) - 1, msg_len); } eswifi_unlock(eswifi); } static void eswifi_spi_poll_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); struct eswifi_dev *eswifi = p1; while (1) { 
k_sleep(K_MSEC(1000)); eswifi_spi_read_msg(eswifi); } } int eswifi_spi_init(struct eswifi_dev *eswifi) { struct eswifi_spi_data *spi = &eswifi_spi0; /* Static instance */ const struct eswifi_spi_config *cfg = &eswifi_config_spi0; /* Static instance */ /* SPI DATA READY PIN */ if (!gpio_is_ready_dt(&cfg->dr)) { LOG_ERR("device %s is not ready", cfg->dr.port->name); return -ENODEV; } gpio_pin_configure_dt(&cfg->dr, GPIO_INPUT); /* SPI BUS */ if (!spi_is_ready_dt(&cfg->bus)) { LOG_ERR("SPI bus is not ready"); return -ENODEV; }; spi->cfg = cfg; eswifi->bus_data = spi; LOG_DBG("success"); k_thread_create(&spi->poll_thread, eswifi_spi_poll_stack, ESWIFI_SPI_THREAD_STACK_SIZE, eswifi_spi_poll_thread, eswifi, NULL, NULL, K_PRIO_COOP(CONFIG_WIFI_ESWIFI_THREAD_PRIO), 0, K_NO_WAIT); return 0; } static struct eswifi_bus_ops eswifi_bus_ops_spi = { .init = eswifi_spi_init, .request = eswifi_spi_request, }; struct eswifi_bus_ops *eswifi_get_bus(void) { return &eswifi_bus_ops_spi; } ```
/content/code_sandbox/drivers/wifi/eswifi/eswifi_bus_spi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,947
```c /** * */ #if defined(CONFIG_WIFI_ESWIFI_BUS_UART) #define DT_DRV_COMPAT inventek_eswifi_uart #else #define DT_DRV_COMPAT inventek_eswifi #endif #include "eswifi_log.h" LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_context.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/wifi_mgmt.h> #include <zephyr/net/conn_mgr/connectivity_wifi_mgmt.h> #include <zephyr/net/ethernet.h> #include <net_private.h> #include <zephyr/net/net_core.h> #include <zephyr/net/net_pkt.h> #include <stdio.h> #include <stdlib.h> #include <zephyr/sys/printk.h> #include "eswifi.h" #define ESWIFI_WORKQUEUE_STACK_SIZE 1024 K_KERNEL_STACK_DEFINE(eswifi_work_q_stack, ESWIFI_WORKQUEUE_STACK_SIZE); static const struct eswifi_cfg eswifi0_cfg = { .resetn = GPIO_DT_SPEC_INST_GET(0, resetn_gpios), .wakeup = GPIO_DT_SPEC_INST_GET(0, wakeup_gpios), }; static struct eswifi_dev eswifi0; /* static instance */ static int eswifi_reset(struct eswifi_dev *eswifi, const struct eswifi_cfg *cfg) { gpio_pin_set_dt(&cfg->resetn, 0); k_sleep(K_MSEC(10)); gpio_pin_set_dt(&cfg->resetn, 1); gpio_pin_set_dt(&cfg->wakeup, 1); k_sleep(K_MSEC(500)); /* fetch the cursor */ return eswifi_request(eswifi, NULL, 0, eswifi->buf, sizeof(eswifi->buf)); } static inline int __parse_ssid(char *str, char *ssid) { int i = 0; /* fmt => "SSID" */ if (*str != '"') { return 0; } str++; while (*str && (*str != '"') && i < WIFI_SSID_MAX_LEN) { ssid[i++] = *str++; } if (*str != '"') { return 0; } return i; } static void __parse_scan_res(char *str, struct wifi_scan_result *res) { int field = 0; /* fmt => #001,"SSID",MACADDR,RSSI,BITRATE,MODE,SECURITY,BAND,CHANNEL */ while (*str) { if (*str != ',') { str++; continue; } if (!*++str) { break; } switch (++field) { case 1: /* SSID */ res->ssid_length = __parse_ssid(str, res->ssid); str += 
res->ssid_length; break; case 2: /* mac addr */ break; case 3: /* RSSI */ res->rssi = atoi(str); break; case 4: /* bitrate */ break; case 5: /* mode */ break; case 6: /* security */ if (!strncmp(str, "Open", 4)) { res->security = WIFI_SECURITY_TYPE_NONE; } else { res->security = WIFI_SECURITY_TYPE_PSK; } break; case 7: /* band */ break; case 8: /* channel */ res->channel = atoi(str); break; } } } int eswifi_at_cmd_rsp(struct eswifi_dev *eswifi, char *cmd, char **rsp) { const char startstr[] = "\r\n"; const char endstr[] = "\r\nOK\r\n>"; int i, len, rsplen = -EINVAL; len = eswifi_request(eswifi, cmd, strlen(cmd), eswifi->buf, sizeof(eswifi->buf)); if (len < 0) { return -EIO; } if (len >= CONFIG_WIFI_ESWIFI_MAX_DATA_SIZE) { LOG_WRN("Buffer might be too small for response!"); LOG_WRN("Data length %d", len); LOG_WRN("See CONFIG_WIFI_ESWIFI_MAX_DATA_SIZE (in build: %d)", CONFIG_WIFI_ESWIFI_MAX_DATA_SIZE); } /* * Check response, format should be "\r\n[DATA]\r\nOK\r\n>" * Data is in arbitrary format (not only ASCII) */ /* Check start characters */ if (strncmp(eswifi->buf, startstr, strlen(startstr))) { return -EINVAL; } if (len < sizeof(endstr) - 1 + sizeof(startstr) - 1) { return -EINVAL; } /* Check end characters */ for (i = len - sizeof(endstr); i > 0; i--) { if (!strncmp(&eswifi->buf[i], endstr, 7)) { if (rsp) { eswifi->buf[i] = '\0'; *rsp = &eswifi->buf[2]; rsplen = &eswifi->buf[i] - *rsp; } else { rsplen = 0; } break; } } return rsplen; } int eswifi_at_cmd(struct eswifi_dev *eswifi, char *cmd) { return eswifi_at_cmd_rsp(eswifi, cmd, NULL); } struct eswifi_dev *eswifi_by_iface_idx(uint8_t iface) { /* only one instance */ LOG_DBG("%d", iface); return &eswifi0; } static int __parse_ipv4_address(char *str, char *ssid, uint8_t ip[4]) { int byte = -1; /* fmt => [JOIN ] SSID,192.168.2.18,0,0 */ while (*str && byte < 4) { if (byte == -1) { if (!strncmp(str, ssid, strlen(ssid))) { byte = 0; str += strlen(ssid); } str++; continue; } ip[byte++] = atoi(str); while (*str && 
(*str++ != '.')) { } } return 0; } static void eswifi_scan(struct eswifi_dev *eswifi) { char cmd[] = "F0\r"; char *data; int i, ret; LOG_DBG(""); eswifi_lock(eswifi); ret = eswifi_at_cmd_rsp(eswifi, cmd, &data); if (ret < 0) { eswifi->scan_cb(eswifi->iface, -EIO, NULL); eswifi_unlock(eswifi); return; } for (i = 0; i < ret; i++) { if (data[i] == '#') { struct wifi_scan_result res = {0}; __parse_scan_res(&data[i], &res); eswifi->scan_cb(eswifi->iface, 0, &res); k_yield(); while (data[i] && data[i] != '\n') { i++; } } } /* WiFi scan is done. */ eswifi->scan_cb(eswifi->iface, 0, NULL); eswifi_unlock(eswifi); } static int eswifi_connect(struct eswifi_dev *eswifi) { char connect[] = "C0\r"; struct in_addr addr; char *rsp; int err; LOG_DBG("Connecting to %s (pass=%s)", eswifi->sta.ssid, eswifi->sta.pass); eswifi_lock(eswifi); /* Set SSID */ snprintk(eswifi->buf, sizeof(eswifi->buf), "C1=%s\r", eswifi->sta.ssid); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set SSID"); goto error; } /* Set passphrase */ snprintk(eswifi->buf, sizeof(eswifi->buf), "C2=%s\r", eswifi->sta.pass); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set passphrase"); goto error; } /* Set Security type */ snprintk(eswifi->buf, sizeof(eswifi->buf), "C3=%u\r", eswifi->sta.security); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to configure security"); goto error; } /* Join Network */ err = eswifi_at_cmd_rsp(eswifi, connect, &rsp); if (err < 0) { LOG_ERR("Unable to join network"); goto error; } /* Any IP assigned ? 
(dhcp offload or manually) */ err = __parse_ipv4_address(rsp, eswifi->sta.ssid, (uint8_t *)&addr.s4_addr); if (err < 0) { LOG_ERR("Unable to retrieve IP address"); goto error; } LOG_DBG("ip = %d.%d.%d.%d", addr.s4_addr[0], addr.s4_addr[1], addr.s4_addr[2], addr.s4_addr[3]); net_if_ipv4_addr_add(eswifi->iface, &addr, NET_ADDR_DHCP, 0); eswifi->sta.connected = true; LOG_DBG("Connected!"); eswifi_unlock(eswifi); return 0; error: eswifi_unlock(eswifi); return -EIO; } static int eswifi_disconnect(struct eswifi_dev *eswifi) { char disconnect[] = "CD\r"; int err; LOG_DBG(""); eswifi_lock(eswifi); err = eswifi_at_cmd(eswifi, disconnect); if (err < 0) { LOG_ERR("Unable to disconnect network"); err = -EIO; } eswifi->sta.connected = false; eswifi_unlock(eswifi); return err; } static void eswifi_status_work(struct k_work *work) { struct k_work_delayable *dwork = k_work_delayable_from_work(work); struct eswifi_dev *eswifi; char status[] = "CS\r"; char rssi[] = "CR\r"; char *rsp; int ret; eswifi = CONTAINER_OF(dwork, struct eswifi_dev, status_work); eswifi_lock(eswifi); if (eswifi->role == ESWIFI_ROLE_AP) { goto done; } ret = eswifi_at_cmd_rsp(eswifi, status, &rsp); if (ret < 1) { LOG_ERR("Unable to retrieve status"); goto done; } if (rsp[0] == '0' && eswifi->sta.connected) { eswifi->sta.connected = false; wifi_mgmt_raise_disconnect_result_event(eswifi->iface, 0); goto done; } else if (rsp[0] == '1' && !eswifi->sta.connected) { eswifi->sta.connected = true; wifi_mgmt_raise_connect_result_event(eswifi->iface, 0); } ret = eswifi_at_cmd_rsp(eswifi, rssi, &rsp); if (ret < 1) { LOG_ERR("Unable to retrieve rssi"); /* continue */ } else { eswifi->sta.rssi = atoi(rsp); } k_work_reschedule_for_queue(&eswifi->work_q, &eswifi->status_work, K_MSEC(1000 * 30)); done: eswifi_unlock(eswifi); } static void eswifi_request_work(struct k_work *item) { struct eswifi_dev *eswifi; int err; LOG_DBG(""); eswifi = CONTAINER_OF(item, struct eswifi_dev, request_work); switch (eswifi->req) { case 
ESWIFI_REQ_CONNECT: err = eswifi_connect(eswifi); wifi_mgmt_raise_connect_result_event(eswifi->iface, err); k_work_reschedule_for_queue(&eswifi->work_q, &eswifi->status_work, K_MSEC(1000)); break; case ESWIFI_REQ_DISCONNECT: err = eswifi_disconnect(eswifi); wifi_mgmt_raise_disconnect_result_event(eswifi->iface, err); break; case ESWIFI_REQ_SCAN: eswifi_scan(eswifi); break; case ESWIFI_REQ_NONE: default: break; } } static int eswifi_get_mac_addr(struct eswifi_dev *eswifi, uint8_t addr[6]) { char cmd[] = "Z5\r"; int ret, i, byte = 0; char *rsp; ret = eswifi_at_cmd_rsp(eswifi, cmd, &rsp); if (ret < 0) { return ret; } /* format is "ff:ff:ff:ff:ff:ff" */ for (i = 0; i < ret && byte < 6; i++) { addr[byte++] = strtol(&rsp[i], NULL, 16); i += 2; } if (byte != 6) { return -EIO; } return 0; } static void eswifi_iface_init(struct net_if *iface) { struct eswifi_dev *eswifi = &eswifi0; const struct eswifi_cfg *cfg = &eswifi0_cfg; uint8_t mac[6]; LOG_DBG(""); eswifi_lock(eswifi); if (eswifi_reset(eswifi, cfg) < 0) { LOG_ERR("Unable to reset device"); return; } if (eswifi_get_mac_addr(eswifi, mac) < 0) { LOG_ERR("Unable to read MAC address"); return; } LOG_DBG("MAC Address %02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); memcpy(eswifi->mac, mac, sizeof(eswifi->mac)); net_if_set_link_addr(iface, eswifi->mac, sizeof(eswifi->mac), NET_LINK_ETHERNET); eswifi->iface = iface; eswifi_unlock(eswifi); eswifi_offload_init(eswifi); #if defined(CONFIG_NET_SOCKETS_OFFLOAD) eswifi_socket_offload_init(eswifi); net_if_socket_offload_set(iface, eswifi_socket_create); #endif } int eswifi_mgmt_iface_status(const struct device *dev, struct wifi_iface_status *status) { struct eswifi_dev *eswifi = dev->data; struct eswifi_sta *sta = &eswifi->sta; /* Update status */ eswifi_status_work(&eswifi->status_work.work); if (!sta->connected) { status->state = WIFI_STATE_DISCONNECTED; return 0; } status->state = WIFI_STATE_COMPLETED; status->ssid_len = strnlen(sta->ssid, 
WIFI_SSID_MAX_LEN); strncpy(status->ssid, sta->ssid, status->ssid_len); status->band = WIFI_FREQ_BAND_2_4_GHZ; status->channel = 0; if (eswifi->role == ESWIFI_ROLE_CLIENT) { status->iface_mode = WIFI_MODE_INFRA; } else { status->iface_mode = WIFI_MODE_AP; } status->link_mode = WIFI_LINK_MODE_UNKNOWN; switch (sta->security) { case ESWIFI_SEC_OPEN: status->security = WIFI_SECURITY_TYPE_NONE; break; case ESWIFI_SEC_WPA2_MIXED: status->security = WIFI_SECURITY_TYPE_PSK; break; default: status->security = WIFI_SECURITY_TYPE_UNKNOWN; } status->mfp = WIFI_MFP_DISABLE; status->rssi = sta->rssi; return 0; } static int eswifi_mgmt_scan(const struct device *dev, struct wifi_scan_params *params, scan_result_cb_t cb) { struct eswifi_dev *eswifi = dev->data; ARG_UNUSED(params); LOG_DBG(""); eswifi_lock(eswifi); eswifi->scan_cb = cb; eswifi->req = ESWIFI_REQ_SCAN; k_work_submit_to_queue(&eswifi->work_q, &eswifi->request_work); eswifi_unlock(eswifi); return 0; } static int eswifi_mgmt_disconnect(const struct device *dev) { struct eswifi_dev *eswifi = dev->data; LOG_DBG(""); eswifi_lock(eswifi); eswifi->req = ESWIFI_REQ_DISCONNECT; k_work_submit_to_queue(&eswifi->work_q, &eswifi->request_work); eswifi_unlock(eswifi); return 0; } static int __eswifi_sta_config(struct eswifi_dev *eswifi, struct wifi_connect_req_params *params) { memcpy(eswifi->sta.ssid, params->ssid, params->ssid_length); eswifi->sta.ssid[params->ssid_length] = '\0'; switch (params->security) { case WIFI_SECURITY_TYPE_NONE: eswifi->sta.pass[0] = '\0'; eswifi->sta.security = ESWIFI_SEC_OPEN; break; case WIFI_SECURITY_TYPE_PSK: memcpy(eswifi->sta.pass, params->psk, params->psk_length); eswifi->sta.pass[params->psk_length] = '\0'; eswifi->sta.security = ESWIFI_SEC_WPA2_MIXED; break; default: return -EINVAL; } if (params->channel == WIFI_CHANNEL_ANY) { eswifi->sta.channel = 0U; } else { eswifi->sta.channel = params->channel; } return 0; } static int eswifi_mgmt_connect(const struct device *dev, struct 
wifi_connect_req_params *params) { struct eswifi_dev *eswifi = dev->data; int err; LOG_DBG(""); eswifi_lock(eswifi); err = __eswifi_sta_config(eswifi, params); if (!err) { eswifi->req = ESWIFI_REQ_CONNECT; k_work_submit_to_queue(&eswifi->work_q, &eswifi->request_work); } eswifi_unlock(eswifi); return err; } void eswifi_async_msg(struct eswifi_dev *eswifi, char *msg, size_t len) { eswifi_offload_async_msg(eswifi, msg, len); } #if defined(CONFIG_NET_IPV4) static int eswifi_mgmt_ap_enable(const struct device *dev, struct wifi_connect_req_params *params) { struct eswifi_dev *eswifi = dev->data; struct net_if_ipv4 *ipv4 = eswifi->iface->config.ip.ipv4; struct net_if_addr *unicast = NULL; int err = -EIO, i; LOG_DBG(""); eswifi_lock(eswifi); if (eswifi->role == ESWIFI_ROLE_AP) { err = -EALREADY; goto error; } err = __eswifi_sta_config(eswifi, params); if (err) { goto error; } /* security */ snprintk(eswifi->buf, sizeof(eswifi->buf), "A1=%u\r", eswifi->sta.security); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set Security"); goto error; } /* Passkey */ if (eswifi->sta.security != ESWIFI_SEC_OPEN) { snprintk(eswifi->buf, sizeof(eswifi->buf), "A2=%s\r", eswifi->sta.pass); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set passkey"); goto error; } } /* Set SSID (0=no MAC, 1=append MAC) */ snprintk(eswifi->buf, sizeof(eswifi->buf), "AS=0,%s\r", eswifi->sta.ssid); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set SSID"); goto error; } /* Set Channel */ snprintk(eswifi->buf, sizeof(eswifi->buf), "AC=%u\r", eswifi->sta.channel); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to set Channel"); goto error; } /* Set IP Address */ for (i = 0; ipv4 && i < NET_IF_MAX_IPV4_ADDR; i++) { if (ipv4->unicast[i].ipv4.is_used) { unicast = &ipv4->unicast[i].ipv4; break; } } if (!unicast) { LOG_ERR("No IPv4 assigned for AP mode"); err = -EADDRNOTAVAIL; goto error; } 
snprintk(eswifi->buf, sizeof(eswifi->buf), "Z6=%s\r", net_sprint_ipv4_addr(&unicast->address.in_addr)); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to active access point"); goto error; } /* Enable AP */ snprintk(eswifi->buf, sizeof(eswifi->buf), "AD\r"); err = eswifi_at_cmd(eswifi, eswifi->buf); if (err < 0) { LOG_ERR("Unable to active access point"); goto error; } eswifi->role = ESWIFI_ROLE_AP; eswifi_unlock(eswifi); return 0; error: eswifi_unlock(eswifi); return err; } #else static int eswifi_mgmt_ap_enable(const struct device *dev, struct wifi_connect_req_params *params) { LOG_ERR("IPv4 requested for AP mode"); return -ENOTSUP; } #endif /* CONFIG_NET_IPV4 */ static int eswifi_mgmt_ap_disable(const struct device *dev) { struct eswifi_dev *eswifi = dev->data; char cmd[] = "AE\r"; int err; eswifi_lock(eswifi); err = eswifi_at_cmd(eswifi, cmd); if (err < 0) { eswifi_unlock(eswifi); return -EIO; } eswifi->role = ESWIFI_ROLE_CLIENT; eswifi_unlock(eswifi); return 0; } static int eswifi_init(const struct device *dev) { struct eswifi_dev *eswifi = dev->data; const struct eswifi_cfg *cfg = dev->config; LOG_DBG(""); eswifi->role = ESWIFI_ROLE_CLIENT; k_mutex_init(&eswifi->mutex); eswifi->bus = eswifi_get_bus(); eswifi->bus->init(eswifi); if (!gpio_is_ready_dt(&cfg->resetn)) { LOG_ERR("%s: device %s is not ready", dev->name, cfg->resetn.port->name); return -ENODEV; } gpio_pin_configure_dt(&cfg->resetn, GPIO_OUTPUT_INACTIVE); if (!gpio_is_ready_dt(&cfg->wakeup)) { LOG_ERR("%s: device %s is not ready", dev->name, cfg->wakeup.port->name); return -ENODEV; } gpio_pin_configure_dt(&cfg->wakeup, GPIO_OUTPUT_ACTIVE); k_work_queue_start(&eswifi->work_q, eswifi_work_q_stack, K_KERNEL_STACK_SIZEOF(eswifi_work_q_stack), CONFIG_SYSTEM_WORKQUEUE_PRIORITY - 1, NULL); k_work_init(&eswifi->request_work, eswifi_request_work); k_work_init_delayable(&eswifi->status_work, eswifi_status_work); eswifi_shell_register(eswifi); return 0; } static enum 
offloaded_net_if_types eswifi_get_type(void) { return L2_OFFLOADED_NET_IF_TYPE_WIFI; } static const struct wifi_mgmt_ops eswifi_mgmt_api = { .scan = eswifi_mgmt_scan, .connect = eswifi_mgmt_connect, .disconnect = eswifi_mgmt_disconnect, .ap_enable = eswifi_mgmt_ap_enable, .ap_disable = eswifi_mgmt_ap_disable, .iface_status = eswifi_mgmt_iface_status, }; static const struct net_wifi_mgmt_offload eswifi_offload_api = { .wifi_iface.iface_api.init = eswifi_iface_init, .wifi_iface.get_type = eswifi_get_type, .wifi_mgmt_api = &eswifi_mgmt_api, }; NET_DEVICE_DT_INST_OFFLOAD_DEFINE(0, eswifi_init, NULL, &eswifi0, &eswifi0_cfg, CONFIG_WIFI_INIT_PRIORITY, &eswifi_offload_api, 1500); CONNECTIVITY_WIFI_MGMT_BIND(Z_DEVICE_DT_DEV_ID(DT_DRV_INST(0))); ```
/content/code_sandbox/drivers/wifi/eswifi/eswifi_core.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,305
```unknown
# SimpleLink WiFi driver options

menuconfig WIFI_SIMPLELINK
	bool "SimpleLink Wi-Fi driver support"
	select SIMPLELINK_HOST_DRIVER
	select WIFI_OFFLOAD
	select NET_L2_WIFI_MGMT
	select FDTABLE
	select POSIX_SEMAPHORES

if WIFI_SIMPLELINK

config WIFI_SIMPLELINK_NAME
	string "Driver name"
	default "SimpleLink"

config WIFI_SIMPLELINK_MAX_PACKET_SIZE
	int "Maximum size of a packet, in bytes"
	# IPv4 MTU payload: 1500 (Ethernet) - 20 (IPv4 header) - 8 (UDP header)
	default 1472
	help
	  Set the maximum size of a network packet going through the chip.
	  This sets the size of each buffer, in each buffer pool.
	  Do not modify it unless you know what you are doing.

config WIFI_SIMPLELINK_SCAN_COUNT
	int "Number of entries in network scan table: Max: 30"
	default 20
	help
	  The number of results to request on a Wi-Fi scan operation.
	  Actual number returned may be less. Maximum is 30.

config WIFI_SIMPLELINK_MAX_SCAN_RETRIES
	int "Number of retries to get network scan table"
	default 10
	help
	  The number of times, separated by a one second interval,
	  to retry a request for the network list.

config WIFI_SIMPLELINK_FAST_CONNECT_TIMEOUT
	int "Time (in seconds) to wait for fast connect on startup"
	default 7
	help
	  SimpleLink uses the "FastConnect" feature to reconnect to the
	  previously connected AP on startup. Should the Wi-Fi connection
	  timeout, the SimpleLink driver will fail to initialize, and LOG
	  an error.

endif # WIFI_SIMPLELINK
```
/content/code_sandbox/drivers/wifi/simplelink/Kconfig.simplelink
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
344
```objective-c
/**
 *
 */

/* Public interface of the SimpleLink socket-offload layer
 * (simplelink_sockets.c); consumed by simplelink.c at iface init.
 */

#ifndef ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_SOCKETS_H_
#define ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_SOCKETS_H_

#ifdef __cplusplus
extern "C" {
#endif

/* DNS resolver operations table, registered with
 * socket_offload_dns_register() during interface init.
 */
extern const struct socket_dns_offload simplelink_dns_ops;

/* One-time initialization of the socket offload layer; called from the
 * driver's iface init hook.
 */
extern void simplelink_sockets_init(void);

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_SOCKETS_H_ */
```
/content/code_sandbox/drivers/wifi/simplelink/simplelink_sockets.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
93
```objective-c
/**
 *
 */

/* Shared logging setup for the SimpleLink driver: defines the log
 * module name and level before pulling in the Zephyr log API, so every
 * translation unit registers/declares the same module.
 */

#ifndef ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_LOG_H_
#define ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_LOG_H_

#define LOG_MODULE_NAME wifi_simplelink
#define LOG_LEVEL CONFIG_WIFI_LOG_LEVEL
#include <zephyr/logging/log.h>

#endif /* ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_LOG_H_ */
```
/content/code_sandbox/drivers/wifi/simplelink/simplelink_log.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
76
```c /** * */ #include "simplelink_log.h" LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/net/net_if.h> #include <zephyr/net/wifi_mgmt.h> #include <zephyr/net/net_offload.h> #include <zephyr/net/conn_mgr/connectivity_wifi_mgmt.h> #ifdef CONFIG_NET_SOCKETS_OFFLOAD #include <zephyr/net/socket_offload.h> #endif #include <ti/drivers/net/wifi/wlan.h> #include "simplelink_support.h" #include "simplelink_sockets.h" #define SCAN_RETRY_DELAY 2000 /* ms */ #define FC_TIMEOUT K_SECONDS(CONFIG_WIFI_SIMPLELINK_FAST_CONNECT_TIMEOUT) #define SIMPLELINK_IPV4 0x1 #define SIMPLELINK_IPV6 0x2 struct simplelink_data { struct net_if *iface; unsigned char mac[6]; /* Fields for scan API to emulate an asynchronous scan: */ struct k_work_delayable work; scan_result_cb_t cb; int num_results_or_err; int scan_retries; bool initialized; uint8_t mask; }; static struct simplelink_data simplelink_data; static K_SEM_DEFINE(ip_acquired, 0, 1); /* Handle connection events from the SimpleLink Event Handlers: */ static void simplelink_wifi_cb(uint32_t event, struct sl_connect_state *conn) { int status; /* * Once Zephyr wifi_mgmt wifi_status codes are defined, will need * to map from SimpleLink error codes. For now, just return -EIO. */ status = (conn->error ? 
-EIO : 0); switch (event) { case SL_WLAN_EVENT_CONNECT: /* Only get this event if connect succeeds: */ wifi_mgmt_raise_connect_result_event(simplelink_data.iface, status); break; case SL_WLAN_EVENT_DISCONNECT: /* Could be during a connect, disconnect, or async error: */ wifi_mgmt_raise_disconnect_result_event(simplelink_data.iface, status); break; case SIMPLELINK_WIFI_CB_IPACQUIRED: simplelink_data.mask &= ~SIMPLELINK_IPV4; if ((simplelink_data.mask == 0) && (!simplelink_data.initialized)) { simplelink_data.initialized = true; k_sem_give(&ip_acquired); } break; case SIMPLELINK_WIFI_CB_IPV6ACQUIRED: simplelink_data.mask &= ~SIMPLELINK_IPV6; if ((simplelink_data.mask == 0) && (!simplelink_data.initialized)) { simplelink_data.initialized = true; k_sem_give(&ip_acquired); } break; default: LOG_DBG("Unrecognized mgmt event: 0x%x", event); break; } } static void simplelink_scan_work_handler(struct k_work *work) { if (simplelink_data.num_results_or_err > 0) { int index = 0; struct wifi_scan_result scan_result; /* Iterate over the table, and call the scan_result callback. */ while (index < simplelink_data.num_results_or_err) { z_simplelink_get_scan_result(index, &scan_result); simplelink_data.cb(simplelink_data.iface, 0, &scan_result); /* Yield, to ensure notifications get delivered: */ k_yield(); index++; } /* Sending a NULL entry indicates e/o results, and * triggers the NET_EVENT_WIFI_SCAN_DONE event: */ simplelink_data.cb(simplelink_data.iface, 0, NULL); } else if ((simplelink_data.num_results_or_err == SL_ERROR_WLAN_GET_NETWORK_LIST_EAGAIN) && (simplelink_data.scan_retries++ < CONFIG_WIFI_SIMPLELINK_MAX_SCAN_RETRIES)) { int32_t delay; /* Try again: */ simplelink_data.num_results_or_err = z_simplelink_start_scan(); simplelink_data.scan_retries++; delay = (simplelink_data.num_results_or_err > 0 ? 
0 : SCAN_RETRY_DELAY); if (delay > 0) { LOG_DBG("Retrying scan..."); } k_work_reschedule(&simplelink_data.work, K_MSEC(delay)); } else { /* Encountered an error, or max retries exceeded: */ LOG_ERR("Scan failed: retries: %d; err: %d", simplelink_data.scan_retries, simplelink_data.num_results_or_err); simplelink_data.cb(simplelink_data.iface, -EIO, NULL); } } static int simplelink_mgmt_scan(const struct device *dev, struct wifi_scan_params *params, scan_result_cb_t cb) { int err; int status; ARG_UNUSED(params); /* Cancel any previous scan processing in progress: */ k_work_cancel_delayable(&simplelink_data.work); /* "Request" the scan: */ err = z_simplelink_start_scan(); /* Now, launch a delayed work handler to do retries and reporting. * Indicate (to the work handler) either a positive number of results * already returned, or indicate a retry is required: */ if ((err > 0) || (err == SL_ERROR_WLAN_GET_NETWORK_LIST_EAGAIN)) { int32_t delay = (err > 0 ? 0 : SCAN_RETRY_DELAY); /* Store for later reference by delayed work handler: */ simplelink_data.cb = cb; simplelink_data.num_results_or_err = err; simplelink_data.scan_retries = 0; k_work_reschedule(&simplelink_data.work, K_MSEC(delay)); status = 0; } else { status = -EIO; } return status; } static int simplelink_mgmt_connect(const struct device *dev, struct wifi_connect_req_params *params) { int ret; ret = z_simplelink_connect(params); return ret ? -EIO : ret; } static int simplelink_mgmt_disconnect(const struct device *dev) { int ret; ret = z_simplelink_disconnect(); return ret ? 
-EIO : ret; } static int simplelink_dummy_get(sa_family_t family, enum net_sock_type type, enum net_ip_protocol ip_proto, struct net_context **context) { LOG_ERR("NET_SOCKETS_OFFLOAD must be configured for this driver"); return -1; } /* Placeholders, until Zephyr IP stack updated to handle a NULL net_offload */ static struct net_offload simplelink_offload = { .get = simplelink_dummy_get, .bind = NULL, .listen = NULL, .connect = NULL, .accept = NULL, .send = NULL, .sendto = NULL, .recv = NULL, .put = NULL, }; static void simplelink_iface_init(struct net_if *iface) { int ret; simplelink_data.iface = iface; simplelink_data.mask = 0; simplelink_data.mask |= IS_ENABLED(CONFIG_NET_IPV4) ? SIMPLELINK_IPV4 : 0; simplelink_data.mask |= IS_ENABLED(CONFIG_NET_IPV6) ? SIMPLELINK_IPV6 : 0; /* Direct socket offload used instead of net offload: */ iface->if_dev->offload = &simplelink_offload; /* Initialize and configure NWP to defaults: */ ret = z_simplelink_init(simplelink_wifi_cb); if (ret) { LOG_ERR("z_simplelink_init failed!"); return; } ret = k_sem_take(&ip_acquired, FC_TIMEOUT); if (ret < 0) { simplelink_data.initialized = false; LOG_ERR("FastConnect timed out connecting to previous AP."); LOG_ERR("Please re-establish WiFi connection."); } /* Grab our MAC address: */ z_simplelink_get_mac(simplelink_data.mac); LOG_DBG("MAC Address %02X:%02X:%02X:%02X:%02X:%02X", simplelink_data.mac[0], simplelink_data.mac[1], simplelink_data.mac[2], simplelink_data.mac[3], simplelink_data.mac[4], simplelink_data.mac[5]); net_if_set_link_addr(iface, simplelink_data.mac, sizeof(simplelink_data.mac), NET_LINK_ETHERNET); #ifdef CONFIG_NET_SOCKETS_OFFLOAD /* Direct socket offload: */ socket_offload_dns_register(&simplelink_dns_ops); simplelink_sockets_init(); net_if_socket_offload_set(iface, simplelink_socket_create); #endif } static enum offloaded_net_if_types simplelink_get_type(void) { return L2_OFFLOADED_NET_IF_TYPE_WIFI; } static const struct wifi_mgmt_ops simplelink_mgmt = { .scan = 
simplelink_mgmt_scan, .connect = simplelink_mgmt_connect, .disconnect = simplelink_mgmt_disconnect, }; static const struct net_wifi_mgmt_offload simplelink_api = { .wifi_iface.iface_api.init = simplelink_iface_init, .wifi_iface.get_type = simplelink_get_type, .wifi_mgmt_api = &simplelink_mgmt, }; static int simplelink_init(const struct device *dev) { ARG_UNUSED(dev); /* We use system workqueue to deal with scan retries: */ k_work_init_delayable(&simplelink_data.work, simplelink_scan_work_handler); LOG_DBG("SimpleLink driver Initialized"); return 0; } NET_DEVICE_OFFLOAD_INIT(simplelink, CONFIG_WIFI_SIMPLELINK_NAME, simplelink_init, NULL, &simplelink_data, NULL, CONFIG_WIFI_INIT_PRIORITY, &simplelink_api, CONFIG_WIFI_SIMPLELINK_MAX_PACKET_SIZE); CONNECTIVITY_WIFI_MGMT_BIND(simplelink); ```
/content/code_sandbox/drivers/wifi/simplelink/simplelink.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,098
```c /** * */ #include "simplelink_log.h" LOG_MODULE_DECLARE(LOG_MODULE_NAME); #include <stdlib.h> #include <limits.h> #include <zephyr/posix/fcntl.h> #include <zephyr/kernel.h> /* Define sockaddr, etc, before simplelink.h */ #include <zephyr/net/socket_offload.h> #include <errno.h> #include <ti/drivers/net/wifi/simplelink.h> #include <ti/drivers/net/wifi/source/driver.h> #include <ti/net/slnetutils.h> #include <ti/net/slnetif.h> #include "simplelink_support.h" #include "sockets_internal.h" #include "tls_internal.h" #define FAILED (-1) /* Increment by 1 to make sure we do not store the value of 0, which has * a special meaning in the fdtable subsys. */ #define SD_TO_OBJ(sd) ((void *)(sd + 1)) #define OBJ_TO_SD(obj) (((int)obj) - 1) static int simplelink_socket_accept(void *obj, struct sockaddr *addr, socklen_t *addrlen); /* * Convert SL error codes into BSD errno values * note that we are handling the same set of values as in TI SlNetSock * minus the ones that are not defined in ti/drivers/net/wifi/errors.h. */ static int getErrno(_i32 error) { if (error >= 0) { return error; } /* This switch case block is necessary for translating the NWP error * code to BSD ones. The #ifdef in each case are made in order to * reduce code footprint: These cases are compiled if and only if * there's a discrepancy between the BSD error number and the error * code returned by the NWP. 
*/ switch (error) { #if EBADF != SL_ERROR_BSD_EBADF case SL_ERROR_BSD_EBADF: error = EBADF; break; #endif #if ENSOCK != SL_ERROR_BSD_ENSOCK case SL_ERROR_BSD_ENSOCK: /* The limit on total # of open sockets has been reached */ error = ENSOCK; break; #endif #if EAGAIN != SL_ERROR_BSD_EAGAIN case SL_ERROR_BSD_EAGAIN: error = EAGAIN; break; #endif #if ENOMEM != SL_ERROR_BSD_ENOMEM case SL_ERROR_BSD_ENOMEM: error = ENOMEM; break; #endif #if EACCES != SL_ERROR_BSD_EACCES case SL_ERROR_BSD_EACCES: error = EACCES; break; #endif #if EFAULT != SL_ERROR_BSD_EFAULT case SL_ERROR_BSD_EFAULT: error = EFAULT; break; #endif #if EINVAL != SL_ERROR_BSD_EINVAL case SL_ERROR_BSD_EINVAL: error = EINVAL; break; #endif #if EDESTADDRREQ != SL_ERROR_BSD_EDESTADDRREQ case SL_ERROR_BSD_EDESTADDRREQ: error = EDESTADDRREQ; break; #endif #if EPROTOTYPE != SL_ERROR_BSD_EPROTOTYPE case SL_ERROR_BSD_EPROTOTYPE: error = EPROTOTYPE; break; #endif #if ENOPROTOOPT != SL_ERROR_BSD_ENOPROTOOPT case SL_ERROR_BSD_ENOPROTOOPT: error = ENOPROTOOPT; break; #endif #if EPROTONOSUPPORT != SL_ERROR_BSD_EPROTONOSUPPORT case SL_ERROR_BSD_EPROTONOSUPPORT: error = EPROTONOSUPPORT; break; #endif #if EOPNOTSUPP != SL_ERROR_BSD_EOPNOTSUPP case SL_ERROR_BSD_EOPNOTSUPP: error = EOPNOTSUPP; break; #endif #if EAFNOSUPPORT != SL_ERROR_BSD_EAFNOSUPPORT case SL_ERROR_BSD_EAFNOSUPPORT: error = EAFNOSUPPORT; break; #endif #if EADDRINUSE != SL_ERROR_BSD_EADDRINUSE case SL_ERROR_BSD_EADDRINUSE: error = EADDRINUSE; break; #endif #if EADDRNOTAVAIL != SL_ERROR_BSD_EADDRNOTAVAIL case SL_ERROR_BSD_EADDRNOTAVAIL: error = EADDRNOTAVAIL; break; #endif #if ENETUNREACH != SL_ERROR_BSD_ENETUNREACH case SL_ERROR_BSD_ENETUNREACH: error = ENETUNREACH; break; #endif #if ENOBUFS != SL_ERROR_BSD_ENOBUFS case SL_ERROR_BSD_ENOBUFS: error = ENOBUFS; break; #endif #if EISCONN != SL_ERROR_BSD_EISCONN case SL_ERROR_BSD_EISCONN: error = EISCONN; break; #endif #if ENOTCONN != SL_ERROR_BSD_ENOTCONN case SL_ERROR_BSD_ENOTCONN: error = ENOTCONN; break; 
#endif #if ETIMEDOUT != SL_ERROR_BSD_ETIMEDOUT case SL_ERROR_BSD_ETIMEDOUT: error = ETIMEDOUT; break; #endif #if ECONNREFUSED != SL_ERROR_BSD_ECONNREFUSED case SL_ERROR_BSD_ECONNREFUSED: error = ECONNREFUSED; break; #endif /* The cases below are proprietary driver errors, which can * be returned by the SimpleLink Driver, in various cases of failure. * Each is mapped to the corresponding BSD error. */ case SL_POOL_IS_EMPTY: case SL_RET_CODE_NO_FREE_ASYNC_BUFFERS_ERROR: case SL_RET_CODE_MALLOC_ERROR: error = ENOMEM; break; case SL_RET_CODE_INVALID_INPUT: case SL_EZEROLEN: case SL_ESMALLBUF: case SL_INVALPARAM: error = EINVAL; break; default: /* Do nothing .. * If no case is true, that means that the BSD error * code and the code returned by the NWP are either identical, * or no proprietary error has occurred. */ break; } return error; } static int simplelink_socket_family_from_posix(int family, int *family_sl) { switch (family) { case AF_INET: *family_sl = SL_AF_INET; break; case AF_INET6: *family_sl = SL_AF_INET6; break; default: return -EAFNOSUPPORT; } return 0; } static int simplelink_socket_type_from_posix(int type, int *type_sl) { switch (type) { case SOCK_STREAM: *type_sl = SL_SOCK_STREAM; break; case SOCK_DGRAM: *type_sl = SL_SOCK_DGRAM; break; case SOCK_RAW: *type_sl = SL_SOCK_RAW; break; default: return -ESOCKTNOSUPPORT; } return 0; } static int simplelink_socket_proto_from_zephyr(int proto, int *proto_sl) { if (proto >= IPPROTO_TLS_1_0 && proto <= IPPROTO_TLS_1_2) { *proto_sl = SL_SEC_SOCKET; } else if (proto >= IPPROTO_DTLS_1_0 && proto <= IPPROTO_DTLS_1_2) { /* SimpleLink doesn't handle DTLS yet! 
*/ return -EPROTONOSUPPORT; } else { switch (proto) { case IPPROTO_TCP: *proto_sl = SL_IPPROTO_TCP; break; case IPPROTO_UDP: *proto_sl = SL_IPPROTO_UDP; break; default: return -EPROTONOSUPPORT; } } return 0; } static int simplelink_socket(int family, int type, int proto) { uint8_t sec_method = SL_SO_SEC_METHOD_SSLv3_TLSV1_2; int sd; int retval = 0; int sl_proto = proto; int err; /* Map Zephyr socket.h family to SimpleLink's: */ err = simplelink_socket_family_from_posix(family, &family); if (err) { LOG_ERR("unsupported family: %d", family); retval = slcb_SetErrno(-err); goto exit; } /* Map Zephyr socket.h type to SimpleLink's: */ err = simplelink_socket_type_from_posix(type, &type); if (err) { LOG_ERR("unsupported type: %d", type); retval = slcb_SetErrno(-err); goto exit; } /* Map Zephyr protocols to TI's values: */ err = simplelink_socket_proto_from_zephyr(proto, &sl_proto); if (err) { LOG_ERR("unsupported proto: %d", proto); retval = slcb_SetErrno(-err); goto exit; } sd = sl_Socket(family, type, sl_proto); if (sd >= 0) { if (IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS) && sl_proto == SL_SEC_SOCKET) { /* Now, set specific TLS version via setsockopt(): */ sec_method = (proto - IPPROTO_TLS_1_0) + SL_SO_SEC_METHOD_TLSV1; retval = sl_SetSockOpt(sd, SL_SOL_SOCKET, SL_SO_SECMETHOD, &sec_method, sizeof(sec_method)); if (retval < 0) { retval = slcb_SetErrno(EPROTONOSUPPORT); (void)sl_Close(sd); goto exit; } } } retval = sd; if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } exit: return retval; } static int simplelink_close(void *obj) { int sd = OBJ_TO_SD(obj); int retval; retval = sl_Close(sd); if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } return retval; } static SlSockAddr_t *translate_z_to_sl_addrlen(socklen_t addrlen, SlSockAddrIn_t *sl_addr_in, SlSockAddrIn6_t *sl_addr_in6, SlSocklen_t *sl_addrlen) { SlSockAddr_t *sl_addr = NULL; if (addrlen == sizeof(struct sockaddr_in)) { *sl_addrlen = sizeof(SlSockAddrIn_t); sl_addr = (SlSockAddr_t 
*)sl_addr_in; } else if (addrlen == sizeof(struct sockaddr_in6)) { *sl_addrlen = sizeof(SlSockAddrIn6_t); sl_addr = (SlSockAddr_t *)sl_addr_in6; } return sl_addr; } static SlSockAddr_t *translate_z_to_sl_addrs(const struct sockaddr *addr, socklen_t addrlen, SlSockAddrIn_t *sl_addr_in, SlSockAddrIn6_t *sl_addr_in6, SlSocklen_t *sl_addrlen) { SlSockAddr_t *sl_addr = NULL; if (addrlen == sizeof(struct sockaddr_in)) { struct sockaddr_in *z_sockaddr_in = (struct sockaddr_in *)addr; *sl_addrlen = sizeof(SlSockAddrIn_t); sl_addr_in->sin_family = SL_AF_INET; sl_addr_in->sin_port = z_sockaddr_in->sin_port; sl_addr_in->sin_addr.s_addr = z_sockaddr_in->sin_addr.s_addr; sl_addr = (SlSockAddr_t *)sl_addr_in; } else if (addrlen == sizeof(struct sockaddr_in6)) { struct sockaddr_in6 *z_sockaddr_in6 = (struct sockaddr_in6 *)addr; *sl_addrlen = sizeof(SlSockAddrIn6_t); sl_addr_in6->sin6_family = SL_AF_INET6; sl_addr_in6->sin6_port = z_sockaddr_in6->sin6_port; memcpy(sl_addr_in6->sin6_addr._S6_un._S6_u32, z_sockaddr_in6->sin6_addr.s6_addr, sizeof(sl_addr_in6->sin6_addr._S6_un._S6_u32)); sl_addr = (SlSockAddr_t *)sl_addr_in6; } return sl_addr; } static void translate_sl_to_z_addr(SlSockAddr_t *sl_addr, SlSocklen_t sl_addrlen, struct sockaddr *addr, socklen_t *addrlen) { SlSockAddrIn_t *sl_addr_in; SlSockAddrIn6_t *sl_addr_in6; if (sl_addr->sa_family == SL_AF_INET) { if (sl_addrlen == (SlSocklen_t)sizeof(SlSockAddrIn_t)) { struct sockaddr_in *z_sockaddr_in = (struct sockaddr_in *)addr; sl_addr_in = (SlSockAddrIn_t *)sl_addr; z_sockaddr_in->sin_family = AF_INET; z_sockaddr_in->sin_port = sl_addr_in->sin_port; z_sockaddr_in->sin_addr.s_addr = sl_addr_in->sin_addr.s_addr; *addrlen = sizeof(struct sockaddr_in); } else { *addrlen = sl_addrlen; } } else if (sl_addr->sa_family == SL_AF_INET6) { if (sl_addrlen == sizeof(SlSockAddrIn6_t)) { struct sockaddr_in6 *z_sockaddr_in6 = (struct sockaddr_in6 *)addr; sl_addr_in6 = (SlSockAddrIn6_t *)sl_addr; z_sockaddr_in6->sin6_family = AF_INET6; 
z_sockaddr_in6->sin6_port = sl_addr_in6->sin6_port; z_sockaddr_in6->sin6_scope_id = (uint8_t)sl_addr_in6->sin6_scope_id; memcpy(z_sockaddr_in6->sin6_addr.s6_addr, sl_addr_in6->sin6_addr._S6_un._S6_u32, sizeof(z_sockaddr_in6->sin6_addr.s6_addr)); *addrlen = sizeof(struct sockaddr_in6); } else { *addrlen = sl_addrlen; } } } static int simplelink_accept(void *obj, struct sockaddr *addr, socklen_t *addrlen) { int sd = OBJ_TO_SD(obj); int retval; SlSockAddr_t *sl_addr; SlSockAddrIn_t sl_addr_in; SlSockAddrIn6_t sl_addr_in6; SlSocklen_t sl_addrlen; if ((addrlen == NULL) || (addr == NULL)) { retval = SL_RET_CODE_INVALID_INPUT; goto exit; } /* Translate between Zephyr's and SimpleLink's sockaddr's: */ sl_addr = translate_z_to_sl_addrlen(*addrlen, &sl_addr_in, &sl_addr_in6, &sl_addrlen); if (sl_addr == NULL) { retval = SL_RET_CODE_INVALID_INPUT; goto exit; } retval = sl_Accept(sd, sl_addr, &sl_addrlen); if (retval < 0) { goto exit; } /* Translate returned sl_addr into *addr and set *addrlen: */ translate_sl_to_z_addr(sl_addr, sl_addrlen, addr, addrlen); exit: if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } return retval; } static int simplelink_bind(void *obj, const struct sockaddr *addr, socklen_t addrlen) { int sd = OBJ_TO_SD(obj); int retval; SlSockAddr_t *sl_addr; SlSockAddrIn_t sl_addr_in; SlSockAddrIn6_t sl_addr_in6; SlSocklen_t sl_addrlen; if (addr == NULL) { retval = slcb_SetErrno(EISDIR); return retval; } /* Translate to sl_Bind() parameters: */ sl_addr = translate_z_to_sl_addrs(addr, addrlen, &sl_addr_in, &sl_addr_in6, &sl_addrlen); if (sl_addr == NULL) { retval = SL_RET_CODE_INVALID_INPUT; goto exit; } retval = sl_Bind(sd, sl_addr, sl_addrlen); exit: if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } return retval; } static int simplelink_listen(void *obj, int backlog) { int sd = OBJ_TO_SD(obj); int retval; retval = (int)sl_Listen(sd, backlog); if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } return retval; } static int 
simplelink_connect(void *obj, const struct sockaddr *addr, socklen_t addrlen) { int sd = OBJ_TO_SD(obj); int retval; SlSockAddr_t *sl_addr; SlSockAddrIn_t sl_addr_in; SlSockAddrIn6_t sl_addr_in6; SlSocklen_t sl_addrlen; __ASSERT_NO_MSG(addr); /* Translate to sl_Connect() parameters: */ sl_addr = translate_z_to_sl_addrs(addr, addrlen, &sl_addr_in, &sl_addr_in6, &sl_addrlen); if (sl_addr == NULL) { retval = SL_RET_CODE_INVALID_INPUT; goto exit; } retval = sl_Connect(sd, sl_addr, sl_addrlen); /* TBD: Until we have a good way to get correct date from Zephyr, * log a date validation error as a warning, but continue connection: */ if (retval == SL_ERROR_BSD_ESECDATEERROR) { LOG_WRN("Failed certificate date validation: %d", retval); retval = 0; } /* Warn users when root CA is not in the certificate catalog. * For enhanced security, users should update the catalog with the * certificates for sites the device is expected to connect to. Note * the connection is established successfully even when the root CA * is not part of the catalog. */ if (retval == SL_ERROR_BSD_ESECUNKNOWNROOTCA) { LOG_WRN("Unknown root CA used. 
For proper security, please " "use a root CA that is part of the certificate " "catalog in production systems."); retval = 0; } exit: if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } return retval; } #define ONE_THOUSAND 1000 static const struct socket_op_vtable simplelink_socket_fd_op_vtable; static int simplelink_poll(struct zsock_pollfd *fds, int nfds, int msecs) { int max_sd = 0; struct SlTimeval_t tv, *ptv; SlFdSet_t rfds; /* Set of read file descriptors */ SlFdSet_t wfds; /* Set of write file descriptors */ int i, retval, sd; void *obj; if (nfds > SL_FD_SETSIZE) { retval = slcb_SetErrno(EINVAL); goto exit; } /* Convert time to SlTimeval struct values: */ if (msecs == SYS_FOREVER_MS) { ptv = NULL; } else { tv.tv_sec = msecs / ONE_THOUSAND; tv.tv_usec = (msecs % ONE_THOUSAND) * ONE_THOUSAND; ptv = &tv; } /* Setup read and write fds for select, based on pollfd fields: */ SL_SOCKET_FD_ZERO(&rfds); SL_SOCKET_FD_ZERO(&wfds); for (i = 0; i < nfds; i++) { fds[i].revents = 0; if (fds[i].fd < 0) { continue; } else { obj = zvfs_get_fd_obj(fds[i].fd, (const struct fd_op_vtable *) &simplelink_socket_fd_op_vtable, ENOTSUP); if (obj != NULL) { /* Offloaded socket found. */ sd = OBJ_TO_SD(obj); } else { /* Non-offloaded socket, return an error. 
*/ retval = slcb_SetErrno(EINVAL); goto exit; } } if (fds[i].events & ZSOCK_POLLIN) { SL_SOCKET_FD_SET(sd, &rfds); } if (fds[i].events & ZSOCK_POLLOUT) { SL_SOCKET_FD_SET(sd, &wfds); } if (sd > max_sd) { max_sd = sd; } } /* Wait for requested read and write fds to be ready: */ retval = sl_Select(max_sd + 1, &rfds, &wfds, NULL, ptv); if (retval > 0) { for (i = 0; i < nfds; i++) { if (fds[i].fd >= 0) { obj = zvfs_get_fd_obj( fds[i].fd, (const struct fd_op_vtable *) &simplelink_socket_fd_op_vtable, ENOTSUP); sd = OBJ_TO_SD(obj); if (SL_SOCKET_FD_ISSET(sd, &rfds)) { fds[i].revents |= ZSOCK_POLLIN; } if (SL_SOCKET_FD_ISSET(sd, &wfds)) { fds[i].revents |= ZSOCK_POLLOUT; } } } } if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } exit: return retval; } #ifdef CONFIG_NET_SOCKETS_SOCKOPT_TLS /* Iterate through the list of Zephyr's credential types, and * map to SimpleLink values, then set stored filenames * via SimpleLink's sl_SetSockOpt() */ static int map_credentials(int sd, const void *optval, socklen_t optlen) { sec_tag_t *sec_tags = (sec_tag_t *)optval; int retval = 0; int sec_tags_len; sec_tag_t tag; int opt; int i; struct tls_credential *cert; if ((optlen % sizeof(sec_tag_t)) != 0 || (optlen == 0)) { retval = EINVAL; goto exit; } else { sec_tags_len = optlen / sizeof(sec_tag_t); } /* For each tag, retrieve the credentials value and type: */ for (i = 0; i < sec_tags_len; i++) { tag = sec_tags[i]; cert = credential_next_get(tag, NULL); while (cert != NULL) { /* Map Zephyr cert types to Simplelink cert options: */ switch (cert->type) { case TLS_CREDENTIAL_CA_CERTIFICATE: opt = SL_SO_SECURE_FILES_CA_FILE_NAME; break; case TLS_CREDENTIAL_SERVER_CERTIFICATE: opt = SL_SO_SECURE_FILES_CERTIFICATE_FILE_NAME; break; case TLS_CREDENTIAL_PRIVATE_KEY: opt = SL_SO_SECURE_FILES_PRIVATE_KEY_FILE_NAME; break; case TLS_CREDENTIAL_NONE: case TLS_CREDENTIAL_PSK: case TLS_CREDENTIAL_PSK_ID: default: /* Not handled by SimpleLink: */ retval = EINVAL; goto exit; } retval = 
sl_SetSockOpt(sd, SL_SOL_SOCKET, opt, cert->buf, (SlSocklen_t)cert->len); if (retval < 0) { retval = getErrno(retval); break; } cert = credential_next_get(tag, cert); } } exit: return retval; } #else static int map_credentials(int sd, const void *optval, socklen_t optlen) { return 0; } #endif /* CONFIG_NET_SOCKETS_SOCKOPT_TLS */ /* Needed to keep line lengths < 80: */ #define _SEC_DOMAIN_VERIF SL_SO_SECURE_DOMAIN_NAME_VERIFICATION static int simplelink_setsockopt(void *obj, int level, int optname, const void *optval, socklen_t optlen) { int sd = OBJ_TO_SD(obj); int retval; if (IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS) && level == SOL_TLS) { /* Handle Zephyr's SOL_TLS secure socket options: */ switch (optname) { case TLS_SEC_TAG_LIST: /* Bind credential filenames to this socket: */ retval = map_credentials(sd, optval, optlen); if (retval != 0) { retval = slcb_SetErrno(retval); goto exit; } break; case TLS_HOSTNAME: retval = sl_SetSockOpt(sd, SL_SOL_SOCKET, _SEC_DOMAIN_VERIF, (const char *)optval, optlen); break; case TLS_PEER_VERIFY: if (optval) { /* * Not currently supported. Verification * is automatically performed if a CA * certificate is set. We are returning * success here to allow * mqtt_client_tls_connect() * to proceed, given it requires * verification and it is indeed * performed when the cert is set. */ if (*(uint32_t *)optval != 2U) { retval = slcb_SetErrno(ENOTSUP); goto exit; } else { retval = 0; } } else { retval = slcb_SetErrno(EINVAL); goto exit; } break; case TLS_CIPHERSUITE_LIST: case TLS_DTLS_ROLE: /* Not yet supported: */ retval = slcb_SetErrno(ENOTSUP); goto exit; default: retval = slcb_SetErrno(EINVAL); goto exit; } } else { /* Can be SOL_SOCKET or TI specific: */ /* Note: this logic should match SimpleLink SDK's socket.c: */ switch (optname) { case TCP_NODELAY: if (optval) { /* if user wishes to have TCP_NODELAY = FALSE, * we return EINVAL and fail in the cases below. 
*/ if (*(uint32_t *)optval) { retval = 0; goto exit; } } /* These sock opts aren't supported by the cc32xx * network stack, so we ignore them and set errno to * EINVAL in order to not break "off-the-shelf" BSD * code. */ case SO_BROADCAST: case SO_REUSEADDR: case SO_SNDBUF: retval = slcb_SetErrno(EINVAL); goto exit; default: break; } retval = sl_SetSockOpt(sd, SL_SOL_SOCKET, optname, optval, (SlSocklen_t)optlen); } if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } exit: return retval; } static int simplelink_getsockopt(void *obj, int level, int optname, void *optval, socklen_t *optlen) { int sd = OBJ_TO_SD(obj); int retval; if (IS_ENABLED(CONFIG_NET_SOCKETS_SOCKOPT_TLS) && level == SOL_TLS) { /* Handle Zephyr's SOL_TLS secure socket options: */ switch (optname) { case TLS_SEC_TAG_LIST: case TLS_CIPHERSUITE_LIST: case TLS_CIPHERSUITE_USED: /* Not yet supported: */ retval = slcb_SetErrno(ENOTSUP); goto exit; default: retval = slcb_SetErrno(EINVAL); goto exit; } } else { /* Can be SOL_SOCKET or TI specific: */ /* Note: this logic should match SimpleLink SDK's socket.c: */ switch (optname) { /* TCP_NODELAY always set by the NWP, so return True */ case TCP_NODELAY: if (optval) { (*(_u32 *)optval) = TRUE; retval = 0; goto exit; } /* These sock opts aren't supported by the cc32xx * network stack, so we silently ignore them and set * errno to EINVAL in order to not break "off-the-shelf" * BSD code. */ case SO_BROADCAST: case SO_REUSEADDR: case SO_SNDBUF: retval = slcb_SetErrno(EINVAL); goto exit; default: break; } retval = sl_GetSockOpt(sd, SL_SOL_SOCKET, optname, optval, (SlSocklen_t *)optlen); } if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } exit: return retval; } /* SimpleLink does not support flags in recv. * However, to enable more Zephyr apps to use this socket_offload, rather than * failing with ENOTSUP, we can closely emulate the MSG_DONTWAIT feature using * SimpleLink socket options. 
*/ static int handle_recv_flags(int sd, int flags, bool set, int *nb_enabled) { ssize_t retval = 0; SlSocklen_t optlen = sizeof(SlSockNonblocking_t); SlSockNonblocking_t enableOption; if (flags & ZSOCK_MSG_PEEK) { retval = ENOTSUP; } else if (flags & ZSOCK_MSG_DONTWAIT) { if (set) { /* Get previous state, to restore later: */ sl_GetSockOpt(sd, SL_SOL_SOCKET, SL_SO_NONBLOCKING, (_u8 *)&enableOption, &optlen); *nb_enabled = enableOption.NonBlockingEnabled; /* Now, set to non_blocking if not already set: */ if (!*nb_enabled) { enableOption.NonBlockingEnabled = 1; sl_SetSockOpt(sd, SL_SOL_SOCKET, SL_SO_NONBLOCKING, (_u8 *)&enableOption, sizeof(enableOption)); } } else { /* Restore socket to previous state: */ enableOption.NonBlockingEnabled = *nb_enabled; sl_SetSockOpt(sd, SL_SOL_SOCKET, SL_SO_NONBLOCKING, (_u8 *)&enableOption, sizeof(enableOption)); } } return retval; } static ssize_t simplelink_recvfrom(void *obj, void *buf, size_t len, int flags, struct sockaddr *from, socklen_t *fromlen) { int sd = OBJ_TO_SD(obj); ssize_t retval; SlSockAddr_t *sl_addr; SlSockAddrIn_t sl_addr_in; SlSockAddrIn6_t sl_addr_in6; SlSocklen_t sl_addrlen; int nb_enabled; retval = handle_recv_flags(sd, flags, TRUE, &nb_enabled); if (!retval) { /* Translate to sl_RecvFrom() parameters: */ if (fromlen != NULL) { sl_addr = translate_z_to_sl_addrlen(*fromlen, &sl_addr_in, &sl_addr_in6, &sl_addrlen); retval = (ssize_t)sl_RecvFrom(sd, buf, len, 0, sl_addr, &sl_addrlen); } else { retval = (ssize_t)sl_Recv(sd, buf, len, 0); } handle_recv_flags(sd, flags, FALSE, &nb_enabled); if (retval >= 0) { if (fromlen != NULL) { /* * Translate sl_addr into *addr and set * *addrlen */ translate_sl_to_z_addr(sl_addr, sl_addrlen, from, fromlen); } } else { retval = slcb_SetErrno(getErrno(retval)); } } else { retval = slcb_SetErrno(retval); } return retval; } static ssize_t simplelink_sendto(void *obj, const void *buf, size_t len, int flags, const struct sockaddr *to, socklen_t tolen) { int sd = OBJ_TO_SD(obj); 
ssize_t retval; SlSockAddr_t *sl_addr; SlSockAddrIn_t sl_addr_in; SlSockAddrIn6_t sl_addr_in6; SlSocklen_t sl_addrlen; if (to != NULL) { /* Translate to sl_SendTo() parameters: */ sl_addr = translate_z_to_sl_addrs(to, tolen, &sl_addr_in, &sl_addr_in6, &sl_addrlen); if (sl_addr == NULL) { retval = SL_RET_CODE_INVALID_INPUT; goto exit; } retval = sl_SendTo(sd, buf, (uint16_t)len, flags, sl_addr, sl_addrlen); } else { retval = (ssize_t)sl_Send(sd, buf, len, flags); } exit: if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } return retval; } static ssize_t simplelink_sendmsg(void *obj, const struct msghdr *msg, int flags) { errno = -ENOTSUP; return -1; } /* Adds address info entry to a list */ static int set_addr_info(const struct SlNetUtil_addrInfo_t *sl_ai, struct zsock_addrinfo **res) { struct zsock_addrinfo *ai; struct sockaddr *ai_addr; int retval = 0; ai = calloc(1, sizeof(struct zsock_addrinfo)); if (!ai) { retval = DNS_EAI_MEMORY; goto exit; } else { /* Now, alloc the embedded sockaddr struct: */ ai_addr = calloc(1, sizeof(struct sockaddr)); if (!ai_addr) { retval = DNS_EAI_MEMORY; free(ai); goto exit; } } /* Now, fill in the fields of res (addrinfo struct): */ ai->ai_family = (sl_ai->ai_family == SL_AF_INET6 ? AF_INET6 : AF_INET); ai->ai_socktype = (sl_ai->ai_socktype == SLNETSOCK_SOCK_DGRAM ? SOCK_DGRAM : SOCK_STREAM); ai->ai_protocol = (sl_ai->ai_protocol == SLNETSOCK_PROTO_UDP ? 
IPPROTO_UDP : IPPROTO_TCP); /* Fill sockaddr struct fields based on family: */ if (ai->ai_family == AF_INET) { SlNetSock_AddrIn_t *sl_addr = (SlNetSock_AddrIn_t *)sl_ai->ai_addr; net_sin(ai_addr)->sin_family = ai->ai_family; net_sin(ai_addr)->sin_addr.s_addr = sl_addr->sin_addr.s_addr; net_sin(ai_addr)->sin_port = sl_addr->sin_port; ai->ai_addrlen = sizeof(struct sockaddr_in); } else { SlNetSock_AddrIn6_t *sl_addr = (SlNetSock_AddrIn6_t *)sl_ai->ai_addr; net_sin6(ai_addr)->sin6_family = ai->ai_family; net_sin6(ai_addr)->sin6_addr.s6_addr32[0] = sl_addr->sin6_addr._S6_un._S6_u32[0]; net_sin6(ai_addr)->sin6_addr.s6_addr32[1] = sl_addr->sin6_addr._S6_un._S6_u32[1]; net_sin6(ai_addr)->sin6_addr.s6_addr32[2] = sl_addr->sin6_addr._S6_un._S6_u32[2]; net_sin6(ai_addr)->sin6_addr.s6_addr32[3] = sl_addr->sin6_addr._S6_un._S6_u32[3]; net_sin6(ai_addr)->sin6_port = sl_addr->sin6_port; ai->ai_addrlen = sizeof(struct sockaddr_in6); } ai->ai_addr = ai_addr; ai->ai_next = *res; *res = ai; exit: return retval; } static int simplelink_getaddrinfo(const char *node, const char *service, const struct zsock_addrinfo *hints, struct zsock_addrinfo **res) { int32_t retval; struct SlNetUtil_addrInfo_t sl_hints; struct SlNetUtil_addrInfo_t *sl_res, *sl_ai; /* Initialize sl_hints to the defaults */ memset(&sl_hints, 0, sizeof(sl_hints)); /* Check args: */ if (!res) { retval = DNS_EAI_NONAME; goto exit; } if (hints) { /* * SlNetUtil only supports AI_NUMERICHOST and AI_PASSIVE, so * the rest are ignored. */ sl_hints.ai_flags |= ((hints->ai_flags & AI_PASSIVE) ? SLNETUTIL_AI_PASSIVE : 0); sl_hints.ai_flags |= ((hints->ai_flags & AI_NUMERICHOST) ? SLNETUTIL_AI_NUMERICHOST : 0); if (hints->ai_family == AF_UNSPEC) { sl_hints.ai_family = SLNETSOCK_AF_UNSPEC; } else { sl_hints.ai_family = (hints->ai_family == AF_INET6 ? SLNETSOCK_AF_INET6 : SLNETSOCK_AF_INET); } if (hints->ai_socktype == 0) { sl_hints.ai_socktype = 0; } else { sl_hints.ai_socktype = (hints->ai_socktype == SOCK_DGRAM ? 
SLNETSOCK_SOCK_DGRAM : SLNETSOCK_SOCK_STREAM); } if (hints->ai_protocol == 0) { sl_hints.ai_protocol = 0; } else { sl_hints.ai_protocol = (hints->ai_protocol == IPPROTO_UDP ? SLNETSOCK_PROTO_UDP : SLNETSOCK_PROTO_TCP); } } /* Now, try to resolve host name: */ retval = SlNetUtil_getAddrInfo(SLNETIF_ID_1, node, service, &sl_hints, &sl_res); if (retval < 0) { LOG_ERR("Could not resolve name: %s, retval: %d", node, retval); retval = DNS_EAI_NONAME; goto exit; } sl_ai = sl_res; *res = NULL; while (sl_ai != NULL) { retval = set_addr_info(sl_ai, res); if (retval < 0) { LOG_ERR("Unable to set address info, retval: %d", retval); goto exit; } sl_ai = sl_ai->ai_next; } SlNetUtil_freeAddrInfo(sl_res); exit: return retval; } static void simplelink_freeaddrinfo(struct zsock_addrinfo *res) { __ASSERT_NO_MSG(res); free(res->ai_addr); free(res); } static int simplelink_fcntl(int sd, int cmd, va_list args) { int retval = 0; SlSockNonblocking_t enableOption; SlSocklen_t optlen = sizeof(SlSockNonblocking_t); switch (cmd) { case F_GETFL: retval = sl_GetSockOpt(sd, SL_SOL_SOCKET, SL_SO_NONBLOCKING, (_u8 *)&enableOption, &optlen); if (retval == 0) { if (enableOption.NonBlockingEnabled) { retval |= O_NONBLOCK; } } break; case F_SETFL: if ((va_arg(args, int) & O_NONBLOCK) != 0) { enableOption.NonBlockingEnabled = 1; } else { enableOption.NonBlockingEnabled = 0; } retval = sl_SetSockOpt(sd, SL_SOL_SOCKET, SL_SO_NONBLOCKING, &enableOption, optlen); break; default: LOG_ERR("Invalid command: %d", cmd); retval = slcb_SetErrno(EINVAL); goto exit; } if (retval < 0) { retval = slcb_SetErrno(getErrno(retval)); } exit: return retval; } static int simplelink_ioctl(void *obj, unsigned int request, va_list args) { int sd = OBJ_TO_SD(obj); switch (request) { case ZFD_IOCTL_POLL_PREPARE: return -EXDEV; case ZFD_IOCTL_POLL_UPDATE: return -EOPNOTSUPP; case ZFD_IOCTL_POLL_OFFLOAD: { struct zsock_pollfd *fds; int nfds; int timeout; fds = va_arg(args, struct zsock_pollfd *); nfds = va_arg(args, int); timeout 
= va_arg(args, int); return simplelink_poll(fds, nfds, timeout); } /* Otherwise, just forward to offloaded fcntl() * In Zephyr, fcntl() is just an alias of ioctl(). */ default: return simplelink_fcntl(sd, request, args); } } static ssize_t simplelink_read(void *obj, void *buffer, size_t count) { return simplelink_recvfrom(obj, buffer, count, 0, NULL, 0); } static ssize_t simplelink_write(void *obj, const void *buffer, size_t count) { return simplelink_sendto(obj, buffer, count, 0, NULL, 0); } static const struct socket_op_vtable simplelink_socket_fd_op_vtable = { .fd_vtable = { .read = simplelink_read, .write = simplelink_write, .close = simplelink_close, .ioctl = simplelink_ioctl, }, .bind = simplelink_bind, .connect = simplelink_connect, .listen = simplelink_listen, .accept = simplelink_socket_accept, .sendto = simplelink_sendto, .sendmsg = simplelink_sendmsg, .recvfrom = simplelink_recvfrom, .getsockopt = simplelink_getsockopt, .setsockopt = simplelink_setsockopt, }; static bool simplelink_is_supported(int family, int type, int proto) { int dummy; int err; err = simplelink_socket_family_from_posix(family, &dummy); if (err) { return false; } err = simplelink_socket_type_from_posix(type, &dummy); if (err) { return false; } err = simplelink_socket_proto_from_zephyr(proto, &dummy); if (err) { return false; } return true; } int simplelink_socket_create(int family, int type, int proto) { int fd = zvfs_reserve_fd(); int sock; if (fd < 0) { return -1; } sock = simplelink_socket(family, type, proto); if (sock < 0) { zvfs_free_fd(fd); return -1; } zvfs_finalize_typed_fd(fd, SD_TO_OBJ(sock), (const struct fd_op_vtable *)&simplelink_socket_fd_op_vtable, ZVFS_MODE_IFSOCK); return fd; } static int simplelink_socket_accept(void *obj, struct sockaddr *addr, socklen_t *addrlen) { int fd = zvfs_reserve_fd(); int sock; if (fd < 0) { return -1; } sock = simplelink_accept(obj, addr, addrlen); if (sock < 0) { zvfs_free_fd(fd); return -1; } zvfs_finalize_typed_fd(fd, SD_TO_OBJ(sock), 
(const struct fd_op_vtable *)&simplelink_socket_fd_op_vtable, ZVFS_MODE_IFSOCK); return fd; } #ifdef CONFIG_NET_SOCKETS_OFFLOAD NET_SOCKET_OFFLOAD_REGISTER(simplelink, CONFIG_NET_SOCKETS_OFFLOAD_PRIORITY, AF_UNSPEC, simplelink_is_supported, simplelink_socket_create); #endif void simplelink_sockets_init(void) { } const struct socket_dns_offload simplelink_dns_ops = { .getaddrinfo = simplelink_getaddrinfo, .freeaddrinfo = simplelink_freeaddrinfo, }; ```
/content/code_sandbox/drivers/wifi/simplelink/simplelink_sockets.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,585
```objective-c /* * * */ #ifndef ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_SUPPORT_H_ #define ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_SUPPORT_H_ #include <zephyr/net/wifi_mgmt.h> #ifdef __cplusplus extern "C" { #endif #define SSID_LEN_MAX (32) #define BSSID_LEN_MAX (6) /* Define ID for simplelink_wifi_cb to not conflict with WLAN event IDs: */ #define SIMPLELINK_WIFI_CB_IPACQUIRED \ (SL_WLAN_EVENT_MAX + SL_NETAPP_EVENT_IPV4_ACQUIRED) #define SIMPLELINK_WIFI_CB_IPV6ACQUIRED \ (SL_WLAN_EVENT_MAX + SL_NETAPP_EVENT_IPV6_ACQUIRED) struct sl_connect_state { uint32_t gateway_ip; uint8_t ssid[SSID_LEN_MAX + 1]; uint8_t bssid[BSSID_LEN_MAX]; uint32_t ip_addr; uint32_t sta_ip; uint32_t ipv6_addr[4]; int16_t error; }; /* Callback from SimpleLink Event Handlers: */ typedef void (*simplelink_wifi_cb_t)(uint32_t mgmt_event, struct sl_connect_state *conn); extern int z_simplelink_start_scan(void); extern void z_simplelink_get_scan_result(int index, struct wifi_scan_result *scan_result); extern void z_simplelink_get_mac(unsigned char *mac); extern int z_simplelink_init(simplelink_wifi_cb_t wifi_cb); extern int z_simplelink_connect(struct wifi_connect_req_params *params); extern int z_simplelink_disconnect(void); int simplelink_socket_create(int family, int type, int proto); #ifdef __cplusplus } #endif /** * @} */ #endif /* ZEPHYR_DRIVERS_WIFI_SIMPLELINK_SIMPLELINK_SUPPORT_H_ */ ```
/content/code_sandbox/drivers/wifi/simplelink/simplelink_support.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
361
```c /* */ #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/sys/winstream.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #include <zephyr/devicetree.h> #include <zephyr/cache.h> #include <adsp_memory.h> #include <mem_window.h> struct k_spinlock trace_lock; static struct sys_winstream *winstream; void winstream_console_trace_out(int8_t *str, size_t len) { if (len == 0) { return; } #ifdef CONFIG_ADSP_TRACE_SIMCALL register int a2 __asm__("a2") = 4; /* SYS_write */ register int a3 __asm__("a3") = 1; /* fd 1 == stdout */ register int a4 __asm__("a4") = (int)str; register int a5 __asm__("a5") = len; __asm__ volatile("simcall" : "+r"(a2), "+r"(a3) : "r"(a4), "r"(a5) : "memory"); #endif k_spinlock_key_t key = k_spin_lock(&trace_lock); sys_winstream_write(winstream, str, len); k_spin_unlock(&trace_lock, key); } int arch_printk_char_out(int c) { int8_t s = c; winstream_console_trace_out(&s, 1); return 0; } static void winstream_console_hook_install(void) { #if defined(CONFIG_STDOUT_CONSOLE) __stdout_hook_install(arch_printk_char_out); #endif #if defined(CONFIG_PRINTK) __printk_hook_install(arch_printk_char_out); #endif } static int winstream_console_init(void) { const struct device *dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console)); if (!device_is_ready(dev)) { return -ENODEV; } const struct mem_win_config *config = dev->config; void *buf = sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)config->mem_base); winstream = sys_winstream_init(buf, config->size); winstream_console_hook_install(); return 0; } SYS_INIT(winstream_console_init, PRE_KERNEL_1, CONFIG_CONSOLE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/console/winstream_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
495
```c /* * */ /** * @file * @brief UART-driven console * * * Serial console driver. * Hooks into the printk and fputc (for printf) modules. Poll driven. */ #include <zephyr/kernel.h> #include <stdio.h> #include <zephyr/types.h> #include <zephyr/sys/__assert.h> #include <errno.h> #include <ctype.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/uart.h> #include <zephyr/drivers/console/console.h> #include <zephyr/drivers/console/uart_console.h> #include <zephyr/toolchain.h> #include <zephyr/linker/sections.h> #include <zephyr/sys/atomic.h> #include <zephyr/sys/printk.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #include <zephyr/pm/device_runtime.h> #ifdef CONFIG_UART_CONSOLE_MCUMGR #include <zephyr/mgmt/mcumgr/transport/serial.h> #endif static const struct device *const uart_console_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console)); #ifdef CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS static uart_console_in_debug_hook_t debug_hook_in; void uart_console_in_debug_hook_install(uart_console_in_debug_hook_t hook) { debug_hook_in = hook; } static UART_CONSOLE_OUT_DEBUG_HOOK_SIG(debug_hook_out_nop) { ARG_UNUSED(c); return !UART_CONSOLE_DEBUG_HOOK_HANDLED; } static uart_console_out_debug_hook_t *debug_hook_out = debug_hook_out_nop; void uart_console_out_debug_hook_install(uart_console_out_debug_hook_t *hook) { debug_hook_out = hook; } #define HANDLE_DEBUG_HOOK_OUT(c) \ (debug_hook_out(c) == UART_CONSOLE_DEBUG_HOOK_HANDLED) #endif /* CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS */ #if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE) /** * * @brief Output one character to UART * * Outputs both line feed and carriage return in the case of a '\n'. * * @param c Character to output * * @return The character passed as input. 
*/ static int console_out(int c) { #ifdef CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS int handled_by_debug_server = HANDLE_DEBUG_HOOK_OUT(c); if (handled_by_debug_server) { return c; } #endif /* CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS */ if (pm_device_runtime_get(uart_console_dev) < 0) { /* Enabling the UART instance has failed but this * function MUST return the byte output. */ return c; } if ('\n' == c) { uart_poll_out(uart_console_dev, '\r'); } uart_poll_out(uart_console_dev, c); /* Use async put to avoid useless device suspension/resumption * when tranmiting chain of chars. * As errors cannot be returned, ignore the return value */ (void)pm_device_runtime_put_async(uart_console_dev, K_MSEC(1)); return c; } #endif #if defined(CONFIG_CONSOLE_HANDLER) static struct k_fifo *avail_queue; static struct k_fifo *lines_queue; static uint8_t (*completion_cb)(char *line, uint8_t len); /* Control characters */ #define BS 0x08 #define ESC 0x1b #define DEL 0x7f /* ANSI escape sequences */ #define ANSI_ESC '[' #define ANSI_UP 'A' #define ANSI_DOWN 'B' #define ANSI_FORWARD 'C' #define ANSI_BACKWARD 'D' #define ANSI_END 'F' #define ANSI_HOME 'H' #define ANSI_DEL '~' static int read_uart(const struct device *uart, uint8_t *buf, unsigned int size) { int rx; rx = uart_fifo_read(uart, buf, size); if (rx < 0) { /* Overrun issue. 
Stop the UART */ uart_irq_rx_disable(uart); return -EIO; } return rx; } static inline void cursor_forward(unsigned int count) { printk("\x1b[%uC", count); } static inline void cursor_backward(unsigned int count) { printk("\x1b[%uD", count); } static inline void cursor_save(void) { printk("\x1b[s"); } static inline void cursor_restore(void) { printk("\x1b[u"); } static void insert_char(char *pos, char c, uint8_t end) { char tmp; /* Echo back to console */ uart_poll_out(uart_console_dev, c); if (end == 0U) { *pos = c; return; } tmp = *pos; *(pos++) = c; cursor_save(); while (end-- > 0) { uart_poll_out(uart_console_dev, tmp); c = *pos; *(pos++) = tmp; tmp = c; } /* Move cursor back to right place */ cursor_restore(); } static void del_char(char *pos, uint8_t end) { uart_poll_out(uart_console_dev, '\b'); if (end == 0U) { uart_poll_out(uart_console_dev, ' '); uart_poll_out(uart_console_dev, '\b'); return; } cursor_save(); while (end-- > 0) { *pos = *(pos + 1); uart_poll_out(uart_console_dev, *(pos++)); } uart_poll_out(uart_console_dev, ' '); /* Move cursor back to right place */ cursor_restore(); } enum { ESC_ESC, ESC_ANSI, ESC_ANSI_FIRST, ESC_ANSI_VAL, ESC_ANSI_VAL_2, #ifdef CONFIG_UART_CONSOLE_MCUMGR ESC_MCUMGR_PKT_1, ESC_MCUMGR_PKT_2, ESC_MCUMGR_FRAG_1, ESC_MCUMGR_FRAG_2, #endif }; static atomic_t esc_state; static unsigned int ansi_val, ansi_val_2; static uint8_t cur, end; static void handle_ansi(uint8_t byte, char *line) { if (atomic_test_and_clear_bit(&esc_state, ESC_ANSI_FIRST)) { if (isdigit(byte) == 0) { ansi_val = 1U; goto ansi_cmd; } atomic_set_bit(&esc_state, ESC_ANSI_VAL); ansi_val = byte - '0'; ansi_val_2 = 0U; return; } if (atomic_test_bit(&esc_state, ESC_ANSI_VAL)) { if (isdigit(byte) != 0) { if (atomic_test_bit(&esc_state, ESC_ANSI_VAL_2)) { ansi_val_2 *= 10U; ansi_val_2 += byte - '0'; } else { ansi_val *= 10U; ansi_val += byte - '0'; } return; } /* Multi value sequence, e.g. 
Esc[Line;ColumnH */ if (byte == ';' && !atomic_test_and_set_bit(&esc_state, ESC_ANSI_VAL_2)) { return; } atomic_clear_bit(&esc_state, ESC_ANSI_VAL); atomic_clear_bit(&esc_state, ESC_ANSI_VAL_2); } ansi_cmd: switch (byte) { case ANSI_BACKWARD: if (ansi_val > cur) { break; } end += ansi_val; cur -= ansi_val; cursor_backward(ansi_val); break; case ANSI_FORWARD: if (ansi_val > end) { break; } end -= ansi_val; cur += ansi_val; cursor_forward(ansi_val); break; case ANSI_HOME: if (!cur) { break; } cursor_backward(cur); end += cur; cur = 0U; break; case ANSI_END: if (!end) { break; } cursor_forward(end); cur += end; end = 0U; break; case ANSI_DEL: if (!end) { break; } cursor_forward(1); del_char(&line[cur], --end); break; default: break; } atomic_clear_bit(&esc_state, ESC_ANSI); } #ifdef CONFIG_UART_CONSOLE_MCUMGR static void clear_mcumgr(void) { atomic_clear_bit(&esc_state, ESC_MCUMGR_PKT_1); atomic_clear_bit(&esc_state, ESC_MCUMGR_PKT_2); atomic_clear_bit(&esc_state, ESC_MCUMGR_FRAG_1); atomic_clear_bit(&esc_state, ESC_MCUMGR_FRAG_2); } /** * These states indicate whether an mcumgr frame is being received. */ #define CONSOLE_MCUMGR_STATE_NONE 1 #define CONSOLE_MCUMGR_STATE_HEADER 2 #define CONSOLE_MCUMGR_STATE_PAYLOAD 3 static int read_mcumgr_byte(uint8_t byte) { bool frag_1; bool frag_2; bool pkt_1; bool pkt_2; pkt_1 = atomic_test_bit(&esc_state, ESC_MCUMGR_PKT_1); pkt_2 = atomic_test_bit(&esc_state, ESC_MCUMGR_PKT_2); frag_1 = atomic_test_bit(&esc_state, ESC_MCUMGR_FRAG_1); frag_2 = atomic_test_bit(&esc_state, ESC_MCUMGR_FRAG_2); if (pkt_2 || frag_2) { /* Already fully framed. */ return CONSOLE_MCUMGR_STATE_PAYLOAD; } if (pkt_1) { if (byte == MCUMGR_SERIAL_HDR_PKT_2) { /* Final framing byte received. */ atomic_set_bit(&esc_state, ESC_MCUMGR_PKT_2); return CONSOLE_MCUMGR_STATE_PAYLOAD; } } else if (frag_1) { if (byte == MCUMGR_SERIAL_HDR_FRAG_2) { /* Final framing byte received. 
*/ atomic_set_bit(&esc_state, ESC_MCUMGR_FRAG_2); return CONSOLE_MCUMGR_STATE_PAYLOAD; } } else { if (byte == MCUMGR_SERIAL_HDR_PKT_1) { /* First framing byte received. */ atomic_set_bit(&esc_state, ESC_MCUMGR_PKT_1); return CONSOLE_MCUMGR_STATE_HEADER; } else if (byte == MCUMGR_SERIAL_HDR_FRAG_1) { /* First framing byte received. */ atomic_set_bit(&esc_state, ESC_MCUMGR_FRAG_1); return CONSOLE_MCUMGR_STATE_HEADER; } } /* Non-mcumgr byte received. */ return CONSOLE_MCUMGR_STATE_NONE; } /** * @brief Attempts to process a received byte as part of an mcumgr frame. * * @param cmd The console command currently being received. * @param byte The byte just received. * * @return true if the command being received is an mcumgr frame; false if it * is a plain console command. */ static bool handle_mcumgr(struct console_input *cmd, uint8_t byte) { int mcumgr_state; mcumgr_state = read_mcumgr_byte(byte); if (mcumgr_state == CONSOLE_MCUMGR_STATE_NONE) { /* Not an mcumgr command; let the normal console handling * process the byte. */ cmd->is_mcumgr = 0; return false; } /* The received byte is part of an mcumgr command. Process the byte * and return true to indicate that normal console handling should * ignore it. 
*/ if (cur + end < sizeof(cmd->line) - 1) { cmd->line[cur++] = byte; } if (mcumgr_state == CONSOLE_MCUMGR_STATE_PAYLOAD && byte == '\n') { cmd->line[cur + end] = '\0'; cmd->is_mcumgr = 1; k_fifo_put(lines_queue, cmd); clear_mcumgr(); cmd = NULL; cur = 0U; end = 0U; } return true; } #endif /* CONFIG_UART_CONSOLE_MCUMGR */ static void uart_console_isr(const struct device *unused, void *user_data) { ARG_UNUSED(unused); ARG_UNUSED(user_data); static uint8_t last_char = '\0'; while (uart_irq_update(uart_console_dev) > 0 && uart_irq_is_pending(uart_console_dev) > 0) { static struct console_input *cmd; uint8_t byte; int rx; rx = uart_irq_rx_ready(uart_console_dev); if (rx < 0) { return; } if (rx == 0) { continue; } /* Character(s) have been received */ rx = read_uart(uart_console_dev, &byte, 1); if (rx < 0) { return; } #ifdef CONFIG_UART_CONSOLE_DEBUG_SERVER_HOOKS if (debug_hook_in != NULL && debug_hook_in(byte) != 0) { /* * The input hook indicates that no further processing * should be done by this handler. */ continue; } #endif if (!cmd) { cmd = k_fifo_get(avail_queue, K_NO_WAIT); if (!cmd) { return; } } #ifdef CONFIG_UART_CONSOLE_MCUMGR /* Divert this byte from normal console handling if it is part * of an mcumgr frame. 
*/ if (handle_mcumgr(cmd, byte)) { continue; } #endif /* CONFIG_UART_CONSOLE_MCUMGR */ /* Handle ANSI escape mode */ if (atomic_test_bit(&esc_state, ESC_ANSI)) { handle_ansi(byte, cmd->line); continue; } /* Handle escape mode */ if (atomic_test_and_clear_bit(&esc_state, ESC_ESC)) { if (byte == ANSI_ESC) { atomic_set_bit(&esc_state, ESC_ANSI); atomic_set_bit(&esc_state, ESC_ANSI_FIRST); } continue; } /* Handle special control characters */ if (isprint(byte) == 0) { switch (byte) { case BS: case DEL: if (cur > 0) { del_char(&cmd->line[--cur], end); } break; case ESC: atomic_set_bit(&esc_state, ESC_ESC); break; case '\n': if (last_char == '\r') { /* break to avoid double line*/ break; } case '\r': cmd->line[cur + end] = '\0'; uart_poll_out(uart_console_dev, '\r'); uart_poll_out(uart_console_dev, '\n'); cur = 0U; end = 0U; k_fifo_put(lines_queue, cmd); cmd = NULL; break; case '\t': if (completion_cb && !end) { cur += completion_cb(cmd->line, cur); } break; default: break; } /* Ignore characters if there's no more buffer space */ } else if (cur + end < sizeof(cmd->line) - 1) { insert_char(&cmd->line[cur++], byte, end); } last_char = byte; } } static void console_input_init(void) { uint8_t c; uart_irq_rx_disable(uart_console_dev); uart_irq_tx_disable(uart_console_dev); uart_irq_callback_set(uart_console_dev, uart_console_isr); /* Drain the fifo */ while (uart_irq_rx_ready(uart_console_dev) > 0) { uart_fifo_read(uart_console_dev, &c, 1); } uart_irq_rx_enable(uart_console_dev); } void uart_register_input(struct k_fifo *avail, struct k_fifo *lines, uint8_t (*completion)(char *str, uint8_t len)) { avail_queue = avail; lines_queue = lines; completion_cb = completion; console_input_init(); } #else void uart_register_input(struct k_fifo *avail, struct k_fifo *lines, uint8_t (*completion)(char *str, uint8_t len)) { ARG_UNUSED(avail); ARG_UNUSED(lines); ARG_UNUSED(completion); } #endif /** * @brief Install printk/stdout hook for UART console output */ static void 
uart_console_hook_install(void) { #if defined(CONFIG_STDOUT_CONSOLE) __stdout_hook_install(console_out); #endif #if defined(CONFIG_PRINTK) __printk_hook_install(console_out); #endif } /** * @brief Initialize one UART as the console/debug port * * @return 0 if successful, otherwise failed. */ static int uart_console_init(void) { if (!device_is_ready(uart_console_dev)) { return -ENODEV; } uart_console_hook_install(); return 0; } /* UART console initializes after the UART device itself */ SYS_INIT(uart_console_init, #if defined(CONFIG_EARLY_CONSOLE) PRE_KERNEL_1, #else POST_KERNEL, #endif CONFIG_CONSOLE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/console/uart_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,758
```c /* */ #include <xtensa/simcall.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE) /** * @brief Output one character to SIMULATOR console * @param c Character to output * @return The character passed as input. */ int arch_printk_char_out(int c) { char buf[16]; register int a2 __asm__ ("a2") = SYS_write; register int a3 __asm__ ("a3") = 1; register char *a4 __asm__ ("a4") = buf; register int a5 __asm__ ("a5") = 1; register int ret_val __asm__ ("a2"); register int ret_err __asm__ ("a3"); buf[0] = (char)c; __asm__ volatile ("simcall" : "=a" (ret_val), "=a" (ret_err) : "a" (a2), "a" (a3), "a" (a4), "a" (a5) : "memory"); return c; } #endif /** * @brief Install printk/stdout hook for Xtensa Simulator console output */ static void xt_sim_console_hook_install(void) { #if defined(CONFIG_STDOUT_CONSOLE) __stdout_hook_install(arch_printk_char_out); #endif #if defined(CONFIG_PRINTK) __printk_hook_install(arch_printk_char_out); #endif } /** * @brief Initialize the console/debug port * @return 0 if successful, otherwise failed. */ static int xt_sim_console_init(void) { xt_sim_console_hook_install(); return 0; } /* UART console initializes after the UART device itself */ SYS_INIT(xt_sim_console_init, #if defined(CONFIG_EARLY_CONSOLE) PRE_KERNEL_1, #else POST_KERNEL, #endif CONFIG_CONSOLE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/console/xtensa_sim_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
427
```c /* * * */ #include <stdlib.h> #include <string.h> #include "simplelink_log.h" LOG_MODULE_DECLARE(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <stdint.h> #include <ti/drivers/net/wifi/simplelink.h> #include <ti/net/slnetif.h> #include <ti/net/slnetutils.h> #include <ti/drivers/net/wifi/slnetifwifi.h> #include <CC3220SF_LAUNCHXL.h> #include "simplelink_support.h" #define SET_STATUS_BIT(status, bit) {status |= (1 << (bit)); } #define CLR_STATUS_BIT(status, bit) {status &= ~(1 << (bit)); } #define GET_STATUS_BIT(status, bit) (0 != (status & (1 << (bit)))) #define SL_STOP_TIMEOUT (200) #undef ASSERT_ON_ERROR #define ASSERT_ON_ERROR(ret, e) __ASSERT(ret >= 0, e) #define DEVICE_ERROR "See \"DEVICE ERRORS CODES\" in SimpleLink errors.h" #define WLAN_ERROR "See \"WLAN ERRORS CODES\" in SimpleLink errors.h" #define NETAPP_ERROR "See \"NETAPP ERRORS CODES\" in SimpleLink errors.h" #define CHANNEL_MASK_ALL (0x1FFF) #define RSSI_TH_MAX (-95) #define SLNET_IF_WIFI_PRIO (5) #define SLNET_IF_WIFI_NAME "CC32xx" enum status_bits { /* Network Processor is powered up */ STATUS_BIT_NWP_INIT = 0, /* The device is connected to the AP */ STATUS_BIT_CONNECTION, /* The device has leased IP to any connected client */ STATUS_BIT_IP_LEASED, /* The device has acquired an IP */ STATUS_BIT_IP_ACQUIRED, /* The device has acquired an IPv6 address */ STATUS_BIT_IPV6_ACQUIRED, }; struct nwp_status { /* Callback to notify net & wifi mgmt events from SL Event Handlers */ simplelink_wifi_cb_t cb; /* Status Variables */ uint32_t status; /* The state of the NWP */ uint32_t role; /* The device's role (STA, P2P or AP) */ /* Scan results table: */ SlWlanNetworkEntry_t net_entries[CONFIG_WIFI_SIMPLELINK_SCAN_COUNT]; }; /* STA/AP mode state: shared with simplelink.c */ struct sl_connect_state sl_conn; /* Network Coprocessor state, including role and connection state: */ static struct nwp_status nwp; /* Minimal configuration of SlNetIfWifi for Zephyr */ static SlNetIf_Config_t 
slnetifwifi_config_zephyr = { .sockCreate = SlNetIfWifi_socket, .sockClose = SlNetIfWifi_close, .sockSelect = SlNetIfWifi_select, .sockSetOpt = SlNetIfWifi_setSockOpt, .sockGetOpt = SlNetIfWifi_getSockOpt, .sockRecvFrom = SlNetIfWifi_recvFrom, .sockSendTo = SlNetIfWifi_sendTo, .utilGetHostByName = SlNetIfWifi_getHostByName, .ifGetIPAddr = SlNetIfWifi_getIPAddr, .ifGetConnectionStatus = SlNetIfWifi_getConnectionStatus }; /* Configure the device to a default state, resetting previous parameters .*/ static int32_t configure_simplelink(void) { int32_t retval = -1; int32_t mode = -1; uint32_t if_bitmap = 0U; SlWlanScanParamCommand_t scan_default = { 0 }; SlWlanRxFilterOperationCommandBuff_t rx_filterid_mask = { { 0 } }; uint8_t config_opt; uint8_t power; #if defined(CONFIG_NET_IPV4) && defined(CONFIG_NET_CONFIG_MY_IPV4_ADDR) struct in_addr addr4; SlNetCfgIpV4Args_t ipV4; memset(&ipV4, 0, sizeof(ipV4)); #endif /* Turn on NWP */ mode = sl_Start(0, 0, 0); ASSERT_ON_ERROR(mode, DEVICE_ERROR); if (mode != ROLE_STA) { /* Set NWP role as STA */ mode = sl_WlanSetMode(ROLE_STA); ASSERT_ON_ERROR(mode, WLAN_ERROR); /* For changes to take affect, we restart the NWP */ retval = sl_Stop(SL_STOP_TIMEOUT); ASSERT_ON_ERROR(retval, DEVICE_ERROR); mode = sl_Start(0, 0, 0); ASSERT_ON_ERROR(mode, DEVICE_ERROR); } if (mode != ROLE_STA) { LOG_ERR("Failed to configure NWP to default state"); return -1; } /* Use Fast Connect Policy, to automatically connect to last AP: */ retval = sl_WlanPolicySet(SL_WLAN_POLICY_CONNECTION, SL_WLAN_CONNECTION_POLICY(1, 1, 0, 0), NULL, 0); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* Disable Auto Provisioning*/ retval = sl_WlanProvisioning(SL_WLAN_PROVISIONING_CMD_STOP, 0xFF, 0, NULL, 0x0); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* Delete existing profiles */ retval = sl_WlanProfileDel(0xFF); ASSERT_ON_ERROR(retval, WLAN_ERROR); #if defined(CONFIG_NET_IPV4) && defined(CONFIG_NET_CONFIG_MY_IPV4_ADDR) if (net_addr_pton(AF_INET, CONFIG_NET_CONFIG_MY_IPV4_ADDR, &addr4) < 
0) { LOG_ERR("Invalid CONFIG_NET_CONFIG_MY_IPV4_ADDR"); return -1; } ipV4.Ip = (_u32)SL_IPV4_VAL(addr4.s4_addr[0], addr4.s4_addr[1], addr4.s4_addr[2], addr4.s4_addr[3]); #if defined(CONFIG_NET_CONFIG_MY_IPV4_GW) if (strcmp(CONFIG_NET_CONFIG_MY_IPV4_GW, "") != 0) { if (net_addr_pton(AF_INET, CONFIG_NET_CONFIG_MY_IPV4_GW, &addr4) < 0) { LOG_ERR("Invalid CONFIG_NET_CONFIG_MY_IPV4_GW"); return -1; } ipV4.IpGateway = (_u32)SL_IPV4_VAL(addr4.s4_addr[0], addr4.s4_addr[1], addr4.s4_addr[2], addr4.s4_addr[3]); } #endif #if defined(CONFIG_NET_CONFIG_MY_IPV4_NETMASK) if (strcmp(CONFIG_NET_CONFIG_MY_IPV4_NETMASK, "") != 0) { if (net_addr_pton(AF_INET, CONFIG_NET_CONFIG_MY_IPV4_NETMASK, &addr4) < 0) { LOG_ERR("Invalid CONFIG_NET_CONFIG_MY_IPV4_NETMASK"); return -1; } ipV4.IpMask = (_u32)SL_IPV4_VAL(addr4.s4_addr[0], addr4.s4_addr[1], addr4.s4_addr[2], addr4.s4_addr[3]); } #endif retval = sl_NetCfgSet(SL_NETCFG_IPV4_STA_ADDR_MODE, SL_NETCFG_ADDR_STATIC, sizeof(SlNetCfgIpV4Args_t), (_u8 *)&ipV4); ASSERT_ON_ERROR(retval, NETAPP_ERROR); #else /* enable DHCP client */ retval = sl_NetCfgSet(SL_NETCFG_IPV4_STA_ADDR_MODE, SL_NETCFG_ADDR_DHCP, 0, 0); ASSERT_ON_ERROR(retval, NETAPP_ERROR); #endif #if defined(CONFIG_NET_IPV6) if_bitmap = ~0; #else /* Disable ipv6 */ if_bitmap = !(SL_NETCFG_IF_IPV6_STA_LOCAL | SL_NETCFG_IF_IPV6_STA_GLOBAL); #endif retval = sl_NetCfgSet(SL_NETCFG_IF, SL_NETCFG_IF_STATE, sizeof(if_bitmap), (const unsigned char *)&if_bitmap); ASSERT_ON_ERROR(retval, NETAPP_ERROR); /* Configure scan parameters to default */ scan_default.ChannelsMask = CHANNEL_MASK_ALL; scan_default.RssiThreshold = RSSI_TH_MAX; retval = sl_WlanSet(SL_WLAN_CFG_GENERAL_PARAM_ID, SL_WLAN_GENERAL_PARAM_OPT_SCAN_PARAMS, sizeof(scan_default), (uint8_t *)&scan_default); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* Disable scans: In other words, use "one-shot" scanning */ config_opt = SL_WLAN_SCAN_POLICY(0, 0); retval = sl_WlanPolicySet(SL_WLAN_POLICY_SCAN, config_opt, NULL, 0); ASSERT_ON_ERROR(retval, 
WLAN_ERROR); /* Set TX power lvl to max */ power = 0U; retval = sl_WlanSet(SL_WLAN_CFG_GENERAL_PARAM_ID, SL_WLAN_GENERAL_PARAM_OPT_STA_TX_POWER, 1, (uint8_t *)&power); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* Set NWP Power policy to 'normal' */ retval = sl_WlanPolicySet(SL_WLAN_POLICY_PM, SL_WLAN_NORMAL_POLICY, NULL, 0); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* Unregister mDNS services */ retval = sl_NetAppMDNSUnRegisterService(0, 0, 0); ASSERT_ON_ERROR(retval, NETAPP_ERROR); /* Remove all 64 RX filters (8*8) */ (void)memset(rx_filterid_mask.FilterBitmap, 0xFF, 8); retval = sl_WlanSet(SL_WLAN_RX_FILTERS_ID, SL_WLAN_RX_FILTER_REMOVE, sizeof(SlWlanRxFilterOperationCommandBuff_t), (uint8_t *)&rx_filterid_mask); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* Set NWP role as STA */ retval = sl_WlanSetMode(ROLE_STA); ASSERT_ON_ERROR(retval, WLAN_ERROR); /* For changes to take affect, we restart the NWP */ retval = sl_Stop(0xFF); ASSERT_ON_ERROR(retval, DEVICE_ERROR); mode = sl_Start(0, 0, 0); ASSERT_ON_ERROR(mode, DEVICE_ERROR); if (mode != ROLE_STA) { LOG_ERR("Failed to configure device to it's default state"); retval = -1; } else { nwp.role = ROLE_STA; SET_STATUS_BIT(nwp.status, STATUS_BIT_NWP_INIT); retval = 0; } return retval; } /** * @brief SimpleLinkWlanEventHandler * * This handler gets called whenever a WLAN event is reported * by the host driver / NWP. * * @note See the CC3120/CC3220 NWP programmer's guide (SWRU455) * sections 4.3.4, 4.4.5 and 4.5.5. 
*/ void SimpleLinkWlanEventHandler(SlWlanEvent_t *wlan_event) { SlWlanEventDisconnect_t *event_data = NULL; if (!wlan_event) { return; } switch (wlan_event->Id) { case SL_WLAN_EVENT_CONNECT: SET_STATUS_BIT(nwp.status, STATUS_BIT_CONNECTION); /* Store new connection SSID and BSSID: */ memcpy(sl_conn.ssid, wlan_event->Data.Connect.SsidName, wlan_event->Data.Connect.SsidLen); memcpy(sl_conn.bssid, wlan_event->Data.Connect.Bssid, BSSID_LEN_MAX); LOG_INF("[WLAN EVENT] STA Connected to the AP: %s, " "BSSID: %x:%x:%x:%x:%x:%x", sl_conn.ssid, sl_conn.bssid[0], sl_conn.bssid[1], sl_conn.bssid[2], sl_conn.bssid[3], sl_conn.bssid[4], sl_conn.bssid[5]); /* Continue the notification callback chain... */ sl_conn.error = 0; nwp.cb(SL_WLAN_EVENT_CONNECT, &sl_conn); break; case SL_WLAN_EVENT_DISCONNECT: CLR_STATUS_BIT(nwp.status, STATUS_BIT_CONNECTION); CLR_STATUS_BIT(nwp.status, STATUS_BIT_IP_ACQUIRED); CLR_STATUS_BIT(nwp.status, STATUS_BIT_IPV6_ACQUIRED); event_data = &wlan_event->Data.Disconnect; /* If the user has initiated 'Disconnect' request, * 'reason_code' is SL_WLAN_DISCONNECT_USER_INITIATED */ if (SL_WLAN_DISCONNECT_USER_INITIATED == event_data->ReasonCode) { LOG_INF("[WLAN EVENT] " "Device disconnected from the AP: %s", event_data->SsidName); LOG_INF("BSSID: %x:%x:%x:%x:%x:%x on application's" " request", event_data->Bssid[0], event_data->Bssid[1], event_data->Bssid[2], event_data->Bssid[3], event_data->Bssid[4], event_data->Bssid[5]); sl_conn.error = 0; } else { LOG_ERR("[WLAN ERROR] " "Device disconnected from the AP: %s", event_data->SsidName); LOG_ERR("BSSID: %x:%x:%x:%x:%x:%x on error: %d", event_data->Bssid[0], event_data->Bssid[1], event_data->Bssid[2], event_data->Bssid[3], event_data->Bssid[4], event_data->Bssid[5], event_data->ReasonCode); sl_conn.error = event_data->ReasonCode; } (void)memset(&(sl_conn.ssid), 0x0, sizeof(sl_conn.ssid)); (void)memset(&(sl_conn.bssid), 0x0, sizeof(sl_conn.bssid)); /* Continue the notification callback chain... 
*/ nwp.cb(SL_WLAN_EVENT_DISCONNECT, &sl_conn); break; case SL_WLAN_EVENT_STA_ADDED: memcpy(&(sl_conn.bssid), wlan_event->Data.STAAdded.Mac, SL_WLAN_BSSID_LENGTH); LOG_INF("[WLAN EVENT] STA was added to AP: " "BSSID: %x:%x:%x:%x:%x:%x", sl_conn.bssid[0], sl_conn.bssid[1], sl_conn.bssid[2], sl_conn.bssid[3], sl_conn.bssid[4], sl_conn.bssid[5]); break; case SL_WLAN_EVENT_STA_REMOVED: memcpy(&(sl_conn.bssid), wlan_event->Data.STAAdded.Mac, SL_WLAN_BSSID_LENGTH); LOG_INF("[WLAN EVENT] STA was removed from AP: " "BSSID: %x:%x:%x:%x:%x:%x", sl_conn.bssid[0], sl_conn.bssid[1], sl_conn.bssid[2], sl_conn.bssid[3], sl_conn.bssid[4], sl_conn.bssid[5]); (void)memset(&(sl_conn.bssid), 0x0, sizeof(sl_conn.bssid)); break; default: LOG_ERR("[WLAN EVENT] Unexpected event [0x%lx]", wlan_event->Id); break; } } /** * @brief SimpleLinkNetAppEventHandler * * This handler gets called whenever a Netapp event is reported * by the host driver / NWP. * * @note See the CC3120/CC3220 NWP programmer's guide (SWRU455) * section 5.7. 
*/ void SimpleLinkNetAppEventHandler(SlNetAppEvent_t *netapp_event) { SlIpV4AcquiredAsync_t *event_data = NULL; uint32_t i; if (!netapp_event) { return; } switch (netapp_event->Id) { case SL_NETAPP_EVENT_IPV4_ACQUIRED: SET_STATUS_BIT(nwp.status, STATUS_BIT_IP_ACQUIRED); /* Ip Acquired Event Data */ event_data = &netapp_event->Data.IpAcquiredV4; sl_conn.ip_addr = event_data->Ip; /* Gateway IP address */ sl_conn.gateway_ip = event_data->Gateway; LOG_INF("[NETAPP EVENT] IP set to: IPv4=%d.%d.%d.%d, " "Gateway=%d.%d.%d.%d", SL_IPV4_BYTE(sl_conn.ip_addr, 3), SL_IPV4_BYTE(sl_conn.ip_addr, 2), SL_IPV4_BYTE(sl_conn.ip_addr, 1), SL_IPV4_BYTE(sl_conn.ip_addr, 0), SL_IPV4_BYTE(sl_conn.gateway_ip, 3), SL_IPV4_BYTE(sl_conn.gateway_ip, 2), SL_IPV4_BYTE(sl_conn.gateway_ip, 1), SL_IPV4_BYTE(sl_conn.gateway_ip, 0)); nwp.cb(SIMPLELINK_WIFI_CB_IPACQUIRED, &sl_conn); break; case SL_NETAPP_EVENT_IPV6_ACQUIRED: SET_STATUS_BIT(nwp.status, STATUS_BIT_IPV6_ACQUIRED); for (i = 0U; i < 4; i++) { sl_conn.ipv6_addr[i] = netapp_event->Data.IpAcquiredV6.Ip[i]; } if (LOG_LEVEL >= LOG_LEVEL_INF) { LOG_INF("[NETAPP EVENT] IP Acquired: " "IPv6=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", ((sl_conn.ipv6_addr[0] >> 16) & 0xffff), sl_conn.ipv6_addr[0] & 0xffff, ((sl_conn.ipv6_addr[1] >> 16) & 0xffff), sl_conn.ipv6_addr[1] & 0xffff, ((sl_conn.ipv6_addr[2] >> 16) & 0xffff), sl_conn.ipv6_addr[2] & 0xffff, ((sl_conn.ipv6_addr[3] >> 16) & 0xffff), sl_conn.ipv6_addr[3] & 0xffff); } nwp.cb(SIMPLELINK_WIFI_CB_IPV6ACQUIRED, &sl_conn); break; case SL_DEVICE_EVENT_DROPPED_NETAPP_IP_LEASED: SET_STATUS_BIT(nwp.status, STATUS_BIT_IP_LEASED); SET_STATUS_BIT(nwp.status, STATUS_BIT_IP_ACQUIRED); sl_conn.sta_ip = netapp_event->Data.IpLeased.IpAddress; LOG_INF("[NETAPP EVENT] IP Leased to Client: " "IP=%d.%d.%d.%d", SL_IPV4_BYTE(sl_conn.sta_ip, 3), SL_IPV4_BYTE(sl_conn.sta_ip, 2), SL_IPV4_BYTE(sl_conn.sta_ip, 1), SL_IPV4_BYTE(sl_conn.sta_ip, 0)); break; case SL_DEVICE_EVENT_DROPPED_NETAPP_IP_RELEASED: LOG_INF("[NETAPP 
EVENT] IP is released."); break; default: LOG_ERR("[NETAPP EVENT] Unexpected event [0x%lx]", netapp_event->Id); break; } if ((netapp_event->Id == SL_NETAPP_EVENT_IPV4_ACQUIRED) || (netapp_event->Id == SL_NETAPP_EVENT_IPV6_ACQUIRED)) { /* Initialize SlNetSock layer for getaddrinfo */ SlNetIf_init(0); /* * We are only using SlNetSock to support getaddrinfo() * for the WiFi interface, so hardcoding the interface * id to 1 here. */ SlNetIf_add(SLNETIF_ID_1, SLNET_IF_WIFI_NAME, (const SlNetIf_Config_t *)&slnetifwifi_config_zephyr, SLNET_IF_WIFI_PRIO); SlNetSock_init(0); SlNetUtil_init(0); } } /** * @brief SimpleLinkGeneralEventHandler * * This handler gets called whenever a general error is reported * by the NWP / Host driver. Since these errors are not fatal, * the application can handle them. * * @note See the CC3120/CC3220 NWP programmer's guide (SWRU455) * section 17.9. */ void SimpleLinkGeneralEventHandler(SlDeviceEvent_t *dev_event) { if (!dev_event) { return; } LOG_INF("[GENERAL EVENT] - ID=[%d] Sender=[%d]", dev_event->Data.Error.Code, dev_event->Data.Error.Source); } /** * @brief SimpleLinkFatalErrorEventHandler * * This handler gets called whenever a driver error occurs requiring * restart of the device in order to recover. 
*/ void SimpleLinkFatalErrorEventHandler(SlDeviceFatal_t *fatal_err_event) { switch (fatal_err_event->Id) { case SL_DEVICE_EVENT_FATAL_DEVICE_ABORT: LOG_ERR("[ERROR] - FATAL ERROR: " "Abort NWP event detected: " "AbortType=%ld, AbortData=0x%lx", fatal_err_event->Data.DeviceAssert.Code, fatal_err_event->Data.DeviceAssert.Value); break; case SL_DEVICE_EVENT_FATAL_DRIVER_ABORT: LOG_ERR("[ERROR] - FATAL ERROR: Driver Abort detected."); break; case SL_DEVICE_EVENT_FATAL_NO_CMD_ACK: LOG_ERR("[ERROR] - FATAL ERROR: No Cmd Ack detected " "[cmd opcode = 0x%lx]", fatal_err_event->Data.NoCmdAck.Code); break; case SL_DEVICE_EVENT_FATAL_SYNC_LOSS: LOG_ERR("[ERROR] - FATAL ERROR: Sync loss detected"); break; case SL_DEVICE_EVENT_FATAL_CMD_TIMEOUT: LOG_ERR("[ERROR] - FATAL ERROR: " "Async event timeout detected " "[event opcode =0x%lx]", fatal_err_event->Data.CmdTimeout.Code); break; default: LOG_ERR("[ERROR] - FATAL ERROR: " "Unspecified error detected"); break; } } /* Unused, but must be defined to link. */ void SimpleLinkSockEventHandler(SlSockEvent_t *psock) { ARG_UNUSED(psock); } /* Unused, but must be defined to link. */ void SimpleLinkHttpServerEventHandler(SlNetAppHttpServerEvent_t *http_event, SlNetAppHttpServerResponse_t *http_resp) { ARG_UNUSED(http_event); ARG_UNUSED(http_resp); } /* Unused, but must be defined to link. */ void SimpleLinkNetAppRequestEventHandler(SlNetAppRequest_t *netapp_request, SlNetAppResponse_t *netapp_response) { ARG_UNUSED(netapp_request); ARG_UNUSED(netapp_response); } /* Unused, but must be defined to link. 
*/ void SimpleLinkNetAppRequestMemFreeEventHandler(uint8_t *buffer) { ARG_UNUSED(buffer); } /* Note: SimpleLink WiFi scan also can return the following: * - BSSID * - Whether network hidden or visible * - Other types of security */ void z_simplelink_get_scan_result(int index, struct wifi_scan_result *scan_result) { SlWlanNetworkEntry_t *net_entry; int sec_bmp; __ASSERT_NO_MSG(index <= CONFIG_WIFI_SIMPLELINK_SCAN_COUNT); net_entry = &nwp.net_entries[index]; (void)memset(scan_result, 0x0, sizeof(struct wifi_scan_result)); __ASSERT_NO_MSG(net_entry->SsidLen <= WIFI_SSID_MAX_LEN); memcpy(scan_result->ssid, net_entry->Ssid, net_entry->SsidLen); scan_result->ssid_length = net_entry->SsidLen; scan_result->channel = net_entry->Channel; /* Parse security bitmap: */ sec_bmp = net_entry->SecurityInfo; if (SL_WLAN_SCAN_RESULT_SEC_TYPE_BITMAP(sec_bmp) & 0x6) { scan_result->security = WIFI_SECURITY_TYPE_PSK; } else { scan_result->security = WIFI_SECURITY_TYPE_NONE; } scan_result->rssi = net_entry->Rssi; } int z_simplelink_start_scan(void) { int32_t ret; /* Clear the results buffer */ (void)memset(&nwp.net_entries, 0x0, sizeof(nwp.net_entries)); /* Attempt to get scan results from NWP * Note: If scan policy isn't set, invoking 'sl_WlanGetNetworkList()' * for the first time triggers 'one shot' scan. 
*/ ret = sl_WlanGetNetworkList(0, CONFIG_WIFI_SIMPLELINK_SCAN_COUNT, &nwp.net_entries[0]); LOG_DBG("sl_WlanGetNetworkList: %d", ret); return ret; } void z_simplelink_get_mac(unsigned char *mac) { uint16_t mac_len = SL_MAC_ADDR_LEN; uint16_t config_opt = 0U; sl_NetCfgGet(SL_NETCFG_MAC_ADDRESS_GET, &config_opt, &mac_len, (uint8_t *)mac); } int z_simplelink_connect(struct wifi_connect_req_params *params) { SlWlanSecParams_t secParams = { 0 }; long lretval; if (params->security == WIFI_SECURITY_TYPE_PSK) { secParams.Key = (signed char *)params->psk; secParams.KeyLen = params->psk_length; /* This is only mapping handled for now: */ secParams.Type = SL_WLAN_SEC_TYPE_WPA_WPA2; } else { secParams.Key = (signed char *)NULL; secParams.KeyLen = 0; secParams.Type = SL_WLAN_SEC_TYPE_OPEN; } lretval = sl_WlanConnect((signed char *)params->ssid, params->ssid_length, 0, &secParams, 0); LOG_DBG("sl_WlanConnect: %ld", lretval); return lretval; } int z_simplelink_disconnect(void) { long lretval; lretval = sl_WlanDisconnect(); LOG_DBG("sl_WlanDisconnect: %ld", lretval); return lretval; } int z_simplelink_init(simplelink_wifi_cb_t wifi_cb) { int retval; __ASSERT(wifi_cb, "callback must be supplied"); /* Init the board: */ CC3220SF_LAUNCHXL_init(); /* Configure SimpleLink NWP: */ nwp.status = 0U; nwp.role = ROLE_RESERVED; nwp.cb = wifi_cb; (void)memset(&sl_conn, 0x0, sizeof(sl_conn)); retval = configure_simplelink(); __ASSERT(retval >= 0, "Unable to configure SimpleLink"); return retval; } ```
/content/code_sandbox/drivers/wifi/simplelink/simplelink_support.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,752
```c /* * */ #include <zephyr/init.h> #include <zephyr/arch/posix/posix_trace.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #define _STDOUT_BUF_SIZE 256 static char stdout_buff[_STDOUT_BUF_SIZE]; static int n_pend; /* Number of pending characters in buffer */ static int print_char(int c) { int printnow = 0; if ((c != '\n') && (c != '\r')) { stdout_buff[n_pend++] = c; stdout_buff[n_pend] = 0; } else { printnow = 1; } if (n_pend >= _STDOUT_BUF_SIZE - 1) { printnow = 1; } if (printnow) { posix_print_trace("%s\n", stdout_buff); n_pend = 0; stdout_buff[0] = 0; } return c; } /** * Ensure that whatever was written thru printk is displayed now */ void posix_flush_stdout(void) { if (n_pend) { stdout_buff[n_pend] = 0; posix_print_trace("%s", stdout_buff); n_pend = 0; stdout_buff[0] = 0; } } static int posix_arch_console_init(void) { #ifdef CONFIG_PRINTK __printk_hook_install(print_char); #endif #ifdef CONFIG_STDOUT_CONSOLE __stdout_hook_install(print_char); #endif return 0; } SYS_INIT(posix_arch_console_init, PRE_KERNEL_1, CONFIG_POSIX_ARCH_CONSOLE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/console/posix_arch_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
352
```c
/*
 */

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/arch/common/semihost.h>
#include <zephyr/sys/libc-hooks.h>

/**
 * @brief Emit one character through the semihosting interface.
 *
 * Serves as the printk output callback (installed at link time, see below)
 * and as the stdout hook.
 *
 * @param _c Character to output
 * @return Always 0.
 */
int arch_printk_char_out(int _c)
{
	semihost_poll_out((char)_c);

	return 0;
}

/* Complete console setup by wiring stdout to the same output routine. */
static int semihost_console_init(void)
{
	/*
	 * The printk output callback is arch_printk_char_out by default and
	 * is installed at link time. That makes printk() usable very early.
	 *
	 * We still need to install the stdout callback manually at run time.
	 */
	__stdout_hook_install(arch_printk_char_out);

	return 0;
}

SYS_INIT(semihost_console_init, PRE_KERNEL_1, CONFIG_CONSOLE_INIT_PRIORITY);
```
/content/code_sandbox/drivers/console/semihost_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
173
```c
/*
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys/printk-hooks.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/device.h>
#include <zephyr/init.h>

#if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE)
/**
 * @brief Output one character to SIMULATOR console
 * @param c Character to output
 * @return The character passed as input.
 */
static int console_out(int c)
{
	/* Hypercall arguments: x0 = call number (8), x1 = the character.
	 * NOTE(review): presumably 8 selects the Jailhouse debug-console
	 * putc service and 0x4a48 ("JH") is the hypercall immediate —
	 * confirm against the Jailhouse hypercall ABI.
	 */
	register unsigned long x0 __asm__("x0") = 8;
	register unsigned long x1 __asm__("x1") = c;

	__asm__ volatile ("hvc #0x4a48\r\n"
			  : "+r" (x0), "+r" (x1)
			  :
			  : );

	return c;
}
#endif

/**
 * @brief Initialize the console/debug port
 * @return 0 if successful, otherwise failed.
 */
static int jailhouse_console_init(void)
{
#if defined(CONFIG_STDOUT_CONSOLE)
	__stdout_hook_install(console_out);
#endif
#if defined(CONFIG_PRINTK)
	__printk_hook_install(console_out);
#endif
	return 0;
}

SYS_INIT(jailhouse_console_init, PRE_KERNEL_1,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
```
/content/code_sandbox/drivers/console/jailhouse_debug_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
272
```c /* * */ /** * @file * @brief EFI console driver * * @details EFI console driver. * Hooks into the printk and fputc (for printf) modules. */ #include <stdio.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/sys/printk.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> extern int efi_console_putchar(int c); #if defined(CONFIG_PRINTK) || defined(CONFIG_STDOUT_CONSOLE) /** * * @brief Output one character to EFI console * * Outputs both line feed and carriage return in the case of a '\n'. * * @param c Character to output * * @return The character passed as input. */ static int console_out(int c) { return efi_console_putchar(c); } #endif /** * @brief Install printk/stdout hook for EFI console output */ static void efi_console_hook_install(void) { #if defined(CONFIG_STDOUT_CONSOLE) __stdout_hook_install(console_out); #endif #if defined(CONFIG_PRINTK) __printk_hook_install(console_out); #endif } /** * @brief Initialize one EFI as the console port * * @return 0 if successful, otherwise failed. */ static int efi_console_init(void) { efi_console_hook_install(); return 0; } /* EFI console initializes */ SYS_INIT(efi_console_init, PRE_KERNEL_1, 0); ```
/content/code_sandbox/drivers/console/efi_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
309
```c /* ram_console.c - Console messages to a RAM buffer */ /* * */ #include <zephyr/kernel.h> #include <zephyr/sys/printk.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/linker/devicetree_regions.h> #ifdef CONFIG_RAM_CONSOLE_BUFFER_SECTION #if !DT_HAS_CHOSEN(zephyr_ram_console) #error "Lack of chosen property zephyr,ram_console!" #elif (CONFIG_RAM_CONSOLE_BUFFER_SIZE > DT_REG_SIZE(DT_CHOSEN(zephyr_ram_console))) #error "Custom RAM console buffer exceeds the section size!" #endif #define RAM_CONSOLE_BUF_ATTR \ __attribute__((__section__(LINKER_DT_NODE_REGION_NAME(DT_CHOSEN(zephyr_ram_console))))) #else #define RAM_CONSOLE_BUF_ATTR #endif char ram_console_buf[CONFIG_RAM_CONSOLE_BUFFER_SIZE] RAM_CONSOLE_BUF_ATTR; char *ram_console; static int pos; static int ram_console_out(int character) { ram_console[pos] = (char)character; /* Leave one byte to ensure we're always NULL-terminated */ pos = (pos + 1) % (CONFIG_RAM_CONSOLE_BUFFER_SIZE - 1); return character; } static int ram_console_init(void) { #ifdef CONFIG_RAM_CONSOLE_BUFFER_SECTION mm_reg_t ram_console_va; device_map((mm_reg_t *)&ram_console_va, DT_REG_ADDR(DT_CHOSEN(zephyr_ram_console)), CONFIG_RAM_CONSOLE_BUFFER_SIZE, K_MEM_CACHE_NONE | K_MEM_DIRECT_MAP); ram_console = (char *)ram_console_va, #else ram_console = ram_console_buf, #endif __printk_hook_install(ram_console_out); __stdout_hook_install(ram_console_out); return 0; } SYS_INIT(ram_console_init, PRE_KERNEL_1, CONFIG_CONSOLE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/console/ram_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
405
```c
/* rtt_console.c - Console messages to a RAM buffer that is then read by
 * the Segger J-Link debugger
 */

/*
 */

#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys/printk-hooks.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <SEGGER_RTT.h>

/* Sticky flag: whether the RTT host appears to be consuming data. */
static bool host_present;

/** @brief Wait for fixed period.
 *
 * Sleeps when a thread context is available; in ISR or
 * pre-multithreading context it busy-waits instead, and only if
 * CONFIG_RTT_TX_RETRY_IN_INTERRUPT allows it.
 */
static void wait(void)
{
	if (!IS_ENABLED(CONFIG_MULTITHREADING) || k_is_in_isr()) {
		if (IS_ENABLED(CONFIG_RTT_TX_RETRY_IN_INTERRUPT)) {
			/* Delay is configured in ms, busy-wait takes us. */
			k_busy_wait(1000*CONFIG_RTT_TX_RETRY_DELAY_MS);
		}
	} else {
		k_msleep(CONFIG_RTT_TX_RETRY_DELAY_MS);
	}
}

/**
 * @brief Push one character into RTT up-buffer 0, with bounded retries.
 *
 * @param character Character to output.
 * @return The character passed as input (even if it was dropped).
 */
static int rtt_console_out(int character)
{
	char c = (char)character;
	unsigned int cnt;
	int max_cnt = CONFIG_RTT_TX_RETRY_CNT;

	do {
		cnt = SEGGER_RTT_Write(0, &c, 1);

		/* There are two possible reasons for not writing any data to
		 * RTT:
		 * - The host is not connected and not reading the data.
		 * - The buffer got full and will be read by the host.
		 * These two situations are distinguished using the following
		 * algorithm:
		 * At the beginning, the module assumes that the host is active,
		 * so when no data is read, it busy waits and retries.
		 * If, after retrying, the host reads the data, the module
		 * assumes that the host is active. If it fails, the module
		 * assumes that the host is inactive and stores that
		 * information. On next call, only one attempt takes place.
		 * The host is marked as active if the attempt is successful.
		 */
		if (cnt) {
			/* byte processed - host is present. */
			host_present = true;
		} else if (host_present) {
			if (max_cnt) {
				wait();
				max_cnt--;
				continue;	/* retry the write */
			} else {
				/* Retries exhausted; drop the byte. */
				host_present = false;
			}
		}

		break;
	} while (1);

	return character;
}

/**
 * @brief Install RTT as the printk/stdout backend.
 *
 * @return Always 0 (success).
 */
static int rtt_console_init(void)
{
#ifdef CONFIG_PRINTK
	__printk_hook_install(rtt_console_out);
#endif
	__stdout_hook_install(rtt_console_out);

	return 0;
}

SYS_INIT(rtt_console_init, PRE_KERNEL_1, CONFIG_CONSOLE_INIT_PRIORITY);
```
/content/code_sandbox/drivers/console/rtt_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
545
```c /* * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/drivers/ipm.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(ipm_console, CONFIG_IPM_LOG_LEVEL); const struct device *ipm_dev; static int console_out(int c) { static char buf[CONFIG_IPM_CONSOLE_LINE_BUF_LEN]; static size_t len; int ret; if (c != '\n' && len < sizeof(buf)) { buf[len++] = c; return c; } ret = ipm_send(ipm_dev, 1, len, buf, len); if (ret) { LOG_ERR("Error sending character %c over IPM, ret %d", c, ret); } memset(buf, 0, sizeof(buf)); len = 0; /* After buffer is full start a new one */ if (c != '\n') { buf[len++] = c; } return c; } /* Install printk/stdout hooks */ static void ipm_console_hook_install(void) { #if defined(CONFIG_STDOUT_CONSOLE) __stdout_hook_install(console_out); #endif #if defined(CONFIG_PRINTK) __printk_hook_install(console_out); #endif } static int ipm_console_init(void) { LOG_DBG("IPM console initialization"); ipm_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console)); if (!device_is_ready(ipm_dev)) { LOG_ERR("%s is not ready", ipm_dev->name); return -ENODEV; } if (ipm_max_id_val_get(ipm_dev) < CONFIG_IPM_CONSOLE_LINE_BUF_LEN) { LOG_ERR("IPM driver does not support buffer length %d", CONFIG_IPM_CONSOLE_LINE_BUF_LEN); return -ENOTSUP; } ipm_console_hook_install(); return 0; } /* Need to be initialized after IPM */ SYS_INIT(ipm_console_init, POST_KERNEL, CONFIG_CONSOLE_INIT_PRIORITY); ```
/content/code_sandbox/drivers/console/ipm_console.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
446
```c /* ipm_console_send.c - Console messages to another processor */ /* * */ #include <errno.h> #include <zephyr/kernel.h> #include <zephyr/sys/printk.h> #include <zephyr/sys/printk-hooks.h> #include <zephyr/sys/libc-hooks.h> #include <zephyr/drivers/ipm.h> #include <zephyr/drivers/console/ipm_console.h> static const struct device *ipm_console_device; static int console_out(int character) { if (character == '\r') { return character; } /* * We just stash the character into the id field and don't supply * any extra data */ ipm_send(ipm_console_device, 1, character, NULL, 0); return character; } int ipm_console_sender_init(const struct device *d) { const struct ipm_console_sender_config_info *config_info; config_info = d->config; ipm_console_device = device_get_binding(config_info->bind_to); if (!ipm_console_device) { printk("unable to bind IPM console sender to '%s'\n", config_info->bind_to); return -EINVAL; } if (config_info->flags & IPM_CONSOLE_STDOUT) { __stdout_hook_install(console_out); } if (config_info->flags & IPM_CONSOLE_PRINTK) { __printk_hook_install(console_out); } return 0; } ```
/content/code_sandbox/drivers/console/ipm_console_sender.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
304
```c
/* ipm_console.c - Console messages to/from another processor */

/*
 */

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/ring_buffer.h>
#include <zephyr/sys/printk.h>
#include <stdio.h>
#include <zephyr/drivers/ipm.h>
#include <zephyr/drivers/console/ipm_console.h>
#include <zephyr/sys/__assert.h>

/**
 * @brief Worker thread: drain received characters and print lines.
 *
 * Consumes one character per ring-buffer item (the character arrives in
 * the item type, deposited there by the ISR callback), assembles lines
 * in config_info->line_buf, and emits each completed line via printk
 * and/or printf according to the configured flags.
 *
 * @param arg1 struct ipm_console_receiver_runtime_data pointer.
 * @param arg2 const struct ipm_console_receiver_config_info pointer.
 * @param arg3 Unused.
 */
static void ipm_console_thread(void *arg1, void *arg2, void *arg3)
{
	uint8_t size32;
	uint16_t type;
	int ret, key;
	const struct ipm_console_receiver_config_info *config_info;
	struct ipm_console_receiver_runtime_data *driver_data;
	int pos;

	driver_data = (struct ipm_console_receiver_runtime_data *)arg1;
	config_info = (const struct ipm_console_receiver_config_info *)arg2;
	ARG_UNUSED(arg3);
	size32 = 0U;
	pos = 0;

	while (1) {
		/* One semaphore give per character put by the ISR. */
		k_sem_take(&driver_data->sem, K_FOREVER);

		ret = ring_buf_item_get(&driver_data->rb, &type,
					(uint8_t *)&config_info->line_buf[pos],
					NULL, &size32);
		if (ret) {
			/* Shouldn't ever happen... */
			printk("ipm console ring buffer error: %d\n", ret);
			size32 = 0U;
			continue;
		}

		/* Flush on newline or when the line buffer is full
		 * (lb_size - 2 leaves room for the terminator).
		 */
		if (config_info->line_buf[pos] == '\n' ||
		    pos == config_info->lb_size - 2) {
			if (pos != config_info->lb_size - 2) {
				/* Overwrite the newline with the NUL. */
				config_info->line_buf[pos] = '\0';
			} else {
				/* Keep the last character; NUL after it. */
				config_info->line_buf[pos + 1] = '\0';
			}
			if (config_info->flags & IPM_CONSOLE_PRINTK) {
				printk("ipm_console: '%s'\n",
				       config_info->line_buf);
			}
			if (config_info->flags & IPM_CONSOLE_STDOUT) {
				printf("ipm_console: '%s'\n",
				       config_info->line_buf);
			}
			pos = 0;
		} else {
			++pos;
		}

		/* ISR may have disabled the channel due to full buffer at
		 * some point. If that happened and there is now room,
		 * re-enable it.
		 *
		 * Lock interrupts to avoid pathological scenario where
		 * the buffer fills up in between enabling the channel and
		 * clearing the channel_disabled flag.
		 */
		if (driver_data->channel_disabled &&
		    ring_buf_item_space_get(&driver_data->rb)) {
			key = irq_lock();
			ipm_set_enabled(driver_data->ipm_device, 1);
			driver_data->channel_disabled = 0;
			irq_unlock(key);
		}
	}
}

/**
 * @brief IPM receive callback (runs in ISR context).
 *
 * Stores the received character (carried in @a id) as a ring-buffer
 * item and wakes the worker thread. The data pointer is unused by this
 * protocol.
 */
static void ipm_console_receive_callback(const struct device *ipm_dev,
					 void *user_data,
					 uint32_t id, volatile void *data)
{
	struct ipm_console_receiver_runtime_data *driver_data = user_data;
	int ret;

	ARG_UNUSED(data);

	/* Should always be at least one free buffer slot */
	ret = ring_buf_item_put(&driver_data->rb, 0, id, NULL, 0);
	__ASSERT(ret == 0, "Failed to insert data into ring buffer");
	k_sem_give(&driver_data->sem);

	/* If the buffer is now full, disable future interrupts for this channel
	 * until the thread has a chance to consume characters.
	 *
	 * This works without losing data if the sending side tries to send
	 * more characters because the sending side is making an ipm_send()
	 * call with the wait flag enabled.  It blocks until the receiver side
	 * re-enables the channel and consumes the data.
	 */
	if (ring_buf_item_space_get(&driver_data->rb) == 0) {
		ipm_set_enabled(ipm_dev, 0);
		driver_data->channel_disabled = 1;
	}
}

/**
 * @brief Initialize the IPM console receiver bound to device @a d.
 *
 * Binds the underlying IPM device, initializes the semaphore and ring
 * buffer, registers the ISR callback, spawns the worker thread and
 * enables the channel.
 *
 * @return 0 on success, -EINVAL if the IPM device cannot be bound or
 *         does not support 8-bit id values.
 */
int ipm_console_receiver_init(const struct device *d)
{
	const struct ipm_console_receiver_config_info *config_info =
		d->config;
	struct ipm_console_receiver_runtime_data *driver_data = d->data;
	const struct device *ipm;

	ipm = device_get_binding(config_info->bind_to);
	if (!ipm) {
		printk("unable to bind IPM console receiver to '%s'\n",
		       config_info->bind_to);
		return -EINVAL;
	}

	/* Each character rides in the message id, so 8-bit ids required. */
	if (ipm_max_id_val_get(ipm) < 0xFF) {
		printk("IPM driver %s doesn't support 8-bit id values",
		       config_info->bind_to);
		return -EINVAL;
	}

	driver_data->ipm_device = ipm;
	driver_data->channel_disabled = 0;
	k_sem_init(&driver_data->sem, 0, K_SEM_MAX_LIMIT);
	ring_buf_item_init(&driver_data->rb, config_info->rb_size32,
			   config_info->ring_buf_data);

	ipm_register_callback(ipm, ipm_console_receive_callback, driver_data);

	k_thread_create(&driver_data->rx_thread, config_info->thread_stack,
			CONFIG_IPM_CONSOLE_STACK_SIZE, ipm_console_thread,
			driver_data, (void *)config_info, NULL,
			K_PRIO_COOP(IPM_CONSOLE_PRI), 0, K_NO_WAIT);

	ipm_set_enabled(ipm, 1);

	return 0;
}
```
/content/code_sandbox/drivers/console/ipm_console_receiver.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,160
```unknown # Console driver configuration options # Setting shared by different subsystems menuconfig CONSOLE bool "Console drivers" if CONSOLE config CONSOLE_INPUT_MAX_LINE_LEN int "Console maximum input line length" default 128 help This option can be used to modify the maximum length a console input can be. config CONSOLE_HAS_DRIVER bool help This is an option to be enabled by console drivers to signal that some kind of console exists. config CONSOLE_HANDLER bool "Console input handler" depends on UART_CONSOLE && SERIAL_SUPPORT_INTERRUPT select UART_INTERRUPT_DRIVEN help This option enables console input handler allowing to write simple interaction between serial console and the OS. config CONSOLE_INIT_PRIORITY int "Console init priority" default 60 if UART_CONSOLE || XTENSA_SIM_CONSOLE default KERNEL_INIT_PRIORITY_DEFAULT help Console driver device initialization priority. config UART_CONSOLE bool "Use UART for console" depends on SERIAL && SERIAL_HAS_DRIVER select CONSOLE_HAS_DRIVER help Enable this option to use one UART for console. config UART_CONSOLE_DEBUG_SERVER_HOOKS bool "Debug server hooks in debug console" depends on UART_CONSOLE help This option allows a debug server agent such as GDB to take over the handling of traffic that goes through the console logic. The debug server looks at characters received and decides to handle them itself if they are some sort of control characters, or let the regular console code handle them if they are of no special significance to it. config UART_CONSOLE_MCUMGR bool "UART console mcumgr passthrough" depends on UART_CONSOLE help Enables the UART console to receive mcumgr frames for image upgrade and device management. When enabled, the UART console does not process mcumgr frames, but it hands them up to a higher level module (e.g., the shell). If unset, incoming mcumgr frames are dropped. 
config UART_CONSOLE_INPUT_EXPIRED bool "Support for UART console input expired mechanism" default y depends on UART_CONSOLE && PM help This option allows a notification to the power management module that the module for UART console is in use now. If the interval of console module doesn't receive any input message exceeds expired timeout, such as UART_CONSOLE_INPUT_EXPIRED_TIMEOUT, the power management module is allowed to enter sleep/deep sleep state and turn off the clock of UART console module. This mechanism gives a window in which the users can organize input message if CONFIG_PM is enabled. config UART_CONSOLE_INPUT_EXPIRED_TIMEOUT int "Fixed amount of time to keep the UART console in use flag true" default 15000 depends on UART_CONSOLE_INPUT_EXPIRED help Fixed amount of time which unit is milliseconds to keep the UART console in use flag true. config JAILHOUSE_DEBUG_CONSOLE bool "Use JAILHOUSE_DEBUG console" select CONSOLE_HAS_DRIVER depends on ARM64 help Emit console messages to a jailhouse hypervisor debug console. Useful in board bring-up if there aren't any working serial drivers. # Workaround for not being able to have commas in macro arguments DT_CHOSEN_Z_RAM_CONSOLE := zephyr,ram-console config RAM_CONSOLE bool "Use RAM console" select CONSOLE_HAS_DRIVER select RAM_CONSOLE_BUFFER_SECTION if $(dt_chosen_enabled,$(DT_CHOSEN_Z_RAM_CONSOLE)) help Emit console messages to a RAM buffer "ram_console" which can be examined at runtime with a debugger. Useful in board bring-up if there aren't any working serial drivers. config RAM_CONSOLE_BUFFER_SECTION bool "Use dedicated section as RAM console buffer" depends on RAM_CONSOLE && $(dt_chosen_enabled,$(DT_CHOSEN_Z_RAM_CONSOLE)) select KERNEL_DIRECT_MAP if MMU help Use a dedicated section as the RAM console buffer, whose address is known before build so that the console output messages can be easily examined by a debugger or software tool from a parallel-running OS. 
config RAM_CONSOLE_BUFFER_SIZE int "Ram Console buffer size" default $(dt_chosen_reg_size_int,$(DT_CHOSEN_Z_RAM_CONSOLE)) if RAM_CONSOLE_BUFFER_SECTION default 1024 depends on RAM_CONSOLE help Total size of the RAM console buffer, to ensure it's always NULL-terminated leave one byte unused, the actual length is one byte less. Messages will wrap around if the actual length is exceeded. config RTT_CONSOLE bool "Use RTT console" depends on USE_SEGGER_RTT select CONSOLE_HAS_DRIVER help Emit console messages to a RAM buffer that is then read by the Segger J-Link software and displayed on a computer in real-time. Requires support for Segger J-Link on the companion IC onboard. if RTT_CONSOLE config RTT_TX_RETRY_CNT int "Number of TX retries" default 2 help Number of TX retries before dropping the byte and assuming that RTT session is inactive. config RTT_TX_RETRY_DELAY_MS int "Delay between TX retries in milliseconds" default 2 help Sleep period between TX retry attempts. During RTT session, host pulls data periodically. Period starts from 1-2 milliseconds and can be increased if traffic on RTT increases (also from host to device). In case of heavy traffic data can be lost and it may be necessary to increase delay or number of retries. config RTT_TX_RETRY_IN_INTERRUPT bool "Busy wait in the interrupt context for TX retry" help If enabled RTT console will busy wait between TX retries when console assumes that RTT session is active. In case of heavy traffic data can be lost and it may be necessary to increase delay or number of retries. 
endif config IPM_CONSOLE_SENDER bool "Inter-processor Mailbox console sender" select CONSOLE_HAS_DRIVER help Enable the sending side of IPM console config IPM_CONSOLE_RECEIVER bool "Inter-processor Mailbox console receiver" select RING_BUFFER help Enable the receiving side of IPM console config IPM_CONSOLE_STACK_SIZE int "Stack size for IPM console receiver thread" depends on IPM_CONSOLE_RECEIVER default 2048 if COVERAGE_GCOV default 512 help Each instance of the IPM console receiver driver creates a worker thread to print out incoming messages from the remote CPU. Specify the stack size for these threads here. config IPM_CONSOLE bool "Inter-processor Mailbox console" depends on IPM select CONSOLE_HAS_DRIVER help Enable console over Inter-processor Mailbox. config IPM_CONSOLE_LINE_BUF_LEN int "IPM console line buffer length" default 128 depends on IPM_CONSOLE help IPM console line buffer length specify amount of the buffer where characters are stored before sending the whole line. config UART_MCUMGR bool "Mcumgr UART driver" select UART_INTERRUPT_DRIVEN help Enable the mcumgr UART driver. This driver allows the application to communicate over UART using the mcumgr protocol for image upgrade and device management. The driver doesn't inspect received data (as contrary to console UART driver) and all aspects of received protocol data are handled by an application provided callback. if UART_MCUMGR config UART_MCUMGR_RX_BUF_SIZE int "Size of receive buffer for mcumgr fragments received over UART, in bytes" default 128 help Specifies the size of the mcumgr UART receive buffer, in bytes. This value must be large enough to accommodate any line sent by an mcumgr client. config UART_MCUMGR_RX_BUF_COUNT int "Number of receive buffers for mcumgr fragments received over UART" default 2 help Specifies the number of the mcumgr UART receive buffers. Receive buffers hold received mcumgr fragments prior to reassembly. 
This setting's value must satisfy the following relation: UART_MCUMGR_RX_BUF_COUNT * UART_MCUMGR_RX_BUF_SIZE >= MCUMGR_TRANSPORT_UART_MTU endif # UART_MCUMGR config XTENSA_SIM_CONSOLE bool "Use Xtensa simulator console" depends on SIMULATOR_XTENSA depends on !WINSTREAM_CONSOLE select CONSOLE_HAS_DRIVER default y help Use simulator console to print messages. config POSIX_ARCH_CONSOLE bool "Use the host terminal for console" depends on ARCH_POSIX select CONSOLE_HAS_DRIVER help Zephyr's printk messages will be directed to the host terminal stdout config POSIX_ARCH_CONSOLE_INIT_PRIORITY int "Init priority" default 99 depends on POSIX_ARCH_CONSOLE help Device driver initialization priority. config SEMIHOST_CONSOLE bool "Use semihosting for console" select CONSOLE_HAS_DRIVER depends on SEMIHOST help Enable this option to use semihosting for console. See SEMIHOST for a more complete description of semihosting. module = UART_CONSOLE module-str = UART console source "subsys/logging/Kconfig.template.log_config" config EFI_CONSOLE bool "Use EFI console for console output" select CONSOLE_HAS_DRIVER help Enable this option to use EFI console output. config WINSTREAM_CONSOLE bool "Use Winstream console" depends on WINSTREAM select CONSOLE_HAS_DRIVER help Use winstream as a console. See the WINSTREAM Kconfig help for more information. endif # CONSOLE ```
/content/code_sandbox/drivers/console/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,062
```unknown config ARM_SIP_SVC_HAS_INTEL_SDM_MAILBOX_FIFO bool default y depends on DT_HAS_INTEL_SOCFPGA_AGILEX_SIP_SMC_ENABLED imply ARM_SIP_SVC_HAS_DRIVER help Support for SDM mailbox fifo in Intel SoC FPGA Agilex via SMC calls. config ARM_SIP_SVC_EL3_MAILBOX_RESPONSE_SIZE int "Size of response buffer used for ASYNC transactions." default 4096 depends on ARM_SIP_SVC_HAS_INTEL_SDM_MAILBOX_FIFO help Size of response buffer used for ASYNC transactions.For Intel Agilex platform the maximum size of response buffer size is 4096 and minimum is 4 bytes. Also it should be multiple of 4 bytes. config ARM_SIP_SVC_EL3_MAX_ALLOWED_TRANSACTIONS int "Maximum allowable ongoing transactions." default 16 depends on ARM_SIP_SVC_HAS_INTEL_SDM_MAILBOX_FIFO help Allowed number of active transactions in sip_svc subsystem for this driver. ```
/content/code_sandbox/drivers/sip_svc/Kconfig.sip_smc_agilex
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
225
```c /* * */ /** * @file * @brief A driver for sending and receiving mcumgr packets over UART. */ #include <string.h> #include <zephyr/kernel.h> #include <zephyr/drivers/uart.h> #include <zephyr/mgmt/mcumgr/transport/serial.h> #include <zephyr/drivers/console/uart_mcumgr.h> static const struct device *const uart_mcumgr_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_uart_mcumgr)); /** Callback to execute when a valid fragment has been received. */ static uart_mcumgr_recv_fn *uart_mgumgr_recv_cb; /** Contains the fragment currently being received. */ static struct uart_mcumgr_rx_buf *uart_mcumgr_cur_buf; /** * Whether the line currently being read should be ignored. This is true if * the line is too long or if there is no buffer available to hold it. */ static bool uart_mcumgr_ignoring; /** Contains buffers to hold incoming request fragments. */ K_MEM_SLAB_DEFINE(uart_mcumgr_slab, sizeof(struct uart_mcumgr_rx_buf), CONFIG_UART_MCUMGR_RX_BUF_COUNT, 1); #if defined(CONFIG_MCUMGR_TRANSPORT_UART_ASYNC) uint8_t async_buffer[CONFIG_MCUMGR_TRANSPORT_UART_ASYNC_BUFS] [CONFIG_MCUMGR_TRANSPORT_UART_ASYNC_BUF_SIZE]; static int async_current; #endif static struct uart_mcumgr_rx_buf *uart_mcumgr_alloc_rx_buf(void) { struct uart_mcumgr_rx_buf *rx_buf; void *block; int rc; rc = k_mem_slab_alloc(&uart_mcumgr_slab, &block, K_NO_WAIT); if (rc != 0) { return NULL; } rx_buf = block; rx_buf->length = 0; return rx_buf; } void uart_mcumgr_free_rx_buf(struct uart_mcumgr_rx_buf *rx_buf) { void *block; block = rx_buf; k_mem_slab_free(&uart_mcumgr_slab, block); } #if !defined(CONFIG_MCUMGR_TRANSPORT_UART_ASYNC) /** * Reads a chunk of received data from the UART. */ static int uart_mcumgr_read_chunk(void *buf, int capacity) { if (!uart_irq_rx_ready(uart_mcumgr_dev)) { return 0; } return uart_fifo_read(uart_mcumgr_dev, buf, capacity); } #endif /** * Processes a single incoming byte. 
*/ static struct uart_mcumgr_rx_buf *uart_mcumgr_rx_byte(uint8_t byte) { struct uart_mcumgr_rx_buf *rx_buf; if (!uart_mcumgr_ignoring) { if (uart_mcumgr_cur_buf == NULL) { uart_mcumgr_cur_buf = uart_mcumgr_alloc_rx_buf(); if (uart_mcumgr_cur_buf == NULL) { /* Insufficient buffers; drop this fragment. */ uart_mcumgr_ignoring = true; } } } rx_buf = uart_mcumgr_cur_buf; if (!uart_mcumgr_ignoring) { if (rx_buf->length >= sizeof(rx_buf->data)) { /* Line too long; drop this fragment. */ uart_mcumgr_free_rx_buf(uart_mcumgr_cur_buf); uart_mcumgr_cur_buf = NULL; uart_mcumgr_ignoring = true; } else { rx_buf->data[rx_buf->length++] = byte; } } if (byte == '\n') { /* Fragment complete. */ if (uart_mcumgr_ignoring) { uart_mcumgr_ignoring = false; } else { uart_mcumgr_cur_buf = NULL; return rx_buf; } } return NULL; } #if defined(CONFIG_MCUMGR_TRANSPORT_UART_ASYNC) static void uart_mcumgr_async(const struct device *dev, struct uart_event *evt, void *user_data) { struct uart_mcumgr_rx_buf *rx_buf; uint8_t *p; int len; ARG_UNUSED(dev); switch (evt->type) { case UART_TX_DONE: case UART_TX_ABORTED: break; case UART_RX_RDY: len = evt->data.rx.len; p = &evt->data.rx.buf[evt->data.rx.offset]; for (int i = 0; i < len; i++) { rx_buf = uart_mcumgr_rx_byte(p[i]); if (rx_buf != NULL) { uart_mgumgr_recv_cb(rx_buf); } } break; case UART_RX_DISABLED: async_current = 0; break; case UART_RX_BUF_REQUEST: /* * Note that when buffer gets filled, the UART_RX_BUF_RELEASED will be reported, * aside to UART_RX_RDY. The UART_RX_BUF_RELEASED is not processed because * it has been assumed that the mcumgr will be able to consume bytes faster * than UART will receive them and, since there is nothing to release, only * UART_RX_BUF_REQUEST is processed. 
*/ ++async_current; async_current %= CONFIG_MCUMGR_TRANSPORT_UART_ASYNC_BUFS; uart_rx_buf_rsp(dev, async_buffer[async_current], sizeof(async_buffer[async_current])); break; case UART_RX_BUF_RELEASED: case UART_RX_STOPPED: break; } } #else /** * ISR that is called when UART bytes are received. */ static void uart_mcumgr_isr(const struct device *unused, void *user_data) { struct uart_mcumgr_rx_buf *rx_buf; uint8_t buf[32]; int chunk_len; int i; ARG_UNUSED(unused); ARG_UNUSED(user_data); while (uart_irq_update(uart_mcumgr_dev) && uart_irq_is_pending(uart_mcumgr_dev)) { chunk_len = uart_mcumgr_read_chunk(buf, sizeof(buf)); if (chunk_len == 0) { continue; } for (i = 0; i < chunk_len; i++) { rx_buf = uart_mcumgr_rx_byte(buf[i]); if (rx_buf != NULL) { uart_mgumgr_recv_cb(rx_buf); } } } } #endif /** * Sends raw data over the UART. */ static int uart_mcumgr_send_raw(const void *data, int len) { const uint8_t *u8p; u8p = data; while (len--) { uart_poll_out(uart_mcumgr_dev, *u8p++); } return 0; } int uart_mcumgr_send(const uint8_t *data, int len) { return mcumgr_serial_tx_pkt(data, len, uart_mcumgr_send_raw); } #if defined(CONFIG_MCUMGR_TRANSPORT_UART_ASYNC) static void uart_mcumgr_setup(const struct device *uart) { uart_callback_set(uart, uart_mcumgr_async, NULL); uart_rx_enable(uart, async_buffer[0], sizeof(async_buffer[0]), 0); } #else static void uart_mcumgr_setup(const struct device *uart) { uart_irq_rx_disable(uart); uart_irq_tx_disable(uart); uart_irq_callback_set(uart, uart_mcumgr_isr); uart_irq_rx_enable(uart); } #endif void uart_mcumgr_register(uart_mcumgr_recv_fn *cb) { uart_mgumgr_recv_cb = cb; if (device_is_ready(uart_mcumgr_dev)) { uart_mcumgr_setup(uart_mcumgr_dev); } } ```
/content/code_sandbox/drivers/console/uart_mcumgr.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,653
```unknown # Silicon vendor Provided Supervisory call driver for sip_svc subsystem menuconfig ARM_SIP_SVC_DRIVER bool "ARM SIP SVC driver" depends on ARM64 help ARM supervisory call driver for communicating with EL2 or EL3 firmware if ARM_SIP_SVC_DRIVER module = ARM_SIP_SVC_DRIVER module-str = arm_sip_svc_driver source "subsys/logging/Kconfig.template.log_config" config ARM_SIP_SVC_DRIVER_INIT_PRIORITY int "Initialization priority" default 50 config ARM_SIP_SVC_HAS_DRIVER bool help This is an option to be enabled by individual sip svc driver to signal that there is a sip svc driver. This is used by other modules which depends on sip svc driver. source "drivers/sip_svc/Kconfig.sip_smc_agilex" endif # ARM_SIP_SVC_DRIVER ```
/content/code_sandbox/drivers/sip_svc/Kconfig
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
188
```c
/*
 *
 * Intel SoC FPGA platform specific functions used by ARM SiP Services for
 * supporting EL3 communication from zephyr.
 */

#include <string.h>
#include <zephyr/drivers/sip_svc/sip_svc_agilex_mailbox.h>
#include <zephyr/drivers/sip_svc/sip_svc_agilex_smc.h>
#include <zephyr/drivers/sip_svc/sip_svc_driver.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(intel_socfpga_agilex_sip_smc, CONFIG_ARM_SIP_SVC_DRIVER_LOG_LEVEL);

#define DT_DRV_COMPAT intel_socfpga_agilex_sip_smc

#define DT_SIP_SMC DT_COMPAT_GET_ANY_STATUS_OKAY(DT_DRV_COMPAT)

/**
 * @brief Validate an SMC function id against the allow-list for the
 *        given protocol command (sync vs async).
 */
static bool intel_sip_smc_plat_func_id_valid(const struct device *dev, uint32_t command,
					     uint32_t func_id)
{
	ARG_UNUSED(dev);
	bool valid = false;

	if (command > SIP_SVC_PROTO_CMD_MAX) {
		return false;
	}

	if (command == SIP_SVC_PROTO_CMD_SYNC) {
		/* Synchronous SMC Function IDs */
		switch (func_id) {
		case SMC_FUNC_ID_GET_SVC_VERSION:
		case SMC_FUNC_ID_REG_READ:
		case SMC_FUNC_ID_REG_WRITE:
		case SMC_FUNC_ID_REG_UPDATE:
		case SMC_FUNC_ID_SET_HPS_BRIDGES:
		case SMC_FUNC_ID_RSU_UPDATE_ADDR:
			valid = true;
			break;
		default:
			valid = false;
			break;
		}
	} else if (command == SIP_SVC_PROTO_CMD_ASYNC) {
		/* Asynchronous SMC Function IDs */
		switch (func_id) {
		case SMC_FUNC_ID_MAILBOX_SEND_COMMAND:
		case SMC_FUNC_ID_MAILBOX_POLL_RESPONSE:
			valid = true;
			break;
		default:
			valid = false;
			break;
		}
	}

	return valid;
}

/**
 * @brief Pack client index (high nibble) and transaction index (low
 *        nibble) into an 8-bit job/transaction id.
 */
static uint32_t intel_sip_smc_plat_format_trans_id(const struct device *dev, uint32_t client_idx,
						   uint32_t trans_idx)
{
	ARG_UNUSED(dev);

	/* Combine the transaction id and client id to get the job id*/
	return (((client_idx & 0xF) << 4) | (trans_idx & 0xF));
}

/**
 * @brief Extract the transaction index (low nibble) from a packed id.
 */
static uint32_t intel_sip_smc_plat_get_trans_idx(const struct device *dev, uint32_t trans_id)
{
	ARG_UNUSED(dev);

	return (trans_id & 0xF);
}

/**
 * @brief Stamp @a trans_id into both the SMC header (a1) and, when
 *        present, the mailbox command header pointed to by a2.
 */
static void intel_sip_smc_plat_update_trans_id(const struct device *dev,
					       struct sip_svc_request *request, uint32_t trans_id)
{
	ARG_UNUSED(dev);
	uint32_t *data;

	if (request == NULL) {
		LOG_ERR("request is empty");
		return;
	}

	/* Assign the trans id into intel smc header a1 */
	SMC_PLAT_PROTO_HEADER_SET_TRANS_ID(request->a1, trans_id);

	/* Assign the trans id into mailbox header */
	if ((void *)request->a2 != NULL) {
		data = (uint32_t *)request->a2;
		SIP_SVC_MB_HEADER_SET_TRANS_ID(data[0], trans_id);
	}
}

/**
 * @brief Release the heap-allocated mailbox payload of an async request.
 */
static void intel_sip_smc_plat_free_async_memory(const struct device *dev,
						 struct sip_svc_request *request)
{
	ARG_UNUSED(dev);

	/* Free mailbox command data dynamic memory space,
	 * this function will be called after sip_svc service
	 * process the async request.
	 */
	if (request->a2) {
		k_free((void *)request->a2);
	}
}

/**
 * @brief Fill SMC registers for polling an async mailbox response.
 *
 * Only a0-a3 are used by this call; @a buf/@a size describe where EL3
 * should place the response.
 *
 * @return Always 0.
 */
static int intel_sip_smc_plat_async_res_req(const struct device *dev, unsigned long *a0,
					    unsigned long *a1, unsigned long *a2, unsigned long *a3,
					    unsigned long *a4, unsigned long *a5, unsigned long *a6,
					    unsigned long *a7, char *buf, size_t size)
{
	ARG_UNUSED(dev);

	/* Fill in SMC parameter to read mailbox response */
	*a0 = SMC_FUNC_ID_MAILBOX_POLL_RESPONSE;
	*a1 = 0;
	*a2 = (unsigned long)buf;
	*a3 = size;

	return 0;
}

/**
 * @brief Decode a polled mailbox response.
 *
 * On success extracts the transaction id and total length (header +
 * body, in bytes) from the response header in @a buf.
 *
 * @return 0 on a valid response, -EINPROGRESS when nothing is pending.
 */
static int intel_sip_smc_plat_async_res_res(const struct device *dev, struct arm_smccc_res *res,
					    char *buf, size_t *size, uint32_t *trans_id)
{
	ARG_UNUSED(dev);
	uint32_t *resp = (uint32_t *)buf;

	__ASSERT((res && buf && size && trans_id), "invalid parameters\n");

	if (((long)res->a0) <= SMC_STATUS_OKAY) {
		/* Extract transaction id from mailbox response header */
		*trans_id = SIP_SVC_MB_HEADER_GET_TRANS_ID(resp[0]);
		/* The final length should include both header and body */
		*size = (SIP_SVC_MB_HEADER_GET_LENGTH(resp[0]) + 1) * 4;
	} else {
		LOG_INF("There is no valid polling response %ld", (long)res->a0);
		return -EINPROGRESS;
	}

	LOG_INF("Got a valid polling response");
	return 0;
}

/**
 * @brief Return the EL3 status code (a0) from an SMC result, or
 *        SIP_SVC_ID_INVALID when @a res is NULL.
 */
static uint32_t intel_sip_smc_plat_get_error_code(const struct device *dev,
						  struct arm_smccc_res *res)
{
	ARG_UNUSED(dev);

	if (res != NULL) {
		return res->a0;
	} else {
		return SIP_SVC_ID_INVALID;
	}
}

/**
 * @brief Perform the supervisory (SMC) call into EL3 with full
 *        argument/result tracing and call-duration logging.
 */
static void intel_sip_secure_monitor_call(const struct device *dev, unsigned long function_id,
					  unsigned long arg0, unsigned long arg1,
					  unsigned long arg2, unsigned long arg3,
					  unsigned long arg4, unsigned long arg5,
					  unsigned long arg6, struct arm_smccc_res *res)
{
	__ASSERT_NO_MSG(dev != NULL);
	__ASSERT_NO_MSG(res != NULL);
	uint64_t start, end;

	LOG_DBG("Before %s call", DT_PROP(DT_SIP_SMC, method));
	LOG_DBG("\tfunction_id       %08lx", function_id);
	LOG_DBG("\targ0              %08lx", arg0);
	LOG_DBG("\targ1              %08lx", arg1);
	LOG_DBG("\targ2              %08lx", arg2);
	LOG_DBG("\targ3              %08lx", arg3);
	LOG_DBG("\targ4              %08lx", arg4);
	LOG_DBG("\targ5              %08lx", arg5);
	LOG_DBG("\targ6              %08lx", arg6);

	start = k_cycle_get_64();
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, arg5, arg6, res);
	end = k_cycle_get_64();

	LOG_INF("Time taken for %08lx is %08lld ns", function_id,
		k_cyc_to_ns_ceil64(end - start));

	LOG_DBG("After %s call", DT_PROP(DT_SIP_SMC, method));
	LOG_DBG("\tres->a0           %08lx", res->a0);
	LOG_DBG("\tres->a1           %08lx", res->a1);
	LOG_DBG("\tres->a2           %08lx", res->a2);
	LOG_DBG("\tres->a3           %08lx", res->a3);
	LOG_DBG("\tres->a4           %08lx", res->a4);
	LOG_DBG("\tres->a5           %08lx", res->a5);
	LOG_DBG("\tres->a6           %08lx", res->a6);
	LOG_DBG("\tres->a7           %08lx", res->a7);
}

/**
 * @brief Driver init hook - registration only, no hardware touch.
 */
static int arm_sip_smc_init(const struct device *dev)
{
	ARG_UNUSED(dev);
	LOG_INF("Supervisory call %s registered successfully", DT_PROP(DT_SIP_SMC, method));
	return 0;
}

/* Platform callbacks consumed by the sip_svc subsystem. */
static const struct svc_driver_api api = {
	.sip_supervisory_call = intel_sip_secure_monitor_call,
	.sip_svc_plat_get_trans_idx = intel_sip_smc_plat_get_trans_idx,
	.sip_svc_plat_format_trans_id = intel_sip_smc_plat_format_trans_id,
	.sip_svc_plat_func_id_valid = intel_sip_smc_plat_func_id_valid,
	.sip_svc_plat_update_trans_id = intel_sip_smc_plat_update_trans_id,
	.sip_svc_plat_get_error_code = intel_sip_smc_plat_get_error_code,
	.sip_svc_plat_async_res_req = intel_sip_smc_plat_async_res_req,
	.sip_svc_plat_async_res_res = intel_sip_smc_plat_async_res_res,
	.sip_svc_plat_free_async_memory = intel_sip_smc_plat_free_async_memory};

BUILD_ASSERT((DT_PROP(DT_SIP_SMC, zephyr_num_clients) != 0),
	     "num-clients should not be zero");

BUILD_ASSERT((CONFIG_ARM_SIP_SVC_EL3_MAX_ALLOWED_TRANSACTIONS > 0),
	     "CONFIG_ARM_SIP_SVC_EL3_MAX_ALLOWED_TRANSACTIONS should be greater than 0");

#if DT_HAS_COMPAT_STATUS_OKAY(DT_DRV_COMPAT)
SIP_SVC_CONTROLLER_DEFINE(0, DT_PROP(DT_SIP_SMC, method), DEVICE_DT_GET(DT_SIP_SMC),
			  DT_PROP(DT_SIP_SMC, zephyr_num_clients),
			  CONFIG_ARM_SIP_SVC_EL3_MAX_ALLOWED_TRANSACTIONS,
			  CONFIG_ARM_SIP_SVC_EL3_MAILBOX_RESPONSE_SIZE);

DEVICE_DT_DEFINE(DT_SIP_SMC, arm_sip_smc_init, NULL, NULL, NULL, POST_KERNEL,
		 CONFIG_ARM_SIP_SVC_DRIVER_INIT_PRIORITY, &api);
#endif
```
/content/code_sandbox/drivers/sip_svc/sip_smc_intel_socfpga.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,090
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_ETHERNET_PHY_GECKO_H_ #define ZEPHYR_DRIVERS_ETHERNET_PHY_GECKO_H_ #include <zephyr/types.h> #include <soc.h> #ifdef __cplusplus extern "C" { #endif struct phy_gecko_dev { ETH_TypeDef *regs; uint8_t address; }; /** * @brief Initialize Ethernet PHY device. * * @param phy PHY instance * @return 0 on success or a negative error value on failure */ int phy_gecko_init(const struct phy_gecko_dev *phy); /** * @brief Auto-negotiate and configure link parameters. * * @param phy PHY instance * @param status link parameters common to remote and local PHY * @return 0 on success or a negative error value on failure */ int phy_gecko_auto_negotiate(const struct phy_gecko_dev *phy, uint32_t *status); /** * @brief Get PHY ID value. * * @param phy PHY instance * @return PHY ID value or 0xFFFFFFFF on failure */ uint32_t phy_gecko_id_get(const struct phy_gecko_dev *phy); /** * @brief Get PHY linked status. * * @param phy PHY instance * @return PHY linked status */ bool phy_gecko_is_linked(const struct phy_gecko_dev *phy); #ifdef __cplusplus } #endif #endif /* ZEPHYR_DRIVERS_ETHERNET_PHY_GECKO_H_ */ ```
/content/code_sandbox/drivers/ethernet/phy_gecko.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
312
```c /* Stellaris Ethernet Controller * * */ #define DT_DRV_COMPAT ti_stellaris_ethernet #define LOG_MODULE_NAME eth_stellaris #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/net/ethernet.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/device.h> #include <soc.h> #include <ethernet/eth_stats.h> #include <zephyr/irq.h> #include "eth_stellaris_priv.h" static void eth_stellaris_assign_mac(const struct device *dev) { uint8_t mac_addr[6] = DT_INST_PROP(0, local_mac_address); uint32_t value = 0x0; value |= mac_addr[0]; value |= mac_addr[1] << 8; value |= mac_addr[2] << 16; value |= mac_addr[3] << 24; sys_write32(value, REG_MACIA0); value = 0x0; value |= mac_addr[4]; value |= mac_addr[5] << 8; sys_write32(value, REG_MACIA1); } static void eth_stellaris_flush(const struct device *dev) { struct eth_stellaris_runtime *dev_data = dev->data; if (dev_data->tx_pos != 0) { sys_write32(dev_data->tx_word, REG_MACDATA); dev_data->tx_pos = 0; dev_data->tx_word = 0U; } } static void eth_stellaris_send_byte(const struct device *dev, uint8_t byte) { struct eth_stellaris_runtime *dev_data = dev->data; dev_data->tx_word |= byte << (dev_data->tx_pos * 8); dev_data->tx_pos++; if (dev_data->tx_pos == 4) { sys_write32(dev_data->tx_word, REG_MACDATA); dev_data->tx_pos = 0; dev_data->tx_word = 0U; } } static int eth_stellaris_send(const struct device *dev, struct net_pkt *pkt) { struct eth_stellaris_runtime *dev_data = dev->data; struct net_buf *frag; uint16_t i, data_len; /* Frame transmission * * First two bytes is the length of the frame, exclusive of * the header length. 
*/ data_len = net_pkt_get_len(pkt) - sizeof(struct net_eth_hdr); eth_stellaris_send_byte(dev, data_len & 0xff); eth_stellaris_send_byte(dev, (data_len & 0xff00) >> 8); /* Send the payload */ for (frag = pkt->frags; frag; frag = frag->frags) { for (i = 0U; i < frag->len; ++i) { eth_stellaris_send_byte(dev, frag->data[i]); } } /* Will transmit the partial word. */ eth_stellaris_flush(dev); /* Enable transmit. */ sys_write32(BIT_MACTR_NEWTX, REG_MACTR); /* Wait and check if transmit successful or not. */ k_sem_take(&dev_data->tx_sem, K_FOREVER); if (dev_data->tx_err) { dev_data->tx_err = false; return -EIO; } LOG_DBG("pkt sent %p len %d", pkt, data_len); return 0; } static void eth_stellaris_rx_error(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); uint32_t val; eth_stats_update_errors_rx(iface); /* Clear the rx_frame buffer, * otherwise it could lead to underflow errors */ sys_write32(0x0, REG_MACRCTL); sys_write32(BIT_MACRCTL_RSTFIFO, REG_MACRCTL); val = BIT_MACRCTL_BADCRC | BIT_MACRCTL_RXEN; sys_write32(val, REG_MACRCTL); } static struct net_pkt *eth_stellaris_rx_pkt(const struct device *dev, struct net_if *iface) { int frame_len, bytes_left; struct net_pkt *pkt; uint32_t reg_val; uint16_t count; uint8_t *data; /* * The Ethernet frame received from the hardware has the * following format. The first two bytes contains the ethernet * frame length, followed by the actual ethernet frame. * * +---------+---- ... -------+ * | Length | Ethernet Frame | * +---------+---- ... -------+ */ /* * The first word contains the frame length and a portion of * the ethernet frame. Extract the frame length. */ reg_val = sys_read32(REG_MACDATA); frame_len = reg_val & 0x0000ffff; pkt = net_pkt_rx_alloc_with_buffer(iface, frame_len, AF_UNSPEC, 0, K_NO_WAIT); if (!pkt) { return NULL; } /* * The remaining 2 bytes, in the first word is appended to the * ethernet frame. 
*/ count = 2U; data = (uint8_t *)&reg_val + 2; if (net_pkt_write(pkt, data, count)) { goto error; } /* A word has been read already, thus minus 4 bytes to be read. */ bytes_left = frame_len - 4; /* Read the rest of words, minus the partial word and FCS byte. */ for (; bytes_left > 7; bytes_left -= 4) { reg_val = sys_read32(REG_MACDATA); count = 4U; data = (uint8_t *)&reg_val; if (net_pkt_write(pkt, data, count)) { goto error; } } /* Handle the last partial word and discard the 4 Byte FCS. */ while (bytes_left > 0) { /* Read the partial word. */ reg_val = sys_read32(REG_MACDATA); /* Discard the last FCS word. */ if (bytes_left <= 4) { bytes_left = 0; break; } count = bytes_left - 4; data = (uint8_t *)&reg_val; if (net_pkt_write(pkt, data, count)) { goto error; } bytes_left -= 4; } return pkt; error: net_pkt_unref(pkt); return NULL; } static int eth_stellaris_rx(const struct device *dev) { struct eth_stellaris_runtime *dev_data = dev->data; struct net_if *iface = dev_data->iface; struct net_pkt *pkt; pkt = eth_stellaris_rx_pkt(dev, iface); if (!pkt) { LOG_ERR("Failed to read data"); goto err_mem; } if (net_recv_data(iface, pkt) < 0) { LOG_ERR("Failed to place frame in RX Queue"); goto pkt_unref; } return 0; pkt_unref: net_pkt_unref(pkt); err_mem: eth_stellaris_rx_error(iface); return -EIO; } static void eth_stellaris_isr(const struct device *dev) { struct eth_stellaris_runtime *dev_data = dev->data; int isr_val = sys_read32(REG_MACRIS); int num_packets; uint32_t lock; lock = irq_lock(); /* Acknowledge the interrupt. */ sys_write32(isr_val, REG_MACRIS); if (isr_val & BIT_MACRIS_RXINT) { /* * When multiple packets are received by the Ethernet, * only one interrupt may be dispatched to the driver * Therefore, it is necessary to obtain the register NP value * to get how many packets are in the Ethernet. 
*/ num_packets = sys_read32(REG_MACNP); for (int i = 0; i < num_packets; i++) { if (eth_stellaris_rx(dev) != 0) { break; } } } if (isr_val & BIT_MACRIS_TXEMP) { dev_data->tx_err = false; k_sem_give(&dev_data->tx_sem); } if (isr_val & BIT_MACRIS_TXER) { LOG_ERR("Transmit Frame Error"); eth_stats_update_errors_tx(dev_data->iface); dev_data->tx_err = true; k_sem_give(&dev_data->tx_sem); } if (isr_val & BIT_MACRIS_RXER) { LOG_ERR("Error Receiving Frame"); eth_stellaris_rx_error(dev_data->iface); } if (isr_val & BIT_MACRIS_FOV) { LOG_ERR("Error Rx Overrun"); eth_stellaris_rx_error(dev_data->iface); } irq_unlock(lock); } static void eth_stellaris_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); const struct eth_stellaris_config *dev_conf = dev->config; struct eth_stellaris_runtime *dev_data = dev->data; dev_data->iface = iface; /* Assign link local address. */ net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET); ethernet_init(iface); /* Initialize semaphore. */ k_sem_init(&dev_data->tx_sem, 0, 1); /* Initialize Interrupts. */ dev_conf->config_func(dev); } #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *eth_stellaris_stats(const struct device *dev) { struct eth_stellaris_runtime *dev_data = dev->data; return &dev_data->stats; } #endif static int eth_stellaris_dev_init(const struct device *dev) { uint32_t value; /* Assign MAC address to Hardware */ eth_stellaris_assign_mac(dev); /* Program MCRCTL to clear RXFIFO */ value = BIT_MACRCTL_RSTFIFO; sys_write32(value, REG_MACRCTL); /* Enable transmitter */ value = BIT_MACTCTL_DUPLEX | BIT_MACTCTL_CRC | BIT_MACTCTL_PADEN | BIT_MACTCTL_TXEN; sys_write32(value, REG_MACTCTL); /* Enable Receiver */ value = BIT_MACRCTL_BADCRC | BIT_MACRCTL_RXEN; sys_write32(value, REG_MACRCTL); return 0; } static void eth_stellaris_irq_config(const struct device *dev) { /* Enable Interrupt. 
*/ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_stellaris_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } struct eth_stellaris_config eth_cfg = { .mac_base = DT_INST_REG_ADDR(0), .config_func = eth_stellaris_irq_config, }; struct eth_stellaris_runtime eth_data = { .mac_addr = DT_INST_PROP(0, local_mac_address), .tx_err = false, .tx_word = 0, .tx_pos = 0, }; static const struct ethernet_api eth_stellaris_apis = { .iface_api.init = eth_stellaris_init, .send = eth_stellaris_send, #if defined(CONFIG_NET_STATISTICS_ETHERNET) .get_stats = eth_stellaris_stats, #endif }; NET_DEVICE_DT_INST_DEFINE(0, eth_stellaris_dev_init, NULL, &eth_data, &eth_cfg, CONFIG_ETH_INIT_PRIORITY, &eth_stellaris_apis, ETHERNET_L2, NET_L2_GET_CTX_TYPE(ETHERNET_L2), NET_ETH_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_stellaris.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,541
```objective-c /* * */ #ifndef ETH_LAN865X_PRIV_H__ #define ETH_LAN865X_PRIV_H__ #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/net/net_if.h> #include <ethernet/eth_stats.h> #include "oa_tc6.h" #define LAN865X_SPI_MAX_FREQUENCY 25000000U #define LAN865X_HW_BOOT_DELAY_MS 7 #define LAN8650_DEVID 0x8650 #define LAN8651_DEVID 0x8651 #define LAN865X_REV_MASK GENMASK(3, 0) #define LAN865X_RESET_TIMEOUT 10 /* Memory Map Sector (MMS) 1 (0x1) */ #define LAN865x_MAC_NCR MMS_REG(0x1, 0x000) #define LAN865x_MAC_NCR_TXEN BIT(3) #define LAN865x_MAC_NCR_RXEN BIT(2) #define LAN865x_MAC_NCFGR MMS_REG(0x1, 0x001) #define LAN865x_MAC_NCFGR_CAF BIT(4) #define LAN865x_MAC_NCFGR_MTIHEN BIT(6) #define LAN865x_MAC_HRB MMS_REG(0x1, 0x020) #define LAN865x_MAC_HRT MMS_REG(0x1, 0x021) #define LAN865x_MAC_SAB1 MMS_REG(0x1, 0x022) #define LAN865x_MAC_SAB2 MMS_REG(0x1, 0x024) #define LAN865x_MAC_SAT2 MMS_REG(0x1, 0x025) #define LAN865x_MAC_TXRX_ON 1 #define LAN865x_MAC_TXRX_OFF 0 /* Memory Map Sector (MMS) 10 (0xA) */ #define LAN865x_DEVID MMS_REG(0xA, 0x094) struct lan865x_config_plca { bool enable : 1; /* 1 - PLCA enable, 0 - CSMA/CD enable */ uint8_t node_id /* PLCA node id range: 0 to 254 */; uint8_t node_count; /* PLCA node count range: 1 to 255 */ uint8_t burst_count; /* PLCA burst count range: 0x0 to 0xFF */ uint8_t burst_timer; /* PLCA burst timer */ uint8_t to_timer; /* PLCA TO value */ }; struct lan865x_config { struct spi_dt_spec spi; struct gpio_dt_spec interrupt; struct gpio_dt_spec reset; int32_t timeout; /* PLCA */ struct lan865x_config_plca *plca; /* MAC */ bool tx_cut_through_mode; /* 1 - tx cut through, 0 - Store and forward */ bool rx_cut_through_mode; /* 1 - rx cut through, 0 - Store and forward */ }; struct lan865x_data { struct net_if *iface; struct gpio_callback gpio_int_callback; struct k_sem tx_rx_sem; struct k_sem int_sem; struct oa_tc6 *tc6; uint16_t chip_id; uint8_t 
silicon_rev; uint8_t mac_address[6]; bool iface_initialized; bool reset; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_LAN865X_IRQ_THREAD_STACK_SIZE); struct k_thread thread; k_tid_t tid_int; }; #endif /* ETH_LAN865X_PRIV_H__ */ ```
/content/code_sandbox/drivers/ethernet/eth_lan865x_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
764
```c /* W5500 Stand-alone Ethernet Controller with SPI * * Author: Parthiban Nallathambi <parthiban@linumiz.com> * */ #define DT_DRV_COMPAT wiznet_w5500 #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_w5500, CONFIG_ETHERNET_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include "eth.h" #include "eth_w5500_priv.h" #define WIZNET_OUI_B0 0x00 #define WIZNET_OUI_B1 0x08 #define WIZNET_OUI_B2 0xdc #define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f) #define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3) #define W5500_SPI_WRITE_CONTROL(addr) \ ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2)) static int w5500_spi_read(const struct device *dev, uint32_t addr, uint8_t *data, uint32_t len) { const struct w5500_config *cfg = dev->config; int ret; uint8_t cmd[3] = { addr >> 8, addr, W5500_SPI_READ_CONTROL(addr) }; const struct spi_buf tx_buf = { .buf = cmd, .len = ARRAY_SIZE(cmd), }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1, }; /* skip the default dummy 0x010203 */ const struct spi_buf rx_buf[2] = { { .buf = NULL, .len = 3 }, { .buf = data, .len = len }, }; const struct spi_buf_set rx = { .buffers = rx_buf, .count = ARRAY_SIZE(rx_buf), }; ret = spi_transceive_dt(&cfg->spi, &tx, &rx); return ret; } static int w5500_spi_write(const struct device *dev, uint32_t addr, uint8_t *data, uint32_t len) { const struct w5500_config *cfg = dev->config; int ret; uint8_t cmd[3] = { addr >> 8, addr, W5500_SPI_WRITE_CONTROL(addr), }; const struct spi_buf tx_buf[2] = { { .buf = cmd, .len = ARRAY_SIZE(cmd), }, { .buf = data, .len = len, }, }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = ARRAY_SIZE(tx_buf), }; ret = spi_write_dt(&cfg->spi, &tx); return ret; } static int 
w5500_readbuf(const struct device *dev, uint16_t offset, uint8_t *buf, int len) { uint32_t addr; int remain = 0; int ret; const uint32_t mem_start = W5500_Sn_RX_MEM_START; const uint16_t mem_size = W5500_RX_MEM_SIZE; offset %= mem_size; addr = mem_start + offset; if (offset + len > mem_size) { remain = (offset + len) % mem_size; len = mem_size - offset; } ret = w5500_spi_read(dev, addr, buf, len); if (ret || !remain) { return ret; } return w5500_spi_read(dev, mem_start, buf + len, remain); } static int w5500_writebuf(const struct device *dev, uint16_t offset, uint8_t *buf, int len) { uint32_t addr; int ret = 0; int remain = 0; const uint32_t mem_start = W5500_Sn_TX_MEM_START; const uint32_t mem_size = W5500_TX_MEM_SIZE; offset %= mem_size; addr = mem_start + offset; if (offset + len > mem_size) { remain = (offset + len) % mem_size; len = mem_size - offset; } ret = w5500_spi_write(dev, addr, buf, len); if (ret || !remain) { return ret; } return w5500_spi_write(dev, mem_start, buf + len, remain); } static int w5500_command(const struct device *dev, uint8_t cmd) { uint8_t reg; k_timepoint_t end = sys_timepoint_calc(K_MSEC(100)); w5500_spi_write(dev, W5500_S0_CR, &cmd, 1); while (1) { w5500_spi_read(dev, W5500_S0_CR, &reg, 1); if (!reg) { break; } if (sys_timepoint_expired(end)) { return -EIO; } k_busy_wait(W5500_PHY_ACCESS_DELAY); } return 0; } static int w5500_tx(const struct device *dev, struct net_pkt *pkt) { struct w5500_runtime *ctx = dev->data; uint16_t len = net_pkt_get_len(pkt); uint16_t offset; uint8_t off[2]; int ret; w5500_spi_read(dev, W5500_S0_TX_WR, off, 2); offset = sys_get_be16(off); if (net_pkt_read(pkt, ctx->buf, len)) { return -EIO; } ret = w5500_writebuf(dev, offset, ctx->buf, len); if (ret < 0) { return ret; } sys_put_be16(offset + len, off); w5500_spi_write(dev, W5500_S0_TX_WR, off, 2); w5500_command(dev, S0_CR_SEND); if (k_sem_take(&ctx->tx_sem, K_MSEC(10))) { return -EIO; } return 0; } static void w5500_rx(const struct device *dev) { uint8_t 
header[2]; uint8_t tmp[2]; uint16_t off; uint16_t rx_len; uint16_t rx_buf_len; uint16_t read_len; uint16_t reader; struct net_buf *pkt_buf = NULL; struct net_pkt *pkt; struct w5500_runtime *ctx = dev->data; const struct w5500_config *config = dev->config; w5500_spi_read(dev, W5500_S0_RX_RSR, tmp, 2); rx_buf_len = sys_get_be16(tmp); if (rx_buf_len == 0) { return; } w5500_spi_read(dev, W5500_S0_RX_RD, tmp, 2); off = sys_get_be16(tmp); w5500_readbuf(dev, off, header, 2); rx_len = sys_get_be16(header) - 2; pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, rx_len, AF_UNSPEC, 0, K_MSEC(config->timeout)); if (!pkt) { eth_stats_update_errors_rx(ctx->iface); return; } pkt_buf = pkt->buffer; read_len = rx_len; reader = off + 2; do { size_t frag_len; uint8_t *data_ptr; size_t frame_len; data_ptr = pkt_buf->data; frag_len = net_buf_tailroom(pkt_buf); if (read_len > frag_len) { frame_len = frag_len; } else { frame_len = read_len; } w5500_readbuf(dev, reader, data_ptr, frame_len); net_buf_add(pkt_buf, frame_len); reader += frame_len; read_len -= frame_len; pkt_buf = pkt_buf->frags; } while (read_len > 0); if (net_recv_data(ctx->iface, pkt) < 0) { net_pkt_unref(pkt); } sys_put_be16(off + 2 + rx_len, tmp); w5500_spi_write(dev, W5500_S0_RX_RD, tmp, 2); w5500_command(dev, S0_CR_RECV); } static void w5500_update_link_status(const struct device *dev) { uint8_t phycfgr; struct w5500_runtime *ctx = dev->data; if (w5500_spi_read(dev, W5500_PHYCFGR, &phycfgr, 1) < 0) { return; } if (phycfgr & 0x01) { if (ctx->link_up != true) { LOG_INF("%s: Link up", dev->name); ctx->link_up = true; net_eth_carrier_on(ctx->iface); } } else { if (ctx->link_up != false) { LOG_INF("%s: Link down", dev->name); ctx->link_up = false; net_eth_carrier_off(ctx->iface); } } } static void w5500_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct device *dev = p1; uint8_t ir; int res; struct w5500_runtime *ctx = dev->data; const struct w5500_config *config = dev->config; while (true) { 
res = k_sem_take(&ctx->int_sem, K_MSEC(CONFIG_PHY_MONITOR_PERIOD)); if (res == 0) { /* semaphore taken, update link status and receive packets */ if (ctx->link_up != true) { w5500_update_link_status(dev); } while (gpio_pin_get_dt(&(config->interrupt))) { /* Read interrupt */ w5500_spi_read(dev, W5500_S0_IR, &ir, 1); if (ir) { /* Clear interrupt */ w5500_spi_write(dev, W5500_S0_IR, &ir, 1); LOG_DBG("IR received"); if (ir & S0_IR_SENDOK) { k_sem_give(&ctx->tx_sem); LOG_DBG("TX Done"); } if (ir & S0_IR_RECV) { w5500_rx(dev); LOG_DBG("RX Done"); } } } } else if (res == -EAGAIN) { /* semaphore timeout period expired, check link status */ w5500_update_link_status(dev); } } } static void w5500_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct w5500_runtime *ctx = dev->data; net_if_set_link_addr(iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); if (!ctx->iface) { ctx->iface = iface; } ethernet_init(iface); /* Do not start the interface until PHY link is up */ net_if_carrier_off(iface); } static enum ethernet_hw_caps w5500_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T #if defined(CONFIG_NET_PROMISCUOUS_MODE) | ETHERNET_PROMISC_MODE #endif ; } static int w5500_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct w5500_runtime *ctx = dev->data; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr)); w5500_spi_write(dev, W5500_SHAR, ctx->mac_addr, sizeof(ctx->mac_addr)); LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name, ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2], ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]); /* Register Ethernet MAC Address with the upper layer */ net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); return 0; case 
ETHERNET_CONFIG_TYPE_PROMISC_MODE: if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE)) { uint8_t mode; uint8_t mr = W5500_S0_MR_MF; w5500_spi_read(dev, W5500_S0_MR, &mode, 1); if (config->promisc_mode) { if (!(mode & BIT(mr))) { return -EALREADY; } /* disable MAC filtering */ WRITE_BIT(mode, mr, 0); } else { if (mode & BIT(mr)) { return -EALREADY; } /* enable MAC filtering */ WRITE_BIT(mode, mr, 1); } return w5500_spi_write(dev, W5500_S0_MR, &mode, 1); } return -ENOTSUP; default: return -ENOTSUP; } } static int w5500_hw_start(const struct device *dev) { uint8_t mode = S0_MR_MACRAW | BIT(W5500_S0_MR_MF); uint8_t mask = IR_S0; /* configure Socket 0 with MACRAW mode and MAC filtering enabled */ w5500_spi_write(dev, W5500_S0_MR, &mode, 1); w5500_command(dev, S0_CR_OPEN); /* enable interrupt */ w5500_spi_write(dev, W5500_SIMR, &mask, 1); return 0; } static int w5500_hw_stop(const struct device *dev) { uint8_t mask = 0; /* disable interrupt */ w5500_spi_write(dev, W5500_SIMR, &mask, 1); w5500_command(dev, S0_CR_CLOSE); return 0; } static struct ethernet_api w5500_api_funcs = { .iface_api.init = w5500_iface_init, .get_capabilities = w5500_get_capabilities, .set_config = w5500_set_config, .start = w5500_hw_start, .stop = w5500_hw_stop, .send = w5500_tx, }; static int w5500_soft_reset(const struct device *dev) { int ret; uint8_t mask = 0; uint8_t tmp = MR_RST; ret = w5500_spi_write(dev, W5500_MR, &tmp, 1); if (ret < 0) { return ret; } k_msleep(5); tmp = MR_PB; w5500_spi_write(dev, W5500_MR, &tmp, 1); /* disable interrupt */ return w5500_spi_write(dev, W5500_SIMR, &mask, 1); } static void w5500_gpio_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct w5500_runtime *ctx = CONTAINER_OF(cb, struct w5500_runtime, gpio_cb); k_sem_give(&ctx->int_sem); } static void w5500_set_macaddr(const struct device *dev) { struct w5500_runtime *ctx = dev->data; #if DT_INST_PROP(0, zephyr_random_mac_address) gen_random_mac(ctx->mac_addr, WIZNET_OUI_B0, WIZNET_OUI_B1, 
WIZNET_OUI_B2); #endif w5500_spi_write(dev, W5500_SHAR, ctx->mac_addr, sizeof(ctx->mac_addr)); } static void w5500_memory_configure(const struct device *dev) { int i; uint8_t mem = 0x10; /* Configure RX & TX memory to 16K */ w5500_spi_write(dev, W5500_Sn_RXMEM_SIZE(0), &mem, 1); w5500_spi_write(dev, W5500_Sn_TXMEM_SIZE(0), &mem, 1); mem = 0; for (i = 1; i < 8; i++) { w5500_spi_write(dev, W5500_Sn_RXMEM_SIZE(i), &mem, 1); w5500_spi_write(dev, W5500_Sn_TXMEM_SIZE(i), &mem, 1); } } static int w5500_init(const struct device *dev) { int err; uint8_t rtr[2]; const struct w5500_config *config = dev->config; struct w5500_runtime *ctx = dev->data; ctx->link_up = false; if (!spi_is_ready_dt(&config->spi)) { LOG_ERR("SPI master port %s not ready", config->spi.bus->name); return -EINVAL; } if (!gpio_is_ready_dt(&config->interrupt)) { LOG_ERR("GPIO port %s not ready", config->interrupt.port->name); return -EINVAL; } if (gpio_pin_configure_dt(&config->interrupt, GPIO_INPUT)) { LOG_ERR("Unable to configure GPIO pin %u", config->interrupt.pin); return -EINVAL; } gpio_init_callback(&(ctx->gpio_cb), w5500_gpio_callback, BIT(config->interrupt.pin)); if (gpio_add_callback(config->interrupt.port, &(ctx->gpio_cb))) { return -EINVAL; } gpio_pin_interrupt_configure_dt(&config->interrupt, GPIO_INT_EDGE_FALLING); if (config->reset.port) { if (!gpio_is_ready_dt(&config->reset)) { LOG_ERR("GPIO port %s not ready", config->reset.port->name); return -EINVAL; } if (gpio_pin_configure_dt(&config->reset, GPIO_OUTPUT)) { LOG_ERR("Unable to configure GPIO pin %u", config->reset.pin); return -EINVAL; } gpio_pin_set_dt(&config->reset, 0); k_usleep(500); } err = w5500_soft_reset(dev); if (err) { LOG_ERR("Reset failed"); return err; } w5500_set_macaddr(dev); w5500_memory_configure(dev); /* check retry time value */ w5500_spi_read(dev, W5500_RTR, rtr, 2); if (sys_get_be16(rtr) != RTR_DEFAULT) { LOG_ERR("Unable to read RTR register"); return -ENODEV; } k_thread_create(&ctx->thread, ctx->thread_stack, 
CONFIG_ETH_W5500_RX_THREAD_STACK_SIZE, w5500_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_W5500_RX_THREAD_PRIO), 0, K_NO_WAIT); k_thread_name_set(&ctx->thread, "eth_w5500"); LOG_INF("W5500 Initialized"); return 0; } static struct w5500_runtime w5500_0_runtime = { #if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0)) .mac_addr = DT_INST_PROP(0, local_mac_address), #endif .tx_sem = Z_SEM_INITIALIZER(w5500_0_runtime.tx_sem, 1, UINT_MAX), .int_sem = Z_SEM_INITIALIZER(w5500_0_runtime.int_sem, 0, UINT_MAX), }; static const struct w5500_config w5500_0_config = { .spi = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0), .interrupt = GPIO_DT_SPEC_INST_GET(0, int_gpios), .reset = GPIO_DT_SPEC_INST_GET_OR(0, reset_gpios, { 0 }), .timeout = CONFIG_ETH_W5500_TIMEOUT, }; ETH_NET_DEVICE_DT_INST_DEFINE(0, w5500_init, NULL, &w5500_0_runtime, &w5500_0_config, CONFIG_ETH_INIT_PRIORITY, &w5500_api_funcs, NET_ETH_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_w5500.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,425
```objective-c /* * Driver for Synopsys DesignWare MAC * * * * Definitions in this file are based on: * * DesignWare Cores Ethernet Quality-of-Service Databook * Version 5.10a, December 2017 */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_DWMAC_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_DWMAC_PRIV_H_ /* * Global driver parameters */ /* number of hardware descriptors in uncached memory */ #define NB_TX_DESCS CONFIG_DWMAC_NB_TX_DESCS #define NB_RX_DESCS CONFIG_DWMAC_NB_RX_DESCS /* stack size for RX refill thread */ #define RX_REFILL_STACK_SIZE 1024 /* * Common structure definitions */ /* hardware descriptor representation */ struct dwmac_dma_desc { uint32_t des0; uint32_t des1; uint32_t des2; uint32_t des3; }; /* our private instance structure */ struct dwmac_priv { mem_addr_t base_addr; struct net_if *iface; const struct device *clock; uint8_t mac_addr[6]; uint32_t feature0; uint32_t feature1; uint32_t feature2; uint32_t feature3; struct dwmac_dma_desc *tx_descs, *rx_descs; struct k_sem free_tx_descs, free_rx_descs; unsigned int tx_desc_head, tx_desc_tail; unsigned int rx_desc_head, rx_desc_tail; #ifdef CONFIG_MMU uintptr_t tx_descs_phys, rx_descs_phys; #endif struct net_buf *tx_frags[NB_TX_DESCS]; /* index shared with tx_descs */ struct net_buf *rx_frags[NB_RX_DESCS]; /* index shared with rx_descs */ struct net_pkt *rx_pkt; unsigned int rx_bytes; K_KERNEL_STACK_MEMBER(rx_refill_thread_stack, RX_REFILL_STACK_SIZE); struct k_thread rx_refill_thread; }; /* * Handy register accessors */ #define REG_READ(r) sys_read32(p->base_addr + (r)) #define REG_WRITE(r, v) sys_write32((v), p->base_addr + (r)) /* * Shared declarations between core and platform glue code */ int dwmac_probe(const struct device *dev); int dwmac_bus_init(struct dwmac_priv *p); void dwmac_platform_init(struct dwmac_priv *p); void dwmac_isr(const struct device *ddev); extern const struct ethernet_api dwmac_api; /* * MAC Register Definitions */ /* 17.1.1 */ #define MAC_CONF 0x0000 #define MAC_CONF_ARPEN BIT(31) 
#define MAC_CONF_SARC GENMASK(30, 28) #define MAC_CONF_IPC BIT(27) #define MAC_CONF_IPG GENMASK(26, 24) #define MAC_CONF_GPSLCE BIT(23) #define MAC_CONF_S2KP BIT(22) #define MAC_CONF_CST BIT(21) #define MAC_CONF_ACS BIT(20) #define MAC_CONF_WD BIT(19) #define MAC_CONF_BE BIT(18) #define MAC_CONF_JD BIT(17) #define MAC_CONF_JE BIT(16) #define MAC_CONF_PS BIT(15) #define MAC_CONF_FES BIT(14) #define MAC_CONF_DM BIT(13) #define MAC_CONF_LM BIT(12) #define MAC_CONF_ECRSFD BIT(11) #define MAC_CONF_DO BIT(10) #define MAC_CONF_DCRS BIT(9) #define MAC_CONF_DR BIT(8) #define MAC_CONF_BL GENMASK(6, 5) #define MAC_CONF_DC BIT(4) #define MAC_CONF_PRELEN GENMASK(3, 2) #define MAC_CONF_TE BIT(1) #define MAC_CONF_RE BIT(0) /* 17.1.2 */ #define MAC_EXT_CONF 0x0004 #define MAC_EXT_CONF_FHE BIT(31) #define MAC_EXT_CONF_EIPG GENMASK(29, 25) #define MAC_EXT_CONF_EIPGEN BIT(24) #define MAC_EXT_CONF_HDSMS GENMASK(22, 20) #define MAC_EXT_CONF_PDC BIT(19) #define MAC_EXT_CONF_USP BIT(18) #define MAC_EXT_CONF_SPEN BIT(17) #define MAC_EXT_CONF_DCRCC BIT(16) #define MAC_EXT_CONF_GPSL GENMASK(13, 0) /* 17.1.3 */ #define MAC_PKT_FILTER 0x0008 #define MAC_PKT_FILTER_RA BIT(31) #define MAC_PKT_FILTER_DNTU BIT(21) #define MAC_PKT_FILTER_IPFE BIT(20) #define MAC_PKT_FILTER_VTFE BIT(16) #define MAC_PKT_FILTER_HPF it(10) #define MAC_PKT_FILTER_SAF BIT(9) #define MAC_PKT_FILTER_SAIF BIT(8) #define MAC_PKT_FILTER_PCF GENMASK(7, 6) #define MAC_PKT_FILTER_DBF BIT(5) #define MAC_PKT_FILTER_PM BIT(4) #define MAC_PKT_FILTER_DAIF BIT(3) #define MAC_PKT_FILTER_HMC BIT(2) #define MAC_PKT_FILTER_HUC BIT(1) #define MAC_PKT_FILTER_PR BIT(0) /* 17.1.4 */ #define MAC_WDOG_TIMEOUT 0x000c #define MAC_WDOG_TIMEOUT_PWE BIT(8) #define MAC_WDOG_TIMEOUT_WTO GENMASK(3, 0) /* 17.1.5 ... 
17.1.12 */ #define MAC_HASH_TABLE(n) (0x0010 + 4 * (n)) /* 17.1.13 */ #define MAC_VLAN_TAG 0x0050 /* 17.1.14 */ #define MAC_VLAN_TAG_CTRL 0x0050 #define MAC_VLAN_TAG_CTRL_EIVLRXS BIT(31) #define MAC_VLAN_TAG_CTRL_EIVLS GENMASK(29, 28) #define MAC_VLAN_TAG_CTRL_ERIVLT BIT(27) #define MAC_VLAN_TAG_CTRL_EDVLP BIT(26) #define MAC_VLAN_TAG_CTRL_VTHM BIT(25) #define MAC_VLAN_TAG_CTRL_EVLRXS BIT(24) #define MAC_VLAN_TAG_CTRL_EVLS GENMASK(22, 21) #define MAC_VLAN_TAG_CTRL_DOVLTC BIT(20) #define MAC_VLAN_TAG_CTRL_ERSVLM BIT(19) #define MAC_VLAN_TAG_CTRL_ESVL BIT(18) #define MAC_VLAN_TAG_CTRL_VTIM BIT(17) #define MAC_VLAN_TAG_CTRL_ETV BIT(16) #define MAC_VLAN_TAG_CTRL_VL GENMASK(15, 0) #define MAC_VLAN_TAG_CTRL_OFS GENMASK(6, 2) #define MAC_VLAN_TAG_CTRL_CT BIT(1) #define MAC_VLAN_TAG_CTRL_OB BIT(0) /* 17.1.15 */ #define MAC_VLAN_TAG_DATA 0x0054 /* 17.1.17 */ #define MAC_VLAN_HASH_TBL 0x0058 /* 17.1.19 */ #define MAC_VLAN_INCL 0x0060 /* 17.1.20 */ #define MAC_INNER_VLAN_INCL 0x0064 /* 17.1.21 */ #define MAC_Qn_TX_FLOW_CTRL(n) (0x0070 + 4 * (n)) #define MAC_Qn_TX_FLOW_CTRL_PT GENMASK(31, 16) #define MAC_Qn_TX_FLOW_CTRL_DZPQ BIT(7) #define MAC_Qn_TX_FLOW_CTRL_PLT GENMASK(6, 4) #define MAC_Qn_TX_FLOW_CTRL_TFE BIT(1) #define MAC_Qn_TX_FLOW_CTRL_FCB_BPA BIT(0) /* 17.1.23 */ #define MAC_RX_FLOW_CTRL 0x0090 #define MAC_RX_FLOW_CTRL_PFCE BIT(8) #define MAC_RX_FLOW_CTRL_UP BIT(1) #define MAC_RX_FLOW_CTRL_RFE BIT(0) /* 17.1.24 */ #define MAC_RXQ_CTRL4 0x0094 /* 17.1.5 */ #define MAC_TXQ_PRTY_MAP0 0x0098 /* 17.1.26 */ #define MAC_TXQ_PRTY_MAP1 0x009c /* 17.1.27 */ #define MAC_RXQ_CTRL0 0x00a0 /* 17.1.28 */ #define MAC_RXQ_CTRL1 0x00a4 /* 17.1.29 */ #define MAC_RXQ_CTRL2 0x00a8 /* 17.1.30 */ #define MAC_RXQ_CTRL3 0x00ac /* 17.1.31 */ #define MAC_IRQ_STATUS 0x00b0 #define MAC_IRQ_STATUS_MFRIS BIT(20) #define MAC_IRQ_STATUS_MFTIS BIT(19) #define MAC_IRQ_STATUS_MDIOIS BIT(18) #define MAC_IRQ_STATUS_FPEIS BIT(17) #define MAC_IRQ_STATUS_GPIIS BIT(15) #define MAC_IRQ_STATUS_RXSTSIS BIT(14) 
#define MAC_IRQ_STATUS_TXSTSIS BIT(13) #define MAC_IRQ_STATUS_TSIS BIT(12) #define MAC_IRQ_STATUS_MMCRXIPIS BIT(11) #define MAC_IRQ_STATUS_MMCTXIS BIT(10) #define MAC_IRQ_STATUS_MMCRXIS BIT(9) #define MAC_IRQ_STATUS_MMCIS BIT(8) #define MAC_IRQ_STATUS_LPIIS BIT(5) #define MAC_IRQ_STATUS_PMTIS BIT(4) #define MAC_IRQ_STATUS_PHYIS BIT(3) #define MAC_IRQ_STATUS_PCSANCIS BIT(2) #define MAC_IRQ_STATUS_PCSLCHGIS BIT(1) #define MAC_IRQ_STATUS_RGSMIIIS BIT(0) /* 17.1.32 */ #define MAC_IRQ_ENABLE 0x00b4 #define MAC_IRQ_ENABLE_MDIOIE BIT(18) #define MAC_IRQ_ENABLE_FPEIE BIT(17) #define MAC_IRQ_ENABLE_RXSTSIE BIT(14) #define MAC_IRQ_ENABLE_TXSTSIE BIT(13) #define MAC_IRQ_ENABLE_TSIE BIT(12) #define MAC_IRQ_ENABLE_LPIIE BIT(5) #define MAC_IRQ_ENABLE_PMTIE BIT(4) #define MAC_IRQ_ENABLE_PHYIE BIT(3) #define MAC_IRQ_ENABLE_PCSANCIE BIT(2) #define MAC_IRQ_ENABLE_PCSLCHGIE BIT(1) #define MAC_IRQ_ENABLE_RGSMIIIE BIT(0) /* 17.1.33 */ #define MAC_RX_TX_STATUS 0x00b8 #define MAC_RX_TX_STATUS_WT BIT(8) #define MAC_RX_TX_STATUS_EXCOL BIT(5) #define MAC_RX_TX_STATUS_LCOL BIT(4) #define MAC_RX_TX_STATUS_EXDEF BIT(3) #define MAC_RX_TX_STATUS_LCARR BIT(2) #define MAC_RX_TX_STATUS_NCARR BIT(1) #define MAC_RX_TX_STATUS_TJT BIT(0) /* 17.1.34 */ #define MAC_PMT_CTRL_STATUS 0x00c0 #define MAC_PMT_CTRL_STATUS_RWKFILTRST BIT(31) #define MAC_PMT_CTRL_STATUS_RWKPTR GENMASK(28, 24) #define MAC_PMT_CTRL_STATUS_RWKPFE BIT(10) #define MAC_PMT_CTRL_STATUS_GLBLUCAST BIT(9) #define MAC_PMT_CTRL_STATUS_RWKPRCVD BIT(6) #define MAC_PMT_CTRL_STATUS_MGKPRCVD BIT(5) #define MAC_PMT_CTRL_STATUS_RWKPKTEN BIT(2) #define MAC_PMT_CTRL_STATUS_MGKPKTEN BIT(1) #define MAC_PMT_CTRL_STATUS_PWRDWN BIT(0) /* 17.1.35 */ #define MAC_RWK_PKT_FILTER 0x00c4 /* 17.1.40 */ #define MAC_LPI_CTRL_STATUS 0x00d0 #define MAC_LPI_CTRL_STATUS_LPITCSE BIT(21) #define MAC_LPI_CTRL_STATUS_LPIATE BIT(20) #define MAC_LPI_CTRL_STATUS_LPITXA BIT(19) #define MAC_LPI_CTRL_STATUS_PLSEN BIT(18) #define MAC_LPI_CTRL_STATUS_PLS BIT(17) #define 
MAC_LPI_CTRL_STATUS_LPIEN BIT(16) #define MAC_LPI_CTRL_STATUS_RLPIST BIT(9) #define MAC_LPI_CTRL_STATUS_TLPIST BIT(8) #define MAC_LPI_CTRL_STATUS_RLPIEX BIT(3) #define MAC_LPI_CTRL_STATUS_RLPIEN BIT(2) #define MAC_LPI_CTRL_STATUS_TLPIEX BIT(1) #define MAC_LPI_CTRL_STATUS_TLPIEN BIT(0) /* 17.1.41 */ #define MAC_LPI_TIMERS_CTRL 0x00d4 /* 17.1.42 */ #define MAC_LPI_ENTRY_TIMER 0x00d8 /* 17.1.43 */ #define MAC_1US_TIC_COUNTERR 0x00dc /* 17.1.44 */ #define MAC_AN_CTRL 0x00e0 #define MAC_AN_CTRL_SGMRAL BIT(18) #define MAC_AN_CTRL_LR BIT(17) #define MAC_AN_CTRL_ECD BIT(16) #define MAC_AN_CTRL_ELE BIT(14) #define MAC_AN_CTRL_ANE BIT(12) #define MAC_AN_CTRL_RAN BIT(9) /* 17.1.45 */ #define MAC_AN_STATUS 0x00e4 #define MAC_AN_STATUS_ES BIT(8) #define MAC_AN_STATUS_ANC BIT(5) #define MAC_AN_STATUS_ANA BIT(3) #define MAC_AN_STATUS_LS BIT(2) /* 17.1.46 */ #define MAC_AN_ADVERT 0x00e8 #define MAC_AN_ADVERT_NP BIT(15) #define MAC_AN_ADVERT_RFE GENMASK(13, 12) #define MAC_AN_ADVERT_PSE GENMASK(8, 7) #define MAC_AN_ADVERT_HD BIT(6) #define MAC_AN_ADVERT_FD BIT(5) /* 17.1.47 */ #define MAC_AN_LINK_PRTNR 0x00ec #define MAC_AN_LINK_PRTNR_NP BIT(15) #define MAC_AN_LINK_PRTNR_ACK BIT(14) #define MAC_AN_LINK_PRTNR_RFE GENMASK(13, 12) #define MAC_AN_LINK_PRTNR_PSE GENMASK(8, 7) #define MAC_AN_LINK_PRTNR_HD BIT(6) #define MAC_AN_LINK_PRTNR_FD BIT(5) /* 17.1.48 */ #define MAC_AN_EXPANSION 0x00f0 #define MAC_AN_EXPANSION_NPA BIT(2) #define MAC_AN_EXPANSION_NPR BIT(1) /* 17.1.49 */ #define MAC_TBI_EXT_STATUS 0x00f4 #define MAC_TBI_EXT_STATUS_GFD BIT(15) #define MAC_TBI_EXT_STATUS_GHD BIT(14) /* 17.1.50 */ #define MAC_PHYIF_CTRL_STATUS 0x00f8 #define MAC_PHYIF_CTRL_STATUS_FALSCARDET BIT(21) #define MAC_PHYIF_CTRL_STATUS_JABTO BIT(20) #define MAC_PHYIF_CTRL_STATUS_LNKSTS BIT(19) #define MAC_PHYIF_CTRL_STATUS_LNKSPEED GENMASK(18, 17) #define MAC_PHYIF_CTRL_STATUS_LNKMOD BIT(16) #define MAC_PHYIF_CTRL_STATUS_SMIDRXS BIT(4) #define MAC_PHYIF_CTRL_STATUS_SFTERR BIT(2) #define 
MAC_PHYIF_CTRL_STATUS_LUD BIT(1) #define MAC_PHYIF_CTRL_STATUS_TC BIT(0) /* 17.1.51 */ #define MAC_VERSION 0x0110 #define MAC_VERSION_USERVER GENMASK(15, 8) #define MAC_VERSION_SNPSVER GENMASK(7, 0) /* 17.1.52 */ #define MAC_DEBUG 0x0114 /* 17.1.53 */ #define MAC_HW_FEATURE0 0x011c #define MAC_HW_FEATURE0_ACTPHYSEL GENMASK(30, 28) #define MAC_HW_FEATURE0_SAVLANINS BIT(27) #define MAC_HW_FEATURE0_TSSTSSEL GENMASK(26, 25) #define MAC_HW_FEATURE0_MACADR64SEL BIT(24) #define MAC_HW_FEATURE0_MACADR32SEL BIT(23) #define MAC_HW_FEATURE0_ADDMACADRSEL GENMASK(22, 18) #define MAC_HW_FEATURE0_RXCOESEL BIT(16) #define MAC_HW_FEATURE0_TXCOESEL BIT(14) #define MAC_HW_FEATURE0_EEESEL BIT(13) #define MAC_HW_FEATURE0_TSSEL BIT(12) #define MAC_HW_FEATURE0_ARPOFFSEL BIT(9) #define MAC_HW_FEATURE0_MMCSEL BIT(8) #define MAC_HW_FEATURE0_MGKSEL BIT(7) #define MAC_HW_FEATURE0_RWKSEL BIT(6) #define MAC_HW_FEATURE0_SMASEL BIT(5) #define MAC_HW_FEATURE0_VLHASH BIT(4) #define MAC_HW_FEATURE0_PCSSEL BIT(3) #define MAC_HW_FEATURE0_HDSEL BIT(2) #define MAC_HW_FEATURE0_GMIISEL BIT(1) #define MAC_HW_FEATURE0_MIISEL BIT(0) /* 17.1.54 */ #define MAC_HW_FEATURE1 0x0120 #define MAC_HW_FEATURE1_L3L4FNUM GENMASK(30, 27) #define MAC_HW_FEATURE1_HASHTBLSZ GENMASK(25, 24) #define MAC_HW_FEATURE1_POUOST BIT(23) #define MAC_HW_FEATURE1_RAVSEL BIT(21) #define MAC_HW_FEATURE1_AVSEL BIT(20) #define MAC_HW_FEATURE1_DBGMEMA BIT(19) #define MAC_HW_FEATURE1_TSOEN BIT(18) #define MAC_HW_FEATURE1_SPHEN BIT(17) #define MAC_HW_FEATURE1_DCBEN BIT(16) #define MAC_HW_FEATURE1_ADDR64 GENMASK(15, 14) #define MAC_HW_FEATURE1_ADVTHWORD BIT(13) #define MAC_HW_FEATURE1_PTOEN BIT(12) #define MAC_HW_FEATURE1_OSTEN BIT(11) #define MAC_HW_FEATURE1_TXFIFOSIZE GENMASK(10, 6) #define MAC_HW_FEATURE1_SPRAM BIT(5) #define MAC_HW_FEATURE1_RXFIFOSIZE GENMASK(4, 0) /* 17.1.55 */ #define MAC_HW_FEATURE2 0x0124 #define MAC_HW_FEATURE2_AUXSNAPNUM GENMASK(30, 28) #define MAC_HW_FEATURE2_PPSOUTNUM GENMASK(28, 24) #define MAC_HW_FEATURE2_TXCHCNT 
GENMASK(21, 18) #define MAC_HW_FEATURE2_RXCHCNT GENMASK(15, 12) #define MAC_HW_FEATURE2_TXQCNT GENMASK(9, 6) #define MAC_HW_FEATURE2_RXQCNT GENMASK(3, 0) /* 17.1.56 */ #define MAC_HW_FEATURE3 0x0128 #define MAC_HW_FEATURE3_ASP GENMASK(29, 28) #define MAC_HW_FEATURE3_TBSSEL BIT(27) #define MAC_HW_FEATURE3_FPESEL BIT(26) #define MAC_HW_FEATURE3_ESTWID GENMASK(21, 20) #define MAC_HW_FEATURE3_ESTDEP GENMASK(19, 17) #define MAC_HW_FEATURE3_ESTSEL BIT(16) #define MAC_HW_FEATURE3_FRPES GENMASK(14, 13) #define MAC_HW_FEATURE3_FRPBS GENMASK(12, 11) #define MAC_HW_FEATURE3_FRPSEL BIT(10) #define MAC_HW_FEATURE3_PDUPSEL BIT(9) #define MAC_HW_FEATURE3_DVLAN BIT(5) #define MAC_HW_FEATURE3_CBTISEL BIT(4) #define MAC_HW_FEATURE3_NRVF GENMASK(2, 0) /* 17.1.57 */ #define MAC_DPP_FSM_IRQ_STATUS 0x0140 #define MAC_DPP_FSM_IRQ_STATUS_FSMPES BIT(24) #define MAC_DPP_FSM_IRQ_STATUS_SLVTES BIT(17) #define MAC_DPP_FSM_IRQ_STATUS_MSTTES BIT(16) #define MAC_DPP_FSM_IRQ_STATUS_RVCTES BIT(15) #define MAC_DPP_FSM_IRQ_STATUS_R125ES BIT(14) #define MAC_DPP_FSM_IRQ_STATUS_T125ES BIT(13) #define MAC_DPP_FSM_IRQ_STATUS_PTES BIT(12) #define MAC_DPP_FSM_IRQ_STATUS_ATES BIT(11) #define MAC_DPP_FSM_IRQ_STATUS_CTES BIT(10) #define MAC_DPP_FSM_IRQ_STATUS_RTES BIT(9) #define MAC_DPP_FSM_IRQ_STATUS_TTES BIT(8) #define MAC_DPP_FSM_IRQ_STATUS_ASRPES BIT(7) #define MAC_DPP_FSM_IRQ_STATUS_CWPES BIT(6) #define MAC_DPP_FSM_IRQ_STATUS_ARPES BIT(5) #define MAC_DPP_FSM_IRQ_STATUS_MTSPES BIT(4) #define MAC_DPP_FSM_IRQ_STATUS_MPES BIT(3) #define MAC_DPP_FSM_IRQ_STATUS_RDPES BIT(2) #define MAC_DPP_FSM_IRQ_STATUS_TPES BIT(1) #define MAC_DPP_FSM_IRQ_STATUS_ATPES BIT(0) /* 17.1.58 */ #define MAC_AXI_SLV_DPE_ADDR_STATUS 0x0144 #define MAC_AXI_SLV_DPE_ADDR_STATUS_ASPEAS GENMASK(13, 0) /* 17.1.59 */ #define MAC_FSM_CTRL 0x0148 #define MAC_FSM_CTRL_RVCLGRNML BIT(31) #define MAC_FSM_CTRL_R125LGRNML BIT(30) #define MAC_FSM_CTRL_T125LGRNML BIT(29) #define MAC_FSM_CTRL_PLGRNML BIT(28) #define MAC_FSM_CTRL_ALGRNML BIT(27) #define 
MAC_FSM_CTRL_CLGRNML BIT(26) #define MAC_FSM_CTRL_RLGRNML BIT(25) #define MAC_FSM_CTRL_TLGRNML BIT(24) #define MAC_FSM_CTRL_RVCPEIN BIT(23) #define MAC_FSM_CTRL_R125PEIN BIT(22) #define MAC_FSM_CTRL_T125PEIN BIT(21) #define MAC_FSM_CTRL_PPEIN BIT(20) #define MAC_FSM_CTRL_APEIN BIT(19) #define MAC_FSM_CTRL_CPEIN BIT(18) #define MAC_FSM_CTRL_RPEIN BIT(17) #define MAC_FSM_CTRL_TPEIN BIT(16) #define MAC_FSM_CTRL_RVCTEIN BIT(15) #define MAC_FSM_CTRL_R125TEIN BIT(14) #define MAC_FSM_CTRL_T125TEIN BIT(13) #define MAC_FSM_CTRL_PTEIN BIT(12) #define MAC_FSM_CTRL_ATEIN BIT(11) #define MAC_FSM_CTRL_CTEIN BIT(10) #define MAC_FSM_CTRL_RTEIN BIT(9) #define MAC_FSM_CTRL_TTEIN BIT(8) #define MAC_FSM_CTRL_PRTYEN BIT(1) #define MAC_FSM_CTRL_TMOUTEN BIT(0) /* 17.1.60 */ #define MAC_FSM_ACT_TIMER 0x014c #define MAC_FSM_ACT_TIMER_LTMRMD GENMASK(23, 20) #define MAC_FSM_ACT_TIMER_NTMRMD GENMASK(19, 16) #define MAC_FSM_ACT_TIMER_TMR GENMASK(9, 0) /* 17.1.62 */ #define MAC_MDIO_ADDRESS 0x0200 #define MAC_MDIO_ADDRESS_PSE BIT(27) #define MAC_MDIO_ADDRESS_BTB BIT(26) #define MAC_MDIO_ADDRESS_PA GENMASK(25, 21) #define MAC_MDIO_ADDRESS_RDA GENMASK(20, 16) #define MAC_MDIO_ADDRESS_NTC GENMASK(14, 12) #define MAC_MDIO_ADDRESS_CR GENMASK(11, 8) /* fix: was BIT(11, 8); BIT takes one argument — the CSR clock-range field spans bits 11:8, so GENMASK is the correct macro (matches the GENMASK usage of the sibling multi-bit fields PA/RDA/NTC) */ #define MAC_MDIO_ADDRESS_SKAP BIT(4) #define MAC_MDIO_ADDRESS_GOC_1 BIT(3) #define MAC_MDIO_ADDRESS_GOC_0 BIT(2) #define MAC_MDIO_ADDRESS_GOC_C45E BIT(1) #define MAC_MDIO_ADDRESS_GOC_GB BIT(0) /* 17.1.63 */ #define MAC_MDIO_DATA 0x0204 #define MAC_MDIO_DATA_RA GENMASK(31, 16) #define MAC_MDIO_DATA_GD GENMASK(15, 0) /* 17.1.64 */ #define MAC_GPIO_CTRL 0x0208 /* 17.1.65 */ #define MAC_GPIO_STATUS 0x020c /* 17.1.66 */ #define MAC_ARP_ADDRESS 0x0210 /* 17.1.67 */ #define MAC_CSR_SW_CTRL 0x0230 /* 17.1.68 */ #define MAC_FPE_CTRL_STS 0x0234 /* 17.1.69 */ #define MAC_EXT_CFG1 0x0238 #define MAC_EXT_CFG1_SPLM GENMASK(9, 8) #define MAC_EXT_CFG1_SPLOFST GENMASK(6, 0) /* 17.1.70 */ #define MAC_PRESN_TIME_NS 0x0240 /* 17.1.71 */ #define MAC_PRESN_TIME_UPDT 0x0244 /*
17.1.72, 17.1.74 */ #define MAC_ADDRESS_HIGH(n) (0x0300 + 8 * (n)) #define MAC_ADDRESS_HIGH_AE BIT(31) /* 17.1.73, 17.1.75 */ #define MAC_ADDRESS_LOW(n) (0x0304 + 8 * (n)) /* * MTL Register Definitions */ /* 17.2.1 */ #define MTL_OPERATION_MODE 0x0c00 /* 17.2.2 */ #define MTL_DBG_CTL 0x0c08 /* 17.2.3 */ #define MTL_DBG_STS 0x0c0c /* 17.2.4 */ #define MTL_FIFO_DEBUG_DATA 0x0c10 /* 17.2.5 */ #define MTL_IRQ_STATUS 0x0c20 #define MTL_IRQ_STATUS_MTLPIS BIT(23) #define MTL_IRQ_STATUS_ESTIS BIT(18) #define MTL_IRQ_STATUS_DBGIS BIT(17) #define MTL_IRQ_STATUS_MACIS BIT(16) #define MTL_IRQ_STATUS_Q7IS BIT(7) #define MTL_IRQ_STATUS_Q6IS BIT(6) #define MTL_IRQ_STATUS_Q5IS BIT(5) #define MTL_IRQ_STATUS_Q4IS BIT(4) #define MTL_IRQ_STATUS_Q3IS BIT(3) #define MTL_IRQ_STATUS_Q2IS BIT(2) #define MTL_IRQ_STATUS_Q1IS BIT(1) #define MTL_IRQ_STATUS_Q0IS BIT(0) /* 17.2.6 */ #define MTL_RXQ_DMA_MAP0 0x0c30 /* 17.2.7 */ #define MTL_RXQ_DMA_MAP1 0x0c34 /* 17.2.8 */ #define MTL_TBS_CTRL 0x0c40 /* 17.2.9 */ #define MTL_EST_CTRL 0x0c50 /* 17.2.10 */ #define MTL_EST_STATUS 0x0c58 /* 17.2.11 */ #define MTL_EST_SCH_ERROR 0x0c60 /* 17.2.12 */ #define MTL_EST_FRM_SIZE_ERROR 0x0c64 /* 17.2.13 */ #define MTL_EST_FRM_SIZE_CAPTURE 0x0c68 /* 17.2.14 */ #define MTL_EST_IRQ_ENABLE 0x0c70 /* 17.2.15 */ #define MTL_EST_GCL_CONTROL 0x0c80 /* 17.2.16 */ #define MTL_EST_GCL_DATA 0x0c84 /* 17.2.17 */ #define MTL_FPE_CTRL_STS 0x0c90 /* 17.2.18 */ #define MTL_FPE_ADVANCE 0x0c94 /* 17.2.19 */ #define MTL_RXP_CTRL_STATUS 0x0ca0 /* 17.2.20 */ #define MTL_RXP_IRQ_CTRL_STATUS 0x0ca4 /* 17.2.21 */ #define MTL_RXP_DROP_CNT 0x0ca8 /* 17.2.22 */ #define MTL_RXP_ERROR_CNT 0x0cac /* 17.2.23 */ #define MTL_RXP_INDIRECT_ACC_CTRL_STATUS 0x0cb0 /* 17.2.24 */ #define MTL_RXP_INDIRECT_ACC_DATA 0x0cb4 /* 17.2.25 */ #define MTL_ECC_CTRL 0x0cc0 /* 17.2.26 */ #define MTL_SAFETY_IRQ_STATUS 0x0cc4 /* 17.2.27 */ #define MTL_ECC_IRQ_ENABLE 0x0cc8 /* 17.2.28 */ #define MTL_ECC_IRQ_STATUS 0x0ccc /* 17.2.29 */ #define MTL_ECC_ERR_STS_RCTL 
0x0cd0 /* 17.2.30 */ #define MTL_ECC_ERR_ADDR_STATUS 0x0cd4 /* 17.2.31 */ #define MTL_ECC_ERR_CNTR_STATUS 0x0cd8 /* 17.2.32 */ #define MTL_DPP_CTRL 0x0ce0 /* 17.3.1, 17.4.1 */ #define MTL_TXQn_OPERATION_MODE(n) (0x0d00 + 0x40 * (n)) /* 17.3.2, 17.4.2 */ #define MTL_TXQn_UNDERFLOW(n) (0x0d04 + 0x40 * (n)) /* 17.3.3, 17.4.3 */ #define MTL_TXQn_DEBUG(n) (0x0d08 + 0x40 * (n)) /* 17.4.4 */ #define MTL_TXQn_ETS_CTRL(n) (0x0d10 + 0x40 * (n)) /* 17.3.4, 17.4.5 */ #define MTL_TXQn_ETS_STATUS(n) (0x0d14 + 0x40 * (n)) /* 17.3.5, 17.4.6 */ #define MTL_TXQn_QUANTUM_WEIGHT(n) (0x0d18 + 0x40 * (n)) /* 17.4.7 */ #define MTL_TXQn_SENDSLOPECREDIT(n) (0x0d1c + 0x40 * (n)) /* 17.4.8 */ #define MTL_TXQn_HICREDIT(n) (0x0d20 + 0x40 * (n)) /* 17.4.9 */ #define MTL_TXQn_LOCREDIT(n) (0x0d24 + 0x40 * (n)) /* 17.3.6, 17.4.10 */ #define MTL_Qn_IRQ_CTRL_STATUS(n) (0x0d2c + 0x40 * (n)) /* 17.3.7, 17.4.11 */ #define MTL_RXQn_OPERATION_MODE(n) (0x0d30 + 0x40 * (n)) /* 17.3.8, 17.4.12 */ #define MTL_RXQn_MISSED_PKT_OVFL_CNT(n) (0x0d34 + 0x40 * (n)) /* 17.3.9, 17.4.13 */ #define MTL_RXQn_DEBUG(n) (0x0d38 + 0x40 * (n)) /* 17.3.10, 17.4.14 */ #define MTL_RXQn_CTRL(n) (0x0d3c + 0x40 * (n)) /* * DMA Register Definitions */ /* 17.5.1 */ #define DMA_MODE 0x1000 #define DMA_MODE_INTM GENMASK(17, 16) #define DMA_MODE_PR GENMASK(14, 12) #define DMA_MODE_TXPR BIT(12) #define DMA_MODE_ARBC BIT(9) #define DMA_MODE_DSPW BIT(8) #define DMA_MODE_TAA GENMASK(4, 2) #define DMA_MODE_DA BIT(1) #define DMA_MODE_SWR BIT(0) /* 17.5.2 */ #define DMA_SYSBUS_MODE 0x1004 #define DMA_SYSBUS_MODE_EN_LPI BIT(31) #define DMA_SYSBUS_MODE_LPI_XIT_PKT BIT(30) #define DMA_SYSBUS_MODE_WR_OSR_LMT GENMASK(27, 24) #define DMA_SYSBUS_MODE_RD_OSR_LMT GENMASK(19, 16) #define DMA_SYSBUS_MODE_RB BIT(15) #define DMA_SYSBUS_MODE_MB BIT(14) #define DMA_SYSBUS_MODE_ONEKBBE BIT(13) #define DMA_SYSBUS_MODE_AAL BIT(12) #define DMA_SYSBUS_MODE_EAME BIT(11) #define DMA_SYSBUS_MODE_AALE BIT(10) #define DMA_SYSBUS_MODE_BLEN256 BIT(7) #define 
DMA_SYSBUS_MODE_BLEN128 BIT(6) #define DMA_SYSBUS_MODE_BLEN64 BIT(5) #define DMA_SYSBUS_MODE_BLEN32 BIT(4) #define DMA_SYSBUS_MODE_BLEN16 BIT(3) #define DMA_SYSBUS_MODE_BLEN8 BIT(2) #define DMA_SYSBUS_MODE_BLEN4 BIT(1) #define DMA_SYSBUS_MODE_FB BIT(0) /* 17.5.3 */ #define DMA_IRQ_STATUS 0x1008 #define DMA_IRQ_STATUS_MACIS BIT(17) #define DMA_IRQ_STATUS_MTLIS BIT(16) #define DMA_IRQ_STATUS_DC7IS BIT(7) #define DMA_IRQ_STATUS_DC6IS BIT(6) #define DMA_IRQ_STATUS_DC5IS BIT(5) #define DMA_IRQ_STATUS_DC4IS BIT(4) #define DMA_IRQ_STATUS_DC3IS BIT(3) #define DMA_IRQ_STATUS_DC2IS BIT(2) #define DMA_IRQ_STATUS_DC1IS BIT(1) #define DMA_IRQ_STATUS_DC0IS BIT(0) /* 17.5.4 */ #define DMA_DEBUG_STATUS0 0x100c /* 17.5.5 */ #define DMA_DEBUG_STATUS1 0x1010 /* 17.5.6 */ #define DMA_DEBUG_STATUS2 0x1014 /* 17.5.7 */ #define AXI4_TX_AR_ACE_CTRL 0x1020 /* 17.5.8 */ #define AXI4_RX_AW_ACE_CTRL 0x1024 /* 17.5.9 */ #define AXI4_TXRX_AWAR_ACE_CTRL 0x1028 /* 17.5.10 */ #define AXI_LPI_ENTRY_INTERVAL 0x1040 /* 17.5.11 */ #define DMA_TBS_CTRL 0x1050 /* 17.5.12 */ #define DMA_SAFETY_IRQ_STATUS 0x1080 /* 17.5.13 */ #define DMA_ECC_IRQ_ENABLE 0x1084 /* 17.5.14 */ #define DMA_ECC_IRQ_STATUS 0x1088 /* 17.6.1 */ #define DMA_CHn_CTRL(n) (0x1100 + 0x80 * (n)) #define DMA_CHn_CTRL_SPH BIT(24) #define DMA_CHn_CTRL_DSL GENMASK(20, 18) #define DMA_CHn_CTRL_PBLx8 BIT(16) #define DMA_CHn_CTRL_MSS GENMASK(13, 0) /* 17.6.2 */ #define DMA_CHn_TX_CTRL(n) (0x1104 + 0x80 * (n)) #define DMA_CHn_TX_CTRL_EDSE BIT(28) #define DMA_CHn_TX_CTRL_TQOS GENMASK(27, 24) #define DMA_CHn_TX_CTRL_ETIC BIT(22) #define DMA_CHn_TX_CTRL_PBL GENMASK(21, 16) #define DMA_CHn_TX_CTRL_IPBL BIT(15) #define DMA_CHn_TX_CTRL_TSE_MODE GENMASK(14, 13) #define DMA_CHn_TX_CTRL_TSE BIT(12) #define DMA_CHn_TX_CTRL_OSF BIT(4) #define DMA_CHn_TX_CTRL_TCW GENMASK(3, 1) #define DMA_CHn_TX_CTRL_St BIT(0) /* 17.6.3 */ #define DMA_CHn_RX_CTRL(n) (0x1108 + 0x80 * (n)) #define DMA_CHn_RX_CTRL_RPF BIT(31) #define DMA_CHn_RX_CTRL_RQOS GENMASK(27, 24) 
#define DMA_CHn_RX_CTRL_ERIC BIT(22) #define DMA_CHn_RX_CTRL_PBL GENMASK(21, 16) #define DMA_CHn_RX_CTRL_RBSZ GENMASK(14, 1) #define DMA_CHn_RX_CTRL_SR BIT(0) /* 17.6.4 */ #define DMA_CHn_TXDESC_LIST_HADDR(n) (0x1110 + 0x80 * (n)) /* 17.6.5 */ #define DMA_CHn_TXDESC_LIST_ADDR(n) (0x1114 + 0x80 * (n)) /* 17.6.6 */ #define DMA_CHn_RXDESC_LIST_HADDR(n) (0x1118 + 0x80 * (n)) /* 17.6.7 */ #define DMA_CHn_RXDESC_LIST_ADDR(n) (0x111c + 0x80 * (n)) /* 17.6.8 */ #define DMA_CHn_TXDESC_TAIL_PTR(n) (0x1120 + 0x80 * (n)) /* 17.6.9 */ #define DMA_CHn_RXDESC_TAIL_PTR(n) (0x1128 + 0x80 * (n)) /* 17.6.10 */ #define DMA_CHn_TXDESC_RING_LENGTH(n) (0x112c + 0x80 * (n)) /* 17.6.11 */ #define DMA_CHn_RXDESC_RING_LENGTH(n) (0x1130 + 0x80 * (n)) /* 17.6.12 */ #define DMA_CHn_IRQ_ENABLE(n) (0x1134 + 0x80 * (n)) #define DMA_CHn_IRQ_ENABLE_NIE BIT(15) #define DMA_CHn_IRQ_ENABLE_AIE BIT(14) #define DMA_CHn_IRQ_ENABLE_CDEE BIT(13) #define DMA_CHn_IRQ_ENABLE_FBEE BIT(12) #define DMA_CHn_IRQ_ENABLE_ERIE BIT(11) #define DMA_CHn_IRQ_ENABLE_ETIE BIT(10) #define DMA_CHn_IRQ_ENABLE_RWTE BIT(9) #define DMA_CHn_IRQ_ENABLE_RSE BIT(8) #define DMA_CHn_IRQ_ENABLE_RBUE BIT(7) #define DMA_CHn_IRQ_ENABLE_RIE BIT(6) #define DMA_CHn_IRQ_ENABLE_TBUE BIT(2) #define DMA_CHn_IRQ_ENABLE_TXSE BIT(1) #define DMA_CHn_IRQ_ENABLE_TIE BIT(0) /* 17.6.13 */ #define DMA_CHn_RX_IRQ_WATCHDOG_TIMER(n) (0x1138 + 0x80 * (n)) /* 17.6.14 */ #define DMA_CHn_SLOT_FN_CTRL_STATUS(n) (0x113c + 0x80 * (n)) /* 17.6.15 */ #define DMA_CHn_CURR_APP_TXDESC(n) (0x1144 + 0x80 * (n)) /* 17.6.16 */ #define DMA_CHn_CURR_APP_RXDESC(n) (0x114c + 0x80 * (n)) /* 17.6.17 */ #define DMA_CHn_CURR_APP_TX_BUF_H(n) (0x1150 + 0x80 * (n)) /* 17.6.18 */ #define DMA_CHn_CURR_APP_TX_BUF(n) (0x1154 + 0x80 * (n)) /* 17.6.19 */ #define DMA_CHn_CURR_APP_RX_BUF_H(n) (0x1158 + 0x80 * (n)) /* 17.6.20 */ #define DMA_CHn_CURR_APP_RX_BUF(n) (0x115c + 0x80 * (n)) /* 17.6.21 */ #define DMA_CHn_STATUS(n) (0x1160 + 0x80 * (n)) #define DMA_CHn_STATUS_REB GENMASK(21, 19) 
#define DMA_CHn_STATUS_TEB GENMASK(18, 16) #define DMA_CHn_STATUS_NIS BIT(15) #define DMA_CHn_STATUS_AIS BIT(14) #define DMA_CHn_STATUS_CDE BIT(13) #define DMA_CHn_STATUS_FBE BIT(12) #define DMA_CHn_STATUS_ERI BIT(11) #define DMA_CHn_STATUS_ETI BIT(10) #define DMA_CHn_STATUS_RWT BIT(9) #define DMA_CHn_STATUS_RPS BIT(8) #define DMA_CHn_STATUS_RBU BIT(7) #define DMA_CHn_STATUS_RI BIT(6) #define DMA_CHn_STATUS_TBU BIT(2) #define DMA_CHn_STATUS_TPS BIT(1) #define DMA_CHn_STATUS_TI BIT(0) /* 17.6.22 */ #define DMA_CHn_MISS_FRAME_CNT(n) (0x1164 + 0x80 * (n)) /* 17.6.23 */ #define DMA_CHn_RXP_ACCEPT_CNT(n) (0x1168 + 0x80 * (n)) /* 17.6.24 */ #define DMA_CHn_RX_ERI_CNT(n) (0x116c + 0x80 * (n)) /* * DMA Descriptor Flag Definitions */ /* 19.5.1.3 */ #define TDES2_IOC BIT(31) #define TDES2_TTSE BIT(30) #define TDES2_TMWD BIT(30) #define TDES2_B2L GENMASK(29, 16) #define TDES2_VTIR GENMASK(15, 14) #define TDES2_HL GENMASK(13, 0) #define TDES2_B1L GENMASK(13, 0) /* 19.5.1.4 */ #define TDES3_OWN BIT(31) #define TDES3_CTXT BIT(30) #define TDES3_FD BIT(29) #define TDES3_LD BIT(28) #define TDES3_CPC GENMASK(27, 26) #define TDES3_SAIC GENMASK(25, 23) #define TDES3_SLOTNUM GENMASK(22, 19) #define TDES3_THL GENMASK(22, 19) #define TDES3_TSE BIT(18) #define TDES3_CIC GENMASK(17, 16) #define TDES3_TPL GENMASK(17, 0) #define TDES3_FL GENMASK(14, 0) /* 19.5.1.9 */ /* TDES3_OWN BIT(31) */ #define TDES3_CTXT BIT(30) /* TDES3_FD BIT(29) */ /* TDES3_LD BIT(28) */ #define TDES3_DE BIT(23) #define TDES3_TTSS BIT(17) #define TDES3_EUE BIT(16) #define TDES3_ES BIT(15) #define TDES3_JT BIT(14) #define TDES3_FF BIT(13) #define TDES3_PCE BIT(12) #define TDES3_LoC BIT(11) #define TDES3_NC BIT(10) #define TDES3_LC BIT(9) #define TDES3_EC BIT(8) #define TDES3_CC GENMASK(7, 4) #define TDES3_ED BIT(3) #define TDES3_UF BIT(2) #define TDES3_DB BIT(1) #define TDES3_IHE BIT(0) /* 19.6.1.4 */ #define RDES3_OWN BIT(31) #define RDES3_IOC BIT(30) #define RDES3_BUF2V BIT(25) #define RDES3_BUF1V BIT(24) /* 
19.6.2.1 */ #define RDES0_IVT GENMASK(31, 16) #define RDES0_OVT GENMASK(15, 0) /* 19.6.2.2 */ #define RDES1_OPC GENMASK(31, 16) #define RDES1_TD BIT(15) #define RDES1_TSA BIT(14) #define RDES1_PV BIT(13) #define RDES1_PFT BIT(12) #define RDES1_PMT GENMASK(11, 8) #define RDES1_ipce BIT(7) #define RDES1_IPCB BIT(6) #define RDES1_IPV6 BIT(5) #define RDES1_IPV4 BIT(4) #define RDES1_IPHE BIT(3) #define RDES1_PT GENMASK(2, 0) /* 19.6.2.3 */ #define RDES2_L3L4FM GENMASK(31, 29) #define RDES2_L4FM BIT(28) #define RDES2_L3FM BIT(27) #define RDES2_MADRM GENMASK(26, 19) #define RDES2_HF BIT(18) #define RDES2_DAF BIT(17) #define RDES2_RXPI BIT(17) #define RDES2_SAF BIT(16) #define RDES2_RXPD BIT(16) #define RDES2_OTS BIT(15) #define RDES2_ITS BIT(14) #define RDES2_ARPNR BIT(10) #define RDES2_HL GENMASK(9, 0) /* 19.6.2.4 */ /* RDES3_OWN BIT(31) */ #define RDES3_CTXT BIT(30) #define RDES3_FD BIT(29) #define RDES3_LD BIT(28) #define RDES3_RS2V BIT(27) #define RDES3_RS1V BIT(26) #define RDES3_RS0V BIT(25) #define RDES3_CE BIT(24) #define RDES3_GP BIT(23) #define RDES3_RWT BIT(22) #define RDES3_OE BIT(21) #define RDES3_RE BIT(20) #define RDES3_DE BIT(19) #define RDES3_LT GENMASK(18, 16) #define RDES3_ES BIT(15) #define RDES3_PL GENMASK(14, 0) #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_DWMAC_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_dwmac_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,616
```c /* * */ #define DT_DRV_COMPAT espressif_esp32_eth #include <ethernet/eth_stats.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/interrupt_controller/intc_esp32.h> #include <zephyr/logging/log.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/phy.h> #include <esp_attr.h> #include <esp_mac.h> #include <hal/emac_hal.h> #include <hal/emac_ll.h> #include <soc/rtc.h> #include <soc/io_mux_reg.h> #include <clk_ctrl_os.h> #include "eth.h" LOG_MODULE_REGISTER(eth_esp32, CONFIG_ETHERNET_LOG_LEVEL); #define MAC_RESET_TIMEOUT_MS 100 struct eth_esp32_dma_data { uint8_t descriptors[ CONFIG_ETH_DMA_RX_BUFFER_NUM * sizeof(eth_dma_rx_descriptor_t) + CONFIG_ETH_DMA_TX_BUFFER_NUM * sizeof(eth_dma_tx_descriptor_t)]; uint8_t rx_buf[CONFIG_ETH_DMA_RX_BUFFER_NUM][CONFIG_ETH_DMA_BUFFER_SIZE]; uint8_t tx_buf[CONFIG_ETH_DMA_TX_BUFFER_NUM][CONFIG_ETH_DMA_BUFFER_SIZE]; }; struct eth_esp32_dev_data { struct net_if *iface; uint8_t mac_addr[6]; emac_hal_context_t hal; struct eth_esp32_dma_data *dma; uint8_t txb[NET_ETH_MAX_FRAME_SIZE]; uint8_t rxb[NET_ETH_MAX_FRAME_SIZE]; uint8_t *dma_rx_buf[CONFIG_ETH_DMA_RX_BUFFER_NUM]; uint8_t *dma_tx_buf[CONFIG_ETH_DMA_TX_BUFFER_NUM]; struct k_sem int_sem; K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_ESP32_RX_THREAD_STACK_SIZE); struct k_thread rx_thread; }; static const struct device *eth_esp32_phy_dev = DEVICE_DT_GET( DT_INST_PHANDLE(0, phy_handle)); static enum ethernet_hw_caps eth_esp32_caps(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T; } static int eth_esp32_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct eth_esp32_dev_data *const dev_data = dev->data; int ret = -ENOTSUP; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(dev_data->mac_addr, config->mac_address.addr, 6); emac_hal_set_address(&dev_data->hal, dev_data->mac_addr); net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, 
sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); ret = 0; break; default: break; } return ret; } static int eth_esp32_send(const struct device *dev, struct net_pkt *pkt) { struct eth_esp32_dev_data *dev_data = dev->data; size_t len = net_pkt_get_len(pkt); if (net_pkt_read(pkt, dev_data->txb, len)) { return -EIO; } uint32_t sent_len = emac_hal_transmit_frame(&dev_data->hal, dev_data->txb, len); int res = len == sent_len ? 0 : -EIO; return res; } static struct net_pkt *eth_esp32_rx( struct eth_esp32_dev_data *const dev_data, uint32_t *frames_remaining) { uint32_t free_rx_descriptor; uint32_t receive_len = emac_hal_receive_frame( &dev_data->hal, dev_data->rxb, sizeof(dev_data->rxb), frames_remaining, &free_rx_descriptor); if (receive_len == 0) { /* Nothing to receive */ return NULL; } struct net_pkt *pkt = net_pkt_rx_alloc_with_buffer( dev_data->iface, receive_len, AF_UNSPEC, 0, K_MSEC(100)); if (pkt == NULL) { eth_stats_update_errors_rx(dev_data->iface); LOG_ERR("Could not allocate rx buffer"); return NULL; } if (net_pkt_write(pkt, dev_data->rxb, receive_len) != 0) { LOG_ERR("Unable to write frame into the pkt"); eth_stats_update_errors_rx(dev_data->iface); net_pkt_unref(pkt); return NULL; } return pkt; } FUNC_NORETURN static void eth_esp32_rx_thread(void *arg1, void *arg2, void *arg3) { const struct device *dev = arg1; struct eth_esp32_dev_data *const dev_data = dev->data; ARG_UNUSED(arg2); ARG_UNUSED(arg3); while (true) { k_sem_take(&dev_data->int_sem, K_FOREVER); uint32_t frames_remaining; do { struct net_pkt *pkt = eth_esp32_rx( dev_data, &frames_remaining); if (pkt == NULL) { break; } if (net_recv_data(dev_data->iface, pkt) < 0) { /* Upper layers are not ready to receive packets */ net_pkt_unref(pkt); } } while (frames_remaining > 0); } } IRAM_ATTR static void eth_esp32_isr(void *arg) { const struct device *dev = arg; struct eth_esp32_dev_data *const dev_data = dev->data; uint32_t intr_stat = emac_ll_get_intr_status(dev_data->hal.dma_regs); 
emac_ll_clear_corresponding_intr(dev_data->hal.dma_regs, intr_stat); if (intr_stat & EMAC_LL_DMA_RECEIVE_FINISH_INTR) { k_sem_give(&dev_data->int_sem); } } static int generate_mac_addr(uint8_t mac_addr[6]) { int res = 0; #if DT_INST_PROP(0, zephyr_random_mac_address) gen_random_mac(mac_addr, 0x24, 0xD7, 0xEB); #elif NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0)) static const uint8_t addr[6] = DT_INST_PROP(0, local_mac_address); memcpy(mac_addr, addr, sizeof(addr)); #else if (esp_read_mac(mac_addr, ESP_MAC_ETH) != ESP_OK) { res = -EIO; } #endif return res; } static void phy_link_state_changed(const struct device *phy_dev, struct phy_link_state *state, void *user_data) { const struct device *dev = (const struct device *)user_data; struct eth_esp32_dev_data *const dev_data = dev->data; ARG_UNUSED(phy_dev); if (state->is_up) { net_eth_carrier_on(dev_data->iface); } else { net_eth_carrier_off(dev_data->iface); } } #if DT_INST_NODE_HAS_PROP(0, ref_clk_output_gpios) static int emac_config_apll_clock(void) { uint32_t expt_freq = MHZ(50); uint32_t real_freq = 0; esp_err_t ret = periph_rtc_apll_freq_set(expt_freq, &real_freq); if (ret == ESP_ERR_INVALID_ARG) { LOG_ERR("Set APLL clock coefficients failed"); return -EIO; } if (ret == ESP_ERR_INVALID_STATE) { LOG_INF("APLL is occupied already, it is working at %d Hz", real_freq); } /* If the difference of real APLL frequency * is not within 50 ppm, i.e. 
2500 Hz, * the APLL is unavailable */ if (abs((int)real_freq - (int)expt_freq) > 2500) { LOG_ERR("The APLL is working at an unusable frequency"); return -EIO; } return 0; } #endif /* DT_INST_NODE_HAS_PROP(0, ref_clk_output_gpios) */ int eth_esp32_initialize(const struct device *dev) { struct eth_esp32_dev_data *const dev_data = dev->data; int res; k_sem_init(&dev_data->int_sem, 0, 1); const struct device *clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_NODELABEL(eth))); clock_control_subsys_t clock_subsys = (clock_control_subsys_t)DT_CLOCKS_CELL(DT_NODELABEL(eth), offset); /* clock is shared, so do not bail out if already enabled */ res = clock_control_on(clock_dev, clock_subsys); if (res < 0 && res != -EALREADY) { goto err; } /* Convert 2D array DMA buffers to arrays of pointers */ for (int i = 0; i < CONFIG_ETH_DMA_RX_BUFFER_NUM; i++) { dev_data->dma_rx_buf[i] = dev_data->dma->rx_buf[i]; } for (int i = 0; i < CONFIG_ETH_DMA_TX_BUFFER_NUM; i++) { dev_data->dma_tx_buf[i] = dev_data->dma->tx_buf[i]; } emac_hal_init(&dev_data->hal, dev_data->dma->descriptors, dev_data->dma_rx_buf, dev_data->dma_tx_buf); /* Configure ISR */ res = esp_intr_alloc(DT_IRQN(DT_NODELABEL(eth)), ESP_INTR_FLAG_IRAM, eth_esp32_isr, (void *)dev, NULL); if (res != 0) { goto err; } /* Configure phy for Media-Independent Interface (MII) or * Reduced Media-Independent Interface (RMII) mode */ const char *phy_connection_type = DT_INST_PROP_OR(0, phy_connection_type, "rmii"); if (strcmp(phy_connection_type, "rmii") == 0) { emac_hal_iomux_init_rmii(); #if DT_INST_NODE_HAS_PROP(0, ref_clk_output_gpios) BUILD_ASSERT(DT_INST_GPIO_PIN(0, ref_clk_output_gpios) == 16 || DT_INST_GPIO_PIN(0, ref_clk_output_gpios) == 17, "Only GPIO16/17 are allowed as a GPIO REF_CLK source!"); int ref_clk_gpio = DT_INST_GPIO_PIN(0, ref_clk_output_gpios); emac_hal_iomux_rmii_clk_output(ref_clk_gpio); emac_ll_clock_enable_rmii_output(dev_data->hal.ext_regs); periph_rtc_apll_acquire(); res = emac_config_apll_clock(); if (res != 0) { 
goto err; } rtc_clk_apll_enable(true); #else emac_hal_iomux_rmii_clk_input(); emac_ll_clock_enable_rmii_input(dev_data->hal.ext_regs); #endif } else if (strcmp(phy_connection_type, "mii") == 0) { emac_hal_iomux_init_mii(); emac_ll_clock_enable_mii(dev_data->hal.ext_regs); } else { res = -EINVAL; goto err; } /* Reset mac registers and wait until ready */ emac_ll_reset(dev_data->hal.dma_regs); bool reset_success = false; for (uint32_t t_ms = 0; t_ms < MAC_RESET_TIMEOUT_MS; t_ms += 10) { /* Busy wait rather than sleep in case kernel is not yet initialized */ k_busy_wait(10 * 1000); if (emac_ll_is_reset_done(dev_data->hal.dma_regs)) { reset_success = true; break; } } if (!reset_success) { res = -ETIMEDOUT; goto err; } /* Set dma_burst_len as ETH_DMA_BURST_LEN_32 by default */ emac_hal_dma_config_t dma_config = { .dma_burst_len = 0 }; emac_hal_reset_desc_chain(&dev_data->hal); emac_hal_init_mac_default(&dev_data->hal); emac_hal_init_dma_default(&dev_data->hal, &dma_config); res = generate_mac_addr(dev_data->mac_addr); if (res != 0) { goto err; } emac_hal_set_address(&dev_data->hal, dev_data->mac_addr); k_tid_t tid = k_thread_create( &dev_data->rx_thread, dev_data->rx_thread_stack, K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack), eth_esp32_rx_thread, (void *)dev, NULL, NULL, CONFIG_ETH_ESP32_RX_THREAD_PRIORITY, K_ESSENTIAL, K_NO_WAIT); if (IS_ENABLED(CONFIG_THREAD_NAME)) { k_thread_name_set(tid, "esp32_eth"); } emac_hal_start(&dev_data->hal); return 0; err: return res; } static const struct device *eth_esp32_phy_get(const struct device *dev) { ARG_UNUSED(dev); return eth_esp32_phy_dev; } static void eth_esp32_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_esp32_dev_data *dev_data = dev->data; dev_data->iface = iface; net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); ethernet_init(iface); if (device_is_ready(eth_esp32_phy_dev)) { phy_link_callback_set(eth_esp32_phy_dev, 
phy_link_state_changed, (void *)dev); } else { LOG_ERR("PHY device not ready"); } /* Do not start the interface until PHY link is up */ net_if_carrier_off(iface); } static const struct ethernet_api eth_esp32_api = { .iface_api.init = eth_esp32_iface_init, .get_capabilities = eth_esp32_caps, .set_config = eth_esp32_set_config, .get_phy = eth_esp32_phy_get, .send = eth_esp32_send, }; /* DMA data must be in DRAM */ static struct eth_esp32_dma_data eth_esp32_dma_data WORD_ALIGNED_ATTR DRAM_ATTR; static struct eth_esp32_dev_data eth_esp32_dev = { .dma = &eth_esp32_dma_data, }; ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_esp32_initialize, NULL, &eth_esp32_dev, NULL, CONFIG_ETH_INIT_PRIORITY, &eth_esp32_api, NET_ETH_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_esp32.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,142
```unknown menuconfig ETH_ADIN2111 bool "ADIN2111 2-port 10BASE-T1L Controller" default y depends on DT_HAS_ADI_ADIN2111_ENABLED || DT_HAS_ADI_ADIN1110_ENABLED select SPI select MDIO imply CRC help The ADIN2111 is a low power, 2-port 10BASE-T1L transceiver designed for industrial Ethernet applications, and is compliant with the IEEE 802.3cg-2019 Ethernet standard for long reach, 10 Mbps single pair Ethernet (SPE). Featuring an integrated media access control (MAC) and a switch, the ADIN2111 enables direct connectivity with a variety of controllers via a serial peripheral inter-face (SPI). if ETH_ADIN2111 config ETH_ADIN2111_IRQ_THREAD_STACK_SIZE int "Stack size for a thread that processes ADIN IRQ" default 2048 help Size of the stack used for internal thread which is ran to process raised INT IRQ. config ETH_ADIN2111_IRQ_THREAD_PRIO int "Priority for internal incoming packet handler" default 2 help Priority level for internal thread which is ran for ADIN INT IRQ processing. config ETH_ADIN2111_TIMEOUT int "IP buffer timeout" default 100 help Given timeout in milliseconds. Maximum amount of time that the driver will wait from the IP stack to get a memory buffer before the Ethernet frame is dropped. config ETH_ADIN2111_SPI_CFG0 bool "SPI_CFG0" default y help Must be set when ADIN uses 8-bit CRC (Generic SPI) or Protection Mode (OPEN Alliance) on the SPI Host Interface. config ETH_ADIN2111_BUFFER_SIZE int "Buffer size in bytes use for frame transmission" default 1524 help Transmission and reception buffer size. endif # ETH_ADIN2111 ```
/content/code_sandbox/drivers/ethernet/Kconfig.adin2111
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
419
```objective-c /* * Arvin Farahmand <arvinf@ip-logix.com> */ #ifndef __DSA_KSZ8863_H__ #define __DSA_KSZ8863_H__ /* SPI commands */ #define KSZ8863_SPI_CMD_WR (BIT(6)) #define KSZ8863_SPI_CMD_RD (BIT(6) | BIT(5)) /* PHY registers */ #define KSZ8863_BMCR 0x00 #define KSZ8863_BMSR 0x01 #define KSZ8863_PHYID1 0x02 #define KSZ8863_PHYID2 0x03 #define KSZ8863_ANAR 0x04 #define KSZ8863_ANLPAR 0x05 #define KSZ8863_LINKMD 0x1D #define KSZ8863_PHYSCS 0x1F /* SWITCH registers */ #define KSZ8863_CHIP_ID0 0x00 #define KSZ8863_CHIP_ID1 0x01 #define KSZ8863_GLOBAL_CTRL0 0x02 #define KSZ8863_GLOBAL_CTRL1 0x03 #define KSZ8863_GLOBAL_CTRL2 0x04 #define KSZ8863_GLOBAL_CTRL3 0x05 #define KSZ8863_GLOBAL_CTRL4 0x06 #define KSZ8863_GLOBAL_CTRL5 0x07 #define KSZ8863_GLOBAL_CTRL9 0x0B #define KSZ8863_GLOBAL_CTRL10 0x0C #define KSZ8863_GLOBAL_CTRL11 0x0D #define KSZ8863_GLOBAL_CTRL12 0x0E #define KSZ8863_GLOBAL_CTRL13 0x0F #define KSZ8863_PORT1_CTRL0 0x10 #define KSZ8863_PORT1_CTRL1 0x11 #define KSZ8863_PORT1_CTRL2 0x12 #define KSZ8863_PORT1_CTRL3 0x13 #define KSZ8863_PORT1_CTRL4 0x14 #define KSZ8863_PORT1_CTRL5 0x15 #define KSZ8863_PORT1_Q0_IG_LIMIT 0x16 #define KSZ8863_PORT1_Q1_IG_LIMIT 0x17 #define KSZ8863_PORT1_Q2_IG_LIMIT 0x18 #define KSZ8863_PORT1_Q3_IG_LIMIT 0x19 #define KSZ8863_PORT1_PHY_CTRL 0x1A #define KSZ8863_PORT1_LINKMD 0x1B #define KSZ8863_PORT1_CTRL12 0x1C #define KSZ8863_PORT1_CTRL13 0x1D #define KSZ8863_PORT1_STAT0 0x1E #define KSZ8863_PORT1_STAT1 0x1F #define KSZ8863_PORT2_CTRL0 0x20 #define KSZ8863_PORT2_CTRL1 0x21 #define KSZ8863_PORT2_CTRL2 0x22 #define KSZ8863_PORT2_CTRL3 0x23 #define KSZ8863_PORT2_CTRL4 0x24 #define KSZ8863_PORT2_CTRL5 0x25 #define KSZ8863_PORT2_Q0_IG_LIMIT 0x26 #define KSZ8863_PORT2_Q1_IG_LIMIT 0x27 #define KSZ8863_PORT2_Q2_IG_LIMIT 0x28 #define KSZ8863_PORT2_Q3_IG_LIMIT 0x29 #define KSZ8863_PORT2_PHY_CTRL 0x2A #define KSZ8863_PORT2_LINKMD 0x2B #define KSZ8863_PORT2_CTRL12 0x2C #define KSZ8863_PORT2_CTRL13 0x2D #define KSZ8863_PORT2_STAT0 0x2E 
#define KSZ8863_PORT2_STAT1 0x2F #define KSZ8863_PORT3_CTRL0 0x30 #define KSZ8863_PORT3_CTRL1 0x31 #define KSZ8863_PORT3_CTRL2 0x32 #define KSZ8863_PORT3_CTRL3 0x33 #define KSZ8863_PORT3_CTRL4 0x34 #define KSZ8863_PORT3_CTRL5 0x35 #define KSZ8863_PORT3_Q0_IG_LIMIT 0x36 #define KSZ8863_PORT3_Q1_IG_LIMIT 0x37 #define KSZ8863_PORT3_Q2_IG_LIMIT 0x38 #define KSZ8863_PORT3_Q3_IG_LIMIT 0x39 #define KSZ8863_PORT3_STAT1 0x3F #define KSZ8863_MAC_ADDR0 0x70 #define KSZ8863_MAC_ADDR1 0x71 #define KSZ8863_MAC_ADDR2 0x72 #define KSZ8863_MAC_ADDR3 0x73 #define KSZ8863_MAC_ADDR4 0x74 #define KSZ8863_MAC_ADDR5 0x75 #define KSZ8863_USER0 0x76 #define KSZ8863_USER1 0x77 #define KSZ8863_USER2 0x78 #define KSZ8863_GLOBAL_CTRL1_TAIL_TAG_EN BIT(6) #define KSZ8863_GLOBAL_CTRL2_LEG_MAX_PKT_SIZ_CHK_ENA BIT(1) #define KSZ8863_CTRL2_PORTn(n) (0x12 + ((n) * 0x10)) #define KSZ8863_CTRL2_TRANSMIT_EN BIT(2) #define KSZ8863_CTRL2_RECEIVE_EN BIT(1) #define KSZ8863_CTRL2_LEARNING_DIS BIT(0) #define KSZ8863_STAT2_PORTn(n) (0x1E + ((n) * 0x10)) #define KSZ8863_STAT2_LINK_GOOD BIT(5) #define KSZ8863_CHIP_ID0_ID_DEFAULT 0x88 #define KSZ8863_CHIP_ID1_ID_DEFAULT 0x31 #define KSZ8863_REGISTER_67 0x43 #define KSZ8863_SOFTWARE_RESET_SET BIT(4) #define KSZ8863_SOFTWARE_RESET_CLEAR 0 enum { /* LAN ports for the ksz8863 switch */ KSZ8863_PORT1 = 0, KSZ8863_PORT2, /* SWITCH <-> CPU port */ KSZ8863_PORT3, }; #define KSZ8863_REG_IND_CTRL_0 0x79 #define KSZ8863_REG_IND_CTRL_1 0x7A #define KSZ8863_REG_IND_DATA_8 0x7B #define KSZ8863_REG_IND_DATA_7 0x7C #define KSZ8863_REG_IND_DATA_6 0x7D #define KSZ8863_REG_IND_DATA_5 0x7E #define KSZ8863_REG_IND_DATA_4 0x7F #define KSZ8863_REG_IND_DATA_3 0x80 #define KSZ8863_REG_IND_DATA_2 0x81 #define KSZ8863_REG_IND_DATA_1 0x82 #define KSZ8863_REG_IND_DATA_0 0x83 #define KSZ8863_STATIC_MAC_TABLE_VALID BIT(3) #define KSZ8863_STATIC_MAC_TABLE_OVRD BIT(4) #define KSZ8863_STATIC_MAC_TABLE_USE_FID BIT(5) #define KSZ8XXX_CHIP_ID0 KSZ8863_CHIP_ID0 #define KSZ8XXX_CHIP_ID1 
KSZ8863_CHIP_ID1 #define KSZ8XXX_CHIP_ID0_ID_DEFAULT KSZ8863_CHIP_ID0_ID_DEFAULT #define KSZ8XXX_CHIP_ID1_ID_DEFAULT KSZ8863_CHIP_ID1_ID_DEFAULT #define KSZ8XXX_FIRST_PORT KSZ8863_PORT1 #define KSZ8XXX_LAST_PORT KSZ8863_PORT3 #define KSZ8XXX_CPU_PORT KSZ8863_PORT3 #define KSZ8XXX_REG_IND_CTRL_0 KSZ8863_REG_IND_CTRL_0 #define KSZ8XXX_REG_IND_CTRL_1 KSZ8863_REG_IND_CTRL_1 #define KSZ8XXX_REG_IND_DATA_8 KSZ8863_REG_IND_DATA_8 #define KSZ8XXX_REG_IND_DATA_7 KSZ8863_REG_IND_DATA_7 #define KSZ8XXX_REG_IND_DATA_6 KSZ8863_REG_IND_DATA_6 #define KSZ8XXX_REG_IND_DATA_5 KSZ8863_REG_IND_DATA_5 #define KSZ8XXX_REG_IND_DATA_4 KSZ8863_REG_IND_DATA_4 #define KSZ8XXX_REG_IND_DATA_3 KSZ8863_REG_IND_DATA_3 #define KSZ8XXX_REG_IND_DATA_2 KSZ8863_REG_IND_DATA_2 #define KSZ8XXX_REG_IND_DATA_1 KSZ8863_REG_IND_DATA_1 #define KSZ8XXX_REG_IND_DATA_0 KSZ8863_REG_IND_DATA_0 #define KSZ8XXX_STATIC_MAC_TABLE_VALID KSZ8863_STATIC_MAC_TABLE_VALID #define KSZ8XXX_STATIC_MAC_TABLE_OVRD KSZ8863_STATIC_MAC_TABLE_OVRD #define KSZ8XXX_STAT2_LINK_GOOD KSZ8863_STAT2_LINK_GOOD #define KSZ8XXX_RESET_REG KSZ8863_REGISTER_67 #define KSZ8XXX_RESET_SET KSZ8863_SOFTWARE_RESET_SET #define KSZ8XXX_RESET_CLEAR KSZ8863_SOFTWARE_RESET_CLEAR #define KSZ8XXX_STAT2_PORTn KSZ8863_STAT2_PORTn #define KSZ8XXX_SPI_CMD_RD KSZ8863_SPI_CMD_RD #define KSZ8XXX_SPI_CMD_WR KSZ8863_SPI_CMD_WR #define KSZ8XXX_SOFT_RESET_DURATION 1000 #define KSZ8XXX_HARD_RESET_WAIT 10000 #endif /* __DSA_KSZ8863_H__ */ ```
/content/code_sandbox/drivers/ethernet/dsa_ksz8863.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,247
```unknown config ETH_LITEX_LITEETH bool "LiteX LiteEth Ethernet core driver" default y depends on DT_HAS_LITEX_LITEETH_ENABLED imply MDIO ```
/content/code_sandbox/drivers/ethernet/Kconfig.litex
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
42
```unknown config ETH_SMSC91X bool "SMSC91x Ethernet driver" default y depends on DT_HAS_SMSC_LAN91C111_ENABLED select MDIO help Enable driver for SMSC/LAN91x family of chips. ```
/content/code_sandbox/drivers/ethernet/Kconfig.smsc91x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
55
```c /* * */ /** @file * @brief Atmel SAM MCU family Ethernet MAC (GMAC) driver. * * This is a zero-copy networking implementation of an Ethernet driver. To * prepare for the incoming frames the driver will permanently reserve a defined * amount of RX data net buffers when the interface is brought up and thus * reduce the total amount of RX data net buffers available to the application. * * Limitations: * - one shot PHY setup, no support for PHY disconnect/reconnect * - no statistics collection */ #if defined(CONFIG_SOC_FAMILY_ATMEL_SAM) #define DT_DRV_COMPAT atmel_sam_gmac #else #define DT_DRV_COMPAT atmel_sam0_gmac #endif #define LOG_MODULE_NAME eth_sam #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/sys/__assert.h> #include <zephyr/sys/barrier.h> #include <zephyr/sys/util.h> #include <errno.h> #include <stdbool.h> #include <zephyr/net/phy.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include <zephyr/drivers/i2c.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/clock_control/atmel_sam_pmc.h> #include <soc.h> #include "eth_sam_gmac_priv.h" #include "eth.h" #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM0 #include "eth_sam0_gmac.h" #endif #include <zephyr/drivers/ptp_clock.h> #include <zephyr/net/gptp.h> #include <zephyr/irq.h> #ifdef __DCACHE_PRESENT static bool dcache_enabled; static inline void dcache_is_enabled(void) { dcache_enabled = (SCB->CCR & SCB_CCR_DC_Msk); } static inline void dcache_invalidate(uint32_t addr, uint32_t size) { if (!dcache_enabled) { return; } /* Make sure it is aligned to 32B */ uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1); uint32_t size_full = size + addr - start_addr; SCB_InvalidateDCache_by_Addr((uint32_t *)start_addr, size_full); } static inline void dcache_clean(uint32_t addr, uint32_t 
size) { if (!dcache_enabled) { return; } /* Make sure it is aligned to 32B */ uint32_t start_addr = addr & (uint32_t)~(GMAC_DCACHE_ALIGNMENT - 1); uint32_t size_full = size + addr - start_addr; SCB_CleanDCache_by_Addr((uint32_t *)start_addr, size_full); } #else #define dcache_is_enabled() #define dcache_invalidate(addr, size) #define dcache_clean(addr, size) #endif #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM0 #define MCK_FREQ_HZ SOC_ATMEL_SAM0_MCK_FREQ_HZ #elif CONFIG_SOC_FAMILY_ATMEL_SAM #define MCK_FREQ_HZ SOC_ATMEL_SAM_MCK_FREQ_HZ #else #error Unsupported SoC family #endif /* * Verify Kconfig configuration */ /* No need to verify things for unit tests */ #if !defined(CONFIG_NET_TEST) #if CONFIG_NET_BUF_DATA_SIZE * CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT \ < GMAC_FRAME_SIZE_MAX #error CONFIG_NET_BUF_DATA_SIZE * CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT is \ not large enough to hold a full frame #endif #if CONFIG_NET_BUF_DATA_SIZE * (CONFIG_NET_BUF_RX_COUNT - \ CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) < GMAC_FRAME_SIZE_MAX #error (CONFIG_NET_BUF_RX_COUNT - CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT) * \ CONFIG_NET_BUF_DATA_SIZE are not large enough to hold a full frame #endif #if CONFIG_NET_BUF_DATA_SIZE & 0x3F #pragma message "CONFIG_NET_BUF_DATA_SIZE should be a multiple of 64 bytes " \ "due to the granularity of RX DMA" #endif #if (CONFIG_ETH_SAM_GMAC_BUF_RX_COUNT + 1) * GMAC_ACTIVE_QUEUE_NUM \ > CONFIG_NET_BUF_RX_COUNT #error Not enough RX buffers to allocate descriptors for each HW queue #endif #endif /* !CONFIG_NET_TEST */ BUILD_ASSERT(DT_INST_ENUM_IDX(0, phy_connection_type) <= 1, "Invalid PHY connection"); /* RX descriptors list */ static struct gmac_desc rx_desc_que0[MAIN_QUEUE_RX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #if GMAC_PRIORITY_QUEUE_NUM >= 1 static struct gmac_desc rx_desc_que1[PRIORITY_QUEUE1_RX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 2 static struct gmac_desc rx_desc_que2[PRIORITY_QUEUE2_RX_DESC_COUNT] __nocache 
__aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 3 static struct gmac_desc rx_desc_que3[PRIORITY_QUEUE3_RX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 4 static struct gmac_desc rx_desc_que4[PRIORITY_QUEUE4_RX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 5 static struct gmac_desc rx_desc_que5[PRIORITY_QUEUE5_RX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif /* TX descriptors list */ static struct gmac_desc tx_desc_que0[MAIN_QUEUE_TX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #if GMAC_PRIORITY_QUEUE_NUM >= 1 static struct gmac_desc tx_desc_que1[PRIORITY_QUEUE1_TX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 2 static struct gmac_desc tx_desc_que2[PRIORITY_QUEUE2_TX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 3 static struct gmac_desc tx_desc_que3[PRIORITY_QUEUE3_TX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 4 static struct gmac_desc tx_desc_que4[PRIORITY_QUEUE4_TX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif #if GMAC_PRIORITY_QUEUE_NUM >= 5 static struct gmac_desc tx_desc_que5[PRIORITY_QUEUE5_TX_DESC_COUNT] __nocache __aligned(GMAC_DESC_ALIGNMENT); #endif /* RX buffer accounting list */ static struct net_buf *rx_frag_list_que0[MAIN_QUEUE_RX_DESC_COUNT]; #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static struct net_buf *rx_frag_list_que1[PRIORITY_QUEUE1_RX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 static struct net_buf *rx_frag_list_que2[PRIORITY_QUEUE2_RX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 static struct net_buf *rx_frag_list_que3[PRIORITY_QUEUE3_RX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 static struct net_buf *rx_frag_list_que4[PRIORITY_QUEUE4_RX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 static 
struct net_buf *rx_frag_list_que5[PRIORITY_QUEUE5_RX_DESC_COUNT]; #endif #if GMAC_MULTIPLE_TX_PACKETS == 1 /* TX buffer accounting list */ static struct net_buf *tx_frag_list_que0[MAIN_QUEUE_TX_DESC_COUNT]; #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static struct net_buf *tx_frag_list_que1[PRIORITY_QUEUE1_TX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 static struct net_buf *tx_frag_list_que2[PRIORITY_QUEUE2_TX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 static struct net_buf *tx_frag_list_que3[PRIORITY_QUEUE3_TX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 static struct net_buf *tx_frag_list_que4[PRIORITY_QUEUE4_TX_DESC_COUNT]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 static struct net_buf *tx_frag_list_que5[PRIORITY_QUEUE5_TX_DESC_COUNT]; #endif #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) /* TX frames accounting list */ static struct net_pkt *tx_frame_list_que0[CONFIG_NET_PKT_TX_COUNT + 1]; #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static struct net_pkt *tx_frame_list_que1[CONFIG_NET_PKT_TX_COUNT + 1]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 static struct net_pkt *tx_frame_list_que2[CONFIG_NET_PKT_TX_COUNT + 1]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 static struct net_pkt *tx_frame_list_que3[CONFIG_NET_PKT_TX_COUNT + 1]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 static struct net_pkt *tx_frame_list_que4[CONFIG_NET_PKT_TX_COUNT + 1]; #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 static struct net_pkt *tx_frame_list_que5[CONFIG_NET_PKT_TX_COUNT + 1]; #endif #endif #endif #define MODULO_INC(val, max) {val = (++val < max) ? 
val : 0; } static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue); static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue); static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue); #if GMAC_PRIORITY_QUEUE_NUM >= 1 static inline void set_receive_buf_queue_pointer(Gmac *gmac, struct gmac_queue *queue) { /* Set Receive Buffer Queue Pointer Register */ if (queue->que_idx == GMAC_QUE_0) { gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf; } else { gmac->GMAC_RBQBAPQ[queue->que_idx - 1] = (uint32_t)queue->rx_desc_list.buf; } } static inline void disable_all_priority_queue_interrupt(Gmac *gmac) { uint32_t idx; for (idx = 0; idx < GMAC_PRIORITY_QUEUE_NUM; idx++) { gmac->GMAC_IDRPQ[idx] = UINT32_MAX; (void)gmac->GMAC_ISRPQ[idx]; } } static int priority_queue_init(Gmac *gmac, struct gmac_queue *queue) { int result; int queue_index; __ASSERT_NO_MSG(queue->rx_desc_list.len > 0); __ASSERT_NO_MSG(queue->tx_desc_list.len > 0); __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk), "RX descriptors have to be word aligned"); __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk), "TX descriptors have to be word aligned"); /* Extract queue index for easier referencing */ queue_index = queue->que_idx - 1; /* Setup descriptor lists */ result = rx_descriptors_init(gmac, queue); if (result < 0) { return result; } tx_descriptors_init(gmac, queue); #if GMAC_MULTIPLE_TX_PACKETS == 0 k_sem_init(&queue->tx_sem, 0, 1); #else k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1, queue->tx_desc_list.len - 1); #endif /* Setup RX buffer size for DMA */ gmac->GMAC_RBSRPQ[queue_index] = GMAC_RBSRPQ_RBS(CONFIG_NET_BUF_DATA_SIZE >> 6); /* Set Receive Buffer Queue Pointer Register */ gmac->GMAC_RBQBAPQ[queue_index] = (uint32_t)queue->rx_desc_list.buf; /* Set Transmit Buffer Queue Pointer Register */ gmac->GMAC_TBQBAPQ[queue_index] = (uint32_t)queue->tx_desc_list.buf; /* Enable RX/TX completion and error interrupts */ 
gmac->GMAC_IERPQ[queue_index] = GMAC_INTPQ_EN_FLAGS; queue->err_rx_frames_dropped = 0U; queue->err_rx_flushed_count = 0U; queue->err_tx_flushed_count = 0U; LOG_INF("Queue %d activated", queue->que_idx); return 0; } static int priority_queue_init_as_idle(Gmac *gmac, struct gmac_queue *queue) { struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list; struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list; __ASSERT(!((uint32_t)rx_desc_list->buf & ~GMAC_RBQB_ADDR_Msk), "RX descriptors have to be word aligned"); __ASSERT(!((uint32_t)tx_desc_list->buf & ~GMAC_TBQB_ADDR_Msk), "TX descriptors have to be word aligned"); __ASSERT((rx_desc_list->len == 1U) && (tx_desc_list->len == 1U), "Priority queues are currently not supported, descriptor " "list has to have a single entry"); /* Setup RX descriptor lists */ /* Take ownership from GMAC and set the wrap bit */ rx_desc_list->buf[0].w0 = GMAC_RXW0_WRAP; rx_desc_list->buf[0].w1 = 0U; /* Setup TX descriptor lists */ tx_desc_list->buf[0].w0 = 0U; /* Take ownership from GMAC and set the wrap bit */ tx_desc_list->buf[0].w1 = GMAC_TXW1_USED | GMAC_TXW1_WRAP; /* Set Receive Buffer Queue Pointer Register */ gmac->GMAC_RBQBAPQ[queue->que_idx - 1] = (uint32_t)rx_desc_list->buf; /* Set Transmit Buffer Queue Pointer Register */ gmac->GMAC_TBQBAPQ[queue->que_idx - 1] = (uint32_t)tx_desc_list->buf; LOG_INF("Queue %d set to idle", queue->que_idx); return 0; } static int queue_init(Gmac *gmac, struct gmac_queue *queue) { if (queue->que_idx == GMAC_QUE_0) { return nonpriority_queue_init(gmac, queue); } else if (queue->que_idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { return priority_queue_init(gmac, queue); } else { return priority_queue_init_as_idle(gmac, queue); } } #else static inline void set_receive_buf_queue_pointer(Gmac *gmac, struct gmac_queue *queue) { gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf; } static int queue_init(Gmac *gmac, struct gmac_queue *queue) { return nonpriority_queue_init(gmac, queue); } #define 
disable_all_priority_queue_interrupt(gmac) #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static int eth_sam_gmac_setup_qav(Gmac *gmac, int queue_id, bool enable); static inline void eth_sam_gmac_init_qav(Gmac *gmac) { uint32_t idx; for (idx = GMAC_QUE_1; idx <= GMAC_ACTIVE_PRIORITY_QUEUE_NUM; idx++) { eth_sam_gmac_setup_qav(gmac, idx, true); } } #else #define eth_sam_gmac_init_qav(gmac) #endif #if GMAC_MULTIPLE_TX_PACKETS == 1 /* * Reset ring buffer */ static void ring_buf_reset(struct ring_buf *rb) { rb->head = 0U; rb->tail = 0U; } /* * Get one 32 bit item from the ring buffer */ static uint32_t ring_buf_get(struct ring_buf *rb) { uint32_t val; __ASSERT(rb->tail != rb->head, "retrieving data from empty ring buffer"); val = rb->buf[rb->tail]; MODULO_INC(rb->tail, rb->len); return val; } /* * Put one 32 bit item into the ring buffer */ static void ring_buf_put(struct ring_buf *rb, uint32_t val) { rb->buf[rb->head] = val; MODULO_INC(rb->head, rb->len); __ASSERT(rb->tail != rb->head, "ring buffer overflow"); } #endif /* * Free pre-reserved RX buffers */ static void free_rx_bufs(struct net_buf **rx_frag_list, uint16_t len) { for (int i = 0; i < len; i++) { if (rx_frag_list[i]) { net_buf_unref(rx_frag_list[i]); rx_frag_list[i] = NULL; } } } /* * Set MAC Address for frame filtering logic */ static void mac_addr_set(Gmac *gmac, uint8_t index, uint8_t mac_addr[6]) { __ASSERT(index < 4, "index has to be in the range 0..3"); gmac->GMAC_SA[index].GMAC_SAB = (mac_addr[3] << 24) | (mac_addr[2] << 16) | (mac_addr[1] << 8) | (mac_addr[0]); gmac->GMAC_SA[index].GMAC_SAT = (mac_addr[5] << 8) | (mac_addr[4]); } /* * Initialize RX descriptor list */ static int rx_descriptors_init(Gmac *gmac, struct gmac_queue *queue) { struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list; struct net_buf **rx_frag_list = queue->rx_frag_list; struct net_buf *rx_buf; uint8_t *rx_buf_addr; __ASSERT_NO_MSG(rx_frag_list); rx_desc_list->tail = 0U; for (int i = 0; i < rx_desc_list->len; i++) { rx_buf 
= net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); if (rx_buf == NULL) { free_rx_bufs(rx_frag_list, rx_desc_list->len); LOG_ERR("Failed to reserve data net buffers"); return -ENOBUFS; } rx_frag_list[i] = rx_buf; rx_buf_addr = rx_buf->data; __ASSERT(!((uint32_t)rx_buf_addr & ~GMAC_RXW0_ADDR), "Misaligned RX buffer address"); __ASSERT(rx_buf->size == CONFIG_NET_BUF_DATA_SIZE, "Incorrect length of RX data buffer"); /* Give ownership to GMAC and remove the wrap bit */ rx_desc_list->buf[i].w0 = (uint32_t)rx_buf_addr & GMAC_RXW0_ADDR; rx_desc_list->buf[i].w1 = 0U; } /* Set the wrap bit on the last descriptor */ rx_desc_list->buf[rx_desc_list->len - 1U].w0 |= GMAC_RXW0_WRAP; return 0; } /* * Initialize TX descriptor list */ static void tx_descriptors_init(Gmac *gmac, struct gmac_queue *queue) { struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list; tx_desc_list->head = 0U; tx_desc_list->tail = 0U; for (int i = 0; i < tx_desc_list->len; i++) { tx_desc_list->buf[i].w0 = 0U; tx_desc_list->buf[i].w1 = GMAC_TXW1_USED; } /* Set the wrap bit on the last descriptor */ tx_desc_list->buf[tx_desc_list->len - 1U].w1 |= GMAC_TXW1_WRAP; #if GMAC_MULTIPLE_TX_PACKETS == 1 /* Reset TX frame list */ ring_buf_reset(&queue->tx_frag_list); #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) ring_buf_reset(&queue->tx_frames); #endif #endif } #if defined(CONFIG_NET_GPTP) static struct gptp_hdr *check_gptp_msg(struct net_if *iface, struct net_pkt *pkt, bool is_tx) { uint8_t *msg_start = net_pkt_data(pkt); struct gptp_hdr *gptp_hdr; int eth_hlen; struct net_eth_hdr *hdr; hdr = (struct net_eth_hdr *)msg_start; if (ntohs(hdr->type) != NET_ETH_PTYPE_PTP) { return NULL; } eth_hlen = sizeof(struct net_eth_hdr); /* In TX, the first net_buf contains the Ethernet header * and the actual gPTP header is in the second net_buf. * In RX, the Ethernet header + other headers are in the * first net_buf. 
*/ if (is_tx) { if (pkt->frags->frags == NULL) { return false; } gptp_hdr = (struct gptp_hdr *)pkt->frags->frags->data; } else { gptp_hdr = (struct gptp_hdr *)(pkt->frags->data + eth_hlen); } return gptp_hdr; } static bool need_timestamping(struct gptp_hdr *hdr) { switch (hdr->message_type) { case GPTP_SYNC_MESSAGE: case GPTP_PATH_DELAY_RESP_MESSAGE: return true; default: return false; } } static void update_pkt_priority(struct gptp_hdr *hdr, struct net_pkt *pkt) { if (GPTP_IS_EVENT_MSG(hdr->message_type)) { net_pkt_set_priority(pkt, NET_PRIORITY_CA); } else { net_pkt_set_priority(pkt, NET_PRIORITY_IC); } } static inline struct net_ptp_time get_ptp_event_rx_ts(Gmac *gmac) { struct net_ptp_time ts; ts.second = ((uint64_t)(gmac->GMAC_EFRSH & 0xffff) << 32) | gmac->GMAC_EFRSL; ts.nanosecond = gmac->GMAC_EFRN; return ts; } static inline struct net_ptp_time get_ptp_peer_event_rx_ts(Gmac *gmac) { struct net_ptp_time ts; ts.second = ((uint64_t)(gmac->GMAC_PEFRSH & 0xffff) << 32) | gmac->GMAC_PEFRSL; ts.nanosecond = gmac->GMAC_PEFRN; return ts; } static inline struct net_ptp_time get_ptp_event_tx_ts(Gmac *gmac) { struct net_ptp_time ts; ts.second = ((uint64_t)(gmac->GMAC_EFTSH & 0xffff) << 32) | gmac->GMAC_EFTSL; ts.nanosecond = gmac->GMAC_EFTN; return ts; } static inline struct net_ptp_time get_ptp_peer_event_tx_ts(Gmac *gmac) { struct net_ptp_time ts; ts.second = ((uint64_t)(gmac->GMAC_PEFTSH & 0xffff) << 32) | gmac->GMAC_PEFTSL; ts.nanosecond = gmac->GMAC_PEFTN; return ts; } static inline struct net_ptp_time get_current_ts(Gmac *gmac) { struct net_ptp_time ts; ts.second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL; ts.nanosecond = gmac->GMAC_TN; return ts; } static inline void timestamp_tx_pkt(Gmac *gmac, struct gptp_hdr *hdr, struct net_pkt *pkt) { struct net_ptp_time timestamp; if (hdr) { switch (hdr->message_type) { case GPTP_SYNC_MESSAGE: timestamp = get_ptp_event_tx_ts(gmac); break; default: timestamp = get_ptp_peer_event_tx_ts(gmac); } } else { 
timestamp = get_current_ts(gmac); } net_pkt_set_timestamp(pkt, &timestamp); } static inline void timestamp_rx_pkt(Gmac *gmac, struct gptp_hdr *hdr, struct net_pkt *pkt) { struct net_ptp_time timestamp; if (hdr) { switch (hdr->message_type) { case GPTP_SYNC_MESSAGE: timestamp = get_ptp_event_rx_ts(gmac); break; default: timestamp = get_ptp_peer_event_rx_ts(gmac); } } else { timestamp = get_current_ts(gmac); } net_pkt_set_timestamp(pkt, &timestamp); } #endif static inline struct net_if *get_iface(struct eth_sam_dev_data *ctx) { return ctx->iface; } /* * Process successfully sent packets */ static void tx_completed(Gmac *gmac, struct gmac_queue *queue) { #if GMAC_MULTIPLE_TX_PACKETS == 0 k_sem_give(&queue->tx_sem); #else struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list; struct gmac_desc *tx_desc; struct net_buf *frag; #if defined(CONFIG_NET_GPTP) struct net_pkt *pkt; struct gptp_hdr *hdr; struct eth_sam_dev_data *dev_data = CONTAINER_OF(queue, struct eth_sam_dev_data, queue_list[queue->que_idx]); #endif __ASSERT(tx_desc_list->buf[tx_desc_list->tail].w1 & GMAC_TXW1_USED, "first buffer of a frame is not marked as own by GMAC"); while (tx_desc_list->tail != tx_desc_list->head) { tx_desc = &tx_desc_list->buf[tx_desc_list->tail]; MODULO_INC(tx_desc_list->tail, tx_desc_list->len); k_sem_give(&queue->tx_desc_sem); /* Release net buffer to the buffer pool */ frag = UINT_TO_POINTER(ring_buf_get(&queue->tx_frag_list)); net_pkt_frag_unref(frag); LOG_DBG("Dropping frag %p", frag); if (tx_desc->w1 & GMAC_TXW1_LASTBUFFER) { #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) /* Release net packet to the packet pool */ pkt = UINT_TO_POINTER(ring_buf_get(&queue->tx_frames)); #if defined(CONFIG_NET_GPTP) hdr = check_gptp_msg(get_iface(dev_data), pkt, true); timestamp_tx_pkt(gmac, hdr, pkt); if (hdr && need_timestamping(hdr)) { net_if_add_tx_timestamp(pkt); } #endif net_pkt_unref(pkt); LOG_DBG("Dropping pkt %p", pkt); #endif break; } } #endif } /* * Reset TX queue when errors are detected 
*/ static void tx_error_handler(Gmac *gmac, struct gmac_queue *queue) { #if GMAC_MULTIPLE_TX_PACKETS == 1 struct net_buf *frag; struct ring_buf *tx_frag_list = &queue->tx_frag_list; #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) struct net_pkt *pkt; struct ring_buf *tx_frames = &queue->tx_frames; #endif #endif queue->err_tx_flushed_count++; /* Stop transmission, clean transmit pipeline and control registers */ gmac->GMAC_NCR &= ~GMAC_NCR_TXEN; #if GMAC_MULTIPLE_TX_PACKETS == 1 /* Free all frag resources in the TX path */ while (tx_frag_list->tail != tx_frag_list->head) { /* Release net buffer to the buffer pool */ frag = UINT_TO_POINTER(tx_frag_list->buf[tx_frag_list->tail]); net_pkt_frag_unref(frag); LOG_DBG("Dropping frag %p", frag); MODULO_INC(tx_frag_list->tail, tx_frag_list->len); } #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) /* Free all pkt resources in the TX path */ while (tx_frames->tail != tx_frames->head) { /* Release net packet to the packet pool */ pkt = UINT_TO_POINTER(tx_frames->buf[tx_frames->tail]); net_pkt_unref(pkt); LOG_DBG("Dropping pkt %p", pkt); MODULO_INC(tx_frames->tail, tx_frames->len); } #endif /* Reinitialize TX descriptor list */ k_sem_reset(&queue->tx_desc_sem); for (int i = 0; i < queue->tx_desc_list.len - 1; i++) { k_sem_give(&queue->tx_desc_sem); } #endif tx_descriptors_init(gmac, queue); #if GMAC_MULTIPLE_TX_PACKETS == 0 /* Reinitialize TX mutex */ k_sem_give(&queue->tx_sem); #endif /* Restart transmission */ gmac->GMAC_NCR |= GMAC_NCR_TXEN; } /* * Clean RX queue, any received data still stored in the buffers is abandoned. 
*/ static void rx_error_handler(Gmac *gmac, struct gmac_queue *queue) { queue->err_rx_flushed_count++; /* Stop reception */ gmac->GMAC_NCR &= ~GMAC_NCR_RXEN; queue->rx_desc_list.tail = 0U; for (int i = 0; i < queue->rx_desc_list.len; i++) { queue->rx_desc_list.buf[i].w1 = 0U; queue->rx_desc_list.buf[i].w0 &= ~GMAC_RXW0_OWNERSHIP; } set_receive_buf_queue_pointer(gmac, queue); /* Restart reception */ gmac->GMAC_NCR |= GMAC_NCR_RXEN; } /* * Set MCK to MDC clock divisor. * * According to 802.3 MDC should be less then 2.5 MHz. */ static int get_mck_clock_divisor(uint32_t mck) { uint32_t mck_divisor; if (mck <= 20000000U) { mck_divisor = GMAC_NCFGR_CLK_MCK_8; } else if (mck <= 40000000U) { mck_divisor = GMAC_NCFGR_CLK_MCK_16; } else if (mck <= 80000000U) { mck_divisor = GMAC_NCFGR_CLK_MCK_32; } else if (mck <= 120000000U) { mck_divisor = GMAC_NCFGR_CLK_MCK_48; } else if (mck <= 160000000U) { mck_divisor = GMAC_NCFGR_CLK_MCK_64; } else if (mck <= 240000000U) { mck_divisor = GMAC_NCFGR_CLK_MCK_96; } else { LOG_ERR("No valid MDC clock"); mck_divisor = -ENOTSUP; } return mck_divisor; } #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static int eth_sam_gmac_setup_qav(Gmac *gmac, int queue_id, bool enable) { /* Verify queue id */ if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { return -EINVAL; } if (queue_id == GMAC_QUE_2) { if (enable) { gmac->GMAC_CBSCR |= GMAC_CBSCR_QAE; } else { gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE; } } else { if (enable) { gmac->GMAC_CBSCR |= GMAC_CBSCR_QBE; } else { gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE; } } return 0; } static int eth_sam_gmac_get_qav_status(Gmac *gmac, int queue_id, bool *enabled) { /* Verify queue id */ if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { return -EINVAL; } if (queue_id == GMAC_QUE_2) { *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QAE; } else { *enabled = gmac->GMAC_CBSCR & GMAC_CBSCR_QBE; } return 0; } static int eth_sam_gmac_setup_qav_idle_slope(Gmac *gmac, int queue_id, unsigned int 
idle_slope) { uint32_t cbscr_val; /* Verify queue id */ if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { return -EINVAL; } cbscr_val = gmac->GMAC_CBSISQA; if (queue_id == GMAC_QUE_2) { gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QAE; gmac->GMAC_CBSISQA = idle_slope; } else { gmac->GMAC_CBSCR &= ~GMAC_CBSCR_QBE; gmac->GMAC_CBSISQB = idle_slope; } gmac->GMAC_CBSCR = cbscr_val; return 0; } static uint32_t eth_sam_gmac_get_bandwidth(Gmac *gmac) { uint32_t bandwidth; /* See if we operate in 10Mbps or 100Mbps mode, * Note: according to the manual, portTransmitRate is 0x07735940 for * 1Gbps - therefore we cannot use the KB/MB macros - we have to * multiply it by a round 1000 to get it right. */ if (gmac->GMAC_NCFGR & GMAC_NCFGR_SPD) { /* 100Mbps */ bandwidth = (100 * 1000 * 1000) / 8; } else { /* 10Mbps */ bandwidth = (10 * 1000 * 1000) / 8; } return bandwidth; } static int eth_sam_gmac_get_qav_idle_slope(Gmac *gmac, int queue_id, unsigned int *idle_slope) { /* Verify queue id */ if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { return -EINVAL; } if (queue_id == GMAC_QUE_2) { *idle_slope = gmac->GMAC_CBSISQA; } else { *idle_slope = gmac->GMAC_CBSISQB; } /* Convert to bps as expected by upper layer */ *idle_slope *= 8U; return 0; } static int eth_sam_gmac_get_qav_delta_bandwidth(Gmac *gmac, int queue_id, unsigned int *delta_bandwidth) { uint32_t bandwidth; unsigned int idle_slope; int ret; ret = eth_sam_gmac_get_qav_idle_slope(gmac, queue_id, &idle_slope); if (ret) { return ret; } /* Calculate in Bps */ idle_slope /= 8U; /* Get bandwidth and convert to bps */ bandwidth = eth_sam_gmac_get_bandwidth(gmac); /* Calculate percentage - instead of multiplying idle_slope by 100, * divide bandwidth - these numbers are so large that it should not * influence the outcome and saves us from employing larger data types. 
*/ *delta_bandwidth = idle_slope / (bandwidth / 100U); return 0; } static int eth_sam_gmac_setup_qav_delta_bandwidth(Gmac *gmac, int queue_id, int queue_share) { uint32_t bandwidth; uint32_t idle_slope; /* Verify queue id */ if (queue_id < GMAC_QUE_1 || queue_id > GMAC_ACTIVE_PRIORITY_QUEUE_NUM) { return -EINVAL; } bandwidth = eth_sam_gmac_get_bandwidth(gmac); idle_slope = (bandwidth * queue_share) / 100U; return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id, idle_slope); } #endif #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) static void gmac_setup_ptp_clock_divisors(Gmac *gmac) { int mck_divs[] = {10, 5, 2}; double min_cycles; double min_period; int div; int i; uint8_t cns, acns, nit; min_cycles = MCK_FREQ_HZ; min_period = NSEC_PER_SEC; for (i = 0; i < ARRAY_SIZE(mck_divs); ++i) { div = mck_divs[i]; while ((double)(min_cycles / div) == (int)(min_cycles / div) && (double)(min_period / div) == (int)(min_period / div)) { min_cycles /= div; min_period /= div; } } nit = min_cycles - 1; cns = 0U; acns = 0U; while ((cns + 2) * nit < min_period) { cns++; } acns = min_period - (nit * cns); gmac->GMAC_TI = GMAC_TI_CNS(cns) | GMAC_TI_ACNS(acns) | GMAC_TI_NIT(nit); gmac->GMAC_TISUBN = 0; } #endif static int gmac_init(Gmac *gmac, uint32_t gmac_ncfgr_val) { int mck_divisor; mck_divisor = get_mck_clock_divisor(MCK_FREQ_HZ); if (mck_divisor < 0) { return mck_divisor; } /* Set Network Control Register to its default value, clear stats. */ gmac->GMAC_NCR = GMAC_NCR_CLRSTAT | GMAC_NCR_MPE; /* Disable all interrupts */ gmac->GMAC_IDR = UINT32_MAX; /* Clear all interrupts */ (void)gmac->GMAC_ISR; disable_all_priority_queue_interrupt(gmac); /* Setup Hash Registers - enable reception of all multicast frames when * GMAC_NCFGR_MTIHEN is set. 
*/ gmac->GMAC_HRB = UINT32_MAX; gmac->GMAC_HRT = UINT32_MAX; /* Setup Network Configuration Register */ gmac->GMAC_NCFGR = gmac_ncfgr_val | mck_divisor; /* Default (RMII) is defined at atmel,gmac-common.yaml file */ switch (DT_INST_ENUM_IDX(0, phy_connection_type)) { case 0: /* mii */ gmac->GMAC_UR = 0x1; break; case 1: /* rmii */ gmac->GMAC_UR = 0x0; break; default: /* Build assert at top of file should catch this case */ LOG_ERR("The phy connection type is invalid"); return -EINVAL; } #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) /* Initialize PTP Clock Registers */ gmac_setup_ptp_clock_divisors(gmac); gmac->GMAC_TN = 0; gmac->GMAC_TSH = 0; gmac->GMAC_TSL = 0; #endif /* Enable Qav if priority queues are used, and setup the default delta * bandwidth according to IEEE802.1Qav (34.3.1) */ #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 1 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 75); #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 2 /* For multiple priority queues, 802.1Qav suggests using 75% for the * highest priority queue, and 0% for the lower priority queues. * This is because the lower priority queues are supposed to be using * the bandwidth available from the higher priority queues AND its own * available bandwidth (see 802.1Q 34.3.1 for more details). * This does not work like that in SAM GMAC - the lower priority queues * are not using the bandwidth reserved for the higher priority queues * at all. Thus we still set the default to a total of the recommended * 75%, but split the bandwidth between them manually. 
*/ eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 25); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 50); #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 3 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 25); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 25); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 25); #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 4 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 21); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 18); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 18); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 4, 18); #elif GMAC_ACTIVE_PRIORITY_QUEUE_NUM == 5 eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 1, 15); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 2, 15); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 3, 15); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 4, 15); eth_sam_gmac_setup_qav_delta_bandwidth(gmac, 5, 15); #endif eth_sam_gmac_init_qav(gmac); return 0; } static void link_configure(Gmac *gmac, bool full_duplex, bool speed_100M) { uint32_t val; val = gmac->GMAC_NCFGR; val &= ~(GMAC_NCFGR_FD | GMAC_NCFGR_SPD); val |= (full_duplex) ? GMAC_NCFGR_FD : 0; val |= (speed_100M) ? GMAC_NCFGR_SPD : 0; gmac->GMAC_NCFGR = val; gmac->GMAC_NCR |= (GMAC_NCR_RXEN | GMAC_NCR_TXEN); } static int nonpriority_queue_init(Gmac *gmac, struct gmac_queue *queue) { int result; __ASSERT_NO_MSG(queue->rx_desc_list.len > 0); __ASSERT_NO_MSG(queue->tx_desc_list.len > 0); __ASSERT(!((uint32_t)queue->rx_desc_list.buf & ~GMAC_RBQB_ADDR_Msk), "RX descriptors have to be word aligned"); __ASSERT(!((uint32_t)queue->tx_desc_list.buf & ~GMAC_TBQB_ADDR_Msk), "TX descriptors have to be word aligned"); /* Setup descriptor lists */ result = rx_descriptors_init(gmac, queue); if (result < 0) { return result; } tx_descriptors_init(gmac, queue); #if GMAC_MULTIPLE_TX_PACKETS == 0 /* Initialize TX semaphore. This semaphore is used to wait until the TX * data has been sent. */ k_sem_init(&queue->tx_sem, 0, 1); #else /* Initialize TX descriptors semaphore. 
The semaphore is required as the * size of the TX descriptor list is limited while the number of TX data * buffers is not. */ k_sem_init(&queue->tx_desc_sem, queue->tx_desc_list.len - 1, queue->tx_desc_list.len - 1); #endif /* Set Receive Buffer Queue Pointer Register */ gmac->GMAC_RBQB = (uint32_t)queue->rx_desc_list.buf; /* Set Transmit Buffer Queue Pointer Register */ gmac->GMAC_TBQB = (uint32_t)queue->tx_desc_list.buf; /* Configure GMAC DMA transfer */ gmac->GMAC_DCFGR = /* Receive Buffer Size (defined in multiples of 64 bytes) */ GMAC_DCFGR_DRBS(CONFIG_NET_BUF_DATA_SIZE >> 6) | #if defined(GMAC_DCFGR_RXBMS) /* Use full receive buffer size on parts where this is selectable */ GMAC_DCFGR_RXBMS(3) | #endif /* Attempt to use INCR4 AHB bursts (Default) */ GMAC_DCFGR_FBLDO_INCR4 | /* DMA Queue Flags */ GMAC_DMA_QUEUE_FLAGS; /* Setup RX/TX completion and error interrupts */ gmac->GMAC_IER = GMAC_INT_EN_FLAGS; queue->err_rx_frames_dropped = 0U; queue->err_rx_flushed_count = 0U; queue->err_tx_flushed_count = 0U; LOG_INF("Queue %d activated", queue->que_idx); return 0; } static struct net_pkt *frame_get(struct gmac_queue *queue) { struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list; struct gmac_desc *rx_desc; struct net_buf **rx_frag_list = queue->rx_frag_list; struct net_pkt *rx_frame; bool frame_is_complete; struct net_buf *frag; struct net_buf *new_frag; struct net_buf *last_frag = NULL; uint8_t *frag_data; uint32_t frag_len; uint32_t frame_len = 0U; uint16_t tail; uint8_t wrap; /* Check if there exists a complete frame in RX descriptor list */ tail = rx_desc_list->tail; rx_desc = &rx_desc_list->buf[tail]; frame_is_complete = false; while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) && !frame_is_complete) { frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF); MODULO_INC(tail, rx_desc_list->len); rx_desc = &rx_desc_list->buf[tail]; } /* Frame which is not complete can be dropped by GMAC. Do not process * it, even partially. 
*/ if (!frame_is_complete) { return NULL; } rx_frame = net_pkt_rx_alloc(K_NO_WAIT); /* Process a frame */ tail = rx_desc_list->tail; rx_desc = &rx_desc_list->buf[tail]; frame_is_complete = false; /* TODO: Don't assume first RX fragment will have SOF (Start of frame) * bit set. If SOF bit is missing recover gracefully by dropping * invalid frame. */ __ASSERT(rx_desc->w1 & GMAC_RXW1_SOF, "First RX fragment is missing SOF bit"); /* TODO: We know already tail and head indexes of fragments containing * complete frame. Loop over those indexes, don't search for them * again. */ while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) && !frame_is_complete) { frag = rx_frag_list[tail]; frag_data = (uint8_t *)(rx_desc->w0 & GMAC_RXW0_ADDR); __ASSERT(frag->data == frag_data, "RX descriptor and buffer list desynchronized"); frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF); if (frame_is_complete) { frag_len = (rx_desc->w1 & GMAC_RXW1_LEN) - frame_len; } else { frag_len = CONFIG_NET_BUF_DATA_SIZE; } frame_len += frag_len; /* Link frame fragments only if RX net buffer is valid */ if (rx_frame != NULL) { /* Assure cache coherency after DMA write operation */ dcache_invalidate((uint32_t)frag_data, frag->size); /* Get a new data net buffer from the buffer pool */ new_frag = net_pkt_get_frag(rx_frame, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); if (new_frag == NULL) { queue->err_rx_frames_dropped++; net_pkt_unref(rx_frame); rx_frame = NULL; } else { net_buf_add(frag, frag_len); if (!last_frag) { net_pkt_frag_insert(rx_frame, frag); } else { net_buf_frag_insert(last_frag, frag); } last_frag = frag; frag = new_frag; rx_frag_list[tail] = frag; } } /* Update buffer descriptor status word */ rx_desc->w1 = 0U; /* Guarantee that status word is written before the address * word to avoid race condition. */ barrier_dmem_fence_full(); /* Update buffer descriptor address word */ wrap = (tail == rx_desc_list->len-1U ? 
GMAC_RXW0_WRAP : 0); rx_desc->w0 = ((uint32_t)frag->data & GMAC_RXW0_ADDR) | wrap; MODULO_INC(tail, rx_desc_list->len); rx_desc = &rx_desc_list->buf[tail]; } rx_desc_list->tail = tail; LOG_DBG("Frame complete: rx=%p, tail=%d", rx_frame, tail); __ASSERT_NO_MSG(frame_is_complete); return rx_frame; } static void eth_rx(struct gmac_queue *queue) { struct eth_sam_dev_data *dev_data = CONTAINER_OF(queue, struct eth_sam_dev_data, queue_list[queue->que_idx]); struct net_pkt *rx_frame; #if defined(CONFIG_NET_GPTP) const struct device *const dev = net_if_get_device(dev_data->iface); const struct eth_sam_dev_cfg *const cfg = dev->config; Gmac *gmac = cfg->regs; struct gptp_hdr *hdr; #endif /* More than one frame could have been received by GMAC, get all * complete frames stored in the GMAC RX descriptor list. */ rx_frame = frame_get(queue); while (rx_frame) { LOG_DBG("ETH rx"); #if defined(CONFIG_NET_GPTP) hdr = check_gptp_msg(get_iface(dev_data), rx_frame, false); timestamp_rx_pkt(gmac, hdr, rx_frame); if (hdr) { update_pkt_priority(hdr, rx_frame); } #endif /* CONFIG_NET_GPTP */ if (net_recv_data(get_iface(dev_data), rx_frame) < 0) { eth_stats_update_errors_rx(get_iface(dev_data)); net_pkt_unref(rx_frame); } rx_frame = frame_get(queue); } } #if !defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE) && \ ((GMAC_ACTIVE_QUEUE_NUM != NET_TC_TX_COUNT) || \ ((NET_TC_TX_COUNT != NET_TC_RX_COUNT) && defined(CONFIG_NET_VLAN))) static int priority2queue(enum net_priority priority) { static const uint8_t queue_priority_map[] = { #if GMAC_ACTIVE_QUEUE_NUM == 1 0, 0, 0, 0, 0, 0, 0, 0 #endif #if GMAC_ACTIVE_QUEUE_NUM == 2 0, 0, 0, 0, 1, 1, 1, 1 #endif #if GMAC_ACTIVE_QUEUE_NUM == 3 0, 0, 0, 0, 1, 1, 2, 2 #endif #if GMAC_ACTIVE_QUEUE_NUM == 4 0, 0, 0, 0, 1, 1, 2, 3 #endif #if GMAC_ACTIVE_QUEUE_NUM == 5 0, 0, 0, 0, 1, 2, 3, 4 #endif #if GMAC_ACTIVE_QUEUE_NUM == 6 0, 0, 0, 1, 2, 3, 4, 5 #endif }; return queue_priority_map[priority]; } #endif static int eth_tx(const struct device *dev, struct net_pkt 
*pkt) { const struct eth_sam_dev_cfg *const cfg = dev->config; struct eth_sam_dev_data *const dev_data = dev->data; Gmac *gmac = cfg->regs; struct gmac_queue *queue; struct gmac_desc_list *tx_desc_list; struct gmac_desc *tx_desc; struct gmac_desc *tx_first_desc; struct net_buf *frag; uint8_t *frag_data; uint16_t frag_len; uint32_t err_tx_flushed_count_at_entry; #if GMAC_MULTIPLE_TX_PACKETS == 1 unsigned int key; #endif uint8_t pkt_prio; #if GMAC_MULTIPLE_TX_PACKETS == 0 #if defined(CONFIG_NET_GPTP) struct gptp_hdr *hdr; #endif #endif __ASSERT(pkt, "buf pointer is NULL"); __ASSERT(pkt->frags, "Frame data missing"); LOG_DBG("ETH tx"); /* Decide which queue should be used */ pkt_prio = net_pkt_priority(pkt); #if defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE) /* Route eveything to the forced queue */ queue = &dev_data->queue_list[CONFIG_ETH_SAM_GMAC_FORCED_QUEUE]; #elif GMAC_ACTIVE_QUEUE_NUM == CONFIG_NET_TC_TX_COUNT /* Prefer to chose queue based on its traffic class */ queue = &dev_data->queue_list[net_tx_priority2tc(pkt_prio)]; #else /* If that's not possible due to config - use builtin mapping */ queue = &dev_data->queue_list[priority2queue(pkt_prio)]; #endif tx_desc_list = &queue->tx_desc_list; err_tx_flushed_count_at_entry = queue->err_tx_flushed_count; frag = pkt->frags; /* Keep reference to the descriptor */ tx_first_desc = &tx_desc_list->buf[tx_desc_list->head]; while (frag) { frag_data = frag->data; frag_len = frag->len; /* Assure cache coherency before DMA read operation */ dcache_clean((uint32_t)frag_data, frag->size); #if GMAC_MULTIPLE_TX_PACKETS == 1 k_sem_take(&queue->tx_desc_sem, K_FOREVER); /* The following section becomes critical and requires IRQ lock * / unlock protection only due to the possibility of executing * tx_error_handler() function. 
*/ key = irq_lock(); /* Check if tx_error_handler() function was executed */ if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) { irq_unlock(key); return -EIO; } #endif tx_desc = &tx_desc_list->buf[tx_desc_list->head]; /* Update buffer descriptor address word */ tx_desc->w0 = (uint32_t)frag_data; /* Update buffer descriptor status word (clear used bit except * for the first frag). */ tx_desc->w1 = (frag_len & GMAC_TXW1_LEN) | (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0) | (tx_desc_list->head == tx_desc_list->len - 1U ? GMAC_TXW1_WRAP : 0) | (tx_desc == tx_first_desc ? GMAC_TXW1_USED : 0); /* Update descriptor position */ MODULO_INC(tx_desc_list->head, tx_desc_list->len); #if GMAC_MULTIPLE_TX_PACKETS == 1 __ASSERT(tx_desc_list->head != tx_desc_list->tail, "tx_desc_list overflow"); /* Account for a sent frag */ ring_buf_put(&queue->tx_frag_list, POINTER_TO_UINT(frag)); /* frag is internally queued, so it requires to hold a reference */ net_pkt_frag_ref(frag); irq_unlock(key); #endif /* Continue with the rest of fragments (only data) */ frag = frag->frags; } #if GMAC_MULTIPLE_TX_PACKETS == 1 key = irq_lock(); /* Check if tx_error_handler() function was executed */ if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) { irq_unlock(key); return -EIO; } #endif /* Ensure the descriptor following the last one is marked as used */ tx_desc_list->buf[tx_desc_list->head].w1 = GMAC_TXW1_USED; /* Guarantee that all the fragments have been written before removing * the used bit to avoid race condition. */ barrier_dmem_fence_full(); /* Remove the used bit of the first fragment to allow the controller * to process it and the following fragments. 
*/ tx_first_desc->w1 &= ~GMAC_TXW1_USED; #if GMAC_MULTIPLE_TX_PACKETS == 1 #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) /* Account for a sent frame */ ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt)); /* pkt is internally queued, so it requires to hold a reference */ net_pkt_ref(pkt); #endif irq_unlock(key); #endif /* Guarantee that the first fragment got its bit removed before starting * sending packets to avoid packets getting stuck. */ barrier_dmem_fence_full(); /* Start transmission */ gmac->GMAC_NCR |= GMAC_NCR_TSTART; #if GMAC_MULTIPLE_TX_PACKETS == 0 /* Wait until the packet is sent */ k_sem_take(&queue->tx_sem, K_FOREVER); /* Check if transmit successful or not */ if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) { return -EIO; } #if defined(CONFIG_NET_GPTP) #if defined(CONFIG_NET_GPTP) hdr = check_gptp_msg(get_iface(dev_data), pkt, true); timestamp_tx_pkt(gmac, hdr, pkt); if (hdr && need_timestamping(hdr)) { net_if_add_tx_timestamp(pkt); } #endif #endif #endif return 0; } static void queue0_isr(const struct device *dev) { const struct eth_sam_dev_cfg *const cfg = dev->config; struct eth_sam_dev_data *const dev_data = dev->data; Gmac *gmac = cfg->regs; struct gmac_queue *queue; struct gmac_desc_list *rx_desc_list; struct gmac_desc_list *tx_desc_list; struct gmac_desc *tail_desc; uint32_t isr; /* Interrupt Status Register is cleared on read */ isr = gmac->GMAC_ISR; LOG_DBG("GMAC_ISR=0x%08x", isr); queue = &dev_data->queue_list[0]; rx_desc_list = &queue->rx_desc_list; tx_desc_list = &queue->tx_desc_list; /* RX packet */ if (isr & GMAC_INT_RX_ERR_BITS) { rx_error_handler(gmac, queue); } else if (isr & GMAC_ISR_RCOMP) { tail_desc = &rx_desc_list->buf[rx_desc_list->tail]; LOG_DBG("rx.w1=0x%08x, tail=%d", tail_desc->w1, rx_desc_list->tail); eth_rx(queue); } /* TX packet */ if (isr & GMAC_INT_TX_ERR_BITS) { tx_error_handler(gmac, queue); } else if (isr & GMAC_ISR_TCOMP) { #if GMAC_MULTIPLE_TX_PACKETS == 1 tail_desc = 
&tx_desc_list->buf[tx_desc_list->tail]; LOG_DBG("tx.w1=0x%08x, tail=%d", tail_desc->w1, tx_desc_list->tail); #endif tx_completed(gmac, queue); } if (isr & GMAC_IER_HRESP) { LOG_DBG("IER HRESP"); } } #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static inline void priority_queue_isr(const struct device *dev, unsigned int queue_idx) { const struct eth_sam_dev_cfg *const cfg = dev->config; struct eth_sam_dev_data *const dev_data = dev->data; Gmac *gmac = cfg->regs; struct gmac_queue *queue; struct gmac_desc_list *rx_desc_list; struct gmac_desc_list *tx_desc_list; struct gmac_desc *tail_desc; uint32_t isrpq; isrpq = gmac->GMAC_ISRPQ[queue_idx - 1]; LOG_DBG("GMAC_ISRPQ%d=0x%08x", queue_idx - 1, isrpq); queue = &dev_data->queue_list[queue_idx]; rx_desc_list = &queue->rx_desc_list; tx_desc_list = &queue->tx_desc_list; /* RX packet */ if (isrpq & GMAC_INTPQ_RX_ERR_BITS) { rx_error_handler(gmac, queue); } else if (isrpq & GMAC_ISRPQ_RCOMP) { tail_desc = &rx_desc_list->buf[rx_desc_list->tail]; LOG_DBG("rx.w1=0x%08x, tail=%d", tail_desc->w1, rx_desc_list->tail); eth_rx(queue); } /* TX packet */ if (isrpq & GMAC_INTPQ_TX_ERR_BITS) { tx_error_handler(gmac, queue); } else if (isrpq & GMAC_ISRPQ_TCOMP) { #if GMAC_MULTIPLE_TX_PACKETS == 1 tail_desc = &tx_desc_list->buf[tx_desc_list->tail]; LOG_DBG("tx.w1=0x%08x, tail=%d", tail_desc->w1, tx_desc_list->tail); #endif tx_completed(gmac, queue); } if (isrpq & GMAC_IERPQ_HRESP) { LOG_DBG("IERPQ%d HRESP", queue_idx - 1); } } #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static void queue1_isr(const struct device *dev) { priority_queue_isr(dev, 1); } #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 static void queue2_isr(const struct device *dev) { priority_queue_isr(dev, 2); } #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 static void queue3_isr(const struct device *dev) { priority_queue_isr(dev, 3); } #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 static void queue4_isr(const struct device *dev) { priority_queue_isr(dev, 4); } #endif #if 
GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 static void queue5_isr(const struct device *dev) { priority_queue_isr(dev, 5); } #endif static int eth_initialize(const struct device *dev) { const struct eth_sam_dev_cfg *const cfg = dev->config; int retval; cfg->config_func(); #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM /* Enable GMAC module's clock */ (void)clock_control_on(SAM_DT_PMC_CONTROLLER, (clock_control_subsys_t)&cfg->clock_cfg); #else /* Enable MCLK clock on GMAC */ MCLK->AHBMASK.reg |= MCLK_AHBMASK_GMAC; *MCLK_GMAC |= MCLK_GMAC_MASK; #endif /* Connect pins to the peripheral */ retval = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); return retval; } #if DT_INST_NODE_HAS_PROP(0, mac_eeprom) static void get_mac_addr_from_i2c_eeprom(uint8_t mac_addr[6]) { uint32_t iaddr = CONFIG_ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS; int ret; const struct i2c_dt_spec i2c = I2C_DT_SPEC_GET(DT_INST_PHANDLE(0, mac_eeprom)); if (!device_is_ready(i2c.bus)) { LOG_ERR("Bus device is not ready"); return; } ret = i2c_write_read_dt(&i2c, &iaddr, CONFIG_ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS_SIZE, mac_addr, 6); if (ret != 0) { LOG_ERR("I2C: failed to read MAC addr"); return; } } #endif static void generate_mac(uint8_t mac_addr[6]) { #if DT_INST_NODE_HAS_PROP(0, mac_eeprom) get_mac_addr_from_i2c_eeprom(mac_addr); #elif DT_INST_PROP(0, zephyr_random_mac_address) gen_random_mac(mac_addr, ATMEL_OUI_B0, ATMEL_OUI_B1, ATMEL_OUI_B2); #endif } static void phy_link_state_changed(const struct device *pdev, struct phy_link_state *state, void *user_data) { const struct device *dev = (const struct device *) user_data; struct eth_sam_dev_data *const dev_data = dev->data; const struct eth_sam_dev_cfg *const cfg = dev->config; bool is_up; is_up = state->is_up; if (is_up && !dev_data->link_up) { LOG_INF("Link up"); /* Announce link up status */ dev_data->link_up = true; net_eth_carrier_on(dev_data->iface); /* Set up link */ link_configure(cfg->regs, PHY_LINK_IS_FULL_DUPLEX(state->speed), PHY_LINK_IS_SPEED_100M(state->speed)); 
} else if (!is_up && dev_data->link_up) { LOG_INF("Link down"); /* Announce link down status */ dev_data->link_up = false; net_eth_carrier_off(dev_data->iface); } } static const struct device *eth_sam_gmac_get_phy(const struct device *dev) { const struct eth_sam_dev_cfg *const cfg = dev->config; return cfg->phy_dev; } static void eth0_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_sam_dev_data *const dev_data = dev->data; const struct eth_sam_dev_cfg *const cfg = dev->config; static bool init_done; uint32_t gmac_ncfgr_val; int result; int i; if (dev_data->iface == NULL) { dev_data->iface = iface; } ethernet_init(iface); /* The rest of initialization should only be done once */ if (init_done) { return; } /* Check the status of data caches */ dcache_is_enabled(); /* Initialize GMAC driver */ gmac_ncfgr_val = GMAC_NCFGR_MTIHEN /* Multicast Hash Enable */ | GMAC_NCFGR_LFERD /* Length Field Error Frame Discard */ | GMAC_NCFGR_RFCS /* Remove Frame Check Sequence */ | GMAC_NCFGR_RXCOEN /* Receive Checksum Offload Enable */ | GMAC_MAX_FRAME_SIZE; result = gmac_init(cfg->regs, gmac_ncfgr_val); if (result < 0) { LOG_ERR("Unable to initialize ETH driver"); return; } generate_mac(dev_data->mac_addr); LOG_INF("MAC: %02x:%02x:%02x:%02x:%02x:%02x", dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); /* Set MAC Address for frame filtering logic */ mac_addr_set(cfg->regs, 0, dev_data->mac_addr); /* Register Ethernet MAC Address with the upper layer */ net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); /* Initialize GMAC queues */ for (i = GMAC_QUE_0; i < GMAC_QUEUE_NUM; i++) { result = queue_init(cfg->regs, &dev_data->queue_list[i]); if (result < 0) { LOG_ERR("Unable to initialize ETH queue%d", i); return; } } #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 #if defined(CONFIG_ETH_SAM_GMAC_FORCE_QUEUE) for (i 
= 0; i < CONFIG_NET_TC_RX_COUNT; ++i) { cfg->regs->GMAC_ST1RPQ[i] = GMAC_ST1RPQ_DSTCM(i) | GMAC_ST1RPQ_QNB(CONFIG_ETH_SAM_GMAC_FORCED_QUEUE); } #elif GMAC_ACTIVE_QUEUE_NUM == NET_TC_RX_COUNT /* If TC configuration is compatible with HW configuration, setup the * screening registers based on the DS/TC values. * Map them 1:1 - TC 0 -> Queue 0, TC 1 -> Queue 1 etc. */ for (i = 0; i < CONFIG_NET_TC_RX_COUNT; ++i) { cfg->regs->GMAC_ST1RPQ[i] = GMAC_ST1RPQ_DSTCM(i) | GMAC_ST1RPQ_QNB(i); } #elif defined(CONFIG_NET_VLAN) /* If VLAN is enabled, route packets according to VLAN priority */ int j; i = 0; for (j = NET_PRIORITY_NC; j >= 0; --j) { if (priority2queue(j) == 0) { /* No point to set rules for the regular queue */ continue; } if (i >= ARRAY_SIZE(cfg->regs->GMAC_ST2RPQ)) { /* No more screening registers available */ break; } cfg->regs->GMAC_ST2RPQ[i++] = GMAC_ST2RPQ_QNB(priority2queue(j)) | GMAC_ST2RPQ_VLANP(j) | GMAC_ST2RPQ_VLANE; } #endif #endif if (device_is_ready(cfg->phy_dev)) { phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed, (void *)dev); } else { LOG_ERR("PHY device not ready"); } /* Do not start the interface until PHY link is up */ if (!(dev_data->link_up)) { net_if_carrier_off(iface); } init_done = true; } static enum ethernet_hw_caps eth_sam_gmac_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | #if defined(CONFIG_NET_VLAN) ETHERNET_HW_VLAN | #endif #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) ETHERNET_PTP | #endif ETHERNET_PRIORITY_QUEUES | #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 ETHERNET_QAV | #endif ETHERNET_LINK_100BASE_T; } #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static int eth_sam_gmac_set_qav_param(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { const struct eth_sam_dev_cfg *const cfg = dev->config; Gmac *gmac = cfg->regs; enum ethernet_qav_param_type qav_param_type; unsigned int delta_bandwidth; unsigned int idle_slope; int queue_id; bool enable; /* 
Priority queue IDs start from 1 for SAM GMAC */ queue_id = config->qav_param.queue_id + 1; qav_param_type = config->qav_param.type; switch (qav_param_type) { case ETHERNET_QAV_PARAM_TYPE_STATUS: enable = config->qav_param.enabled; return eth_sam_gmac_setup_qav(gmac, queue_id, enable); case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH: delta_bandwidth = config->qav_param.delta_bandwidth; return eth_sam_gmac_setup_qav_delta_bandwidth(gmac, queue_id, delta_bandwidth); case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE: idle_slope = config->qav_param.idle_slope; /* The standard uses bps, SAM GMAC uses Bps - convert now */ idle_slope /= 8U; return eth_sam_gmac_setup_qav_idle_slope(gmac, queue_id, idle_slope); default: break; } return -ENOTSUP; } #endif static int eth_sam_gmac_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { int result = 0; switch (type) { #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 case ETHERNET_CONFIG_TYPE_QAV_PARAM: return eth_sam_gmac_set_qav_param(dev, type, config); #endif case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: { struct eth_sam_dev_data *const dev_data = dev->data; const struct eth_sam_dev_cfg *const cfg = dev->config; memcpy(dev_data->mac_addr, config->mac_address.addr, sizeof(dev_data->mac_addr)); /* Set MAC Address for frame filtering logic */ mac_addr_set(cfg->regs, 0, dev_data->mac_addr); LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name, dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); /* Register Ethernet MAC Address with the upper layer */ net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); break; } default: result = -ENOTSUP; break; } return result; } #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 static int eth_sam_gmac_get_qav_param(const struct device *dev, enum ethernet_config_type type, struct ethernet_config *config) { const struct 
eth_sam_dev_cfg *const cfg = dev->config; Gmac *gmac = cfg->regs; enum ethernet_qav_param_type qav_param_type; int queue_id; bool *enabled; unsigned int *idle_slope; unsigned int *delta_bandwidth; /* Priority queue IDs start from 1 for SAM GMAC */ queue_id = config->qav_param.queue_id + 1; qav_param_type = config->qav_param.type; switch (qav_param_type) { case ETHERNET_QAV_PARAM_TYPE_STATUS: enabled = &config->qav_param.enabled; return eth_sam_gmac_get_qav_status(gmac, queue_id, enabled); case ETHERNET_QAV_PARAM_TYPE_IDLE_SLOPE: idle_slope = &config->qav_param.idle_slope; return eth_sam_gmac_get_qav_idle_slope(gmac, queue_id, idle_slope); case ETHERNET_QAV_PARAM_TYPE_OPER_IDLE_SLOPE: idle_slope = &config->qav_param.oper_idle_slope; return eth_sam_gmac_get_qav_idle_slope(gmac, queue_id, idle_slope); case ETHERNET_QAV_PARAM_TYPE_DELTA_BANDWIDTH: delta_bandwidth = &config->qav_param.delta_bandwidth; return eth_sam_gmac_get_qav_delta_bandwidth(gmac, queue_id, delta_bandwidth); case ETHERNET_QAV_PARAM_TYPE_TRAFFIC_CLASS: #if GMAC_ACTIVE_QUEUE_NUM == NET_TC_TX_COUNT config->qav_param.traffic_class = queue_id; return 0; #else /* Invalid configuration - no direct TC to queue mapping */ return -ENOTSUP; #endif default: break; } return -ENOTSUP; } #endif static int eth_sam_gmac_get_config(const struct device *dev, enum ethernet_config_type type, struct ethernet_config *config) { switch (type) { case ETHERNET_CONFIG_TYPE_PRIORITY_QUEUES_NUM: config->priority_queues_num = GMAC_ACTIVE_PRIORITY_QUEUE_NUM; return 0; #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 case ETHERNET_CONFIG_TYPE_QAV_PARAM: return eth_sam_gmac_get_qav_param(dev, type, config); #endif default: break; } return -ENOTSUP; } #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) static const struct device *eth_sam_gmac_get_ptp_clock(const struct device *dev) { struct eth_sam_dev_data *const dev_data = dev->data; return dev_data->ptp_clock; } #endif static const struct ethernet_api eth_api = { .iface_api.init = eth0_iface_init, 
.get_capabilities = eth_sam_gmac_get_capabilities, .set_config = eth_sam_gmac_set_config, .get_config = eth_sam_gmac_get_config, .get_phy = eth_sam_gmac_get_phy, .send = eth_tx, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .get_ptp_clock = eth_sam_gmac_get_ptp_clock, #endif }; static void eth0_irq_config(void) { IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, gmac, irq), DT_INST_IRQ_BY_NAME(0, gmac, priority), queue0_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, gmac, irq)); #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q1, irq), DT_INST_IRQ_BY_NAME(0, q1, priority), queue1_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, q1, irq)); #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q2, irq), DT_INST_IRQ_BY_NAME(0, q1, priority), queue2_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, q2, irq)); #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q3, irq), DT_INST_IRQ_BY_NAME(0, q3, priority), queue3_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, q3, irq)); #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q4, irq), DT_INST_IRQ_BY_NAME(0, q4, priority), queue4_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, q4, irq)); #endif #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(0, q5, irq), DT_INST_IRQ_BY_NAME(0, q5, priority), queue5_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQ_BY_NAME(0, q5, irq)); #endif } PINCTRL_DT_INST_DEFINE(0); static const struct eth_sam_dev_cfg eth0_config = { .regs = (Gmac *)DT_INST_REG_ADDR(0), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), #ifdef CONFIG_SOC_FAMILY_ATMEL_SAM .clock_cfg = SAM_DT_INST_CLOCK_PMC_CFG(0), #endif .config_func = eth0_irq_config, .phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle)) }; static struct eth_sam_dev_data eth0_data = { #if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0)) .mac_addr = 
DT_INST_PROP(0, local_mac_address), #endif .queue_list = { { .que_idx = GMAC_QUE_0, .rx_desc_list = { .buf = rx_desc_que0, .len = ARRAY_SIZE(rx_desc_que0), }, .tx_desc_list = { .buf = tx_desc_que0, .len = ARRAY_SIZE(tx_desc_que0), }, .rx_frag_list = rx_frag_list_que0, #if GMAC_MULTIPLE_TX_PACKETS == 1 .tx_frag_list = { .buf = (uint32_t *)tx_frag_list_que0, .len = ARRAY_SIZE(tx_frag_list_que0), }, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .tx_frames = { .buf = (uint32_t *)tx_frame_list_que0, .len = ARRAY_SIZE(tx_frame_list_que0), }, #endif #endif #if GMAC_PRIORITY_QUEUE_NUM >= 1 }, { .que_idx = GMAC_QUE_1, .rx_desc_list = { .buf = rx_desc_que1, .len = ARRAY_SIZE(rx_desc_que1), }, .tx_desc_list = { .buf = tx_desc_que1, .len = ARRAY_SIZE(tx_desc_que1), }, #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 1 .rx_frag_list = rx_frag_list_que1, #if GMAC_MULTIPLE_TX_PACKETS == 1 .tx_frag_list = { .buf = (uint32_t *)tx_frag_list_que1, .len = ARRAY_SIZE(tx_frag_list_que1), }, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .tx_frames = { .buf = (uint32_t *)tx_frame_list_que1, .len = ARRAY_SIZE(tx_frame_list_que1), } #endif #endif #endif #endif #if GMAC_PRIORITY_QUEUE_NUM >= 2 }, { .que_idx = GMAC_QUE_2, .rx_desc_list = { .buf = rx_desc_que2, .len = ARRAY_SIZE(rx_desc_que2), }, .tx_desc_list = { .buf = tx_desc_que2, .len = ARRAY_SIZE(tx_desc_que2), }, #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 2 .rx_frag_list = rx_frag_list_que2, #if GMAC_MULTIPLE_TX_PACKETS == 1 .tx_frag_list = { .buf = (uint32_t *)tx_frag_list_que2, .len = ARRAY_SIZE(tx_frag_list_que2), }, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .tx_frames = { .buf = (uint32_t *)tx_frame_list_que2, .len = ARRAY_SIZE(tx_frame_list_que2), } #endif #endif #endif #endif #if GMAC_PRIORITY_QUEUE_NUM >= 3 }, { .que_idx = GMAC_QUE_3, .rx_desc_list = { .buf = rx_desc_que3, .len = ARRAY_SIZE(rx_desc_que3), }, .tx_desc_list = { .buf = tx_desc_que3, .len = ARRAY_SIZE(tx_desc_que3), }, #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 3 .rx_frag_list = rx_frag_list_que3, #if 
GMAC_MULTIPLE_TX_PACKETS == 1 .tx_frag_list = { .buf = (uint32_t *)tx_frag_list_que3, .len = ARRAY_SIZE(tx_frag_list_que3), }, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .tx_frames = { .buf = (uint32_t *)tx_frame_list_que3, .len = ARRAY_SIZE(tx_frame_list_que3), } #endif #endif #endif #endif #if GMAC_PRIORITY_QUEUE_NUM >= 4 }, { .que_idx = GMAC_QUE_4, .rx_desc_list = { .buf = rx_desc_que4, .len = ARRAY_SIZE(rx_desc_que4), }, .tx_desc_list = { .buf = tx_desc_que4, .len = ARRAY_SIZE(tx_desc_que4), }, #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 4 .rx_frag_list = rx_frag_list_que4, #if GMAC_MULTIPLE_TX_PACKETS == 1 .tx_frag_list = { .buf = (uint32_t *)tx_frag_list_que4, .len = ARRAY_SIZE(tx_frag_list_que4), }, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .tx_frames = { .buf = (uint32_t *)tx_frame_list_que4, .len = ARRAY_SIZE(tx_frame_list_que4), } #endif #endif #endif #endif #if GMAC_PRIORITY_QUEUE_NUM >= 5 }, { .que_idx = GMAC_QUE_5, .rx_desc_list = { .buf = rx_desc_que5, .len = ARRAY_SIZE(rx_desc_que5), }, .tx_desc_list = { .buf = tx_desc_que5, .len = ARRAY_SIZE(tx_desc_que5), }, #if GMAC_ACTIVE_PRIORITY_QUEUE_NUM >= 5 .rx_frag_list = rx_frag_list_que5, #if GMAC_MULTIPLE_TX_PACKETS == 1 .tx_frag_list = { .buf = (uint32_t *)tx_frag_list_que5, .len = ARRAY_SIZE(tx_frag_list_que5), }, #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) .tx_frames = { .buf = (uint32_t *)tx_frame_list_que5, .len = ARRAY_SIZE(tx_frame_list_que5), } #endif #endif #endif #endif } }, }; ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_initialize, NULL, &eth0_data, &eth0_config, CONFIG_ETH_INIT_PRIORITY, &eth_api, GMAC_MTU); #if defined(CONFIG_PTP_CLOCK_SAM_GMAC) struct ptp_context { const struct device *eth_dev; }; static struct ptp_context ptp_gmac_0_context; static int ptp_clock_sam_gmac_set(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config; Gmac *gmac = cfg->regs; gmac->GMAC_TSH = tm->_sec.high & 0xffff; 
gmac->GMAC_TSL = tm->_sec.low & 0xffffffff; gmac->GMAC_TN = tm->nanosecond & 0xffffffff; return 0; } static int ptp_clock_sam_gmac_get(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config; Gmac *gmac = cfg->regs; tm->second = ((uint64_t)(gmac->GMAC_TSH & 0xffff) << 32) | gmac->GMAC_TSL; tm->nanosecond = gmac->GMAC_TN; return 0; } static int ptp_clock_sam_gmac_adjust(const struct device *dev, int increment) { struct ptp_context *ptp_context = dev->data; const struct eth_sam_dev_cfg *const cfg = ptp_context->eth_dev->config; Gmac *gmac = cfg->regs; if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) { return -EINVAL; } if (increment < 0) { gmac->GMAC_TA = GMAC_TA_ADJ | GMAC_TA_ITDT(-increment); } else { gmac->GMAC_TA = GMAC_TA_ITDT(increment); } return 0; } static int ptp_clock_sam_gmac_rate_adjust(const struct device *dev, double ratio) { return -ENOTSUP; } static const struct ptp_clock_driver_api ptp_api = { .set = ptp_clock_sam_gmac_set, .get = ptp_clock_sam_gmac_get, .adjust = ptp_clock_sam_gmac_adjust, .rate_adjust = ptp_clock_sam_gmac_rate_adjust, }; static int ptp_gmac_init(const struct device *port) { const struct device *const eth_dev = DEVICE_DT_INST_GET(0); struct eth_sam_dev_data *dev_data = eth_dev->data; struct ptp_context *ptp_context = port->data; dev_data->ptp_clock = port; ptp_context->eth_dev = eth_dev; return 0; } DEVICE_DEFINE(gmac_ptp_clock_0, PTP_CLOCK_NAME, ptp_gmac_init, NULL, &ptp_gmac_0_context, NULL, POST_KERNEL, CONFIG_PTP_CLOCK_INIT_PRIORITY, &ptp_api); #endif /* CONFIG_PTP_CLOCK_SAM_GMAC */ ```
/content/code_sandbox/drivers/ethernet/eth_sam_gmac.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
19,557
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_NXP_S32_NETC_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_NXP_S32_NETC_PRIV_H_ #define NETC_F3_PSICFGR0_SIVC_CVLAN_BIT BIT(0) /* 0x8100 */ #define NETC_F3_PSICFGR0_SIVC_SVLAN_BIT BIT(1) /* 0x88A8 */ #define NETC_MIN_RING_LEN 8U #define NETC_MIN_RING_BUF_SIZE 64U #define NETC_SWITCH_IDX 0U #define NETC_SWITCH_PORT_IDX 0U #define NETC_SWITCH_PORT_AGING 300U #define NETC_ETH_0_RX_CLK_IDX 49U #define NETC_ETH_1_RX_CLK_IDX 51U #define NETC_MSIX_EVENTS_COUNT 2U /* Timeout for various operations */ #define NETC_TIMEOUT K_MSEC(20) /* Helper macros to convert from Zephyr PHY speed to NETC baudrate/duplex types */ #define PHY_TO_NETC_SPEED(x) \ (PHY_LINK_IS_SPEED_1000M(x) ? ETHTRCV_BAUD_RATE_1000MBIT : \ (PHY_LINK_IS_SPEED_100M(x) ? ETHTRCV_BAUD_RATE_100MBIT : ETHTRCV_BAUD_RATE_10MBIT)) #define PHY_TO_NETC_DUPLEX_MODE(x) \ (PHY_LINK_IS_FULL_DUPLEX(x) ? NETC_ETHSWT_PORT_FULL_DUPLEX : NETC_ETHSWT_PORT_HALF_DUPLEX) /* * Get the first MRU mailbox address for an specific mbox handle * mbox[0] addr = MRU base addr + (channel channel offset), with channel=1..N */ #define MRU_CHANNEL_OFFSET 0x1000 #define MRU_MBOX_ADDR(node, name) \ (DT_REG_ADDR(DT_MBOX_CTLR_BY_NAME(node, name)) \ + ((DT_MBOX_CHANNEL_BY_NAME(node, name) + 1) * MRU_CHANNEL_OFFSET)) #define NETC_MSIX(node, name, cb) \ { \ .handler = cb, \ .mbox_spec = MBOX_DT_SPEC_GET(node, name), \ } /* Tx/Rx ENETC ring definitions */ #define _NETC_RING(n, idx, len, buf_size, prefix1, prefix2) \ static Netc_Eth_Ip_##prefix1##BDRType nxp_s32_eth##n##_##prefix2##ring##idx##_desc[len] \ __nocache __aligned(FEATURE_NETC_BUFFDESCR_ALIGNMENT_BYTES); \ static uint8_t nxp_s32_eth##n##_##prefix2##ring##idx##_buf[len * buf_size] \ __nocache __aligned(FEATURE_NETC_BUFF_ALIGNMENT_BYTES) #define NETC_RX_RING(n, idx, len, buf_size) _NETC_RING(n, idx, len, buf_size, Rx, rx) #define NETC_TX_RING(n, idx, len, buf_size) _NETC_RING(n, idx, len, buf_size, Tx, tx) /* Helper function to generate an 
Ethernet MAC address for a given ENETC instance */ #define FREESCALE_OUI_B0 0x00 #define FREESCALE_OUI_B1 0x04 #define FREESCALE_OUI_B2 0x9f #define _NETC_GENERATE_MAC_ADDRESS_RANDOM \ gen_random_mac(mac_addr, FREESCALE_OUI_B0, FREESCALE_OUI_B1, FREESCALE_OUI_B2) #define _NETC_GENERATE_MAC_ADDRESS_UNIQUE(n) \ do { \ uint32_t id = 0x001100; \ \ mac_addr[0] = FREESCALE_OUI_B0; \ mac_addr[1] = FREESCALE_OUI_B1; \ /* Set MAC address locally administered bit (LAA) */ \ mac_addr[2] = FREESCALE_OUI_B2 | 0x02; \ mac_addr[3] = (id >> 16) & 0xff; \ mac_addr[4] = (id >> 8) & 0xff; \ mac_addr[5] = (id + n) & 0xff; \ } while (0) #define NETC_GENERATE_MAC_ADDRESS(n) \ static void nxp_s32_eth##n##_generate_mac(uint8_t mac_addr[6]) \ { \ COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), \ (_NETC_GENERATE_MAC_ADDRESS_RANDOM), \ (COND_CODE_0(DT_INST_NODE_HAS_PROP(n, local_mac_address),\ (_NETC_GENERATE_MAC_ADDRESS_UNIQUE(n)), \ (ARG_UNUSED(mac_addr))))); \ } #define NETC_SI_NXP_S32_HW_INSTANCE_CHECK(i, n) \ ((DT_INST_REG_ADDR(n) == IP_NETC__ENETC0_SI##i##_BASE) ? 
i : 0) #define NETC_SI_NXP_S32_HW_INSTANCE(n) \ LISTIFY(__DEBRACKET FEATURE_NETC_ETH_NUMBER_OF_CTRLS, \ NETC_SI_NXP_S32_HW_INSTANCE_CHECK, (|), n) /* Helper macros to concatenate tokens that require further expansions */ #define _CONCAT3(a, b, c) DT_CAT3(a, b, c) struct nxp_s32_eth_msix { void (*handler)(uint8_t chan, const uint32 *buf, uint8_t buf_size); struct mbox_dt_spec mbox_spec; }; struct nxp_s32_eth_config { const Netc_Eth_Ip_ConfigType netc_cfg; Netc_Eth_Ip_MACFilterHashTableEntryType *mac_filter_hash_table; uint8_t si_idx; uint8_t port_idx; const struct device *phy_dev; uint8_t tx_ring_idx; uint8_t rx_ring_idx; void (*generate_mac)(uint8_t *mac_addr); struct nxp_s32_eth_msix msix[NETC_MSIX_EVENTS_COUNT]; const struct pinctrl_dev_config *pincfg; }; struct nxp_s32_eth_data { struct net_if *iface; uint8_t mac_addr[6]; struct k_mutex tx_mutex; struct k_sem rx_sem; struct k_thread rx_thread; K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_NXP_S32_RX_THREAD_STACK_SIZE); }; int nxp_s32_eth_initialize_common(const struct device *dev); int nxp_s32_eth_tx(const struct device *dev, struct net_pkt *pkt); enum ethernet_hw_caps nxp_s32_eth_get_capabilities(const struct device *dev); void nxp_s32_eth_mcast_filter(const struct device *dev, const struct ethernet_filter *filter); int nxp_s32_eth_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config); extern void Netc_Eth_Ip_MSIX_Rx(uint8_t si_idx); #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_NXP_S32_NETC_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_nxp_s32_netc_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,587
```objective-c /* */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_SMSC91X_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_SMSC91X_PRIV_H_ #include <zephyr/sys/util.h> /* All Banks, Offset 0xe: Bank Select Register */ #define BSR 0xe #define BSR_BANK_MASK GENMASK(2, 0) /* Which bank is currently selected */ #define BSR_IDENTIFY 0x33 #define BSR_IDENTIFY_MASK GENMASK(15, 8) /* Bank 0, Offset 0x0: Transmit Control Register */ #define TCR 0x0 #define TCR_TXENA 0x0001 /* Enable/disable transmitter */ #define TCR_PAD_EN 0x0080 /* Pad TX frames to 64 bytes */ /* Bank 0, Offset 0x02: EPH status register */ #define EPHSR 0x2 #define EPHSR_TX_SUC 0x0001 /* Last TX was successful */ /* Bank 0, Offset 0x4: Receive Control Register */ #define RCR 0x4 #define RCR_PRMS 0x0002 /* Promiscuous mode */ #define RCR_RXEN 0x0100 /* Enable/disable receiver */ #define RCR_STRIP_CRC 0x0200 /* Strip CRC from RX packets */ #define RCR_SOFT_RST 0x8000 /* Software reset */ /* Bank0, Offset 0x6: Counter Register */ #define ECR 0x6 #define ECR_SNGLCOL_MASK GENMASK(3, 0) /* Single collisions */ #define ECR_MULCOL_MASK GENMASK(7, 4) /* Multiple collisions */ #define ECR_TX_DEFR_MASK GENMASK(11, 8) /* Transmit deferrals */ #define ECR_EXC_DEFR_MASK GENMASK(15, 12) /* Excessive deferrals */ /* Bank 0, Offset 0x8: Memory information register */ #define MIR 0x8 #define MIR_SIZE_MASK GENMASK(7, 0) /* Memory size (2k pages) */ #define MIR_FREE_MASK GENMASK(15, 8) /* Memory free (2k pages) */ /* bank 0, offset 0xa: receive/phy control register */ #define RPCR 0xa #define RPCR_ANEG 0x0800 /* Put PHY in autonegotiation mode */ #define RPCR_DPLX 0x1000 /* Put PHY in full-duplex mode */ #define RPCR_SPEED 0x2000 /* Manual speed selection */ #define RPCR_LSA_MASK GENMASK(7, 5) #define RPCR_LSB_MASK GENMASK(4, 2) #define RPCR_LED_LINK_ANY 0x0 /* 10baseT or 100baseTX link detected */ #define RPCR_LED_LINK_10 0x2 /* 10baseT link detected */ #define RPCR_LED_LINK_FDX 0x3 /* Full-duplex link detect */ #define RPCR_LED_LINK_100 
0x5 /* 100baseTX link detected */ #define RPCR_LED_ACT_ANY 0x4 /* TX or RX activity detected */ #define RPCR_LED_ACT_RX 0x6 /* RX activity detected */ #define RPCR_LED_ACT_TX 0x7 /* TX activity detected */ /* Bank 1, Offset 0x0: Configuration Register */ #define CR 0x0 #define CR_EPH_POWER_EN 0x8000 /* Disable/enable low power mode */ /* Bank 1, Offset 0x2: Base Address Register */ #define BAR 0x2 /* Bank 1, Offsets 0x4: Individual Address Registers */ #define IAR0 0x4 #define IAR1 0x5 #define IAR2 0x6 #define IAR3 0x7 #define IAR4 0x8 #define IAR5 0x9 /* Bank 1, Offset 0xc: Control Register */ #define CTR 0xc #define CTR_LE_ENABLE 0x0080 /* Link error causes EPH interrupt */ #define CTR_AUTO_RELEASE 0x0800 /* Automatically release TX packets */ /* Bank 2, Offset 0x0: MMU Command Register */ #define MMUCR 0x0 #define MMUCR_BUSY 0x0001 /* MMU is busy */ #define MMUCR_CMD_MASK GENMASK(7, 5) /* MMU command mask */ #define MMUCR_CMD_TX_ALLOC 1 /* Alloc TX memory (256b chunks) */ #define MMUCR_CMD_MMU_RESET 2 /* Reset MMU */ #define MMUCR_CMD_RELEASE 4 /* Remove and release from RX FIFO */ #define MMUCR_CMD_RELEASE_PKT 5 /* Release packet specified in PNR */ #define MMUCR_CMD_ENQUEUE 6 /* Enqueue packet for TX */ /* Bank2, Offset 0x2: Packet Number Register */ #define PNR 0x2 #define PNR_MASK GENMASK(5, 0) /* Bank2, Offset 0x3: Allocation Result Register */ #define ARR 0x3 #define ARR_FAILED 0x80 #define ARR_MASK GENMASK(5, 0) /* Bank 2, Offset 0x4: FIFO Ports Register */ #define FIFO 0x04 #define FIFO_TX 0x4 #define FIFO_RX 0x5 #define FIFO_EMPTY 0x80 /* FIFO empty */ #define FIFO_PACKET_MASK GENMASK(5, 0) /* Packet number mask */ /* Bank2, Offset 0x6: Point Register */ #define PTR 0x6 #define PTR_MASK GENMASK(10, 0) /* Address accessible within TX/RX */ #define PTR_NOT_EMPTY 0x0800 /* Write Data FIFO not empty */ #define PTR_READ 0x2000 /* Set read/write */ #define PTR_AUTO_INCR 0x4000 /* Auto increment on read/write */ #define PTR_RCV 0x8000 /* Read/write to/from 
RX/TX */ /* Bank2, Offset 0x8: Data register */ #define DATA0 0x8 #define DATA1 0xa /* Bank 2, Offset 0xc: Interrupt Status Registers */ #define IST 0xc /* read only */ #define ACK 0xc /* write only */ #define MSK 0xd #define RCV_INT 0x0001 /* RX */ #define TX_INT 0x0002 /* TX */ #define TX_EMPTY_INT 0x0004 /* TX empty */ #define ALLOC_INT 0x0008 /* Allocation complete */ #define RX_OVRN_INT 0x0010 /* RX overrun */ #define EPH_INT 0x0020 /* EPH interrupt */ #define ERCV_INT 0x0040 /* Early RX */ #define MD_INT 0x0080 /* MII */ /* Bank 3, Offset 0x8: Management interface register */ #define MGMT 0x8 #define MGMT_MDO 0x0001 /* MII management output */ #define MGMT_MDI 0x0002 /* MII management input */ #define MGMT_MCLK 0x0004 /* MII management clock */ #define MGMT_MDOE 0x0008 /* MII management output enable */ /* Bank 3, Offset 0xa: Revision Register */ #define REV 0xa #define REV_CHIP_MASK GENMASK(7, 4) #define REV_REV_MASK GENMASK(3, 0) /* Control Byte */ #define CTRL_CRC 0x10 /* Frame has CRC */ #define CTRL_ODD 0x20 /* Frame has odd bytes count */ /* Receive frame status */ #define RX_TOOSHORT 0x0400 /* Frame was too short */ #define RX_TOOLNG 0x0800 /* Frame was too long */ #define RX_ODDFRM 0x1000 /* Frame has odd number of bytes */ #define RX_BADCRC 0x2000 /* Frame failed CRC */ #define RX_ALIGNERR 0x8000 /* Frame has alignment error */ #define RX_LEN_MASK GENMASK(10, 0) /* Length of status word + byte count + control bytes for packets */ #define PKT_CTRL_DATA_LEN 6 #endif ```
/content/code_sandbox/drivers/ethernet/eth_smsc91x_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,798
```c /* * */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_adin2111, CONFIG_ETHERNET_LOG_LEVEL); #include <zephyr/net/net_pkt.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/phy.h> #if CONFIG_ETH_ADIN2111_SPI_CFG0 #include <zephyr/sys/crc.h> #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ #include <string.h> #include <errno.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/phy.h> #include <zephyr/drivers/ethernet/eth_adin2111.h> #include "phy/phy_adin2111_priv.h" #include "eth_adin2111_priv.h" #define DT_DRV_COMPAT adi_adin2111 /* SPI Communication check retry delay */ #define ADIN2111_DEV_AWAIT_DELAY_POLL_US 100U /* Number of retries SPI Communication check */ #define ADIN2111_DEV_AWAIT_RETRY_COUNT 200U /* ADIN RESETC check retry delay */ #define ADIN2111_RESETC_AWAIT_DELAY_POLL_US 100U /* Number of retries for ADIN RESETC check */ #define ADIN2111_RESETC_AWAIT_RETRY_COUNT 200U /* Boot delay for clocks stabilisation (maximum 90ms) */ #define ADIN2111_HW_BOOT_DELAY_MS 100 /* MAC Address Rule and DA Filter multicast slot/idx */ #define ADIN2111_MULTICAST_ADDR_SLOT 0U /* MAC Address Rule and DA Filter broadcast slot/idx */ #define ADIN2111_BROADCAST_ADDR_SLOT 1U /* MAC Address Rule and DA Filter Port 1 slot/idx */ #define ADIN2111_UNICAST_P1_ADDR_SLOT 2U /* MAC Address Rule and DA Filter Port 2 slot/idx */ #define ADIN2111_UNICAST_P2_ADDR_SLOT 3U /* Free slots for further filtering */ #define ADIN2111_FILTER_FIRST_SLOT 4U #define ADIN2111_FILTER_SLOTS 16U /* As per RM rev. A table 3, t3 >= 50ms, delay for SPI interface to be ready */ #define ADIN2111_SPI_ACTIVE_DELAY_MS 50U /* As per RM rev. A page 20: approximately 10 ms (maximum) for internal logic to be ready. 
*/ #define ADIN2111_SW_RESET_DELAY_MS 10U int eth_adin2111_mac_reset(const struct device *dev) { uint32_t val; int ret; ret = eth_adin2111_reg_write(dev, ADIN2111_SOFT_RST_REG, ADIN2111_SWRESET_KEY1); if (ret < 0) { return ret; } ret = eth_adin2111_reg_write(dev, ADIN2111_SOFT_RST_REG, ADIN2111_SWRESET_KEY2); if (ret < 0) { return ret; } ret = eth_adin2111_reg_write(dev, ADIN2111_SOFT_RST_REG, ADIN2111_SWRELEASE_KEY1); if (ret < 0) { return ret; } ret = eth_adin2111_reg_write(dev, ADIN2111_SOFT_RST_REG, ADIN2111_SWRELEASE_KEY2); if (ret < 0) { return ret; } ret = eth_adin2111_reg_read(dev, ADIN1110_MAC_RST_STATUS_REG, &val); if (ret < 0) { return ret; } if (val == 0) { return -EBUSY; } return 0; } int eth_adin2111_reg_update(const struct device *dev, const uint16_t reg, uint32_t mask, uint32_t data) { uint32_t val; int ret; ret = eth_adin2111_reg_read(dev, reg, &val); if (ret < 0) { return ret; } val &= ~mask; val |= mask & data; return eth_adin2111_reg_write(dev, reg, val); } struct net_if *eth_adin2111_get_iface(const struct device *dev, const uint16_t port_idx) { struct adin2111_data *ctx = dev->data; return ((struct adin2111_port_data *)ctx->port[port_idx]->data)->iface; } int eth_adin2111_lock(const struct device *dev, k_timeout_t timeout) { struct adin2111_data *ctx = dev->data; return k_mutex_lock(&ctx->lock, timeout); } int eth_adin2111_unlock(const struct device *dev) { struct adin2111_data *ctx = dev->data; return k_mutex_unlock(&ctx->lock); } static inline bool eth_adin2111_oa_get_parity(const uint32_t x) { uint32_t y; y = x ^ (x >> 1); y = y ^ (y >> 2); y = y ^ (y >> 4); y = y ^ (y >> 8); y = y ^ (y >> 16); return !(y & 1); } int eth_adin2111_oa_spi_xfer(const struct device *dev, uint8_t *buf_rx, uint8_t *buf_tx, int len) { const struct adin2111_config *cfg = dev->config; struct spi_buf tx_buf[1]; struct spi_buf rx_buf[1]; struct spi_buf_set tx; struct spi_buf_set rx; int ret; tx_buf[0].buf = buf_tx; tx_buf[0].len = len; rx_buf[0].buf = buf_rx; 
rx_buf[0].len = len; rx.buffers = rx_buf; rx.count = 1; tx.buffers = tx_buf; tx.count = 1; ret = spi_transceive_dt(&cfg->spi, &tx, &rx); if (ret < 0) { LOG_ERR("ERRR dma!\n"); return ret; } return 0; } static int eth_adin2111_reg_read_oa(const struct device *dev, const uint16_t reg, uint32_t *val) { struct adin2111_data *ctx = dev->data; uint32_t pval; uint32_t *hdr = (uint32_t *)ctx->oa_tx_buf; int len; int ret; *hdr = reg << 8; if (reg >= 0x30) { *hdr |= ADIN2111_OA_CTL_MMS; } *hdr |= eth_adin2111_oa_get_parity(*hdr); *hdr = sys_cpu_to_be32(*hdr); len = (ctx->oa_prot) ? ADIN2111_OA_CTL_LEN_PROT : ADIN2111_OA_CTL_LEN; ret = eth_adin2111_oa_spi_xfer(dev, ctx->oa_rx_buf, ctx->oa_tx_buf, len); if (ret < 0) { return ret; } *val = sys_be32_to_cpu(*(uint32_t *)&ctx->oa_rx_buf[8]); /* In protected mode read data is followed by its compliment value */ if (ctx->oa_prot) { pval = sys_be32_to_cpu(*(uint32_t *)&ctx->oa_rx_buf[12]); if (*val != ~pval) { LOG_ERR("OA protected mode rx error !"); return -1; } } return 0; } static int eth_adin2111_reg_write_oa(const struct device *dev, const uint16_t reg, uint32_t val) { struct adin2111_data *ctx = dev->data; uint32_t pval; uint32_t *hdr = (uint32_t *)ctx->oa_tx_buf; int len; int ret; *hdr = reg << 8 | ADIN2111_OA_CTL_WNR; if (reg >= 0x30) { *hdr |= ADIN2111_OA_CTL_MMS; } *hdr |= eth_adin2111_oa_get_parity(*hdr); *hdr = sys_cpu_to_be32(*hdr); len = (ctx->oa_prot) ? 
ADIN2111_OA_CTL_LEN_PROT : ADIN2111_OA_CTL_LEN; *(uint32_t *)&ctx->oa_tx_buf[4] = sys_cpu_to_be32(val); if (ctx->oa_prot) { *(uint32_t *)&ctx->oa_tx_buf[8] = sys_cpu_to_be32(~val); } ret = eth_adin2111_oa_spi_xfer(dev, ctx->oa_rx_buf, ctx->oa_tx_buf, len); if (ret < 0) { return ret; } if (ctx->oa_prot) { pval = sys_be32_to_cpu(*(uint32_t *)&ctx->oa_rx_buf[12]); if (val != ~pval) { LOG_ERR("OA protected mode tx error !"); return -1; } } return 0; } int eth_adin2111_oa_data_read(const struct device *dev, const uint16_t port_idx) { struct adin2111_data *ctx = dev->data; struct net_if *iface = ((struct adin2111_port_data *)ctx->port[port_idx]->data)->iface; struct net_pkt *pkt; uint32_t hdr, ftr; int i, len, rx_pos, ret, rca, swo; ret = eth_adin2111_reg_read(dev, ADIN2111_BUFSTS, &rca); if (ret < 0) { LOG_ERR("can't read BUFSTS"); return -EIO; } rca &= ADIN2111_BUFSTS_RCA_MASK; /* Preare all tx headers */ for (i = 0, len = 0; i < rca; ++i) { hdr = ADIN2111_OA_DATA_HDR_DNC; hdr |= eth_adin2111_oa_get_parity(hdr); *(uint32_t *)&ctx->oa_tx_buf[len] = sys_cpu_to_be32(hdr); len += sizeof(uint32_t) + ctx->oa_cps; } ret = eth_adin2111_oa_spi_xfer(dev, ctx->oa_rx_buf, ctx->oa_tx_buf, len); if (ret < 0) { LOG_ERR("SPI xfer failed"); return ret; } for (i = 0, rx_pos = 0; i < rca; ++i) { ftr = sys_be32_to_cpu(*(uint32_t *)&ctx->oa_rx_buf[rx_pos + ctx->oa_cps]); if (eth_adin2111_oa_get_parity(ftr)) { LOG_ERR("OA RX: Footer parity error !"); return -EIO; } if (!(ftr & ADIN2111_OA_DATA_FTR_SYNC)) { LOG_ERR("OA RX: Configuration not in sync !"); return -EIO; } if (!(ftr & ADIN2111_OA_DATA_FTR_DV)) { LOG_DBG("OA RX: Data chunk not valid, skip !"); goto update_pos; } if (ftr & ADIN2111_OA_DATA_FTR_SV) { swo = (ftr & ADIN2111_OA_DATA_FTR_SWO_MSK) >> ADIN2111_OA_DATA_FTR_SWO; if (swo != 0) { LOG_ERR("OA RX: Misalignbed start of frame !"); return -EIO; } /* Reset store cursor */ ctx->scur = 0; } len = (ftr & ADIN2111_OA_DATA_FTR_EV) ? 
((ftr & ADIN2111_OA_DATA_FTR_EBO_MSK) >> ADIN2111_OA_DATA_FTR_EBO) + 1 : ctx->oa_cps; memcpy(&ctx->buf[ctx->scur], &ctx->oa_rx_buf[rx_pos], len); ctx->scur += len; if (ftr & ADIN2111_OA_DATA_FTR_EV) { pkt = net_pkt_rx_alloc_with_buffer(iface, CONFIG_ETH_ADIN2111_BUFFER_SIZE, AF_UNSPEC, 0, K_MSEC(CONFIG_ETH_ADIN2111_TIMEOUT)); if (!pkt) { LOG_ERR("OA RX: cannot allcate packet space, skipping."); return -EIO; } /* Skipping CRC32 */ ret = net_pkt_write(pkt, ctx->buf, ctx->scur - sizeof(uint32_t)); if (ret < 0) { net_pkt_unref(pkt); LOG_ERR("Failed to write pkt, scur %d, err %d", ctx->scur, ret); return ret; } ret = net_recv_data(iface, pkt); if (ret < 0) { net_pkt_unref(pkt); LOG_ERR("Port %u failed to enqueue frame to RX queue, %d", port_idx, ret); return ret; } } update_pos: rx_pos += ctx->oa_cps + sizeof(uint32_t); } return ret; } /* * Setting up for a single dma transfer. */ static int eth_adin2111_send_oa_frame(const struct device *dev, struct net_pkt *pkt, const uint16_t port_idx) { struct adin2111_data *ctx = dev->data; uint16_t clen, len = net_pkt_get_len(pkt); uint32_t hdr; uint8_t chunks, i; int ret, txc, cur; chunks = len / ctx->oa_cps; if (len % ctx->oa_cps) { chunks++; } ret = eth_adin2111_reg_read(dev, ADIN2111_BUFSTS, &txc); if (ret < 0) { LOG_ERR("Cannot read txc"); return -EIO; } txc = (txc & ADIN2111_BUFSTS_TXC_MASK) >> ADIN2111_BUFSTS_TXC; if (txc < chunks) { return -EIO; } /* Prepare for single dma transfer */ for (i = 1, cur = 0; i <= chunks; i++) { hdr = ADIN2111_OA_DATA_HDR_DNC | ADIN2111_OA_DATA_HDR_DV | ADIN2111_OA_DATA_HDR_NORX; hdr |= (!!port_idx << ADIN2111_OA_DATA_HDR_VS); if (i == 1) { hdr |= ADIN2111_OA_DATA_HDR_SV; } if (i == chunks) { hdr |= ADIN2111_OA_DATA_HDR_EV; hdr |= (ctx->oa_cps - 1) << ADIN2111_OA_DATA_HDR_EBO; } hdr |= eth_adin2111_oa_get_parity(hdr); *(uint32_t *)&ctx->oa_tx_buf[cur] = sys_cpu_to_be32(hdr); cur += sizeof(uint32_t); clen = len > ctx->oa_cps ? 
ctx->oa_cps : len; ret = net_pkt_read(pkt, &ctx->oa_tx_buf[cur], clen); if (ret < 0) { LOG_ERR("Cannot read from tx packet"); return ret; } cur += ctx->oa_cps; len -= clen; } ret = eth_adin2111_oa_spi_xfer(dev, ctx->oa_rx_buf, ctx->oa_tx_buf, cur); if (ret < 0) { LOG_ERR("Error on SPI xfer"); return ret; } return 0; } static int eth_adin2111_reg_read_generic(const struct device *dev, const uint16_t reg, uint32_t *val) { const struct adin2111_config *cfg = dev->config; size_t header_len = ADIN2111_READ_HEADER_SIZE; size_t read_len = sizeof(uint32_t); int ret; #if CONFIG_ETH_ADIN2111_SPI_CFG0 uint8_t rcv_crc; uint8_t comp_crc; uint8_t buf[ADIN2111_REG_READ_BUF_SIZE_CRC] = { 0 }; #else uint8_t buf[ADIN2111_REG_READ_BUF_SIZE] = { 0 }; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ /* spi header */ *(uint16_t *)buf = htons((ADIN2111_READ_TXN_CTRL | reg)); #if CONFIG_ETH_ADIN2111_SPI_CFG0 buf[2] = crc8_ccitt(0, buf, ADIN2111_SPI_HEADER_SIZE); /* TA */ buf[3] = 0U; ++header_len; ++read_len; #else /* TA */ buf[2] = 0U; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ const struct spi_buf tx_buf = { .buf = buf, .len = header_len + read_len }; const struct spi_buf rx_buf = { .buf = buf, .len = header_len + read_len }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1U }; const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1U }; ret = spi_transceive_dt(&cfg->spi, &tx, &rx); if (ret < 0) { return ret; } #if CONFIG_ETH_ADIN2111_SPI_CFG0 comp_crc = crc8_ccitt(0, &buf[header_len], sizeof(uint32_t)); rcv_crc = buf[header_len + sizeof(uint32_t)]; if (rcv_crc != comp_crc) { /* invalid crc */ return -EIO; } #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ *val = ntohl((*(uint32_t *)(&buf[header_len]))); return ret; } static int eth_adin2111_reg_write_generic(const struct device *dev, const uint16_t reg, const uint32_t val) { const struct adin2111_config *cfg = dev->config; size_t header_size = ADIN2111_WRITE_HEADER_SIZE; size_t data_size = sizeof(uint32_t); #if 
CONFIG_ETH_ADIN2111_SPI_CFG0 uint8_t buf[ADIN2111_REG_WRITE_BUF_SIZE_CRC] = { 0 }; #else uint8_t buf[ADIN2111_REG_WRITE_BUF_SIZE] = { 0 }; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ /* spi header */ *(uint16_t *)buf = htons((ADIN2111_WRITE_TXN_CTRL | reg)); #if CONFIG_ETH_ADIN2111_SPI_CFG0 buf[2] = crc8_ccitt(0, buf, header_size); ++header_size; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ /* reg */ *(uint32_t *)(buf + header_size) = htonl(val); #if CONFIG_ETH_ADIN2111_SPI_CFG0 buf[header_size + data_size] = crc8_ccitt(0, &buf[header_size], data_size); ++data_size; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ const struct spi_buf spi_tx_buf = { .buf = buf, .len = header_size + data_size }; const struct spi_buf_set tx = { .buffers = &spi_tx_buf, .count = 1U }; return spi_write_dt(&cfg->spi, &tx); } int eth_adin2111_reg_read(const struct device *dev, const uint16_t reg, uint32_t *val) { struct adin2111_data *ctx = dev->data; int rval; if (ctx->oa) { rval = eth_adin2111_reg_read_oa(dev, reg, val); } else { rval = eth_adin2111_reg_read_generic(dev, reg, val); } return rval; } int eth_adin2111_reg_write(const struct device *dev, const uint16_t reg, const uint32_t val) { struct adin2111_data *ctx = dev->data; int rval; if (ctx->oa) { rval = eth_adin2111_reg_write_oa(dev, reg, val); } else { rval = eth_adin2111_reg_write_generic(dev, reg, val); } return rval; } static int adin2111_read_fifo(const struct device *dev, const uint16_t port_idx) { const struct adin2111_config *cfg = dev->config; struct adin2111_data *ctx = dev->data; struct net_if *iface; struct net_pkt *pkt; uint16_t fsize_reg = ((port_idx == 0U) ? ADIN2111_P1_RX_FSIZE : ADIN2111_P2_RX_FSIZE); uint16_t rx_reg = ((port_idx == 0U) ? 
ADIN2111_P1_RX : ADIN2111_P2_RX); uint32_t fsize; uint32_t fsize_real; uint32_t padding_len; #if CONFIG_ETH_ADIN2111_SPI_CFG0 uint8_t cmd_buf[ADIN2111_FIFO_READ_CMD_BUF_SIZE_CRC] = { 0 }; #else uint8_t cmd_buf[ADIN2111_FIFO_READ_CMD_BUF_SIZE] = { 0 }; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ int ret; iface = ((struct adin2111_port_data *)ctx->port[port_idx]->data)->iface; /* get received frame size in bytes */ ret = eth_adin2111_reg_read(dev, fsize_reg, &fsize); if (ret < 0) { eth_stats_update_errors_rx(iface); LOG_ERR("Port %u failed to read RX FSIZE, %d", port_idx, ret); return ret; } /* burst read must be in multiples of 4 */ padding_len = ((fsize % 4) == 0) ? 0U : (ROUND_UP(fsize, 4U) - fsize); /* actual frame length is FSIZE - FRAME HEADER - CRC32 */ fsize_real = fsize - (ADIN2111_FRAME_HEADER_SIZE + sizeof(uint32_t)); /* spi header */ *(uint16_t *)cmd_buf = htons((ADIN2111_READ_TXN_CTRL | rx_reg)); #if CONFIG_ETH_ADIN2111_SPI_CFG0 cmd_buf[2] = crc8_ccitt(0, cmd_buf, ADIN2111_SPI_HEADER_SIZE); /* TA */ cmd_buf[3] = 0U; #else /* TA */ cmd_buf[2] = 0U; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ const struct spi_buf tx_buf = { .buf = cmd_buf, .len = sizeof(cmd_buf) }; const struct spi_buf rx_buf[3] = { {.buf = NULL, .len = sizeof(cmd_buf) + ADIN2111_FRAME_HEADER_SIZE}, {.buf = ctx->buf, .len = fsize_real}, {.buf = NULL, .len = padding_len } }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1U }; const struct spi_buf_set rx = { .buffers = rx_buf, .count = ((padding_len == 0U) ? 
2U : 3U) }; ret = spi_transceive_dt(&cfg->spi, &tx, &rx); if (ret < 0) { eth_stats_update_errors_rx(iface); LOG_ERR("Port %u failed to read RX FIFO, %d", port_idx, ret); return ret; } pkt = net_pkt_rx_alloc_with_buffer(iface, fsize_real, AF_UNSPEC, 0, K_MSEC(CONFIG_ETH_ADIN2111_TIMEOUT)); if (!pkt) { eth_stats_update_errors_rx(iface); LOG_ERR("Port %u failed to alloc frame RX buffer, %u bytes", port_idx, fsize_real); return -ENOMEM; } ret = net_pkt_write(pkt, ctx->buf, fsize_real); if (ret < 0) { eth_stats_update_errors_rx(iface); net_pkt_unref(pkt); LOG_ERR("Port %u failed to fill RX frame, %d", port_idx, ret); return ret; } ret = net_recv_data(iface, pkt); if (ret < 0) { eth_stats_update_errors_rx(iface); net_pkt_unref(pkt); LOG_ERR("Port %u failed to enqueue frame to RX queue, %d", port_idx, ret); return ret; } eth_stats_update_bytes_rx(iface, fsize_real); eth_stats_update_pkts_rx(iface); return ret; } static inline void adin2111_port_on_phyint(const struct device *dev) { const struct adin2111_port_config *cfg = dev->config; struct adin2111_port_data *data = dev->data; struct phy_link_state state; if (phy_adin2111_handle_phy_irq(cfg->phy, &state) < 0) { /* no change or error */ return; } if (state.is_up) { net_eth_carrier_on(data->iface); } else { net_eth_carrier_off(data->iface); } } static void adin2111_offload_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); const struct device *dev = p1; struct adin2111_data *ctx = dev->data; const struct adin2111_config *adin_cfg = dev->config; bool is_adin2111 = (adin_cfg->id == ADIN2111_MAC); uint32_t status0; uint32_t status1; int ret; for (;;) { /* await INT */ k_sem_take(&ctx->offload_sem, K_FOREVER); /* lock device */ eth_adin2111_lock(dev, K_FOREVER); /* disable interrupts */ ret = eth_adin2111_reg_write(dev, ADIN2111_IMASK0, UINT32_MAX); if (ret < 0) { goto continue_unlock; } ret = eth_adin2111_reg_write(dev, ADIN2111_IMASK1, UINT32_MAX); if (ret < 0) { goto continue_unlock; } /* read 
interrupts */ ret = eth_adin2111_reg_read(dev, ADIN2111_STATUS0, &status0); if (ret < 0) { goto continue_unlock; } ret = eth_adin2111_reg_read(dev, ADIN2111_STATUS1, &status1); if (ret < 0) { goto continue_unlock; } if (!ctx->oa) { #if CONFIG_ETH_ADIN2111_SPI_CFG0 if (status0 & ADIN2111_STATUS1_SPI_ERR) { LOG_WRN("Detected TX SPI CRC error"); } #endif } /* handle port 1 phy interrupts */ if (status0 & ADIN2111_STATUS0_PHYINT) { adin2111_port_on_phyint(ctx->port[0]); } /* handle port 2 phy interrupts */ if ((status1 & ADIN2111_STATUS1_PHYINT) && is_adin2111) { adin2111_port_on_phyint(ctx->port[1]); } if (ctx->oa) { if (status1 & ADIN2111_STATUS1_P1_RX_RDY) { ret = eth_adin2111_oa_data_read(dev, 0); if (ret < 0) { break; } } if (status1 & ADIN2111_STATUS1_P2_RX_RDY) { ret = eth_adin2111_oa_data_read(dev, 1); if (ret < 0) { break; } } goto continue_unlock; } /* handle port 1 rx */ if (status1 & ADIN2111_STATUS1_P1_RX_RDY) { do { ret = adin2111_read_fifo(dev, 0U); if (ret < 0) { break; } ret = eth_adin2111_reg_read(dev, ADIN2111_STATUS1, &status1); if (ret < 0) { goto continue_unlock; } } while (!!(status1 & ADIN2111_STATUS1_P1_RX_RDY)); } /* handle port 2 rx */ if ((status1 & ADIN2111_STATUS1_P2_RX_RDY) && is_adin2111) { do { ret = adin2111_read_fifo(dev, 1U); if (ret < 0) { break; } ret = eth_adin2111_reg_read(dev, ADIN2111_STATUS1, &status1); if (ret < 0) { goto continue_unlock; } } while (!!(status1 & ADIN2111_STATUS1_P2_RX_RDY)); } continue_unlock: /* clear interrupts */ ret = eth_adin2111_reg_write(dev, ADIN2111_STATUS0, ADIN2111_STATUS0_CLEAR); if (ret < 0) { LOG_ERR("Failed to clear STATUS0, %d", ret); } ret = eth_adin2111_reg_write(dev, ADIN2111_STATUS1, ADIN2111_STATUS1_CLEAR); if (ret < 0) { LOG_ERR("Failed to clear STATUS1, %d", ret); } /* enable interrupts */ ret = eth_adin2111_reg_write(dev, ADIN2111_IMASK0, ctx->imask0); if (ret < 0) { LOG_ERR("Failed to write IMASK0, %d", ret); } ret = eth_adin2111_reg_write(dev, ADIN2111_IMASK1, ctx->imask1); if (ret < 
0) { LOG_ERR("Failed to write IMASK1, %d", ret); } eth_adin2111_unlock(dev); } } static void adin2111_int_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { ARG_UNUSED(dev); ARG_UNUSED(pins); struct adin2111_data *ctx = CONTAINER_OF(cb, struct adin2111_data, gpio_int_callback); k_sem_give(&ctx->offload_sem); } static int adin2111_read_tx_space(const struct device *dev, uint32_t *space) { uint32_t val; int ret; ret = eth_adin2111_reg_read(dev, ADIN2111_TX_SPACE, &val); if (ret < 0) { return ret; } /* tx space is a number of halfwords (16-bits), multiply by 2 for bytes */ *space = val * 2; return ret; } static int adin2111_port_send(const struct device *dev, struct net_pkt *pkt) { const struct adin2111_port_config *cfg = dev->config; #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct adin2111_port_data *data = dev->data; #endif /* CONFIG_NET_STATISTICS_ETHERNET */ const struct device *adin = cfg->adin; struct adin2111_data *ctx = cfg->adin->data; size_t pkt_len = net_pkt_get_len(pkt); size_t header_size = ADIN2111_WRITE_HEADER_SIZE; size_t padded_size; size_t burst_size; uint32_t tx_space; int ret; eth_adin2111_lock(adin, K_FOREVER); if (ctx->oa) { uint32_t val, rca = 0; /* * By high-traffic zperf test, noted that ADIN2111 does not like we send * if there is something to be received. It stops to issue rx interrupts * and zperf transfer hangs. Forcing a receive for this case. 
*/ ret = eth_adin2111_reg_read(adin, ADIN2111_BUFSTS, &val); if (ret < 0) { return ret; } rca = val & ADIN2111_BUFSTS_RCA_MASK; if (rca > 0) { eth_adin2111_unlock(adin); k_sem_give(&ctx->offload_sem); k_yield(); eth_adin2111_lock(adin, K_FOREVER); } ret = eth_adin2111_send_oa_frame(cfg->adin, pkt, htons(cfg->port_idx)); goto end_check; } /* query remaining tx fifo space */ ret = adin2111_read_tx_space(adin, &tx_space); if (ret < 0) { eth_stats_update_errors_tx(data->iface); LOG_ERR("Failed to read TX FIFO space, %d", ret); goto end_unlock; } /** * verify that there is space for the frame * (frame + 2b header + 2b size field) */ if (tx_space < (pkt_len + ADIN2111_FRAME_HEADER_SIZE + ADIN2111_INTERNAL_HEADER_SIZE)) { /* tx buffer is full */ eth_stats_update_errors_tx(data->iface); ret = -EBUSY; goto end_unlock; } /** * pad to 64 bytes, otherwise MAC/PHY has to do it * internally MAC adds 4 bytes for forward error correction */ if ((pkt_len + ADIN2111_TX_FIFO_BUFFER_MARGIN) < 64) { padded_size = pkt_len + (64 - (pkt_len + ADIN2111_TX_FIFO_BUFFER_MARGIN)) + ADIN2111_FRAME_HEADER_SIZE; } else { padded_size = pkt_len + ADIN2111_FRAME_HEADER_SIZE; } /* prepare burst write (write data must be in multiples of 4) */ burst_size = ROUND_UP(padded_size, 4); if ((burst_size + ADIN2111_WRITE_HEADER_SIZE) > CONFIG_ETH_ADIN2111_BUFFER_SIZE) { ret = -ENOMEM; eth_stats_update_errors_tx(data->iface); goto end_unlock; } /* prepare tx buffer */ memset(ctx->buf, 0, burst_size + ADIN2111_WRITE_HEADER_SIZE); /* spi header */ *(uint16_t *)ctx->buf = htons(ADIN2111_TXN_CTRL_TX_REG); #if CONFIG_ETH_ADIN2111_SPI_CFG0 ctx->buf[2] = crc8_ccitt(0, ctx->buf, header_size); ++header_size; #endif /* CONFIG_ETH_ADIN2111_SPI_CFG0 */ /* frame header */ *(uint16_t *)(ctx->buf + header_size) = htons(cfg->port_idx); /* read pkt into tx buffer */ ret = net_pkt_read(pkt, (ctx->buf + header_size + ADIN2111_FRAME_HEADER_SIZE), pkt_len); if (ret < 0) { eth_stats_update_errors_tx(data->iface); LOG_ERR("Port %u 
failed to read PKT into TX buffer, %d", cfg->port_idx, ret); goto end_unlock; } /* write transmit size */ ret = eth_adin2111_reg_write(adin, ADIN2111_TX_FSIZE, padded_size); if (ret < 0) { eth_stats_update_errors_tx(data->iface); LOG_ERR("Port %u write FSIZE failed, %d", cfg->port_idx, ret); goto end_unlock; } /* write transaction */ const struct spi_buf buf = { .buf = ctx->buf, .len = header_size + burst_size }; const struct spi_buf_set tx = { .buffers = &buf, .count = 1U }; ret = spi_write_dt(&((const struct adin2111_config *) adin->config)->spi, &tx); end_check: if (ret < 0) { eth_stats_update_errors_tx(data->iface); LOG_ERR("Port %u frame SPI write failed, %d", cfg->port_idx, ret); goto end_unlock; } eth_stats_update_bytes_tx(data->iface, pkt_len); eth_stats_update_pkts_tx(data->iface); end_unlock: eth_adin2111_unlock(adin); return ret; } static int adin2111_config_sync(const struct device *dev) { int ret; uint32_t val; ret = eth_adin2111_reg_read(dev, ADIN2111_CONFIG0, &val); if (ret < 0) { return ret; } val |= ADIN2111_CONFIG0_SYNC; ret = eth_adin2111_reg_write(dev, ADIN2111_CONFIG0, val); if (ret < 0) { return ret; } return 0; } static int adin2111_write_filter_address(const struct device *dev, uint8_t *addr, uint8_t *mask, uint32_t rules, uint16_t slot) { uint16_t offset = slot * 2U; int ret; ret = eth_adin2111_reg_write(dev, ADIN2111_ADDR_FILT_UPR + offset, rules | sys_get_be16(&addr[0])); if (ret < 0) { return ret; } ret = eth_adin2111_reg_write(dev, ADIN2111_ADDR_FILT_LWR + offset, sys_get_be32(&addr[2])); if (ret < 0) { return ret; } if (offset > 2U) { /* mask filter addresses are limited to 2 */ return 0; } ret = eth_adin2111_reg_write(dev, ADIN2111_ADDR_MSK_UPR + offset, sys_get_be16(&mask[0])); if (ret < 0) { return ret; } ret = eth_adin2111_reg_write(dev, ADIN2111_ADDR_MSK_LWR + offset, sys_get_be32(&mask[2])); if (ret < 0) { return ret; } return ret; } static int adin2111_filter_multicast(const struct device *dev) { const struct adin2111_config 
*cfg = dev->config; bool is_adin2111 = (cfg->id == ADIN2111_MAC); uint8_t mm[NET_ETH_ADDR_LEN] = {BIT(0), 0U, 0U, 0U, 0U, 0U}; uint8_t mmask[NET_ETH_ADDR_LEN] = {0xFFU, 0U, 0U, 0U, 0U, 0U}; uint32_t rules = ADIN2111_ADDR_APPLY2PORT1 | (is_adin2111 ? ADIN2111_ADDR_APPLY2PORT2 : 0) | ADIN2111_ADDR_TO_HOST | ADIN2111_ADDR_TO_OTHER_PORT; return adin2111_write_filter_address(dev, mm, mmask, rules, ADIN2111_MULTICAST_ADDR_SLOT); } static int adin2111_filter_broadcast(const struct device *dev) { const struct adin2111_config *cfg = dev->config; bool is_adin2111 = (cfg->id == ADIN2111_MAC); uint8_t mac[NET_ETH_ADDR_LEN] = {0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU, 0xFFU}; uint32_t rules = ADIN2111_ADDR_APPLY2PORT1 | (is_adin2111 ? ADIN2111_ADDR_APPLY2PORT2 : 0) | ADIN2111_ADDR_TO_HOST | ADIN2111_ADDR_TO_OTHER_PORT; return adin2111_write_filter_address(dev, mac, mac, rules, ADIN2111_BROADCAST_ADDR_SLOT); } static int adin2111_filter_unicast(const struct device *dev, uint8_t *addr, const uint16_t port_idx) { uint32_t rules = (port_idx == 0 ? ADIN2111_ADDR_APPLY2PORT1 : ADIN2111_ADDR_APPLY2PORT2) | ADIN2111_ADDR_TO_HOST; uint16_t slot = (port_idx == 0 ? ADIN2111_UNICAST_P1_ADDR_SLOT : ADIN2111_UNICAST_P2_ADDR_SLOT); return adin2111_write_filter_address(dev, addr, NULL, rules, slot); } int eth_adin2111_broadcast_filter(const struct device *dev, bool enable) { if (!enable) { /* Clean up */ uint8_t mac[NET_ETH_ADDR_LEN] = {0}; return adin2111_write_filter_address(dev, mac, mac, 0, ADIN2111_BROADCAST_ADDR_SLOT); } return adin2111_filter_broadcast(dev); } /* * Check if a filter exists already. 
*/ static int eth_adin2111_find_filter(const struct device *dev, uint8_t *mac, const uint16_t port_idx) { int i, offset, reg, ret; for (i = ADIN2111_FILTER_FIRST_SLOT; i < ADIN2111_FILTER_SLOTS; i++) { offset = i << 1; ret = eth_adin2111_reg_read(dev, ADIN2111_ADDR_FILT_UPR + offset, &reg); if (ret < 0) { return ret; } if ((reg & UINT16_MAX) == sys_get_be16(&mac[0])) { if ((port_idx == 0 && !(reg & ADIN2111_ADDR_APPLY2PORT1)) || (port_idx == 1 && !(reg & ADIN2111_ADDR_APPLY2PORT2))) { continue; } ret = eth_adin2111_reg_read(dev, ADIN2111_ADDR_FILT_LWR + offset, &reg); if (ret < 0) { return ret; } if (reg == sys_get_be32(&mac[2])) { return i; } } } return -ENOENT; } static int eth_adin2111_set_mac_filter(const struct device *dev, uint8_t *mac, const uint16_t port_idx) { int i, ret, offset; uint32_t reg; ret = eth_adin2111_find_filter(dev, mac, port_idx); if (ret >= 0) { LOG_WRN("MAC filter already set at pos %d, not setting it.", ret); return ret; } if (ret != -ENOENT) { return ret; } for (i = ADIN2111_FILTER_FIRST_SLOT; i < ADIN2111_FILTER_SLOTS; i++) { offset = i << 1; ret = eth_adin2111_reg_read(dev, ADIN2111_ADDR_FILT_UPR + offset, &reg); if (ret < 0) { return ret; } if (reg == 0) { uint32_t rules = (port_idx == 0 ? 
ADIN2111_ADDR_APPLY2PORT1 : ADIN2111_ADDR_APPLY2PORT2) | ADIN2111_ADDR_TO_HOST; return adin2111_write_filter_address(dev, mac, NULL, rules, i); } } return -ENOSPC; } static int eth_adin2111_clear_mac_filter(const struct device *dev, uint8_t *mac, const uint16_t port_idx) { int i; uint8_t cmac[NET_ETH_ADDR_LEN] = {0}; i = eth_adin2111_find_filter(dev, mac, port_idx); if (i < 0) { return i; } return adin2111_write_filter_address(dev, cmac, cmac, 0, i); } #if defined(CONFIG_NET_PROMISCUOUS_MODE) static int eth_adin2111_set_promiscuous(const struct device *dev, const uint16_t port_idx, bool enable) { const struct adin2111_config *cfg = dev->config; bool is_adin2111 = (cfg->id == ADIN2111_MAC); uint32_t fwd_mask; if ((!is_adin2111 && port_idx > 0) || (is_adin2111 && port_idx > 1)) { return -EINVAL; } fwd_mask = port_idx ? ADIN2111_CONFIG2_P2_FWD_UNK2HOST : ADIN2111_CONFIG2_P1_FWD_UNK2HOST; return eth_adin2111_reg_update(dev, ADIN2111_CONFIG2, fwd_mask, enable ? fwd_mask : 0); } #endif static void adin2111_port_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); const struct adin2111_port_config *cfg = dev->config; struct adin2111_port_data *data = dev->data; const struct device *adin = cfg->adin; struct adin2111_data *ctx = adin->data; int ret; if (!device_is_ready(adin)) { LOG_ERR("ADIN %s is not ready, can't init port %u iface", cfg->adin->name, cfg->port_idx); return; } if (!device_is_ready(cfg->phy)) { LOG_ERR("PHY %u is not ready, can't init port %u iface", cfg->phy_addr, cfg->port_idx); return; } ctx->port[cfg->port_idx] = dev; data->iface = iface; ret = adin2111_filter_unicast(adin, data->mac_addr, cfg->port_idx); if (ret < 0) { LOG_ERR("Port %u, failed to set unicast filter, %d", cfg->port_idx, ret); return; } net_if_set_link_addr(iface, data->mac_addr, sizeof(data->mac_addr), NET_LINK_ETHERNET); ethernet_init(iface); net_if_carrier_off(iface); --ctx->ifaces_left_to_init; /* if all ports are initialized */ if 
(ctx->ifaces_left_to_init == 0U) { /* setup rx filters */ ret = adin2111_filter_multicast(adin); if (ret < 0) { LOG_ERR("Couldn't set multicast filter, %d", ret); return; } ret = adin2111_filter_broadcast(adin); if (ret < 0) { LOG_ERR("Couldn't set broadcast filter, %d", ret); return; } /* sync */ ret = adin2111_config_sync(adin); if (ret < 0) { LOG_ERR("Failed to write CONFIG0 SYNC, %d", ret); return; } /* all ifaces are done, start INT processing */ k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack, K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack), adin2111_offload_thread, (void *)adin, NULL, NULL, CONFIG_ETH_ADIN2111_IRQ_THREAD_PRIO, K_ESSENTIAL, K_NO_WAIT); k_thread_name_set(&ctx->rx_thread, "eth_adin2111_offload"); } } static enum ethernet_hw_caps adin2111_port_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_HW_FILTERING #if defined(CONFIG_NET_LLDP) | ETHERNET_LLDP #endif | ETHERNET_PROMISC_MODE; } static int adin2111_port_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { const struct adin2111_port_config *cfg = dev->config; struct adin2111_port_data *data = dev->data; const struct device *adin = cfg->adin; int ret = -ENOTSUP; (void)eth_adin2111_lock(adin, K_FOREVER); if (type == ETHERNET_CONFIG_TYPE_MAC_ADDRESS) { ret = adin2111_filter_unicast(adin, (uint8_t *)&config->mac_address.addr[0], cfg->port_idx); if (ret < 0) { goto end_unlock; } (void)memcpy(data->mac_addr, config->mac_address.addr, sizeof(data->mac_addr)); (void)net_if_set_link_addr(data->iface, data->mac_addr, sizeof(data->mac_addr), NET_LINK_ETHERNET); } if (type == ETHERNET_CONFIG_TYPE_FILTER) { /* Filtering for DA only */ if (config->filter.type & ETHERNET_FILTER_TYPE_DST_MAC_ADDRESS) { uint8_t *mac = (uint8_t *)config->filter.mac_address.addr; if (config->filter.set) { ret = eth_adin2111_set_mac_filter(adin, mac, cfg->port_idx); } else { ret = eth_adin2111_clear_mac_filter(adin, 
mac, cfg->port_idx); } } } #if defined(CONFIG_NET_PROMISCUOUS_MODE) if (type == ETHERNET_CONFIG_TYPE_PROMISC_MODE) { ret = eth_adin2111_set_promiscuous(adin, cfg->port_idx, config->promisc_mode); } #endif end_unlock: (void)eth_adin2111_unlock(adin); return ret; } #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *adin2111_port_get_stats(const struct device *dev) { struct adin2111_port_data *data = dev->data; return &data->stats; } #endif /* CONFIG_NET_STATISTICS_ETHERNET */ static int adin2111_check_spi(const struct device *dev) { uint32_t count; uint32_t val; int ret; /* check SPI communication by reading PHYID */ for (count = 0U; count < ADIN2111_DEV_AWAIT_RETRY_COUNT; ++count) { ret = eth_adin2111_reg_read(dev, ADIN2111_PHYID, &val); if (ret >= 0) { if (val == ADIN2111_PHYID_RST_VAL || val == ADIN1110_PHYID_RST_VAL) { break; } ret = -ETIMEDOUT; } k_sleep(K_USEC(ADIN2111_DEV_AWAIT_DELAY_POLL_US)); } return ret; } static int adin2111_await_device(const struct device *dev) { uint32_t count; uint32_t val; int ret; /* await reset complete (RESETC) and clear it */ for (count = 0U; count < ADIN2111_RESETC_AWAIT_RETRY_COUNT; ++count) { ret = eth_adin2111_reg_read(dev, ADIN2111_PHYID, &val); if (ret >= 0) { /* * Even after getting RESETC, for some milliseconds registers are * still not properly readable (they reads 0), * so checking OUI read-only value instead. 
*/ if ((val >> 10) == ADIN2111_PHYID_OUI) { /* clear RESETC */ ret = eth_adin2111_reg_write(dev, ADIN2111_STATUS0, ADIN2111_STATUS0_RESETC); if (ret >= 0) { break; } } ret = -ETIMEDOUT; } k_sleep(K_USEC(ADIN2111_RESETC_AWAIT_DELAY_POLL_US)); } return ret; } int eth_adin2111_sw_reset(const struct device *dev, uint16_t delay) { int ret; ret = eth_adin2111_reg_write(dev, ADIN2111_RESET, ADIN2111_RESET_SWRESET); if (ret < 0) { return ret; } k_msleep(delay); ret = adin2111_await_device(dev); if (ret < 0) { LOG_ERR("ADIN did't come out of the reset, %d", ret); return ret; } return ret; } static int adin2111_init(const struct device *dev) { const struct adin2111_config *cfg = dev->config; bool is_adin2111 = (cfg->id == ADIN2111_MAC); struct adin2111_data *ctx = dev->data; int ret; uint32_t val; __ASSERT(cfg->spi.config.frequency <= ADIN2111_SPI_MAX_FREQUENCY, "SPI frequency exceeds supported maximum\n"); if (!spi_is_ready_dt(&cfg->spi)) { LOG_ERR("SPI bus %s not ready", cfg->spi.bus->name); return -ENODEV; } if (!gpio_is_ready_dt(&cfg->interrupt)) { LOG_ERR("Interrupt GPIO device %s is not ready", cfg->interrupt.port->name); return -ENODEV; } ret = gpio_pin_configure_dt(&cfg->interrupt, GPIO_INPUT); if (ret < 0) { LOG_ERR("Failed to configure interrupt GPIO, %d", ret); return ret; } if (cfg->reset.port != NULL) { if (!gpio_is_ready_dt(&cfg->reset)) { LOG_ERR("Reset GPIO device %s is not ready", cfg->reset.port->name); return -ENODEV; } ret = gpio_pin_configure_dt(&cfg->reset, GPIO_OUTPUT_INACTIVE); if (ret < 0) { LOG_ERR("Failed to configure reset GPIO, %d", ret); return ret; } /* perform hard reset */ /* assert pin low for 16 s (10 s min) */ gpio_pin_set_dt(&cfg->reset, 1); k_busy_wait(16U); /* deassert and wait for 90 ms (max) for clocks stabilisation */ gpio_pin_set_dt(&cfg->reset, 0); k_msleep(ADIN2111_HW_BOOT_DELAY_MS); } gpio_init_callback(&(ctx->gpio_int_callback), adin2111_int_callback, BIT(cfg->interrupt.pin)); ret = gpio_add_callback(cfg->interrupt.port, 
&ctx->gpio_int_callback); if (ret < 0) { LOG_ERR("Failed to add INT callback, %d", ret); return ret; } k_msleep(ADIN2111_SPI_ACTIVE_DELAY_MS); ret = adin2111_check_spi(dev); if (ret < 0) { LOG_ERR("Failed to communicate over SPI, %d", ret); return ret; } /* perform MACPHY soft reset */ ret = eth_adin2111_sw_reset(dev, ADIN2111_SW_RESET_DELAY_MS); if (ret < 0) { LOG_ERR("MACPHY software reset failed, %d", ret); return ret; } /* CONFIG 0 */ /* disable Frame Check Sequence validation on the host */ /* if that is enabled, then CONFIG_ETH_ADIN2111_SPI_CFG0 must be off */ ret = eth_adin2111_reg_read(dev, ADIN2111_CONFIG0, &val); if (ret < 0) { LOG_ERR("Failed to read CONFIG0, %d", ret); return ret; } /* RXCTE must be disabled for Generic SPI */ val &= ~ADIN2111_CONFIG0_RXCTE; val &= ~(ADIN2111_CONFIG0_TXCTE | ADIN2111_CONFIG0_TXFCSVE); if (ctx->oa) { val |= ADIN2111_CONFIG0_ZARFE; } ret = eth_adin2111_reg_write(dev, ADIN2111_CONFIG0, val); if (ret < 0) { LOG_ERR("Failed to write CONFIG0, %d", ret); return ret; } /* CONFIG 2 */ ret = eth_adin2111_reg_read(dev, ADIN2111_CONFIG2, &val); if (ret < 0) { LOG_ERR("Failed to read CONFIG2, %d", ret); return ret; } val |= ADIN2111_CONFIG2_CRC_APPEND; /* configure forwarding of frames with unknown destination address */ /* to the other port. This forwarding is done in hardware. */ /* The setting will take effect after the ports */ /* are out of software powerdown. */ val |= (ADIN2111_CONFIG2_PORT_CUT_THRU_EN | (is_adin2111 ? ADIN2111_CONFIG2_P1_FWD_UNK2P2 : 0) | (is_adin2111 ? ADIN2111_CONFIG2_P2_FWD_UNK2P1 : 0)); ret = eth_adin2111_reg_write(dev, ADIN2111_CONFIG2, val); if (ret < 0) { LOG_ERR("Failed to write CONFIG2, %d", ret); return ret; } /* configure interrupt masks */ ctx->imask0 = ~((uint32_t)ADIN2111_IMASK0_PHYINTM); ctx->imask1 = ~(ADIN2111_IMASK1_TX_RDY_MASK | ADIN2111_IMASK1_P1_RX_RDY_MASK | ADIN2111_IMASK1_SPI_ERR_MASK | (is_adin2111 ? ADIN2111_IMASK1_P2_RX_RDY_MASK : 0) | (is_adin2111 ? 
ADIN2111_IMASK1_P2_PHYINT_MASK : 0)); /* enable interrupts */ ret = eth_adin2111_reg_write(dev, ADIN2111_IMASK0, ctx->imask0); if (ret < 0) { LOG_ERR("Failed to write IMASK0, %d", ret); return ret; } ret = eth_adin2111_reg_write(dev, ADIN2111_IMASK1, ctx->imask1); if (ret < 0) { LOG_ERR("Failed to write IMASK1, %d", ret); return ret; } ret = gpio_pin_interrupt_configure_dt(&cfg->interrupt, GPIO_INT_EDGE_TO_ACTIVE); if (ret < 0) { LOG_ERR("Failed to enable INT, %d", ret); return ret; } return ret; } static const struct ethernet_api adin2111_port_api = { .iface_api.init = adin2111_port_iface_init, .get_capabilities = adin2111_port_get_capabilities, .set_config = adin2111_port_set_config, .send = adin2111_port_send, #if defined(CONFIG_NET_STATISTICS_ETHERNET) .get_stats = adin2111_port_get_stats, #endif /* CONFIG_NET_STATISTICS_ETHERNET */ }; #define ADIN2111_STR(x) #x #define ADIN2111_XSTR(x) ADIN2111_STR(x) #define ADIN2111_DEF_BUF(name, size) static uint8_t __aligned(4) name[size] #define ADIN2111_MDIO_PHY_BY_ADDR(adin_n, phy_addr) \ DEVICE_DT_GET(DT_CHILD(DT_INST_CHILD(adin_n, mdio), ethernet_phy_##phy_addr)) #define ADIN2111_PORT_MAC(adin_n, port_n) \ DT_PROP(DT_CHILD(DT_DRV_INST(adin_n), port##port_n), local_mac_address) #define ADIN2111_PORT_DEVICE_INIT_INSTANCE(parent_n, port_n, phy_n, name) \ static struct adin2111_port_data name##_port_data_##port_n = { \ .mac_addr = ADIN2111_PORT_MAC(parent_n, phy_n), \ }; \ static const struct adin2111_port_config name##_port_config_##port_n = { \ .adin = DEVICE_DT_INST_GET(parent_n), \ .phy = ADIN2111_MDIO_PHY_BY_ADDR(parent_n, phy_n), \ .port_idx = port_n, \ .phy_addr = phy_n, \ }; \ ETH_NET_DEVICE_INIT_INSTANCE(name##_port_##port_n, "port_" ADIN2111_XSTR(port_n), \ port_n, NULL, NULL, &name##_port_data_##port_n, \ &name##_port_config_##port_n, CONFIG_ETH_INIT_PRIORITY, \ &adin2111_port_api, NET_ETH_MTU); #define ADIN2111_SPI_OPERATION ((uint16_t)(SPI_OP_MODE_MASTER | SPI_TRANSFER_MSB | SPI_WORD_SET(8))) #define 
ADIN2111_MAC_INITIALIZE(inst, dev_id, ifaces, name) \ ADIN2111_DEF_BUF(name##_buffer_##inst, CONFIG_ETH_ADIN2111_BUFFER_SIZE); \ COND_CODE_1(DT_INST_PROP(inst, spi_oa), \ ( \ ADIN2111_DEF_BUF(name##_oa_tx_buf_##inst, ADIN2111_OA_BUF_SZ); \ ADIN2111_DEF_BUF(name##_oa_rx_buf_##inst, ADIN2111_OA_BUF_SZ); \ ), ()) \ static const struct adin2111_config name##_config_##inst = { \ .id = dev_id, \ .spi = SPI_DT_SPEC_INST_GET(inst, ADIN2111_SPI_OPERATION, 0), \ .interrupt = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ .reset = GPIO_DT_SPEC_INST_GET_OR(inst, reset_gpios, { 0 }), \ }; \ static struct adin2111_data name##_data_##inst = { \ .ifaces_left_to_init = ifaces, \ .port = {}, \ .offload_sem = Z_SEM_INITIALIZER(name##_data_##inst.offload_sem, 0, 1), \ .lock = Z_MUTEX_INITIALIZER(name##_data_##inst.lock), \ .buf = name##_buffer_##inst, \ .oa = DT_INST_PROP(inst, spi_oa), \ .oa_prot = DT_INST_PROP(inst, spi_oa_protection), \ .oa_cps = 64, \ .oa_tx_buf = COND_CODE_1(DT_INST_PROP(inst, spi_oa), \ (name##_oa_tx_buf_##inst), (NULL)), \ .oa_rx_buf = COND_CODE_1(DT_INST_PROP(inst, spi_oa), \ (name##_oa_rx_buf_##inst), (NULL)), \ }; \ /* adin */ \ DEVICE_DT_DEFINE(DT_DRV_INST(inst), adin2111_init, NULL, \ &name##_data_##inst, &name##_config_##inst, \ POST_KERNEL, CONFIG_ETH_INIT_PRIORITY, \ NULL); #define ADIN2111_MAC_INIT(inst) ADIN2111_MAC_INITIALIZE(inst, ADIN2111_MAC, 2, adin2111) \ /* ports */ \ ADIN2111_PORT_DEVICE_INIT_INSTANCE(inst, 0, 1, adin2111) \ ADIN2111_PORT_DEVICE_INIT_INSTANCE(inst, 1, 2, adin2111) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT adi_adin2111 DT_INST_FOREACH_STATUS_OKAY(ADIN2111_MAC_INIT) #define ADIN1110_MAC_INIT(inst) ADIN2111_MAC_INITIALIZE(inst, ADIN1110_MAC, 1, adin1110) \ /* ports */ \ ADIN2111_PORT_DEVICE_INIT_INSTANCE(inst, 0, 1, adin1110) #undef DT_DRV_COMPAT #define DT_DRV_COMPAT adi_adin1110 DT_INST_FOREACH_STATUS_OKAY(ADIN1110_MAC_INIT) ```
/content/code_sandbox/drivers/ethernet/eth_adin2111.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
13,969
```unknown menuconfig ETH_GECKO bool "SiLabs Gecko Ethernet driver" default y depends on DT_HAS_SILABS_GECKO_ETHERNET_ENABLED help Enable Ethernet driver for Silicon Labs Gecko chips. if ETH_GECKO config ETH_GECKO_RX_THREAD_STACK_SIZE int "RX thread stack size" default 1500 help RX thread stack size config ETH_GECKO_RX_THREAD_PRIO int "RX thread priority" default 2 help RX thread priority config ETH_GECKO_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS int "Carrier check timeout period (ms)" default 500 range 100 30000 help Set the RX idle timeout period in milliseconds after which the PHY's carrier status is re-evaluated. endif # ETH_GECKO ```
/content/code_sandbox/drivers/ethernet/Kconfig.gecko
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
178
```objective-c #ifndef ETH_CYCLONEV_HEADER #define ETH_CYCLONEV_HEADER /* * Description: * Driver for the Synopsys DesignWare * 3504-0 Universal 10/100/1000 Ethernet MAC (DWC_gmac) * specifically designed for Cyclone V SoC DevKit use only. */ #include <zephyr/kernel.h> #include <zephyr/types.h> #define alt_replbits_word(dest, msk, src) \ (sys_write32((sys_read32(dest) & ~(msk)) | ((src) & (msk)), dest)) #define NB_TX_DESCS CONFIG_ETH_CVSX_NB_TX_DESCS #define NB_RX_DESCS CONFIG_ETH_CVSX_NB_RX_DESCS #define ETH_BUFFER_SIZE 1536 /* Descriptor Structure */ struct eth_cyclonev_dma_desc { uint32_t status; /*!< Status */ uint32_t control_buffer_size; /*!< Control and Buffer1, Buffer2 sizes */ uint32_t buffer1_addr; /*!< Buffer1 address pointer */ uint32_t buffer2_next_desc_addr; /*!< Buffer2 or next desc address pointer */ }; struct eth_cyclonev_priv { mem_addr_t base_addr; /* Base address */ uint8_t mac_addr[6]; uint32_t interrupt_mask; struct net_if *iface; /* Zephyr net_if Interface Struct (for interface initialisation) */ uint32_t tx_current_desc_number; uint32_t rx_current_desc_number; uint32_t tx_tail; uint32_t feature; /* HW feature register */ /* Tx/Rx Descriptor Rings */ struct eth_cyclonev_dma_desc tx_desc_ring[NB_TX_DESCS], rx_desc_ring[NB_RX_DESCS]; uint32_t rxints; /* Tx stats */ uint32_t txints; /* Rx stats */ uint8_t rx_buf[ETH_BUFFER_SIZE * NB_RX_DESCS]; /* Receive Buffer */ uint8_t tx_buf[ETH_BUFFER_SIZE * NB_TX_DESCS]; /* Transmit Buffer */ struct k_sem free_tx_descs; uint8_t running; /* Running state flag */ uint8_t initialised; /* Initialised state flag */ }; /* * Reset Manager Regs */ /* The base address of the Rstmgr register group. */ #define RSTMGR_BASE 0xffd05000 /* The byte offset of the ALT_RSTMGR_PERMODRST register from the beginning of * the component. */ #define RSTMGR_PERMODRST_OFST 0x14 /* The address of the ALT_RSTMGR_PERMODRST register. 
*/ #define RSTMGR_PERMODRST_ADDR 0xFFD05014 /* The mask used to set the ALT_RSTMGR_PERMODRST_EMAC0 register field value. */ #define RSTMGR_PERMODRST_EMAC0_SET_MSK 0x00000001 /* The mask used to set the ALT_RSTMGR_PERMODRST_EMAC1 register field value. */ #define RSTMGR_PERMODRST_EMAC1_SET_MSK 0x00000002 /* * System Manager Regs */ #define SYSMGR_BASE 0xffd08000 #define SYSMGR_EMAC_ADDR 0xffd08060 #define SYSMGR_FPGAINTF_INDIV_ADDR 0xffd08004 /* The byte offset of the SYSMGR_EMAC register from the beginning of the * component. */ #define SYSMGR_EMAC_OFST 0x60 /* The byte offset of the SYSMGR_FPGAINTF_INDIV register from the beginning of * the component. */ #define SYSMGR_FPGAINTF_INDIV_OFST 0x4 /* * Enumerated value for register field ALT_SYSMGR_EMACn_PHY_INTF_SEL * */ #define SYSMGR_EMAC_PHY_INTF_SEL_E_GMII_MII 0x0 /* * Enumerated value for register field ALT_SYSMGR_EMACn_PHY_INTF_SEL * */ #define SYSMGR_EMAC0_PHY_INTF_SEL_E_RGMII 0x1 #define SYSMGR_EMAC1_PHY_INTF_SEL_E_RGMII 0x4 /* * Enumerated value for register field ALT_SYSMGR_EMACn_PHY_INTF_SEL * */ #define SYSMGR_EMAC_PHY_INTF_SEL_E_RMII 0x2 /* The mask used to set the ALT_SYSMGR_EMACn_PHY_INTF_SEL register field value. */ #define SYSMGR_EMAC0_PHY_INTF_SEL_SET_MSK 0x00000003 #define SYSMGR_EMAC1_PHY_INTF_SEL_SET_MSK 0x0000000c /* The mask used to set the ALT_SYSMGR_FPGAINTF_MODULE_EMAC_0 register field * value. */ #define SYSMGR_FPGAINTF_MODULE_EMAC_0_SET_MSK 0x00000004 /* The mask used to set the ALT_SYSMGR_FPGAINTF_MODULE_EMAC_1 register field * value. 
*/ #define SYSMGR_FPGAINTF_MODULE_EMAC_1_SET_MSK 0x00000008 /* * Emac Registers */ /* Macros */ #define EMAC_BASE_ADDRESS DT_INST_REG_ADDR(0) #define EMAC_DMAGRP_BUS_MODE_ADDR(base) (uint32_t)((base) + EMAC_DMA_MODE_OFST) /* Bus Mode */ #define EMAC_DMA_RX_DESC_LIST_ADDR(base) (uint32_t)((base) + EMAC_DMA_RX_DESC_LIST_OFST) /* Receive Descriptor Address List */ #define EMAC_DMA_TX_DESC_LIST_ADDR(base) (uint32_t)((base) + EMAC_DMA_TX_DESC_LIST_OFST) /* Transceive Descriptor Address List */ #define EMAC_DMAGRP_OPERATION_MODE_ADDR(base) (uint32_t)((base) + EMAC_DMAGRP_OPERATION_MODE_OFST) /* Operation Mode */ #define EMAC_DMAGRP_STATUS_ADDR(base) (uint32_t)((base) + EMAC_DMAGRP_STATUS_OFST) /* Status */ #define EMAC_DMAGRP_DEBUG_ADDR(base) (uint32_t)((base) + EMAC_DMAGRP_DEBUG_OFST) /* Debug */ #define EMAC_DMA_INT_EN_ADDR(base) (uint32_t)((base) + EMAC_DMA_INT_EN_OFST) /* Interrupt Enable */ #define EMAC_DMAGRP_AXI_BUS_MODE_ADDR(base) (uint32_t)((base) + EMAC_DMAGRP_AXI_BUS_MODE_OFST) /* AXI Bus Mode */ #define EMAC_DMAGRP_AHB_OR_AXI_STATUS_ADDR(base) \ (uint32_t)((base) + EMAC_DMAGRP_AHB_OR_AXI_STATUS_OFST) /* AHB or AXI Status */ #define GMACGRP_CONTROL_STATUS_ADDR(base) \ (uint32_t)((base) + \ EMAC_GMACGRP_SGMII_RGMII_SMII_CONTROL_STATUS_OFST) \ /* SGMII RGMII SMII Control Status */ #define EMAC_GMAC_INT_MSK_ADDR(base) (uint32_t)((base) + EMAC_GMAC_INT_MSK_OFST) /* Interrupt Mask */ #define EMAC_GMAC_INT_STAT_ADDR(base) (uint32_t)((base) + EMAC_GMAC_INT_STAT_OFST) /* Interrupt Status */ #define GMACGRP_MAC_CONFIG_ADDR(base) (uint32_t)((base) + EMAC_GMACGRP_MAC_CONFIGURATION_OFST) /* MAC Configuration */ #define EMAC_GMACGRP_MAC_FRAME_FILTER_ADDR(base) \ (uint32_t)((base) + EMAC_GMACGRP_MAC_FRAME_FILTER_OFST) /* MAC Frame Filter */ #define EMAC_GMAC_MAC_ADDR0_HIGH_ADDR(base) (uint32_t)((base) + EMAC_GMAC_MAC_ADDR0_HIGH_OFST) /* MAC Address 0 High */ #define EMAC_GMAC_MAC_ADDR0_LOW_ADDR(base) (uint32_t)((base) + EMAC_GMAC_MAC_ADDR0_LOW_OFST) /* MAC Address 0 Low */ 
#define EMAC_GMAC_MAC_ADDR_HIGH_ADDR(base, n) (uint32_t)((base) + EMAC_GMAC_MAC_ADDR_HIGH_OFST(n)) /* MAC Address 0 High */ #define EMAC_GMAC_MAC_ADDR_LOW_ADDR(base, n) (uint32_t)((base) + EMAC_GMAC_MAC_ADDR_LOW_OFST(n)) /* MAC Address 0 High */ #define EMAC_GMAC_GMII_ADDR_ADDR(base) (uint32_t)((base) + EMAC_GMAC_GMII_ADDR_OFST) /* GMII Address */ #define EMAC_GMAC_GMII_DATA_ADDR(base) (uint32_t)((base) + EMAC_GMAC_GMII_DATA_OFST) /* GMII Data */ #define EMAC_DMA_TX_POLL_DEMAND_ADDR(base) (uint32_t)((base) + EMAC_DMA_TX_POLL_DEMAND_OFST) /* Transmit Poll Demand */ #define EMAC_DMA_RX_POLL_DEMAND_ADDR(base) (uint32_t)((base) + EMAC_DMA_RX_POLL_DEMAND_OFST) /* Receive Poll Demand */ #define EMAC_DMA_CURR_HOST_TX_DESC_ADDR(base) (uint32_t)((base) + EMAC_DMA_CURR_HOST_TX_DESC_OFST) /* Current Host Transmit Descriptor */ #define EMAC_DMA_CURR_HOST_RX_DESC_ADDR(base) (uint32_t)((base) + EMAC_DMA_CURR_HOST_RX_DESC_OFST) /* Current Host Receive Descriptor */ #define EMAC_DMA_CURR_HOST_TX_BUFF_ADDR(base) (uint32_t)((base) + EMAC_DMA_CURR_HOST_TX_BUFF_OFST) /* Current Host Transmit Buffer Address */ #define EMAC_DMA_CURR_HOST_RX_BUFF_ADDR(base) (uint32_t)((base) + EMAC_DMA_CURR_HOST_RX_BUFF_OFST) /* Current Host Receive Buffer Address */ #define EMAC_DMA_HW_FEATURE_ADDR(base) (uint32_t)((base) + EMAC_DMA_HW_FEATURE_OFST) /* HW Feature */ /* Bus Mode */ #define EMAC_DMA_MODE_OFST 0x1000 #define EMAC_DMA_MODE_SWR_SET_MSK 0x00000001 #define EMAC_DMA_MODE_SWR_GET(value) (((value)&0x00000001) >> 0) #define EMAC_DMA_MODE_FB_SET_MSK 0x00010000 #define EMAC_DMA_MODE_RPBL_SET(value) (((value) << 17) & 0x007e0000) #define EMAC_DMA_MODE_PBL_SET(value) (((value) << 8) & 0x00003f00) #define EMAC_DMA_MODE_EIGHTXPBL_SET(value) (((value) << 24) & 0x01000000) #define EMAC_DMA_MODE_AAL_SET_MSK 0x02000000 #define EMAC_DMA_MODE_USP_SET_MSK 0x00800000 /* Receive Descriptor Address List */ #define EMAC_DMA_RX_DESC_LIST_OFST 0x100c /* Transceive Descriptor Address List */ #define 
EMAC_DMA_TX_DESC_LIST_OFST 0x1010 /* Operation Mode */ #define EMAC_DMAGRP_OPERATION_MODE_OFST 0x1018 #define EMAC_DMAGRP_OPERATION_MODE_OSF_SET_MSK 0x00000004 /* Operate on Second Frame */ #define EMAC_DMAGRP_OPERATION_MODE_TSF_SET_MSK 0x00200000 /* Transmit Store and Forward */ #define EMAC_DMAGRP_OPERATION_MODE_RSF_SET_MSK 0x02000000 /* Receive Store and Forward */ #define EMAC_DMAGRP_OPERATION_MODE_FTF_SET_MSK 0x00100000 /* Receive Store and Forward */ #define EMAC_DMAGRP_OPERATION_MODE_ST_SET_MSK 0x00002000 #define EMAC_DMAGRP_OPERATION_MODE_SR_SET_MSK 0x00000002 #define EMAC_DMAGRP_OPERATION_MODE_DT_SET_MSK 0x04000000 /* Ignore frame errors */ /* Interrupt Enable */ #define EMAC_DMA_INT_EN_OFST 0x101C #define EMAC_DMA_INT_EN_NIE_SET_MSK 0x00010000 #define EMAC_DMA_INT_EN_AIE_SET_MSK 0x00008000 #define EMAC_DMA_INT_EN_ERE_SET_MSK 0x00004000 #define EMAC_DMA_INT_EN_FBE_SET_MSK 0x00002000 #define EMAC_DMA_INT_EN_ETE_SET_MSK 0x00000400 #define EMAC_DMA_INT_EN_RWE_SET_MSK 0x00000200 #define EMAC_DMA_INT_EN_RSE_SET_MSK 0x00000100 #define EMAC_DMA_INT_EN_RUE_SET_MSK 0x00000080 #define EMAC_DMA_INT_EN_RIE_SET_MSK 0x00000040 #define EMAC_DMA_INT_EN_UNE_SET_MSK 0x00000020 #define EMAC_DMA_INT_EN_OVE_SET_MSK 0x00000010 #define EMAC_DMA_INT_EN_TJE_SET_MSK 0x00000008 #define EMAC_DMA_INT_EN_TUE_SET_MSK 0x00000004 #define EMAC_DMA_INT_EN_TSE_SET_MSK 0x00000002 #define EMAC_DMA_INT_EN_TIE_SET_MSK 0x00000001 /* Status */ #define EMAC_DMAGRP_STATUS_OFST 0x1014 #define EMAC_DMAGRP_STATUS_TS_SET_MSK 0x00700000 #define EMAC_DMAGRP_STATUS_TS_E_SUSPTX 0x00600000 #define EMAC_DMAGRP_STATUS_RS_SET_MSK 0x000e0000 #define EMAC_DMAGRP_STATUS_RS_E_SUSPRX 0x00080000 #define EMAC_DMAGRP_DEBUG_OFST 0x24 #define EMAC_DMAGRP_DEBUG_TWCSTS 0x00400000 #define EMAC_DMAGRP_DEBUG_RWCSTS 0x00000010 #define EMAC_DMAGRP_DEBUG_RXFSTS_GET(value) (((value)&0x00000300) >> 8) /* AXI Bus Mode */ #define EMAC_DMAGRP_AXI_BUS_MODE_OFST 0x1028 #define EMAC_DMAGRP_AXI_BUS_MODE_BLEN16_SET_MSK 0x00000008 /* AHB 
or AXI Status */ #define EMAC_DMAGRP_AHB_OR_AXI_STATUS_OFST 0x102c /* MAC Configuration */ #define EMAC_GMACGRP_MAC_CONFIGURATION_OFST 0x0000 #define EMAC_GMACGRP_MAC_CONFIGURATION_IPC_SET_MSK 0x00000400 #define EMAC_GMACGRP_MAC_CONFIGURATION_JD_SET_MSK 0x00400000 /* Jabber Disable */ #define EMAC_GMACGRP_MAC_CONFIGURATION_PS_SET_MSK 0x00008000 /* Port Select = MII */ #define EMAC_GMACGRP_MAC_CONFIGURATION_BE_SET_MSK 0x00200000 /* Frame Burst Enable */ #define EMAC_GMACGRP_MAC_CONFIGURATION_WD_SET_MSK 0x00800000 /* Watchdog Disable */ #define EMAC_GMACGRP_MAC_CONFIGURATION_DO_SET_MSK 0x00002000 #define EMAC_GMACGRP_MAC_CONFIGURATION_TE_SET_MSK 0x00000008 #define EMAC_GMACGRP_MAC_CONFIGURATION_RE_SET_MSK 0x00000004 #define EMAC_GMACGRP_MAC_CONFIGURATION_TC_SET_MSK 0x01000000 #define EMAC_GMACGRP_MAC_CONFIGURATION_DM_SET_MSK 0x00000800 #define EMAC_GMACGRP_MAC_CONFIGURATION_FES_SET_MSK 0x00004000 /* SGMII RGMII SMII Control Status */ #define EMAC_GMACGRP_SGMII_RGMII_SMII_CONTROL_STATUS_OFST 0x00d8 #define EMAC_GMAC_MII_CTL_STAT_LNKSTS_GET(value) (((value)&0x00000008) >> 3) #define EMAC_GMAC_MII_CTL_STAT_LNKSPEED_GET(value) (((value)&0x00000007) >> 1) #define EMAC_GMAC_MII_CTL_STAT_LNKMOD_GET(value) ((value)&0x00000001) /* Interrupt Mask */ #define EMAC_GMAC_INT_MSK_OFST 0x003c #define EMAC_GMAC_INT_STAT_LPIIS_SET_MSK 0x00000400 #define EMAC_GMAC_INT_STAT_TSIS_SET_MSK 0x00000200 #define EMAC_GMAC_INT_STAT_RGSMIIIS_SET_MSK 0x00000001 /* Interrupt Status (Gmac)*/ #define EMAC_GMAC_INT_STAT_OFST 0x0038 /* MAC Frame Filter */ #define EMAC_GMACGRP_MAC_FRAME_FILTER_OFST 0x0004 #define EMAC_GMACGRP_MAC_FRAME_FILTER_PR_SET_MSK 0x00000001 /* MAC Address 0 High */ #define EMAC_GMAC_MAC_ADDR0_HIGH_OFST 0x40 #define EMAC_GMAC_MAC_ADDR_HIGH_OFST(n) (0x40 + 8 * (n)) /* MAC Address 0 Low */ #define EMAC_GMAC_MAC_ADDR0_LOW_OFST 0x44 #define EMAC_GMAC_MAC_ADDR_LOW_OFST(n) (0x44 + 8 * (n)) /* GMII Address */ #define EMAC_GMAC_GMII_ADDR_OFST 0x10 #define 
EMAC_GMAC_GMII_ADDR_PA_SET(value) (((value) << 11) & 0x0000f800) #define EMAC_GMAC_GMII_ADDR_GR_SET(value) (((value) << 6) & 0x000007c0) #define EMAC_GMAC_GMII_ADDR_GW_SET_MSK 0x00000002 #define EMAC_GMAC_GMII_ADDR_GW_CLR_MSK 0xfffffffd #define EMAC_GMAC_GMII_ADDR_CR_SET(value) (((value) << 2) & 0x0000003c) #define EMAC_GMAC_GMII_ADDR_GB_SET(value) (((value) << 0) & 0x00000001) #define EMAC_GMAC_GMII_ADDR_CR_E_DIV102 0x4 #define EMAC_GMAC_GMII_ADDR_GB_SET_MSK 0x00000001 /* GMII Data */ #define EMAC_GMAC_GMII_DATA_OFST 0x14 /* Transmit Poll Demand */ #define EMAC_DMA_TX_POLL_DEMAND_OFST 0x1004 /* Receive Poll Demand */ #define EMAC_DMA_RX_POLL_DEMAND_OFST 0x1008 /* Current Host Transmit Descriptor */ #define EMAC_DMA_CURR_HOST_TX_DESC_OFST 0x1048 /* Current Host Receive Descriptor */ #define EMAC_DMA_CURR_HOST_RX_DESC_OFST 0x104C /* Current Host Transmit Buffer Address */ #define EMAC_DMA_CURR_HOST_TX_BUFF_OFST 0x1050 /* Current Host Receive Buffer Address */ #define EMAC_DMA_CURR_HOST_RX_BUFF_OFST 0x1054 /* HW Feature */ #define EMAC_DMA_HW_FEATURE_OFST 0x1058 #define EMAC_DMA_HW_FEATURE_MIISEL 0x00000001 /* 10/100 Mbps support */ #define EMAC_DMA_HW_FEATURE_GMIISEL 0x00000002 /* 1000 Mbps support */ #define EMAC_DMA_HW_FEATURE_HDSEL 0x00000004 /* Half-Duplex support */ #define EMAC_DMA_HW_FEATURE_RXTYP2COE 0x00040000 /* IP Checksum Offload (Type 2) in Rx */ #define EMAC_DMA_HW_FEATURE_RXTYP1COE 0x00020000 /* IP Checksum Offload (Type 1) in Rx */ #define EMAC_DMA_HW_FEATURE_TXOESEL 0x00010000 /* Checksum Offload in Tx */ /* * DMA Descriptor Flag Definitions */ /* * DMA Rx Descriptor * your_sha256_hash--------------------------- * RDES0 | OWN(31) | Status [30:0] | * your_sha256_hash--------------------------- * RDES1 |CTRL(31)|Reserv[30:29]|Buff2ByteCt[28:16]|CTRL[15:14] * Reservr(13)|Buff1ByteCt[12:0]| * your_sha256_hash--------------------------- * RDES2 | Buffer1 Address [31:0] | * your_sha256_hash---------------------------- * RDES3 | Buffer2 Address [31:0] / 
Next Descriptor Address [31:0] * | * your_sha256_hash---------------------------- */ /* Bit definition of RDES0 register: DMA Rx descriptor status register */ #define ETH_DMARXDESC_OWN ((uint32_t)0x80000000) /*!< OWN bit: descriptor is owned by DMA engine */ #define ETH_DMARXDESC_AFM ((uint32_t)0x40000000) /*!< DA Filter Fail for the rx frame */ #define ETH_DMARXDESC_FL ((uint32_t)0x3FFF0000) /*!< Receive descriptor frame length */ #define ETH_DMARXDESC_ES ((uint32_t)0x00008000) /*!< Error summary: OR of the following bits: * DE || OE || IPC || LC || RWT || RE || CE */ #define ETH_DMARXDESC_DE ((uint32_t)0x00004000) /*!< Descriptor error: no more descriptors for receive frame */ #define ETH_DMARXDESC_SAF ((uint32_t)0x00002000) /*!< SA Filter Fail for the received frame */ #define ETH_DMARXDESC_LE ((uint32_t)0x00001000) /*!< Frame size not matching with length field */ #define ETH_DMARXDESC_OE ((uint32_t)0x00000800) /*!< Overflow Error: Frame was damaged due to buffer overflow */ #define ETH_DMARXDESC_VLAN ((uint32_t)0x00000400) /*!< VLAN Tag: received frame is a VLAN frame */ #define ETH_DMARXDESC_FS ((uint32_t)0x00000200) /*!< First descriptor of the frame */ #define ETH_DMARXDESC_LS ((uint32_t)0x00000100) /*!< Last descriptor of the frame */ #define ETH_DMARXDESC_IPV4HCE ((uint32_t)0x00000080) /*!< IPC Checksum Error: Rx Ipv4 header checksum error */ #define ETH_DMARXDESC_LC ((uint32_t)0x00000040) /*!< Late collision occurred during reception */ #define ETH_DMARXDESC_FT ((uint32_t)0x00000020) /*!< Frame type - Ethernet, otherwise 802.3 */ #define ETH_DMARXDESC_RWT ((uint32_t)0x00000010) /*!< Receive Watchdog Timeout: watchdog timer expired during reception */ #define ETH_DMARXDESC_RE ((uint32_t)0x00000008) /*!< Receive error: error reported by MII interface */ #define ETH_DMARXDESC_DBE ((uint32_t)0x00000004) /*!< Dribble bit error: frame contains non int multiple of 8 bits */ #define ETH_DMARXDESC_CE ((uint32_t)0x00000002) /*!< CRC error */ #define 
ETH_DMARXDESC_MAMPCE ((uint32_t)0x00000001) /* !< Rx MAC Address/Payload Checksum Error: Rx MAC address matched/ * Rx Payload Checksum Error */ /* Bit definition of RDES1 register */ #define ETH_DMARXDESC_DIC ((uint32_t)0x80000000) /*!< Disable Interrupt on Completion */ #define ETH_DMARXDESC_RBS2 ((uint32_t)0x1FFF0000) /*!< Receive Buffer2 Size */ #define ETH_DMARXDESC_RER ((uint32_t)0x00008000) /*!< Receive End of Ring */ #define ETH_DMARXDESC_RCH \ ((uint32_t)0x00004000) /*!< Second Address Chained \ */ #define ETH_DMARXDESC_RBS1 ((uint32_t)0x00001FFF) /*!< Receive Buffer1 Size */ /* *DMA Tx Descriptor *your_sha256_hash------------------------------- *TDES0 | OWN(31) | CTRL[30:26] | Reserved[25:24] | CTRL[23:20] | *Reserved[19:17] | Status[16:0] | *your_sha256_hash------------------------------- *TDES1 | Reserved[31:29] | Buffer2 ByteCount[28:16] | Reserved[15:13] | Buffer1 *ByteCount[12:0] | *your_sha256_hash------------------------------- *TDES2 | Buffer1 Address [31:0] *| *your_sha256_hash------------------------------- *TDES3 | Buffer2 Address [31:0] / Next Descriptor Address [31:0] *| *your_sha256_hash------------------------------- */ /* Bit definition of TDES0 register: DMA Tx descriptor status register */ #define ETH_DMATXDESC_OWN ((uint32_t)0x80000000) /*!< OWN bit: descriptor is owned by DMA engine */ #define ETH_DMATXDESC_IC ((uint32_t)0x40000000) /*!< Interrupt on Completion */ #define ETH_DMATXDESC_LS ((uint32_t)0x20000000) /*!< Last Segment */ #define ETH_DMATXDESC_FS ((uint32_t)0x10000000) /*!< First Segment */ #define ETH_DMATXDESC_DC ((uint32_t)0x08000000) /*!< Disable CRC */ #define ETH_DMATXDESC_DP ((uint32_t)0x04000000) /*!< Disable Padding */ #define ETH_DMATXDESC_TTSE ((uint32_t)0x02000000) /*!< Transmit Time Stamp Enable */ #define ETH_DMATXDESC_CIC ((uint32_t)0x00C00000) /*!< Checksum Insertion Control: 4 cases */ #define ETH_DMATXDESC_CIC_BYPASS ((uint32_t)0x00000000) /*!< Do Nothing: Checksum Engine is bypassed */ #define 
ETH_DMATXDESC_CIC_IPV4HEADER ((uint32_t)0x00400000) /*!< IPV4 header Checksum Insertion */ #define ETH_DMATXDESC_CIC_TCPUDPICMP_SEGMENT ((uint32_t)0x00800000) /*!< TCP/UDP/ICMP Checksum Insertion calculated over segment only */ #define ETH_DMATXDESC_CIC_TCPUDPICMP_FULL ((uint32_t)0x00C00000) /*!< TCP/UDP/ICMP Checksum Insertion fully calculated */ #define ETH_DMATXDESC_TER ((uint32_t)0x00200000) /*!< Transmit End of Ring */ #define ETH_DMATXDESC_TCH ((uint32_t)0x00100000) /*!< Second Address Chained */ #define ETH_DMATXDESC_TTSS ((uint32_t)0x00020000) /*!< Tx Time Stamp Status */ #define ETH_DMATXDESC_IHE ((uint32_t)0x00010000) /*!< IP Header Error */ #define ETH_DMATXDESC_ES ((uint32_t)0x00008000) /*!< Error summary: OR of the following bits: UE||ED||EC||LCO||NC||LCA||FF||JT */ #define ETH_DMATXDESC_JT ((uint32_t)0x00004000) /*!< Jabber Timeout */ #define ETH_DMATXDESC_FF ((uint32_t)0x00002000) /*!< Frame Flushed: DMA/MTL flushed the frame due to SW flush */ #define ETH_DMATXDESC_PCE ((uint32_t)0x00001000) /*!< Payload Checksum Error */ #define ETH_DMATXDESC_LCA ((uint32_t)0x00000800) /*!< Loss of Carrier: carrier lost during transmission */ #define ETH_DMATXDESC_NC ((uint32_t)0x00000400) /*!< No Carrier: no carrier signal from the transceiver */ #define ETH_DMATXDESC_LCO ((uint32_t)0x00000200) /*!< Late Collision: transmission aborted due to collision */ #define ETH_DMATXDESC_EC ((uint32_t)0x00000100) /*!< Excessive Collision: transmission aborted after 16 collisions */ #define ETH_DMATXDESC_VF ((uint32_t)0x00000080) /*!< VLAN Frame */ #define ETH_DMATXDESC_CC ((uint32_t)0x00000078) /*!< Collision Count */ #define ETH_DMATXDESC_ED ((uint32_t)0x00000004) /*!< Excessive Deferral */ #define ETH_DMATXDESC_UF ((uint32_t)0x00000002) /*!< Underflow Error: late data arrival from the memory */ #define ETH_DMATXDESC_DB ((uint32_t)0x00000001) /*!< Deferred Bit */ /* Bit definition of TDES1 register */ #define ETH_DMATXDESC_TBS2 \ ((uint32_t)0x1FFF0000) /*!< Transmit Buffer2 
Size \ */ #define ETH_DMATXDESC_TBS1 \ ((uint32_t)0x00001FFF) /*!< Transmit Buffer1 Size \ */ /* Bit definition of TDES2 register */ #define ETH_DMATXDESC_B1AP ((uint32_t)0xFFFFFFFF) /*!< Buffer1 Address Pointer */ /* Bit definition of TDES3 register */ #define ETH_DMATXDESC_B2AP ((uint32_t)0xFFFFFFFF) /*!< Buffer2 Address Pointer */ static const uint32_t Rstmgr_Permodrst_Emac_Set_Msk[] = {RSTMGR_PERMODRST_EMAC0_SET_MSK, RSTMGR_PERMODRST_EMAC1_SET_MSK}; static const uint32_t Sysmgr_Core_Emac_Phy_Intf_Sel_Set_Msk[] = {SYSMGR_EMAC0_PHY_INTF_SEL_SET_MSK, SYSMGR_EMAC1_PHY_INTF_SEL_SET_MSK}; static const uint32_t Sysmgr_Fpgaintf_En_3_Emac_Set_Msk[] = {SYSMGR_FPGAINTF_MODULE_EMAC_0_SET_MSK, SYSMGR_FPGAINTF_MODULE_EMAC_1_SET_MSK}; static const uint32_t Sysmgr_Emac_Phy_Intf_Sel_E_Rgmii[] = {SYSMGR_EMAC0_PHY_INTF_SEL_E_RGMII, SYSMGR_EMAC1_PHY_INTF_SEL_E_RGMII}; #endif ```
/content/code_sandbox/drivers/ethernet/eth_cyclonev_priv.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,529
```c #ifndef PHY_CYCLONEV_SRC #define PHY_CYCLONEV_SRC /* * Description: * Driver for the PHY KSZ9021RL/RN Datasheet:(path_to_url * downloads/en/DeviceDoc/KSZ9021RL-RN-Data-Sheet-DS00003050A.pdf) * specifically designed for Cyclone V SoC DevKit use only. */ /* PHY */ /* According to default Cyclone V DevKit Bootstrap Encoding Scheme */ #include "eth_cyclonev_priv.h" #include <stdio.h> #include <zephyr/kernel.h> #include <sys/types.h> #define PHY_ADDR (4) /* PHY_Read_write_Timeouts */ #define PHY_READ_TO ((uint32_t)0x0004FFFF) #define PHY_WRITE_TO ((uint32_t)0x0004FFFF) /* Speed and Duplex mask values */ #define PHY_SPEED_100 (0x0020) #define PHY_SPEED_1000 (0x0040) #define PHY_CLK_AND_CONTROL_PAD_SKEW_VALUE 0xa0d0 #define PHY_RX_DATA_PAD_SKEW_VALUE 0x0000 /* Write/read to/from extended registers */ #define MII_KSZPHY_EXTREG 0x0b #define KSZPHY_EXTREG_WRITE 0x8000 #define MII_KSZPHY_EXTREG_WRITE 0x0c #define MII_KSZPHY_EXTREG_READ 0x0d /* PHY Regs */ /* Basic Control Register */ #define PHY_BCR (0) #define PHY_RESET BIT(15) /* Do a PHY reset */ #define PHY_AUTONEGOTIATION BIT(12) #define PHY_RESTART_AUTONEGOTIATION BIT(9) /* Basic Status Register */ #define PHY_BSR BIT(0) #define PHY_AUTOCAP BIT(3) /* Auto-negotiation capability */ #define PHY_LINKED_STATUS BIT(2) #define PHY_AUTONEGO_COMPLETE BIT(5) /* Auto-Negotiation Advertisement */ #define PHY_AUTON (4) #define PHYANA_10BASET BIT(5) #define PHYANA_10BASETFD BIT(6) #define PHYANA_100BASETX BIT(7) #define PHYANA_100BASETXFD BIT(8) #define PHYSYMETRIC_PAUSE BIT(10) #define PHYASYMETRIC_PAUSE BIT(11) /* 1000Base-T Control */ #define PHY_1GCTL (9) #define PHYADVERTISE_1000HALF BIT(8) #define PHYADVERTISE_1000FULL BIT(9) #define PHYINDICATE_PORTTYPE BIT(10) #define PHYCONFIG_MASTER BIT(11) #define PHYENABLE_MANUALCONFIG BIT(12) /* PHY Control Register */ #define PHY_CR (31) #define PHY_DUPLEX_STATUS BIT(3) /* Extended registers */ #define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104 #define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105 
#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106 int alt_eth_phy_write_register(uint16_t emac_instance, uint16_t phy_reg, uint16_t phy_value, struct eth_cyclonev_priv *p); int alt_eth_phy_read_register(uint16_t emac_instance, uint16_t phy_reg, uint16_t *rdval, struct eth_cyclonev_priv *p); int alt_eth_phy_write_register_extended(uint16_t emac_instance, uint16_t phy_reg, uint16_t phy_value, struct eth_cyclonev_priv *p); int alt_eth_phy_read_register_extended(uint16_t emac_instance, uint16_t phy_reg, uint16_t *rdval, struct eth_cyclonev_priv *p); int alt_eth_phy_config(uint16_t instance, struct eth_cyclonev_priv *p); int alt_eth_phy_reset(uint16_t instance, struct eth_cyclonev_priv *p); int alt_eth_phy_get_duplex_and_speed(uint16_t *phy_duplex_status, uint16_t *phy_speed, uint16_t instance, struct eth_cyclonev_priv *p); int alt_eth_phy_write_register(uint16_t emac_instance, uint16_t phy_reg, uint16_t phy_value, struct eth_cyclonev_priv *p) { uint16_t tmpreg = 0; volatile uint32_t timeout = 0; uint16_t phy_addr; if (emac_instance > 1) { return -1; } phy_addr = PHY_ADDR; /* Prepare the MII address register value */ tmpreg = 0; /* Set the PHY device address */ tmpreg |= EMAC_GMAC_GMII_ADDR_PA_SET(phy_addr); /* Set the PHY register address */ tmpreg |= EMAC_GMAC_GMII_ADDR_GR_SET(phy_reg); /* Set the write mode */ tmpreg |= EMAC_GMAC_GMII_ADDR_GW_SET_MSK; /* Set the clock divider */ tmpreg |= EMAC_GMAC_GMII_ADDR_CR_SET(EMAC_GMAC_GMII_ADDR_CR_E_DIV102); /* Set the MII Busy bit */ tmpreg |= EMAC_GMAC_GMII_ADDR_GB_SET(EMAC_GMAC_GMII_ADDR_GB_SET_MSK); /* Give the value to the MII data register */ sys_write32(phy_value & 0xffff, EMAC_GMAC_GMII_DATA_ADDR(p->base_addr)); /* Write the result value into the MII Address register */ sys_write32(tmpreg & 0xffff, EMAC_GMAC_GMII_ADDR_ADDR(p->base_addr)); /* Check the Busy flag */ do { timeout++; tmpreg = sys_read32(EMAC_GMAC_GMII_ADDR_ADDR(p->base_addr)); } while ((tmpreg & EMAC_GMAC_GMII_ADDR_GB_SET_MSK) && (timeout < PHY_WRITE_TO)); /* 
Return ERROR in case of timeout */ if (timeout == PHY_WRITE_TO) { return -1; } /* Return SUCCESS */ return 0; } int alt_eth_phy_read_register(uint16_t emac_instance, uint16_t phy_reg, uint16_t *rdval, struct eth_cyclonev_priv *p) { uint16_t tmpreg = 0; volatile uint32_t timeout = 0; uint16_t phy_addr; if (emac_instance > 1) { return -1; } phy_addr = PHY_ADDR; /* Prepare the MII address register value */ tmpreg = 0; /* Set the PHY device address */ tmpreg |= EMAC_GMAC_GMII_ADDR_PA_SET(phy_addr); /* Set the PHY register address */ tmpreg |= EMAC_GMAC_GMII_ADDR_GR_SET(phy_reg); /* Set the read mode */ tmpreg &= EMAC_GMAC_GMII_ADDR_GW_CLR_MSK; /* Set the clock divider */ tmpreg |= EMAC_GMAC_GMII_ADDR_CR_SET(EMAC_GMAC_GMII_ADDR_CR_E_DIV102); /* Set the MII Busy bit */ tmpreg |= EMAC_GMAC_GMII_ADDR_GB_SET(EMAC_GMAC_GMII_ADDR_GB_SET_MSK); /* Write the result value into the MII Address register */ sys_write32(tmpreg & 0xffff, EMAC_GMAC_GMII_ADDR_ADDR(p->base_addr)); /* Check the Busy flag */ do { timeout++; tmpreg = sys_read32(EMAC_GMAC_GMII_ADDR_ADDR(p->base_addr)); } while ((tmpreg & EMAC_GMAC_GMII_ADDR_GB_SET_MSK) && (timeout < PHY_READ_TO)); /* Return ERROR in case of timeout */ if (timeout == PHY_READ_TO) { return -1; } /* Return data register value */ *rdval = sys_read32(EMAC_GMAC_GMII_DATA_ADDR(p->base_addr)); return 0; } int alt_eth_phy_write_register_extended(uint16_t emac_instance, uint16_t phy_reg, uint16_t phy_value, struct eth_cyclonev_priv *p) { int rc; rc = alt_eth_phy_write_register(emac_instance, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | phy_reg, p); if (rc == -1) { return rc; } rc = alt_eth_phy_write_register(emac_instance, MII_KSZPHY_EXTREG_WRITE, phy_value, p); return rc; } int alt_eth_phy_read_register_extended(uint16_t emac_instance, uint16_t phy_reg, uint16_t *rdval, struct eth_cyclonev_priv *p) { int rc; rc = alt_eth_phy_write_register(emac_instance, MII_KSZPHY_EXTREG, phy_reg, p); if (rc == -1) { return rc; } k_sleep(K_MSEC(1)); rc = 
alt_eth_phy_read_register(emac_instance, MII_KSZPHY_EXTREG_READ, rdval, p); return rc; } int alt_eth_phy_config(uint16_t instance, struct eth_cyclonev_priv *p) { int rc; uint16_t rdval; uint32_t timeout; /*-------------------- Configure the PHY skew values ----------------*/ rc = alt_eth_phy_write_register_extended(instance, MII_KSZPHY_CLK_CONTROL_PAD_SKEW, PHY_CLK_AND_CONTROL_PAD_SKEW_VALUE, p); if (rc == -1) { return rc; } rc = alt_eth_phy_write_register_extended(instance, MII_KSZPHY_RX_DATA_PAD_SKEW, PHY_RX_DATA_PAD_SKEW_VALUE, p); if (rc == -1) { return rc; } /* Implement Auto-negotiation Process */ /* Check PHY Status if auto-negotiation is supported */ rc = alt_eth_phy_read_register(instance, PHY_BSR, &rdval, p); if (((rdval & PHY_AUTOCAP) == 0) || (rc == -1)) { return -1; } /* Set Advertise capabilities for 10Base-T/ *10Base-T full-duplex/100Base-T/100Base-T full-duplex */ rc = alt_eth_phy_read_register(instance, PHY_AUTON, &rdval, p); if (rc == -1) { return rc; } rdval |= (PHYANA_10BASET | PHYANA_10BASETFD | PHYANA_100BASETX | PHYANA_100BASETXFD | PHYSYMETRIC_PAUSE); rc = alt_eth_phy_write_register(instance, PHY_AUTON, rdval, p); if (rc == -1) { return rc; } /* Set Advertise capabilities for 1000 Base-T/1000 Base-T full-duplex */ rc = alt_eth_phy_write_register(instance, PHY_1GCTL, PHYADVERTISE_1000FULL | PHYADVERTISE_1000HALF | PHYINDICATE_PORTTYPE | PHYCONFIG_MASTER | PHYENABLE_MANUALCONFIG , p); if (rc == -1) { return rc; } /* Wait for linked status... 
*/ timeout = 0; do { timeout++; rc = alt_eth_phy_read_register(instance, PHY_BSR, &rdval, p); } while (!(rdval & PHY_LINKED_STATUS) && (timeout < PHY_READ_TO) && (rc == 0)); /* Return ERROR in case of timeout */ if ((timeout == PHY_READ_TO) || (rc == -1)) { LOG_ERR("Error Link Down\n"); return -1; } LOG_INF("Link is up!"); /* Configure the PHY for AutoNegotiate */ rc = alt_eth_phy_read_register(instance, PHY_BCR, &rdval, p); if (rc == -1) { return rc; } rdval |= PHY_AUTONEGOTIATION; rdval |= PHY_RESTART_AUTONEGOTIATION; rc = alt_eth_phy_write_register(instance, PHY_BCR, rdval, p); if (rc == -1) { return rc; } /* Wait until the auto-negotiation is completed */ timeout = 0; do { timeout++; rc = alt_eth_phy_read_register(instance, PHY_BSR, &rdval, p); } while (!(rdval & PHY_AUTONEGO_COMPLETE) && (timeout < PHY_READ_TO) && (rc == 0)); /* Return ERROR in case of timeout */ if ((timeout == PHY_READ_TO) || (rc == -1)) { alt_eth_phy_read_register(instance, PHY_BSR, &rdval, p); LOG_ERR("Auto Negotiation: Status reg = 0x%x\n", rdval); return -1; } LOG_INF("Auto Negotiation Complete!"); return rc; }; int alt_eth_phy_reset(uint16_t instance, struct eth_cyclonev_priv *p) { int i; int rc; uint16_t rdval; /* Put the PHY in reset mode */ if ((alt_eth_phy_write_register(instance, PHY_BCR, PHY_RESET, p)) != 0) { /* Return ERROR in case of write timeout */ return -1; } /* Wait for the reset to clear */ for (i = 0; i < 10; i++) { k_sleep(K_MSEC(10)); rc = alt_eth_phy_read_register(instance, PHY_BCR, &rdval, p); if (((rdval & PHY_RESET) == 0) || (rc == -1)) { break; } } if (i == 10) { return -1; } /* Delay to assure PHY reset */ k_sleep(K_MSEC(10)); return rc; }; int alt_eth_phy_get_duplex_and_speed(uint16_t *phy_duplex_status, uint16_t *phy_speed, uint16_t instance, struct eth_cyclonev_priv *p) { LOG_DBG("PHY: func_alt_eth_phy_get_duplex_and_speed\n"); uint16_t regval = 0; int rc; rc = alt_eth_phy_read_register(instance, PHY_CR, &regval, p); if (regval & PHY_DUPLEX_STATUS) { 
*phy_duplex_status = 1; } else { *phy_duplex_status = 0; } if (regval & PHY_SPEED_100) { *phy_speed = 100; } else { if (regval & PHY_SPEED_1000) { *phy_speed = 1000; } else { *phy_speed = 10; } } return rc; } #endif ```
/content/code_sandbox/drivers/ethernet/phy_cyclonev.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,143
```c /* * */ #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_eth); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/mbox.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/phy.h> #include <ethernet/eth_stats.h> #include <soc.h> #include <Netc_Eth_Ip.h> #include <Netc_Eth_Ip_Irq.h> #include <Netc_EthSwt_Ip.h> #include "eth.h" #include "eth_nxp_s32_netc_priv.h" /* Global MAC filter hash table required for the baremetal driver */ Netc_Eth_Ip_MACFilterHashTableEntryType * MACFilterHashTableAddrs[FEATURE_NETC_ETH_NUMBER_OF_CTRLS]; static void nxp_s32_eth_rx_thread(void *arg1, void *unused1, void *unused2); static void nxp_s32_eth_msix_wrapper(const struct device *dev, uint32_t channel, void *user_data, struct mbox_msg *msg) { const struct nxp_s32_eth_msix *msix = (const struct nxp_s32_eth_msix *)user_data; ARG_UNUSED(dev); ARG_UNUSED(msg); /* Handler doesn't require any data to be passed, used only for signalling */ msix->handler(channel, NULL, 0); } static inline struct net_if *get_iface(struct nxp_s32_eth_data *ctx) { return ctx->iface; } int nxp_s32_eth_initialize_common(const struct device *dev) { const struct nxp_s32_eth_config *cfg = dev->config; struct nxp_s32_eth_data *ctx = dev->data; Netc_Eth_Ip_StatusType status; const struct nxp_s32_eth_msix *msix; int err; /* Populate the MAC filter hash table addresses for this SI */ __ASSERT_NO_MSG(cfg->si_idx < FEATURE_NETC_ETH_NUMBER_OF_CTRLS); MACFilterHashTableAddrs[cfg->si_idx] = cfg->mac_filter_hash_table; status = Netc_Eth_Ip_Init(cfg->si_idx, &cfg->netc_cfg); if (status != NETC_ETH_IP_STATUS_SUCCESS) { LOG_ERR("Failed to initialize SI%d (%d)", cfg->si_idx, status); return -EIO; } for (int i = 0; i < NETC_MSIX_EVENTS_COUNT; i++) { msix = &cfg->msix[i]; if (mbox_is_ready_dt(&msix->mbox_spec)) { err = 
mbox_register_callback_dt(&msix->mbox_spec, nxp_s32_eth_msix_wrapper, (void *)msix); if (err != 0) { LOG_ERR("Failed to register MRU callback on channel %u", msix->mbox_spec.channel_id); return err; } } } k_mutex_init(&ctx->tx_mutex); k_sem_init(&ctx->rx_sem, 0, 1); k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack, K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack), nxp_s32_eth_rx_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_NXP_S32_RX_THREAD_PRIO), 0, K_NO_WAIT); k_thread_name_set(&ctx->rx_thread, "nxp_s32_eth_rx"); status = Netc_Eth_Ip_EnableController(cfg->si_idx); if (status != NETC_ETH_IP_STATUS_SUCCESS) { LOG_ERR("Failed to enable ENETC SI%d (%d)", cfg->si_idx, status); return -EIO; } if (cfg->generate_mac) { cfg->generate_mac(&ctx->mac_addr[0]); } return 0; } void nxp_s32_eth_mcast_filter(const struct device *dev, const struct ethernet_filter *filter) { const struct nxp_s32_eth_config *cfg = dev->config; Netc_Eth_Ip_StatusType status; if (filter->set) { status = Netc_Eth_Ip_AddMulticastDstAddrToHashFilter(cfg->si_idx, filter->mac_address.addr); } else { status = Netc_Eth_Ip_RemoveMulticastDstAddrFromHashFilter(cfg->si_idx, filter->mac_address.addr); } if (status != NETC_ETH_IP_STATUS_SUCCESS) { LOG_ERR("Failed to update multicast hash table: %d", status); } } int nxp_s32_eth_tx(const struct device *dev, struct net_pkt *pkt) { struct nxp_s32_eth_data *ctx = dev->data; const struct nxp_s32_eth_config *cfg = dev->config; size_t pkt_len = net_pkt_get_len(pkt); int res = 0; Netc_Eth_Ip_StatusType status; Netc_Eth_Ip_BufferType buf; __ASSERT(pkt, "Packet pointer is NULL"); k_mutex_lock(&ctx->tx_mutex, K_FOREVER); buf.length = (uint16_t)pkt_len; buf.data = NULL; status = Netc_Eth_Ip_GetTxBuff(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL); if (status == NETC_ETH_IP_STATUS_TX_BUFF_BUSY) { /* Reclaim the buffers already transmitted and try again */ Netc_Eth_Ip_ReleaseTxBuffers(cfg->si_idx, cfg->tx_ring_idx); status = Netc_Eth_Ip_GetTxBuff(cfg->si_idx, 
cfg->tx_ring_idx, &buf, NULL); } if (status != NETC_ETH_IP_STATUS_SUCCESS) { LOG_ERR("Failed to get tx buffer: %d", status); res = -ENOBUFS; goto error; } buf.length = (uint16_t)pkt_len; res = net_pkt_read(pkt, buf.data, pkt_len); if (res) { LOG_ERR("Failed to copy packet to tx buffer: %d", res); res = -ENOBUFS; goto error; } status = Netc_Eth_Ip_SendFrame(cfg->si_idx, cfg->tx_ring_idx, &buf, NULL); if (status != NETC_ETH_IP_STATUS_SUCCESS) { LOG_ERR("Failed to tx frame: %d", status); res = -EIO; goto error; } error: k_mutex_unlock(&ctx->tx_mutex); if (res != 0) { eth_stats_update_errors_tx(ctx->iface); } return res; } static struct net_pkt *nxp_s32_eth_get_pkt(const struct device *dev, Netc_Eth_Ip_BufferType *buf) { struct nxp_s32_eth_data *ctx = dev->data; struct net_pkt *pkt = NULL; int res = 0; /* Use root iface, it will be updated later in net_recv_data() */ pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, buf->length, AF_UNSPEC, 0, NETC_TIMEOUT); if (!pkt) { goto exit; } res = net_pkt_write(pkt, buf->data, buf->length); if (res) { net_pkt_unref(pkt); pkt = NULL; goto exit; } exit: if (!pkt) { eth_stats_update_errors_rx(get_iface(ctx)); } return pkt; } static int nxp_s32_eth_rx(const struct device *dev) { struct nxp_s32_eth_data *ctx = dev->data; const struct nxp_s32_eth_config *cfg = dev->config; Netc_Eth_Ip_BufferType buf; Netc_Eth_Ip_RxInfoType info; Netc_Eth_Ip_StatusType status; struct net_pkt *pkt; int key; int res = 0; key = irq_lock(); status = Netc_Eth_Ip_ReadFrame(cfg->si_idx, cfg->rx_ring_idx, &buf, &info); if (status == NETC_ETH_IP_STATUS_RX_QUEUE_EMPTY) { res = -ENOBUFS; } else if (status != NETC_ETH_IP_STATUS_SUCCESS) { LOG_ERR("Error on received frame: %d (0x%X)", status, info.rxStatus); res = -EIO; } else { pkt = nxp_s32_eth_get_pkt(dev, &buf); Netc_Eth_Ip_ProvideRxBuff(cfg->si_idx, cfg->rx_ring_idx, &buf); if (pkt != NULL) { res = net_recv_data(get_iface(ctx), pkt); if (res < 0) { eth_stats_update_errors_rx(get_iface(ctx)); net_pkt_unref(pkt); 
LOG_ERR("Failed to enqueue frame into rx queue: %d", res); } } } irq_unlock(key); return res; } static void nxp_s32_eth_rx_thread(void *arg1, void *unused1, void *unused2) { const struct device *dev = (const struct device *)arg1; struct nxp_s32_eth_data *ctx = dev->data; int res; int work; ARG_UNUSED(unused1); ARG_UNUSED(unused2); __ASSERT_NO_MSG(arg1 != NULL); __ASSERT_NO_MSG(ctx != NULL); while (1) { res = k_sem_take(&ctx->rx_sem, K_FOREVER); __ASSERT_NO_MSG(res == 0); work = 0; while (nxp_s32_eth_rx(dev) != -ENOBUFS) { if (++work == CONFIG_ETH_NXP_S32_RX_BUDGET) { /* more work to do, reschedule */ work = 0; k_yield(); } } } } enum ethernet_hw_caps nxp_s32_eth_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return (ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T | ETHERNET_HW_RX_CHKSUM_OFFLOAD | ETHERNET_HW_FILTERING #if defined(CONFIG_NET_VLAN) | ETHERNET_HW_VLAN #endif #if defined(CONFIG_NET_PROMISCUOUS_MODE) | ETHERNET_PROMISC_MODE #endif ); } int nxp_s32_eth_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct nxp_s32_eth_data *ctx = dev->data; const struct nxp_s32_eth_config *cfg = dev->config; int res = 0; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: /* Set new Ethernet MAC address and register it with the upper layer */ memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr)); Netc_Eth_Ip_SetMacAddr(cfg->si_idx, (const uint8_t *)ctx->mac_addr); net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); LOG_INF("SI%d MAC set to: %02x:%02x:%02x:%02x:%02x:%02x", cfg->si_idx, ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2], ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]); break; case ETHERNET_CONFIG_TYPE_FILTER: nxp_s32_eth_mcast_filter(dev, &config->filter); break; default: res = -ENOTSUP; break; } return res; } BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_LEN % 8) == 0, "Rx ring length must be 
multiple of 8"); BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_LEN % 8) == 0, "Tx ring length must be multiple of 8"); BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE % 8) == 0, "Rx ring data buffer size must be multiple of 8"); BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE % 8) == 0, "Tx ring data buffer size must be multiple of 8"); ```
/content/code_sandbox/drivers/ethernet/eth_nxp_s32_netc.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,707
```unknown
#
# Xilinx Processor System Gigabit Ethernet controller (GEM) driver
# configuration options
#

menuconfig ETH_XLNX_GEM
	bool "Xilinx GEM Ethernet driver"
	default y
	depends on DT_HAS_XLNX_GEM_ENABLED
	# On QEMU targets the driver additionally requires the QEMU Ethernet
	# backend. Simplified from "!QEMU_TARGET || (QEMU_TARGET &&
	# NET_QEMU_ETHERNET)", which is logically equivalent.
	depends on !QEMU_TARGET || NET_QEMU_ETHERNET
	help
	  Enable Xilinx GEM Ethernet driver.

config ETH_NIC_MODEL
	string
	default "cadence_gem"
	depends on ETH_XLNX_GEM
	help
	  Value of the -nic parameter to be used when starting QEMU. This
	  parameter specifies which type of Ethernet controller shall be
	  simulated by QEMU.
```
/content/code_sandbox/drivers/ethernet/Kconfig.xlnx_gem
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
146
```objective-c
/* STELLARIS Ethernet Controller
 *
 * Private register map and driver state definitions for the TI Stellaris
 * on-chip Ethernet MAC. All register addresses are computed relative to
 * the MAC base address stored in the device's config structure.
 */

#ifndef ETH_STELLARIS_PRIV_H_
#define ETH_STELLARIS_PRIV_H_

/* Resolve the MAC register base from the device config. NOTE: expands to an
 * expression that reads a variable named `dev` at the expansion site.
 */
#define REG_BASE(dev) \
	((const struct eth_stellaris_config *const)(dev)->config)->mac_base

/*
 * Register mapping
 */
/* Registers for ethernet system, mac_base + offset */
#define REG_MACRIS  (REG_BASE(dev) + 0x000) /* Raw interrupt status/ack */
#define REG_MACIM   (REG_BASE(dev) + 0x004) /* Interrupt mask */
#define REG_MACRCTL (REG_BASE(dev) + 0x008) /* Receive control */
#define REG_MACTCTL (REG_BASE(dev) + 0x00C) /* Transmit control */
#define REG_MACDATA (REG_BASE(dev) + 0x010) /* Data FIFO access window */
#define REG_MACIA0  (REG_BASE(dev) + 0x014) /* Individual (MAC) address 0 */
#define REG_MACIA1  (REG_BASE(dev) + 0x018) /* Individual (MAC) address 1 */
#define REG_MACNP   (REG_BASE(dev) + 0x034) /* Number of packets pending */
#define REG_MACTR   (REG_BASE(dev) + 0x038) /* Transmission request */

/* ETH MAC Receive Control bit fields set value */
#define BIT_MACRCTL_RSTFIFO 0x10 /* Reset RX FIFO */
#define BIT_MACRCTL_BADCRC  0x8  /* Reject frames with bad CRC */
#define BIT_MACRCTL_RXEN    0x1  /* Receiver enable */
#define BIT_MACRCTL_PRMS    0x4  /* Promiscuous mode */

/* ETH MAC Transmit Control bit fields set value */
#define BIT_MACTCTL_DUPLEX 0x10 /* Full-duplex mode */
#define BIT_MACTCTL_CRC    0x4  /* Auto-append CRC */
#define BIT_MACTCTL_PADEN  0x2  /* Pad short frames */
#define BIT_MACTCTL_TXEN   0x1  /* Transmitter enable */

/* ETH MAC Txn req bit fields set value */
#define BIT_MACTR_NEWTX 0x1 /* Start transmission of the queued frame */

/* Ethernet MAC RAW Interrupt Status/Ack bit set values */
#define BIT_MACRIS_RXINT 0x1  /* Frame received */
#define BIT_MACRIS_TXER  0x2  /* Transmit error */
#define BIT_MACRIS_TXEMP 0x4  /* TX FIFO empty */
#define BIT_MACRIS_FOV   0x8  /* RX FIFO overrun */
#define BIT_MACRIS_RXER  0x10 /* Receive error */

/* Per-instance mutable driver state. */
struct eth_stellaris_runtime {
	struct net_if *iface; /* Bound network interface */
	uint8_t mac_addr[6];  /* Station MAC address */
	struct k_sem tx_sem;  /* TX semaphore */
	bool tx_err;          /* Set when the last TX reported an error */
	uint32_t tx_word;     /* 32-bit word being assembled for the data FIFO */
	int tx_pos;           /* Current byte position within tx_word */
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	struct net_stats_eth stats; /* Interface statistics */
#endif
};

/* IRQ configuration hook, supplied per instance at build time. */
typedef void (*eth_stellaris_config_irq_t)(const struct device *dev);

/* Per-instance immutable configuration (from devicetree/build). */
struct eth_stellaris_config {
	uint32_t mac_base;      /* MAC register block base address */
	uint32_t sys_ctrl_base; /* System control block base address */
	uint32_t irq_num;       /* Interrupt line number */
	eth_stellaris_config_irq_t config_func; /* IRQ connect/enable hook */
};

#endif /* ETH_STELLARIS_PRIV_H_ */
```
/content/code_sandbox/drivers/ethernet/eth_stellaris_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
568
```unknown
# ESP32 Ethernet driver configuration options

menuconfig ETH_ESP32
	bool "ESP32 Ethernet driver"
	default y
	depends on SOC_SERIES_ESP32
	depends on DT_HAS_ESPRESSIF_ESP32_ETH_ENABLED
	# The driver talks to the PHY over MDIO, so pull in the MDIO subsystem.
	select MDIO
	help
	  Enable ESP32 Ethernet driver.

if ETH_ESP32

# Stack size for the thread that services received frames.
config ETH_ESP32_RX_THREAD_STACK_SIZE
	int "ESP32 Ethernet receive thread stack size"
	default 768

# Scheduling priority of the receive thread.
config ETH_ESP32_RX_THREAD_PRIORITY
	int "ESP32 Ethernet receive thread priority"
	default 2

endif # ETH_ESP32
```
/content/code_sandbox/drivers/ethernet/Kconfig.esp32
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
122
```unknown
# W5500 Ethernet driver configuration options
# Author: Parthiban Nallathambi <parthiban@linumiz.com>

menuconfig ETH_W5500
	bool "W5500 Ethernet Controller"
	default y
	depends on DT_HAS_WIZNET_W5500_ENABLED
	select SPI
	help
	  W5500 Stand-Alone Ethernet Controller with SPI Interface

# All options below apply only when the driver is enabled; the if-block
# below is equivalent to a per-option "depends on ETH_W5500".
if ETH_W5500

config ETH_W5500_RX_THREAD_STACK_SIZE
	int "Stack size for internal incoming packet handler"
	default 800
	help
	  Size of the stack used for internal thread which is
	  ran for incoming packet processing.

config ETH_W5500_RX_THREAD_PRIO
	int "Priority for internal incoming packet handler"
	default 2
	help
	  Priority level for internal thread which is ran for
	  incoming packet processing.

config ETH_W5500_TIMEOUT
	int "IP buffer timeout"
	default 100
	help
	  Given timeout in milliseconds. Maximum amount of time
	  that the driver will wait from the IP stack to get
	  a memory buffer before the Ethernet frame is dropped.

endif # ETH_W5500
```
/content/code_sandbox/drivers/ethernet/Kconfig.w5500
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
245
```unknown
# ETH_ENC28J60 Ethernet driver configuration options

menuconfig ETH_ENC28J60
	bool "ENC28J60C Ethernet Controller"
	default y
	depends on DT_HAS_MICROCHIP_ENC28J60_ENABLED
	select SPI
	help
	  ENC28J60C Stand-Alone Ethernet Controller with SPI Interface

# The surrounding "if ETH_ENC28J60" block already adds an implicit
# "depends on ETH_ENC28J60" to every option inside it, so the per-option
# depends lines were redundant and have been removed.
if ETH_ENC28J60

config ETH_ENC28J60_RX_THREAD_STACK_SIZE
	int "Stack size for internal incoming packet handler"
	default 800
	help
	  Size of the stack used for internal thread which is
	  ran for incoming packet processing.

config ETH_ENC28J60_RX_THREAD_PRIO
	int "Priority for internal incoming packet handler"
	default 2
	help
	  Priority level for internal thread which is ran for
	  incoming packet processing.

config ETH_ENC28J60_CLKRDY_INIT_WAIT_MS
	int "Time to wait for the CLKRDY bit on driver init"
	default 2
	help
	  Timeout in milliseconds. Maximum time the initialisation of the
	  driver will wait for the OST to expire, indicated by the CLKRDY
	  bit set. If timeout driver init will fail with -ETIMEDOUT.

config ETH_ENC28J60_TIMEOUT
	int "IP buffer timeout"
	default 100
	help
	  Given timeout in milliseconds. Maximum amount of time
	  that the driver will wait from the IP stack to get
	  a memory buffer before the Ethernet frame is dropped.

endif # ETH_ENC28J60
```
/content/code_sandbox/drivers/ethernet/Kconfig.enc28j60
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
346
```c /* * */ #define DT_DRV_COMPAT nuvoton_numaker_ethernet #include <zephyr/kernel.h> #include <zephyr/drivers/reset.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/clock_control_numaker.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/logging/log.h> #include <zephyr/net/ethernet.h> #include "eth_numaker_priv.h" #include "ethernet/eth_stats.h" #include <soc.h> #include <NuMicro.h> #include <synopGMAC_network_interface.h> #ifdef CONFIG_SOC_M467 #include <m460_eth.h> #endif LOG_MODULE_REGISTER(eth_numaker, CONFIG_ETHERNET_LOG_LEVEL); /* Device EMAC Interface port */ #define NUMAKER_GMAC_INTF 0 /* 2KB Data Flash at 0xFF800 */ #define NUMAKER_DATA_FLASH (0xFF800U) #define NUMAKER_MASK_32 (0xFFFFFFFFU) #define NUMAKER_MII_CONFIG (ADVERTISE_CSMA | ADVERTISE_10HALF | ADVERTISE_10FULL | \ ADVERTISE_100HALF | ADVERTISE_100FULL) #define NUMAKER_MII_LINKED (BMSR_ANEGCOMPLETE | BMSR_LSTATUS) extern synopGMACdevice GMACdev[GMAC_CNT]; extern struct sk_buff tx_buf[GMAC_CNT][TRANSMIT_DESC_SIZE]; extern struct sk_buff rx_buf[GMAC_CNT][RECEIVE_DESC_SIZE]; static uint32_t eth_phy_addr; /* Device config */ struct eth_numaker_config { uint32_t gmac_base; const struct reset_dt_spec reset; uint32_t phy_addr; uint32_t clk_modidx; uint32_t clk_src; uint32_t clk_div; const struct device *clk_dev; const struct pinctrl_dev_config *pincfg; }; /* Driver context/data */ struct eth_numaker_data { synopGMACdevice *gmacdev; struct net_if *iface; uint8_t mac_addr[NU_HWADDR_SIZE]; struct k_mutex tx_frame_buf_mutex; struct k_spinlock rx_frame_buf_lock; }; /* Delay execution for given amount of ticks for SDK-HAL */ void plat_delay(uint32_t delay) { uint32_t us_cnt = k_ticks_to_us_floor32((uint64_t)delay); k_busy_wait(us_cnt); } static void mdio_write(synopGMACdevice *gmacdev, uint32_t addr, uint32_t reg, int data) { synopGMAC_write_phy_reg((u32 *)gmacdev->MacBase, addr, reg, data); } static int mdio_read(synopGMACdevice *gmacdev, uint32_t addr, uint32_t reg) 
{ uint16_t data; synopGMAC_read_phy_reg((u32 *)gmacdev->MacBase, addr, reg, &data); return data; } static int numaker_eth_link_ok(synopGMACdevice *gmacdev) { /* first, a dummy read to latch */ mdio_read(gmacdev, eth_phy_addr, MII_BMSR); if (mdio_read(gmacdev, eth_phy_addr, MII_BMSR) & BMSR_LSTATUS) { return 1; } return 0; } static int reset_phy(synopGMACdevice *gmacdev) { uint16_t reg; uint32_t delay_us; bool ret; mdio_write(gmacdev, eth_phy_addr, MII_BMCR, BMCR_RESET); delay_us = 200000U; ret = WAIT_FOR(!(mdio_read(gmacdev, eth_phy_addr, MII_BMCR) & BMCR_RESET), delay_us, k_msleep(1)); if (ret == false) { LOG_DBG("Reset phy failed"); return -EIO; } LOG_INF("PHY ID 1:0x%x", mdio_read(gmacdev, eth_phy_addr, MII_PHYSID1)); LOG_INF("PHY ID 2:0x%x", mdio_read(gmacdev, eth_phy_addr, MII_PHYSID2)); delay_us = 3000000U; ret = WAIT_FOR(numaker_eth_link_ok(gmacdev), delay_us, k_msleep(1)); if (ret) { gmacdev->LinkState = LINKUP; LOG_DBG("Link Up"); } else { gmacdev->LinkState = LINKDOWN; LOG_DBG("Link Down"); return -EIO; } mdio_write(gmacdev, eth_phy_addr, MII_ADVERTISE, NUMAKER_MII_CONFIG); reg = mdio_read(gmacdev, eth_phy_addr, MII_BMCR); mdio_write(gmacdev, eth_phy_addr, MII_BMCR, reg | BMCR_ANRESTART); delay_us = 3000000U; ret = WAIT_FOR((mdio_read(gmacdev, eth_phy_addr, MII_BMSR) & NUMAKER_MII_LINKED) == NUMAKER_MII_LINKED, delay_us, k_msleep(1)); if (ret == false) { LOG_DBG("AN failed. 
Set to 100 FULL"); synopGMAC_set_full_duplex(gmacdev); synopGMAC_set_mode(NUMAKER_GMAC_INTF, 1); /* Set mode 1: 100Mbps; 2: 10Mbps */ return -EIO; } reg = mdio_read(gmacdev, eth_phy_addr, MII_LPA); if (reg & ADVERTISE_100FULL) { LOG_DBG("100 full"); gmacdev->DuplexMode = FULLDUPLEX; gmacdev->Speed = SPEED100; synopGMAC_set_full_duplex(gmacdev); synopGMAC_set_mode(NUMAKER_GMAC_INTF, 1); /* Set mode 1: 100Mbps; 2: 10Mbps */ } else if (reg & ADVERTISE_100HALF) { LOG_DBG("100 half"); gmacdev->DuplexMode = HALFDUPLEX; gmacdev->Speed = SPEED100; synopGMAC_set_half_duplex(gmacdev); synopGMAC_set_mode(NUMAKER_GMAC_INTF, 1); /* Set mode 1: 100Mbps; 2: 10Mbps */ } else if (reg & ADVERTISE_10FULL) { LOG_DBG("10 full"); gmacdev->DuplexMode = FULLDUPLEX; gmacdev->Speed = SPEED10; synopGMAC_set_full_duplex(gmacdev); synopGMAC_set_mode(NUMAKER_GMAC_INTF, 2); /* Set mode 1: 100Mbps; 2: 10Mbps */ } else { LOG_DBG("10 half"); gmacdev->DuplexMode = HALFDUPLEX; gmacdev->Speed = SPEED10; synopGMAC_set_half_duplex(gmacdev); synopGMAC_set_mode(NUMAKER_GMAC_INTF, 2); /* Set mode 1: 100Mbps; 2: 10Mbps */ } return 0; } static void m_numaker_read_mac_addr(char *mac) { uint32_t uid1; /* Fetch word 0 of data flash */ uint32_t word0 = *(uint32_t *)(NUMAKER_DATA_FLASH + 0x04U); /* * Fetch word 1 of data flash * we only want bottom 16 bits of word1 (MAC bits 32-47) * and bit 9 forced to 1, bit 8 forced to 0 * Locally administered MAC, reduced conflicts * path_to_url */ uint32_t word1 = *(uint32_t *)NUMAKER_DATA_FLASH; /* Not burn any mac address at the beginning of data flash */ if (word0 == NUMAKER_MASK_32) { /* Generate a semi-unique MAC address from the UUID */ SYS_UnlockReg(); /* Enable FMC ISP function */ FMC_Open(); uid1 = FMC_ReadUID(1); word1 = (uid1 & 0x003FFFFF) | ((uid1 & 0x030000) << 6) >> 8; word0 = ((FMC_ReadUID(0) >> 4) << 20) | ((uid1 & 0xFF) << 12) | (FMC_ReadUID(2) & 0xFFF); /* Disable FMC ISP function */ FMC_Close(); /* Lock protected registers */ SYS_LockReg(); } word1 |= 
0x00000200; word1 &= 0x0000FEFF; mac[0] = (word1 & 0x0000ff00) >> 8; mac[1] = (word1 & 0x000000ff); mac[2] = (word0 & 0xff000000) >> 24; mac[3] = (word0 & 0x00ff0000) >> 16; mac[4] = (word0 & 0x0000ff00) >> 8; mac[5] = (word0 & 0x000000ff); LOG_INF("mac address %02x:%02x:%02x:%02x:%02x:%02x", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); } static void m_numaker_gmacdev_enable(synopGMACdevice *gmacdev) { synopGMAC_clear_interrupt(gmacdev); /* Enable INT & TX/RX */ synopGMAC_enable_interrupt(gmacdev, DmaIntEnable); synopGMAC_enable_dma_rx(gmacdev); synopGMAC_enable_dma_tx(gmacdev); synopGMAC_tx_enable(gmacdev); synopGMAC_rx_enable(gmacdev); } static int m_numaker_gmacdev_init(synopGMACdevice *gmacdev, uint8_t *mac_addr, uint32_t gmac_base) { int status; int i; uint32_t offload_needed = 0; struct sk_buff *skb; LOG_DBG(""); /*Attach the device to MAC struct This will configure all the required base * addresses such as Mac base, configuration base, phy base address(out of 32 * possible phys ) */ synopGMAC_attach(gmacdev, gmac_base + MACBASE, gmac_base + DMABASE, DEFAULT_PHY_BASE); synopGMAC_disable_interrupt_all(gmacdev); /* Reset MAC */ synopGMAC_reset(gmacdev); gmacdev->Intf = NUMAKER_GMAC_INTF; synopGMAC_read_version(gmacdev); /* Check for Phy initialization */ synopGMAC_set_mdc_clk_div(gmacdev, GmiiCsrClk5); gmacdev->ClockDivMdc = synopGMAC_get_mdc_clk_div(gmacdev); /* Reset PHY */ status = reset_phy(gmacdev); /* Set up the tx and rx descriptor queue/ring */ synopGMAC_setup_tx_desc_queue(gmacdev, TRANSMIT_DESC_SIZE, RINGMODE); synopGMAC_init_tx_desc_base(gmacdev); synopGMAC_setup_rx_desc_queue(gmacdev, RECEIVE_DESC_SIZE, RINGMODE); synopGMAC_init_rx_desc_base(gmacdev); /* Initialize the dma interface */ synopGMAC_dma_bus_mode_init(gmacdev, DmaBurstLength32 | DmaDescriptorSkip0 | DmaDescriptor8Words); synopGMAC_dma_control_init(gmacdev, DmaStoreAndForward | DmaTxSecondFrame | DmaRxThreshCtrl128); /* Initialize the mac interface */ synopGMAC_mac_init(gmacdev); 
synopGMAC_promisc_enable(gmacdev); /* This enables the pause control in Full duplex mode of operation */ synopGMAC_pause_control(gmacdev); #if defined(NU_USING_HW_CHECKSUM) /*IPC Checksum offloading is enabled for this driver. Should only be used if * Full Ip checksumm offload engine is configured in the hardware */ offload_needed = 1; /* Enable the offload engine in the receive path */ synopGMAC_enable_rx_chksum_offload(gmacdev); /* Default configuration, DMA drops the packets if error in encapsulated ethernet payload */ synopGMAC_rx_tcpip_chksum_drop_enable(gmacdev); #endif for (i = 0; i < RECEIVE_DESC_SIZE; i++) { skb = &rx_buf[NUMAKER_GMAC_INTF][i]; synopGMAC_set_rx_qptr(gmacdev, (u32)((u64)(skb->data) & NUMAKER_MASK_32), sizeof(skb->data), (u32)((u64)skb & NUMAKER_MASK_32)); } for (i = 0; i < TRANSMIT_DESC_SIZE; i++) { skb = &tx_buf[NUMAKER_GMAC_INTF][i]; synopGMAC_set_tx_qptr(gmacdev, (u32)((u64)(skb->data) & NUMAKER_MASK_32), sizeof(skb->data), (u32)((u64)skb & NUMAKER_MASK_32), offload_needed, 0); } synopGMAC_set_mac_address(NUMAKER_GMAC_INTF, mac_addr); synopGMAC_clear_interrupt(gmacdev); return status; } static int m_numaker_gmacdev_get_rx_buf(synopGMACdevice *gmacdev, uint16_t *len, uint8_t **buf) { DmaDesc *rxdesc = gmacdev->RxBusyDesc; LOG_DBG("start"); if (synopGMAC_is_desc_owned_by_dma(rxdesc)) { return -EIO; } if (synopGMAC_is_desc_empty(rxdesc)) { return -EIO; } *len = synop_handle_received_data(NUMAKER_GMAC_INTF, buf); if (*len <= 0) { synopGMAC_enable_interrupt(gmacdev, DmaIntEnable); return -ENOSPC; /* No available RX frame */ } /* length of payload should be <= 1514 */ if (*len > (NU_ETH_MAX_FLEN - 4)) { LOG_DBG("unexpected long packet length=%d, buf=0x%x", *len, (uint32_t)*buf); *len = 0; /* Skip this unexpected long packet */ } LOG_DBG("end"); return 0; } static void m_numaker_gmacdev_rx_next(synopGMACdevice *gmacdev) { LOG_DBG("RX Next"); /* Already did in synop_handle_received_data * No-op at this stage * DmaDesc * rxdesc = 
(gmacdev->RxBusyDesc - 1); * rxdesc->status = DescOwnByDma; */ } static void m_numaker_gmacdev_trigger_rx(synopGMACdevice *gmacdev) { LOG_DBG("start"); /* Enable the interrupt */ synopGMAC_enable_interrupt(gmacdev, DmaIntEnable); /* Trigger RX DMA */ synopGMAC_enable_dma_rx(gmacdev); synopGMAC_resume_dma_rx(gmacdev); LOG_DBG("resume RX DMA"); LOG_DBG("end"); } static void m_numaker_gmacdev_packet_rx(const struct device *dev) { struct eth_numaker_data *data = dev->data; synopGMACdevice *gmacdev = data->gmacdev; uint8_t *buffer; uint16_t len; struct net_pkt *pkt; k_spinlock_key_t key; int res; /* Get exclusive access, use spin-lock instead of mutex in ISR */ key = k_spin_lock(&data->rx_frame_buf_lock); /* Two approach: 1. recv all RX packets in one time. * 2. recv one RX and set pending interrupt for rx-next. */ while (1) { /* get received frame */ if (m_numaker_gmacdev_get_rx_buf(gmacdev, &len, &buffer) != 0) { break; } if (len == 0) { LOG_WRN("No available RX frame"); break; } /* Allocate a memory buffer chain from buffer pool * Using root iface. 
It will be updated in net_recv_data() */ pkt = net_pkt_rx_alloc_with_buffer(data->iface, len, AF_UNSPEC, 0, K_NO_WAIT); if (!pkt) { LOG_ERR("pkt alloc frame-len=%d failed", len); goto next; } LOG_DBG("length=%d, pkt=0x%x", len, (uint32_t)pkt); /* deliver RX packet to upper layer, pack as one net_pkt */ if (net_pkt_write(pkt, buffer, len)) { LOG_ERR("Unable to write RX frame into the pkt"); net_pkt_unref(pkt); goto error; } if (pkt != NULL) { res = net_recv_data(data->iface, pkt); if (res < 0) { LOG_ERR("net_recv_data: %d", res); net_pkt_unref(pkt); goto error; } } next: m_numaker_gmacdev_rx_next(gmacdev); } m_numaker_gmacdev_trigger_rx(gmacdev); error: k_spin_unlock(&data->rx_frame_buf_lock, key); } static uint8_t *m_numaker_gmacdev_get_tx_buf(synopGMACdevice *gmacdev) { DmaDesc *txdesc = gmacdev->TxNextDesc; if (!synopGMAC_is_desc_empty(txdesc)) { return NULL; } if (synopGMAC_is_desc_owned_by_dma(txdesc)) { return NULL; } return (uint8_t *)(txdesc->buffer1); } static void m_numaker_gmacdev_trigger_tx(synopGMACdevice *gmacdev, uint16_t length) { DmaDesc *txdesc = gmacdev->TxNextDesc; uint32_t txnext = gmacdev->TxNext; bool offload_needed = IS_ENABLED(NU_USING_HW_CHECKSUM); /* busy tx descriptor is incremented by one as it will be handed over to DMA */ (gmacdev->BusyTxDesc)++; txdesc->length |= ((length << DescSize1Shift) & DescSize1Mask); txdesc->status |= (DescTxFirst | DescTxLast | DescTxIntEnable); if (offload_needed) { /* * Make sure that the OS you are running supports the IP and TCP checksum * offloading, before calling any of the functions given below. */ synopGMAC_tx_checksum_offload_tcp_pseudo(gmacdev, txdesc); } else { synopGMAC_tx_checksum_offload_bypass(gmacdev, txdesc); } __DSB(); txdesc->status |= DescOwnByDma; gmacdev->TxNext = synopGMAC_is_last_tx_desc(gmacdev, txdesc) ? 0 : txnext + 1; gmacdev->TxNextDesc = synopGMAC_is_last_tx_desc(gmacdev, txdesc) ? 
gmacdev->TxDesc : (txdesc + 1); /* Enable the interrupt */ synopGMAC_enable_interrupt(gmacdev, DmaIntEnable); /* Trigger TX DMA */ synopGMAC_resume_dma_tx(gmacdev); } static int numaker_eth_tx(const struct device *dev, struct net_pkt *pkt) { struct eth_numaker_data *data = dev->data; synopGMACdevice *gmacdev = data->gmacdev; uint16_t total_len = net_pkt_get_len(pkt); uint8_t *buffer; /* Get exclusive access */ k_mutex_lock(&data->tx_frame_buf_mutex, K_FOREVER); if (total_len > NET_ETH_MAX_FRAME_SIZE) { /* NuMaker SDK reserve 2048 for tx_buf */ LOG_ERR("TX packet length [%d] over max [%d]", total_len, NET_ETH_MAX_FRAME_SIZE); goto error; } buffer = m_numaker_gmacdev_get_tx_buf(gmacdev); LOG_DBG("buffer=0x%x", (uint32_t)buffer); if (buffer == NULL) { goto error; } if (net_pkt_read(pkt, buffer, total_len)) { goto error; } /* Prepare transmit descriptors to give to DMA */ m_numaker_gmacdev_trigger_tx(gmacdev, total_len); k_mutex_unlock(&data->tx_frame_buf_mutex); return 0; error: LOG_ERR("Writing pkt to TX descriptor failed"); k_mutex_unlock(&data->tx_frame_buf_mutex); return -EIO; } static void numaker_eth_if_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_numaker_data *data = dev->data; synopGMACdevice *gmacdev = data->gmacdev; LOG_DBG("eth_if_init"); /* Read mac address */ m_numaker_read_mac_addr(data->mac_addr); net_if_set_link_addr(iface, data->mac_addr, sizeof(data->mac_addr), NET_LINK_ETHERNET); data->iface = iface; ethernet_init(iface); /* Enable GMAC device INT & TX/RX */ m_numaker_gmacdev_enable(gmacdev); } static int numaker_eth_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct eth_numaker_data *data = dev->data; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(data->mac_addr, config->mac_address.addr, sizeof(data->mac_addr)); synopGMAC_set_mac_address(NUMAKER_GMAC_INTF, data->mac_addr); net_if_set_link_addr(data->iface, data->mac_addr, 
sizeof(data->mac_addr), NET_LINK_ETHERNET); LOG_DBG("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name, data->mac_addr[0], data->mac_addr[1], data->mac_addr[2], data->mac_addr[3], data->mac_addr[4], data->mac_addr[5]); return 0; default: return -ENOTSUP; } } static enum ethernet_hw_caps numaker_eth_get_cap(const struct device *dev) { ARG_UNUSED(dev); #if defined(NU_USING_HW_CHECKSUM) return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_HW_RX_CHKSUM_OFFLOAD; #else return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T; #endif } static const struct ethernet_api eth_numaker_driver_api = { .iface_api.init = numaker_eth_if_init, .get_capabilities = numaker_eth_get_cap, .set_config = numaker_eth_set_config, .send = numaker_eth_tx, }; /* EMAC IRQ Handler */ static void eth_numaker_isr(const struct device *dev) { struct eth_numaker_data *data = dev->data; synopGMACdevice *gmacdev = data->gmacdev; uint32_t interrupt; uint32_t dma_status_reg; uint32_t mac_status_reg; int status; uint32_t dma_ie = DmaIntEnable; uint32_t volatile reg; /* Check GMAC interrupt */ mac_status_reg = synopGMACReadReg((u32 *)gmacdev->MacBase, GmacInterruptStatus); if (mac_status_reg & GmacTSIntSts) { gmacdev->synopGMACNetStats.ts_int = 1; status = synopGMACReadReg((u32 *)gmacdev->MacBase, GmacTSStatus); if (!(status & BIT(1))) { LOG_WRN("TS alarm flag not set??"); } else { LOG_DBG("TS alarm"); } } if (mac_status_reg & GmacLPIIntSts) { LOG_DBG("LPI"); } if (mac_status_reg & GmacRgmiiIntSts) { reg = synopGMACReadReg((u32 *)gmacdev->MacBase, GmacRgmiiCtrlSts); } synopGMACWriteReg((u32 *)gmacdev->MacBase, GmacInterruptStatus, mac_status_reg); /* Read the Dma interrupt status to know whether the interrupt got generated by * our device or not */ dma_status_reg = synopGMACReadReg((u32 *)gmacdev->DmaBase, DmaStatus); LOG_DBG("i %08x %08x", mac_status_reg, dma_status_reg); if (dma_status_reg == 0) { return; } synopGMAC_disable_interrupt_all(gmacdev); LOG_DBG("Dma Status Reg: 0x%08x", 
dma_status_reg); if (dma_status_reg & GmacPmtIntr) { LOG_DBG("Interrupt due to PMT module"); synopGMAC_powerup_mac(gmacdev); } if (dma_status_reg & GmacLineIntfIntr) { LOG_DBG("Interrupt due to GMAC LINE module"); } /* Now lets handle the DMA interrupts */ interrupt = synopGMAC_get_interrupt_type(gmacdev); LOG_DBG("Interrupts to be handled: 0x%08x", interrupt); if (interrupt & synopGMACDmaError) { LOG_DBG("Fatal Bus Error Interrupt Seen"); synopGMAC_disable_dma_tx(gmacdev); synopGMAC_disable_dma_rx(gmacdev); synopGMAC_take_desc_ownership_tx(gmacdev); synopGMAC_take_desc_ownership_rx(gmacdev); synopGMAC_init_tx_rx_desc_queue(gmacdev); synopGMAC_reset(gmacdev); /* reset the DMA engine and the GMAC ip */ synopGMAC_set_mac_address(NUMAKER_GMAC_INTF, data->mac_addr); synopGMAC_dma_bus_mode_init(gmacdev, DmaFixedBurstEnable | DmaBurstLength8 | DmaDescriptorSkip0); synopGMAC_dma_control_init(gmacdev, DmaStoreAndForward); synopGMAC_init_rx_desc_base(gmacdev); synopGMAC_init_tx_desc_base(gmacdev); synopGMAC_mac_init(gmacdev); synopGMAC_enable_dma_rx(gmacdev); synopGMAC_enable_dma_tx(gmacdev); } if (interrupt & synopGMACDmaRxNormal) { LOG_DBG("Rx Normal"); /* disable RX interrupt */ dma_ie &= ~DmaIntRxNormMask; /* to handle received data */ m_numaker_gmacdev_packet_rx(dev); } if (interrupt & synopGMACDmaRxAbnormal) { LOG_ERR("Abnormal Rx Interrupt Seen"); /* If Mac is not in powerdown */ if (gmacdev->GMAC_Power_down == 0) { gmacdev->synopGMACNetStats.rx_over_errors++; dma_ie &= ~DmaIntRxAbnMask; /* To handle GBPS with 12 descriptors. 
*/ synopGMAC_resume_dma_rx(gmacdev); } } /* Receiver gone in to stopped state */ if (interrupt & synopGMACDmaRxStopped) { LOG_ERR("Receiver stopped seeing Rx interrupts"); if (gmacdev->GMAC_Power_down == 0) { gmacdev->synopGMACNetStats.rx_over_errors++; synopGMAC_enable_dma_rx(gmacdev); } } if (interrupt & synopGMACDmaTxNormal) { LOG_DBG("Finished Normal Transmission"); synop_handle_transmit_over(0); /* No-op at this stage for TX INT */ } if (interrupt & synopGMACDmaTxAbnormal) { LOG_ERR("Abnormal Tx Interrupt Seen"); if (gmacdev->GMAC_Power_down == 0) { synop_handle_transmit_over(0); /* No-op at this stage for TX INT */ } } if (interrupt & synopGMACDmaTxStopped) { LOG_ERR("Transmitter stopped sending the packets"); if (gmacdev->GMAC_Power_down == 0) { synopGMAC_disable_dma_tx(gmacdev); synopGMAC_take_desc_ownership_tx(gmacdev); synopGMAC_enable_dma_tx(gmacdev); LOG_ERR("Transmission Resumed"); } } /* Enable the interrupt before returning from ISR*/ synopGMAC_enable_interrupt(gmacdev, dma_ie); } /* Declare pin-ctrl __pinctrl_dev_config__device_dts_ord_xx before * PINCTRL_DT_INST_DEV_CONFIG_GET() */ PINCTRL_DT_INST_DEFINE(0); static int eth_numaker_init(const struct device *dev) { const struct eth_numaker_config *cfg = dev->config; struct eth_numaker_data *data = dev->data; synopGMACdevice *gmacdev; /* Init MAC Address based on UUID*/ uint8_t mac_addr[NU_HWADDR_SIZE]; int ret = 0; struct numaker_scc_subsys scc_subsys; gmacdev = &GMACdev[NUMAKER_GMAC_INTF]; data->gmacdev = gmacdev; k_mutex_init(&data->tx_frame_buf_mutex); eth_phy_addr = cfg->phy_addr; /* CLK controller */ memset(&scc_subsys, 0x00, sizeof(scc_subsys)); scc_subsys.subsys_id = NUMAKER_SCC_SUBSYS_ID_PCC; scc_subsys.pcc.clk_modidx = cfg->clk_modidx; scc_subsys.pcc.clk_src = cfg->clk_src; scc_subsys.pcc.clk_div = cfg->clk_div; /* Equivalent to CLK_EnableModuleClock() */ ret = clock_control_on(cfg->clk_dev, (clock_control_subsys_t)&scc_subsys); if (ret != 0) { goto done; } /* For EMAC, not need 
CLK_SetModuleClock() * Validate this module's reset object */ if (!device_is_ready(cfg->reset.dev)) { LOG_ERR("reset controller not ready"); return -ENODEV; } SYS_UnlockReg(); irq_disable(DT_INST_IRQN(0)); ret = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (ret != 0) { LOG_ERR("Failed to apply pinctrl state"); goto done; } /* Reset EMAC to default state, same as BSP's SYS_ResetModule(id_rst) */ reset_line_toggle_dt(&cfg->reset); /* Read mac address */ m_numaker_read_mac_addr(mac_addr); /* Configure GMAC device */ ret = m_numaker_gmacdev_init(gmacdev, mac_addr, cfg->gmac_base); if (ret != 0) { LOG_ERR("GMAC failed to initialize"); goto done; } IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_numaker_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); done: SYS_LockReg(); return ret; } static struct eth_numaker_data eth_numaker_data_inst; /* Set config based on DTS */ static struct eth_numaker_config eth_numaker_cfg_inst = { .gmac_base = (uint32_t)DT_INST_REG_ADDR(0), .reset = RESET_DT_SPEC_INST_GET(0), .phy_addr = DT_INST_PROP(0, phy_addr), .clk_modidx = DT_INST_CLOCKS_CELL(0, clock_module_index), .clk_src = DT_INST_CLOCKS_CELL(0, clock_source), .clk_div = DT_INST_CLOCKS_CELL(0, clock_divider), .clk_dev = DEVICE_DT_GET(DT_PARENT(DT_INST_CLOCKS_CTLR(0))), .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), .reset = RESET_DT_SPEC_INST_GET(0), }; ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_numaker_init, NULL, &eth_numaker_data_inst, &eth_numaker_cfg_inst, CONFIG_ETH_INIT_PRIORITY, &eth_numaker_driver_api, NET_ETH_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_numaker.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,165
```unknown
menuconfig ETH_NXP_S32_NETC
	bool "NXP S32 Ethernet Switch and Controller (NETC) driver"
	default y
	depends on (DT_HAS_NXP_S32_NETC_PSI_ENABLED || DT_HAS_NXP_S32_NETC_VSI_ENABLED)
	select MBOX
	# MDIO is only needed for the physical station interface (PSI),
	# which owns the external PHY.
	select MDIO if DT_HAS_NXP_S32_NETC_PSI_ENABLED
	select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT
	help
	  Enable Ethernet Switch and Controller (NETC) driver for NXP S32 SoCs.

if ETH_NXP_S32_NETC

config ETH_NXP_S32_RX_THREAD_PRIO
	int "RX thread priority"
	default 2
	help
	  RX thread priority. RX thread is a cooperative thread.

config ETH_NXP_S32_RX_THREAD_STACK_SIZE
	int "RX thread stack size"
	default 1500
	help
	  RX thread stack size.

config ETH_NXP_S32_RX_BUDGET
	int "RX thread budget"
	default 128
	range 1 1024
	help
	  The budget parameter places a limit on the amount of work the driver
	  may do in the RX thread before yielding the processor, in case there
	  is more work to do. This is to prevent the RX thread to starve other
	  threads. Each received frame counts as one unit of work.

# Ring length / buffer-size options: note the help text requires each value
# to be a multiple of 8.
config ETH_NXP_S32_TX_RING_LEN
	int "TX ring length"
	default 64
	range 8 256
	help
	  Length of the TX ring. The value must be a multiple of 8.

config ETH_NXP_S32_TX_RING_BUF_SIZE
	int "TX ring data buffer size"
	default 1536
	range 64 1536
	help
	  Size, in bytes, of the TX data buffer. The size must be big enough to
	  store one complete Ethernet frame, and be a multiple of 8.

config ETH_NXP_S32_RX_RING_LEN
	int "RX ring length"
	default 64
	range 8 256
	help
	  Length of the RX ring. The value must be a multiple of 8.

config ETH_NXP_S32_RX_RING_BUF_SIZE
	int "RX ring data buffer size"
	default 1536
	range 64 1536
	help
	  Size, in bytes, of the RX data buffer. The size must be big enough to
	  store one complete Ethernet frame, and be a multiple of 8.

config ETH_NXP_S32_RX_IRQ_PACKET_THRESHOLD
	int "RX interrupt coalescing packet threshold"
	default 32
	range 1 511
	help
	  RX interrupt coalescing packet threshold. This values determines the
	  minimum number of packets received before raising an interrupt.

config ETH_NXP_S32_RX_IRQ_TIMER_THRESHOLD
	int "RX interrupt coalescing timer threshold"
	default 60000
	range 1 $(UINT32_MAX)
	help
	  RX interrupt coalescing timer threshold, in units of NETC clock
	  cycles. This value determines the maximum amount of time allowed
	  between a first received packet until RX interrupt coalescing packet
	  threshold is reached. If the timer expires before the threshold is
	  reached, an interrupt is raised.

config ETH_NXP_S32_MAC_FILTER_TABLE_SIZE
	int "MAC filter hash table size"
	default 8
	range 8 128
	help
	  Maximum number of entries supported in the MAC filter hash table.

config ETH_NXP_S32_VSI_INIT_PRIORITY
	int
	default 81
	help
	  VSI initialization priority. It must be bigger than PSI init priority
	  (CONFIG_ETH_INIT_PRIORITY) and lower than CONFIG_NET_INIT_PRIO, so
	  that it can start after the PSI but before the networking sub-system.

endif # ETH_NXP_S32_NETC
```
/content/code_sandbox/drivers/ethernet/Kconfig.nxp_s32_netc
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
785
```c /* * */ #define DT_DRV_COMPAT nxp_s32_netc_vsi #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_eth_vsi); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/mbox.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/phy.h> #include <ethernet/eth_stats.h> #include <soc.h> #include <Netc_Eth_Ip.h> #include <Netc_Eth_Ip_Irq.h> #include <Netc_EthSwt_Ip.h> #include "eth.h" #include "eth_nxp_s32_netc_priv.h" #define TX_RING_IDX 0 #define RX_RING_IDX 0 static void nxp_s32_eth_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct nxp_s32_eth_data *ctx = dev->data; const struct nxp_s32_eth_config *cfg = dev->config; const struct nxp_s32_eth_msix *msix; /* * For VLAN, this value is only used to get the correct L2 driver. * The iface pointer in context should contain the main interface * if the VLANs are enabled. 
*/ if (ctx->iface == NULL) { ctx->iface = iface; } Netc_Eth_Ip_SetMacAddr(cfg->si_idx, (const uint8_t *)ctx->mac_addr); net_if_set_link_addr(iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); LOG_INF("SI%d MAC: %02x:%02x:%02x:%02x:%02x:%02x", cfg->si_idx, ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2], ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]); ethernet_init(iface); /* Assumes PSI is already started and link is up, iface will auto-start after init */ net_eth_carrier_on(iface); for (int i = 0; i < NETC_MSIX_EVENTS_COUNT; i++) { msix = &cfg->msix[i]; if (mbox_is_ready_dt(&msix->mbox_spec)) { if (mbox_set_enabled_dt(&msix->mbox_spec, true)) { LOG_ERR("Failed to enable MRU channel %u", msix->mbox_spec.channel_id); } } } } static const struct ethernet_api nxp_s32_eth_api = { .iface_api.init = nxp_s32_eth_iface_init, .get_capabilities = nxp_s32_eth_get_capabilities, .set_config = nxp_s32_eth_set_config, .send = nxp_s32_eth_tx }; BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(nxp_s32_netc_vsi) == 1, "Only one VSI enabled supported"); #define NETC_VSI_INSTANCE_DEFINE(n) \ NETC_GENERATE_MAC_ADDRESS(n) \ \ void nxp_s32_eth_vsi##n##_rx_event(uint8_t chan, const uint32 *buf, uint8_t buf_size) \ { \ Netc_Eth_Ip_MSIX_Rx(NETC_SI_NXP_S32_HW_INSTANCE(n)); \ } \ \ static void nxp_s32_eth##n##_rx_callback(const uint8_t unused, const uint8_t ring) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ const struct nxp_s32_eth_config *cfg = dev->config; \ struct nxp_s32_eth_data *ctx = dev->data; \ \ if (ring == cfg->rx_ring_idx) { \ k_sem_give(&ctx->rx_sem); \ } \ } \ \ static __nocache Netc_Eth_Ip_StateType nxp_s32_eth##n##_state; \ __nocache Netc_Eth_Ip_VsiToPsiMsgType nxp_s32_eth##n##_vsi2psi_msg \ __aligned(FEATURE_NETC_ETH_VSI_MSG_ALIGNMENT); \ static __nocache Netc_Eth_Ip_MACFilterHashTableEntryType \ nxp_s32_eth##n##_mac_filter_hash_table[CONFIG_ETH_NXP_S32_MAC_FILTER_TABLE_SIZE]; \ \ NETC_RX_RING(n, TX_RING_IDX, CONFIG_ETH_NXP_S32_RX_RING_LEN, \ 
CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE); \ NETC_TX_RING(n, RX_RING_IDX, CONFIG_ETH_NXP_S32_TX_RING_LEN, \ CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE); \ \ static const Netc_Eth_Ip_RxRingConfigType nxp_s32_eth##n##_rxring_cfg[1] = { \ { \ .RingDesc = nxp_s32_eth##n##_rxring0_desc, \ .Buffer = nxp_s32_eth##n##_rxring0_buf, \ .ringSize = CONFIG_ETH_NXP_S32_RX_RING_LEN, \ .maxRingSize = CONFIG_ETH_NXP_S32_RX_RING_LEN, \ .bufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .maxBuffLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .TimerThreshold = CONFIG_ETH_NXP_S32_RX_IRQ_TIMER_THRESHOLD, \ .PacketsThreshold = CONFIG_ETH_NXP_S32_RX_IRQ_PACKET_THRESHOLD, \ .Callback = nxp_s32_eth##n##_rx_callback, \ } \ }; \ static const Netc_Eth_Ip_TxRingConfigType nxp_s32_eth##n##_txring_cfg[1] = { \ { \ .RingDesc = nxp_s32_eth##n##_txring0_desc, \ .Buffer = nxp_s32_eth##n##_txring0_buf, \ .ringSize = CONFIG_ETH_NXP_S32_TX_RING_LEN, \ .maxRingSize = CONFIG_ETH_NXP_S32_TX_RING_LEN, \ .bufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .maxBuffLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ } \ }; \ \ static const Netc_Eth_Ip_StationInterfaceConfigType nxp_s32_eth##n##_si_cfg = { \ .NumberOfRxBDR = 1, \ .NumberOfTxBDR = 1, \ .txMruMailboxAddr = NULL, \ .rxMruMailboxAddr = (uint32 *)MRU_MBOX_ADDR(DT_DRV_INST(n), rx), \ .EnableSIMsgInterrupt = true, \ .RxInterrupts = (uint32_t)true, \ .TxInterrupts = (uint32_t)false, \ .MACFilterTableMaxNumOfEntries = CONFIG_ETH_NXP_S32_MAC_FILTER_TABLE_SIZE, \ .VSItoPSIMsgCommand = &nxp_s32_eth##n##_vsi2psi_msg, \ }; \ \ static struct nxp_s32_eth_data nxp_s32_eth##n##_data = { \ .mac_addr = DT_INST_PROP_OR(n, local_mac_address, {0}), \ }; \ \ static const struct nxp_s32_eth_config nxp_s32_eth##n##_cfg = { \ .netc_cfg = { \ .SiType = NETC_ETH_IP_VIRTUAL_SI, \ .siConfig = &nxp_s32_eth##n##_si_cfg, \ .stateStructure = &nxp_s32_eth##n##_state, \ .paCtrlRxRingConfig = &nxp_s32_eth##n##_rxring_cfg, \ .paCtrlTxRingConfig = &nxp_s32_eth##n##_txring_cfg, \ }, \ .si_idx = 
NETC_SI_NXP_S32_HW_INSTANCE(n), \ .tx_ring_idx = TX_RING_IDX, \ .rx_ring_idx = RX_RING_IDX, \ .msix = { \ NETC_MSIX(DT_DRV_INST(n), rx, nxp_s32_eth_vsi##n##_rx_event), \ }, \ .mac_filter_hash_table = &nxp_s32_eth##n##_mac_filter_hash_table[0], \ .generate_mac = nxp_s32_eth##n##_generate_mac, \ }; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(n, \ nxp_s32_eth_initialize_common, \ NULL, \ &nxp_s32_eth##n##_data, \ &nxp_s32_eth##n##_cfg, \ CONFIG_ETH_NXP_S32_VSI_INIT_PRIORITY, \ &nxp_s32_eth_api, \ NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(NETC_VSI_INSTANCE_DEFINE) ```
/content/code_sandbox/drivers/ethernet/eth_nxp_s32_netc_vsi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,167
```c /* * */ #define DT_DRV_COMPAT silabs_gecko_ethernet /* Silicon Labs EFM32 Giant Gecko 11 Ethernet driver. * Limitations: * - no link monitoring through PHY interrupt */ #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_gecko, CONFIG_ETHERNET_LOG_LEVEL); #include <soc.h> #include <zephyr/device.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <errno.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include <em_cmu.h> #include <zephyr/irq.h> #include "phy_gecko.h" #include "eth_gecko_priv.h" #include "eth.h" static uint8_t dma_tx_buffer[ETH_TX_BUF_COUNT][ETH_TX_BUF_SIZE] __aligned(ETH_BUF_ALIGNMENT); static uint8_t dma_rx_buffer[ETH_RX_BUF_COUNT][ETH_RX_BUF_SIZE] __aligned(ETH_BUF_ALIGNMENT); static struct eth_buf_desc dma_tx_desc_tab[ETH_TX_BUF_COUNT] __aligned(ETH_DESC_ALIGNMENT); static struct eth_buf_desc dma_rx_desc_tab[ETH_RX_BUF_COUNT] __aligned(ETH_DESC_ALIGNMENT); static uint32_t tx_buf_idx; static uint32_t rx_buf_idx; static void link_configure(ETH_TypeDef *eth, uint32_t flags) { uint32_t val; __ASSERT_NO_MSG(eth != NULL); /* Disable receiver & transmitter */ eth->NETWORKCTRL &= ~(ETH_NETWORKCTRL_ENBTX | ETH_NETWORKCTRL_ENBRX); /* Set duplex mode and speed */ val = eth->NETWORKCFG; val &= ~(_ETH_NETWORKCFG_FULLDUPLEX_MASK | _ETH_NETWORKCFG_SPEED_MASK); val |= flags & (_ETH_NETWORKCFG_FULLDUPLEX_MASK | _ETH_NETWORKCFG_SPEED_MASK); eth->NETWORKCFG = val; /* Enable transmitter and receiver */ eth->NETWORKCTRL |= (ETH_NETWORKCTRL_ENBTX | ETH_NETWORKCTRL_ENBRX); } static void eth_gecko_setup_mac(const struct device *dev) { const struct eth_gecko_dev_cfg *const cfg = dev->config; ETH_TypeDef *eth = cfg->regs; uint32_t link_status; int result; /* PHY auto-negotiate link parameters */ result = phy_gecko_auto_negotiate(&cfg->phy, &link_status); if (result < 0) { LOG_ERR("ETH PHY auto-negotiate sequence failed"); return; } LOG_INF("Speed %s Mb", link_status & 
ETH_NETWORKCFG_SPEED ? "100" : "10"); LOG_INF("%s duplex", link_status & ETH_NETWORKCFG_FULLDUPLEX ? "Full" : "Half"); /* Set up link parameters and enable receiver/transmitter */ link_configure(eth, link_status); } static void eth_init_tx_buf_desc(void) { uint32_t address; int i; /* Initialize TX buffer descriptors */ for (i = 0; i < ETH_TX_BUF_COUNT; i++) { address = (uint32_t) dma_tx_buffer[i]; dma_tx_desc_tab[i].address = address; dma_tx_desc_tab[i].status = ETH_TX_USED; } /* Mark last descriptor entry with wrap flag */ dma_tx_desc_tab[i - 1].status |= ETH_TX_WRAP; tx_buf_idx = 0; } static void eth_init_rx_buf_desc(void) { uint32_t address; int i; for (i = 0; i < ETH_RX_BUF_COUNT; i++) { address = (uint32_t) dma_rx_buffer[i]; dma_rx_desc_tab[i].address = address & ETH_RX_ADDRESS; dma_rx_desc_tab[i].status = 0; } /* Mark last descriptor entry with wrap flag */ dma_rx_desc_tab[i - 1].address |= ETH_RX_WRAP; rx_buf_idx = 0; } static void rx_error_handler(ETH_TypeDef *eth) { __ASSERT_NO_MSG(eth != NULL); /* Stop reception */ ETH_RX_DISABLE(eth); /* Reset RX buffer descriptor list */ eth_init_rx_buf_desc(); eth->RXQPTR = (uint32_t)dma_rx_desc_tab; /* Restart reception */ ETH_RX_ENABLE(eth); } static struct net_pkt *frame_get(const struct device *dev) { struct eth_gecko_dev_data *const dev_data = dev->data; const struct eth_gecko_dev_cfg *const cfg = dev->config; ETH_TypeDef *eth = cfg->regs; struct net_pkt *rx_frame = NULL; uint16_t frag_len, total_len; uint32_t sofIdx, eofIdx; uint32_t i, j; __ASSERT_NO_MSG(dev != NULL); __ASSERT_NO_MSG(dev_data != NULL); __ASSERT_NO_MSG(cfg != NULL); /* Preset indices and total frame length */ sofIdx = UINT32_MAX; eofIdx = UINT32_MAX; total_len = 0; /* Check if a full frame is received (SOF/EOF present) * and determine total length of frame */ for (i = 0; i < ETH_RX_BUF_COUNT; i++) { j = (i + rx_buf_idx); if (j >= ETH_RX_BUF_COUNT) { j -= ETH_RX_BUF_COUNT; } /* Verify it is an ETH owned buffer */ if (!(dma_rx_desc_tab[j].address & 
ETH_RX_OWNERSHIP)) { /* No more ETH owned buffers to process */ break; } /* Check for SOF */ if (dma_rx_desc_tab[j].status & ETH_RX_SOF) { sofIdx = j; } if (sofIdx != UINT32_MAX) { total_len += (dma_rx_desc_tab[j].status & ETH_RX_LENGTH); /* Check for EOF */ if (dma_rx_desc_tab[j].status & ETH_RX_EOF) { eofIdx = j; break; } } } LOG_DBG("sof/eof: %u/%u, rx_buf_idx: %u, len: %u", sofIdx, eofIdx, rx_buf_idx, total_len); /* Verify we found a full frame */ if (eofIdx != UINT32_MAX) { /* Allocate room for full frame */ rx_frame = net_pkt_rx_alloc_with_buffer(dev_data->iface, total_len, AF_UNSPEC, 0, K_NO_WAIT); if (!rx_frame) { LOG_ERR("Failed to obtain RX buffer"); ETH_RX_DISABLE(eth); eth_init_rx_buf_desc(); eth->RXQPTR = (uint32_t)dma_rx_desc_tab; ETH_RX_ENABLE(eth); return rx_frame; } /* Copy frame (fragments)*/ j = sofIdx; while (total_len) { frag_len = MIN(total_len, ETH_RX_BUF_SIZE); LOG_DBG("frag: %u, fraglen: %u, rx_buf_idx: %u", j, frag_len, rx_buf_idx); if (net_pkt_write(rx_frame, &dma_rx_buffer[j], frag_len) < 0) { LOG_ERR("Failed to append RX buffer"); dma_rx_desc_tab[j].address &= ~ETH_RX_OWNERSHIP; net_pkt_unref(rx_frame); rx_frame = NULL; break; } dma_rx_desc_tab[j].address &= ~ETH_RX_OWNERSHIP; total_len -= frag_len; if (++j >= ETH_RX_BUF_COUNT) { j -= ETH_RX_BUF_COUNT; } if (++rx_buf_idx >= ETH_RX_BUF_COUNT) { rx_buf_idx -= ETH_RX_BUF_COUNT; } } } return rx_frame; } static void eth_rx(const struct device *dev) { struct eth_gecko_dev_data *const dev_data = dev->data; struct net_pkt *rx_frame; int res = 0; __ASSERT_NO_MSG(dev != NULL); __ASSERT_NO_MSG(dev_data != NULL); /* Iterate across (possibly multiple) frames */ rx_frame = frame_get(dev); while (rx_frame) { /* All data for this frame received */ res = net_recv_data(dev_data->iface, rx_frame); if (res < 0) { LOG_ERR("Failed to enqueue frame into RX queue: %d", res); eth_stats_update_errors_rx(dev_data->iface); net_pkt_unref(rx_frame); } /* Check if more frames are received */ rx_frame = 
frame_get(dev); } } static int eth_tx(const struct device *dev, struct net_pkt *pkt) { struct eth_gecko_dev_data *const dev_data = dev->data; const struct eth_gecko_dev_cfg *const cfg = dev->config; ETH_TypeDef *eth = cfg->regs; uint16_t total_len; uint8_t *dma_buffer; int res = 0; __ASSERT_NO_MSG(dev != NULL); __ASSERT_NO_MSG(dev_data != NULL); __ASSERT_NO_MSG(cfg != NULL); __ASSERT(pkt, "Buf pointer is NULL"); __ASSERT(pkt->frags, "Frame data missing"); /* Determine length of frame */ total_len = net_pkt_get_len(pkt); if (total_len > ETH_TX_BUF_SIZE) { LOG_ERR("PKT to big"); res = -EIO; goto error; } if (k_sem_take(&dev_data->tx_sem, K_MSEC(100)) != 0) { LOG_ERR("TX process did not complete within 100ms"); res = -EIO; goto error; } /* Make sure current buffer is available for writing */ if (!(dma_tx_desc_tab[tx_buf_idx].status & ETH_TX_USED)) { LOG_ERR("Buffer already in use"); res = -EIO; goto error; } dma_buffer = (uint8_t *)dma_tx_desc_tab[tx_buf_idx].address; if (net_pkt_read(pkt, dma_buffer, total_len)) { LOG_ERR("Failed to read packet into buffer"); res = -EIO; goto error; } if (tx_buf_idx < (ETH_TX_BUF_COUNT - 1)) { dma_tx_desc_tab[tx_buf_idx].status = (total_len & ETH_TX_LENGTH) | ETH_TX_LAST; tx_buf_idx++; } else { dma_tx_desc_tab[tx_buf_idx].status = (total_len & ETH_TX_LENGTH) | (ETH_TX_LAST | ETH_TX_WRAP); tx_buf_idx = 0; } /* Kick off transmission */ eth->NETWORKCTRL |= ETH_NETWORKCTRL_TXSTRT; error: return res; } static void rx_thread(void *arg1, void *unused1, void *unused2) { const struct device *dev = (const struct device *)arg1; struct eth_gecko_dev_data *const dev_data = dev->data; const struct eth_gecko_dev_cfg *const cfg = dev->config; int res; __ASSERT_NO_MSG(arg1 != NULL); ARG_UNUSED(unused1); ARG_UNUSED(unused2); __ASSERT_NO_MSG(dev_data != NULL); __ASSERT_NO_MSG(cfg != NULL); while (1) { res = k_sem_take(&dev_data->rx_sem, K_MSEC( CONFIG_ETH_GECKO_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS)); if (res == 0) { if (dev_data->link_up != true) { 
dev_data->link_up = true; LOG_INF("Link up"); eth_gecko_setup_mac(dev); net_eth_carrier_on(dev_data->iface); } /* Process received data */ eth_rx(dev); } else if (res == -EAGAIN) { if (phy_gecko_is_linked(&cfg->phy)) { if (dev_data->link_up != true) { dev_data->link_up = true; LOG_INF("Link up"); eth_gecko_setup_mac(dev); net_eth_carrier_on(dev_data->iface); } } else { if (dev_data->link_up != false) { dev_data->link_up = false; LOG_INF("Link down"); net_eth_carrier_off(dev_data->iface); } } } } } static void eth_isr(const struct device *dev) { struct eth_gecko_dev_data *const dev_data = dev->data; const struct eth_gecko_dev_cfg *const cfg = dev->config; ETH_TypeDef *eth = cfg->regs; uint32_t int_clr = 0; uint32_t int_stat = eth->IFCR; uint32_t tx_irq_mask = (ETH_IENS_TXCMPLT | ETH_IENS_TXUNDERRUN | ETH_IENS_RTRYLMTORLATECOL | ETH_IENS_TXUSEDBITREAD | ETH_IENS_AMBAERR); uint32_t rx_irq_mask = (ETH_IENS_RXCMPLT | ETH_IENS_RXUSEDBITREAD); __ASSERT_NO_MSG(dev_data != NULL); __ASSERT_NO_MSG(cfg != NULL); /* Receive handling */ if (int_stat & rx_irq_mask) { if (int_stat & ETH_IENS_RXCMPLT) { /* Receive complete */ k_sem_give(&dev_data->rx_sem); } else { /* Receive error */ LOG_DBG("RX Error"); rx_error_handler(eth); } int_clr |= rx_irq_mask; } /* Transmit handling */ if (int_stat & tx_irq_mask) { if (int_stat & ETH_IENS_TXCMPLT) { /* Transmit complete */ } else { /* Transmit error: no actual handling, the current * buffer is no longer used and we release the * semaphore which signals the user thread to * start TX of a new packet */ } int_clr |= tx_irq_mask; /* Signal TX thread we're ready to start transmission */ k_sem_give(&dev_data->tx_sem); } /* Clear interrupts */ eth->IFCR = int_clr; } static void eth_init_clocks(const struct device *dev) { __ASSERT_NO_MSG(dev != NULL); CMU_ClockEnable(cmuClock_HFPER, true); CMU_ClockEnable(cmuClock_ETH, true); } static void eth_init_pins(const struct device *dev) { const struct eth_gecko_dev_cfg *const cfg = dev->config; 
ETH_TypeDef *eth = cfg->regs; uint32_t idx; __ASSERT_NO_MSG(dev != NULL); __ASSERT_NO_MSG(cfg != NULL); eth->ROUTELOC1 = 0; eth->ROUTEPEN = 0; #if DT_INST_NODE_HAS_PROP(0, location_rmii) for (idx = 0; idx < ARRAY_SIZE(cfg->pin_list->rmii); idx++) { GPIO_PinModeSet(cfg->pin_list->rmii[idx].port, cfg->pin_list->rmii[idx].pin, cfg->pin_list->rmii[idx].mode, cfg->pin_list->rmii[idx].out); } eth->ROUTELOC1 |= (DT_INST_PROP(0, location_rmii) << _ETH_ROUTELOC1_RMIILOC_SHIFT); eth->ROUTEPEN |= ETH_ROUTEPEN_RMIIPEN; #endif #if DT_INST_NODE_HAS_PROP(0, location_mdio) for (idx = 0; idx < ARRAY_SIZE(cfg->pin_list->mdio); idx++) { GPIO_PinModeSet(cfg->pin_list->mdio[idx].port, cfg->pin_list->mdio[idx].pin, cfg->pin_list->mdio[idx].mode, cfg->pin_list->mdio[idx].out); } eth->ROUTELOC1 |= (DT_INST_PROP(0, location_mdio) << _ETH_ROUTELOC1_MDIOLOC_SHIFT); eth->ROUTEPEN |= ETH_ROUTEPEN_MDIOPEN; #endif } static int eth_init(const struct device *dev) { const struct eth_gecko_dev_cfg *const cfg = dev->config; ETH_TypeDef *eth = cfg->regs; __ASSERT_NO_MSG(dev != NULL); __ASSERT_NO_MSG(cfg != NULL); /* Enable clocks */ eth_init_clocks(dev); /* Connect pins to peripheral */ eth_init_pins(dev); #if DT_INST_NODE_HAS_PROP(0, location_rmii) /* Enable global clock and RMII operation */ eth->CTRL = ETH_CTRL_GBLCLKEN | ETH_CTRL_MIISEL_RMII; #endif /* Connect and enable IRQ */ cfg->config_func(); LOG_INF("Device %s initialized", dev->name); return 0; } static void generate_mac(uint8_t mac_addr[6]) { #if DT_INST_PROP(0, zephyr_random_mac_address) gen_random_mac(mac_addr, SILABS_OUI_B0, SILABS_OUI_B1, SILABS_OUI_B2); #elif !NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0)) mac_addr[0] = DEVINFO->EUI48H >> 8; mac_addr[1] = DEVINFO->EUI48H >> 0; mac_addr[2] = DEVINFO->EUI48L >> 24; mac_addr[3] = DEVINFO->EUI48L >> 16; mac_addr[4] = DEVINFO->EUI48L >> 8; mac_addr[5] = DEVINFO->EUI48L >> 0; #endif } static void eth_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct 
eth_gecko_dev_data *const dev_data = dev->data; const struct eth_gecko_dev_cfg *const cfg = dev->config; ETH_TypeDef *eth = cfg->regs; int result; __ASSERT_NO_MSG(iface != NULL); __ASSERT_NO_MSG(dev != NULL); __ASSERT_NO_MSG(dev_data != NULL); __ASSERT_NO_MSG(cfg != NULL); LOG_DBG("eth_initialize"); dev_data->iface = iface; dev_data->link_up = false; ethernet_init(iface); net_if_carrier_off(iface); /* Generate MAC address, possibly used for filtering */ generate_mac(dev_data->mac_addr); /* Set link address */ LOG_DBG("MAC %02x:%02x:%02x:%02x:%02x:%02x", dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); /* Disable transmit and receive circuits */ eth->NETWORKCTRL = 0; eth->NETWORKCFG = 0; /* Filtering MAC addresses */ eth->SPECADDR1BOTTOM = (dev_data->mac_addr[0] << 0) | (dev_data->mac_addr[1] << 8) | (dev_data->mac_addr[2] << 16) | (dev_data->mac_addr[3] << 24); eth->SPECADDR1TOP = (dev_data->mac_addr[4] << 0) | (dev_data->mac_addr[5] << 8); eth->SPECADDR2BOTTOM = 0; eth->SPECADDR3BOTTOM = 0; eth->SPECADDR4BOTTOM = 0; /* Initialise hash table */ eth->HASHBOTTOM = 0; eth->HASHTOP = 0; /* Initialise DMA buffers */ eth_init_tx_buf_desc(); eth_init_rx_buf_desc(); /* Point to locations of TX/RX DMA descriptor lists */ eth->TXQPTR = (uint32_t)dma_tx_desc_tab; eth->RXQPTR = (uint32_t)dma_rx_desc_tab; /* DMA RX size configuration */ eth->DMACFG = (eth->DMACFG & ~_ETH_DMACFG_RXBUFSIZE_MASK) | ((ETH_RX_BUF_SIZE / 64) << _ETH_DMACFG_RXBUFSIZE_SHIFT); /* Clear status/interrupt registers */ eth->IFCR |= _ETH_IFCR_MASK; eth->TXSTATUS = ETH_TXSTATUS_TXUNDERRUN | ETH_TXSTATUS_TXCMPLT | ETH_TXSTATUS_AMBAERR | ETH_TXSTATUS_TXGO | ETH_TXSTATUS_RETRYLMTEXCD | ETH_TXSTATUS_COLOCCRD | ETH_TXSTATUS_USEDBITREAD; eth->RXSTATUS = ETH_RXSTATUS_RESPNOTOK | ETH_RXSTATUS_RXOVERRUN | ETH_RXSTATUS_FRMRX | 
ETH_RXSTATUS_BUFFNOTAVAIL; /* Enable interrupts */ eth->IENS = ETH_IENS_RXCMPLT | ETH_IENS_RXUSEDBITREAD | ETH_IENS_TXCMPLT | ETH_IENS_TXUNDERRUN | ETH_IENS_RTRYLMTORLATECOL | ETH_IENS_TXUSEDBITREAD | ETH_IENS_AMBAERR; /* Additional DMA configuration */ eth->DMACFG |= _ETH_DMACFG_AMBABRSTLEN_MASK | ETH_DMACFG_FRCDISCARDONERR | ETH_DMACFG_TXPBUFTCPEN; eth->DMACFG &= ~ETH_DMACFG_HDRDATASPLITEN; /* Set network configuration */ eth->NETWORKCFG |= ETH_NETWORKCFG_FCSREMOVE | ETH_NETWORKCFG_UNICASTHASHEN | ETH_NETWORKCFG_MULTICASTHASHEN | ETH_NETWORKCFG_RX1536BYTEFRAMES | ETH_NETWORKCFG_RXCHKSUMOFFLOADEN; /* Setup PHY management port */ eth->NETWORKCFG |= (4 << _ETH_NETWORKCFG_MDCCLKDIV_SHIFT) & _ETH_NETWORKCFG_MDCCLKDIV_MASK; eth->NETWORKCTRL |= ETH_NETWORKCTRL_MANPORTEN; /* Initialise PHY */ result = phy_gecko_init(&cfg->phy); if (result < 0) { LOG_ERR("ETH PHY Initialization Error"); return; } /* Initialise TX/RX semaphores */ k_sem_init(&dev_data->tx_sem, 1, ETH_TX_BUF_COUNT); k_sem_init(&dev_data->rx_sem, 0, K_SEM_MAX_LIMIT); /* Start interruption-poll thread */ k_thread_create(&dev_data->rx_thread, dev_data->rx_thread_stack, K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack), rx_thread, (void *) dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_GECKO_RX_THREAD_PRIO), 0, K_NO_WAIT); } static enum ethernet_hw_caps eth_gecko_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return (ETHERNET_AUTO_NEGOTIATION_SET | ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_DUPLEX_SET); } static const struct ethernet_api eth_api = { .iface_api.init = eth_iface_init, .get_capabilities = eth_gecko_get_capabilities, .send = eth_tx, }; static void eth0_irq_config(void) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } static const struct eth_gecko_pin_list pins_eth0 = { .mdio = PIN_LIST_PHY, .rmii = PIN_LIST_RMII }; static const struct eth_gecko_dev_cfg eth0_config = { .regs = (ETH_TypeDef *) 
DT_INST_REG_ADDR(0), .pin_list = &pins_eth0, .pin_list_size = ARRAY_SIZE(pins_eth0.mdio) + ARRAY_SIZE(pins_eth0.rmii), .config_func = eth0_irq_config, .phy = { (ETH_TypeDef *) DT_INST_REG_ADDR(0), DT_INST_PROP(0, phy_address) }, }; static struct eth_gecko_dev_data eth0_data = { #if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0)) .mac_addr = DT_INST_PROP(0, local_mac_address), #endif }; ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_init, NULL, &eth0_data, &eth0_config, CONFIG_ETH_INIT_PRIORITY, &eth_api, ETH_GECKO_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_gecko.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,314
```c /* XMC4XXX Ethernet controller * * */ #define DT_DRV_COMPAT infineon_xmc4xxx_ethernet #include "eth.h" #include <stdint.h> #include <soc.h> #include <zephyr/device.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/drivers/ptp_clock.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/gptp.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/phy.h> #include <zephyr/kernel.h> #include <zephyr/sys/slist.h> #include <zephyr/sys/util.h> #include <ethernet/eth_stats.h> #include <xmc_eth_mac.h> #include <xmc_scu.h> #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(eth_xmc4xxx); #define NUM_TX_DMA_DESCRIPTORS CONFIG_ETH_XMC4XXX_NUM_TX_DMA_DESCRIPTORS #define NUM_RX_DMA_DESCRIPTORS CONFIG_ETH_XMC4XXX_NUM_RX_DMA_DESCRIPTORS #define ETH_NODE DT_NODELABEL(eth) #define PHY_NODE DT_PHANDLE_BY_IDX(ETH_NODE, phy, 0) #define INFINEON_OUI_B0 0x00 #define INFINEON_OUI_B1 0x03 #define INFINEON_OUI_B2 0x19 #define MODULO_INC_TX(val) {(val) = (++(val) < NUM_TX_DMA_DESCRIPTORS) ? (val) : 0; } #define MODULO_INC_RX(val) {(val) = (++(val) < NUM_RX_DMA_DESCRIPTORS) ? 
(val) : 0; } #define IS_OWNED_BY_DMA_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_OWN) != 0) #define IS_OWNED_BY_DMA_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_OWN) != 0) #define IS_START_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_FS) != 0) #define IS_END_OF_FRAME_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_LS) != 0) #define IS_TIMESTAMP_AVAILABLE_RX(desc) (((desc)->status & ETH_MAC_DMA_RDES0_TSA) != 0) #define IS_TIMESTAMP_AVAILABLE_TX(desc) (((desc)->status & ETH_MAC_DMA_TDES0_TTSS) != 0) #define TOTAL_FRAME_LENGTH(desc) (FIELD_GET(ETH_MAC_DMA_RDES0_FL, (desc)->status) - 4) #define ETH_STATUS_ERROR_TRANSMIT_EVENTS \ (XMC_ETH_MAC_EVENT_BUS_ERROR | XMC_ETH_MAC_EVENT_TRANSMIT_JABBER_TIMEOUT | \ XMC_ETH_MAC_EVENT_TRANSMIT_UNDERFLOW | XMC_ETH_MAC_EVENT_TRANSMIT_PROCESS_STOPPED) #define ETH_STATUS_ERROR_RECEIVE_EVENTS \ (XMC_ETH_MAC_EVENT_BUS_ERROR | XMC_ETH_MAC_EVENT_RECEIVE_OVERFLOW) #define ETH_STATUS_ALL_EVENTS \ (ETH_STATUS_ERROR_TRANSMIT_EVENTS | ETH_STATUS_ERROR_RECEIVE_EVENTS | \ XMC_ETH_MAC_EVENT_RECEIVE | XMC_ETH_MAC_EVENT_TRANSMIT | ETH_INTERRUPT_ENABLE_NIE_Msk | \ ETH_INTERRUPT_ENABLE_AIE_Msk) #define ETH_MAC_DISABLE_MMC_INTERRUPT_MSK 0x03ffffffu #define ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK 0x3fff3fffu #define ETH_STATUS_CLEARABLE_BITS 0x1e7ffu #define ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK BIT(14) #define ETH_RESET_TIMEOUT_USEC 200000u #define ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC 100000u #define ETH_LINK_SPEED_10M 0 #define ETH_LINK_SPEED_100M 1 #define ETH_LINK_DUPLEX_HALF 0 #define ETH_LINK_DUPLEX_FULL 1 #define ETH_PTP_CLOCK_FREQUENCY 50000000 #define ETH_PTP_RATE_ADJUST_RATIO_MIN 0.9 #define ETH_PTP_RATE_ADJUST_RATIO_MAX 1.1 struct eth_xmc4xxx_data { struct net_if *iface; uint8_t mac_addr[6]; struct k_sem tx_desc_sem; bool link_up; #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct net_stats_eth stats; #endif bool tx_frames_flushed; uint16_t dma_desc_tx_head; uint16_t dma_desc_rx_tail; sys_slist_t tx_frame_list; struct 
net_buf *rx_frag_list[NUM_RX_DMA_DESCRIPTORS]; #if defined(CONFIG_PTP_CLOCK_XMC4XXX) const struct device *ptp_clock; #endif }; struct eth_xmc4xxx_config { ETH_GLOBAL_TypeDef *regs; const struct device *phy_dev; void (*irq_config_func)(void); const struct pinctrl_dev_config *pcfg; const uint8_t phy_connection_type; XMC_ETH_MAC_PORT_CTRL_t port_ctrl; }; struct eth_xmc4xxx_tx_frame { sys_snode_t node; struct net_pkt *pkt; uint16_t tail_index; uint16_t head_index; }; K_MEM_SLAB_DEFINE_STATIC(tx_frame_slab, sizeof(struct eth_xmc4xxx_tx_frame), CONFIG_ETH_XMC4XXX_TX_FRAME_POOL_SIZE, 4); static XMC_ETH_MAC_DMA_DESC_t __aligned(4) tx_dma_desc[NUM_TX_DMA_DESCRIPTORS]; static XMC_ETH_MAC_DMA_DESC_t __aligned(4) rx_dma_desc[NUM_RX_DMA_DESCRIPTORS]; static inline struct net_if *get_iface(struct eth_xmc4xxx_data *ctx) { return ctx->iface; } static void eth_xmc4xxx_tx_dma_descriptors_init(const struct device *dev) { const struct eth_xmc4xxx_config *dev_cfg = dev->config; memset(tx_dma_desc, 0, sizeof(tx_dma_desc)); dev_cfg->regs->TRANSMIT_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&tx_dma_desc[0]; /* chain the descriptors */ for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS - 1; i++) { XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[i]; dma_desc->buffer2 = (volatile uint32_t)&tx_dma_desc[i + 1]; } /* TER: transmit end of ring - it is the last descriptor in ring */ tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER; tx_dma_desc[NUM_TX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&tx_dma_desc[0]; } static void eth_xmc4xxx_flush_rx(const struct device *dev) { const struct eth_xmc4xxx_config *dev_cfg = dev->config; struct eth_xmc4xxx_data *dev_data = dev->data; dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_SR_Msk; for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) { rx_dma_desc[i].status = ETH_MAC_DMA_RDES0_OWN; } dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk; dev_data->dma_desc_rx_tail = 0; } static void eth_xmc4xxx_flush_tx(const struct device *dev) { 
const struct eth_xmc4xxx_config *dev_cfg = dev->config; struct eth_xmc4xxx_data *dev_data = dev->data; sys_snode_t *node; LOG_DBG("Flushing tx frames"); if (dev_data->tx_frames_flushed) { return; } dev_cfg->regs->OPERATION_MODE &= ~ETH_OPERATION_MODE_ST_Msk; node = sys_slist_get(&dev_data->tx_frame_list); while (node) { struct eth_xmc4xxx_tx_frame *tx_frame = SYS_SLIST_CONTAINER(node, tx_frame, node); net_pkt_unref(tx_frame->pkt); k_mem_slab_free(&tx_frame_slab, (void *)tx_frame); node = sys_slist_get(&dev_data->tx_frame_list); #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.errors.tx++; dev_data->stats.error_details.tx_aborted_errors++; #endif } k_sem_reset(&dev_data->tx_desc_sem); eth_xmc4xxx_tx_dma_descriptors_init(dev); dev_cfg->regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk; dev_data->dma_desc_tx_head = 0; dev_data->tx_frames_flushed = true; for (int i = 0; i < NUM_TX_DMA_DESCRIPTORS; i++) { k_sem_give(&dev_data->tx_desc_sem); } } static inline void eth_xmc4xxx_trigger_dma_tx(ETH_GLOBAL_TypeDef *regs) { regs->STATUS = ETH_STATUS_TPS_Msk; regs->TRANSMIT_POLL_DEMAND = 0; } static inline void eth_xmc4xxx_trigger_dma_rx(ETH_GLOBAL_TypeDef *regs) { regs->STATUS = ETH_STATUS_RU_Msk; regs->RECEIVE_POLL_DEMAND = 0U; } static int eth_xmc4xxx_send(const struct device *dev, struct net_pkt *pkt) { struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; struct net_buf *frag; uint8_t *frag_data; uint16_t frag_len; int ret = 0; XMC_ETH_MAC_DMA_DESC_t *dma_desc = NULL; struct eth_xmc4xxx_tx_frame *tx_frame; int num_frags = 0; bool first_descriptor = false; frag = pkt->frags; while (frag) { num_frags++; frag = frag->frags; } if (num_frags > NUM_TX_DMA_DESCRIPTORS) { #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.error_details.tx_dma_failed++; #endif LOG_DBG("Number of fragments exceeds total descriptors. Dropping packet"); return -ENOMEM; } /* All available frames buffered inside the driver. 
Apply back pressure in the driver. */ while (tx_frame_slab.info.num_used == CONFIG_ETH_XMC4XXX_TX_FRAME_POOL_SIZE) { eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs); k_yield(); } ret = k_mem_slab_alloc(&tx_frame_slab, (void **)&tx_frame, K_NO_WAIT); __ASSERT_NO_MSG(ret == 0); net_pkt_ref(pkt); dev_data->tx_frames_flushed = false; first_descriptor = true; tx_frame->pkt = pkt; tx_frame->tail_index = dev_data->dma_desc_tx_head; frag = pkt->frags; while (frag) { ret = k_sem_take(&dev_data->tx_desc_sem, K_FOREVER); /* isr may call k_sem_reset() */ if (ret < 0 || dev_data->tx_frames_flushed) { k_mem_slab_free(&tx_frame_slab, (void **)&tx_frame); net_pkt_unref(pkt); #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.error_details.tx_aborted_errors++; #endif LOG_DBG("Dropping frame. Buffered Tx frames were flushed in ISR."); return -EIO; } unsigned int key = irq_lock(); /* Critical section for dma_desc_tx_head and tx_dma_desc. Isr may */ /* reinitialize the descriptors and set dma_desc_tx_head to 0 */ dma_desc = &tx_dma_desc[dev_data->dma_desc_tx_head]; frag_data = frag->data; frag_len = frag->len; dma_desc->buffer1 = (volatile uint32_t)frag_data; dma_desc->length = frag_len; /* give ownership of descriptor back to dma and set checksum offload */ /* TCH we are using a circular list */ dma_desc->status = ETH_MAC_DMA_TDES0_CIC | ETH_MAC_DMA_TDES0_TCH; if (!first_descriptor) { /* Delay giving ownership of first frag to DMA. 
Prevents race condition */ /* where second other frags are not ready */ dma_desc->status |= ETH_MAC_DMA_TDES0_OWN; } else { dma_desc->status |= ETH_MAC_DMA_TDES0_FS; #if defined(CONFIG_NET_GPTP) struct net_eth_hdr *hdr = NET_ETH_HDR(pkt); if (ntohs(hdr->type) == NET_ETH_PTYPE_PTP) { dma_desc->status |= ETH_MAC_DMA_TDES0_TTSE; } #endif } first_descriptor = false; tx_frame->head_index = dev_data->dma_desc_tx_head; MODULO_INC_TX(dev_data->dma_desc_tx_head); irq_unlock(key); frag = frag->frags; } if (dev_data->tx_frames_flushed) { k_mem_slab_free(&tx_frame_slab, (void **)&tx_frame); net_pkt_unref(pkt); #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.error_details.tx_aborted_errors++; #endif LOG_DBG("Dropping frame. Buffered Tx frames were flushed in ISR."); return -EIO; } unsigned int key = irq_lock(); /* label last dma descriptor as last segment and trigger interrupt on last segment */ dma_desc->status |= ETH_MAC_DMA_TDES0_IC | ETH_MAC_DMA_TDES0_LS; /* Finally give ownership of first frag to DMA. After this point the DMA engine */ /* may transfer the whole frame from RAM to Ethernet */ tx_dma_desc[tx_frame->tail_index].status |= ETH_MAC_DMA_TDES0_OWN; sys_slist_append(&dev_data->tx_frame_list, &tx_frame->node); eth_xmc4xxx_trigger_dma_tx(dev_cfg->regs); irq_unlock(key); return 0; } static struct net_pkt *eth_xmc4xxx_rx_pkt(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; struct net_pkt *pkt = NULL; struct net_buf *new_frag; bool eof_found = false; uint16_t tail; XMC_ETH_MAC_DMA_DESC_t *dma_desc; int num_frags = 0; uint16_t frame_end_index; struct net_buf *frag, *last_frag = NULL; tail = dev_data->dma_desc_rx_tail; dma_desc = &rx_dma_desc[tail]; if (IS_OWNED_BY_DMA_RX(dma_desc)) { return NULL; } if (!IS_START_OF_FRAME_RX(dma_desc)) { /* handle this error - missing SOF packet? 
*/ eth_xmc4xxx_flush_rx(dev); return NULL; } while (!IS_OWNED_BY_DMA_RX(dma_desc)) { eof_found = IS_END_OF_FRAME_RX(dma_desc); num_frags++; if (eof_found) { break; } MODULO_INC_RX(tail); if (tail == dev_data->dma_desc_rx_tail) { /* wrapped */ break; } dma_desc = &rx_dma_desc[tail]; } if (!eof_found) { return NULL; } frame_end_index = tail; pkt = net_pkt_rx_alloc(K_NO_WAIT); if (pkt == NULL) { #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.errors.rx++; dev_data->stats.error_details.rx_no_buffer_count++; #endif LOG_DBG("Net packet allocation error"); /* continue because we still need to read out the packet */ } tail = dev_data->dma_desc_rx_tail; dma_desc = &rx_dma_desc[tail]; for (;;) { if (pkt != NULL) { uint16_t frag_len = CONFIG_NET_BUF_DATA_SIZE; frag = dev_data->rx_frag_list[tail]; if (tail == frame_end_index) { frag_len = TOTAL_FRAME_LENGTH(dma_desc) - CONFIG_NET_BUF_DATA_SIZE * (num_frags - 1); if (IS_TIMESTAMP_AVAILABLE_RX(dma_desc)) { struct net_ptp_time timestamp = { .second = dma_desc->time_stamp_seconds, .nanosecond = dma_desc->time_stamp_nanoseconds}; net_pkt_set_timestamp(pkt, &timestamp); net_pkt_set_priority(pkt, NET_PRIORITY_CA); } } new_frag = net_pkt_get_frag(pkt, CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); if (new_frag == NULL) { #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.errors.rx++; dev_data->stats.error_details.rx_buf_alloc_failed++; #endif LOG_DBG("Frag allocation error. 
Increase CONFIG_NET_BUF_RX_COUNT."); net_pkt_unref(pkt); pkt = NULL; } else { net_buf_add(frag, frag_len); if (!last_frag) { net_pkt_frag_insert(pkt, frag); } else { net_buf_frag_insert(last_frag, frag); } last_frag = frag; frag = new_frag; dev_data->rx_frag_list[tail] = frag; } } dma_desc->buffer1 = (uint32_t)dev_data->rx_frag_list[tail]->data; dma_desc->length = dev_data->rx_frag_list[tail]->size | ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK; dma_desc->status = ETH_MAC_DMA_RDES0_OWN; if (tail == frame_end_index) { break; } MODULO_INC_RX(tail); dma_desc = &rx_dma_desc[tail]; } MODULO_INC_RX(tail); dev_data->dma_desc_rx_tail = tail; eth_xmc4xxx_trigger_dma_rx(dev_cfg->regs); return pkt; } static void eth_xmc4xxx_handle_rx(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; struct net_pkt *pkt = NULL; for (;;) { pkt = eth_xmc4xxx_rx_pkt(dev); if (!pkt) { return; } if (net_recv_data(get_iface(dev_data), pkt) < 0) { eth_stats_update_errors_rx(get_iface(dev_data)); net_pkt_unref(pkt); } } } static void eth_xmc4xxx_handle_tx(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; sys_snode_t *node = sys_slist_peek_head(&dev_data->tx_frame_list); while (node) { struct eth_xmc4xxx_tx_frame *tx_frame = SYS_SLIST_CONTAINER(node, tx_frame, node); bool owned_by_mcu = true; uint8_t index; int num_descriptors; if (tx_frame->head_index >= tx_frame->tail_index) { num_descriptors = tx_frame->head_index - tx_frame->tail_index + 1; } else { num_descriptors = tx_frame->head_index + NUM_TX_DMA_DESCRIPTORS - tx_frame->tail_index + 1; } index = tx_frame->tail_index; for (int i = 0; i < num_descriptors; i++) { if (IS_OWNED_BY_DMA_TX(&tx_dma_desc[index])) { owned_by_mcu = false; break; } MODULO_INC_TX(index); } if (owned_by_mcu) { #if defined(CONFIG_NET_GPTP) XMC_ETH_MAC_DMA_DESC_t *dma_desc = &tx_dma_desc[tx_frame->head_index]; if (IS_TIMESTAMP_AVAILABLE_TX(dma_desc)) { struct net_pkt *pkt = tx_frame->pkt; if (atomic_get(&pkt->atomic_ref) > 1) { 
struct net_ptp_time timestamp = { .second = dma_desc->time_stamp_seconds, .nanosecond = dma_desc->time_stamp_nanoseconds}; net_pkt_set_timestamp(pkt, &timestamp); net_if_add_tx_timestamp(pkt); } } #endif for (int i = 0; i < num_descriptors; i++) { k_sem_give(&dev_data->tx_desc_sem); } sys_slist_get(&dev_data->tx_frame_list); net_pkt_unref(tx_frame->pkt); k_mem_slab_free(&tx_frame_slab, (void *)tx_frame); node = sys_slist_peek_head(&dev_data->tx_frame_list); } else { node = NULL; } } } static void eth_xmc4xxx_isr(const struct device *dev) { uint32_t lock; uint32_t status; const struct eth_xmc4xxx_config *dev_cfg = dev->config; lock = irq_lock(); status = dev_cfg->regs->STATUS; if ((status & XMC_ETH_MAC_EVENT_RECEIVE) != 0) { eth_xmc4xxx_handle_rx(dev); } if ((status & XMC_ETH_MAC_EVENT_TRANSMIT) != 0) { eth_xmc4xxx_handle_tx(dev); } if ((status & ETH_STATUS_ERROR_TRANSMIT_EVENTS) != 0) { LOG_ERR("Transmit error event [0x%x]", status); eth_xmc4xxx_flush_tx(dev); } if ((status & ETH_STATUS_ERROR_RECEIVE_EVENTS) != 0) { LOG_ERR("Receive error event [0x%x]", status); eth_xmc4xxx_flush_rx(dev); } dev_cfg->regs->STATUS = status & ETH_STATUS_CLEARABLE_BITS; irq_unlock(lock); } static inline void eth_xmc4xxx_enable_tx(ETH_GLOBAL_TypeDef *regs) { regs->OPERATION_MODE |= ETH_OPERATION_MODE_ST_Msk; regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_TE_Msk; } static inline void eth_xmc4xxx_enable_rx(ETH_GLOBAL_TypeDef *regs) { regs->OPERATION_MODE |= ETH_OPERATION_MODE_SR_Msk; regs->MAC_CONFIGURATION |= ETH_MAC_CONFIGURATION_RE_Msk; } static inline void eth_xmc4xxx_set_link(ETH_GLOBAL_TypeDef *regs, struct phy_link_state *state) { uint32_t reg = regs->MAC_CONFIGURATION; uint32_t val; reg &= ~(ETH_MAC_CONFIGURATION_DM_Msk | ETH_MAC_CONFIGURATION_FES_Msk); val = PHY_LINK_IS_FULL_DUPLEX(state->speed) ? ETH_LINK_DUPLEX_FULL : ETH_LINK_DUPLEX_HALF; reg |= FIELD_PREP(ETH_MAC_CONFIGURATION_DM_Msk, val); val = PHY_LINK_IS_SPEED_100M(state->speed) ? 
ETH_LINK_SPEED_100M : ETH_LINK_SPEED_10M; reg |= FIELD_PREP(ETH_MAC_CONFIGURATION_FES_Msk, val); regs->MAC_CONFIGURATION = reg; } static void phy_link_state_changed(const struct device *phy_dev, struct phy_link_state *state, void *user_data) { struct device *dev = user_data; struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; bool is_up = state->is_up; if (is_up && !dev_data->link_up) { LOG_INF("Link up"); dev_data->link_up = true; net_eth_carrier_on(dev_data->iface); eth_xmc4xxx_set_link(dev_cfg->regs, state); } else if (!is_up && dev_data->link_up) { LOG_INF("Link down"); dev_data->link_up = false; net_eth_carrier_off(dev_data->iface); } } static const struct device *eth_xmc4xxx_get_phy(const struct device *dev) { const struct eth_xmc4xxx_config *dev_cfg = dev->config; return dev_cfg->phy_dev; } static void eth_xmc4xxx_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; dev_data->iface = iface; net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); ethernet_init(iface); dev_cfg->irq_config_func(); /* Do not start the interface until PHY link is up */ net_if_carrier_off(iface); phy_link_callback_set(dev_cfg->phy_dev, &phy_link_state_changed, (void *)dev); dev_cfg->regs->INTERRUPT_ENABLE |= ETH_STATUS_ALL_EVENTS; eth_xmc4xxx_enable_tx(dev_cfg->regs); eth_xmc4xxx_enable_rx(dev_cfg->regs); } #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *eth_xmc4xxx_stats(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; return &dev_data->stats; } #endif static inline void eth_xmc4xxx_free_rx_bufs(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) { if (dev_data->rx_frag_list[i]) { net_buf_unref(dev_data->rx_frag_list[i]); 
dev_data->rx_frag_list[i] = NULL; } } } static int eth_xmc4xxx_rx_dma_descriptors_init(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; dev_cfg->regs->RECEIVE_DESCRIPTOR_LIST_ADDRESS = (uint32_t)&rx_dma_desc[0]; for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS - 1; i++) { XMC_ETH_MAC_DMA_DESC_t *dma_desc = &rx_dma_desc[i]; dma_desc->buffer2 = (volatile uint32_t)&rx_dma_desc[i + 1]; } rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].status |= ETH_MAC_DMA_TDES0_TER; rx_dma_desc[NUM_RX_DMA_DESCRIPTORS - 1].buffer2 = (volatile uint32_t)&rx_dma_desc[0]; for (int i = 0; i < NUM_RX_DMA_DESCRIPTORS; i++) { XMC_ETH_MAC_DMA_DESC_t *dma_desc = &rx_dma_desc[i]; struct net_buf *rx_buf = net_pkt_get_reserve_rx_data(CONFIG_NET_BUF_DATA_SIZE, K_NO_WAIT); if (rx_buf == NULL) { eth_xmc4xxx_free_rx_bufs(dev); LOG_ERR("Failed to reserve data net buffers"); return -ENOBUFS; } dev_data->rx_frag_list[i] = rx_buf; dma_desc->buffer1 = (uint32_t)rx_buf->data; dma_desc->length = rx_buf->size | ETH_RX_DMA_DESC_SECOND_ADDR_CHAINED_MASK; dma_desc->status = ETH_MAC_DMA_RDES0_OWN; } return 0; } static inline int eth_xmc4xxx_reset(const struct device *dev) { const struct eth_xmc4xxx_config *dev_cfg = dev->config; dev_cfg->regs->BUS_MODE |= ETH_BUS_MODE_SWR_Msk; /* reset may fail if the clocks are not properly setup */ if (!WAIT_FOR((dev_cfg->regs->BUS_MODE & ETH_BUS_MODE_SWR_Msk) == 0, ETH_RESET_TIMEOUT_USEC,)) { return -ETIMEDOUT; } return 0; } static inline void eth_xmc4xxx_set_mac_address(ETH_GLOBAL_TypeDef *regs, uint8_t *const addr) { regs->MAC_ADDRESS0_HIGH = addr[4] | (addr[5] << 8); regs->MAC_ADDRESS0_LOW = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); } static inline void eth_xmc4xxx_mask_unused_interrupts(ETH_GLOBAL_TypeDef *regs) { /* Disable Mac Management Counter (MMC) interrupt events */ regs->MMC_TRANSMIT_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_INTERRUPT_MSK; regs->MMC_RECEIVE_INTERRUPT_MASK = 
ETH_MAC_DISABLE_MMC_INTERRUPT_MSK; /* IPC - Receive IP checksum checker */ regs->MMC_IPC_RECEIVE_INTERRUPT_MASK = ETH_MAC_DISABLE_MMC_IPC_RECEIVE_INTERRUPT_MSK; /* Disable PMT and timestamp interrupt events */ regs->INTERRUPT_MASK = ETH_INTERRUPT_MASK_PMTIM_Msk | ETH_INTERRUPT_MASK_TSIM_Msk; } static inline int eth_xmc4xxx_init_timestamp_control_reg(ETH_GLOBAL_TypeDef *regs) { #if defined(CONFIG_NET_GPTP) regs->TIMESTAMP_CONTROL = ETH_TIMESTAMP_CONTROL_TSENA_Msk | ETH_TIMESTAMP_CONTROL_TSENALL_Msk; #endif #if defined(CONFIG_PTP_CLOCK_XMC4XXX) /* use fine control */ regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSCFUPDT_Msk | ETH_TIMESTAMP_CONTROL_TSCTRLSSR_Msk; /* make ptp run at 50MHz - implies 20ns increment for each increment of the */ /* sub_second_register */ regs->SUB_SECOND_INCREMENT = 20; /* f_out = f_cpu * K / 2^32, where K = TIMESTAMP_ADDEND. Target F_out = 50MHz */ /* Therefore, K = ceil(f_out * 2^32 / f_cpu) */ uint32_t f_cpu = XMC_SCU_CLOCK_GetSystemClockFrequency(); uint32_t K = (BIT64(32) * ETH_PTP_CLOCK_FREQUENCY + f_cpu / 2) / f_cpu; regs->TIMESTAMP_ADDEND = K; /* Addend register update */ regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk; if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0, ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) { return -ETIMEDOUT; } regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk; if (!WAIT_FOR((regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0, ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) { return -ETIMEDOUT; } #endif return 0; } static int eth_xmc4xxx_init(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; XMC_ETH_MAC_PORT_CTRL_t port_ctrl; int ret; sys_slist_init(&dev_data->tx_frame_list); k_sem_init(&dev_data->tx_desc_sem, NUM_TX_DMA_DESCRIPTORS, NUM_TX_DMA_DESCRIPTORS); if (!device_is_ready(dev_cfg->phy_dev)) { LOG_ERR("Phy device not ready"); return -ENODEV; } /* get the 
port control initialized by MDIO driver */ port_ctrl.raw = ETH0_CON->CON; port_ctrl.raw |= dev_cfg->port_ctrl.raw; XMC_ETH_MAC_Disable(NULL); ret = pinctrl_apply_state(dev_cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret) { return ret; } XMC_ETH_MAC_SetPortControl(NULL, port_ctrl); XMC_ETH_MAC_Enable(NULL); ret = eth_xmc4xxx_reset(dev); if (ret != 0) { LOG_ERR("Error resetting ethernet [%d]", ret); return ret; } /* Initialize MAC configuration */ /* enable checksum offload */ dev_cfg->regs->MAC_CONFIGURATION = ETH_MAC_CONFIGURATION_IPC_Msk; /* disable jumbo frames */ dev_cfg->regs->MAC_CONFIGURATION &= ~ETH_MAC_CONFIGURATION_JE_Msk; /* Initialize Filter registers - disable zero quanta pause*/ dev_cfg->regs->FLOW_CONTROL = ETH_FLOW_CONTROL_DZPQ_Msk; /* rsf - receive store and forward */ /* tsf - transmit store and forward */ dev_cfg->regs->OPERATION_MODE = ETH_OPERATION_MODE_RSF_Msk | ETH_OPERATION_MODE_TSF_Msk | ETH_OPERATION_MODE_OSF_Msk; /* Increase enhanced descriptor to 8 WORDS, required when the Advanced */ /* Time-Stamp feature or Full IPC Offload Engine is enabled */ dev_cfg->regs->BUS_MODE = ETH_BUS_MODE_ATDS_Msk | ETH_BUS_MODE_AAL_Msk | ETH_BUS_MODE_FB_Msk | (0x20 << ETH_BUS_MODE_PBL_Pos); eth_xmc4xxx_tx_dma_descriptors_init(dev); ret = eth_xmc4xxx_rx_dma_descriptors_init(dev); if (ret != 0) { return ret; } /* Clear interrupts */ dev_cfg->regs->STATUS = ETH_STATUS_CLEARABLE_BITS; eth_xmc4xxx_mask_unused_interrupts(dev_cfg->regs); #if !DT_INST_NODE_HAS_PROP(0, local_mac_address) gen_random_mac(dev_data->mac_addr, INFINEON_OUI_B0, INFINEON_OUI_B1, INFINEON_OUI_B2); #endif eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr); uint32_t reg = dev_cfg->regs->MAC_FRAME_FILTER; /* enable reception of broadcast frames */ reg &= ~ETH_MAC_FRAME_FILTER_DBF_Msk; /* pass all multicast frames */ reg |= ETH_MAC_FRAME_FILTER_PM_Msk; dev_cfg->regs->MAC_FRAME_FILTER = reg; return eth_xmc4xxx_init_timestamp_control_reg(dev_cfg->regs); } static enum ethernet_hw_caps 
eth_xmc4xxx_capabilities(const struct device *dev) { ARG_UNUSED(dev); enum ethernet_hw_caps caps = ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_HW_TX_CHKSUM_OFFLOAD | ETHERNET_HW_RX_CHKSUM_OFFLOAD; #if defined(CONFIG_PTP_CLOCK_XMC4XXX) caps |= ETHERNET_PTP; #endif #if defined(CONFIG_NET_VLAN) caps |= ETHERNET_HW_VLAN; #endif return caps; } static int eth_xmc4xxx_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct eth_xmc4xxx_data *dev_data = dev->data; const struct eth_xmc4xxx_config *dev_cfg = dev->config; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(dev_data->mac_addr, config->mac_address.addr, sizeof(dev_data->mac_addr)); LOG_INF("%s MAC set to %02x:%02x:%02x:%02x:%02x:%02x", dev->name, dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); eth_xmc4xxx_set_mac_address(dev_cfg->regs, dev_data->mac_addr); net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); return 0; default: break; } return -ENOTSUP; } static void eth_xmc4xxx_irq_config(void) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_xmc4xxx_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } #if defined(CONFIG_PTP_CLOCK_XMC4XXX) static const struct device *eth_xmc4xxx_get_ptp_clock(const struct device *dev) { struct eth_xmc4xxx_data *dev_data = dev->data; return dev_data->ptp_clock; } #endif #if defined(CONFIG_ETH_XMC4XXX_VLAN_HW_FILTER) int eth_xmc4xxx_vlan_setup(const struct device *dev, struct net_if *iface, uint16_t tag, bool enable) { ARG_UNUSED(iface); const struct eth_xmc4xxx_config *dev_cfg = dev->config; LOG_INF("Configuring vlan %d", tag); if (enable) { dev_cfg->regs->VLAN_TAG = FIELD_PREP(ETH_VLAN_TAG_VL_Msk, tag) | ETH_VLAN_TAG_ETV_Msk | ETH_VLAN_TAG_ESVL_Msk; dev_cfg->regs->MAC_FRAME_FILTER |= ETH_MAC_FRAME_FILTER_VTFE_Msk; } else { 
dev_cfg->regs->VLAN_TAG = 0; dev_cfg->regs->MAC_FRAME_FILTER &= ~ETH_MAC_FRAME_FILTER_VTFE_Msk; } return 0; } #endif static const struct ethernet_api eth_xmc4xxx_api = { .iface_api.init = eth_xmc4xxx_iface_init, .send = eth_xmc4xxx_send, .set_config = eth_xmc4xxx_set_config, .get_phy = eth_xmc4xxx_get_phy, .get_capabilities = eth_xmc4xxx_capabilities, #if defined(CONFIG_NET_STATISTICS_ETHERNET) .get_stats = eth_xmc4xxx_stats, #endif #if defined(CONFIG_PTP_CLOCK_XMC4XXX) .get_ptp_clock = eth_xmc4xxx_get_ptp_clock, #endif #if defined(CONFIG_ETH_XMC4XXX_VLAN_HW_FILTER) .vlan_setup = eth_xmc4xxx_vlan_setup, #endif }; PINCTRL_DT_INST_DEFINE(0); static struct eth_xmc4xxx_config eth_xmc4xxx_config = { .regs = (ETH_GLOBAL_TypeDef *)DT_REG_ADDR(DT_INST_PARENT(0)), .irq_config_func = eth_xmc4xxx_irq_config, .phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle)), .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), .port_ctrl = { .rxd0 = DT_INST_ENUM_IDX(0, rxd0_port_ctrl), .rxd1 = DT_INST_ENUM_IDX(0, rxd1_port_ctrl), .rxd2 = DT_INST_ENUM_IDX_OR(0, rxd2_port_ctrl, 0), .rxd3 = DT_INST_ENUM_IDX_OR(0, rxd3_port_ctrl, 0), .clk_rmii = DT_INST_ENUM_IDX(0, rmii_rx_clk_port_ctrl), .crs_dv = DT_INST_ENUM_IDX(0, crs_rx_dv_port_ctrl), .crs = DT_INST_ENUM_IDX_OR(0, crs_port_ctrl, 0), .rxer = DT_INST_ENUM_IDX(0, rxer_port_ctrl), .col = DT_INST_ENUM_IDX_OR(0, col_port_ctrl, 0), .clk_tx = DT_INST_ENUM_IDX_OR(0, tx_clk_port_ctrl, 0), .mode = DT_INST_ENUM_IDX_OR(0, phy_connection_type, 0), } }; static struct eth_xmc4xxx_data eth_xmc4xxx_data = { .mac_addr = DT_INST_PROP_OR(0, local_mac_address, {0}), }; ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_xmc4xxx_init, NULL, &eth_xmc4xxx_data, &eth_xmc4xxx_config, CONFIG_ETH_INIT_PRIORITY, &eth_xmc4xxx_api, NET_ETH_MTU); #if defined(CONFIG_PTP_CLOCK_XMC4XXX) struct ptp_context { const struct device *eth_dev; }; static struct ptp_context ptp_xmc4xxx_context_0; static int eth_xmc4xxx_ptp_clock_set(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context 
*ptp_context = dev->data; const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = tm->nanosecond; dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = tm->second; dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSINIT_Msk; if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSINIT_Msk) == 0, ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) { return -ETIMEDOUT; } return 0; } static int eth_xmc4xxx_ptp_clock_get(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; uint32_t nanosecond_0 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS; uint32_t second_0 = dev_cfg->regs->SYSTEM_TIME_SECONDS; uint32_t nanosecond_1 = dev_cfg->regs->SYSTEM_TIME_NANOSECONDS; uint32_t second_1 = dev_cfg->regs->SYSTEM_TIME_SECONDS; /* check that there is no roll over while we read the timestamp. If roll over happens */ /* just choose the later value */ if (second_0 == second_1) { tm->second = second_0; tm->nanosecond = nanosecond_0; } else { tm->second = second_1; tm->nanosecond = nanosecond_1; } return 0; } static int eth_xmc4xxx_ptp_clock_adjust(const struct device *dev, int increment) { struct ptp_context *ptp_context = dev->data; const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; uint32_t increment_tmp; if ((increment <= -(int)NSEC_PER_SEC) || (increment >= (int)NSEC_PER_SEC)) { return -EINVAL; } if (increment < 0) { increment_tmp = -increment; increment_tmp |= ETH_SYSTEM_TIME_NANOSECONDS_UPDATE_ADDSUB_Msk; } else { increment_tmp = increment; } dev_cfg->regs->SYSTEM_TIME_NANOSECONDS_UPDATE = increment_tmp; dev_cfg->regs->SYSTEM_TIME_SECONDS_UPDATE = 0; dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSUPDT_Msk; if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSUPDT_Msk) == 0, ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) { return -ETIMEDOUT; } return 
0; } static int eth_xmc4xxx_ptp_clock_rate_adjust(const struct device *dev, double ratio) { struct ptp_context *ptp_context = dev->data; const struct eth_xmc4xxx_config *dev_cfg = ptp_context->eth_dev->config; uint64_t K = dev_cfg->regs->TIMESTAMP_ADDEND; if (ratio < ETH_PTP_RATE_ADJUST_RATIO_MIN || ratio > ETH_PTP_RATE_ADJUST_RATIO_MAX) { return -EINVAL; } /* f_out = f_cpu * K / 2^32, where K = TIMESTAMP_ADDEND. Target F_out = 50MHz */ K = K * ratio + 0.5; if (K > UINT32_MAX) { return -EINVAL; } dev_cfg->regs->TIMESTAMP_ADDEND = K; /* Addend register update */ dev_cfg->regs->TIMESTAMP_CONTROL |= ETH_TIMESTAMP_CONTROL_TSADDREG_Msk; if (!WAIT_FOR((dev_cfg->regs->TIMESTAMP_CONTROL & ETH_TIMESTAMP_CONTROL_TSADDREG_Msk) == 0, ETH_TIMESTAMP_CONTROL_REG_TIMEOUT_USEC,)) { return -ETIMEDOUT; } return 0; } static const struct ptp_clock_driver_api ptp_api_xmc4xxx = { .set = eth_xmc4xxx_ptp_clock_set, .get = eth_xmc4xxx_ptp_clock_get, .adjust = eth_xmc4xxx_ptp_clock_adjust, .rate_adjust = eth_xmc4xxx_ptp_clock_rate_adjust, }; static int ptp_clock_xmc4xxx_init(const struct device *port) { const struct device *const eth_dev = DEVICE_DT_INST_GET(0); struct eth_xmc4xxx_data *dev_data = eth_dev->data; struct ptp_context *ptp_context = port->data; dev_data->ptp_clock = port; ptp_context->eth_dev = eth_dev; return 0; } DEVICE_DEFINE(xmc4xxx_ptp_clock_0, PTP_CLOCK_NAME, ptp_clock_xmc4xxx_init, NULL, &ptp_xmc4xxx_context_0, NULL, POST_KERNEL, CONFIG_PTP_CLOCK_INIT_PRIORITY, &ptp_api_xmc4xxx); #endif /* CONFIG_PTP_CLOCK_XMC4XXX */ ```
/content/code_sandbox/drivers/ethernet/eth_xmc4xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,585
```c /* * */ #define LOG_MODULE_NAME eth_slip_tap #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <zephyr/net/ethernet.h> #include "../net/slip.h" static struct slip_context slip_context_data; static enum ethernet_hw_caps eth_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_HW_VLAN #if defined(CONFIG_NET_LLDP) | ETHERNET_LLDP #endif ; } static const struct ethernet_api slip_if_api = { .iface_api.init = slip_iface_init, .get_capabilities = eth_capabilities, .send = slip_send, }; #define _SLIP_L2_LAYER ETHERNET_L2 #define _SLIP_L2_CTX_TYPE NET_L2_GET_CTX_TYPE(ETHERNET_L2) ETH_NET_DEVICE_INIT(slip, CONFIG_SLIP_DRV_NAME, slip_init, NULL, &slip_context_data, NULL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &slip_if_api, _SLIP_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_slip_tap.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
229
```unknown
# Description:
# Cyclone V ethernet configuration options

menuconfig ETH_CYCLONEV
	bool "Cyclone V ethernet MAC driver"
	default y
	depends on DT_HAS_SNPS_ETHERNET_CYCLONEV_ENABLED

if ETH_CYCLONEV

config ETH_CVSX_NB_TX_DESCS
	int "Number of entries in the transmit descriptor ring"
	default 32
	range 4 128
	help
	  A higher number allows for more packets to be queued which may
	  improve throughput, but that requires more transient memory.
	  However there must be enough descriptors to hold all fragments
	  of a full-size packet to be transmitted or the packet will be
	  dropped. Fragment size is influenced by CONFIG_NET_BUF_DATA_SIZE.

config ETH_CVSX_NB_RX_DESCS
	int "Number of entries in the receive descriptor ring"
	default 32
	range 4 128
	help
	  Received packets are spread across the required number of
	  fragment buffers. Each RX fragment has a size of
	  CONFIG_NET_BUF_DATA_SIZE. There is one descriptor entry
	  per fragment.

endif # ETH_CYCLONEV
```
/content/code_sandbox/drivers/ethernet/Kconfig.cyclonev
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
234
```c /* * */ /** * @file * * Routines setting up the host system. Those are placed in separate file * because there is naming conflicts between host and zephyr network stacks. */ /* Host include files */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <errno.h> #include <string.h> #include <stdbool.h> #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #include <sys/socket.h> #include <sys/select.h> #include <net/if.h> #include <time.h> #include <inttypes.h> #include <nsi_tracing.h> #ifdef __linux #include <linux/if.h> #include <linux/if_tun.h> #endif #include "eth_native_posix_priv.h" /* Note that we cannot create the TUN/TAP device from the setup script * as we need to get a file descriptor to communicate with the interface. */ int eth_iface_create(const char *dev_name, const char *if_name, bool tun_only) { struct ifreq ifr; int fd, ret = -EINVAL; fd = open(dev_name, O_RDWR); if (fd < 0) { return -errno; } (void)memset(&ifr, 0, sizeof(ifr)); #ifdef __linux ifr.ifr_flags = (tun_only ? IFF_TUN : IFF_TAP) | IFF_NO_PI; strncpy(ifr.ifr_name, if_name, IFNAMSIZ - 1); ret = ioctl(fd, TUNSETIFF, (void *)&ifr); if (ret < 0) { ret = -errno; close(fd); return ret; } #endif return fd; } int eth_iface_remove(int fd) { return close(fd); } static int ssystem(const char *fmt, ...) __attribute__((__format__(__printf__, 1, 2))); static int ssystem(const char *fmt, ...) 
{ char cmd[255]; va_list ap; int ret; va_start(ap, fmt); vsnprintf(cmd, sizeof(cmd), fmt, ap); va_end(ap); nsi_print_trace("%s\n", cmd); ret = system(cmd); return -WEXITSTATUS(ret); } int eth_wait_data(int fd) { struct timeval timeout; fd_set rset; int ret; FD_ZERO(&rset); FD_SET(fd, &rset); timeout.tv_sec = 0; timeout.tv_usec = 0; ret = select(fd + 1, &rset, NULL, NULL, &timeout); if (ret < 0 && errno != EINTR) { return -errno; } else if (ret > 0) { if (FD_ISSET(fd, &rset)) { return 0; } } return -EAGAIN; } int eth_clock_gettime(uint64_t *second, uint32_t *nanosecond) { struct timespec tp; int ret; ret = clock_gettime(CLOCK_MONOTONIC_RAW, &tp); if (ret < 0) { return -errno; } *second = (uint64_t)tp.tv_sec; *nanosecond = (uint32_t)tp.tv_nsec; return 0; } int eth_promisc_mode(const char *if_name, bool enable) { return ssystem("ip link set dev %s promisc %s", if_name, enable ? "on" : "off"); } ```
/content/code_sandbox/drivers/ethernet/eth_native_posix_adapt.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
726
```unknown
# NUMAKER Ethernet Driver configuration options

config ETH_NUMAKER
	bool "Nuvoton NUMAKER MCU Ethernet driver"
	default y
	select HAS_NUMAKER_ETH
	depends on DT_HAS_NUVOTON_NUMAKER_ETHERNET_ENABLED
	help
	  This option enables the Ethernet driver for
	  Nuvoton NuMaker family of processors.
	  Say y if you wish to enable NuMaker ETH.
```
/content/code_sandbox/drivers/ethernet/Kconfig.numaker
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
83
```c /* * */ /** * @file * * Ethernet driver for native posix board. This is meant for network * connectivity between host and Zephyr. */ #define LOG_MODULE_NAME eth_posix #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <stdio.h> #include <zephyr/kernel.h> #include <stdbool.h> #include <errno.h> #include <stddef.h> #include <cmdline.h> #include <posix_native_task.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_core.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include <zephyr/drivers/ptp_clock.h> #include <zephyr/net/gptp.h> #include <zephyr/net/lldp.h> #include "eth_native_posix_priv.h" #include "nsi_host_trampolines.h" #include "eth.h" #define NET_BUF_TIMEOUT K_MSEC(100) #if defined(CONFIG_NET_VLAN) #define ETH_HDR_LEN sizeof(struct net_eth_vlan_hdr) #else #define ETH_HDR_LEN sizeof(struct net_eth_hdr) #endif struct eth_context { uint8_t recv[NET_ETH_MTU + ETH_HDR_LEN]; uint8_t send[NET_ETH_MTU + ETH_HDR_LEN]; uint8_t mac_addr[6]; struct net_linkaddr ll_addr; struct net_if *iface; const char *if_name; k_tid_t rx_thread; struct z_thread_stack_element *rx_stack; size_t rx_stack_size; int dev_fd; bool init_done; bool status; bool promisc_mode; #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct net_stats_eth stats; #endif #if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK) const struct device *ptp_clock; #endif }; static const char *if_name_cmd_opt; #define DEFINE_RX_THREAD(x, _) \ K_KERNEL_STACK_DEFINE(rx_thread_stack_##x, \ CONFIG_ARCH_POSIX_RECOMMENDED_STACK_SIZE);\ static struct k_thread rx_thread_data_##x LISTIFY(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT, DEFINE_RX_THREAD, (;), _); #if defined(CONFIG_NET_GPTP) static bool need_timestamping(struct gptp_hdr *hdr) { switch (hdr->message_type) { case GPTP_SYNC_MESSAGE: case GPTP_PATH_DELAY_RESP_MESSAGE: return true; default: return false; } } static struct gptp_hdr 
*check_gptp_msg(struct net_if *iface, struct net_pkt *pkt, bool is_tx) { uint8_t *msg_start = net_pkt_data(pkt); struct gptp_hdr *gptp_hdr; int eth_hlen; struct net_eth_hdr *hdr; hdr = (struct net_eth_hdr *)msg_start; if (ntohs(hdr->type) != NET_ETH_PTYPE_PTP) { return NULL; } eth_hlen = sizeof(struct net_eth_hdr); /* In TX, the first net_buf contains the Ethernet header * and the actual gPTP header is in the second net_buf. * In RX, the Ethernet header + other headers are in the * first net_buf. */ if (is_tx) { if (pkt->frags->frags == NULL) { return false; } gptp_hdr = (struct gptp_hdr *)pkt->frags->frags->data; } else { gptp_hdr = (struct gptp_hdr *)(pkt->frags->data + eth_hlen); } return gptp_hdr; } static void update_pkt_priority(struct gptp_hdr *hdr, struct net_pkt *pkt) { if (GPTP_IS_EVENT_MSG(hdr->message_type)) { net_pkt_set_priority(pkt, NET_PRIORITY_CA); } else { net_pkt_set_priority(pkt, NET_PRIORITY_IC); } } static void update_gptp(struct net_if *iface, struct net_pkt *pkt, bool send) { struct net_ptp_time timestamp; struct gptp_hdr *hdr; int ret; ret = eth_clock_gettime(&timestamp.second, &timestamp.nanosecond); if (ret < 0) { return; } net_pkt_set_timestamp(pkt, &timestamp); hdr = check_gptp_msg(iface, pkt, send); if (!hdr) { return; } if (send) { ret = need_timestamping(hdr); if (ret) { net_if_add_tx_timestamp(pkt); } } else { update_pkt_priority(hdr, pkt); } } #else #define update_gptp(iface, pkt, send) #endif /* CONFIG_NET_GPTP */ static int eth_send(const struct device *dev, struct net_pkt *pkt) { struct eth_context *ctx = dev->data; int count = net_pkt_get_len(pkt); int ret; ret = net_pkt_read(pkt, ctx->send, count); if (ret) { return ret; } update_gptp(net_pkt_iface(pkt), pkt, true); LOG_DBG("Send pkt %p len %d", pkt, count); ret = nsi_host_write(ctx->dev_fd, ctx->send, count); if (ret < 0) { LOG_DBG("Cannot send pkt %p (%d)", pkt, ret); } return ret < 0 ? 
ret : 0; } static struct net_linkaddr *eth_get_mac(struct eth_context *ctx) { ctx->ll_addr.addr = ctx->mac_addr; ctx->ll_addr.len = sizeof(ctx->mac_addr); return &ctx->ll_addr; } static struct net_pkt *prepare_pkt(struct eth_context *ctx, int count, int *status) { struct net_pkt *pkt; pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, count, AF_UNSPEC, 0, NET_BUF_TIMEOUT); if (!pkt) { *status = -ENOMEM; return NULL; } if (net_pkt_write(pkt, ctx->recv, count)) { net_pkt_unref(pkt); *status = -ENOBUFS; return NULL; } *status = 0; LOG_DBG("Recv pkt %p len %d", pkt, count); return pkt; } static int read_data(struct eth_context *ctx, int fd) { struct net_if *iface = ctx->iface; struct net_pkt *pkt = NULL; int status; int count; count = nsi_host_read(fd, ctx->recv, sizeof(ctx->recv)); if (count <= 0) { return 0; } pkt = prepare_pkt(ctx, count, &status); if (!pkt) { return status; } update_gptp(iface, pkt, false); if (net_recv_data(iface, pkt) < 0) { net_pkt_unref(pkt); } return 0; } static void eth_rx(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); struct eth_context *ctx = p1; LOG_DBG("Starting ZETH RX thread"); while (1) { if (net_if_is_up(ctx->iface)) { while (!eth_wait_data(ctx->dev_fd)) { read_data(ctx, ctx->dev_fd); k_yield(); } } k_sleep(K_MSEC(CONFIG_ETH_NATIVE_POSIX_RX_TIMEOUT)); } } #if defined(CONFIG_THREAD_MAX_NAME_LEN) #define THREAD_MAX_NAME_LEN CONFIG_THREAD_MAX_NAME_LEN #else #define THREAD_MAX_NAME_LEN 1 #endif static void create_rx_handler(struct eth_context *ctx) { k_thread_create(ctx->rx_thread, ctx->rx_stack, ctx->rx_stack_size, eth_rx, ctx, NULL, NULL, K_PRIO_COOP(14), 0, K_NO_WAIT); if (IS_ENABLED(CONFIG_THREAD_NAME)) { char name[THREAD_MAX_NAME_LEN]; snprintk(name, sizeof(name), "eth_native_posix_rx-%s", ctx->if_name); k_thread_name_set(ctx->rx_thread, name); } } static void eth_iface_init(struct net_if *iface) { struct eth_context *ctx = net_if_get_device(iface)->data; struct net_linkaddr *ll_addr = eth_get_mac(ctx); ctx->iface = 
iface; ethernet_init(iface); if (ctx->init_done) { return; } net_lldp_set_lldpdu(iface); ctx->init_done = true; #if defined(CONFIG_ETH_NATIVE_POSIX_RANDOM_MAC) /* 00-00-5E-00-53-xx Documentation RFC 7042 */ gen_random_mac(ctx->mac_addr, 0x00, 0x00, 0x5E); ctx->mac_addr[3] = 0x00; ctx->mac_addr[4] = 0x53; /* The TUN/TAP setup script will by default set the MAC address of host * interface to 00:00:5E:00:53:FF so do not allow that. */ if (ctx->mac_addr[5] == 0xff) { ctx->mac_addr[5] = 0x01; } #else /* Difficult to configure MAC addresses any sane way if we have more * than one network interface. */ BUILD_ASSERT(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT == 1, "Cannot have static MAC if interface count > 1"); if (CONFIG_ETH_NATIVE_POSIX_MAC_ADDR[0] != 0) { if (net_bytes_from_str(ctx->mac_addr, sizeof(ctx->mac_addr), CONFIG_ETH_NATIVE_POSIX_MAC_ADDR) < 0) { LOG_ERR("Invalid MAC address %s", CONFIG_ETH_NATIVE_POSIX_MAC_ADDR); } } #endif /* If we have only one network interface, then use the name * defined in the Kconfig directly. This way there is no need to * change the documentation etc. and break things. 
 */
	if (CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT == 1) {
		ctx->if_name = CONFIG_ETH_NATIVE_POSIX_DRV_NAME;
	}

	/* A host interface name given on the command line overrides the
	 * Kconfig default.
	 */
	if (if_name_cmd_opt != NULL) {
		ctx->if_name = if_name_cmd_opt;
	}

	LOG_DBG("Interface %p using \"%s\"", iface, ctx->if_name);

	net_if_set_link_addr(iface, ll_addr->addr, ll_addr->len,
			     NET_LINK_ETHERNET);

	ctx->dev_fd = eth_iface_create(CONFIG_ETH_NATIVE_POSIX_DEV_NAME,
				       ctx->if_name, false);
	if (ctx->dev_fd < 0) {
		LOG_ERR("Cannot create %s (%d)", ctx->if_name, -errno);
	} else {
		/* Create a thread that will handle incoming data from host */
		create_rx_handler(ctx);
	}
}

/* Report the Ethernet hardware features supported by this driver.
 * Individual capability bits are included only when the matching
 * Kconfig option is enabled.
 */
static enum ethernet_hw_caps
eth_posix_native_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_TXTIME
#if defined(CONFIG_NET_VLAN)
		| ETHERNET_HW_VLAN
#endif
#if defined(CONFIG_ETH_NATIVE_POSIX_VLAN_TAG_STRIP)
		| ETHERNET_HW_VLAN_TAG_STRIP
#endif
#if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK)
		| ETHERNET_PTP
#endif
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
		| ETHERNET_PROMISC_MODE
#endif
#if defined(CONFIG_NET_LLDP)
		| ETHERNET_LLDP
#endif
		;
}

#if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK)
/* Return the PTP clock device bound to this interface (set in ptp_init) */
static const struct device *eth_get_ptp_clock(const struct device *dev)
{
	struct eth_context *context = dev->data;

	return context->ptp_clock;
}
#endif

#if defined(CONFIG_NET_STATISTICS_ETHERNET)
/* Expose the per-interface Ethernet statistics block */
static struct net_stats_eth *get_stats(const struct device *dev)
{
	struct eth_context *context = dev->data;

	return &(context->stats);
}
#endif

/* Runtime configuration hook. Supports toggling promiscuous mode on the
 * host interface and updating the interface MAC address; other config
 * types are silently accepted (return 0).
 */
static int set_config(const struct device *dev,
		      enum ethernet_config_type type,
		      const struct ethernet_config *config)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_NET_PROMISCUOUS_MODE) &&
	    type == ETHERNET_CONFIG_TYPE_PROMISC_MODE) {
		struct eth_context *context = dev->data;

		/* -EALREADY when the requested mode is already active */
		if (config->promisc_mode) {
			if (context->promisc_mode) {
				return -EALREADY;
			}

			context->promisc_mode = true;
		} else {
			if (!context->promisc_mode) {
				return -EALREADY;
			}

			context->promisc_mode = false;
		}

		/* Propagate the new mode to the host-side interface */
		ret = eth_promisc_mode(context->if_name,
				       context->promisc_mode);
	} else if (type == ETHERNET_CONFIG_TYPE_MAC_ADDRESS) {
		struct eth_context *context = dev->data;

		memcpy(context->mac_addr, config->mac_address.addr,
		       sizeof(context->mac_addr));
	}

	return ret;
}

#if defined(CONFIG_NET_VLAN)
/* Keep LLDP DU state in sync when a VLAN tag is enabled/disabled */
static int vlan_setup(const struct device *dev, struct net_if *iface,
		      uint16_t tag, bool enable)
{
	if (enable) {
		net_lldp_set_lldpdu(iface);
	} else {
		net_lldp_unset_lldpdu(iface);
	}

	return 0;
}
#endif /* CONFIG_NET_VLAN */

/* Ethernet L2 API vtable for this driver */
static const struct ethernet_api eth_if_api = {
	.iface_api.init = eth_iface_init,

	.get_capabilities = eth_posix_native_get_capabilities,
	.set_config = set_config,
	.send = eth_send,

#if defined(CONFIG_NET_VLAN)
	.vlan_setup = vlan_setup,
#endif
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats = get_stats,
#endif
#if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK)
	.get_ptp_clock = eth_get_ptp_clock,
#endif
};

/* Per-instance driver context; one is generated for each interface */
#define DEFINE_ETH_DEV_DATA(x, _)					\
	static struct eth_context eth_context_data_##x = {		\
		.if_name = CONFIG_ETH_NATIVE_POSIX_DRV_NAME #x,		\
		.rx_thread = &rx_thread_data_##x,			\
		.rx_stack = rx_thread_stack_##x,			\
		.rx_stack_size = K_KERNEL_STACK_SIZEOF(rx_thread_stack_##x), \
	}

LISTIFY(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT, DEFINE_ETH_DEV_DATA, (;), _);

/* Instantiate one Ethernet network device per configured interface */
#define DEFINE_ETH_DEVICE(x, _)						\
	ETH_NET_DEVICE_INIT(eth_native_posix_##x,			\
			    CONFIG_ETH_NATIVE_POSIX_DRV_NAME #x,	\
			    NULL, NULL, &eth_context_data_##x, NULL,	\
			    CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,	\
			    &eth_if_api,				\
			    NET_ETH_MTU)

LISTIFY(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT, DEFINE_ETH_DEVICE, (;), _);

#if defined(CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK)
#if defined(CONFIG_NET_GPTP)
BUILD_ASSERT( \
	CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT == CONFIG_NET_GPTP_NUM_PORTS, \
	"Number of network interfaces must match gPTP port count");
#endif

/* Back-reference from a PTP clock device to its Ethernet context */
struct ptp_context {
	struct eth_context *eth_context;
};

#define DEFINE_PTP_DEV_DATA(x, _) \
	static struct ptp_context ptp_context_##x

LISTIFY(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT, DEFINE_PTP_DEV_DATA, (;), _);

/* PTP clock "set": the host clock cannot be changed from here, so this
 * is a documented no-op that reports success.
 */
static int ptp_clock_set_native_posix(const struct device *clk,
				      struct net_ptp_time *tm)
{
	ARG_UNUSED(clk);
	ARG_UNUSED(tm);

	/* We cannot set the host device time so this function
	 * does nothing.
	 */

	return 0;
}

/* PTP clock "get": read the current time from the host clock */
static int ptp_clock_get_native_posix(const struct device *clk,
				      struct net_ptp_time *tm)
{
	ARG_UNUSED(clk);

	return eth_clock_gettime(&tm->second, &tm->nanosecond);
}

/* PTP clock "adjust": no-op, host time is not adjustable from here */
static int ptp_clock_adjust_native_posix(const struct device *clk,
					 int increment)
{
	ARG_UNUSED(clk);
	ARG_UNUSED(increment);

	/* We cannot adjust the host device time so this function
	 * does nothing.
	 */

	return 0;
}

/* PTP clock "rate_adjust": no-op, host rate is not adjustable from here */
static int ptp_clock_rate_adjust_native_posix(const struct device *clk,
					      double ratio)
{
	ARG_UNUSED(clk);
	ARG_UNUSED(ratio);

	/* We cannot adjust the host device time so this function
	 * does nothing.
	 */

	return 0;
}

static const struct ptp_clock_driver_api api = {
	.set = ptp_clock_set_native_posix,
	.get = ptp_clock_get_native_posix,
	.adjust = ptp_clock_adjust_native_posix,
	.rate_adjust = ptp_clock_rate_adjust_native_posix,
};

/* Link the generated PTP clock device and its Ethernet device together */
#define PTP_INIT_FUNC(x, _)						\
	static int ptp_init_##x(const struct device *port)		\
	{								\
		const struct device *const eth_dev = DEVICE_GET(eth_native_posix_##x); \
		struct eth_context *context = eth_dev->data;		\
		struct ptp_context *ptp_context = port->data;		\
									\
		context->ptp_clock = port;				\
		ptp_context->eth_context = context;			\
									\
		return 0;						\
	}

LISTIFY(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT, PTP_INIT_FUNC, (), _)

#define DEFINE_PTP_DEVICE(x, _)					\
	DEVICE_DEFINE(eth_native_posix_ptp_clock_##x,		\
		      PTP_CLOCK_NAME "_" #x,			\
		      ptp_init_##x,				\
		      NULL,					\
		      &ptp_context_##x,				\
		      NULL,					\
		      POST_KERNEL,				\
		      CONFIG_KERNEL_INIT_PRIORITY_DEFAULT,	\
		      &api)

LISTIFY(CONFIG_ETH_NATIVE_POSIX_INTERFACE_COUNT, DEFINE_PTP_DEVICE, (;), _);

#endif /* CONFIG_ETH_NATIVE_POSIX_PTP_CLOCK */

/* Register the "--eth-if=<name>" native_posix command line option that
 * selects the host interface to attach to.
 */
static void add_native_posix_options(void)
{
	static struct args_struct_t eth_native_posix_options[] = {
		{
			.is_mandatory = false,
			.option = "eth-if",
			.name = "name",
			.type = 's',
			.dest = (void *)&if_name_cmd_opt,
			.descript = "Name of the eth interface to use",
		},
		ARG_TABLE_ENDMARKER,
	};

	native_add_command_line_opts(eth_native_posix_options);
}

NATIVE_TASK(add_native_posix_options, PRE_BOOT_1, 10);
```
/content/code_sandbox/drivers/ethernet/eth_native_posix.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,949
```c
/* STM32 HAL-based Ethernet MAC driver (compatible "st,stm32-ethernet") */

#define DT_DRV_COMPAT st_stm32_ethernet

#define LOG_MODULE_NAME eth_stm32_hal
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/crc.h>
#include <errno.h>
#include <stdbool.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <zephyr/net/phy.h>
#include <ethernet/eth_stats.h>
#include <soc.h>
#include <zephyr/sys/printk.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/irq.h>
#include <zephyr/net/lldp.h>
#include <zephyr/drivers/hwinfo.h>

#if defined(CONFIG_NET_DSA)
#include <zephyr/net/dsa.h>
#endif

#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
#include <zephyr/drivers/ptp_clock.h>
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

#include "eth.h"
#include "eth_stm32_hal_priv.h"

#if DT_INST_PROP(0, zephyr_random_mac_address)
#define ETH_STM32_RANDOM_MAC
#endif

#if defined(CONFIG_ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER) && \
	!DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
#error DTCM for DMA buffer is activated but zephyr,dtcm is not present in dts
#endif

#define PHY_ADDR	CONFIG_ETH_STM32_HAL_PHY_ADDRESS

#if defined(CONFIG_MDIO)
/* Resolve the PHY device from the mdio child node at address PHY_ADDR */
#define DEVICE_PHY_BY_NAME(n) \
	DEVICE_DT_GET(DT_CHILD(DT_INST_CHILD(n, mdio), __CONCAT(ethernet_phy_, PHY_ADDR)))

static const struct device *eth_stm32_phy_dev = DEVICE_PHY_BY_NAME(0);
#endif

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)

#define PHY_BSR  ((uint16_t)0x0001U)  /*!< Transceiver Basic Status Register */
#define PHY_LINKED_STATUS  ((uint16_t)0x0004U)  /*!< Valid link established */

#define IS_ETH_DMATXDESC_OWN(dma_tx_desc)	(dma_tx_desc->DESC3 & \
							ETH_DMATXNDESCRF_OWN)

#define ETH_RXBUFNB	ETH_RX_DESC_CNT
#define ETH_TXBUFNB	ETH_TX_DESC_CNT

#define ETH_MEDIA_INTERFACE_MII		HAL_ETH_MII_MODE
#define ETH_MEDIA_INTERFACE_RMII	HAL_ETH_RMII_MODE

/* Only one tx_buffer is sufficient to pass only 1 dma_buffer */
#define ETH_TXBUF_DEF_NB	1U
#else

#define IS_ETH_DMATXDESC_OWN(dma_tx_desc)	(dma_tx_desc->Status & \
							ETH_DMATXDESC_OWN)

#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */

#define ETH_DMA_TX_TIMEOUT_MS	20U  /* transmit timeout in milliseconds */

/* Placement attributes for DMA descriptors and buffers: DTCM, a dedicated
 * linker section, non-cacheable RAM, or plain aligned RAM, depending on
 * the selected configuration.
 */
#if defined(CONFIG_ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER) && \
	DT_NODE_HAS_STATUS(DT_CHOSEN(zephyr_dtcm), okay)
#define __eth_stm32_desc __dtcm_noinit_section
#define __eth_stm32_buf  __dtcm_noinit_section
#elif defined(CONFIG_SOC_SERIES_STM32H7X)
#define __eth_stm32_desc __attribute__((section(".eth_stm32_desc")))
#define __eth_stm32_buf  __attribute__((section(".eth_stm32_buf")))
#elif defined(CONFIG_NOCACHE_MEMORY)
#define __eth_stm32_desc __nocache __aligned(4)
#define __eth_stm32_buf  __nocache __aligned(4)
#else
#define __eth_stm32_desc __aligned(4)
#define __eth_stm32_buf  __aligned(4)
#endif

static ETH_DMADescTypeDef dma_rx_desc_tab[ETH_RXBUFNB] __eth_stm32_desc;
static ETH_DMADescTypeDef dma_tx_desc_tab[ETH_TXBUFNB] __eth_stm32_desc;
static uint8_t dma_rx_buffer[ETH_RXBUFNB][ETH_STM32_RX_BUF_SIZE] __eth_stm32_buf;
static uint8_t dma_tx_buffer[ETH_TXBUFNB][ETH_STM32_TX_BUF_SIZE] __eth_stm32_buf;

#if defined(CONFIG_ETH_STM32_HAL_API_V2)

BUILD_ASSERT(ETH_STM32_RX_BUF_SIZE % 4 == 0, "Rx buffer size must be a multiple of 4");

/* Bookkeeping for one RX DMA buffer; buffers form a linked list while a
 * multi-buffer frame is being assembled.
 */
struct eth_stm32_rx_buffer_header {
	struct eth_stm32_rx_buffer_header *next;
	uint16_t size;
	bool used;
};

/* Bookkeeping for one TX DMA buffer */
struct eth_stm32_tx_buffer_header {
	ETH_BufferTypeDef tx_buff;
	bool used;
};

/* Per-transmission context passed to the HAL as user data */
struct eth_stm32_tx_context {
	struct net_pkt *pkt;
	uint16_t first_tx_buffer_index;
};

static struct eth_stm32_rx_buffer_header dma_rx_buffer_header[ETH_RXBUFNB];
static struct eth_stm32_tx_buffer_header dma_tx_buffer_header[ETH_TXBUFNB];

/* HAL callback: hand out a free RX buffer, or NULL when all are in use */
void HAL_ETH_RxAllocateCallback(uint8_t **buf)
{
	for (size_t i = 0; i < ETH_RXBUFNB; ++i) {
		if (!dma_rx_buffer_header[i].used) {
			dma_rx_buffer_header[i].next = NULL;
			dma_rx_buffer_header[i].size = 0;
			dma_rx_buffer_header[i].used = true;
			*buf = dma_rx_buffer[i];
			return;
		}
	}
	*buf = NULL;
}

/* Pointer to an array of ETH_STM32_RX_BUF_SIZE uint8_t's */
typedef uint8_t (*RxBufferPtr)[ETH_STM32_RX_BUF_SIZE];

/* called by HAL_ETH_ReadData() */
void HAL_ETH_RxLinkCallback(void **pStart, void **pEnd,
			    uint8_t *buff, uint16_t Length)
{
	/* buff points to the begin on one of the rx buffers,
	 * so we can compute the index of the given buffer
	 */
	size_t index = (RxBufferPtr)buff - &dma_rx_buffer[0];
	struct eth_stm32_rx_buffer_header *header = &dma_rx_buffer_header[index];

	__ASSERT_NO_MSG(index < ETH_RXBUFNB);

	header->size = Length;

	if (!*pStart) {
		/* first packet, set head pointer of linked list */
		*pStart = header;
		*pEnd = header;
	} else {
		__ASSERT_NO_MSG(*pEnd != NULL);
		/* not the first packet, add to list and adjust tail pointer */
		((struct eth_stm32_rx_buffer_header *)*pEnd)->next = header;
		*pEnd = header;
	}
}

/* Called by HAL_ETH_ReleaseTxPacket */
void HAL_ETH_TxFreeCallback(uint32_t *buff)
{
	__ASSERT_NO_MSG(buff != NULL);

	/* buff is the user context in tx_config.pData */
	struct eth_stm32_tx_context *ctx = (struct eth_stm32_tx_context *)buff;
	struct eth_stm32_tx_buffer_header *buffer_header =
		&dma_tx_buffer_header[ctx->first_tx_buffer_index];

	/* Walk the buffer chain and mark every buffer free again */
	while (buffer_header != NULL) {
		buffer_header->used = false;
		if (buffer_header->tx_buff.next != NULL) {
			buffer_header = CONTAINER_OF(buffer_header->tx_buff.next,
						     struct eth_stm32_tx_buffer_header,
						     tx_buff);
		} else {
			buffer_header = NULL;
		}
	}
}

/* allocate a tx buffer and mark it as used */
static inline uint16_t allocate_tx_buffer(void)
{
	/* Busy-waits (yielding) until a buffer becomes free */
	for (;;) {
		for (uint16_t index = 0; index < ETH_TXBUFNB; index++) {
			if (!dma_tx_buffer_header[index].used) {
				dma_tx_buffer_header[index].used = true;
				return index;
			}
		}
		k_yield();
	}
}
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
	defined(CONFIG_ETH_STM32_HAL_API_V2)
static ETH_TxPacketConfig tx_config;
#endif

/* Read a PHY register through whichever access path is configured:
 * the MDIO bus driver, the V2 HAL API, or the legacy HAL API.
 */
static HAL_StatusTypeDef read_eth_phy_register(ETH_HandleTypeDef *heth,
					       uint32_t PHYAddr,
					       uint32_t PHYReg,
					       uint32_t *RegVal)
{
#if defined(CONFIG_MDIO)
	return phy_read(eth_stm32_phy_dev, PHYReg, RegVal);
#elif defined(CONFIG_ETH_STM32_HAL_API_V2)
	return HAL_ETH_ReadPHYRegister(heth, PHYAddr, PHYReg, RegVal);
#else
	ARG_UNUSED(PHYAddr);
	return HAL_ETH_ReadPHYRegister(heth, PHYReg, RegVal);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */
}

/* Configure the MAC frame filter: hash-based multicast filtering when
 * CONFIG_ETH_STM32_MULTICAST_FILTER is set, otherwise pass all multicast.
 */
static inline void setup_mac_filter(ETH_HandleTypeDef *heth)
{
	__ASSERT_NO_MSG(heth != NULL);

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	ETH_MACFilterConfigTypeDef MACFilterConf;

	HAL_ETH_GetMACFilterConfig(heth, &MACFilterConf);
#if defined(CONFIG_ETH_STM32_MULTICAST_FILTER)
	MACFilterConf.HashMulticast = ENABLE;
	MACFilterConf.PassAllMulticast = DISABLE;
#else
	MACFilterConf.HashMulticast = DISABLE;
	MACFilterConf.PassAllMulticast = ENABLE;
#endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */
	MACFilterConf.HachOrPerfectFilter = DISABLE;

	HAL_ETH_SetMACFilterConfig(heth, &MACFilterConf);

	k_sleep(K_MSEC(1));
#else
	uint32_t tmp = heth->Instance->MACFFR;

	/* clear all multicast filter bits, resulting in perfect filtering */
	tmp &= ~(ETH_MULTICASTFRAMESFILTER_PERFECTHASHTABLE |
		 ETH_MULTICASTFRAMESFILTER_HASHTABLE |
		 ETH_MULTICASTFRAMESFILTER_PERFECT |
		 ETH_MULTICASTFRAMESFILTER_NONE);

	if (IS_ENABLED(CONFIG_ETH_STM32_MULTICAST_FILTER)) {
		/* enable multicast hash receive filter */
		tmp |= ETH_MULTICASTFRAMESFILTER_HASHTABLE;
	} else {
		/* enable receiving all multicast frames */
		tmp |= ETH_MULTICASTFRAMESFILTER_NONE;
	}

	heth->Instance->MACFFR = tmp;

	/* Wait until the write operation will be taken into account:
	 * at least four TX_CLK/RX_CLK clock cycles
	 */
	tmp = heth->Instance->MACFFR;
	k_sleep(K_MSEC(1));
	heth->Instance->MACFFR = tmp;
#endif /*
CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X) */
}

#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
/* Return true when pkt is a PTP Ethernet frame; such frames are also
 * bumped to NET_PRIORITY_CA as a side effect.
 */
static bool eth_is_ptp_pkt(struct net_if *iface, struct net_pkt *pkt)
{
	if (ntohs(NET_ETH_HDR(pkt)->type) != NET_ETH_PTYPE_PTP) {
		return false;
	}

	net_pkt_set_priority(pkt, NET_PRIORITY_CA);

	return true;
}
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
/* HAL callback: record the hardware TX timestamp on the transmitted pkt */
void HAL_ETH_TxPtpCallback(uint32_t *buff, ETH_TimeStampTypeDef *timestamp)
{
	struct eth_stm32_tx_context *ctx = (struct eth_stm32_tx_context *)buff;

	ctx->pkt->timestamp.second = timestamp->TimeStampHigh;
	ctx->pkt->timestamp.nanosecond = timestamp->TimeStampLow;

	net_if_add_tx_timestamp(ctx->pkt);
}
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

/* Transmit one network packet.
 *
 * Copies the packet into DMA TX buffer(s), starts the transmission via
 * the HAL (interrupt-driven on H5/H7/V2, polled legacy API otherwise)
 * and optionally retrieves the hardware TX timestamp for PTP frames.
 * Returns 0 on success or -EIO/-ENOBUFS on failure. Serialized by
 * dev_data->tx_mutex.
 */
static int eth_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_stm32_hal_dev_data *dev_data = dev->data;
	ETH_HandleTypeDef *heth;
	int res;
	size_t total_len;
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	size_t remaining_read;
	struct eth_stm32_tx_context ctx = {.pkt = pkt, .first_tx_buffer_index = 0};
	struct eth_stm32_tx_buffer_header *buf_header = NULL;
#else
	uint8_t *dma_buffer;
	__IO ETH_DMADescTypeDef *dma_tx_desc;
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
	HAL_StatusTypeDef hal_ret = HAL_OK;
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
	bool timestamped_frame;
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

	__ASSERT_NO_MSG(pkt != NULL);
	__ASSERT_NO_MSG(pkt->frags != NULL);
	__ASSERT_NO_MSG(dev != NULL);
	__ASSERT_NO_MSG(dev_data != NULL);

	heth = &dev_data->heth;

	total_len = net_pkt_get_len(pkt);
	if (total_len > (ETH_STM32_TX_BUF_SIZE * ETH_TXBUFNB)) {
		LOG_ERR("PKT too big");
		return -EIO;
	}

	k_mutex_lock(&dev_data->tx_mutex, K_FOREVER);

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	ctx.first_tx_buffer_index = allocate_tx_buffer();
	buf_header = &dma_tx_buffer_header[ctx.first_tx_buffer_index];
#else /* CONFIG_ETH_STM32_HAL_API_V2 */
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	uint32_t cur_tx_desc_idx;

	cur_tx_desc_idx = heth->TxDescList.CurTxDesc;
	dma_tx_desc = (ETH_DMADescTypeDef *)heth->TxDescList.TxDesc[cur_tx_desc_idx];
#else
	dma_tx_desc = heth->TxDesc;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */

	/* Wait for the DMA to hand the current descriptor back to software */
	while (IS_ETH_DMATXDESC_OWN(dma_tx_desc) != (uint32_t)RESET) {
		k_yield();
	}
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
	timestamped_frame = eth_is_ptp_pkt(net_pkt_iface(pkt), pkt) ||
			    net_pkt_is_tx_timestamping(pkt);
	if (timestamped_frame) {
		/* Enable transmit timestamp */
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
		HAL_ETH_PTP_InsertTxTimestamp(heth);
#elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
		dma_tx_desc->DESC2 |= ETH_DMATXNDESCRF_TTSE;
#else
		dma_tx_desc->Status |= ETH_DMATXDESC_TTSE;
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
	}
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	remaining_read = total_len;
	/* fill and allocate buffer until remaining data fits in one buffer */
	while (remaining_read > ETH_STM32_TX_BUF_SIZE) {
		if (net_pkt_read(pkt, buf_header->tx_buff.buffer, ETH_STM32_TX_BUF_SIZE)) {
			res = -ENOBUFS;
			goto error;
		}
		const uint16_t next_buffer_id = allocate_tx_buffer();

		buf_header->tx_buff.len = ETH_STM32_TX_BUF_SIZE;
		/* append new buffer to the linked list */
		buf_header->tx_buff.next = &dma_tx_buffer_header[next_buffer_id].tx_buff;
		/* and adjust tail pointer */
		buf_header = &dma_tx_buffer_header[next_buffer_id];
		remaining_read -= ETH_STM32_TX_BUF_SIZE;
	}
	if (net_pkt_read(pkt, buf_header->tx_buff.buffer, remaining_read)) {
		res = -ENOBUFS;
		goto error;
	}
	buf_header->tx_buff.len = remaining_read;
	buf_header->tx_buff.next = NULL;

#else /* CONFIG_ETH_STM32_HAL_API_V2 */

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	dma_buffer = dma_tx_buffer[cur_tx_desc_idx];
#else
	dma_buffer = (uint8_t *)(dma_tx_desc->Buffer1Addr);
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */

	if (net_pkt_read(pkt, dma_buffer, total_len)) {
		res = -ENOBUFS;
		goto error;
	}

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	ETH_BufferTypeDef tx_buffer_def;

	tx_buffer_def.buffer = dma_buffer;
	tx_buffer_def.len = total_len;
	tx_buffer_def.next = NULL;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
	defined(CONFIG_ETH_STM32_HAL_API_V2)

	tx_config.Length = total_len;
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	tx_config.pData = &ctx;
	tx_config.TxBuffer = &dma_tx_buffer_header[ctx.first_tx_buffer_index].tx_buff;
#else
	tx_config.TxBuffer = &tx_buffer_def;
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

	/* Reset TX complete interrupt semaphore before TX request*/
	k_sem_reset(&dev_data->tx_int_sem);

	/* tx_buffer is allocated on function stack, we need */
	/* to wait for the transfer to complete */
	/* So it is not freed before the interrupt happens */
	hal_ret = HAL_ETH_Transmit_IT(heth, &tx_config);

	if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_Transmit: failed!");
		res = -EIO;
		goto error;
	}

	/* Wait for end of TX buffer transmission */
	/* If the semaphore timeout breaks, it means */
	/* an error occurred or IT was not fired */
	if (k_sem_take(&dev_data->tx_int_sem,
		       K_MSEC(ETH_DMA_TX_TIMEOUT_MS)) != 0) {

		LOG_ERR("HAL_ETH_TransmitIT tx_int_sem take timeout");
		res = -EIO;

#ifndef CONFIG_ETH_STM32_HAL_API_V2
		/* Content of the packet could be the reason for timeout */
		LOG_HEXDUMP_ERR(dma_buffer, total_len, "eth packet timeout");
#endif

		/* Check for errors */
		/* Ethernet device was put in error state */
		/* Error state is unrecoverable ?
 */
		if (HAL_ETH_GetState(heth) == HAL_ETH_STATE_ERROR) {
			LOG_ERR("%s: ETH in error state: errorcode:%x",
				__func__,
				HAL_ETH_GetError(heth));
			/* TODO recover from error state by restarting eth */
		}

		/* Check for DMA errors */
		if (HAL_ETH_GetDMAError(heth)) {
			LOG_ERR("%s: ETH DMA error: dmaerror:%x",
				__func__,
				HAL_ETH_GetDMAError(heth));
			/* DMA fatal bus errors are putting in error state*/
			/* TODO recover from this */
		}

		/* Check for MAC errors */
		if (HAL_ETH_GetMACError(heth)) {
			LOG_ERR("%s: ETH MAC error: macerror:%x",
				__func__,
				HAL_ETH_GetMACError(heth));
			/* MAC errors are putting in error state*/
			/* TODO recover from this */
		}

		goto error;
	}

#else
	hal_ret = HAL_ETH_TransmitFrame(heth, total_len);

	if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_Transmit: failed!");
		res = -EIO;
		goto error;
	}

	/* When Transmit Underflow flag is set, clear it and issue a
	 * Transmit Poll Demand to resume transmission.
	 */
	if ((heth->Instance->DMASR & ETH_DMASR_TUS) != (uint32_t)RESET) {
		/* Clear TUS ETHERNET DMA flag */
		heth->Instance->DMASR = ETH_DMASR_TUS;
		/* Resume DMA transmission*/
		heth->Instance->DMATPDR = 0;
		res = -EIO;
		goto error;
	}
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */

#if defined(CONFIG_PTP_CLOCK_STM32_HAL) && !defined(CONFIG_ETH_STM32_HAL_API_V2)
	if (timestamped_frame) {
		/* Retrieve transmission timestamp from last DMA TX descriptor */
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
		ETH_TxDescListTypeDef * dma_tx_desc_list;
		__IO ETH_DMADescTypeDef *last_dma_tx_desc;

		dma_tx_desc_list = &heth->TxDescList;
		/* Find the last (LD) descriptor of this frame */
		for (uint32_t i = 0; i < ETH_TX_DESC_CNT; i++) {
			const uint32_t last_desc_idx =
				(cur_tx_desc_idx + i) % ETH_TX_DESC_CNT;

			last_dma_tx_desc =
				(ETH_DMADescTypeDef *)dma_tx_desc_list->TxDesc[last_desc_idx];
			if (last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_LD) {
				break;
			}
		}

		while (IS_ETH_DMATXDESC_OWN(last_dma_tx_desc) != (uint32_t)RESET) {
			/* Wait for transmission */
			k_yield();
		}

		if
 ((last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_LD) &&
		    (last_dma_tx_desc->DESC3 & ETH_DMATXNDESCWBF_TTSS)) {
			pkt->timestamp.second = last_dma_tx_desc->DESC1;
			pkt->timestamp.nanosecond = last_dma_tx_desc->DESC0;
		} else {
			/* Invalid value */
			pkt->timestamp.second = UINT64_MAX;
			pkt->timestamp.nanosecond = UINT32_MAX;
		}
#else
		__IO ETH_DMADescTypeDef *last_dma_tx_desc = dma_tx_desc;

		/* Walk the descriptor chain to the last segment (LS) */
		while (!(last_dma_tx_desc->Status & ETH_DMATXDESC_LS) &&
		       last_dma_tx_desc->Buffer2NextDescAddr) {
			last_dma_tx_desc =
				(ETH_DMADescTypeDef *)last_dma_tx_desc->Buffer2NextDescAddr;
		}

		while (IS_ETH_DMATXDESC_OWN(last_dma_tx_desc) != (uint32_t)RESET) {
			/* Wait for transmission */
			k_yield();
		}

		if (last_dma_tx_desc->Status & ETH_DMATXDESC_LS &&
		    last_dma_tx_desc->Status & ETH_DMATXDESC_TTSS) {
			pkt->timestamp.second = last_dma_tx_desc->TimeStampHigh;
			pkt->timestamp.nanosecond = last_dma_tx_desc->TimeStampLow;
		} else {
			/* Invalid value */
			pkt->timestamp.second = UINT64_MAX;
			pkt->timestamp.nanosecond = UINT32_MAX;
		}
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */

		net_if_add_tx_timestamp(pkt);
	}
#endif /* CONFIG_PTP_CLOCK_STM32_HAL && !CONFIG_ETH_STM32_HAL_API_V2 */

	res = 0;
error:

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* free package tx buffer */
	if (res != 0) {
		HAL_ETH_TxFreeCallback((uint32_t *)&ctx);
	} else if (HAL_ETH_ReleaseTxPacket(heth) != HAL_OK) {
		LOG_ERR("HAL_ETH_ReleaseTxPacket failed");
		res = -EIO;
	}
#endif

	k_mutex_unlock(&dev_data->tx_mutex);

	return res;
}

/* Fetch the net_if associated with this driver context */
static struct net_if *get_iface(struct eth_stm32_hal_dev_data *ctx)
{
	return ctx->iface;
}

/* Receive one frame from the MAC.
 *
 * Pulls the next received frame out of the DMA RX ring (via the API
 * variant compiled in), copies it into a freshly allocated net_pkt,
 * attaches the RX timestamp when PTP is enabled, and returns the
 * descriptors/buffers to the DMA. Returns NULL when no frame is
 * available or on allocation failure.
 */
static struct net_pkt *eth_rx(const struct device *dev)
{
	struct eth_stm32_hal_dev_data *dev_data;
	ETH_HandleTypeDef *heth;
	struct net_pkt *pkt;
	size_t total_len = 0;
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	void *appbuf = NULL;
	struct eth_stm32_rx_buffer_header *rx_header;
#else
#if !defined(CONFIG_SOC_SERIES_STM32H7X) && !defined(CONFIG_SOC_SERIES_STM32H5X)
	__IO ETH_DMADescTypeDef *dma_rx_desc;
#endif /* !CONFIG_SOC_SERIES_STM32H7X */
	uint8_t *dma_buffer;
	HAL_StatusTypeDef hal_ret = HAL_OK;
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
	struct net_ptp_time timestamp;
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	ETH_TimeStampTypeDef ts_registers;
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
	/* Default to invalid value. */
	timestamp.second = UINT64_MAX;
	timestamp.nanosecond = UINT32_MAX;
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

	__ASSERT_NO_MSG(dev != NULL);

	dev_data = dev->data;

	__ASSERT_NO_MSG(dev_data != NULL);

	heth = &dev_data->heth;

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	if (HAL_ETH_ReadData(heth, &appbuf) != HAL_OK) {
		/* no frame available */
		return NULL;
	}

	/* computing total length */
	for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
	     rx_header; rx_header = rx_header->next) {
		total_len += rx_header->size;
	}
#elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	if (HAL_ETH_IsRxDataAvailable(heth) != true) {
		/* no frame available */
		return NULL;
	}

	ETH_BufferTypeDef rx_buffer_def;
	uint32_t frame_length = 0;

	hal_ret = HAL_ETH_GetRxDataBuffer(heth, &rx_buffer_def);
	if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_GetRxDataBuffer: failed with state: %d",
			hal_ret);
		return NULL;
	}

	hal_ret = HAL_ETH_GetRxDataLength(heth, &frame_length);
	if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_GetRxDataLength: failed with state: %d",
			hal_ret);
		return NULL;
	}

	total_len = frame_length;
	dma_buffer = rx_buffer_def.buffer;
#else
	hal_ret = HAL_ETH_GetReceivedFrame_IT(heth);
	if (hal_ret != HAL_OK) {
		/* no frame available */
		return NULL;
	}

	total_len = heth->RxFrameInfos.length;
	dma_buffer = (uint8_t *)heth->RxFrameInfos.buffer;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */

#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	if (HAL_ETH_PTP_GetRxTimestamp(heth, &ts_registers) == HAL_OK) {
		timestamp.second = ts_registers.TimeStampHigh;
		timestamp.nanosecond = ts_registers.TimeStampLow;
	}
#elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	ETH_RxDescListTypeDef * dma_rx_desc_list;

	dma_rx_desc_list = &heth->RxDescList;
	if (dma_rx_desc_list->AppDescNbr) {
		__IO ETH_DMADescTypeDef *last_dma_rx_desc;

		const uint32_t last_desc_idx =
			(dma_rx_desc_list->FirstAppDesc + dma_rx_desc_list->AppDescNbr - 1U)
				% ETH_RX_DESC_CNT;

		last_dma_rx_desc =
			(ETH_DMADescTypeDef *)dma_rx_desc_list->RxDesc[last_desc_idx];

		if (dma_rx_desc_list->AppContextDesc &&
		    last_dma_rx_desc->DESC1 & ETH_DMARXNDESCWBF_TSA) {
			/* Retrieve timestamp from context DMA descriptor */
			__IO ETH_DMADescTypeDef *context_dma_rx_desc;

			const uint32_t context_desc_idx = (last_desc_idx + 1U) % ETH_RX_DESC_CNT;

			context_dma_rx_desc =
				(ETH_DMADescTypeDef *)dma_rx_desc_list->RxDesc[context_desc_idx];
			if (context_dma_rx_desc->DESC1 != UINT32_MAX ||
			    context_dma_rx_desc->DESC0 != UINT32_MAX) {
				timestamp.second = context_dma_rx_desc->DESC1;
				timestamp.nanosecond = context_dma_rx_desc->DESC0;
			}
		}
	}
#else
	__IO ETH_DMADescTypeDef *last_dma_rx_desc;

	last_dma_rx_desc = heth->RxFrameInfos.LSRxDesc;
	if (last_dma_rx_desc->TimeStampHigh != UINT32_MAX ||
	    last_dma_rx_desc->TimeStampLow != UINT32_MAX) {
		timestamp.second = last_dma_rx_desc->TimeStampHigh;
		timestamp.nanosecond = last_dma_rx_desc->TimeStampLow;
	}
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

	pkt = net_pkt_rx_alloc_with_buffer(get_iface(dev_data),
					   total_len, AF_UNSPEC, 0, K_MSEC(100));
	if (!pkt) {
		LOG_ERR("Failed to obtain RX buffer");
		goto release_desc;
	}

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
	     rx_header; rx_header = rx_header->next) {
		const size_t index = rx_header - &dma_rx_buffer_header[0];

		__ASSERT_NO_MSG(index < ETH_RXBUFNB);
		if (net_pkt_write(pkt, dma_rx_buffer[index], rx_header->size)) {
			LOG_ERR("Failed to append RX buffer to context buffer");
			net_pkt_unref(pkt);
			pkt = NULL;
			goto release_desc;
		}
	}
#else
	if (net_pkt_write(pkt, dma_buffer, total_len)) {
		LOG_ERR("Failed to append RX buffer to context buffer");
		net_pkt_unref(pkt);
		pkt = NULL;
		goto release_desc;
	}
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

release_desc:
#if defined(CONFIG_ETH_STM32_HAL_API_V2)
	/* Give every RX buffer of this frame back to the allocator */
	for (rx_header = (struct eth_stm32_rx_buffer_header *)appbuf;
	     rx_header; rx_header = rx_header->next) {
		rx_header->used = false;
	}
#elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	hal_ret = HAL_ETH_BuildRxDescriptors(heth);
	if (hal_ret != HAL_OK) {
		LOG_ERR("HAL_ETH_BuildRxDescriptors: failed: %d", hal_ret);
	}
#else
	/* Release descriptors to DMA */
	/* Point to first descriptor */
	dma_rx_desc = heth->RxFrameInfos.FSRxDesc;
	/* Set Own bit in Rx descriptors: gives the buffers back to DMA */
	for (int i = 0; i < heth->RxFrameInfos.SegCount; i++) {
		dma_rx_desc->Status |= ETH_DMARXDESC_OWN;
		dma_rx_desc = (ETH_DMADescTypeDef *)
			(dma_rx_desc->Buffer2NextDescAddr);
	}

	/* Clear Segment_Count */
	heth->RxFrameInfos.SegCount = 0;

	/* When Rx Buffer unavailable flag is set: clear it
	 * and resume reception.
	 */
	if ((heth->Instance->DMASR & ETH_DMASR_RBUS) != (uint32_t)RESET) {
		/* Clear RBUS ETHERNET DMA flag */
		heth->Instance->DMASR = ETH_DMASR_RBUS;
		/* Resume DMA reception */
		heth->Instance->DMARPDR = 0;
	}
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */

	if (!pkt) {
		goto out;
	}

#if defined(CONFIG_PTP_CLOCK_STM32_HAL)
	pkt->timestamp.second = timestamp.second;
	pkt->timestamp.nanosecond = timestamp.nanosecond;
	if (timestamp.second != UINT64_MAX) {
		net_pkt_set_rx_timestamping(pkt, true);
	}
#endif /* CONFIG_PTP_CLOCK_STM32_HAL */

out:
	if (!pkt) {
		eth_stats_update_errors_rx(get_iface(dev_data));
	}

	return pkt;
}

/* RX service thread.
 *
 * Blocks on rx_int_sem (given by HAL_ETH_RxCpltCallback). When woken it
 * drains all pending frames into the network stack; when the semaphore
 * times out it polls the PHY link status instead and raises/drops the
 * carrier on the interface accordingly.
 */
static void rx_thread(void *arg1, void *unused1, void *unused2)
{
	const struct device *dev;
	struct eth_stm32_hal_dev_data *dev_data;
	struct net_if *iface;
	struct net_pkt *pkt;
	int res;
	uint32_t status;
	HAL_StatusTypeDef hal_ret = HAL_OK;

	__ASSERT_NO_MSG(arg1 != NULL);
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);

	dev = (const struct device *)arg1;
	dev_data = dev->data;

	__ASSERT_NO_MSG(dev_data != NULL);

	while (1) {
		res = k_sem_take(&dev_data->rx_int_sem,
			K_MSEC(CONFIG_ETH_STM32_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS));
		if (res == 0) {
			/* semaphore taken, update link status and receive packets */
			if (dev_data->link_up != true) {
				dev_data->link_up = true;
				net_eth_carrier_on(get_iface(dev_data));
			}
			while ((pkt = eth_rx(dev)) != NULL) {
				iface = net_pkt_iface(pkt);
#if defined(CONFIG_NET_DSA)
				iface = dsa_net_recv(iface, &pkt);
#endif
				res = net_recv_data(iface, pkt);
				if (res < 0) {
					eth_stats_update_errors_rx(
							net_pkt_iface(pkt));
					LOG_ERR("Failed to enqueue frame "
						"into RX queue: %d", res);
					net_pkt_unref(pkt);
				}
			}
		} else if (res == -EAGAIN) {
			/* semaphore timeout period expired, check link status */
			hal_ret = read_eth_phy_register(&dev_data->heth,
				    PHY_ADDR, PHY_BSR, (uint32_t *) &status);
			if (hal_ret == HAL_OK) {
				if ((status & PHY_LINKED_STATUS) == PHY_LINKED_STATUS) {
					if (dev_data->link_up != true) {
						dev_data->link_up = true;
						net_eth_carrier_on(
							get_iface(dev_data));
					}
				} else {
					if (dev_data->link_up != false) {
						dev_data->link_up = false;
						net_eth_carrier_off(
							get_iface(dev_data));
					}
				}
			}
		}
	}
}

/* Top-level Ethernet IRQ handler; delegates to the ST HAL dispatcher */
static void eth_isr(const struct device *dev)
{
	struct eth_stm32_hal_dev_data *dev_data;
	ETH_HandleTypeDef *heth;

	__ASSERT_NO_MSG(dev != NULL);

	dev_data = dev->data;

	__ASSERT_NO_MSG(dev_data != NULL);

	heth = &dev_data->heth;

	__ASSERT_NO_MSG(heth != NULL);

	HAL_ETH_IRQHandler(heth);
}

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \
	defined(CONFIG_ETH_STM32_HAL_API_V2)
/* HAL callback: TX complete — unblock the waiter in eth_tx() */
void HAL_ETH_TxCpltCallback(ETH_HandleTypeDef *heth_handle)
{
	__ASSERT_NO_MSG(heth_handle != NULL);

	struct eth_stm32_hal_dev_data *dev_data =
		CONTAINER_OF(heth_handle, struct eth_stm32_hal_dev_data, heth);

	__ASSERT_NO_MSG(dev_data != NULL);

	k_sem_give(&dev_data->tx_int_sem);
}
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */

#if defined(CONFIG_ETH_STM32_HAL_API_V2)
/* HAL callback: hardware error — fold HAL error flags into the Zephyr
 * Ethernet statistics (when statistics are enabled).
 */
void HAL_ETH_ErrorCallback(ETH_HandleTypeDef *heth)
{
	/* Do not log errors. If errors are reported due to high traffic,
	 * logging errors will only increase traffic issues
	 */
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	__ASSERT_NO_MSG(heth != NULL);

	uint32_t dma_error;
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	uint32_t mac_error;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
	const uint32_t error_code = HAL_ETH_GetError(heth);

	struct eth_stm32_hal_dev_data *dev_data =
		CONTAINER_OF(heth, struct eth_stm32_hal_dev_data, heth);

	switch (error_code) {
	case HAL_ETH_ERROR_DMA:
		dma_error = HAL_ETH_GetDMAError(heth);

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
		if ((dma_error & ETH_DMA_RX_WATCHDOG_TIMEOUT_FLAG) ||
		    (dma_error & ETH_DMA_RX_PROCESS_STOPPED_FLAG) ||
		    (dma_error & ETH_DMA_RX_BUFFER_UNAVAILABLE_FLAG)) {
			eth_stats_update_errors_rx(dev_data->iface);
		}
		if ((dma_error & ETH_DMA_EARLY_TX_IT_FLAG) ||
		    (dma_error & ETH_DMA_TX_PROCESS_STOPPED_FLAG)) {
			eth_stats_update_errors_tx(dev_data->iface);
		}
#else
		if ((dma_error & ETH_DMASR_RWTS) ||
		    (dma_error & ETH_DMASR_RPSS) ||
		    (dma_error & ETH_DMASR_RBUS)) {
			eth_stats_update_errors_rx(dev_data->iface);
		}
		if ((dma_error & ETH_DMASR_ETS)  ||
		    (dma_error & ETH_DMASR_TPSS) ||
		    (dma_error & ETH_DMASR_TJTS)) {
			eth_stats_update_errors_tx(dev_data->iface);
		}
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
		break;

#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	case HAL_ETH_ERROR_MAC:
		mac_error = HAL_ETH_GetMACError(heth);

		if (mac_error & ETH_RECEIVE_WATCHDOG_TIMEOUT) {
			eth_stats_update_errors_rx(dev_data->iface);
		}

		if ((mac_error & ETH_EXECESSIVE_COLLISIONS)  ||
		    (mac_error & ETH_LATE_COLLISIONS)        ||
		    (mac_error & ETH_EXECESSIVE_DEFERRAL)    ||
		    (mac_error & ETH_TRANSMIT_JABBR_TIMEOUT) ||
		    (mac_error & ETH_LOSS_OF_CARRIER)        ||
		    (mac_error & ETH_NO_CARRIER)) {
			eth_stats_update_errors_tx(dev_data->iface);
		}
		break;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */
	}

	/* Refresh the CRC/alignment error counters from the MMC registers */
#if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
	dev_data->stats.error_details.rx_crc_errors = heth->Instance->MMCRCRCEPR;
	dev_data->stats.error_details.rx_align_errors = heth->Instance->MMCRAEPR;
#else
	dev_data->stats.error_details.rx_crc_errors = heth->Instance->MMCRFCECR;
	dev_data->stats.error_details.rx_align_errors = heth->Instance->MMCRFAECR;
#endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */

#endif /* CONFIG_NET_STATISTICS_ETHERNET */
}
#elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X)
/* DMA and MAC errors callback only appear in H7 series */
void HAL_ETH_DMAErrorCallback(ETH_HandleTypeDef *heth_handle)
{
	__ASSERT_NO_MSG(heth_handle != NULL);

	LOG_ERR("%s errorcode:%x dmaerror:%x",
		__func__,
		HAL_ETH_GetError(heth_handle),
		HAL_ETH_GetDMAError(heth_handle));

	/* State of eth handle is ERROR in case of unrecoverable error */
	/* unrecoverable (ETH_DMACSR_FBE | ETH_DMACSR_TPS | ETH_DMACSR_RPS) */
	if (HAL_ETH_GetState(heth_handle) == HAL_ETH_STATE_ERROR) {
		LOG_ERR("%s ethernet in error state", __func__);
		/* TODO restart the ETH peripheral to recover */
		return;
	}

	/* Recoverable errors don't put ETH in error state */
	/* ETH_DMACSR_CDE | ETH_DMACSR_ETI | ETH_DMACSR_RWT */
	/* | ETH_DMACSR_RBU | ETH_DMACSR_AIS) */

	/* TODO Check if we were TX transmitting and the unlock semaphore */
	/* To return the error as soon as possible else we'll just wait */
	/* for the timeout */
}

void HAL_ETH_MACErrorCallback(ETH_HandleTypeDef *heth_handle)
{
	__ASSERT_NO_MSG(heth_handle != NULL);

	/* MAC errors dumping */
	LOG_ERR("%s errorcode:%x macerror:%x",
		__func__,
		HAL_ETH_GetError(heth_handle),
		HAL_ETH_GetMACError(heth_handle));

	/* State of eth handle is ERROR in case of unrecoverable error */
	if (HAL_ETH_GetState(heth_handle) == HAL_ETH_STATE_ERROR) {
		LOG_ERR("%s ethernet in error state", __func__);
		/* TODO restart or reconfig ETH peripheral to recover */

		return;
	}
}
#endif /* CONFIG_ETH_STM32_HAL_API_V2 */
void HAL_ETH_RxCpltCallback(ETH_HandleTypeDef *heth_handle) { __ASSERT_NO_MSG(heth_handle != NULL); struct eth_stm32_hal_dev_data *dev_data = CONTAINER_OF(heth_handle, struct eth_stm32_hal_dev_data, heth); __ASSERT_NO_MSG(dev_data != NULL); k_sem_give(&dev_data->rx_int_sem); } static void generate_mac(uint8_t *mac_addr) { #if defined(ETH_STM32_RANDOM_MAC) /* "zephyr,random-mac-address" is set, generate a random mac address */ gen_random_mac(mac_addr, ST_OUI_B0, ST_OUI_B1, ST_OUI_B2); #else /* Use user defined mac address */ mac_addr[0] = ST_OUI_B0; mac_addr[1] = ST_OUI_B1; mac_addr[2] = ST_OUI_B2; #if NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0)) mac_addr[3] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 3); mac_addr[4] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 4); mac_addr[5] = NODE_MAC_ADDR_OCTET(DT_DRV_INST(0), 5); #else uint8_t unique_device_ID_12_bytes[12]; uint32_t result_mac_32_bits; /* Nothing defined by the user, use device id */ hwinfo_get_device_id(unique_device_ID_12_bytes, 12); result_mac_32_bits = crc32_ieee((uint8_t *)unique_device_ID_12_bytes, 12); memcpy(&mac_addr[3], &result_mac_32_bits, 3); #endif /* NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(0))) */ #endif } static int eth_initialize(const struct device *dev) { struct eth_stm32_hal_dev_data *dev_data; const struct eth_stm32_hal_dev_cfg *cfg; ETH_HandleTypeDef *heth; HAL_StatusTypeDef hal_ret = HAL_OK; int ret = 0; __ASSERT_NO_MSG(dev != NULL); dev_data = dev->data; cfg = dev->config; __ASSERT_NO_MSG(dev_data != NULL); __ASSERT_NO_MSG(cfg != NULL); dev_data->clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(dev_data->clock)) { LOG_ERR("clock control device not ready"); return -ENODEV; } /* enable clock */ ret = clock_control_on(dev_data->clock, (clock_control_subsys_t)&cfg->pclken); ret |= clock_control_on(dev_data->clock, (clock_control_subsys_t)&cfg->pclken_tx); ret |= clock_control_on(dev_data->clock, (clock_control_subsys_t)&cfg->pclken_rx); #if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp) ret |= 
clock_control_on(dev_data->clock, (clock_control_subsys_t)&cfg->pclken_ptp); #endif if (ret) { LOG_ERR("Failed to enable ethernet clock"); return -EIO; } /* configure pinmux */ ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("Could not configure ethernet pins"); return ret; } heth = &dev_data->heth; generate_mac(dev_data->mac_addr); heth->Init.MACAddr = dev_data->mac_addr; #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_ETH_STM32_HAL_API_V2) heth->Init.TxDesc = dma_tx_desc_tab; heth->Init.RxDesc = dma_rx_desc_tab; heth->Init.RxBuffLen = ETH_STM32_RX_BUF_SIZE; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */ hal_ret = HAL_ETH_Init(heth); if (hal_ret == HAL_TIMEOUT) { /* HAL Init time out. This could be linked to */ /* a recoverable error. Log the issue and continue */ /* driver initialisation */ LOG_ERR("HAL_ETH_Init Timed out"); } else if (hal_ret != HAL_OK) { LOG_ERR("HAL_ETH_Init failed: %d", hal_ret); return -EINVAL; } #if defined(CONFIG_PTP_CLOCK_STM32_HAL) /* Enable timestamping of RX packets. We enable all packets to be * timestamped to cover both IEEE 1588 and gPTP. */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACTSCR |= ETH_MACTSCR_TSENALL; #else heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSARFE; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ #endif /* CONFIG_PTP_CLOCK_STM32_HAL */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_ETH_STM32_HAL_API_V2) /* Tx config init: */ memset(&tx_config, 0, sizeof(ETH_TxPacketConfig)); tx_config.Attributes = ETH_TX_PACKETS_FEATURES_CSUM | ETH_TX_PACKETS_FEATURES_CRCPAD; tx_config.ChecksumCtrl = IS_ENABLED(CONFIG_ETH_STM32_HW_CHECKSUM) ? 
ETH_CHECKSUM_IPHDR_PAYLOAD_INSERT_PHDR_CALC : ETH_CHECKSUM_DISABLE; tx_config.CRCPadCtrl = ETH_CRC_PAD_INSERT; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */ dev_data->link_up = false; /* Initialize semaphores */ k_mutex_init(&dev_data->tx_mutex); k_sem_init(&dev_data->rx_int_sem, 0, K_SEM_MAX_LIMIT); #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_ETH_STM32_HAL_API_V2) k_sem_init(&dev_data->tx_int_sem, 0, K_SEM_MAX_LIMIT); #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) || \ defined(CONFIG_ETH_STM32_HAL_API_V2) /* Adjust MDC clock range depending on HCLK frequency: */ HAL_ETH_SetMDIOClockRange(heth); /* @TODO: read duplex mode and speed from PHY and set it to ETH */ ETH_MACConfigTypeDef mac_config; HAL_ETH_GetMACConfig(heth, &mac_config); mac_config.DuplexMode = IS_ENABLED(CONFIG_ETH_STM32_MODE_HALFDUPLEX) ? ETH_HALFDUPLEX_MODE : ETH_FULLDUPLEX_MODE; mac_config.Speed = IS_ENABLED(CONFIG_ETH_STM32_SPEED_10M) ? 
ETH_SPEED_10M : ETH_SPEED_100M; hal_ret = HAL_ETH_SetMACConfig(heth, &mac_config); if (hal_ret != HAL_OK) { LOG_ERR("HAL_ETH_SetMACConfig: failed: %d", hal_ret); } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X || CONFIG_ETH_STM32_HAL_API_V2 */ #if defined(CONFIG_ETH_STM32_HAL_API_V2) /* prepare tx buffer header */ for (uint16_t i = 0; i < ETH_TXBUFNB; ++i) { dma_tx_buffer_header[i].tx_buff.buffer = dma_tx_buffer[i]; } hal_ret = HAL_ETH_Start_IT(heth); #elif defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) for (uint32_t i = 0; i < ETH_RX_DESC_CNT; i++) { hal_ret = HAL_ETH_DescAssignMemory(heth, i, dma_rx_buffer[i], NULL); if (hal_ret != HAL_OK) { LOG_ERR("HAL_ETH_DescAssignMemory: failed: %d, i: %d", hal_ret, i); return -EINVAL; } } hal_ret = HAL_ETH_Start_IT(heth); #else HAL_ETH_DMATxDescListInit(heth, dma_tx_desc_tab, &dma_tx_buffer[0][0], ETH_TXBUFNB); HAL_ETH_DMARxDescListInit(heth, dma_rx_desc_tab, &dma_rx_buffer[0][0], ETH_RXBUFNB); hal_ret = HAL_ETH_Start(heth); #endif /* CONFIG_ETH_STM32_HAL_API_V2 */ if (hal_ret != HAL_OK) { LOG_ERR("HAL_ETH_Start{_IT} failed"); } setup_mac_filter(heth); LOG_DBG("MAC %02x:%02x:%02x:%02x:%02x:%02x", dev_data->mac_addr[0], dev_data->mac_addr[1], dev_data->mac_addr[2], dev_data->mac_addr[3], dev_data->mac_addr[4], dev_data->mac_addr[5]); return 0; } #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER) static void eth_stm32_mcast_filter(const struct device *dev, const struct ethernet_filter *filter) { struct eth_stm32_hal_dev_data *dev_data = (struct eth_stm32_hal_dev_data *)dev->data; ETH_HandleTypeDef *heth; uint32_t crc; uint32_t hash_table[2]; uint32_t hash_index; heth = &dev_data->heth; crc = __RBIT(crc32_ieee(filter->mac_address.addr, sizeof(struct net_eth_addr))); hash_index = (crc >> 26) & 0x3f; __ASSERT_NO_MSG(hash_index < ARRAY_SIZE(dev_data->hash_index_cnt)); #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) hash_table[0] = heth->Instance->MACHT0R; 
hash_table[1] = heth->Instance->MACHT1R; #else hash_table[0] = heth->Instance->MACHTLR; hash_table[1] = heth->Instance->MACHTHR; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ if (filter->set) { dev_data->hash_index_cnt[hash_index]++; hash_table[hash_index / 32] |= (1 << (hash_index % 32)); } else { if (dev_data->hash_index_cnt[hash_index] == 0) { __ASSERT_NO_MSG(false); return; } dev_data->hash_index_cnt[hash_index]--; if (dev_data->hash_index_cnt[hash_index] == 0) { hash_table[hash_index / 32] &= ~(1 << (hash_index % 32)); } } #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACHT0R = hash_table[0]; heth->Instance->MACHT1R = hash_table[1]; #else heth->Instance->MACHTLR = hash_table[0]; heth->Instance->MACHTHR = hash_table[1]; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ } #endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */ static void eth_iface_init(struct net_if *iface) { const struct device *dev; struct eth_stm32_hal_dev_data *dev_data; bool is_first_init = false; __ASSERT_NO_MSG(iface != NULL); dev = net_if_get_device(iface); __ASSERT_NO_MSG(dev != NULL); dev_data = dev->data; __ASSERT_NO_MSG(dev_data != NULL); if (dev_data->iface == NULL) { dev_data->iface = iface; is_first_init = true; } /* Register Ethernet MAC Address with the upper layer */ net_if_set_link_addr(iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); #if defined(CONFIG_NET_DSA) dsa_register_master_tx(iface, &eth_tx); #endif ethernet_init(iface); net_if_carrier_off(iface); net_lldp_set_lldpdu(iface); if (is_first_init) { const struct eth_stm32_hal_dev_cfg *cfg = dev->config; /* Now that the iface is setup, we are safe to enable IRQs. 
*/ __ASSERT_NO_MSG(cfg->config_func != NULL); cfg->config_func(); /* Start interruption-poll thread */ k_thread_create(&dev_data->rx_thread, dev_data->rx_thread_stack, K_KERNEL_STACK_SIZEOF(dev_data->rx_thread_stack), rx_thread, (void *) dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_STM32_HAL_RX_THREAD_PRIO), 0, K_NO_WAIT); k_thread_name_set(&dev_data->rx_thread, "stm_eth"); } } static enum ethernet_hw_caps eth_stm32_hal_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T #if defined(CONFIG_NET_VLAN) | ETHERNET_HW_VLAN #endif #if defined(CONFIG_NET_PROMISCUOUS_MODE) | ETHERNET_PROMISC_MODE #endif #if defined(CONFIG_PTP_CLOCK_STM32_HAL) | ETHERNET_PTP #endif #if defined(CONFIG_NET_LLDP) | ETHERNET_LLDP #endif #if defined(CONFIG_ETH_STM32_HW_CHECKSUM) | ETHERNET_HW_RX_CHKSUM_OFFLOAD | ETHERNET_HW_TX_CHKSUM_OFFLOAD #endif #if defined(CONFIG_NET_DSA) | ETHERNET_DSA_MASTER_PORT #endif #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER) | ETHERNET_HW_FILTERING #endif ; } static int eth_stm32_hal_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { int ret = -ENOTSUP; struct eth_stm32_hal_dev_data *dev_data; ETH_HandleTypeDef *heth; dev_data = dev->data; heth = &dev_data->heth; switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(dev_data->mac_addr, config->mac_address.addr, 6); heth->Instance->MACA0HR = (dev_data->mac_addr[5] << 8) | dev_data->mac_addr[4]; heth->Instance->MACA0LR = (dev_data->mac_addr[3] << 24) | (dev_data->mac_addr[2] << 16) | (dev_data->mac_addr[1] << 8) | dev_data->mac_addr[0]; net_if_set_link_addr(dev_data->iface, dev_data->mac_addr, sizeof(dev_data->mac_addr), NET_LINK_ETHERNET); ret = 0; break; case ETHERNET_CONFIG_TYPE_PROMISC_MODE: #if defined(CONFIG_NET_PROMISCUOUS_MODE) #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) if (config->promisc_mode) { heth->Instance->MACPFR |= ETH_MACPFR_PR; } else { 
heth->Instance->MACPFR &= ~ETH_MACPFR_PR; } #else if (config->promisc_mode) { heth->Instance->MACFFR |= ETH_MACFFR_PM; } else { heth->Instance->MACFFR &= ~ETH_MACFFR_PM; } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ ret = 0; #endif /* CONFIG_NET_PROMISCUOUS_MODE */ break; #if defined(CONFIG_ETH_STM32_MULTICAST_FILTER) case ETHERNET_CONFIG_TYPE_FILTER: eth_stm32_mcast_filter(dev, &config->filter); break; #endif /* CONFIG_ETH_STM32_MULTICAST_FILTER */ default: break; } return ret; } #if defined(CONFIG_PTP_CLOCK_STM32_HAL) static const struct device *eth_stm32_get_ptp_clock(const struct device *dev) { struct eth_stm32_hal_dev_data *dev_data = dev->data; return dev_data->ptp_clock; } #endif /* CONFIG_PTP_CLOCK_STM32_HAL */ #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *eth_stm32_hal_get_stats(const struct device *dev) { struct eth_stm32_hal_dev_data *dev_data = dev->data; return &dev_data->stats; } #endif /* CONFIG_NET_STATISTICS_ETHERNET */ static const struct ethernet_api eth_api = { .iface_api.init = eth_iface_init, #if defined(CONFIG_PTP_CLOCK_STM32_HAL) .get_ptp_clock = eth_stm32_get_ptp_clock, #endif /* CONFIG_PTP_CLOCK_STM32_HAL */ .get_capabilities = eth_stm32_hal_get_capabilities, .set_config = eth_stm32_hal_set_config, #if defined(CONFIG_NET_DSA) .send = dsa_tx, #else .send = eth_tx, #endif #if defined(CONFIG_NET_STATISTICS_ETHERNET) .get_stats = eth_stm32_hal_get_stats, #endif /* CONFIG_NET_STATISTICS_ETHERNET */ }; static void eth0_irq_config(void) { IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); } PINCTRL_DT_INST_DEFINE(0); static const struct eth_stm32_hal_dev_cfg eth0_config = { .config_func = eth0_irq_config, .pclken = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bus), .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bits)}, .pclken_tx = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bus), .enr = 
DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bits)}, .pclken_rx = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bus), .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bits)}, #if DT_INST_CLOCKS_HAS_NAME(0, mac_clk_ptp) .pclken_ptp = {.bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_ptp, bus), .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_ptp, bits)}, #endif .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0), }; static struct eth_stm32_hal_dev_data eth0_data = { .heth = { .Instance = (ETH_TypeDef *)DT_INST_REG_ADDR(0), .Init = { #if !defined(CONFIG_SOC_SERIES_STM32H7X) && !defined(CONFIG_SOC_SERIES_STM32H5X) && \ !defined(CONFIG_ETH_STM32_HAL_API_V2) #if defined(CONFIG_ETH_STM32_AUTO_NEGOTIATION_ENABLE) .AutoNegotiation = ETH_AUTONEGOTIATION_ENABLE, #else .AutoNegotiation = ETH_AUTONEGOTIATION_DISABLE, .Speed = IS_ENABLED(CONFIG_ETH_STM32_SPEED_10M) ? ETH_SPEED_10M : ETH_SPEED_100M, .DuplexMode = IS_ENABLED(CONFIG_ETH_STM32_MODE_HALFDUPLEX) ? ETH_MODE_HALFDUPLEX : ETH_MODE_FULLDUPLEX, #endif /* !CONFIG_ETH_STM32_AUTO_NEGOTIATION_ENABLE */ .PhyAddress = PHY_ADDR, .RxMode = ETH_RXINTERRUPT_MODE, .ChecksumMode = IS_ENABLED(CONFIG_ETH_STM32_HW_CHECKSUM) ? ETH_CHECKSUM_BY_HARDWARE : ETH_CHECKSUM_BY_SOFTWARE, #endif /* !CONFIG_SOC_SERIES_STM32H7X */ .MediaInterface = IS_ENABLED(CONFIG_ETH_STM32_HAL_MII) ? 
ETH_MEDIA_INTERFACE_MII : ETH_MEDIA_INTERFACE_RMII, }, }, }; ETH_NET_DEVICE_DT_INST_DEFINE(0, eth_initialize, NULL, &eth0_data, &eth0_config, CONFIG_ETH_INIT_PRIORITY, &eth_api, ETH_STM32_HAL_MTU); #if defined(CONFIG_PTP_CLOCK_STM32_HAL) struct ptp_context { struct eth_stm32_hal_dev_data *eth_dev_data; }; static struct ptp_context ptp_stm32_0_context; static int ptp_clock_stm32_set(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data; ETH_HandleTypeDef *heth = &eth_dev_data->heth; unsigned int key; key = irq_lock(); #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACSTSUR = tm->second; heth->Instance->MACSTNUR = tm->nanosecond; heth->Instance->MACTSCR |= ETH_MACTSCR_TSINIT; while (heth->Instance->MACTSCR & ETH_MACTSCR_TSINIT_Msk) { /* spin lock */ } #else heth->Instance->PTPTSHUR = tm->second; heth->Instance->PTPTSLUR = tm->nanosecond; heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTI; while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTI_Msk) { /* spin lock */ } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ irq_unlock(key); return 0; } static int ptp_clock_stm32_get(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data; ETH_HandleTypeDef *heth = &eth_dev_data->heth; unsigned int key; uint32_t second_2; key = irq_lock(); #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) tm->second = heth->Instance->MACSTSR; tm->nanosecond = heth->Instance->MACSTNR; second_2 = heth->Instance->MACSTSR; #else tm->second = heth->Instance->PTPTSHR; tm->nanosecond = heth->Instance->PTPTSLR; second_2 = heth->Instance->PTPTSHR; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ irq_unlock(key); if (tm->second != second_2 && tm->nanosecond < 
NSEC_PER_SEC / 2) { /* Second rollover has happened during first measurement: second register * was read before second boundary and nanosecond register was read after. * We will use second_2 as a new second value. */ tm->second = second_2; } return 0; } static int ptp_clock_stm32_adjust(const struct device *dev, int increment) { struct ptp_context *ptp_context = dev->data; struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data; ETH_HandleTypeDef *heth = &eth_dev_data->heth; int key, ret; if ((increment <= (int32_t)(-NSEC_PER_SEC)) || (increment >= (int32_t)NSEC_PER_SEC)) { ret = -EINVAL; } else { key = irq_lock(); #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACSTSUR = 0; if (increment >= 0) { heth->Instance->MACSTNUR = increment; } else { heth->Instance->MACSTNUR = ETH_MACSTNUR_ADDSUB | (NSEC_PER_SEC + increment); } heth->Instance->MACTSCR |= ETH_MACTSCR_TSUPDT; while (heth->Instance->MACTSCR & ETH_MACTSCR_TSUPDT_Msk) { /* spin lock */ } #else heth->Instance->PTPTSHUR = 0; if (increment >= 0) { heth->Instance->PTPTSLUR = increment; } else { heth->Instance->PTPTSLUR = ETH_PTPTSLUR_TSUPNS | (-increment); } heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTU; while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTU_Msk) { /* spin lock */ } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ ret = 0; irq_unlock(key); } return ret; } static int ptp_clock_stm32_rate_adjust(const struct device *dev, double ratio) { struct ptp_context *ptp_context = dev->data; struct eth_stm32_hal_dev_data *eth_dev_data = ptp_context->eth_dev_data; ETH_HandleTypeDef *heth = &eth_dev_data->heth; int key, ret; uint32_t addend_val; /* No change needed */ if (ratio == 1.0L) { return 0; } key = irq_lock(); ratio *= (double)eth_dev_data->clk_ratio_adj; /* Limit possible ratio */ if (ratio * 100 < CONFIG_ETH_STM32_HAL_PTP_CLOCK_ADJ_MIN_PCT || ratio * 100 > CONFIG_ETH_STM32_HAL_PTP_CLOCK_ADJ_MAX_PCT) { ret = -EINVAL; goto 
error; } /* Save new ratio */ eth_dev_data->clk_ratio_adj = ratio; /* Update addend register */ addend_val = UINT32_MAX * (double)eth_dev_data->clk_ratio * ratio; #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACTSAR = addend_val; heth->Instance->MACTSCR |= ETH_MACTSCR_TSADDREG; while (heth->Instance->MACTSCR & ETH_MACTSCR_TSADDREG_Msk) { /* spin lock */ } #else heth->Instance->PTPTSAR = addend_val; heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSARU; while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU_Msk) { /* spin lock */ } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ ret = 0; error: irq_unlock(key); return ret; } static const struct ptp_clock_driver_api api = { .set = ptp_clock_stm32_set, .get = ptp_clock_stm32_get, .adjust = ptp_clock_stm32_adjust, .rate_adjust = ptp_clock_stm32_rate_adjust, }; static int ptp_stm32_init(const struct device *port) { const struct device *const dev = DEVICE_DT_GET(DT_NODELABEL(mac)); struct eth_stm32_hal_dev_data *eth_dev_data = dev->data; const struct eth_stm32_hal_dev_cfg *eth_cfg = dev->config; struct ptp_context *ptp_context = port->data; ETH_HandleTypeDef *heth = &eth_dev_data->heth; int ret; uint32_t ptp_hclk_rate; uint32_t ss_incr_ns; uint32_t addend_val; eth_dev_data->ptp_clock = port; ptp_context->eth_dev_data = eth_dev_data; /* Mask the Timestamp Trigger interrupt */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACIER &= ~(ETH_MACIER_TSIE); #else heth->Instance->MACIMR &= ~(ETH_MACIMR_TSTIM); #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ /* Enable timestamping */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACTSCR |= ETH_MACTSCR_TSENA; #else heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSE; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ /* Query ethernet clock rate */ ret = 
clock_control_get_rate(eth_dev_data->clock, #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) (clock_control_subsys_t)&eth_cfg->pclken, #else (clock_control_subsys_t)&eth_cfg->pclken_ptp, #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ &ptp_hclk_rate); if (ret) { LOG_ERR("Failed to query ethernet clock"); return -EIO; } /* Program the subsecond increment register based on the PTP clock freq */ if (NSEC_PER_SEC % CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ != 0) { LOG_ERR("PTP clock period must be an integer nanosecond value"); return -EINVAL; } ss_incr_ns = NSEC_PER_SEC / CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ; if (ss_incr_ns > UINT8_MAX) { LOG_ERR("PTP clock period is more than %d nanoseconds", UINT8_MAX); return -EINVAL; } #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACSSIR = ss_incr_ns << ETH_MACMACSSIR_SSINC_Pos; #else heth->Instance->PTPSSIR = ss_incr_ns; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ /* Program timestamp addend register */ eth_dev_data->clk_ratio = ((double)CONFIG_ETH_STM32_HAL_PTP_CLOCK_SRC_HZ) / ((double)ptp_hclk_rate); /* * clk_ratio is a ratio between desired PTP clock frequency and HCLK rate. * Because HCLK is defined by a physical oscillator, it might drift due * to manufacturing tolerances and environmental effects (e.g. temperature). * clk_ratio_adj compensates for such inaccuracies. It starts off as 1.0 * and gets adjusted by calling ptp_clock_stm32_rate_adjust(). 
*/ eth_dev_data->clk_ratio_adj = 1.0f; addend_val = UINT32_MAX * eth_dev_data->clk_ratio * eth_dev_data->clk_ratio_adj; #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACTSAR = addend_val; heth->Instance->MACTSCR |= ETH_MACTSCR_TSADDREG; while (heth->Instance->MACTSCR & ETH_MACTSCR_TSADDREG_Msk) { k_yield(); } #else heth->Instance->PTPTSAR = addend_val; heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSARU; while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSARU_Msk) { k_yield(); } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ /* Enable fine timestamp correction method */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACTSCR |= ETH_MACTSCR_TSCFUPDT; #else heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSFCU; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ /* Enable nanosecond rollover into a new second */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACTSCR |= ETH_MACTSCR_TSCTRLSSR; #else heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSSR; #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ /* Initialize timestamp */ #if defined(CONFIG_SOC_SERIES_STM32H7X) || defined(CONFIG_SOC_SERIES_STM32H5X) heth->Instance->MACSTSUR = 0; heth->Instance->MACSTNUR = 0; heth->Instance->MACTSCR |= ETH_MACTSCR_TSINIT; while (heth->Instance->MACTSCR & ETH_MACTSCR_TSINIT_Msk) { k_yield(); } #else heth->Instance->PTPTSHUR = 0; heth->Instance->PTPTSLUR = 0; heth->Instance->PTPTSCR |= ETH_PTPTSCR_TSSTI; while (heth->Instance->PTPTSCR & ETH_PTPTSCR_TSSTI_Msk) { k_yield(); } #endif /* CONFIG_SOC_SERIES_STM32H7X || CONFIG_SOC_SERIES_STM32H5X */ #if defined(CONFIG_ETH_STM32_HAL_API_V2) /* Set PTP Configuration done */ heth->IsPtpConfigured = ETH_STM32_PTP_CONFIGURED; #endif return 0; } DEVICE_DEFINE(stm32_ptp_clock_0, PTP_CLOCK_NAME, ptp_stm32_init, NULL, &ptp_stm32_0_context, NULL, POST_KERNEL, 
CONFIG_ETH_STM32_HAL_PTP_CLOCK_INIT_PRIO, &api); #endif /* CONFIG_PTP_CLOCK_STM32_HAL */ ```
/content/code_sandbox/drivers/ethernet/eth_stm32_hal.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
16,799
```objective-c /* mbed Microcontroller Library * * * path_to_url * * Unless required by applicable law or agreed to in writing, software * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /* * This header is originally based on mbedOS header * targets/TARGET_ARM_SSG/TARGET_CM3DS_MPS2/device/drivers/smsc9220_eth.h, * but was considerably refactored since then. */ /* This file is the re-implementation of mps2_ethernet_api and Selftest's * ETH_MPS2. * MPS2 Selftest:path_to_url -> * \ISCM-1-0\AN491\software\Selftest\v2m_mps2\ */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_SMSC911X_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_SMSC911X_PRIV_H_ #ifndef __I #define __I #endif #ifndef __O #define __O #endif #ifndef __IO #define __IO #endif #define GET_BITFIELD(val, lsb, msb) \ (((val) >> (lsb)) & ((1 << ((msb) - (lsb) + 1)) - 1)) #define BFIELD(val, name) GET_BITFIELD(val, name ## _Lsb, name ## _Msb) #define SMSC9220_BFIELD(reg, bfield) BFIELD(SMSC9220->reg, reg ## _ ## bfield) /******************************************************************************/ /* SMSC9220 Register Definitions */ /******************************************************************************/ typedef struct { /* Receive FIFO Ports (offset 0x0) */ __I uint32_t RX_DATA_PORT; uint32_t RESERVED1[0x7]; /* Transmit FIFO Ports (offset 0x20) */ __O uint32_t TX_DATA_PORT; uint32_t RESERVED2[0x7]; /* Receive FIFO status port (offset 0x40) */ __I uint32_t RX_STAT_PORT; /* Receive FIFO status peek (offset 0x44) */ __I uint32_t RX_STAT_PEEK; /* Transmit FIFO status port (offset 0x48) */ __I uint32_t TX_STAT_PORT; /* Transmit FIFO status peek (offset 0x4C) */ __I uint32_t TX_STAT_PEEK; /* Chip ID and Revision (offset 0x50) */ __I uint32_t ID_REV; /* Main Interrupt Configuration (offset 0x54) */ __IO uint32_t IRQ_CFG; /* Interrupt Status (offset 0x58) */ __IO uint32_t INT_STS; /* Interrupt Enable Register (offset 0x5C) */ __IO uint32_t INT_EN; /* Reserved for future use (offset 0x60) */ 
uint32_t RESERVED3; /* Read-only byte order testing register 87654321h (offset 0x64) */ __I uint32_t BYTE_TEST; /* FIFO Level Interrupts (offset 0x68) */ __IO uint32_t FIFO_INT; /* Receive Configuration (offset 0x6C) */ __IO uint32_t RX_CFG; /* Transmit Configuration (offset 0x70) */ __IO uint32_t TX_CFG; /* Hardware Configuration (offset 0x74) */ __IO uint32_t HW_CFG; /* RX Datapath Control (offset 0x78) */ __IO uint32_t RX_DP_CTRL; /* Receive FIFO Information (offset 0x7C) */ __I uint32_t RX_FIFO_INF; /* Transmit FIFO Information (offset 0x80) */ __I uint32_t TX_FIFO_INF; /* Power Management Control (offset 0x84) */ __IO uint32_t PMT_CTRL; /* General Purpose IO Configuration (offset 0x88) */ __IO uint32_t GPIO_CFG; /* General Purpose Timer Configuration (offset 0x8C) */ __IO uint32_t GPT_CFG; /* General Purpose Timer Count (offset 0x90) */ __I uint32_t GPT_CNT; /* Reserved for future use (offset 0x94) */ uint32_t RESERVED4; /* WORD SWAP Register (offset 0x98) */ __IO uint32_t ENDIAN; /* Free Run Counter (offset 0x9C) */ __I uint32_t FREE_RUN; /* RX Dropped Frames Counter (offset 0xA0) */ __I uint32_t RX_DROP; /* MAC CSR Synchronizer Command (offset 0xA4) */ __IO uint32_t MAC_CSR_CMD; /* MAC CSR Synchronizer Data (offset 0xA8) */ __IO uint32_t MAC_CSR_DATA; /* Automatic Flow Control Configuration (offset 0xAC) */ __IO uint32_t AFC_CFG; /* EEPROM Command (offset 0xB0) */ __IO uint32_t E2P_CMD; /* EEPROM Data (offset 0xB4) */ __IO uint32_t E2P_DATA; } SMSC9220_TypeDef; #define HW_CFG_SRST BIT(0) #define RX_STAT_PORT_PKT_LEN_Lsb 16 #define RX_STAT_PORT_PKT_LEN_Msb 29 #define PMT_CTRL_READY BIT(0) #define RX_DP_CTRL_RX_FFWD BIT(31) #define RX_FIFO_INF_RXSUSED_Lsb 16 #define RX_FIFO_INF_RXSUSED_Msb 23 #define RX_FIFO_INF_RXDUSED_Lsb 0 #define RX_FIFO_INF_RXDUSED_Msb 15 #define MAC_CSR_CMD_BUSY BIT(31) #define MAC_CSR_CMD_READ BIT(30) #define MAC_CSR_CMD_WRITE 0 /* SMSC9220 MAC Registers Indices */ #define SMSC9220_MAC_CR 0x1 #define SMSC9220_MAC_ADDRH 0x2 #define 
SMSC9220_MAC_ADDRL 0x3 #define SMSC9220_MAC_HASHH 0x4 #define SMSC9220_MAC_HASHL 0x5 #define SMSC9220_MAC_MII_ACC 0x6 #define SMSC9220_MAC_MII_DATA 0x7 #define SMSC9220_MAC_FLOW 0x8 #define SMSC9220_MAC_VLAN1 0x9 #define SMSC9220_MAC_VLAN2 0xA #define SMSC9220_MAC_WUFF 0xB #define SMSC9220_MAC_WUCSR 0xC #define MAC_MII_ACC_MIIBZY BIT(0) #define MAC_MII_ACC_WRITE BIT(1) #define MAC_MII_ACC_READ 0 /* SMSC9220 PHY Registers Indices */ #define SMSC9220_PHY_BCONTROL 0 #define SMSC9220_PHY_BSTATUS 1 #define SMSC9220_PHY_ID1 2 #define SMSC9220_PHY_ID2 3 #define SMSC9220_PHY_ANEG_ADV 4 #define SMSC9220_PHY_ANEG_LPA 5 #define SMSC9220_PHY_ANEG_EXP 6 #define SMSC9220_PHY_MCONTROL 17 #define SMSC9220_PHY_MSTATUS 18 #define SMSC9220_PHY_CSINDICATE 27 #define SMSC9220_PHY_INTSRC 29 #define SMSC9220_PHY_INTMASK 30 #define SMSC9220_PHY_CS 31 #ifndef SMSC9220_BASE #define SMSC9220_BASE DT_INST_REG_ADDR(0) #endif #define SMSC9220 ((volatile SMSC9220_TypeDef *)SMSC9220_BASE) enum smsc9220_interrupt_source { SMSC9220_INTERRUPT_GPIO0 = 0, SMSC9220_INTERRUPT_GPIO1 = 1, SMSC9220_INTERRUPT_GPIO2 = 2, SMSC9220_INTERRUPT_RXSTATUS_FIFO_LEVEL = 3, SMSC9220_INTERRUPT_RXSTATUS_FIFO_FULL = 4, /* 5 Reserved according to Datasheet */ SMSC9220_INTERRUPT_RX_DROPPED_FRAME = 6, SMSC9220_INTERRUPT_TXSTATUS_FIFO_LEVEL = 7, SMSC9220_INTERRUPT_TXSTATUS_FIFO_FULL = 8, SMSC9220_INTERRUPT_TXDATA_FIFO_AVAILABLE = 9, SMSC9220_INTERRUPT_TXDATA_FIFO_OVERRUN = 10, /* 11, 12 Reserved according to Datasheet */ SMSC9220_INTERRUPT_TRANSMIT_ERROR = 13, SMSC9220_INTERRUPT_RECEIVE_ERROR = 14, SMSC9220_INTERRUPT_RECEIVE_WATCHDOG_TIMEOUT = 15, SMSC9220_INTERRUPT_TXSTATUS_OVERFLOW = 16, SMSC9220_INTERRUPT_POWER_MANAGEMENT = 17, SMSC9220_INTERRUPT_PHY = 18, SMSC9220_INTERRUPT_GP_TIMER = 19, SMSC9220_INTERRUPT_RX_DMA = 20, SMSC9220_INTERRUPT_TX_IOC = 21, /* 22 Reserved according to Datasheet*/ SMSC9220_INTERRUPT_RX_DROPPED_FRAME_HALF = 23, SMSC9220_INTERRUPT_RX_STOPPED = 24, SMSC9220_INTERRUPT_TX_STOPPED = 25, /* 26 - 30 
Reserved according to Datasheet*/ SMSC9220_INTERRUPT_SW = 31 }; #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_SMSC911X_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_smsc911x_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,023
```unknown # Synopsys DesignWare MAC configuration options menuconfig ETH_DWMAC bool "Synopsys DesignWare MAC driver" default y depends on NET_BUF_FIXED_DATA_SIZE depends on (SOC_SERIES_STM32H7X && !ETH_STM32_HAL) || MMU depends on DT_HAS_SNPS_DESIGNWARE_ETHERNET_ENABLED help This is a driver for the Synopsys DesignWare MAC, also referred to as "DesignWare Cores Ethernet Quality-of-Service". Hardware versions 4.x and 5.x are supported. Platform specific glue support is also required. This driver is currently available on targets using the STM32H7X series as an alternative to the eth_stm32_hal driver. Support for a generic DT and MMU based platform is also provided. Not yet implemented: - MDIO (currently relying on default PHY config) - PTP support - VLAN support - various hardware offloads (when available) if ETH_DWMAC config ETH_DWMAC_STM32H7X bool depends on SOC_SERIES_STM32H7X select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT default y config ETH_DWMAC_MMU bool depends on MMU default y config DWMAC_NB_TX_DESCS int "Number of entries in the transmit descriptor ring" default 16 range 4 128 help A higher number allows for more packets to be queued which may improve throughput, but that requires more transient memory. However there must be enough descriptors to hold all fragments of a full-size packet to be transmitted or the packet will be dropped. Fragment size is influenced by CONFIG_NET_BUF_DATA_SIZE. config DWMAC_NB_RX_DESCS int "Number of entries in the receive descriptor ring" default 16 range 4 128 help Received packets are spread across the required number of fragment buffers. Each RX fragment has a size of CONFIG_NET_BUF_DATA_SIZE. There is one descriptor entry per fragment. A higher number allows for more packets to be received without immediate intervention from the CPU but requires more transient memory. A smaller number increases the risk of an overflow and dropped packets. endif # ETH_DWMAC ```
/content/code_sandbox/drivers/ethernet/Kconfig.dwmac
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
506
```c
/*
 * Ethernet driver for the SMSC LAN91C111 (smsc91x family) controller.
 *
 * The chip exposes its registers through a small window of banked
 * 16-bit registers: software first writes the bank number to BSR, then
 * accesses the registers of that bank. Packet memory is managed by an
 * on-chip MMU driven through MMUCR commands, and the PHY is reached by
 * bit-banging the MII management interface through the MGMT register.
 */
/* */
#include <zephyr/net/ethernet.h>
#include <zephyr/net/phy.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys_clock.h>
#include <zephyr/drivers/mdio.h>
#include <zephyr/logging/log.h>

#include "eth_smsc91x_priv.h"

#define DT_DRV_COMPAT smsc_lan91c111

LOG_MODULE_REGISTER(eth_smsc91x, CONFIG_ETHERNET_LOG_LEVEL);

/* Serialize all register-window accesses on the per-device mutex. */
#define SMSC_LOCK(sc) k_mutex_lock(&(sc)->lock, K_FOREVER)
#define SMSC_UNLOCK(sc) k_mutex_unlock(&(sc)->lock)

#define HW_CYCLE_PER_US (sys_clock_hw_cycles_per_sec() / 1000000UL)

/* Iterations (1 us apart) to wait for a TX buffer allocation. */
#define TX_ALLOC_WAIT_TIME 100
/* Bound on interrupt-status polling loops in the ISR work handler. */
#define MAX_IRQ_LOOPS 8

/*
 * MII
 *
 * Bit assignments of the MGMT register used to bit-bang the MII
 * management (MDIO) protocol.
 */
#define MDO MGMT_MDO
#define MDI MGMT_MDI
#define MDC MGMT_MCLK
#define MDIRPHY MGMT_MDOE
#define MDIRHOST 0
#define MII_IDLE_DETECT_CYCLES 32
#define MII_COMMAND_START 0x01
#define MII_COMMAND_READ 0x02
#define MII_COMMAND_WRITE 0x01
#define MII_COMMAND_ACK 0x02

/* Chip names indexed by the REV register's chip-ID field. */
static const char *smsc_chip_ids[16] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	/* 9 */ "SMSC LAN91C11",
	NULL, NULL, NULL, NULL, NULL, NULL,
};

/* Per-controller soft state. */
struct smsc_data {
	mm_reg_t smsc_reg;      /* base of the memory-mapped register window */
	unsigned int irq;
	unsigned int smsc_chip; /* chip ID field from REV */
	unsigned int smsc_rev;  /* revision field from REV */
	unsigned int smsc_mask; /* currently enabled interrupt sources (MSK) */
	uint8_t mac[6];
	struct k_mutex lock;
	struct k_work isr_work; /* bottom half scheduled from the hard ISR */
};

struct eth_config {
	DEVICE_MMIO_ROM;
	const struct device *phy_dev;
};

struct eth_context {
	DEVICE_MMIO_RAM;
	struct net_if *iface;
	struct smsc_data sc;
};

/* Single staging buffers for whole frames; access is serialized by the
 * driver's locking (NOTE(review): rx_buffer is filled from the ISR work
 * item, tx_buffer from eth_tx — confirm they cannot race each other).
 */
static uint8_t tx_buffer[NET_ETH_MAX_FRAME_SIZE];
static uint8_t rx_buffer[NET_ETH_MAX_FRAME_SIZE];

/* Busy-wait for `us` microseconds. */
static ALWAYS_INLINE void delay(int us)
{
	k_busy_wait(us);
}

/* Select the active register bank via the Bank Select Register. */
static ALWAYS_INLINE void smsc_select_bank(struct smsc_data *sc, uint16_t bank)
{
	sys_write16(bank & BSR_BANK_MASK, sc->smsc_reg + BSR);
}

/* Return the currently selected register bank. */
static ALWAYS_INLINE unsigned int smsc_current_bank(struct smsc_data *sc)
{
	return FIELD_GET(BSR_BANK_MASK, sys_read16(sc->smsc_reg + BSR));
}

/* Spin until the on-chip MMU has finished its previous command.
 * MMUCR lives in bank 2, hence the assertion.
 */
static void smsc_mmu_wait(struct smsc_data *sc)
{
	__ASSERT((smsc_current_bank(sc) == 2), "%s called when not in bank 2", __func__);
	while (sys_read16(sc->smsc_reg + MMUCR) & MMUCR_BUSY) {
		;
	}
}

/* 8/16-bit single and repeated accessors for the register window. */
static ALWAYS_INLINE
uint8_t smsc_read_1(struct smsc_data *sc, int offset)
{
	return sys_read8(sc->smsc_reg + offset);
}

static ALWAYS_INLINE
uint16_t smsc_read_2(struct smsc_data *sc, int offset)
{
	return sys_read16(sc->smsc_reg + offset);
}

/* Read `count` 16-bit words from one register offset (FIFO-style). */
static ALWAYS_INLINE
void smsc_read_multi_2(struct smsc_data *sc, int offset, uint16_t *datap, uint16_t count)
{
	while (count--) {
		*datap++ = sys_read16(sc->smsc_reg + offset);
	}
}

static ALWAYS_INLINE
void smsc_write_1(struct smsc_data *sc, int offset, uint8_t val)
{
	sys_write8(val, sc->smsc_reg + offset);
}

static ALWAYS_INLINE
void smsc_write_2(struct smsc_data *sc, int offset, uint16_t val)
{
	sys_write16(val, sc->smsc_reg + offset);
}

/* Write `count` 16-bit words to one register offset (FIFO-style). */
static ALWAYS_INLINE
void smsc_write_multi_2(struct smsc_data *sc, int offset, uint16_t *datap, uint16_t count)
{
	while (count--) {
		sys_write16(*datap++, sc->smsc_reg + offset);
	}
}

/* Sample the MGMT register (bank 3) for one bit-bang read step. */
static uint32_t smsc_mii_bitbang_read(struct smsc_data *sc)
{
	uint16_t val;

	__ASSERT(FIELD_GET(BSR_BANK_MASK, smsc_read_2(sc, BSR)) == 3,
		 "%s called with bank %d (!=3)", __func__,
		 FIELD_GET(BSR_BANK_MASK, smsc_read_2(sc, BSR)));

	val = smsc_read_2(sc, MGMT);
	delay(1); /* Simulate a timing sequence */

	return val;
}

/* Drive the MGMT register (bank 3) for one bit-bang write step. */
static void smsc_mii_bitbang_write(struct smsc_data *sc, uint16_t val)
{
	__ASSERT(FIELD_GET(BSR_BANK_MASK, smsc_read_2(sc, BSR)) == 3,
		 "%s called with bank %d (!=3)", __func__,
		 FIELD_GET(BSR_BANK_MASK, smsc_read_2(sc, BSR)));

	smsc_write_2(sc, MGMT, val);
	delay(1); /* Simulate a timing sequence */
}

/* Clock out the MII preamble: MDO held high for 32 idle cycles. */
static void smsc_miibus_sync(struct smsc_data *sc)
{
	int i;
	uint32_t v;

	v = MDIRPHY | MDO;
	smsc_mii_bitbang_write(sc, v);
	for (i = 0; i < MII_IDLE_DETECT_CYCLES; i++) {
		smsc_mii_bitbang_write(sc, v | MDC);
		smsc_mii_bitbang_write(sc, v);
	}
}

/* Shift `nbits` of `data` out on MDO, MSB first, clocking MDC. */
static void smsc_miibus_sendbits(struct smsc_data *sc, uint32_t data, int nbits)
{
	int i;
	uint32_t v;

	v = MDIRPHY;
	smsc_mii_bitbang_write(sc, v);
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i) {
			v |= MDO;
		} else {
			v &= ~MDO;
		}
		smsc_mii_bitbang_write(sc, v);
		smsc_mii_bitbang_write(sc, v | MDC);
		smsc_mii_bitbang_write(sc, v);
	}
}

/* Read PHY register `reg` of PHY address `phy` by bit-banging a
 * standard MII read frame. Returns the 16-bit value, or 0 when the
 * PHY did not drive the turnaround bit low (error).
 */
static int smsc_miibus_readreg(struct smsc_data *sc, int phy, int reg)
{
	int i, err, val;

	/* Keep the ISR from switching banks mid-transaction. */
	irq_disable(sc->irq);
	SMSC_LOCK(sc);

	smsc_select_bank(sc, 3);

	smsc_miibus_sync(sc);
	smsc_miibus_sendbits(sc, MII_COMMAND_START, 2);
	smsc_miibus_sendbits(sc, MII_COMMAND_READ, 2);
	smsc_miibus_sendbits(sc, phy, 5);
	smsc_miibus_sendbits(sc, reg, 5);

	/* Switch direction to PHY -> host */
	smsc_mii_bitbang_write(sc, MDIRHOST);
	smsc_mii_bitbang_write(sc, MDIRHOST | MDC);
	smsc_mii_bitbang_write(sc, MDIRHOST);

	/* Check for error. */
	err = smsc_mii_bitbang_read(sc) & MDI;

	/* Idle clock. */
	smsc_mii_bitbang_write(sc, MDIRHOST | MDC);
	smsc_mii_bitbang_write(sc, MDIRHOST);

	val = 0;
	for (i = 0; i < 16; i++) {
		val <<= 1;
		/* Read data prior to clock low-high transition. */
		if (err == 0 && (smsc_mii_bitbang_read(sc) & MDI) != 0) {
			val |= 1;
		}
		smsc_mii_bitbang_write(sc, MDIRHOST | MDC);
		smsc_mii_bitbang_write(sc, MDIRHOST);
	}

	/* Set direction to host -> PHY, without a clock transition. */
	smsc_mii_bitbang_write(sc, MDIRPHY);

	SMSC_UNLOCK(sc);
	irq_enable(sc->irq);

	return (err == 0 ? val : 0);
}

/* Write `val` to PHY register `reg` of PHY address `phy` by
 * bit-banging a standard MII write frame.
 */
static void smsc_miibus_writereg(struct smsc_data *sc, int phy, int reg, uint16_t val)
{
	irq_disable(sc->irq);
	SMSC_LOCK(sc);

	smsc_select_bank(sc, 3);

	smsc_miibus_sync(sc);
	smsc_miibus_sendbits(sc, MII_COMMAND_START, 2);
	smsc_miibus_sendbits(sc, MII_COMMAND_WRITE, 2);
	smsc_miibus_sendbits(sc, phy, 5);
	smsc_miibus_sendbits(sc, reg, 5);
	smsc_miibus_sendbits(sc, MII_COMMAND_ACK, 2);
	smsc_miibus_sendbits(sc, val, 16);

	smsc_mii_bitbang_write(sc, MDIRPHY);

	SMSC_UNLOCK(sc);
	irq_enable(sc->irq);
}

/* Bring the controller to a known quiescent state: mask interrupts,
 * soft-reset, disable TX/RX and reset the packet-memory MMU.
 */
static void smsc_reset(struct smsc_data *sc)
{
	uint16_t ctr;

	/*
	 * Mask all interrupts
	 */
	smsc_select_bank(sc, 2);
	smsc_write_1(sc, MSK, 0);

	/*
	 * Tell the device to reset
	 */
	smsc_select_bank(sc, 0);
	smsc_write_2(sc, RCR, RCR_SOFT_RST);

	/*
	 * Set up the configuration register
	 */
	smsc_select_bank(sc, 1);
	smsc_write_2(sc, CR, CR_EPH_POWER_EN);
	delay(1);

	/*
	 * Turn off transmit and receive.
	 */
	smsc_select_bank(sc, 0);
	smsc_write_2(sc, TCR, 0);
	smsc_write_2(sc, RCR, 0);

	/*
	 * Set up the control register
	 */
	smsc_select_bank(sc, 1);
	ctr = smsc_read_2(sc, CTR);
	ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE;
	smsc_write_2(sc, CTR, ctr);

	/*
	 * Reset the MMU
	 */
	smsc_select_bank(sc, 2);
	smsc_mmu_wait(sc);
	smsc_write_2(sc, MMUCR, FIELD_PREP(MMUCR_CMD_MASK, MMUCR_CMD_MMU_RESET));
	smsc_mmu_wait(sc);
}

/* Enable the MAC: program PHY control/LED behavior, enable TX and RX,
 * and unmask the receive interrupt.
 */
static void smsc_enable(struct smsc_data *sc)
{
	/*
	 * Set up the receive/PHY control register.
	 */
	smsc_select_bank(sc, 0);
	smsc_write_2(sc, RPCR, RPCR_ANEG | RPCR_DPLX | RPCR_SPEED |
		     FIELD_PREP(RPCR_LSA_MASK, RPCR_LED_LINK_ANY) |
		     FIELD_PREP(RPCR_LSB_MASK, RPCR_LED_ACT_ANY));

	/*
	 * Set up the transmit and receive control registers.
	 */
	smsc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN);
	smsc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC);

	/*
	 * Clear all interrupt status
	 */
	smsc_select_bank(sc, 2);
	smsc_write_1(sc, ACK, 0);

	/*
	 * Set up the interrupt mask
	 */
	smsc_select_bank(sc, 2);
	sc->smsc_mask = RCV_INT;
	smsc_write_1(sc, MSK, sc->smsc_mask);
}

/* Probe the chip: verify the BSR identification pattern survives a
 * write, and check that the REV chip ID is a known part.
 * Returns 0 on success, -ENODEV otherwise.
 */
static int smsc_check(struct smsc_data *sc)
{
	uint16_t val;

	val = smsc_read_2(sc, BSR);
	if (FIELD_GET(BSR_IDENTIFY_MASK, val) != BSR_IDENTIFY) {
		LOG_ERR("Identification value not in BSR");
		return -ENODEV;
	}

	smsc_write_2(sc, BSR, 0);
	val = smsc_read_2(sc, BSR);
	if (FIELD_GET(BSR_IDENTIFY_MASK, val) != BSR_IDENTIFY) {
		LOG_ERR("Identification value not in BSR after write");
		return -ENODEV;
	}

	smsc_select_bank(sc, 3);
	val = smsc_read_2(sc, REV);
	val = FIELD_GET(REV_CHIP_MASK, val);
	if (smsc_chip_ids[val] == NULL) {
		LOG_ERR("Unknown chip revision: %d", val);
		return -ENODEV;
	}

	return 0;
}

/* Drain the RX FIFO: for each pending packet, read status and length,
 * validate, copy the payload into rx_buffer, hand it to the network
 * stack as a net_pkt, then release the packet memory back to the MMU.
 * Runs from the ISR work item with interrupts for this source masked;
 * re-enables RCV_INT in smsc_mask when the FIFO is empty.
 */
static void smsc_recv_pkt(struct eth_context *data)
{
	struct net_pkt *pkt;
	unsigned int packet, status, len;
	struct smsc_data *sc = &data->sc;
	uint16_t val16;
	int ret;

	smsc_select_bank(sc, 2);
	packet = smsc_read_1(sc, FIFO_RX);
	while ((packet & FIFO_EMPTY) == 0) {
		/*
		 * Point to the start of the packet.
		 */
		smsc_select_bank(sc, 2);
		smsc_write_1(sc, PNR, packet);
		smsc_write_2(sc, PTR, PTR_READ | PTR_RCV | PTR_AUTO_INCR);

		/*
		 * Grab status and packet length.
		 */
		status = smsc_read_2(sc, DATA0);
		val16 = smsc_read_2(sc, DATA0);
		len = FIELD_GET(RX_LEN_MASK, val16);
		if (len < PKT_CTRL_DATA_LEN) {
			LOG_WRN("rxlen(%d) too short", len);
		} else {
			len -= PKT_CTRL_DATA_LEN;
			/* Odd-length frames carry the last byte in the
			 * control word.
			 */
			if (status & RX_ODDFRM) {
				len += 1;
			}

			if (len > NET_ETH_MAX_FRAME_SIZE) {
				LOG_WRN("rxlen(%d) too large", len);
				goto _mmu_release;
			}

			/*
			 * Check for errors.
			 */
			if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALIGNERR)) {
				LOG_WRN("status word (0x%04x) indicate some error", status);
				goto _mmu_release;
			}

			/*
			 * Pull the packet out of the device.
			 */
			smsc_select_bank(sc, 2);
			smsc_write_1(sc, PNR, packet);
			/*
			 * Pointer start from 4 because we have already read status and len from
			 * RX_FIFO
			 */
			smsc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR);
			smsc_read_multi_2(sc, DATA0, (uint16_t *)rx_buffer, len / 2);
			if (len & 1) {
				rx_buffer[len - 1] = smsc_read_1(sc, DATA0);
			}

			pkt = net_pkt_rx_alloc_with_buffer(data->iface, len, AF_UNSPEC, 0,
							   K_NO_WAIT);
			if (!pkt) {
				LOG_ERR("Failed to obtain RX buffer");
				goto _mmu_release;
			}

			ret = net_pkt_write(pkt, rx_buffer, len);
			if (ret) {
				net_pkt_unref(pkt);
				LOG_WRN("net_pkt_write return %d", ret);
				goto _mmu_release;
			}

			ret = net_recv_data(data->iface, pkt);
			if (ret) {
				LOG_WRN("net_recv_data return %d", ret);
				net_pkt_unref(pkt);
			}
		}

_mmu_release:
		/*
		 * Tell the device we're done
		 */
		smsc_mmu_wait(sc);
		smsc_write_2(sc, MMUCR, FIELD_PREP(MMUCR_CMD_MASK, MMUCR_CMD_RELEASE));
		smsc_mmu_wait(sc);
		packet = smsc_read_1(sc, FIFO_RX);
	}

	sc->smsc_mask |= RCV_INT;
	smsc_write_1(sc, MSK, sc->smsc_mask);
}

/* Transmit `len` bytes from `buf`: allocate packet memory via the MMU,
 * copy the frame with its control word into the chip, enqueue it and
 * unmask TX completion interrupts. Returns 0 on success, -1 on
 * allocation timeout/failure.
 */
static int smsc_send_pkt(struct smsc_data *sc, uint8_t *buf, uint16_t len)
{
	unsigned int polling_count;
	uint8_t packet;

	SMSC_LOCK(sc);

	/*
	 * Request memory
	 */
	smsc_select_bank(sc, 2);
	smsc_mmu_wait(sc);
	smsc_write_2(sc, MMUCR, FIELD_PREP(MMUCR_CMD_MASK, MMUCR_CMD_TX_ALLOC));

	/*
	 * Polling if the allocation succeeds.
	 */
	for (polling_count = TX_ALLOC_WAIT_TIME; polling_count > 0; polling_count--) {
		if (smsc_read_1(sc, IST) & ALLOC_INT) {
			break;
		}
		delay(1);
	}

	if (polling_count == 0) {
		SMSC_UNLOCK(sc);
		LOG_WRN("Alloc TX mem timeout");
		return -1;
	}

	packet = smsc_read_1(sc, ARR);
	if (packet & ARR_FAILED) {
		SMSC_UNLOCK(sc);
		LOG_WRN("Alloc TX mem failed");
		return -1;
	}

	/*
	 * Tell the device to write to our packet number.
	 */
	smsc_write_1(sc, PNR, packet);
	smsc_write_2(sc, PTR, PTR_AUTO_INCR);

	/*
	 * Tell the device how long the packet is (include control data).
	 */
	smsc_write_2(sc, DATA0, 0);
	smsc_write_2(sc, DATA0, len + PKT_CTRL_DATA_LEN);

	smsc_write_multi_2(sc, DATA0, (uint16_t *)buf, len / 2);

	/* Push out the control byte and the odd byte if needed. */
	if (len & 1) {
		smsc_write_2(sc, DATA0, (CTRL_ODD << 8) | buf[len - 1]);
	} else {
		smsc_write_2(sc, DATA0, 0);
	}

	/*
	 * Enqueue the packet.
	 */
	smsc_mmu_wait(sc);
	smsc_write_2(sc, MMUCR, FIELD_PREP(MMUCR_CMD_MASK, MMUCR_CMD_ENQUEUE));

	/*
	 * Unmask the TX empty interrupt
	 */
	sc->smsc_mask |= (TX_EMPTY_INT | TX_INT);
	smsc_write_1(sc, MSK, sc->smsc_mask);

	SMSC_UNLOCK(sc);

	/*
	 * Finish up
	 */
	return 0;
}

/* Interrupt bottom half (k_work): loop over the interrupt status,
 * handling TX errors (release failed packet, re-enable transmitter),
 * received packets, and TX-empty events, then restore the interrupt
 * mask the hard ISR cleared.
 */
static void smsc_isr_task(struct k_work *item)
{
	struct smsc_data *sc = CONTAINER_OF(item, struct smsc_data, isr_work);
	struct eth_context *data = CONTAINER_OF(sc, struct eth_context, sc);
	uint8_t status;
	unsigned int mem_info, ephsr, packet, tcr;

	SMSC_LOCK(sc);

	for (int loop_count = 0; loop_count < MAX_IRQ_LOOPS; loop_count++) {
		smsc_select_bank(sc, 0);
		mem_info = smsc_read_2(sc, MIR);

		smsc_select_bank(sc, 2);
		status = smsc_read_1(sc, IST);
		LOG_DBG("INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x",
			status, smsc_read_1(sc, MSK), mem_info, smsc_read_2(sc, FIFO));

		status &= sc->smsc_mask;
		if (!status) {
			break;
		}

		/*
		 * Transmit error
		 */
		if (status & TX_INT) {
			/*
			 * Kill off the packet if there is one.
			 */
			packet = smsc_read_1(sc, FIFO_TX);
			if ((packet & FIFO_EMPTY) == 0) {
				smsc_select_bank(sc, 2);
				smsc_write_1(sc, PNR, packet);
				smsc_write_2(sc, PTR, PTR_READ | PTR_AUTO_INCR);

				smsc_select_bank(sc, 0);
				ephsr = smsc_read_2(sc, EPHSR);
				if ((ephsr & EPHSR_TX_SUC) == 0) {
					LOG_WRN("bad packet, EPHSR: 0x%04x", ephsr);
				}

				smsc_select_bank(sc, 2);
				smsc_mmu_wait(sc);
				smsc_write_2(sc, MMUCR,
					     FIELD_PREP(MMUCR_CMD_MASK, MMUCR_CMD_RELEASE_PKT));

				/* A TX error disables the transmitter;
				 * turn it back on.
				 */
				smsc_select_bank(sc, 0);
				tcr = smsc_read_2(sc, TCR);
				tcr |= TCR_TXENA | TCR_PAD_EN;
				smsc_write_2(sc, TCR, tcr);
			}

			/*
			 * Ack the interrupt
			 */
			smsc_select_bank(sc, 2);
			smsc_write_1(sc, ACK, TX_INT);
		}

		/*
		 * Receive
		 */
		if (status & RCV_INT) {
			smsc_write_1(sc, ACK, RCV_INT);
			smsc_recv_pkt(data);
		}

		/*
		 * Transmit empty
		 */
		if (status & TX_EMPTY_INT) {
			smsc_write_1(sc, ACK, TX_EMPTY_INT);
			sc->smsc_mask &= ~TX_EMPTY_INT;
		}
	}

	smsc_select_bank(sc, 2);
	smsc_write_1(sc, MSK, sc->smsc_mask);

	SMSC_UNLOCK(sc);
}

/* One-time chip init: probe, reset, read chip/revision IDs and the
 * MAC address from the individual-address registers (bank 1).
 * Returns 0 on success or a negative errno from the probe.
 */
static int smsc_init(struct smsc_data *sc)
{
	int ret;
	unsigned int val;

	ret = smsc_check(sc);
	if (ret) {
		return ret;
	}

	SMSC_LOCK(sc);
	smsc_reset(sc);
	SMSC_UNLOCK(sc);

	smsc_select_bank(sc, 3);
	val = smsc_read_2(sc, REV);
	sc->smsc_chip = FIELD_GET(REV_CHIP_MASK, val);
	sc->smsc_rev = FIELD_GET(REV_REV_MASK, val);

	smsc_select_bank(sc, 1);
	sc->mac[0] = smsc_read_1(sc, IAR0);
	sc->mac[1] = smsc_read_1(sc, IAR1);
	sc->mac[2] = smsc_read_1(sc, IAR2);
	sc->mac[3] = smsc_read_1(sc, IAR3);
	sc->mac[4] = smsc_read_1(sc, IAR4);
	sc->mac[5] = smsc_read_1(sc, IAR5);

	return 0;
}

/* ethernet_api.get_phy: return the PHY device bound in devicetree. */
static const struct device *eth_get_phy(const struct device *dev)
{
	const struct eth_config *cfg = dev->config;

	return cfg->phy_dev;
}

/* PHY callback: propagate link up/down to the network interface. */
static void phy_link_state_changed(const struct device *phy_dev, struct phy_link_state *state,
				   void *user_data)
{
	const struct device *dev = user_data;
	struct eth_context *data = dev->data;

	if (state->is_up) {
		net_eth_carrier_on(data->iface);
	} else {
		net_eth_carrier_off(data->iface);
	}
}

/* ethernet_api.get_capabilities: 10/100 Mbit, plus promiscuous mode
 * when enabled in Kconfig.
 */
static enum ethernet_hw_caps eth_smsc_get_caps(const struct device *dev)
{
	ARG_UNUSED(dev);

	return (ETHERNET_LINK_10BASE_T
		| ETHERNET_LINK_100BASE_T
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
		| ETHERNET_PROMISC_MODE
#endif
		);
}

/* ethernet_api.send: linearize the net_pkt into tx_buffer and hand it
 * to smsc_send_pkt. Returns 0 on success, -1 on failure.
 */
static int eth_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_context *data = dev->data;
	struct smsc_data *sc = &data->sc;
	uint16_t len;

	len = net_pkt_get_len(pkt);
	if (net_pkt_read(pkt, tx_buffer, len)) {
		LOG_WRN("read pkt failed");
		return -1;
	}

	return smsc_send_pkt(sc, tx_buffer, len);
}

/* ethernet_api.set_config: only promiscuous-mode toggling is
 * supported; -EALREADY when the requested state is already set,
 * -ENOTSUP for anything else.
 */
static int eth_smsc_set_config(const struct device *dev, enum ethernet_config_type type,
			       const struct ethernet_config *config)
{
	int ret = 0;

	switch (type) {
#if defined(CONFIG_NET_PROMISCUOUS_MODE)
	case ETHERNET_CONFIG_TYPE_PROMISC_MODE:
		struct eth_context *data = dev->data;
		struct smsc_data *sc = &data->sc;
		uint8_t reg_val;

		SMSC_LOCK(sc);
		smsc_select_bank(sc, 0);
		reg_val = smsc_read_1(sc, RCR);
		if (config->promisc_mode && !(reg_val & RCR_PRMS)) {
			smsc_write_1(sc, RCR, reg_val | RCR_PRMS);
		} else if (!config->promisc_mode && (reg_val & RCR_PRMS)) {
			smsc_write_1(sc, RCR, reg_val & ~RCR_PRMS);
		} else {
			ret = -EALREADY;
		}
		SMSC_UNLOCK(sc);
		break;
#endif
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}

/* iface_api.init: bring the chip up, publish the MAC address and hook
 * the PHY link-state callback.
 */
static void eth_initialize(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_context *data = dev->data;
	const struct eth_config *cfg = dev->config;
	const struct device *phy_dev = cfg->phy_dev;
	struct smsc_data *sc = &data->sc;

	ethernet_init(iface);
	net_if_carrier_off(iface);

	smsc_reset(sc);
	smsc_enable(sc);

	LOG_INF("MAC %02x:%02x:%02x:%02x:%02x:%02x",
		sc->mac[0], sc->mac[1], sc->mac[2],
		sc->mac[3], sc->mac[4], sc->mac[5]);

	net_if_set_link_addr(iface, sc->mac, sizeof(sc->mac), NET_LINK_ETHERNET);

	data->iface = iface;

	if (device_is_ready(phy_dev)) {
		phy_link_callback_set(phy_dev, phy_link_state_changed, (void *)dev);
	} else {
		LOG_ERR("PHY device not ready");
	}
}

static const struct ethernet_api api_funcs = {
	.iface_api.init = eth_initialize,
	.get_capabilities = eth_smsc_get_caps,
	.get_phy = eth_get_phy,
	.set_config = eth_smsc_set_config,
	.send = eth_tx,
};

/* Hard ISR: mask the chip's interrupts (preserving the selected bank)
 * and defer all processing to smsc_isr_task.
 */
static void eth_smsc_isr(const struct device *dev)
{
	struct eth_context *data = dev->data;
	struct smsc_data *sc = &data->sc;
	uint32_t curbank;

	curbank = smsc_current_bank(sc);

	/*
	 * Block interrupts in order to let smsc91x_isr_task to kick in
	 */
	smsc_select_bank(sc, 2);
	smsc_write_1(sc, MSK, 0);

	smsc_select_bank(sc, curbank);

	k_work_submit(&(sc->isr_work));
}

/* Device init hook: set up locking/work item, map MMIO, connect and
 * enable the IRQ, and initialize the chip.
 * NOTE(review): the return value of smsc_init() is ignored, so a probe
 * failure still reports success — confirm whether it should be
 * propagated.
 */
int eth_init(const struct device *dev)
{
	struct eth_context *data = (struct eth_context *)dev->data;
	struct smsc_data *sc = &data->sc;
	int ret;

	ret = k_mutex_init(&sc->lock);
	if (ret) {
		return ret;
	}

	k_work_init(&sc->isr_work, smsc_isr_task);

	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), eth_smsc_isr,
		    DEVICE_DT_INST_GET(0), 0);

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
	sc->smsc_reg = DEVICE_MMIO_GET(dev);
	sc->irq = DT_INST_IRQN(0);

	smsc_init(sc);

	irq_enable(DT_INST_IRQN(0));

	return 0;
}

static struct eth_context eth_0_context;

static struct eth_config eth_0_config = {
	DEVICE_MMIO_ROM_INIT(DT_PARENT(DT_DRV_INST(0))),
	.phy_dev = DEVICE_DT_GET(DT_INST_PHANDLE(0, phy_handle)),
};

ETH_NET_DEVICE_DT_INST_DEFINE(0,
			      eth_init, NULL, &eth_0_context, &eth_0_config,
			      CONFIG_ETH_INIT_PRIORITY, &api_funcs, NET_ETH_MTU);

/*
 * MDIO bus driver piggy-backing on the same controller: register
 * reads/writes go through the bit-banged MII helpers above.
 */
#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT smsc_lan91c111_mdio

struct mdio_smsc_config {
	const struct device *eth_dev;
};

/* The shared MAC registers need no enable/disable sequencing. */
static void mdio_smsc_bus_disable(const struct device *dev)
{
	ARG_UNUSED(dev);
}

static void mdio_smsc_bus_enable(const struct device *dev)
{
	ARG_UNUSED(dev);
}

/* mdio_driver_api.read: forward to the bit-banged MII read. */
static int mdio_smsc_read(const struct device *dev, uint8_t prtad, uint8_t devad, uint16_t *data)
{
	const struct mdio_smsc_config *cfg = dev->config;
	const struct device *eth_dev = cfg->eth_dev;
	struct eth_context *eth_data = eth_dev->data;
	struct smsc_data *sc = &eth_data->sc;

	*data = smsc_miibus_readreg(sc, prtad, devad);

	return 0;
}

/* mdio_driver_api.write: forward to the bit-banged MII write. */
static int mdio_smsc_write(const struct device *dev, uint8_t prtad, uint8_t devad, uint16_t data)
{
	const struct mdio_smsc_config *cfg = dev->config;
	const struct device *eth_dev = cfg->eth_dev;
	struct eth_context *eth_data = eth_dev->data;
	struct smsc_data *sc = &eth_data->sc;

	smsc_miibus_writereg(sc, prtad, devad, data);

	return 0;
}

static const struct mdio_driver_api mdio_smsc_api = {
	.bus_disable = mdio_smsc_bus_disable,
	.bus_enable = mdio_smsc_bus_enable,
	.read = mdio_smsc_read,
	.write = mdio_smsc_write,
};

const struct mdio_smsc_config mdio_smsc_config_0 = {
	.eth_dev = DEVICE_DT_GET(DT_CHILD(DT_INST_PARENT(0), ethernet)),
};

DEVICE_DT_INST_DEFINE(0, NULL, NULL, NULL, &mdio_smsc_config_0, POST_KERNEL,
		      CONFIG_MDIO_INIT_PRIORITY, &mdio_smsc_api);
```
/content/code_sandbox/drivers/ethernet/eth_smsc91x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,496
```c /* ENC424J600 Stand-alone Ethernet Controller with SPI * * */ #define DT_DRV_COMPAT microchip_enc424j600 #include <zephyr/kernel.h> #include <zephyr/device.h> #include <string.h> #include <errno.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include "eth_enc424j600_priv.h" LOG_MODULE_REGISTER(ethdrv, CONFIG_ETHERNET_LOG_LEVEL); static void enc424j600_write_sbc(const struct device *dev, uint8_t cmd) { const struct enc424j600_config *config = dev->config; uint8_t buf[2] = { cmd, 0xFF }; const struct spi_buf tx_buf = { .buf = buf, .len = 1, }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; spi_write_dt(&config->spi, &tx); } static void enc424j600_write_sfru(const struct device *dev, uint8_t addr, uint16_t value) { const struct enc424j600_config *config = dev->config; uint8_t buf[4]; const struct spi_buf tx_buf = { .buf = buf, .len = sizeof(buf) }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; buf[0] = ENC424J600_NBC_WCRU; buf[1] = addr; buf[2] = value; buf[3] = value >> 8; spi_write_dt(&config->spi, &tx); } static void enc424j600_read_sfru(const struct device *dev, uint8_t addr, uint16_t *value) { const struct enc424j600_config *config = dev->config; uint8_t buf[4]; const struct spi_buf tx_buf = { .buf = buf, .len = 2 }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; struct spi_buf rx_buf = { .buf = buf, .len = sizeof(buf), }; const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 }; buf[0] = ENC424J600_NBC_RCRU; buf[1] = addr; if (!spi_transceive_dt(&config->spi, &tx, &rx)) { *value = ((uint16_t)buf[3] << 8 | buf[2]); } else { LOG_DBG("Failure while reading register 0x%02x", addr); *value = 0U; } } static void enc424j600_modify_sfru(const struct device *dev, uint8_t opcode, uint16_t addr, uint16_t value) { const struct enc424j600_config *config = 
dev->config; uint8_t buf[4]; const struct spi_buf tx_buf = { .buf = buf, .len = sizeof(buf) }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; buf[0] = opcode; buf[1] = addr; buf[2] = value; buf[3] = value >> 8; spi_write_dt(&config->spi, &tx); } #define enc424j600_set_sfru(dev, addr, value) \ enc424j600_modify_sfru(dev, ENC424J600_NBC_BFSU, addr, value) #define enc424j600_clear_sfru(dev, addr, value) \ enc424j600_modify_sfru(dev, ENC424J600_NBC_BFCU, addr, value) static void enc424j600_write_phy(const struct device *dev, uint16_t addr, uint16_t data) { uint16_t mistat; enc424j600_write_sfru(dev, ENC424J600_SFR2_MIREGADRL, addr); enc424j600_write_sfru(dev, ENC424J600_SFR3_MIWRL, data); do { k_busy_wait(ENC424J600_PHY_ACCESS_DELAY); enc424j600_read_sfru(dev, ENC424J600_SFR3_MISTATL, &mistat); } while ((mistat & ENC424J600_MISTAT_BUSY)); } static void enc424j600_read_phy(const struct device *dev, uint16_t addr, uint16_t *data) { uint16_t mistat; enc424j600_write_sfru(dev, ENC424J600_SFR2_MIREGADRL, addr); enc424j600_write_sfru(dev, ENC424J600_SFR2_MICMDL, ENC424J600_MICMD_MIIRD); do { k_busy_wait(ENC424J600_PHY_ACCESS_DELAY); enc424j600_read_sfru(dev, ENC424J600_SFR3_MISTATL, &mistat); } while ((mistat & ENC424J600_MISTAT_BUSY)); enc424j600_write_sfru(dev, ENC424J600_SFR2_MICMDL, 0); enc424j600_read_sfru(dev, ENC424J600_SFR3_MIRDL, data); } static void enc424j600_write_mem(const struct device *dev, uint8_t opcode, uint8_t *data_buffer, uint16_t buf_len) { const struct enc424j600_config *config = dev->config; uint8_t buf[1] = { opcode }; const struct spi_buf tx_buf[2] = { { .buf = buf, .len = 1 }, { .buf = data_buffer, .len = buf_len }, }; const struct spi_buf_set tx = { .buffers = tx_buf, .count = 2 }; if (spi_write_dt(&config->spi, &tx)) { LOG_ERR("Failed to write SRAM buffer"); return; } } static void enc424j600_read_mem(const struct device *dev, uint8_t opcode, uint8_t *data_buffer, uint16_t buf_len) { const struct enc424j600_config *config = 
dev->config; uint8_t buf[1] = { opcode }; const struct spi_buf tx_buf = { .buf = buf, .len = 1 }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; struct spi_buf rx_buf[2] = { { .buf = NULL, .len = 1 }, { .buf = data_buffer, .len = buf_len }, }; const struct spi_buf_set rx = { .buffers = rx_buf, .count = 2 }; if (spi_transceive_dt(&config->spi, &tx, &rx)) { LOG_ERR("Failed to read SRAM buffer"); return; } } static void enc424j600_gpio_callback(const struct device *dev, struct gpio_callback *cb, uint32_t pins) { struct enc424j600_runtime *context = CONTAINER_OF(cb, struct enc424j600_runtime, gpio_cb); k_sem_give(&context->int_sem); } static void enc424j600_init_filters(const struct device *dev) { uint16_t tmp; enc424j600_write_sfru(dev, ENC424J600_SFR1_ERXFCONL, ENC424J600_ERXFCON_CRCEN | ENC424J600_ERXFCON_RUNTEN | ENC424J600_ERXFCON_UCEN | ENC424J600_ERXFCON_MCEN | ENC424J600_ERXFCON_BCEN); if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_sfru(dev, ENC424J600_SFR1_ERXFCONL, &tmp); LOG_DBG("ERXFCON: 0x%04x", tmp); } } static void enc424j600_init_phy(const struct device *dev) { uint16_t tmp; enc424j600_write_phy(dev, ENC424J600_PSFR_PHANA, ENC424J600_PHANA_ADPAUS_SYMMETRIC_ONLY | ENC424J600_PHANA_AD100FD | ENC424J600_PHANA_AD100 | ENC424J600_PHANA_AD10FD | ENC424J600_PHANA_AD10 | ENC424J600_PHANA_ADIEEE_DEFAULT); if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_phy(dev, ENC424J600_PSFR_PHANA, &tmp); LOG_DBG("PHANA: 0x%04x", tmp); } enc424j600_read_phy(dev, ENC424J600_PSFR_PHCON1, &tmp); tmp |= ENC424J600_PHCON1_RENEG; LOG_DBG("PHCON1: 0x%04x", tmp); enc424j600_write_phy(dev, ENC424J600_PSFR_PHCON1, tmp); } static void enc424j600_setup_mac(const struct device *dev) { uint16_t tmp; uint16_t macon2; if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_phy(dev, ENC424J600_PSFR_PHANLPA, &tmp); LOG_DBG("PHANLPA: 0x%04x", tmp); } enc424j600_read_phy(dev, ENC424J600_PSFR_PHSTAT3, &tmp); if (tmp & 
ENC424J600_PHSTAT3_SPDDPX_100) { LOG_INF("100Mbps"); } else if (tmp & ENC424J600_PHSTAT3_SPDDPX_10) { LOG_INF("10Mbps"); } else { LOG_ERR("Unknown speed configuration"); } if (tmp & ENC424J600_PHSTAT3_SPDDPX_FD) { LOG_INF("full duplex"); enc424j600_read_sfru(dev, ENC424J600_SFR2_MACON2L, &macon2); macon2 |= ENC424J600_MACON2_FULDPX; enc424j600_write_sfru(dev, ENC424J600_SFR2_MACON2L, macon2); enc424j600_write_sfru(dev, ENC424J600_SFR2_MABBIPGL, ENC424J600_MABBIPG_DEFAULT); } else { LOG_INF("half duplex"); } if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_sfru(dev, ENC424J600_SFR2_MACON2L, &tmp); LOG_DBG("MACON2: 0x%04x", tmp); enc424j600_read_sfru(dev, ENC424J600_SFR2_MAMXFLL, &tmp); LOG_DBG("MAMXFL (maximum frame length): %u", tmp); } } static int enc424j600_tx(const struct device *dev, struct net_pkt *pkt) { struct enc424j600_runtime *context = dev->data; uint16_t len = net_pkt_get_len(pkt); struct net_buf *frag; uint16_t tmp; LOG_DBG("pkt %p (len %u)", pkt, len); k_sem_take(&context->tx_rx_sem, K_FOREVER); enc424j600_write_sfru(dev, ENC424J600_SFR4_EGPWRPTL, ENC424J600_TXSTART); for (frag = pkt->frags; frag; frag = frag->frags) { enc424j600_write_mem(dev, ENC424J600_NBC_WGPDATA, frag->data, frag->len); } enc424j600_write_sfru(dev, ENC424J600_SFR0_ETXSTL, ENC424J600_TXSTART); enc424j600_write_sfru(dev, ENC424J600_SFR0_ETXLENL, len); enc424j600_write_sbc(dev, ENC424J600_1BC_SETTXRTS); do { k_sleep(K_MSEC(1)); enc424j600_read_sfru(dev, ENC424J600_SFRX_ECON1L, &tmp); } while (tmp & ENC424J600_ECON1_TXRTS); if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_sfru(dev, ENC424J600_SFR0_ETXSTATL, &tmp); LOG_DBG("ETXSTAT: 0x%04x", tmp); } k_sem_give(&context->tx_rx_sem); return 0; } static int enc424j600_rx(const struct device *dev) { struct enc424j600_runtime *context = dev->data; const struct enc424j600_config *config = dev->config; uint8_t info[ENC424J600_RSV_SIZE + ENC424J600_PTR_NXP_PKT_SIZE]; struct net_buf *pkt_buf = NULL; struct 
net_pkt *pkt; uint16_t frm_len = 0U; uint32_t status; uint16_t tmp; k_sem_take(&context->tx_rx_sem, K_FOREVER); enc424j600_write_sfru(dev, ENC424J600_SFR4_ERXRDPTL, context->next_pkt_ptr); if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_sfru(dev, ENC424J600_SFR4_ERXRDPTL, &tmp); LOG_DBG("set ERXRDPT to 0x%04x", tmp); } enc424j600_read_mem(dev, ENC424J600_NBC_RRXDATA, info, sizeof(info)); if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { enc424j600_read_sfru(dev, ENC424J600_SFR4_ERXRDPTL, &tmp); LOG_DBG("ERXRDPT is 0x%04x now", tmp); } context->next_pkt_ptr = sys_get_le16(&info[0]); frm_len = sys_get_le16(&info[2]); status = sys_get_le32(&info[4]); LOG_DBG("npp 0x%04x, length %u, status 0x%08x", context->next_pkt_ptr, frm_len, status); /* frame length without FCS */ frm_len -= 4; if (frm_len > NET_ETH_MAX_FRAME_SIZE) { LOG_ERR("Maximum frame length exceeded"); eth_stats_update_errors_rx(context->iface); goto done; } /* Get the frame from the buffer */ pkt = net_pkt_rx_alloc_with_buffer(context->iface, frm_len, AF_UNSPEC, 0, K_MSEC(config->timeout)); if (!pkt) { LOG_ERR("Could not allocate rx buffer"); eth_stats_update_errors_rx(context->iface); goto done; } pkt_buf = pkt->buffer; do { size_t frag_len; uint8_t *data_ptr; size_t spi_frame_len; data_ptr = pkt_buf->data; /* Review the space available for the new frag */ frag_len = net_buf_tailroom(pkt_buf); if (frm_len > frag_len) { spi_frame_len = frag_len; } else { spi_frame_len = frm_len; } enc424j600_read_mem(dev, ENC424J600_NBC_RRXDATA, data_ptr, spi_frame_len); net_buf_add(pkt_buf, spi_frame_len); /* One fragment has been written via SPI */ frm_len -= spi_frame_len; pkt_buf = pkt_buf->frags; } while (frm_len > 0); if (net_recv_data(context->iface, pkt) < 0) { net_pkt_unref(pkt); } done: if (context->next_pkt_ptr == ENC424J600_RXSTART) { tmp = ENC424J600_RXEND - 1; LOG_DBG("wrap back"); } else { tmp = context->next_pkt_ptr - 2; } enc424j600_write_sfru(dev, ENC424J600_SFR0_ERXTAILL, tmp); 
enc424j600_write_sbc(dev, ENC424J600_1BC_SETPKTDEC); k_sem_give(&context->tx_rx_sem); return 0; } static void enc424j600_rx_thread(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); struct enc424j600_runtime *context = p1; uint16_t eir; uint16_t estat; uint8_t counter; while (true) { k_sem_take(&context->int_sem, K_FOREVER); enc424j600_write_sbc(context->dev, ENC424J600_1BC_CLREIE); enc424j600_read_sfru(context->dev, ENC424J600_SFRX_EIRL, &eir); enc424j600_read_sfru(context->dev, ENC424J600_SFRX_ESTATL, &estat); LOG_DBG("ESTAT: 0x%04x", estat); if (eir & ENC424J600_EIR_PKTIF) { counter = (uint8_t)estat; while (counter) { enc424j600_rx(context->dev); enc424j600_read_sfru(context->dev, ENC424J600_SFRX_ESTATL, &estat); counter = (uint8_t)estat; LOG_DBG("ESTAT: 0x%04x", estat); } } else if (eir & ENC424J600_EIR_LINKIF) { enc424j600_clear_sfru(context->dev, ENC424J600_SFRX_EIRL, ENC424J600_EIR_LINKIF); if (estat & ENC424J600_ESTAT_PHYLNK) { LOG_INF("Link up"); enc424j600_setup_mac(context->dev); net_eth_carrier_on(context->iface); } else { LOG_INF("Link down"); if (context->iface_initialized) { net_eth_carrier_off(context->iface); } } } else { LOG_ERR("Unknown Interrupt, EIR: 0x%04x", eir); /* * Terminate interrupt handling thread * only when debugging. 
*/ if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) { k_oops(); } } enc424j600_write_sbc(context->dev, ENC424J600_1BC_SETEIE); } } static int enc424j600_get_config(const struct device *dev, enum ethernet_config_type type, struct ethernet_config *config) { uint16_t tmp; int rc = 0; struct enc424j600_runtime *context = dev->data; if (type != ETHERNET_CONFIG_TYPE_LINK && type != ETHERNET_CONFIG_TYPE_DUPLEX) { /* Unsupported configuration query */ return -ENOTSUP; } k_sem_take(&context->tx_rx_sem, K_FOREVER); if (type == ETHERNET_CONFIG_TYPE_LINK) { /* Query active link speed */ enc424j600_read_phy(dev, ENC424J600_PSFR_PHSTAT3, &tmp); if (tmp & ENC424J600_PHSTAT3_SPDDPX_100) { /* 100Mbps link speed */ config->l.link_100bt = true; } else if (tmp & ENC424J600_PHSTAT3_SPDDPX_10) { /* 10Mbps link speed */ config->l.link_10bt = true; } else { /* Unknown link speed */ rc = -EINVAL; } } else if (type == ETHERNET_CONFIG_TYPE_DUPLEX) { /* Query if half or full duplex */ enc424j600_read_phy(dev, ENC424J600_PSFR_PHSTAT3, &tmp); /* Assume operating in half duplex mode */ config->full_duplex = false; if (tmp & ENC424J600_PHSTAT3_SPDDPX_FD) { /* Operating in full duplex mode */ config->full_duplex = true; } } k_sem_give(&context->tx_rx_sem); return rc; } static enum ethernet_hw_caps enc424j600_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T; } static void enc424j600_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct enc424j600_runtime *context = dev->data; net_if_set_link_addr(iface, context->mac_address, sizeof(context->mac_address), NET_LINK_ETHERNET); context->iface = iface; ethernet_init(iface); net_if_carrier_off(iface); context->iface_initialized = true; } static int enc424j600_start_device(const struct device *dev) { struct enc424j600_runtime *context = dev->data; uint16_t tmp; if (!context->suspended) { LOG_INF("Not suspended"); return 0; } 
/* NOTE(review): tail of a function whose opening brace is above this chunk
 * (presumably enc424j600_start_device, given the matching stop routine
 * below — confirm against the full file). It wakes the PHY, re-enables
 * Ethernet/reception and clears the suspended flag.
 */
k_sem_take(&context->tx_rx_sem, K_FOREVER);
enc424j600_set_sfru(dev, ENC424J600_SFR3_ECON2L,
		    ENC424J600_ECON2_ETHEN | ENC424J600_ECON2_STRCH);
/* Clear PSLEEP to take the PHY out of power-down */
enc424j600_read_phy(dev, ENC424J600_PSFR_PHCON1, &tmp);
tmp &= ~ENC424J600_PHCON1_PSLEEP;
enc424j600_write_phy(dev, ENC424J600_PSFR_PHCON1, tmp);
/* Re-enable packet reception */
enc424j600_set_sfru(dev, ENC424J600_SFRX_ECON1L, ENC424J600_ECON1_RXEN);
context->suspended = false;
k_sem_give(&context->tx_rx_sem);
LOG_INF("started");
return 0;
}

/**
 * Suspend the controller: stop reception, drain in-flight RX/TX,
 * put the PHY to sleep and gate the Ethernet logic.
 *
 * @param dev ENC424J600 device
 * @return 0 (also when the device is already suspended)
 */
static int enc424j600_stop_device(const struct device *dev)
{
	struct enc424j600_runtime *context = dev->data;
	uint16_t tmp;

	if (context->suspended) {
		LOG_WRN("Already suspended");
		return 0;
	}

	k_sem_take(&context->tx_rx_sem, K_FOREVER);

	/* Stop accepting new frames first */
	enc424j600_clear_sfru(dev, ENC424J600_SFRX_ECON1L, ENC424J600_ECON1_RXEN);

	/* Wait until any in-progress reception completes */
	do {
		k_sleep(K_MSEC(10U));
		enc424j600_read_sfru(dev, ENC424J600_SFRX_ESTATL, &tmp);
	} while (tmp & ENC424J600_ESTAT_RXBUSY);

	/* Wait until any pending transmission completes */
	do {
		k_sleep(K_MSEC(10U));
		enc424j600_read_sfru(dev, ENC424J600_SFRX_ECON1L, &tmp);
	} while (tmp & ENC424J600_ECON1_TXRTS);

	/* Put the PHY into power-down */
	enc424j600_read_phy(dev, ENC424J600_PSFR_PHCON1, &tmp);
	tmp |= ENC424J600_PHCON1_PSLEEP;
	enc424j600_write_phy(dev, ENC424J600_PSFR_PHCON1, tmp);

	/* Gate the Ethernet logic (reverse of the start path above) */
	enc424j600_clear_sfru(dev, ENC424J600_SFR3_ECON2L,
			      ENC424J600_ECON2_ETHEN | ENC424J600_ECON2_STRCH);

	context->suspended = true;
	k_sem_give(&context->tx_rx_sem);
	LOG_INF("stopped");
	return 0;
}

/* Ethernet L2 driver API hooks for this controller */
static const struct ethernet_api api_funcs = {
	.iface_api.init		= enc424j600_iface_init,
	.get_config		= enc424j600_get_config,
	.get_capabilities	= enc424j600_get_capabilities,
	.send			= enc424j600_tx,
	.start			= enc424j600_start_device,
	.stop			= enc424j600_stop_device,
};

/**
 * One-time probe/initialization: verify the SPI link, reset the chip,
 * program interrupt sources and RX/TX buffer layout, read the factory
 * MAC address, bring up filters and PHY, and start the RX thread.
 *
 * @param dev ENC424J600 device
 * @return 0 on success, -EINVAL on bus/GPIO setup failure, -EIO when the
 *         chip does not respond or fails to reset
 */
static int enc424j600_init(const struct device *dev)
{
	const struct enc424j600_config *config = dev->config;
	struct enc424j600_runtime *context = dev->data;
	uint8_t retries = ENC424J600_DEFAULT_NUMOF_RETRIES;
	uint16_t tmp;

	context->dev = dev;

	/* SPI config */
	if (!spi_is_ready_dt(&config->spi)) {
		LOG_ERR("SPI master port %s not ready", config->spi.bus->name);
		return -EINVAL;
	}
	/* Initialize GPIO */
	if (!gpio_is_ready_dt(&config->interrupt)) {
		LOG_ERR("GPIO port %s not ready", config->interrupt.port->name);
		return -EINVAL;
	}

	if (gpio_pin_configure_dt(&config->interrupt, GPIO_INPUT)) {
		LOG_ERR("Unable to configure GPIO pin %u", config->interrupt.pin);
		return -EINVAL;
	}

	gpio_init_callback(&(context->gpio_cb), enc424j600_gpio_callback,
			   BIT(config->interrupt.pin));

	if (gpio_add_callback(config->interrupt.port, &(context->gpio_cb))) {
		return -EINVAL;
	}

	gpio_pin_interrupt_configure_dt(&config->interrupt, GPIO_INT_EDGE_TO_ACTIVE);

	/* Check SPI connection: EUDAST must read back the written pattern */
	do {
		k_busy_wait(USEC_PER_MSEC * 1U);
		enc424j600_write_sfru(dev, ENC424J600_SFRX_EUDASTL, 0x4AFE);
		enc424j600_read_sfru(dev, ENC424J600_SFRX_EUDASTL, &tmp);
		retries--;
	} while (tmp != 0x4AFE && retries);

	if (tmp != 0x4AFE) {
		LOG_ERR("Timeout, failed to establish SPI connection");
		return -EIO;
	}

	/* Wait for the clock to become ready before resetting */
	retries = ENC424J600_DEFAULT_NUMOF_RETRIES;
	do {
		k_busy_wait(USEC_PER_MSEC * 1U);
		enc424j600_read_sfru(dev, ENC424J600_SFRX_ESTATL, &tmp);
		retries--;
	} while (!(tmp & ENC424J600_ESTAT_CLKRDY) && retries);

	if (!(tmp & ENC424J600_ESTAT_CLKRDY)) {
		LOG_ERR("CLKRDY not set");
		return -EIO;
	}

	/* System reset; afterwards EUDAST must read back as 0 */
	enc424j600_write_sbc(dev, ENC424J600_1BC_SETETHRST);

	k_busy_wait(ENC424J600_PHY_READY_DELAY);
	enc424j600_read_sfru(dev, ENC424J600_SFRX_EUDASTL, &tmp);
	if (tmp) {
		LOG_ERR("Failed to initialize ENC424J600");
		return -EIO;
	}

	/* Disable INTIE and setup interrupt logic */
	enc424j600_write_sfru(dev, ENC424J600_SFR3_EIEL,
			      ENC424J600_EIE_PKTIE | ENC424J600_EIE_LINKIE);
	if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) {
		enc424j600_read_sfru(dev, ENC424J600_SFR3_EIEL, &tmp);
		LOG_DBG("EIE: 0x%04x", tmp);
	}

	/* Configure TX and RX buffer */
	enc424j600_write_sfru(dev, ENC424J600_SFR0_ETXSTL, ENC424J600_TXSTART);
	enc424j600_write_sfru(dev, ENC424J600_SFR0_ERXSTL, ENC424J600_RXSTART);
	enc424j600_write_sfru(dev, ENC424J600_SFR0_ERXTAILL, (ENC424J600_RXEND - 1));
	context->next_pkt_ptr = ENC424J600_RXSTART;

	/* Disable user-defined buffer */
	enc424j600_write_sfru(dev, ENC424J600_SFRX_EUDASTL, (ENC424J600_RXEND - 1));
	enc424j600_write_sfru(dev, ENC424J600_SFRX_EUDANDL, (ENC424J600_RXEND - 1));

	/* Each MAADRn register holds two bytes, low byte first */
	/* read MAC address byte 2 and 1 */
	enc424j600_read_sfru(dev, ENC424J600_SFR3_MAADR1L, &tmp);
	context->mac_address[0] = tmp;
	context->mac_address[1] = tmp >> 8;
	/* read MAC address byte 4 and 3 */
	enc424j600_read_sfru(dev, ENC424J600_SFR3_MAADR2L, &tmp);
	context->mac_address[2] = tmp;
	context->mac_address[3] = tmp >> 8;
	/* read MAC address byte 6 and 5 */
	enc424j600_read_sfru(dev, ENC424J600_SFR3_MAADR3L, &tmp);
	context->mac_address[4] = tmp;
	context->mac_address[5] = tmp >> 8;

	enc424j600_init_filters(dev);
	enc424j600_init_phy(dev);

	/* Enable Reception */
	enc424j600_set_sfru(dev, ENC424J600_SFRX_ECON1L, ENC424J600_ECON1_RXEN);
	if (CONFIG_ETHERNET_LOG_LEVEL == LOG_LEVEL_DBG) {
		enc424j600_read_sfru(dev, ENC424J600_SFRX_ECON1L, &tmp);
		LOG_DBG("ECON1: 0x%04x", tmp);
	}

	/* Start interruption-poll thread */
	k_thread_create(&context->thread, context->thread_stack,
			CONFIG_ETH_ENC424J600_RX_THREAD_STACK_SIZE,
			enc424j600_rx_thread,
			context, NULL, NULL,
			K_PRIO_COOP(CONFIG_ETH_ENC424J600_RX_THREAD_PRIO),
			0, K_NO_WAIT);

	/* Globally enable interrupts only after everything is set up */
	enc424j600_write_sbc(dev, ENC424J600_1BC_SETEIE);

	context->suspended = false;
	LOG_INF("ENC424J600 Initialized");

	return 0;
}

static struct enc424j600_runtime enc424j600_0_runtime = {
	.tx_rx_sem = Z_SEM_INITIALIZER(enc424j600_0_runtime.tx_rx_sem,
				       1, UINT_MAX),
	.int_sem = Z_SEM_INITIALIZER(enc424j600_0_runtime.int_sem,
				     0, UINT_MAX),
};

static const struct enc424j600_config enc424j600_0_config = {
	.spi = SPI_DT_SPEC_INST_GET(0, SPI_WORD_SET(8), 0),
	.interrupt = GPIO_DT_SPEC_INST_GET(0, int_gpios),
	.timeout = CONFIG_ETH_ENC424J600_TIMEOUT,
};

ETH_NET_DEVICE_DT_INST_DEFINE(0, enc424j600_init, NULL,
			      &enc424j600_0_runtime, &enc424j600_0_config,
			      CONFIG_ETH_INIT_PRIORITY, &api_funcs, NET_ETH_MTU);
```
/content/code_sandbox/drivers/ethernet/eth_enc424j600.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
6,870
```unknown # Intel(R) PRO/1000 Gigabit Ethernet driver configuration options menuconfig ETH_E1000 bool "Intel(R) PRO/1000 Gigabit Ethernet driver" default y depends on DT_HAS_INTEL_E1000_ENABLED depends on PCIE help Enable Intel(R) PRO/1000 Gigabit Ethernet driver. if ETH_E1000 config ETH_NIC_MODEL string default "e1000" help Tells what Qemu network model to use. This value is given as a parameter to -nic qemu command line option. config ETH_E1000_VERBOSE_DEBUG bool "Hexdump of the received and sent frames" help Enabling this will turn on the hexdump of the received and sent frames. Do not leave on for production. config ETH_E1000_PTP_CLOCK bool "PTP clock driver support [EXPERIMENTAL]" depends on PTP_CLOCK select EXPERIMENTAL default y help Enable PTP clock support. This is still a dummy that is only used for debugging and currently does not do any real PTP activities. config ETH_E1000_PTP_CLOCK_SRC_HZ int "Frequency of the clock source for the PTP timer" default 50000000 depends on ETH_E1000_PTP_CLOCK help Set the frequency in Hz sourced to the PTP timer. If the value is set properly, the timer will be accurate. endif # ETH_E1000 ```
/content/code_sandbox/drivers/ethernet/Kconfig.e1000
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
318
```unknown # STM32 HAL Ethernet driver configuration options menuconfig ETH_STM32_HAL bool "STM32 HAL Ethernet driver" default y depends on DT_HAS_ST_STM32_ETHERNET_ENABLED select USE_STM32_HAL_ETH select NOCACHE_MEMORY if SOC_SERIES_STM32H7X && CPU_CORTEX_M7 select HWINFO select ETH_DSA_SUPPORT select MDIO if SOC_SERIES_STM32H5X || SOC_SERIES_STM32H7X imply CRC help Enable STM32 HAL based Ethernet driver. It is available for all Ethernet enabled variants of the F2, F4, F7 and H7 series. if ETH_STM32_HAL choice ETH_STM32_HAL_API_VERSION prompt "STM32Cube HAL Ethernet version" config ETH_STM32_HAL_API_V2 bool "Use official STM32Cube HAL driver" depends on SOC_SERIES_STM32H7X || SOC_SERIES_STM32H5X || SOC_SERIES_STM32F4X || SOC_SERIES_STM32F7X help Use the official STM32Cube HAL driver instead of the legacy one. config ETH_STM32_HAL_API_V1 bool "Use legacy STM32Cube HAL driver" depends on SOC_SERIES_STM32F1X || SOC_SERIES_STM32F2X help Driver version based on legacy HAL version as the current official API version. Available only for STM32F1 and STM32F2 SoC series. endchoice config ETH_STM32_HAL_RX_THREAD_STACK_SIZE int "RX thread stack size" default 1500 help RX thread stack size config ETH_STM32_HAL_RX_THREAD_PRIO int "RX thread priority" default 2 help RX thread priority config ETH_STM32_HAL_USE_DTCM_FOR_DMA_BUFFER bool "Use DTCM for DMA buffers" default y depends on SOC_SERIES_STM32F7X help When this option is activated, the buffers for DMA transfer are moved from SRAM to the DTCM (Data Tightly Coupled Memory). config ETH_STM32_HAL_PHY_ADDRESS int "Phy address" default 0 help The phy address to use. config ETH_STM32_HAL_MII bool "Use MII interface" help Use the MII physical interface instead of RMII. config ETH_STM32_CARRIER_CHECK_RX_IDLE_TIMEOUT_MS int "Carrier check timeout period (ms)" default 500 range 100 30000 help Set the RX idle timeout period in milliseconds after which the PHY's carrier status is re-evaluated. 
config ETH_STM32_AUTO_NEGOTIATION_ENABLE bool "Autonegotiation mode" default y help Enable this if using autonegotiation config ETH_STM32_HW_CHECKSUM bool "Use TX and RX hardware checksum" depends on !SOC_SERIES_STM32H5X help Enable receive and transmit checksum offload to enhance throughput performances. See reference manual for more information on this feature. if !ETH_STM32_AUTO_NEGOTIATION_ENABLE config ETH_STM32_SPEED_10M bool "Set speed to 10 Mbps when autonegotiation is disabled" help Set this if using 10 Mbps and when autonegotiation is disabled, otherwise speed is 100 Mbps config ETH_STM32_MODE_HALFDUPLEX bool "Half duplex mode" help Set this if using half duplex when autonegotiation is disabled otherwise duplex mode is full duplex endif # !ETH_STM32_AUTO_NEGOTIATION_ENABLE if SOC_SERIES_STM32F7X || SOC_SERIES_STM32H7X || SOC_SERIES_STM32H5X config PTP_CLOCK_STM32_HAL bool "STM32 HAL PTP clock driver support" default y depends on PTP_CLOCK || NET_L2_PTP help Enable STM32 PTP clock support. config ETH_STM32_HAL_PTP_CLOCK_SRC_HZ int "Frequency of the clock source for the PTP timer" default 50000000 depends on PTP_CLOCK_STM32_HAL help Set the frequency in Hz sourced to the PTP timer. If the value is set properly, the timer will be accurate. config ETH_STM32_HAL_PTP_CLOCK_ADJ_MIN_PCT int "Lower bound of clock frequency adjustment (in percent)" default 90 depends on PTP_CLOCK_STM32_HAL help Specifies lower bound of PTP clock rate adjustment. config ETH_STM32_HAL_PTP_CLOCK_ADJ_MAX_PCT int "Upper bound of clock frequency adjustment (in percent)" default 110 depends on PTP_CLOCK_STM32_HAL help Specifies upper bound of PTP clock rate adjustment. config ETH_STM32_HAL_PTP_CLOCK_INIT_PRIO int default 85 depends on PTP_CLOCK_STM32_HAL help STM32 PTP Clock initialization priority level. There is a dependency from the network stack that this device initializes before network stack (NET_INIT_PRIO). 
endif # SOC_SERIES_STM32F7X || SOC_SERIES_STM32H7X || SOC_SERIES_STM32H5X config ETH_STM32_MULTICAST_FILTER bool "Multicast hash filter support" help Enable support for multicast hash filtering in the MAC. Once enabled the ethernet MAC performs imperfect filtering based on a computed hash of the destination MAC address of the multicast address. Only multicast with the computed hash set in the multicast table will be received and all other multicast is dropped by the MAC. If disabled then all multicast is received by the MAC. endif # ETH_STM32_HAL ```
/content/code_sandbox/drivers/ethernet/Kconfig.stm32_hal
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,266
```c
/*
 * ivshmem Ethernet queue: virtio-style vrings laid out in two shared-memory
 * sections (one per direction), paired with a peer running the same layout.
 */

#include "eth_ivshmem_priv.h"

#include <zephyr/arch/cpu.h>
#include <zephyr/cache.h>
#include <zephyr/kernel.h>

#include <stdatomic.h>
#include <string.h>

/* These defines must match on the peer */
#define ETH_IVSHMEM_VRING_ALIGNMENT 64
#define ETH_IVSHMEM_FRAME_SIZE(len) ROUND_UP(18 + (len), L1_CACHE_BYTES)

/* Shared memory may be non-coherent: flush before the peer reads,
 * invalidate before we read what the peer wrote.
 */
#define VRING_FLUSH(x)		sys_cache_data_flush_range(&(x), sizeof(x))
#define VRING_INVALIDATE(x)	sys_cache_data_invd_range(&(x), sizeof(x))

static int calc_vring_size(
		size_t section_size, uint16_t *vring_desc_len,
		uint32_t *vring_header_size);
static uint32_t tx_buffer_advance(uint32_t max_len, uint32_t *position, uint32_t *len);
static int tx_clean_used(struct eth_ivshmem_queue *q);
static int get_rx_avail_desc_idx(struct eth_ivshmem_queue *q, uint16_t *avail_desc_idx);

/**
 * Initialize the queue over the given TX/RX shared-memory sections.
 *
 * @param q                  Queue state (zeroed here)
 * @param tx_shmem           Base address of our output section
 * @param rx_shmem           Base address of the peer's output section
 * @param shmem_section_size Size of each section in bytes
 * @return 0 on success, negative errno if the section is too small
 */
int eth_ivshmem_queue_init(
		struct eth_ivshmem_queue *q, uintptr_t tx_shmem,
		uintptr_t rx_shmem, size_t shmem_section_size)
{
	memset(q, 0, sizeof(*q));

	uint16_t vring_desc_len;
	uint32_t vring_header_size;
	int res = calc_vring_size(shmem_section_size, &vring_desc_len,
				  &vring_header_size);
	if (res != 0) {
		return res;
	}

	q->desc_max_len = vring_desc_len;
	q->vring_data_max_len = shmem_section_size - vring_header_size;
	q->vring_header_size = vring_header_size;
	q->tx.shmem = (void *)tx_shmem;
	q->rx.shmem = (void *)rx_shmem;

	/* Init vrings */
	vring_init(&q->tx.vring, vring_desc_len, q->tx.shmem,
		   ETH_IVSHMEM_VRING_ALIGNMENT);
	vring_init(&q->rx.vring, vring_desc_len, q->rx.shmem,
		   ETH_IVSHMEM_VRING_ALIGNMENT);

	/* Swap "used" pointers.
	 * This is done so that each peer only ever writes to its output section,
	 * while maintaining vring code consistency elsewhere in this file.
	 */
	struct vring_used *tmp_used = q->tx.vring.used;

	q->tx.vring.used = q->rx.vring.used;
	q->rx.vring.used = tmp_used;

	eth_ivshmem_queue_reset(q);

	return 0;
}

/**
 * Reset all ring indices/cursors and re-link the TX descriptor
 * free list into a circular chain. Clears our output header area.
 */
void eth_ivshmem_queue_reset(struct eth_ivshmem_queue *q)
{
	q->tx.desc_head = 0;
	q->tx.desc_len = 0;
	q->tx.data_head = 0;
	q->tx.data_tail = 0;
	q->tx.data_len = 0;
	q->tx.avail_idx = 0;
	q->tx.used_idx = 0;
	q->tx.pending_data_head = 0;
	q->tx.pending_data_len = 0;
	q->rx.avail_idx = 0;
	q->rx.used_idx = 0;

	memset(q->tx.shmem, 0, q->vring_header_size);

	/* Init TX ring descriptors */
	for (unsigned int i = 0; i < q->tx.vring.num - 1; i++) {
		q->tx.vring.desc[i].next = i + 1;
	}
	q->tx.vring.desc[q->tx.vring.num - 1].next = 0;
}

/**
 * Reserve a TX data region of @p len bytes and fill in the next descriptor.
 * The reservation is provisional until eth_ivshmem_queue_tx_commit_buff().
 *
 * @return 0 with *data pointing at the buffer, -ENOBUFS when descriptors
 *         or data space are exhausted
 */
int eth_ivshmem_queue_tx_get_buff(struct eth_ivshmem_queue *q, void **data, size_t len)
{
	/* Clean used TX buffers */
	int res = tx_clean_used(q);

	if (res != 0) {
		return res;
	}

	if (q->tx.desc_len >= q->desc_max_len) {
		return -ENOBUFS;
	}

	uint32_t head = q->tx.data_head;
	uint32_t consumed_len = len;
	uint32_t new_head = tx_buffer_advance(q->vring_data_max_len, &head, &consumed_len);

	if (q->vring_data_max_len - q->tx.data_len < consumed_len) {
		return -ENOBUFS;
	}

	struct vring_desc *tx_desc = &q->tx.vring.desc[q->tx.desc_head];

	/* Descriptor addresses are offsets from the section base */
	tx_desc->addr = q->vring_header_size + head;
	tx_desc->len = len;
	tx_desc->flags = 0;
	VRING_FLUSH(*tx_desc);

	*data = (uint8_t *)q->tx.shmem + q->vring_header_size + head;

	/* Committed only when tx_commit_buff is called */
	q->tx.pending_data_head = new_head;
	q->tx.pending_data_len = q->tx.data_len + consumed_len;

	return 0;
}

/**
 * Publish the buffer reserved by eth_ivshmem_queue_tx_get_buff() to the
 * peer via the avail ring. Flush-then-fence ordering makes the descriptor
 * visible before the index update.
 *
 * @return 0 on success, -EINVAL if no buffer is pending
 */
int eth_ivshmem_queue_tx_commit_buff(struct eth_ivshmem_queue *q)
{
	/* Ensure that a TX buffer is pending */
	if (q->tx.pending_data_len == 0) {
		return -EINVAL;
	}

	uint16_t desc_head = q->tx.desc_head;

	q->tx.desc_len++;
	q->tx.desc_head = (q->tx.desc_head + 1) % q->desc_max_len;
	q->tx.data_head = q->tx.pending_data_head;
	q->tx.data_len = q->tx.pending_data_len;

	q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len] = desc_head;
	VRING_FLUSH(q->tx.vring.avail->ring[q->tx.avail_idx % q->desc_max_len]);
	/* Ring entry must be visible before the index that announces it */
	atomic_thread_fence(memory_order_seq_cst);

	q->tx.avail_idx++;
	q->tx.vring.avail->idx = q->tx.avail_idx;
	VRING_FLUSH(q->tx.vring.avail->idx);

	q->tx.pending_data_len = 0;
	return 0;
}

/**
 * Peek the next received frame without consuming it.
 *
 * @return 0 with *data/*len set, -EWOULDBLOCK when nothing is pending,
 *         -EINVAL on a malformed descriptor from the peer
 */
int eth_ivshmem_queue_rx(struct eth_ivshmem_queue *q, const void **data, size_t *len)
{
	*data = NULL;
	*len = 0;

	uint16_t avail_desc_idx;
	int res = get_rx_avail_desc_idx(q, &avail_desc_idx);

	if (res != 0) {
		return res;
	}

	struct vring_desc *desc = &q->rx.vring.desc[avail_desc_idx];

	VRING_INVALIDATE(*desc);

	uint64_t offset = desc->addr - q->vring_header_size;
	uint32_t rx_len = desc->len;

	/* Validate peer-supplied offset/length against the data region */
	if (offset > q->vring_data_max_len ||
		rx_len > q->vring_data_max_len ||
		offset > q->vring_data_max_len - rx_len) {
		return -EINVAL;
	}

	*data = (uint8_t *)q->rx.shmem + q->vring_header_size + offset;
	*len = desc->len;

	return 0;
}

/**
 * Return the frame last peeked by eth_ivshmem_queue_rx() to the peer
 * via the used ring, and bump the avail event for notification suppression.
 */
int eth_ivshmem_queue_rx_complete(struct eth_ivshmem_queue *q)
{
	uint16_t avail_desc_idx;
	int res = get_rx_avail_desc_idx(q, &avail_desc_idx);

	if (res != 0) {
		return res;
	}

	uint16_t used_idx = q->rx.used_idx % q->desc_max_len;

	q->rx.used_idx++;
	q->rx.vring.used->ring[used_idx].id = avail_desc_idx;
	q->rx.vring.used->ring[used_idx].len = 1;
	VRING_FLUSH(q->rx.vring.used->ring[used_idx]);
	/* Entry visible before the used index */
	atomic_thread_fence(memory_order_seq_cst);

	q->rx.vring.used->idx = q->rx.used_idx;
	VRING_FLUSH(q->rx.vring.used->idx);
	atomic_thread_fence(memory_order_seq_cst);

	q->rx.avail_idx++;
	vring_avail_event(&q->rx.vring) = q->rx.avail_idx;
	VRING_FLUSH(vring_avail_event(&q->rx.vring));

	return 0;
}

/**
 * Calculates the vring descriptor length and header size.
 * This must match what is calculated by the peer.
 *
 * Picks the largest power-of-two descriptor count (<= 4096) whose header
 * fits in under 1/8 of the section, leaving the rest as frame data.
 */
static int calc_vring_size(
		size_t section_size, uint16_t *vring_desc_len,
		uint32_t *vring_header_size)
{
	static const int eth_min_mtu = 68;
	uint32_t header_size;
	int16_t desc_len;

	for (desc_len = 4096; desc_len > 32; desc_len >>= 1) {
		header_size = vring_size(desc_len, ETH_IVSHMEM_VRING_ALIGNMENT);
		header_size = ROUND_UP(header_size, ETH_IVSHMEM_VRING_ALIGNMENT);
		if (header_size < section_size / 8) {
			break;
		}
	}

	if (header_size > section_size) {
		return -EINVAL;
	}

	uint32_t vring_data_size = section_size - header_size;

	/* Require room for at least a few minimum-size frames */
	if (vring_data_size < 4 * eth_min_mtu) {
		return -EINVAL;
	}

	*vring_desc_len = desc_len;
	*vring_header_size = header_size;

	return 0;
}

/**
 * Advance a cursor through the circular TX data area by the frame-aligned
 * size of *len. If the frame does not fit before the end of the area, the
 * cursor wraps to 0 and the skipped tail is counted into *len.
 *
 * @return the new cursor position after the frame
 */
static uint32_t tx_buffer_advance(uint32_t max_len, uint32_t *position, uint32_t *len)
{
	uint32_t aligned_len = ETH_IVSHMEM_FRAME_SIZE(*len);
	uint32_t contiguous_len = max_len - *position;

	*len = aligned_len;
	if (aligned_len > contiguous_len) {
		/* Wrap back to zero */
		*position = 0;
		*len += contiguous_len;
	}

	return *position + aligned_len;
}

/**
 * Reclaim TX descriptors the peer has returned through the used ring,
 * advancing the data tail in the same order buffers were handed out.
 * Returns -EINVAL if the peer's used entries are inconsistent.
 */
static int tx_clean_used(struct eth_ivshmem_queue *q)
{
	while (true) {
		VRING_INVALIDATE(q->tx.vring.used->idx);
		if (q->tx.used_idx == q->tx.vring.used->idx) {
			break;
		}

		struct vring_used_elem *used = &q->tx.vring.used->ring[
				q->tx.used_idx % q->desc_max_len];

		/* Read the index before the entry it covers */
		atomic_thread_fence(memory_order_seq_cst);
		VRING_INVALIDATE(*used);

		if (used->id >= q->desc_max_len || used->len != 1) {
			return -EINVAL;
		}

		struct vring_desc *desc = &q->tx.vring.desc[used->id];

		uint64_t offset = desc->addr - q->vring_header_size;
		uint32_t len = desc->len;

		uint32_t tail = q->tx.data_tail;
		uint32_t consumed_len = len;
		uint32_t new_tail = tx_buffer_advance(q->vring_data_max_len,
						      &tail, &consumed_len);

		/* Buffers must come back in FIFO order at the current tail */
		if (consumed_len > q->tx.data_len ||
			offset != tail) {
			return -EINVAL;
		}

		q->tx.data_tail = new_tail;
		q->tx.data_len -= consumed_len;
		q->tx.desc_len--;
		q->tx.used_idx++;
	}
	return 0;
}

/**
 * Fetch the descriptor index at our current RX avail cursor.
 *
 * @return 0 on success, -EWOULDBLOCK when the peer has published nothing
 *         new, -EINVAL on an out-of-range descriptor index
 */
static int get_rx_avail_desc_idx(struct eth_ivshmem_queue *q, uint16_t *avail_desc_idx)
{
	/* Order the idx read against earlier ring accesses */
	atomic_thread_fence(memory_order_seq_cst);
	VRING_INVALIDATE(q->rx.vring.avail->idx);

	uint16_t avail_idx = q->rx.vring.avail->idx;

	if (avail_idx == q->rx.avail_idx) {
		return -EWOULDBLOCK;
	}

	VRING_INVALIDATE(q->rx.vring.avail->ring[q->rx.avail_idx % q->desc_max_len]);
	*avail_desc_idx = q->rx.vring.avail->ring[q->rx.avail_idx % q->desc_max_len];
	if (*avail_desc_idx >= q->desc_max_len) {
		return -EINVAL;
	}

	return 0;
}
```
/content/code_sandbox/drivers/ethernet/eth_ivshmem_queue.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,486
```c
/* ENC28J60 Stand-alone Ethernet Controller with SPI
 *
 * Zephyr Ethernet driver: all register access goes over SPI using the
 * chip's RCR/WCR/BFS/BFC/RBM/WBM opcodes; banked registers are selected
 * via ECON1 before access.
 */

#define DT_DRV_COMPAT microchip_enc28j60

#define LOG_MODULE_NAME eth_enc28j60
#define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(LOG_MODULE_NAME);

#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <string.h>
#include <errno.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/net/net_pkt.h>
#include <zephyr/net/net_if.h>
#include <zephyr/net/ethernet.h>
#include <ethernet/eth_stats.h>

#include "eth_enc28j60_priv.h"
#include "eth.h"

/* Busy-wait granularity in microseconds (~10.24 us PHY MII cycle) */
#define D10D24S 11

/* Issue the single-byte system reset command over SPI. */
static int eth_enc28j60_soft_reset(const struct device *dev)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[2] = { ENC28J60_SPI_SC, 0xFF };
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 1,
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};

	return spi_write_dt(&config->spi, &tx);
}

/* Select the register bank encoded in bits 8..11 of reg_addr by
 * read-modify-writing the bank-select bits of ECON1.
 */
static void eth_enc28j60_set_bank(const struct device *dev, uint16_t reg_addr)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[2];
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 2
	};
	const struct spi_buf rx_buf = {
		.buf = buf,
		.len = 2
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};
	const struct spi_buf_set rx = {
		.buffers = &rx_buf,
		.count = 1
	};

	buf[0] = ENC28J60_SPI_RCR | ENC28J60_REG_ECON1;
	buf[1] = 0x0;

	if (!spi_transceive_dt(&config->spi, &tx, &rx)) {
		buf[0] = ENC28J60_SPI_WCR | ENC28J60_REG_ECON1;
		buf[1] = (buf[1] & 0xFC) | ((reg_addr >> 8) & 0x0F);

		spi_write_dt(&config->spi, &tx);
	} else {
		LOG_DBG("%s: Failure while setting bank to 0x%04x", dev->name, reg_addr);
	}
}

/* Write one control register (bank must already be selected). */
static void eth_enc28j60_write_reg(const struct device *dev,
				   uint16_t reg_addr,
				   uint8_t value)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[2];
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 2
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};

	buf[0] = ENC28J60_SPI_WCR | (reg_addr & 0xFF);
	buf[1] = value;

	spi_write_dt(&config->spi, &tx);
}

/* Read one control register. MAC/MII registers (flagged via 0xF000 in
 * reg_addr) clock out a dummy byte first, hence the 3-byte transfer.
 */
static void eth_enc28j60_read_reg(const struct device *dev, uint16_t reg_addr,
				  uint8_t *value)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[3];
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 2
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};
	struct spi_buf rx_buf = {
		.buf = buf,
	};
	const struct spi_buf_set rx = {
		.buffers = &rx_buf,
		.count = 1
	};
	uint8_t rx_size = 2U;

	if (reg_addr & 0xF000) {
		/* MAC/MII register: account for the dummy byte */
		rx_size = 3U;
	}

	rx_buf.len = rx_size;

	buf[0] = ENC28J60_SPI_RCR | (reg_addr & 0xFF);
	buf[1] = 0x0;

	if (!spi_transceive_dt(&config->spi, &tx, &rx)) {
		*value = buf[rx_size - 1];
	} else {
		LOG_DBG("%s: Failure while reading register 0x%04x",
			dev->name, reg_addr);
		*value = 0U;
	}
}

/* Atomically set bits in an ETH register using the BFS opcode. */
static void eth_enc28j60_set_eth_reg(const struct device *dev,
				     uint16_t reg_addr,
				     uint8_t value)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[2];
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 2
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};

	buf[0] = ENC28J60_SPI_BFS | (reg_addr & 0xFF);
	buf[1] = value;

	spi_write_dt(&config->spi, &tx);
}

/* Atomically clear bits in an ETH register using the BFC opcode. */
static void eth_enc28j60_clear_eth_reg(const struct device *dev,
				       uint16_t reg_addr,
				       uint8_t value)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[2];
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 2
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};

	buf[0] = ENC28J60_SPI_BFC | (reg_addr & 0xFF);
	buf[1] = value;

	spi_write_dt(&config->spi, &tx);
}

/* Stream buf_len bytes into chip buffer memory (WBM), splitting the
 * transfer into MAX_BUFFER_LENGTH segments.
 */
static void eth_enc28j60_write_mem(const struct device *dev,
				   uint8_t *data_buffer,
				   uint16_t buf_len)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[1] = { ENC28J60_SPI_WBM };
	struct spi_buf tx_buf[2] = {
		{
			.buf = buf,
			.len = 1
		},
	};
	const struct spi_buf_set tx = {
		.buffers = tx_buf,
		.count = 2
	};
	uint16_t num_segments;
	uint16_t num_remaining;
	int i;

	num_segments = buf_len / MAX_BUFFER_LENGTH;
	num_remaining = buf_len - MAX_BUFFER_LENGTH * num_segments;

	for (i = 0; i < num_segments; i++, data_buffer += MAX_BUFFER_LENGTH) {
		tx_buf[1].buf = data_buffer;
		tx_buf[1].len = MAX_BUFFER_LENGTH;

		if (spi_write_dt(&config->spi, &tx)) {
			LOG_ERR("%s: Failed to write memory", dev->name);
			return;
		}
	}

	if (num_remaining > 0) {
		tx_buf[1].buf = data_buffer;
		tx_buf[1].len = num_remaining;

		if (spi_write_dt(&config->spi, &tx)) {
			LOG_ERR("%s: Failed to write memory", dev->name);
		}
	}
}

/* Stream buf_len bytes out of chip buffer memory (RBM), segmented the
 * same way as eth_enc28j60_write_mem().
 */
static void eth_enc28j60_read_mem(const struct device *dev,
				  uint8_t *data_buffer,
				  uint16_t buf_len)
{
	const struct eth_enc28j60_config *config = dev->config;
	uint8_t buf[1] = { ENC28J60_SPI_RBM };
	const struct spi_buf tx_buf = {
		.buf = buf,
		.len = 1
	};
	const struct spi_buf_set tx = {
		.buffers = &tx_buf,
		.count = 1
	};
	struct spi_buf rx_buf[2] = {
		{
			.buf = NULL,
			.len = 1
		},
	};
	const struct spi_buf_set rx = {
		.buffers = rx_buf,
		.count = 2
	};
	uint16_t num_segments;
	uint16_t num_remaining;
	int i;

	num_segments = buf_len / MAX_BUFFER_LENGTH;
	num_remaining = buf_len - MAX_BUFFER_LENGTH * num_segments;

	for (i = 0; i < num_segments; i++, data_buffer += MAX_BUFFER_LENGTH) {
		rx_buf[1].buf = data_buffer;
		rx_buf[1].len = MAX_BUFFER_LENGTH;

		if (spi_transceive_dt(&config->spi, &tx, &rx)) {
			LOG_ERR("%s: Failed to read memory", dev->name);
			return;
		}
	}

	if (num_remaining > 0) {
		rx_buf[1].buf = data_buffer;
		rx_buf[1].len = num_remaining;

		if (spi_transceive_dt(&config->spi, &tx, &rx)) {
			LOG_ERR("%s: Failed to read memory", dev->name);
		}
	}
}

/* Write a 16-bit PHY register through the MII interface and poll
 * MISTAT.BUSY until the transaction completes.
 */
static void eth_enc28j60_write_phy(const struct device *dev,
				   uint16_t reg_addr,
				   int16_t data)
{
	uint8_t data_mistat;

	eth_enc28j60_set_bank(dev, ENC28J60_REG_MIREGADR);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MIREGADR, reg_addr);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MIWRL, data & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MIWRH, data >> 8);
	eth_enc28j60_set_bank(dev, ENC28J60_REG_MISTAT);

	do {
		/* wait 10.24 useconds */
		k_busy_wait(D10D24S);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_MISTAT,
				      &data_mistat);
	} while ((data_mistat & ENC28J60_BIT_MISTAT_BUSY));
}

/* Read a 16-bit PHY register: start the MII read, poll MISTAT.BUSY,
 * then collect MIRDL/MIRDH.
 */
static void eth_enc28j60_read_phy(const struct device *dev,
				  uint16_t reg_addr,
				  int16_t *data)
{
	uint8_t data_mistat;
	uint8_t lsb;
	uint8_t msb;

	eth_enc28j60_set_bank(dev, ENC28J60_REG_MIREGADR);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MIREGADR, reg_addr);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MICMD,
			       ENC28J60_BIT_MICMD_MIIRD);
	eth_enc28j60_set_bank(dev, ENC28J60_REG_MISTAT);

	do {
		/* wait 10.24 useconds */
		k_busy_wait(D10D24S);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_MISTAT, &data_mistat);
	} while ((data_mistat & ENC28J60_BIT_MISTAT_BUSY));

	eth_enc28j60_set_bank(dev, ENC28J60_REG_MIREGADR);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MICMD, 0x0);

	eth_enc28j60_read_reg(dev, ENC28J60_REG_MIRDL, &lsb);
	eth_enc28j60_read_reg(dev, ENC28J60_REG_MIRDH, &msb);

	*data = (msb << 8) | lsb;
}

/* GPIO ISR: wake the RX thread; all SPI work happens in thread context. */
static void eth_enc28j60_gpio_callback(const struct device *dev,
				       struct gpio_callback *cb,
				       uint32_t pins)
{
	struct eth_enc28j60_runtime *context =
		CONTAINER_OF(cb, struct eth_enc28j60_runtime, gpio_cb);

	k_sem_give(&context->int_sem);
}

/* Program RX/TX buffer boundaries and packet filters, then wait for the
 * oscillator startup timer (CLKRDY) with a bounded timeout.
 */
static int eth_enc28j60_init_buffers(const struct device *dev)
{
	uint8_t data_estat;
	const struct eth_enc28j60_config *config = dev->config;

	/* Reception buffers initialization */
	eth_enc28j60_set_bank(dev, ENC28J60_REG_ERXSTL);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXSTL,
			       ENC28J60_RXSTART & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXSTH,
			       ENC28J60_RXSTART >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXRDPTL,
			       ENC28J60_RXSTART & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXRDPTH,
			       ENC28J60_RXSTART >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXNDL,
			       ENC28J60_RXEND & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXNDH,
			       ENC28J60_RXEND >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXSTL,
			       ENC28J60_TXSTART & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXSTH,
			       ENC28J60_TXSTART >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXNDL,
			       ENC28J60_TXEND & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXNDH,
			       ENC28J60_TXEND >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERDPTL,
			       ENC28J60_RXSTART & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERDPTH,
			       ENC28J60_RXSTART >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_EWRPTL,
			       ENC28J60_TXSTART & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_EWRPTH,
			       ENC28J60_TXSTART >> 8);

	eth_enc28j60_set_bank(dev, ENC28J60_REG_ERXFCON);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXFCON,
			       config->hw_rx_filter);

	/* Waiting for OST */
	/* 32 bits for this timer should be fine, rollover not an issue with initialisation */
	uint32_t start_wait = (uint32_t) k_uptime_get();
	do {
		/* If the CLK isn't ready don't wait forever */
		if ((k_uptime_get_32() - start_wait) >
		    CONFIG_ETH_ENC28J60_CLKRDY_INIT_WAIT_MS) {
			LOG_ERR("OST wait timed out");
			return -ETIMEDOUT;
		}
		/* wait 10.24 useconds */
		k_busy_wait(D10D24S);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_ESTAT, &data_estat);
	} while (!(data_estat & ENC28J60_BIT_ESTAT_CLKRDY));

	return 0;
}

/* Configure the MAC: enable RX and flow control, select duplex mode,
 * set inter-packet gaps and program the MAC address registers.
 */
static void eth_enc28j60_init_mac(const struct device *dev)
{
	const struct eth_enc28j60_config *config = dev->config;
	struct eth_enc28j60_runtime *context = dev->data;
	uint8_t data_macon;

	eth_enc28j60_set_bank(dev, ENC28J60_REG_MACON1);

	/* Set MARXEN to enable MAC to receive frames */
	eth_enc28j60_read_reg(dev, ENC28J60_REG_MACON1, &data_macon);
	data_macon |= ENC28J60_BIT_MACON1_MARXEN | ENC28J60_BIT_MACON1_RXPAUS
		      | ENC28J60_BIT_MACON1_TXPAUS;
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MACON1, data_macon);

	data_macon = ENC28J60_MAC_CONFIG;

	if (config->full_duplex) {
		data_macon |= ENC28J60_BIT_MACON3_FULDPX;
	}

	eth_enc28j60_write_reg(dev, ENC28J60_REG_MACON3, data_macon);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAIPGL, ENC28J60_MAC_NBBIPGL);

	if (config->full_duplex) {
		eth_enc28j60_write_reg(dev, ENC28J60_REG_MAIPGH,
				       ENC28J60_MAC_NBBIPGH);
		eth_enc28j60_write_reg(dev, ENC28J60_REG_MABBIPG,
				       ENC28J60_MAC_BBIPG_FD);
	} else {
		eth_enc28j60_write_reg(dev, ENC28J60_REG_MABBIPG,
				       ENC28J60_MAC_BBIPG_HD);
		/* Half-duplex: defer transmission (MACON4 bit 6) */
		eth_enc28j60_write_reg(dev, ENC28J60_REG_MACON4, 1 << 6);
	}

	/* Configure MAC address */
	eth_enc28j60_set_bank(dev, ENC28J60_REG_MAADR1);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAADR6,
			       context->mac_address[5]);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAADR5,
			       context->mac_address[4]);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAADR4,
			       context->mac_address[3]);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAADR3,
			       context->mac_address[2]);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAADR2,
			       context->mac_address[1]);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_MAADR1,
			       context->mac_address[0]);
}

/* Select the PHY duplex/loopback configuration matching the MAC. */
static void eth_enc28j60_init_phy(const struct device *dev)
{
	const struct eth_enc28j60_config *config = dev->config;

	if (config->full_duplex) {
		eth_enc28j60_write_phy(dev, ENC28J60_PHY_PHCON1,
				       ENC28J60_BIT_PHCON1_PDPXMD);
		eth_enc28j60_write_phy(dev, ENC28J60_PHY_PHCON2, 0x0);
	} else {
		eth_enc28j60_write_phy(dev, ENC28J60_PHY_PHCON1, 0x0);
		eth_enc28j60_write_phy(dev, ENC28J60_PHY_PHCON2,
				       ENC28J60_BIT_PHCON2_HDLDIS);
	}
}

/* Accessor for the bound network interface. */
static struct net_if *get_iface(struct eth_enc28j60_runtime *ctx)
{
	return ctx->iface;
}

/* Ethernet API .send: copy the packet into the chip's TX buffer, start
 * transmission and busy-wait for completion; reports -EIO on TX abort.
 */
static int eth_enc28j60_tx(const struct device *dev, struct net_pkt *pkt)
{
	struct eth_enc28j60_runtime *context = dev->data;
	uint16_t tx_bufaddr = ENC28J60_TXSTART;
	uint16_t len = net_pkt_get_len(pkt);
	uint8_t per_packet_control;
	uint16_t tx_bufaddr_end;
	struct net_buf *frag;
	uint8_t tx_end;

	LOG_DBG("%s: pkt %p (len %u)", dev->name, pkt, len);

	k_sem_take(&context->tx_rx_sem, K_FOREVER);

	/* Latest errata sheet: DS80349C
	 * always reset transmit logic (Errata Issue 12)
	 * the Microchip TCP/IP stack implementation used to first check
	 * whether TXERIF is set and only then reset the transmit logic
	 * but this has been changed in later versions; possibly they
	 * have a reason for this; they don't mention this in the errata
	 * sheet
	 */
	eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_ECON1,
				 ENC28J60_BIT_ECON1_TXRST);
	eth_enc28j60_clear_eth_reg(dev, ENC28J60_REG_ECON1,
				   ENC28J60_BIT_ECON1_TXRST);

	/* Write the buffer content into the transmission buffer */
	eth_enc28j60_set_bank(dev, ENC28J60_REG_ETXSTL);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_EWRPTL, tx_bufaddr & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_EWRPTH, tx_bufaddr >> 8);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXSTL, tx_bufaddr & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXSTH, tx_bufaddr >> 8);

	/* Write the data into the buffer */
	per_packet_control = ENC28J60_PPCTL_BYTE;
	eth_enc28j60_write_mem(dev, &per_packet_control, 1);

	for (frag = pkt->frags; frag; frag = frag->frags) {
		eth_enc28j60_write_mem(dev, frag->data, frag->len);
	}

	tx_bufaddr_end = tx_bufaddr + len;
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXNDL,
			       tx_bufaddr_end & 0xFF);
	eth_enc28j60_write_reg(dev, ENC28J60_REG_ETXNDH, tx_bufaddr_end >> 8);

	/* Signal ENC28J60 to send the buffer */
	eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_ECON1,
				 ENC28J60_BIT_ECON1_TXRTS);

	do {
		/* wait 10.24 useconds */
		k_busy_wait(D10D24S);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_EIR, &tx_end);
		tx_end &= ENC28J60_BIT_EIR_TXIF;
	} while (!tx_end);

	eth_enc28j60_read_reg(dev, ENC28J60_REG_ESTAT, &tx_end);

	k_sem_give(&context->tx_rx_sem);

	if (tx_end & ENC28J60_BIT_ESTAT_TXABRT) {
		LOG_ERR("%s: TX failed!", dev->name);

		/* 12.1.3 "TRANSMIT ERROR INTERRUPT FLAG (TXERIF)" states:
		 *
		 * "After determining the problem and solution, the
		 * host controller should clear the LATECOL (if set) and
		 * TXABRT bits so that future aborts can be detected
		 * accurately."
		 */
		eth_enc28j60_clear_eth_reg(dev, ENC28J60_REG_ESTAT,
					   ENC28J60_BIT_ESTAT_TXABRT
					   | ENC28J60_BIT_ESTAT_LATECOL);

		return -EIO;
	}

	LOG_DBG("%s: Tx successful", dev->name);

	return 0;
}

/* Pull one frame of frm_len bytes from the chip into a freshly allocated
 * net_pkt (spread across fragments), drop the trailing CRC and any odd-
 * length padding byte, and hand the packet to the network stack.
 */
static void enc28j60_read_packet(const struct device *dev, uint16_t frm_len)
{
	const struct eth_enc28j60_config *config = dev->config;
	struct eth_enc28j60_runtime *context = dev->data;
	struct net_buf *pkt_buf;
	struct net_pkt *pkt;
	uint16_t lengthfr;
	uint8_t dummy[4];

	/* Get the frame from the buffer */
	pkt = net_pkt_rx_alloc_with_buffer(get_iface(context), frm_len,
					   AF_UNSPEC, 0,
					   K_MSEC(config->timeout));
	if (!pkt) {
		LOG_ERR("%s: Could not allocate rx buffer", dev->name);
		eth_stats_update_errors_rx(get_iface(context));
		return;
	}

	pkt_buf = pkt->buffer;
	lengthfr = frm_len;

	do {
		size_t frag_len;
		uint8_t *data_ptr;
		size_t spi_frame_len;

		data_ptr = pkt_buf->data;

		/* Review the space available for the new frag */
		frag_len = net_buf_tailroom(pkt_buf);

		if (frm_len > frag_len) {
			spi_frame_len = frag_len;
		} else {
			spi_frame_len = frm_len;
		}

		eth_enc28j60_read_mem(dev, data_ptr, spi_frame_len);

		net_buf_add(pkt_buf, spi_frame_len);

		/* One fragment has been written via SPI */
		frm_len -= spi_frame_len;
		pkt_buf = pkt_buf->frags;
	} while (frm_len > 0);

	/* Let's pop the useless CRC */
	eth_enc28j60_read_mem(dev, dummy, 4);

	/* Pops one padding byte from spi circular buffer
	 * introduced by the device when the frame length is odd
	 */
	if (lengthfr & 0x01) {
		eth_enc28j60_read_mem(dev, dummy, 1);
	}

	net_pkt_set_iface(pkt, context->iface);

	/* Feed buffer frame to IP stack */
	LOG_DBG("%s: Received packet of length %u", dev->name, lengthfr);
	if (net_recv_data(net_pkt_iface(pkt), pkt) < 0) {
		net_pkt_unref(pkt);
	}
}

/* Drain all pending RX frames: for each packet, position the read
 * pointer at its header, parse the next-packet pointer and status
 * vector, read the payload, then free the buffer space and decrement
 * the pending-packet counter.
 */
static int eth_enc28j60_rx(const struct device *dev)
{
	struct eth_enc28j60_runtime *context = dev->data;
	uint8_t counter;

	/* Errata 6. The Receive Packet Pending Interrupt Flag (EIR.PKTIF)
	 * does not reliably/accurately report the status of pending packet.
	 * Use EPKTCNT register instead.
	 */
	eth_enc28j60_set_bank(dev, ENC28J60_REG_EPKTCNT);
	eth_enc28j60_read_reg(dev, ENC28J60_REG_EPKTCNT, &counter);
	if (!counter) {
		return 0;
	}

	k_sem_take(&context->tx_rx_sem, K_FOREVER);

	do {
		uint16_t frm_len = 0U;
		uint8_t info[RSV_SIZE];
		uint16_t next_packet;
		uint8_t rdptl = 0U;
		uint8_t rdpth = 0U;

		/* remove read fifo address to packet header address */
		eth_enc28j60_set_bank(dev, ENC28J60_REG_ERXRDPTL);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_ERXRDPTL, &rdptl);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_ERXRDPTH, &rdpth);
		eth_enc28j60_write_reg(dev, ENC28J60_REG_ERDPTL, rdptl);
		eth_enc28j60_write_reg(dev, ENC28J60_REG_ERDPTH, rdpth);

		/* Read address for next packet */
		eth_enc28j60_read_mem(dev, info, 2);
		next_packet = info[0] | (uint16_t)info[1] << 8;

		/* Errata 14. Even values in ERXRDPT
		 * may corrupt receive buffer. No need adjust next packet
		if (next_packet == 0) {
			next_packet = ENC28J60_RXEND;
		} else if (!(next_packet & 0x01)) {
			next_packet--;
		}*/

		/* Read reception status vector */
		eth_enc28j60_read_mem(dev, info, 4);

		/* Get the frame length from the rx status vector,
		 * minus CRC size at the end which is always present
		 */
		frm_len = sys_get_le16(info) - 4;

		enc28j60_read_packet(dev, frm_len);

		/* Free buffer memory and decrement rx counter */
		eth_enc28j60_set_bank(dev, ENC28J60_REG_ERXRDPTL);
		eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXRDPTL,
				       next_packet & 0xFF);
		eth_enc28j60_write_reg(dev, ENC28J60_REG_ERXRDPTH,
				       next_packet >> 8);
		eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_ECON2,
					 ENC28J60_BIT_ECON2_PKTDEC);

		/* Check if there are frames to clean from the buffer */
		eth_enc28j60_set_bank(dev, ENC28J60_REG_EPKTCNT);
		eth_enc28j60_read_reg(dev, ENC28J60_REG_EPKTCNT, &counter);
	} while (counter);

	k_sem_give(&context->tx_rx_sem);

	return 0;
}

/* RX thread: woken by the GPIO ISR; dispatches packet-pending and
 * link-change interrupts.
 */
static void eth_enc28j60_rx_thread(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	const struct device *dev = p1;
	struct eth_enc28j60_runtime *context = dev->data;
	uint8_t int_stat;

	while (true) {
		k_sem_take(&context->int_sem, K_FOREVER);

		eth_enc28j60_read_reg(dev, ENC28J60_REG_EIR, &int_stat);
		if (int_stat & ENC28J60_BIT_EIR_PKTIF) {
			eth_enc28j60_rx(dev);
			/* Clear rx interruption flag */
			eth_enc28j60_clear_eth_reg(dev, ENC28J60_REG_EIR,
						   ENC28J60_BIT_EIR_PKTIF
						   | ENC28J60_BIT_EIR_RXERIF);
		} else if (int_stat & ENC28J60_BIT_EIR_LINKIF) {
			uint16_t phir;
			uint16_t phstat2;
			/* Clear link change interrupt flag by PHIR reg read */
			eth_enc28j60_read_phy(dev, ENC28J60_PHY_PHIR, &phir);
			eth_enc28j60_read_phy(dev, ENC28J60_PHY_PHSTAT2, &phstat2);
			if (phstat2 & ENC28J60_BIT_PHSTAT2_LSTAT) {
				LOG_INF("%s: Link up", dev->name);
				net_eth_carrier_on(context->iface);
			} else {
				LOG_INF("%s: Link down", dev->name);

				if (context->iface_initialized) {
					net_eth_carrier_off(context->iface);
				}
			}
		}
	}
}

/* Ethernet API .get_capabilities: 10Base-T, plus VLAN when enabled. */
static enum ethernet_hw_caps eth_enc28j60_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_LINK_10BASE_T
#if defined(CONFIG_NET_VLAN)
		| ETHERNET_HW_VLAN
#endif
		;
}

/* Ethernet API .iface_api.init: bind the MAC address and interface,
 * starting with the carrier down until a link-up interrupt arrives.
 */
static void eth_enc28j60_iface_init(struct net_if *iface)
{
	const struct device *dev = net_if_get_device(iface);
	struct eth_enc28j60_runtime *context = dev->data;

	net_if_set_link_addr(iface, context->mac_address,
			     sizeof(context->mac_address),
			     NET_LINK_ETHERNET);

	if (context->iface == NULL) {
		context->iface = iface;
	}

	ethernet_init(iface);

	net_if_carrier_off(iface);
	context->iface_initialized = true;
}

static const struct ethernet_api api_funcs = {
	.iface_api.init		= eth_enc28j60_iface_init,
	.get_capabilities	= eth_enc28j60_get_capabilities,
	.send			= eth_enc28j60_tx,
};

/* NOTE(review): definition below is truncated at the end of this chunk;
 * the remainder of eth_enc28j60_init continues past the visible text.
 */
static int eth_enc28j60_init(const struct device *dev)
{
	const struct eth_enc28j60_config *config = dev->config;
	struct eth_enc28j60_runtime *context = dev->data;

	/* SPI config */
	if (!spi_is_ready_dt(&config->spi)) {
		LOG_ERR("%s: SPI master port %s not ready", dev->name,
			config->spi.bus->name);
		return -EINVAL;
	}

	/* Initialize GPIO */
	if (!gpio_is_ready_dt(&config->interrupt)) {
		LOG_ERR("%s: GPIO port %s not ready", dev->name,
config->interrupt.port->name); return -EINVAL; } if (gpio_pin_configure_dt(&config->interrupt, GPIO_INPUT)) { LOG_ERR("%s: Unable to configure GPIO pin %u", dev->name, config->interrupt.pin); return -EINVAL; } gpio_init_callback(&(context->gpio_cb), eth_enc28j60_gpio_callback, BIT(config->interrupt.pin)); if (gpio_add_callback(config->interrupt.port, &(context->gpio_cb))) { return -EINVAL; } gpio_pin_interrupt_configure_dt(&config->interrupt, GPIO_INT_EDGE_TO_ACTIVE); if (eth_enc28j60_soft_reset(dev)) { LOG_ERR("%s: Soft-reset failed", dev->name); return -EIO; } /* Errata B7/1 */ k_busy_wait(D10D24S); /* Apply a random MAC address if requested in DT */ if (config->random_mac) { gen_random_mac(context->mac_address, MICROCHIP_OUI_B0, MICROCHIP_OUI_B1, MICROCHIP_OUI_B2); LOG_INF("Random MAC Addr %02x:%02x:%02x:%02x:%02x:%02x", context->mac_address[0], context->mac_address[1], context->mac_address[2], context->mac_address[3], context->mac_address[4], context->mac_address[5]); } else { /* Assign octets not previously taken from devicetree */ context->mac_address[0] = MICROCHIP_OUI_B0; context->mac_address[1] = MICROCHIP_OUI_B1; context->mac_address[2] = MICROCHIP_OUI_B2; } if (eth_enc28j60_init_buffers(dev)) { return -ETIMEDOUT; } eth_enc28j60_init_mac(dev); eth_enc28j60_init_phy(dev); /* Enable interruptions */ eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_EIE, ENC28J60_BIT_EIE_INTIE); eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_EIE, ENC28J60_BIT_EIE_PKTIE); eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_EIE, ENC28J60_BIT_EIE_LINKIE); eth_enc28j60_write_phy(dev, ENC28J60_PHY_PHIE, ENC28J60_BIT_PHIE_PGEIE | ENC28J60_BIT_PHIE_PLNKIE); /* Enable Reception */ eth_enc28j60_set_eth_reg(dev, ENC28J60_REG_ECON1, ENC28J60_BIT_ECON1_RXEN); /* Start interruption-poll thread */ k_thread_create(&context->thread, context->thread_stack, CONFIG_ETH_ENC28J60_RX_THREAD_STACK_SIZE, eth_enc28j60_rx_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_ENC28J60_RX_THREAD_PRIO), 0, K_NO_WAIT); 
LOG_INF("%s: Initialized", dev->name); return 0; } #define ENC28J60_DEFINE(inst) \ static struct eth_enc28j60_runtime eth_enc28j60_runtime_##inst = { \ .mac_address = DT_INST_PROP(inst, local_mac_address), \ .tx_rx_sem = \ Z_SEM_INITIALIZER((eth_enc28j60_runtime_##inst).tx_rx_sem, 1, UINT_MAX), \ .int_sem = Z_SEM_INITIALIZER((eth_enc28j60_runtime_##inst).int_sem, 0, UINT_MAX), \ }; \ \ static const struct eth_enc28j60_config eth_enc28j60_config_##inst = { \ .spi = SPI_DT_SPEC_INST_GET(inst, SPI_WORD_SET(8), 0), \ .interrupt = GPIO_DT_SPEC_INST_GET(inst, int_gpios), \ .full_duplex = DT_INST_PROP(0, full_duplex), \ .timeout = CONFIG_ETH_ENC28J60_TIMEOUT, \ .hw_rx_filter = DT_INST_PROP_OR(inst, hw_rx_filter, ENC28J60_RECEIVE_FILTERS), \ .random_mac = DT_INST_PROP(inst, zephyr_random_mac_address), \ }; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(inst, eth_enc28j60_init, NULL, &eth_enc28j60_runtime_##inst, \ &eth_enc28j60_config_##inst, CONFIG_ETH_INIT_PRIORITY, \ &api_funcs, NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(ENC28J60_DEFINE); ```
/content/code_sandbox/drivers/ethernet/eth_enc28j60.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,908
```objective-c /* W5500 Stand-alone Ethernet Controller with SPI * * Author: Parthiban Nallathambi <parthiban@linumiz.com> * */ #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #ifndef _W5500_ #define _W5500_ /* * W5500 common registers */ #define W5500_COMMON_REGS 0x0000 #define W5500_MR 0x0000 /* Mode Register */ #define W5500_GW 0x0001 #define MR_RST 0x80 /* S/W reset */ #define MR_PB 0x10 /* Ping block */ #define MR_AI 0x02 /* Address Auto-Increment */ #define MR_IND 0x01 /* Indirect mode */ #define W5500_SHAR 0x0009 /* Source MAC address */ #define W5500_IR 0x0015 /* Interrupt Register */ #define W5500_COMMON_REGS_LEN 0x0040 #define W5500_PHYCFGR 0x002E /* PHY Configuration register */ #define W5500_Sn_MR 0x0000 /* Sn Mode Register */ #define W5500_Sn_CR 0x0001 /* Sn Command Register */ #define W5500_Sn_IR 0x0002 /* Sn Interrupt Register */ #define W5500_Sn_SR 0x0003 /* Sn Status Register */ #define W5500_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */ #define W5500_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */ #define W5500_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */ #define W5500_Sn_RX_RSR 0x0026 /* Sn Receive free memory size */ #define W5500_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */ #define W5500_S0_REGS 0x10000 #define W5500_S0_MR (W5500_S0_REGS + W5500_Sn_MR) #define S0_MR_MACRAW 0x04 /* MAC RAW mode */ #define S0_MR_MF 0x40 /* MAC Filter for W5500 */ #define W5500_S0_CR (W5500_S0_REGS + W5500_Sn_CR) #define S0_CR_OPEN 0x01 /* OPEN command */ #define S0_CR_CLOSE 0x10 /* CLOSE command */ #define S0_CR_SEND 0x20 /* SEND command */ #define S0_CR_RECV 0x40 /* RECV command */ #define W5500_S0_IR (W5500_S0_REGS + W5500_Sn_IR) #define S0_IR_SENDOK 0x10 /* complete sending */ #define S0_IR_RECV 0x04 /* receiving data */ #define W5500_S0_SR (W5500_S0_REGS + W5500_Sn_SR) #define S0_SR_MACRAW 0x42 /* mac raw mode */ #define W5500_S0_TX_FSR (W5500_S0_REGS + W5500_Sn_TX_FSR) #define W5500_S0_TX_RD (W5500_S0_REGS + 
W5500_Sn_TX_RD) #define W5500_S0_TX_WR (W5500_S0_REGS + W5500_Sn_TX_WR) #define W5500_S0_RX_RSR (W5500_S0_REGS + W5500_Sn_RX_RSR) #define W5500_S0_RX_RD (W5500_S0_REGS + W5500_Sn_RX_RD) #define W5500_S0_IMR (W5500_S0_REGS + W5500_Sn_IMR) #define W5500_S0_MR_MF 7 /* MAC Filter for W5500 */ #define W5500_Sn_REGS_LEN 0x0040 #define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */ #define IR_S0 0x01 #define RTR_DEFAULT 2000 #define W5500_RTR 0x0019 /* Retry Time-value Register */ #define W5500_Sn_RXMEM_SIZE(n) \ (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */ #define W5500_Sn_TXMEM_SIZE(n) \ (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */ #define W5500_Sn_TX_MEM_START 0x20000 #define W5500_TX_MEM_SIZE 0x04000 #define W5500_Sn_RX_MEM_START 0x30000 #define W5500_RX_MEM_SIZE 0x04000 /* Delay for PHY write/read operations (25.6 us) */ #define W5500_PHY_ACCESS_DELAY 26U struct w5500_config { struct spi_dt_spec spi; struct gpio_dt_spec interrupt; struct gpio_dt_spec reset; int32_t timeout; }; struct w5500_runtime { struct net_if *iface; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_W5500_RX_THREAD_STACK_SIZE); struct k_thread thread; uint8_t mac_addr[6]; struct gpio_callback gpio_cb; struct k_sem tx_sem; struct k_sem int_sem; bool link_up; uint8_t buf[NET_ETH_MAX_FRAME_SIZE]; }; #endif /*_W5500_*/ ```
/content/code_sandbox/drivers/ethernet/eth_w5500_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,168
```unknown # ENC424J600 Ethernet driver configuration options menuconfig ETH_ENC424J600 bool "ENC424J600C Ethernet Controller" default y depends on DT_HAS_MICROCHIP_ENC424J600_ENABLED select SPI help ENC424J600C Stand-Alone Ethernet Controller with SPI Interface if ETH_ENC424J600 config ETH_ENC424J600_RX_THREAD_STACK_SIZE int "Stack size for internal incoming packet handler" default 800 help Size of the stack used for internal thread which is ran for incoming packet processing. config ETH_ENC424J600_RX_THREAD_PRIO int "Priority for internal incoming packet handler" default 2 help Priority level for internal thread which is ran for incoming packet processing. config ETH_ENC424J600_TIMEOUT int "IP buffer timeout" default 100 help Given timeout in milliseconds. Maximum amount of time that the driver will wait from the IP stack to get a memory buffer before the Ethernet frame is dropped. endif ```
/content/code_sandbox/drivers/ethernet/Kconfig.enc424j600
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
220
```c /* * */ #define DT_DRV_COMPAT nxp_s32_netc_psi #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_eth_psi); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/mbox.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/phy.h> #include <ethernet/eth_stats.h> #include <soc.h> #include <Netc_Eth_Ip.h> #include <Netc_Eth_Ip_Irq.h> #include <Netc_EthSwt_Ip.h> #include "eth.h" #include "eth_nxp_s32_netc_priv.h" #define TX_RING_IDX 1 #define RX_RING_IDX 0 static void nxp_s32_eth_configure_port(uint8_t port_idx, enum phy_link_speed speed) { EthTrcv_BaudRateType baudrate; Netc_EthSwt_Ip_PortDuplexType duplex; Std_ReturnType status; (void)Netc_EthSwt_Ip_SetPortMode(NETC_SWITCH_IDX, port_idx, false); baudrate = PHY_TO_NETC_SPEED(speed); status = Netc_EthSwt_Ip_SetPortSpeed(NETC_SWITCH_IDX, port_idx, baudrate); if (status != E_OK) { LOG_ERR("Failed to set port %d speed: %d", port_idx, status); return; } duplex = PHY_TO_NETC_DUPLEX_MODE(speed); status = Netc_EthSwt_Ip_SetPortMacLayerDuplexMode(NETC_SWITCH_IDX, port_idx, duplex); if (status != E_OK) { LOG_ERR("Failed to set port %d duplex mode: %d", port_idx, status); return; } (void)Netc_EthSwt_Ip_SetPortMode(NETC_SWITCH_IDX, port_idx, true); } static void phy_link_state_changed(const struct device *pdev, struct phy_link_state *state, void *user_data) { const struct device *dev = (struct device *)user_data; const struct nxp_s32_eth_config *cfg = dev->config; const struct nxp_s32_eth_data *ctx = dev->data; ARG_UNUSED(pdev); if (state->is_up) { LOG_DBG("Link up"); nxp_s32_eth_configure_port(cfg->port_idx, state->speed); net_eth_carrier_on(ctx->iface); } else { LOG_DBG("Link down"); net_eth_carrier_off(ctx->iface); } } static const struct device *nxp_s32_eth_get_phy(const struct device *dev) { const struct nxp_s32_eth_config *cfg = dev->config; 
return cfg->phy_dev; } /* Configure ETHx_EXT_RX_CLK @ 125 MHz as source of ETH_x_RGMII_RX_CLK */ static int nxp_s32_eth_configure_cgm(uint8_t port_idx) { uint32_t tout = 0xFFFFFFFF; if (port_idx == 0) { IP_MC_CGM_1->MUX_7_CSC = (IP_MC_CGM_1->MUX_7_CSC & ~MC_CGM_MUX_7_CSC_SELCTL_MASK) | MC_CGM_MUX_7_CSC_SELCTL(NETC_ETH_0_RX_CLK_IDX); IP_MC_CGM_1->MUX_7_CSC = (IP_MC_CGM_1->MUX_7_CSC & ~MC_CGM_MUX_7_CSC_CLK_SW_MASK) | MC_CGM_MUX_7_CSC_CLK_SW(1); while (((IP_MC_CGM_1->MUX_7_CSS & MC_CGM_MUX_7_CSS_CLK_SW_MASK) == 0) && (tout > 0)) { tout--; } while (((IP_MC_CGM_1->MUX_7_CSS & MC_CGM_MUX_7_CSS_SWIP_MASK) != 0) && (tout > 0)) { tout--; } while (((IP_MC_CGM_1->MUX_7_CSS & MC_CGM_MUX_7_CSS_SWTRG_MASK) >> MC_CGM_MUX_7_CSS_SWTRG_SHIFT != 1) && (tout > 0)) { tout--; } __ASSERT_NO_MSG(((IP_MC_CGM_1->MUX_7_CSS & MC_CGM_MUX_7_CSS_SELSTAT_MASK) >> MC_CGM_MUX_7_CSS_SELSTAT_SHIFT) == NETC_ETH_0_RX_CLK_IDX); } else if (port_idx == 1) { IP_MC_CGM_1->MUX_9_CSC = (IP_MC_CGM_1->MUX_9_CSC & ~MC_CGM_MUX_9_CSC_SELCTL_MASK) | MC_CGM_MUX_9_CSC_SELCTL(NETC_ETH_1_RX_CLK_IDX); IP_MC_CGM_1->MUX_9_CSC = (IP_MC_CGM_1->MUX_9_CSC & ~MC_CGM_MUX_9_CSC_CLK_SW_MASK) | MC_CGM_MUX_9_CSC_CLK_SW(1); while (((IP_MC_CGM_1->MUX_9_CSS & MC_CGM_MUX_9_CSS_CLK_SW_MASK) == 0) && (tout > 0)) { tout--; } while (((IP_MC_CGM_1->MUX_9_CSS & MC_CGM_MUX_9_CSS_SWIP_MASK) != 0) && (tout > 0)) { tout--; } while (((IP_MC_CGM_1->MUX_9_CSS & MC_CGM_MUX_9_CSS_SWTRG_MASK) >> MC_CGM_MUX_9_CSS_SWTRG_SHIFT != 1) && (tout > 0)) { tout--; } __ASSERT_NO_MSG(((IP_MC_CGM_1->MUX_9_CSS & MC_CGM_MUX_9_CSS_SELSTAT_MASK) >> MC_CGM_MUX_9_CSS_SELSTAT_SHIFT) == NETC_ETH_1_RX_CLK_IDX); } else { return -EINVAL; } return 0; } static int nxp_s32_eth_initialize(const struct device *dev) { const struct nxp_s32_eth_config *cfg = dev->config; int err; err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { return err; } err = nxp_s32_eth_configure_cgm(cfg->port_idx); if (err != 0) { LOG_ERR("Failed to configure NETC Switch CGM"); 
return -EIO; } return nxp_s32_eth_initialize_common(dev); } static void nxp_s32_eth_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct nxp_s32_eth_data *ctx = dev->data; const struct nxp_s32_eth_config *cfg = dev->config; const struct nxp_s32_eth_msix *msix; /* * For VLAN, this value is only used to get the correct L2 driver. * The iface pointer in context should contain the main interface * if the VLANs are enabled. */ if (ctx->iface == NULL) { ctx->iface = iface; } Netc_Eth_Ip_SetMacAddr(cfg->si_idx, (const uint8_t *)ctx->mac_addr); net_if_set_link_addr(iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); LOG_INF("SI%d MAC: %02x:%02x:%02x:%02x:%02x:%02x", cfg->si_idx, ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2], ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]); ethernet_init(iface); /* * PSI controls the PHY. If PHY is configured either as fixed * link or autoneg, the callback is executed at least once * immediately after setting it. 
*/ if (!device_is_ready(cfg->phy_dev)) { LOG_ERR("PHY device (%p) is not ready, cannot init iface", cfg->phy_dev); return; } phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed, (void *)dev); /* Do not start the interface until PHY link is up */ net_if_carrier_off(iface); for (int i = 0; i < NETC_MSIX_EVENTS_COUNT; i++) { msix = &cfg->msix[i]; if (mbox_is_ready_dt(&msix->mbox_spec)) { if (mbox_set_enabled_dt(&msix->mbox_spec, true)) { LOG_ERR("Failed to enable MRU channel %u", msix->mbox_spec.channel_id); } } } } static const struct ethernet_api nxp_s32_eth_api = { .iface_api.init = nxp_s32_eth_iface_init, .get_capabilities = nxp_s32_eth_get_capabilities, .get_phy = nxp_s32_eth_get_phy, .set_config = nxp_s32_eth_set_config, .send = nxp_s32_eth_tx }; BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(nxp_s32_netc_psi) == 1, "Only one PSI enabled supported"); #define NETC_VSI_GENERAL_CFG(node, prop, idx) \ [DT_PROP_BY_IDX(node, prop, idx)] = { \ .siId = DT_PROP_BY_IDX(node, prop, idx), \ .enableSi = true, \ .NumberOfRxBDR = 1, \ .NumberOfTxBDR = 1, \ .SIVlanControl = (NETC_F3_PSICFGR0_SIVC_CVLAN_BIT \ | NETC_F3_PSICFGR0_SIVC_SVLAN_BIT), \ .changeMACAllowed = true, \ .hashFilterUpdateAllowed = true, \ IF_ENABLED(CONFIG_NET_PROMISCUOUS_MODE, \ (.multicastPromiscuousChangeAllowed = true,)) \ } #define NETC_VSI_RX_MSG_BUF(node, prop, idx, n) \ BUILD_ASSERT((DT_PROP_BY_IDX(node, prop, idx) > NETC_ETH_IP_PSI_INDEX) \ && (DT_PROP_BY_IDX(node, prop, idx) <= FEATURE_NETC_ETH_NUM_OF_VIRTUAL_CTRLS), \ "Invalid VSI index"); \ static Netc_Eth_Ip_VsiToPsiMsgType \ _CONCAT3(nxp_s32_eth##n##_vsi, DT_PROP_BY_IDX(node, prop, idx), _rx_msg_buf) \ __aligned(FEATURE_NETC_ETH_VSI_MSG_ALIGNMENT) #define NETC_VSI_RX_MSG_BUF_ARRAY(node, prop, idx, n) \ [DT_PROP_BY_IDX(node, prop, idx) - 1] = \ &_CONCAT3(nxp_s32_eth##n##_vsi, DT_PROP_BY_IDX(node, prop, idx), _rx_msg_buf) #define NETC_SWITCH_PORT_CFG(_, n) \ { \ .ePort = &nxp_s32_eth##n##_switch_port_egress_cfg, \ .iPort = 
&nxp_s32_eth##n##_switch_port_ingress_cfg, \ .EthSwtPortMacLayerPortEnable = true, \ .EthSwtPortMacLayerSpeed = ETHTRCV_BAUD_RATE_1000MBIT, \ .EthSwtPortMacLayerDuplexMode = NETC_ETHSWT_PORT_FULL_DUPLEX, \ .EthSwtPortPhysicalLayerType = NETC_ETHSWT_RGMII_MODE, \ .EthSwtPortPruningEnable = true, \ } #define PHY_NODE(n) DT_INST_PHANDLE(n, phy_handle) #define INIT_VSIS(n) DT_INST_NODE_HAS_PROP(n, vsis) #define NETC_PSI_INSTANCE_DEFINE(n) \ void nxp_s32_eth_psi##n##_rx_event(uint8_t chan, const uint32 *buf, uint8_t buf_size) \ { \ ARG_UNUSED(chan); \ ARG_UNUSED(buf); \ ARG_UNUSED(buf_size); \ \ Netc_Eth_Ip_MSIX_Rx(NETC_SI_NXP_S32_HW_INSTANCE(n)); \ } \ \ static void nxp_s32_eth##n##_rx_callback(const uint8_t unused, const uint8_t ring) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ const struct nxp_s32_eth_config *cfg = dev->config; \ struct nxp_s32_eth_data *ctx = dev->data; \ \ ARG_UNUSED(unused); \ \ if (ring == cfg->rx_ring_idx) { \ k_sem_give(&ctx->rx_sem); \ } \ } \ \ static __nocache Netc_Eth_Ip_StateType nxp_s32_eth##n##_state; \ static __nocache Netc_Eth_Ip_MACFilterHashTableEntryType \ nxp_s32_eth##n##_mac_filter_hash_table[CONFIG_ETH_NXP_S32_MAC_FILTER_TABLE_SIZE]; \ \ NETC_TX_RING(n, 0, NETC_MIN_RING_LEN, NETC_MIN_RING_BUF_SIZE); \ NETC_TX_RING(n, TX_RING_IDX, \ CONFIG_ETH_NXP_S32_TX_RING_LEN, CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE); \ NETC_RX_RING(n, RX_RING_IDX, \ CONFIG_ETH_NXP_S32_RX_RING_LEN, CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE); \ \ static const Netc_Eth_Ip_RxRingConfigType nxp_s32_eth##n##_rxring_cfg[1] = { \ { \ .RingDesc = nxp_s32_eth##n##_rxring0_desc, \ .Buffer = nxp_s32_eth##n##_rxring0_buf, \ .ringSize = CONFIG_ETH_NXP_S32_RX_RING_LEN, \ .maxRingSize = CONFIG_ETH_NXP_S32_RX_RING_LEN, \ .bufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .maxBuffLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .TimerThreshold = CONFIG_ETH_NXP_S32_RX_IRQ_TIMER_THRESHOLD, \ .PacketsThreshold = CONFIG_ETH_NXP_S32_RX_IRQ_PACKET_THRESHOLD, \ .Callback = 
nxp_s32_eth##n##_rx_callback, \ } \ }; \ \ static const Netc_Eth_Ip_TxRingConfigType nxp_s32_eth##n##_txring_cfg[2] = { \ { \ .RingDesc = nxp_s32_eth##n##_txring0_desc, \ .Buffer = nxp_s32_eth##n##_txring0_buf, \ .ringSize = NETC_MIN_RING_LEN, \ .maxRingSize = NETC_MIN_RING_LEN, \ .bufferLen = NETC_MIN_RING_BUF_SIZE, \ .maxBuffLen = NETC_MIN_RING_BUF_SIZE, \ }, \ { \ .RingDesc = nxp_s32_eth##n##_txring1_desc, \ .Buffer = nxp_s32_eth##n##_txring1_buf, \ .ringSize = CONFIG_ETH_NXP_S32_TX_RING_LEN, \ .maxRingSize = CONFIG_ETH_NXP_S32_TX_RING_LEN, \ .bufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .maxBuffLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ } \ }; \ \ static const Netc_Eth_Ip_GeneralSIConfigType \ nxp_s32_eth##n##_psi_cfg[FEATURE_NETC_ETH_NUMBER_OF_CTRLS] = { \ [NETC_SI_NXP_S32_HW_INSTANCE(n)] = { \ .siId = NETC_SI_NXP_S32_HW_INSTANCE(n), \ .enableSi = true, \ .NumberOfRxBDR = 1, \ .NumberOfTxBDR = 2, \ .SIVlanControl = (NETC_F3_PSICFGR0_SIVC_CVLAN_BIT \ | NETC_F3_PSICFGR0_SIVC_SVLAN_BIT), \ .changeMACAllowed = true, \ .hashFilterUpdateAllowed = true, \ IF_ENABLED(CONFIG_NET_PROMISCUOUS_MODE, \ (.multicastPromiscuousChangeAllowed = true,)) \ }, \ COND_CODE_1(INIT_VSIS(n), \ (DT_INST_FOREACH_PROP_ELEM_SEP(n, vsis, NETC_VSI_GENERAL_CFG, (,))), \ (EMPTY)) \ }; \ \ COND_CODE_1(INIT_VSIS(n), \ (DT_INST_FOREACH_PROP_ELEM_SEP_VARGS(n, vsis, NETC_VSI_RX_MSG_BUF, (;), n)), \ (EMPTY)); \ \ static const Netc_Eth_Ip_EnetcGeneralConfigType nxp_s32_eth##n##_enetc_general_cfg = { \ .numberOfConfiguredSis = FEATURE_NETC_ETH_NUMBER_OF_CTRLS, \ .stationInterfaceGeneralConfig = &nxp_s32_eth##n##_psi_cfg, \ IF_ENABLED(CONFIG_NET_PROMISCUOUS_MODE, \ (.maskMACPromiscuousMulticastEnable = (uint16_t)true, \ .maskMACPromiscuousUnicastEnable = (uint16_t)true,)) \ .RxVsiMsgCmdToPsi = { \ COND_CODE_1(INIT_VSIS(n), \ (DT_INST_FOREACH_PROP_ELEM_SEP_VARGS(n, vsis, \ NETC_VSI_RX_MSG_BUF_ARRAY, (,), n)), \ (EMPTY)) \ }, \ .maskMACVLANPromiscuousEnable = (uint16)0x3U, \ 
.maskVLANAllowUntaggedEnable = (uint32)0x30000U, \ }; \ \ static const Netc_Eth_Ip_StationInterfaceConfigType nxp_s32_eth##n##_si_cfg = { \ .NumberOfRxBDR = 1, \ .NumberOfTxBDR = 2, \ .txMruMailboxAddr = NULL, \ .rxMruMailboxAddr = (uint32 *)MRU_MBOX_ADDR(DT_DRV_INST(n), rx), \ .siMsgMruMailboxAddr = COND_CODE_1(INIT_VSIS(n), \ ((uint32 *)MRU_MBOX_ADDR(DT_DRV_INST(n), vsi_msg)), (NULL)), \ .EnableSIMsgInterrupt = true, \ .RxInterrupts = (uint32_t)true, \ .TxInterrupts = (uint32_t)false, \ .MACFilterTableMaxNumOfEntries = CONFIG_ETH_NXP_S32_MAC_FILTER_TABLE_SIZE, \ }; \ \ static uint8_t nxp_s32_eth##n##_switch_vlandr2dei_cfg[NETC_ETHSWT_IP_NUMBER_OF_DR]; \ static Netc_EthSwt_Ip_PortIngressType nxp_s32_eth##n##_switch_port_ingress_cfg; \ static Netc_EthSwt_Ip_PortEgressType nxp_s32_eth##n##_switch_port_egress_cfg = { \ .vlanDrToDei = &nxp_s32_eth##n##_switch_vlandr2dei_cfg, \ }; \ static Netc_EthSwt_Ip_PortType \ nxp_s32_eth##n##_switch_ports_cfg[NETC_ETHSWT_IP_NUMBER_OF_PORTS] = { \ LISTIFY(NETC_ETHSWT_IP_NUMBER_OF_PORTS, NETC_SWITCH_PORT_CFG, (,), n) \ }; \ \ static const Netc_EthSwt_Ip_ConfigType nxp_s32_eth##n##_switch_cfg = { \ .port = &nxp_s32_eth##n##_switch_ports_cfg, \ .EthSwtArlTableEntryTimeout = NETC_SWITCH_PORT_AGING, \ .netcClockFrequency = DT_INST_PROP(n, clock_frequency), \ .MacLearningOption = ETHSWT_MACLEARNINGOPTION_HWDISABLED, \ .MacForwardingOption = ETHSWT_NO_FDB_LOOKUP_FLOOD_FRAME, \ .Timer1588ClkSrc = ETHSWT_REFERENCE_CLOCK_DISABLED, \ }; \ \ PINCTRL_DT_INST_DEFINE(n); \ \ NETC_GENERATE_MAC_ADDRESS(n) \ \ static const struct nxp_s32_eth_config nxp_s32_eth##n##_config = { \ .netc_cfg = { \ .SiType = NETC_ETH_IP_PHYSICAL_SI, \ .siConfig = &nxp_s32_eth##n##_si_cfg, \ .generalConfig = &nxp_s32_eth##n##_enetc_general_cfg, \ .stateStructure = &nxp_s32_eth##n##_state, \ .paCtrlRxRingConfig = &nxp_s32_eth##n##_rxring_cfg, \ .paCtrlTxRingConfig = &nxp_s32_eth##n##_txring_cfg, \ }, \ .si_idx = NETC_SI_NXP_S32_HW_INSTANCE(n), \ .port_idx = 
NETC_SWITCH_PORT_IDX, \ .tx_ring_idx = TX_RING_IDX, \ .rx_ring_idx = RX_RING_IDX, \ .msix = { \ NETC_MSIX(DT_DRV_INST(n), rx, nxp_s32_eth_psi##n##_rx_event), \ COND_CODE_1(INIT_VSIS(n), \ (NETC_MSIX(DT_DRV_INST(n), vsi_msg, Netc_Eth_Ip_MSIX_SIMsgEvent)),\ (EMPTY)) \ }, \ .mac_filter_hash_table = &nxp_s32_eth##n##_mac_filter_hash_table[0], \ .generate_mac = nxp_s32_eth##n##_generate_mac, \ .phy_dev = DEVICE_DT_GET(PHY_NODE(n)), \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ }; \ \ static struct nxp_s32_eth_data nxp_s32_eth##n##_data = { \ .mac_addr = DT_INST_PROP_OR(n, local_mac_address, {0}), \ }; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(n, \ nxp_s32_eth_initialize, \ NULL, \ &nxp_s32_eth##n##_data, \ &nxp_s32_eth##n##_config, \ CONFIG_ETH_INIT_PRIORITY, \ &nxp_s32_eth_api, \ NET_ETH_MTU); \ DT_INST_FOREACH_STATUS_OKAY(NETC_PSI_INSTANCE_DEFINE) static int nxp_s32_eth_switch_init(void) { Std_ReturnType swt_status; swt_status = Netc_EthSwt_Ip_Init(NETC_SWITCH_IDX, &nxp_s32_eth0_switch_cfg); if (swt_status != E_OK) { LOG_ERR("Failed to initialize NETC Switch %d (%d)", NETC_SWITCH_IDX, swt_status); return -EIO; } return 0; } /* * NETC Switch driver must be initialized before any other NETC component. * This is because Netc_EthSwt_Ip_Init() will not only initialize the Switch, * but also perform global initialization, enable the PCIe functions for MDIO * and ENETC, and initialize MDIO with a fixed configuration. */ SYS_INIT(nxp_s32_eth_switch_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT); ```
/content/code_sandbox/drivers/ethernet/eth_nxp_s32_netc_psi.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,576
```unknown # Atmel SAM Ethernet (GMAC) driver configuration options menuconfig ETH_SAM_GMAC bool "Atmel SAM Ethernet driver" default y depends on NET_BUF_FIXED_DATA_SIZE depends on DT_HAS_ATMEL_SAM_GMAC_ENABLED || \ DT_HAS_ATMEL_SAM0_GMAC_ENABLED select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT select MDIO select ETH_DSA_SUPPORT help Enable Atmel SAM MCU Family Ethernet driver. if ETH_SAM_GMAC # Workaround for not being able to have commas in macro arguments DT_ETH_SAM_GMAC_PATH := $(dt_nodelabel_path,gmac) # Just for readability, to keep the following lines shorter. DT_ETH_SAM_GMAC_NQ := $(dt_node_int_prop_int,$(DT_ETH_SAM_GMAC_PATH),num-queues) config ETH_SAM_GMAC_QUEUES int "Number of active hardware TX and RX queues" default 1 range 1 $(DT_ETH_SAM_GMAC_NQ) if SOC_SERIES_SAME70 || \ SOC_SERIES_SAMV71 || \ SOC_SERIES_SAM4E || \ SOC_SERIES_SAME54 help Select the number of hardware queues used by the driver. Packets will be routed to appropriate queues based on their priority. config ETH_SAM_GMAC_FORCE_QUEUE bool "Force all traffic to be routed through a specific queue" depends on ETH_SAM_GMAC_QUEUES > 1 depends on NET_TC_RX_COUNT < 5 help This option is meant to be used only for debugging. Use it to force all traffic to be routed through a specific hardware queue. With this enabled it is easier to verify whether the chosen hardware queue actually works. This works only if there are four or fewer RX traffic classes enabled, as the SAM GMAC hardware supports screening up to four traffic classes. config ETH_SAM_GMAC_FORCED_QUEUE int "Queue to force the packets to" depends on ETH_SAM_GMAC_FORCE_QUEUE default 0 range 0 1 if ETH_SAM_GMAC_QUEUES = 2 range 0 2 if ETH_SAM_GMAC_QUEUES = 3 range 0 3 if ETH_SAM_GMAC_QUEUES = 4 range 0 4 if ETH_SAM_GMAC_QUEUES = 5 range 0 5 if ETH_SAM_GMAC_QUEUES = 6 help Which queue to force the routing to. This affects both the TX and RX queues setup. 
config ETH_SAM_GMAC_BUF_RX_COUNT int "Network RX buffers preallocated by the SAM ETH driver" default 12 help Number of network buffers that will be permanently allocated by the Ethernet driver. These buffers are used in receive path. They are preallocated by the driver and made available to the GMAC module to be filled in with incoming data. Their number has to be large enough to fit at least one complete Ethernet frame. SAM ETH driver will always allocate that amount of buffers for itself thus reducing the NET_BUF_RX_COUNT which is a total amount of RX data buffers used by the whole networking stack. One has to ensure that NET_PKT_RX_COUNT is large enough to fit at least two Ethernet frames: one being received by the GMAC module and the other being processed by the higher layer networking stack. config ETH_SAM_GMAC_MAC_I2C_EEPROM bool "Read from an I2C EEPROM" help Read MAC address from an I2C EEPROM. if ETH_SAM_GMAC_MAC_I2C_EEPROM config ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS hex "I2C EEPROM internal address" range 0 0xffffffff help Internal address of the EEPROM chip where the MAC address is stored. Chips with 1 to 4 byte internal address size are supported. Address size has to be configured in a separate Kconfig option. config ETH_SAM_GMAC_MAC_I2C_INT_ADDRESS_SIZE int "I2C EEPROM internal address size" default 1 range 1 4 help Size (in bytes) of the internal EEPROM address. endif # ETH_SAM_GMAC_MAC_I2C_EEPROM config PTP_CLOCK_SAM_GMAC bool "SAM GMAC PTP clock driver support" default y depends on PTP_CLOCK help Enable SAM GMAC PTP Clock support. endif # ETH_SAM_GMAC ```
/content/code_sandbox/drivers/ethernet/Kconfig.sam_gmac
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
987
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_SAM0_GMAC_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_SAM0_GMAC_H_ /* * Map the SAM-family DFP GMAC register names to the SAM0-family DFP GMAC * register names. */ #define GMAC_NCR NCR.reg #define GMAC_NCFGR NCFGR.reg #define GMAC_NSR NSR.reg #define GMAC_UR UR.reg #define GMAC_DCFGR DCFGR.reg #define GMAC_TSR TSR.reg #define GMAC_RBQB RBQB.reg #define GMAC_TBQB TBQB.reg #define GMAC_RSR RSR.reg #define GMAC_ISR ISR.reg #define GMAC_IER IER.reg #define GMAC_IDR IDR.reg #define GMAC_IMR IMR.reg #define GMAC_MAN MAN.reg #define GMAC_RPQ RPQ.reg #define GMAC_TPQ TPQ.reg #define GMAC_TPSF TPSF.reg #define GMAC_RPSF RPSF.reg #define GMAC_RJFML RJFML.reg #define GMAC_HRB HRB.reg #define GMAC_HRT HRT.reg #define GMAC_SA Sa #define GMAC_WOL WOL.reg #define GMAC_IPGS IPGS.reg #define GMAC_SVLAN SVLAN.reg #define GMAC_TPFCP TPFCP.reg #define GMAC_SAMB1 SAMB1.reg #define GMAC_SAMT1 SAMT1.reg #define GMAC_NSC NSC.reg #define GMAC_SCL SCL.reg #define GMAC_SCH SCH.reg #define GMAC_EFTSH EFTSH.reg #define GMAC_EFRSH EFRSH.reg #define GMAC_PEFTSH PEFTSH.reg #define GMAC_PEFRSH PEFRSH.reg #define GMAC_OTLO OTLO.reg #define GMAC_OTHI OTHI.reg #define GMAC_FT FT.reg #define GMAC_BCFT BCFT.reg #define GMAC_MFT MFT.reg #define GMAC_PFT PFT.reg #define GMAC_BFT64 BFT64.reg #define GMAC_TBFT127 TBFT127.reg #define GMAC_TBFT255 TBFT255.reg #define GMAC_TBFT511 TBFT511.reg #define GMAC_TBFT1023 TBFT1023.reg #define GMAC_TBFT1518 TBFT1518.reg #define GMAC_GTBFT1518 GTBFT1518.reg #define GMAC_TUR TUR.reg #define GMAC_SCF SCF.reg #define GMAC_MCF MCF.reg #define GMAC_EC EC.reg #define GMAC_LC LC.reg #define GMAC_DTF DTF.reg #define GMAC_CSE CSE.reg #define GMAC_ORLO ORLO.reg #define GMAC_ORHI ORHI.reg #define GMAC_FR FR.reg #define GMAC_BCFR BCFR.reg #define GMAC_MFR MFR.reg #define GMAC_PFR PFR.reg #define GMAC_BFR64 BFR64.reg #define GMAC_TBFR127 TBFR127.reg #define GMAC_TBFR255 TBFR255.reg #define GMAC_TBFR511 TBFR511.reg #define 
GMAC_TBFR1023 TBFR1023.reg #define GMAC_TBFR1518 TBFR1518.reg #define GMAC_TMXBFR TMXBFR.reg #define GMAC_UFR UFR.reg #define GMAC_OFR OFR.reg #define GMAC_JR JR.reg #define GMAC_FCSE FCSE.reg #define GMAC_LFFE LFFE.reg #define GMAC_RSE RSE.reg #define GMAC_AE AE.reg #define GMAC_RRE RRE.reg #define GMAC_ROE ROE.reg #define GMAC_IHCE IHCE.reg #define GMAC_TCE TCE.reg #define GMAC_UCE UCE.reg #define GMAC_TISUBN TISUBN.reg #define GMAC_TSH TSH.reg #define GMAC_TSSSL TSSSL.reg #define GMAC_TSSN TSSN.reg #define GMAC_TSL TSL.reg #define GMAC_TN TN.reg #define GMAC_TA TA.reg #define GMAC_TI TI.reg #define GMAC_EFTSL EFTSL.reg #define GMAC_EFTN EFTN.reg #define GMAC_EFRSL EFRSL.reg #define GMAC_EFRN EFRN.reg #define GMAC_PEFTSL PEFTSL.reg #define GMAC_PEFTN PEFTN.reg #define GMAC_PEFRSL PEFRSL.reg #define GMAC_PEFRN PEFRN.reg #define GMAC_RLPITR RLPITR.reg #define GMAC_RLPITI RLPITI.reg #define GMAC_TLPITR TLPITR.reg #define GMAC_TLPITI TLPITI.reg #define GMAC_SAB SAB.reg #define GMAC_SAT SAT.reg /* * Define the register field value symbols that are missing in the SAM0-family * DFP GMAC headers. */ #define GMAC_NCFGR_CLK_MCK_8 GMAC_NCFGR_CLK(0) #define GMAC_NCFGR_CLK_MCK_16 GMAC_NCFGR_CLK(1) #define GMAC_NCFGR_CLK_MCK_32 GMAC_NCFGR_CLK(2) #define GMAC_NCFGR_CLK_MCK_48 GMAC_NCFGR_CLK(3) #define GMAC_NCFGR_CLK_MCK_64 GMAC_NCFGR_CLK(4) #define GMAC_NCFGR_CLK_MCK_96 GMAC_NCFGR_CLK(5) #define GMAC_DCFGR_FBLDO_SINGLE GMAC_DCFGR_FBLDO(1) #define GMAC_DCFGR_FBLDO_INCR4 GMAC_DCFGR_FBLDO(2) #define GMAC_DCFGR_FBLDO_INCR8 GMAC_DCFGR_FBLDO(3) #define GMAC_DCFGR_FBLDO_INCR16 GMAC_DCFGR_FBLDO(4) #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_SAM0_GMAC_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_sam0_gmac.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,434
```objective-c /* * */ #ifndef OA_TC6_CFG_H__ #define OA_TC6_CFG_H__ #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <zephyr/sys/byteorder.h> #include <zephyr/drivers/spi.h> #include <zephyr/net/net_pkt.h> #define MMS_REG(m, r) ((((m) & GENMASK(3, 0)) << 16) | ((r) & GENMASK(15, 0))) /* Memory Map Sector (MMS) 0 */ #define OA_ID MMS_REG(0x0, 0x000) /* expect 0x11 */ #define OA_PHYID MMS_REG(0x0, 0x001) #define OA_RESET MMS_REG(0x0, 0x003) #define OA_RESET_SWRESET BIT(0) #define OA_CONFIG0 MMS_REG(0x0, 0x004) #define OA_CONFIG0_SYNC BIT(15) #define OA_CONFIG0_RFA_ZARFE BIT(12) #define OA_CONFIG0_PROTE BIT(5) #define OA_STATUS0 MMS_REG(0x0, 0x008) #define OA_STATUS0_RESETC BIT(6) #define OA_STATUS1 MMS_REG(0x0, 0x009) #define OA_BUFSTS MMS_REG(0x0, 0x00B) #define OA_BUFSTS_TXC GENMASK(15, 8) #define OA_BUFSTS_RCA GENMASK(7, 0) #define OA_IMASK0 MMS_REG(0x0, 0x00C) #define OA_IMASK0_TXPEM BIT(0) #define OA_IMASK0_TXBOEM BIT(1) #define OA_IMASK0_TXBUEM BIT(2) #define OA_IMASK0_RXBOEM BIT(3) #define OA_IMASK0_LOFEM BIT(4) #define OA_IMASK0_HDREM BIT(5) #define OA_IMASK1 MMS_REG(0x0, 0x00D) #define OA_IMASK0_UV18M BIT(19) /* OA Control header */ #define OA_CTRL_HDR_DNC BIT(31) #define OA_CTRL_HDR_HDRB BIT(30) #define OA_CTRL_HDR_WNR BIT(29) #define OA_CTRL_HDR_AID BIT(28) #define OA_CTRL_HDR_MMS GENMASK(27, 24) #define OA_CTRL_HDR_ADDR GENMASK(23, 8) #define OA_CTRL_HDR_LEN GENMASK(7, 1) #define OA_CTRL_HDR_P BIT(0) /* OA Data header */ #define OA_DATA_HDR_DNC BIT(31) #define OA_DATA_HDR_SEQ BIT(30) #define OA_DATA_HDR_NORX BIT(29) #define OA_DATA_HDR_DV BIT(21) #define OA_DATA_HDR_SV BIT(20) #define OA_DATA_HDR_SWO GENMASK(19, 16) #define OA_DATA_HDR_EV BIT(14) #define OA_DATA_HDR_EBO GENMASK(13, 8) #define OA_DATA_HDR_P BIT(0) /* OA Data footer */ #define OA_DATA_FTR_EXST BIT(31) #define OA_DATA_FTR_HDRB BIT(30) #define OA_DATA_FTR_SYNC BIT(29) #define OA_DATA_FTR_RCA GENMASK(28, 24) #define OA_DATA_FTR_DV BIT(21) #define OA_DATA_FTR_SV 
BIT(20) #define OA_DATA_FTR_SWO GENMASK(19, 16) #define OA_DATA_FTR_FD BIT(15) #define OA_DATA_FTR_EV BIT(14) #define OA_DATA_FTR_EBO GENMASK(13, 8) #define OA_DATA_FTR_TXC GENMASK(5, 1) #define OA_DATA_FTR_P BIT(0) #define OA_TC6_HDR_SIZE 4 #define OA_TC6_FTR_SIZE 4 #define OA_TC6_BUF_ALLOC_TIMEOUT K_MSEC(10) #define OA_TC6_FTR_RCA_MAX GENMASK(4, 0) #define OA_TC6_FTR_TXC_MAX GENMASK(4, 0) /** * @brief OA TC6 data. */ struct oa_tc6 { /** Pointer to SPI device */ const struct spi_dt_spec *spi; /** OA data payload (chunk) size */ uint8_t cps; /** * Number of available chunks buffers in OA TC6 device to store * data for transmission */ uint8_t txc; /** Number of available chunks to read from OA TC6 device */ uint8_t rca; /** Indication of pending interrupt in OA TC6 device */ bool exst; /** Indication of OA TC6 device being ready for transmission */ bool sync; /** Indication of protected control transmission mode */ bool protected; /** Pointer to network buffer concatenated from received chunk */ struct net_buf *concat_buf; }; typedef struct { uint32_t address; uint32_t value; } oa_mem_map_t; /** * @brief Calculate parity bit from data * * @param x data to calculate parity * * @return 0 if number of ones is odd, 1 otherwise. */ static inline bool oa_tc6_get_parity(const uint32_t x) { uint32_t y; y = x ^ (x >> 1); y = y ^ (y >> 2); y = y ^ (y >> 4); y = y ^ (y >> 8); y = y ^ (y >> 16); return !(y & 1); } /** * @brief Read OA TC6 compliant device single register * * @param tc6 OA TC6 specific data * * @param reg register to read * @param val pointer to variable to store read value * * @return 0 if read was successful, <0 otherwise. */ int oa_tc6_reg_read(struct oa_tc6 *tc6, const uint32_t reg, uint32_t *val); /** * @brief Write to OA TC6 compliant device a single register * * @param tc6 OA TC6 specific data * * @param reg register to read * @param val data to send to device * * @return 0 if write was successful, <0 otherwise. 
*/ int oa_tc6_reg_write(struct oa_tc6 *tc6, const uint32_t reg, uint32_t val); /** * @brief Enable or disable the protected mode for control transactions * * @param tc6 OA TC6 specific data * * @param prote enable or disable protected control transactions * * @return 0 if operation was successful, <0 otherwise. */ int oa_tc6_set_protected_ctrl(struct oa_tc6 *tc6, bool prote); /** * @brief Send OA TC6 data chunks to the device * * @param tc6 OA TC6 specific data * * @param pkt network packet to be send * * @return 0 if data send was successful, <0 otherwise. */ int oa_tc6_send_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt); /** * @brief Read data chunks from OA TC6 device * * @param tc6 OA TC6 specific data * * @param pkt network packet to store received data * * @return 0 if read was successful, <0 otherwise. */ int oa_tc6_read_chunks(struct oa_tc6 *tc6, struct net_pkt *pkt); /** * @brief Perform SPI transfer of single chunk from/to OA TC6 device * * @param tc6 OA TC6 specific data * * @param buf_rx buffer to store read data * * @param buf_tx buffer with data to send * * @param hdr OA TC6 data transmission header value * * @param ftr poniter to OA TC6 data received footer * * @return 0 if transmission was successful, <0 otherwise. */ int oa_tc6_chunk_spi_transfer(struct oa_tc6 *tc6, uint8_t *buf_rx, uint8_t *buf_tx, uint32_t hdr, uint32_t *ftr); /** * @brief Read status from OA TC6 device * * @param tc6 OA TC6 specific data * * @param ftr poniter to OA TC6 data received footer * * @return 0 if successful, <0 otherwise. */ int oa_tc6_read_status(struct oa_tc6 *tc6, uint32_t *ftr); /** * @brief Read, modify and write control register from OA TC6 device * * @param tc6 OA TC6 specific data * * @param reg register to modify * * @param mask bit mask for modified register * * @param value to be stored in the register * * @return 0 if successful, <0 otherwise. 
*/ int oa_tc6_reg_rmw(struct oa_tc6 *tc6, const uint32_t reg, uint32_t mask, uint32_t val); /** * @brief Check the status of OA TC6 device * * @param tc6 OA TC6 specific data * * @return 0 if successful, <0 otherwise. */ int oa_tc6_check_status(struct oa_tc6 *tc6); #endif /* OA_TC6_CFG_H__ */ ```
/content/code_sandbox/drivers/ethernet/oa_tc6.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,987
```unknown menuconfig ETH_NXP_S32_GMAC bool "NXP S32 GMAC driver" default y depends on DT_HAS_NXP_S32_GMAC_ENABLED select NOCACHE_MEMORY if ARCH_HAS_NOCACHE_MEMORY_SUPPORT select PINCTRL imply MDIO help Enable GMAC/EMAC Ethernet driver for NXP S32 SoCs. if ETH_NXP_S32_GMAC config ETH_NXP_S32_TX_RING_LEN int "TX ring length" default 4 range 4 128 help Length of the TX ring. ETH_NXP_S32_TX_RING_BUF_SIZE * ETH_NXP_S32_TX_RING_LEN must be a multiple of TX FIFO block size. config ETH_NXP_S32_TX_RING_BUF_SIZE int "TX ring data buffer size" default 1536 range 64 1536 help Size, in bytes, of the TX data buffer. The size must be big enough to store one complete Ethernet frame, and be a multiple of the data bus width. config ETH_NXP_S32_RX_RING_LEN int "RX ring length" default 4 range 4 128 help Length of the RX ring. ETH_NXP_S32_RX_RING_BUF_SIZE * ETH_NXP_S32_RX_RING_LEN must be a multiple of RX FIFO block size. config ETH_NXP_S32_RX_RING_BUF_SIZE int "RX ring data buffer size" default 1536 range 64 1536 help Size, in bytes, of the RX data buffer. The size must be big enough to store one complete Ethernet frame, and be a multiple of the data bus width. config ETH_NXP_S32_CLOCK_CONFIG_IDX int default 1 help This option specifies the zero-based index of the clock configuration used to initialize the GMAC/EMAC clocks. config ETH_NXP_S32_RX_THREAD_PRIO int "RX thread priority" default 2 help RX thread priority. RX thread is a cooperative thread. config ETH_NXP_S32_RX_THREAD_STACK_SIZE int "RX thread stack size" default 1500 help RX thread stack size. config ETH_NXP_S32_RX_BUDGET int "RX thread budget" default 128 range 1 1024 help The budget parameter places a limit on the amount of work the driver may do in the RX thread before yielding the processor, in case there is more work to do. This is to prevent the RX thread to starve other threads. Each received frame counts as one unit of work. config ETH_NXP_S32_LOOPBACK bool "MAC loopback" help MAC operates in loopback mode. 
The data is looped back through internal FIFO on to the internal MII/GMII interface, passing through the corresponding transmit PHY interface block. This mode requires the MII/GMII Rx clock input signal to function properly because the transmit clock is not internally looped back. config ETH_NXP_S32_MULTICAST_FILTER bool "Multicast destination address filtering" help Enable support for multicast destination address filtering in the MAC. MAC performs imperfect filtering using a 64-bit hash table. The upper 6-bits CRC of the received multicast address is used as index to the content of the hash table. Only multicast with the computed hash set in the multicast table will be received and all other multicast is dropped by the MAC. If disabled, all multicast is received by the MAC. endif # ETH_NXP_S32_GMAC ```
/content/code_sandbox/drivers/ethernet/Kconfig.nxp_s32_gmac
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
774
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_GECKO_PRIV_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_GECKO_PRIV_H_ #include <zephyr/kernel.h> #include <zephyr/types.h> #define ETH_GECKO_MTU NET_ETH_MTU #define SILABS_OUI_B0 0x00 #define SILABS_OUI_B1 0x0B #define SILABS_OUI_B2 0x57 #define ETH_TX_BUF_SIZE 1536 #define ETH_TX_BUF_COUNT 2 #define ETH_RX_BUF_SIZE 128 #define ETH_RX_BUF_COUNT 32 #define ETH_BUF_ALIGNMENT 16 #define ETH_DESC_ALIGNMENT 4 #define ETH_TX_USED BIT(31) #define ETH_TX_WRAP BIT(30) #define ETH_TX_ERROR BIT(29) #define ETH_TX_UNDERRUN BIT(28) #define ETH_TX_EXHAUSTED BIT(27) #define ETH_TX_NO_CRC BIT(16) #define ETH_TX_LAST BIT(15) #define ETH_TX_LENGTH (2048-1) #define ETH_RX_ADDRESS ~(ETH_DESC_ALIGNMENT-1) #define ETH_RX_WRAP BIT(1) #define ETH_RX_OWNERSHIP BIT(0) #define ETH_RX_BROADCAST BIT(31) #define ETH_RX_MULTICAST_HASH BIT(30) #define ETH_RX_UNICAST_HASH BIT(29) #define ETH_RX_EXT_ADDR BIT(28) #define ETH_RX_SAR1 BIT(26) #define ETH_RX_SAR2 BIT(25) #define ETH_RX_SAR3 BIT(24) #define ETH_RX_SAR4 BIT(23) #define ETH_RX_TYPE_ID BIT(22) #define ETH_RX_VLAN_TAG BIT(21) #define ETH_RX_PRIORITY_TAG BIT(20) #define ETH_RX_VLAN_PRIORITY (0x7UL<<17) #define ETH_RX_CFI BIT(16) #define ETH_RX_EOF BIT(15) #define ETH_RX_SOF BIT(14) #define ETH_RX_OFFSET (0x3UL<<12) #define ETH_RX_LENGTH (4096-1) #define ETH_RX_ENABLE(base) (base->NETWORKCTRL |= ETH_NETWORKCTRL_ENBRX) #define ETH_RX_DISABLE(base) (base->NETWORKCTRL &= ~ETH_NETWORKCTRL_ENBRX) #define ETH_TX_ENABLE(base) (base->NETWORKCTRL |= ETH_NETWORKCTRL_ENBTX) #define ETH_TX_DISABLE(base) (base->NETWORKCTRL &= ~ETH_NETWORKCTRL_ENBTX) struct eth_buf_desc { uint32_t address; uint32_t status; }; struct eth_gecko_pin_list { struct soc_gpio_pin mdio[2]; struct soc_gpio_pin rmii[7]; }; /* Device constant configuration parameters */ struct eth_gecko_dev_cfg { ETH_TypeDef *regs; const struct eth_gecko_pin_list *pin_list; uint32_t pin_list_size; void (*config_func)(void); struct phy_gecko_dev 
phy; }; /* Device run time data */ struct eth_gecko_dev_data { struct net_if *iface; uint8_t mac_addr[6]; struct k_sem tx_sem; struct k_sem rx_sem; K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_GECKO_RX_THREAD_STACK_SIZE); struct k_thread rx_thread; bool link_up; }; /* PHY Management pins */ #define PIN_PHY_MDC {DT_INST_PROP_BY_IDX(0, location_phy_mdc, 1), \ DT_INST_PROP_BY_IDX(0, location_phy_mdc, 2), gpioModePushPull,\ 0} #define PIN_PHY_MDIO {DT_INST_PROP_BY_IDX(0, location_phy_mdio, 1), \ DT_INST_PROP_BY_IDX(0, location_phy_mdio, 2), gpioModePushPull,\ 0} #define PIN_LIST_PHY {PIN_PHY_MDC, PIN_PHY_MDIO} /* RMII pins excluding reference clock, handled by board.c */ #define PIN_RMII_CRSDV {DT_INST_PROP_BY_IDX(0, location_rmii_crs_dv, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_crs_dv, 2), gpioModeInput, 0} #define PIN_RMII_TXD0 {DT_INST_PROP_BY_IDX(0, location_rmii_txd0, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_txd0, 2), gpioModePushPull, 0} #define PIN_RMII_TXD1 {DT_INST_PROP_BY_IDX(0, location_rmii_txd1, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_txd1, 2), gpioModePushPull, 0} #define PIN_RMII_TX_EN {DT_INST_PROP_BY_IDX(0, location_rmii_tx_en, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_tx_en, 2), gpioModePushPull, 0} #define PIN_RMII_RXD0 {DT_INST_PROP_BY_IDX(0, location_rmii_rxd0, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_rxd0, 2), gpioModeInput, 0} #define PIN_RMII_RXD1 {DT_INST_PROP_BY_IDX(0, location_rmii_rxd1, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_rxd1, 2), gpioModeInput, 0} #define PIN_RMII_RX_ER {DT_INST_PROP_BY_IDX(0, location_rmii_rx_er, 1),\ DT_INST_PROP_BY_IDX(0, location_rmii_rx_er, 2), gpioModeInput, 0} #define PIN_LIST_RMII {PIN_RMII_CRSDV, PIN_RMII_TXD0, PIN_RMII_TXD1, \ PIN_RMII_TX_EN, PIN_RMII_RXD0, PIN_RMII_RXD1, PIN_RMII_RX_ER} /* RMII reference clock is not included in RMII pin set * #define PIN_RMII_REFCLK {DT_INST_PROP_BY_IDX(0, location_rmii_refclk, 1),\ * DT_INST_PROP_BY_IDX(0, location_rmii_refclk, 2), gpioModePushPull, 0} */ 
#endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_GECKO_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_gecko_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,344
```objective-c /* * */ #ifndef ETH_IVSHMEM_PRIV_H #define ETH_IVSHMEM_PRIV_H #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include <openamp/virtio_ring.h> struct eth_ivshmem_queue { struct { struct vring vring; void *shmem; uint16_t desc_head; uint16_t desc_len; uint32_t data_head; uint32_t data_tail; uint32_t data_len; uint16_t avail_idx; uint16_t used_idx; uint32_t pending_data_head; uint32_t pending_data_len; } tx; struct { struct vring vring; void *shmem; uint16_t avail_idx; uint16_t used_idx; } rx; uint16_t desc_max_len; uint32_t vring_header_size; uint32_t vring_data_max_len; }; int eth_ivshmem_queue_init( struct eth_ivshmem_queue *q, uintptr_t tx_shmem, uintptr_t rx_shmem, size_t shmem_section_size); void eth_ivshmem_queue_reset(struct eth_ivshmem_queue *q); int eth_ivshmem_queue_tx_get_buff(struct eth_ivshmem_queue *q, void **data, size_t len); int eth_ivshmem_queue_tx_commit_buff(struct eth_ivshmem_queue *q); int eth_ivshmem_queue_rx(struct eth_ivshmem_queue *q, const void **data, size_t *len); int eth_ivshmem_queue_rx_complete(struct eth_ivshmem_queue *q); #endif /* ETH_IVSHMEM_PRIV_H */ ```
/content/code_sandbox/drivers/ethernet/eth_ivshmem_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
345
```c /* * */ #define DT_DRV_COMPAT intel_e1000 #define LOG_MODULE_NAME eth_e1000 #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <sys/types.h> #include <zephyr/kernel.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include <zephyr/drivers/pcie/pcie.h> #include <zephyr/irq.h> #include "eth_e1000_priv.h" #if defined(CONFIG_ETH_E1000_PTP_CLOCK) #include <zephyr/drivers/ptp_clock.h> #define PTP_INST_NODEID(n) DT_INST_CHILD(n, ptp) #endif #if defined(CONFIG_ETH_E1000_VERBOSE_DEBUG) #define hexdump(_buf, _len, fmt, args...) \ ({ \ const size_t STR_SIZE = 80; \ char _str[STR_SIZE]; \ \ snprintk(_str, STR_SIZE, "%s: " fmt, __func__, ## args); \ \ LOG_HEXDUMP_DBG(_buf, _len, _str); \ }) #else #define hexdump(args...) #endif static const char *e1000_reg_to_string(enum e1000_reg_t r) { #define _(_x) case _x: return #_x switch (r) { _(CTRL); _(ICR); _(ICS); _(IMS); _(RCTL); _(TCTL); _(RDBAL); _(RDBAH); _(RDLEN); _(RDH); _(RDT); _(TDBAL); _(TDBAH); _(TDLEN); _(TDH); _(TDT); _(RAL); _(RAH); } #undef _ LOG_ERR("Unsupported register: 0x%x", r); k_oops(); return NULL; } static struct net_if *get_iface(struct e1000_dev *ctx) { return ctx->iface; } static enum ethernet_hw_caps e1000_caps(const struct device *dev) { return #if defined(CONFIG_NET_VLAN) ETHERNET_HW_VLAN | #endif #if defined(CONFIG_ETH_E1000_PTP_CLOCK) ETHERNET_PTP | #endif ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T | ETHERNET_LINK_1000BASE_T | /* The driver does not really support TXTIME atm but mark * it to support it so that we can test the txtime sample. 
*/ ETHERNET_TXTIME; } #if defined(CONFIG_ETH_E1000_PTP_CLOCK) static const struct device *e1000_get_ptp_clock(const struct device *dev) { struct e1000_dev *ctx = dev->data; return ctx->ptp_clock; } #endif static int e1000_tx(struct e1000_dev *dev, void *buf, size_t len) { hexdump(buf, len, "%zu byte(s)", len); dev->tx.addr = POINTER_TO_INT(buf); dev->tx.len = len; dev->tx.cmd = TDESC_EOP | TDESC_RS; iow32(dev, TDT, 1); while (!(dev->tx.sta)) { k_yield(); } LOG_DBG("tx.sta: 0x%02hx", dev->tx.sta); return (dev->tx.sta & TDESC_STA_DD) ? 0 : -EIO; } static int e1000_send(const struct device *ddev, struct net_pkt *pkt) { struct e1000_dev *dev = ddev->data; size_t len = net_pkt_get_len(pkt); if (net_pkt_read(pkt, dev->txb, len)) { return -EIO; } return e1000_tx(dev, dev->txb, len); } static struct net_pkt *e1000_rx(struct e1000_dev *dev) { struct net_pkt *pkt = NULL; void *buf; ssize_t len; LOG_DBG("rx.sta: 0x%02hx", dev->rx.sta); if (!(dev->rx.sta & RDESC_STA_DD)) { LOG_ERR("RX descriptor not ready"); goto out; } buf = INT_TO_POINTER((uint32_t)dev->rx.addr); len = dev->rx.len - 4; if (len <= 0) { LOG_ERR("Invalid RX descriptor length: %hu", dev->rx.len); goto out; } hexdump(buf, len, "%zd byte(s)", len); pkt = net_pkt_rx_alloc_with_buffer(dev->iface, len, AF_UNSPEC, 0, K_NO_WAIT); if (!pkt) { LOG_ERR("Out of buffers"); goto out; } if (net_pkt_write(pkt, buf, len)) { LOG_ERR("Out of memory for received frame"); net_pkt_unref(pkt); pkt = NULL; } out: return pkt; } static void e1000_isr(const struct device *ddev) { struct e1000_dev *dev = ddev->data; uint32_t icr = ior32(dev, ICR); /* Cleared upon read */ icr &= ~(ICR_TXDW | ICR_TXQE); if (icr & ICR_RXO) { struct net_pkt *pkt = e1000_rx(dev); icr &= ~ICR_RXO; if (pkt) { net_recv_data(get_iface(dev), pkt); } else { eth_stats_update_errors_rx(get_iface(dev)); } } if (icr) { LOG_ERR("Unhandled interrupt, ICR: 0x%x", icr); } } int e1000_probe(const struct device *ddev) { /* PCI ID is decoded into REG_SIZE */ struct e1000_dev 
*dev = ddev->data; uint32_t ral, rah; struct pcie_bar mbar; if (dev->pcie->bdf == PCIE_BDF_NONE) { return -ENODEV; } pcie_probe_mbar(dev->pcie->bdf, 0, &mbar); pcie_set_cmd(dev->pcie->bdf, PCIE_CONF_CMDSTAT_MEM | PCIE_CONF_CMDSTAT_MASTER, true); device_map(&dev->address, mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE); /* Setup TX descriptor */ iow32(dev, TDBAL, (uint32_t)POINTER_TO_UINT(&dev->tx)); iow32(dev, TDBAH, (uint32_t)((POINTER_TO_UINT(&dev->tx) >> 16) >> 16)); iow32(dev, TDLEN, 1*16); iow32(dev, TDH, 0); iow32(dev, TDT, 0); iow32(dev, TCTL, TCTL_EN); /* Setup RX descriptor */ dev->rx.addr = POINTER_TO_INT(dev->rxb); dev->rx.len = sizeof(dev->rxb); iow32(dev, RDBAL, (uint32_t)POINTER_TO_UINT(&dev->rx)); iow32(dev, RDBAH, (uint32_t)((POINTER_TO_UINT(&dev->rx) >> 16) >> 16)); iow32(dev, RDLEN, 1*16); iow32(dev, RDH, 0); iow32(dev, RDT, 1); iow32(dev, IMS, IMS_RXO); ral = ior32(dev, RAL); rah = ior32(dev, RAH); memcpy(dev->mac, &ral, 4); memcpy(dev->mac + 4, &rah, 2); return 0; } BUILD_ASSERT(DT_INST_IRQN(0) != PCIE_IRQ_DETECT, "Dynamic IRQ allocation is not supported"); static void e1000_iface_init(struct net_if *iface) { struct e1000_dev *dev = net_if_get_device(iface)->data; const struct e1000_config *config = net_if_get_device(iface)->config; if (dev->iface == NULL) { dev->iface = iface; /* Do the phy link up only once */ config->config_func(dev); } ethernet_init(iface); net_if_set_link_addr(iface, dev->mac, sizeof(dev->mac), NET_LINK_ETHERNET); LOG_DBG("done"); } static const struct ethernet_api e1000_api = { .iface_api.init = e1000_iface_init, #if defined(CONFIG_ETH_E1000_PTP_CLOCK) .get_ptp_clock = e1000_get_ptp_clock, #endif .get_capabilities = e1000_caps, .send = e1000_send, }; #define E1000_DT_INST_IRQ_FLAGS(inst) \ COND_CODE_1(DT_INST_IRQ_HAS_CELL(inst, sense), \ (DT_INST_IRQ(inst, sense)), \ (DT_INST_IRQ(inst, flags))) #define E1000_PCI_INIT(inst) \ DEVICE_PCIE_INST_DECLARE(inst); \ \ static struct e1000_dev dev_##inst = { \ 
DEVICE_PCIE_INST_INIT(inst, pcie), \ }; \ \ static void e1000_config_##inst(const struct e1000_dev *dev) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), \ DT_INST_IRQ(inst, priority), \ e1000_isr, DEVICE_DT_INST_GET(inst), \ E1000_DT_INST_IRQ_FLAGS(inst)); \ \ irq_enable(DT_INST_IRQN(inst)); \ iow32(dev, CTRL, CTRL_SLU); /* Set link up */ \ iow32(dev, RCTL, RCTL_EN | RCTL_MPE); \ } \ \ static const struct e1000_config config_##inst = { \ .config_func = e1000_config_##inst, \ }; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(inst, \ e1000_probe, \ NULL, \ &dev_##inst, \ &config_##inst, \ CONFIG_ETH_INIT_PRIORITY, \ &e1000_api, \ NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(E1000_PCI_INIT); #if defined(CONFIG_ETH_E1000_PTP_CLOCK) struct ptp_context { struct e1000_dev *eth_context; /* Simulate the clock. This is only for testing. * The value is in nanoseconds */ uint64_t clock_time; }; static int ptp_clock_e1000_set(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; /* TODO: Set the clock real value here */ ptp_context->clock_time = tm->second * NSEC_PER_SEC + tm->nanosecond; return 0; } static int ptp_clock_e1000_get(const struct device *dev, struct net_ptp_time *tm) { struct ptp_context *ptp_context = dev->data; /* TODO: Get the clock value */ tm->second = ptp_context->clock_time / NSEC_PER_SEC; tm->nanosecond = ptp_context->clock_time - tm->second * NSEC_PER_SEC; return 0; } static int ptp_clock_e1000_adjust(const struct device *dev, int increment) { ARG_UNUSED(dev); ARG_UNUSED(increment); /* TODO: Implement clock adjustment */ return 0; } static int ptp_clock_e1000_rate_adjust(const struct device *dev, double ratio) { const int hw_inc = NSEC_PER_SEC / CONFIG_ETH_E1000_PTP_CLOCK_SRC_HZ; struct ptp_context *ptp_context = dev->data; struct e1000_dev *context = ptp_context->eth_context; int corr; int32_t mul; float val; /* No change needed. */ if (ratio == 1.0) { return 0; } ratio *= context->clk_ratio; /* Limit possible ratio. 
*/ if ((ratio > 1.0 + 1.0/(2.0 * hw_inc)) || (ratio < 1.0 - 1.0/(2.0 * hw_inc))) { return -EINVAL; } /* Save new ratio. */ context->clk_ratio = ratio; if (ratio < 1.0) { corr = hw_inc - 1; val = 1.0 / (hw_inc * (1.0 - ratio)); } else if (ratio > 1.0) { corr = hw_inc + 1; val = 1.0 / (hw_inc * (ratio - 1.0)); } else { val = 0; corr = hw_inc; } if (val >= INT32_MAX) { /* Value is too high. * It is not possible to adjust the rate of the clock. */ mul = 0; } else { mul = val; } /* TODO: Adjust the clock here */ return 0; } static const struct ptp_clock_driver_api api = { .set = ptp_clock_e1000_set, .get = ptp_clock_e1000_get, .adjust = ptp_clock_e1000_adjust, .rate_adjust = ptp_clock_e1000_rate_adjust, }; static int ptp_e1000_init(const struct device *port) { struct ptp_context *ptp_context = port->data; struct e1000_dev *context = ptp_context->eth_context; context->ptp_clock = port; ptp_context->clock_time = k_ticks_to_ns_floor64(k_uptime_ticks()); return 0; } #define E1000_PTP_INIT(inst) \ static struct ptp_context ptp_e1000_context_##inst = { \ .eth_context = DEVICE_DT_INST_GET(inst)->data, \ }; \ \ DEVICE_DEFINE(e1000_ptp_clock, PTP_CLOCK_NAME, \ ptp_e1000_init, NULL, \ &ptp_e1000_context_##inst, NULL, POST_KERNEL, \ CONFIG_APPLICATION_INIT_PRIORITY, &api); DT_INST_FOREACH_STATUS_OKAY(E1000_PTP_INIT); #endif /* CONFIG_ETH_E1000_PTP_CLOCK */ ```
/content/code_sandbox/drivers/ethernet/eth_e1000.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
3,157
```c /* * Description: * 3504-0 Universal 10/100/1000 Ethernet MAC (DWC_gmac) * Driver specifically designed for Cyclone V SoC DevKit use only. * * based on Intel SOC FPGA HWLIB Repo * path_to_url */ #define LOG_MODULE_NAME eth_cyclonev #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #define DT_DRV_COMPAT snps_ethernet_cyclonev #include "eth_cyclonev_priv.h" #include <stdio.h> #include <zephyr/devicetree.h> #include <zephyr/kernel.h> #include <zephyr/net/ethernet.h> #include "phy_cyclonev.c" #include <ethernet/eth_stats.h> #include <sys/types.h> #include <zephyr/irq.h> #define TX_AVAIL_WAIT K_MSEC(1) #define INC_WRAP(idx, size) ({ idx = (idx + 1) % size; }) static const uint8_t eth_cyclonev_mac_addr[6] = DT_INST_PROP(0, local_mac_address); void eth_cyclonev_reset(uint32_t instance); void eth_cyclonev_set_mac_addr(uint8_t *address, uint32_t instance, uint32_t n, struct eth_cyclonev_priv *p); int eth_cyclonev_get_software_reset_status(uint32_t instance, struct eth_cyclonev_priv *p); int eth_cyclonev_software_reset(uint32_t instance, struct eth_cyclonev_priv *p); void eth_cyclonev_setup_rxdesc(struct eth_cyclonev_priv *p); void eth_cyclonev_setup_txdesc(struct eth_cyclonev_priv *p); static void eth_cyclonev_iface_init(struct net_if *iface); static int eth_cyclonev_send(const struct device *dev, struct net_pkt *pkt); void eth_cyclonev_isr(const struct device *dev); int set_mac_conf_status(int instance, uint32_t *mac_config_reg_settings, struct eth_cyclonev_priv *p); int eth_cyclonev_probe(const struct device *dev); static int eth_cyclonev_start(const struct device *dev); static int eth_cyclonev_stop(const struct device *dev); static void eth_cyclonev_receive(struct eth_cyclonev_priv *p); static void eth_cyclonev_tx_release(struct eth_cyclonev_priv *p); static int eth_cyclonev_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config); static enum 
ethernet_hw_caps eth_cyclonev_caps(const struct device *dev); /** Device config */ struct eth_cyclonev_config { /** BBRAM base address */ uint8_t *base; /** BBRAM size (Unit:bytes) */ int size; uint32_t emac_index; void (*irq_config)(void); }; /** * @brief Reset gmac device function * Function initialise HPS interface, see * path_to_url * /us/en/pdfs/literature/hb/cyclone-v/cv_54001.pdf p. 1252 * * @param instance Number of instance (0 or 1 in Cyclone V HPS) */ void eth_cyclonev_reset(uint32_t instance) { /* 1. After the HPS is released from cold or warm reset, *reset the Ethernet Controller module by setting the appropriate *emac bit in the permodrst register in the Reset Manager. */ sys_set_bits(RSTMGR_PERMODRST_ADDR, Rstmgr_Permodrst_Emac_Set_Msk[instance]); /* 4a. Set the physel_* field in the ctrl register of the System Manager *(EMAC Group) to 0x1 to select the RGMII PHY interface. */ alt_replbits_word(SYSMGR_EMAC_ADDR, Sysmgr_Core_Emac_Phy_Intf_Sel_Set_Msk[instance], Sysmgr_Emac_Phy_Intf_Sel_E_Rgmii[instance]); /* 4b. Disable the Ethernet Controller FPGA interfaces by clearing the * emac_* bit in the module register of the System Manager (FPGA Interface * group). */ sys_clear_bits(SYSMGR_FPGAINTF_INDIV_ADDR, Sysmgr_Fpgaintf_En_3_Emac_Set_Msk[instance]); /* 7. After confirming the settings are valid, software can clear the emac * bit in the permodrst register of the Reset Manager to bring the EMAC out of * reset. */ sys_clear_bits(RSTMGR_PERMODRST_ADDR, Rstmgr_Permodrst_Emac_Set_Msk[instance]); } /** * @brief Set MAC Address function * Loads the selected MAC Address in device's registers. * * @param address Pointer to Mac Address table * @param instance Number of instance (0 or 1 in Cyclone V HPS) * @param n Selected index of MAC Address, n <= 15. There's no implementation * of setting MAC Addresses for n > 15. 
* */ void eth_cyclonev_set_mac_addr(uint8_t *address, uint32_t instance, uint32_t n, struct eth_cyclonev_priv *p) { uint32_t tmpreg; if (instance > 1) { return; } if (n > 15) { LOG_ERR("Invalid index of MAC Address: %d", n); return; } /* Calculate the selected MAC address high register */ tmpreg = ((uint32_t)address[5] << 8) | (uint32_t)address[4]; /* Load the selected MAC address high register */ sys_write32(tmpreg, EMAC_GMAC_MAC_ADDR_HIGH_ADDR(p->base_addr, n)); /* Calculate the selected MAC address low register */ tmpreg = ((uint32_t)address[3] << 24) | ((uint32_t)address[2] << 16) | ((uint32_t)address[1] << 8) | address[0]; /* Load the selected MAC address low register */ sys_write32(tmpreg, EMAC_GMAC_MAC_ADDR_LOW_ADDR(p->base_addr, n)); } /** * @brief Get software reset status function * Check status of SWR bit in DMA Controller Bus_Mode Register * * @param instance Number of instance (0 or 1 in Cyclone V HPS) * @retval 1 if DMA Controller Resets Logic, 0 otherwise */ int eth_cyclonev_get_software_reset_status(uint32_t instance, struct eth_cyclonev_priv *p) { if (instance > 1) { return -1; } return EMAC_DMA_MODE_SWR_GET(sys_read32(EMAC_DMAGRP_BUS_MODE_ADDR(p->base_addr))); } /** * @brief Perform software reset * Resets all MAC subsystem registers and logic, wait for the software reset to * clear * * @param instance Number of instance (0 or 1 in Cyclone V HPS) * @retval 0 if Reset was successful, -1 otherwise */ int eth_cyclonev_software_reset(uint32_t instance, struct eth_cyclonev_priv *p) { unsigned int i; if (instance > 1) { return -1; } /* Set the SWR bit: resets all MAC subsystem internal registers and logic */ /* After reset all the registers holds their respective reset values */ sys_set_bits(EMAC_DMAGRP_BUS_MODE_ADDR(p->base_addr), EMAC_DMA_MODE_SWR_SET_MSK); /* Wait for the software reset to clear */ for (i = 0; i < 10; i++) { k_sleep(K_MSEC(10)); if (eth_cyclonev_get_software_reset_status(instance, p) == 0) { break; } } if (i == 10) { return -1; } 
return 0; } /** * @brief RX descriptor ring initialisation function * Sets up RX descriptor ring with chained descriptors, * sets OWN bit in each descriptor, inits rx variables and stats * * @param p Pointer to device structure. */ void eth_cyclonev_setup_rxdesc(struct eth_cyclonev_priv *p) { int32_t i; struct eth_cyclonev_dma_desc *rx_desc; /* For each descriptor where i = descriptor index do: */ for (i = 0; i < NB_RX_DESCS; i++) { rx_desc = &p->rx_desc_ring[i]; rx_desc->buffer1_addr = (uint32_t)&p->rx_buf[i * ETH_BUFFER_SIZE]; rx_desc->control_buffer_size = ETH_DMARXDESC_RCH | ETH_BUFFER_SIZE; /*set own bit*/ rx_desc->status = ETH_DMARXDESC_OWN; rx_desc->buffer2_next_desc_addr = (uint32_t)&p->rx_desc_ring[i + 1]; if (i == (NB_RX_DESCS - 1)) { rx_desc->buffer2_next_desc_addr = (uint32_t)&p->rx_desc_ring[0]; } } p->rx_current_desc_number = 0; p->rxints = 0; /* Set RX Descriptor List Address Register */ sys_write32((uint32_t)&p->rx_desc_ring[0], EMAC_DMA_RX_DESC_LIST_ADDR(p->base_addr)); } /** * @brief TX descriptor ring initialisation function * Sets up TX descriptor ring with chained descriptors, * sets OWN bit in each descriptor, inits rx variables and stats * * @param p Pointer to device structure. 
*/ void eth_cyclonev_setup_txdesc(struct eth_cyclonev_priv *p) { int32_t i; struct eth_cyclonev_dma_desc *tx_desc; /* For each descriptor where i = descriptor index do: */ for (i = 0; i < NB_TX_DESCS; i++) { tx_desc = &p->tx_desc_ring[i]; tx_desc->buffer1_addr = (uint32_t)&p->tx_buf[i * ETH_BUFFER_SIZE]; tx_desc->buffer2_next_desc_addr = (uint32_t)&p->tx_desc_ring[i + 1]; tx_desc->status = 0; tx_desc->control_buffer_size = 0; if (i == (NB_TX_DESCS - 1)) { tx_desc->buffer2_next_desc_addr = (uint32_t)&p->tx_desc_ring[0]; } } p->tx_current_desc_number = 0; p->txints = 0; p->tx_tail = 0; /* Set TX Descriptor List Address Register */ sys_write32((uint32_t)&p->tx_desc_ring[0], EMAC_DMA_TX_DESC_LIST_ADDR(p->base_addr)); } /** * @brief Ethernet interface initialisation function * Inits interface, sets interface link MAC address * * @param iface Pointer to net_if structure */ /* Initialisation of interface */ static void eth_cyclonev_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); const struct eth_cyclonev_config *config = dev->config; struct eth_cyclonev_priv *p = dev->data; p->iface = iface; ethernet_init(iface); net_if_set_link_addr(iface, p->mac_addr, sizeof(p->mac_addr), NET_LINK_ETHERNET); /* * Semaphores are used to represent number of available descriptors. * The total is one less than ring size in order to always have * at least one inactive slot for the hardware tail pointer to * stop at and to prevent our head indexes from looping back * onto our tail indexes. */ k_sem_init(&p->free_tx_descs, NB_TX_DESCS - 1, NB_TX_DESCS - 1); /* Initialize the ethernet irq handler */ config->irq_config(); p->initialised = 1; LOG_DBG("done"); } /** * @brief Ethernet set config function usually called by * Zephyr Ethernet stack. 
It supports currently two options: * Set of Mac address and Enabling Promiscuous Mode * * @param dev Pointer to net_if structure * @param type Enumerated type of configuration to do * @param config Pointer to ethernet_config structure * @retval ret 0 if successful */ static int eth_cyclonev_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct eth_cyclonev_priv *p = dev->data; const struct eth_cyclonev_config *cv_config = dev->config; uint32_t reg_val; int ret = 0; (void)reg_val; /* silence the "unused variable" warning */ switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: memcpy(p->mac_addr, config->mac_address.addr, sizeof(p->mac_addr)); eth_cyclonev_set_mac_addr(p->mac_addr, cv_config->emac_index, 0, p); /* Set MAC */ net_if_set_link_addr(p->iface, p->mac_addr, sizeof(p->mac_addr), NET_LINK_ETHERNET); break; #if defined(CONFIG_NET_PROMISCUOUS_MODE) case ETHERNET_CONFIG_TYPE_PROMISC_MODE: reg_val = sys_read32(EMAC_GMACGRP_MAC_FRAME_FILTER_ADDR(p->base_addr)); if (config->promisc_mode && !(reg_val & EMAC_GMACGRP_MAC_FRAME_FILTER_PR_SET_MSK)) { /* Turn on Promisc Mode */ sys_set_bits(EMAC_GMACGRP_MAC_FRAME_FILTER_ADDR(p->base_addr), EMAC_GMACGRP_MAC_FRAME_FILTER_PR_SET_MSK); } else if (!config->promisc_mode && (reg_val & EMAC_GMACGRP_MAC_FRAME_FILTER_PR_SET_MSK)) { /* Turn off Promisc Mode */ sys_clear_bits(EMAC_GMACGRP_MAC_FRAME_FILTER_ADDR(p->base_addr), EMAC_GMACGRP_MAC_FRAME_FILTER_PR_SET_MSK); } else { ret = -EALREADY; } break; #endif default: ret = -ENOTSUP; break; } LOG_DBG("set_config: ret = %d ", ret); return ret; } /** * @brief Get capabilities function usually called by * Zephyr Ethernet stack. 
* * @param dev Pointer to net_if structure * @retval caps Enumerated capabilities of device */ static enum ethernet_hw_caps eth_cyclonev_caps(const struct device *dev) { struct eth_cyclonev_priv *p = dev->data; enum ethernet_hw_caps caps = 0; if (p->feature & EMAC_DMA_HW_FEATURE_MIISEL) { caps |= ETHERNET_LINK_10BASE_T; caps |= ETHERNET_LINK_100BASE_T; } if (p->feature & EMAC_DMA_HW_FEATURE_GMIISEL) { caps |= ETHERNET_LINK_1000BASE_T; } if (p->feature & EMAC_DMA_HW_FEATURE_RXTYP2COE) { caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD; } if (p->feature & EMAC_DMA_HW_FEATURE_RXTYP1COE) { caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD; } caps |= ETHERNET_PROMISC_MODE; return caps; } /** * @brief Send packet function * Sends packet of data. See: * path_to_url * literature/hb/cyclone-v/cv_54001.pdf p.1254 and p.1206 * * @param dev Pointer to device structure * @param pkt Pointer to net_pkt structure containing packet to sent * @retval 0 if successful, -1 otherwise */ static int eth_cyclonev_send(const struct device *dev, struct net_pkt *pkt) { LOG_DBG("ethernet CVSX sending...\n"); struct eth_cyclonev_priv *p = dev->data; struct eth_cyclonev_dma_desc *tx_desc; int32_t index = 0; uint16_t len = net_pkt_get_len(pkt); int first = 1; struct net_buf *frag; LOG_DBG("Pkt length: %d", len); frag = pkt->buffer; do { /* reserve a free descriptor for this fragment */ if (k_sem_take(&p->free_tx_descs, TX_AVAIL_WAIT) != 0) { LOG_DBG("no more free tx descriptors"); goto abort; } tx_desc = &p->tx_desc_ring[p->tx_current_desc_number]; /* Check if it is a free descriptor. */ if (tx_desc->status & ETH_DMATXDESC_OWN) { /* Buffer is still owned by device. */ LOG_ERR("No free tx descriptors!\n"); goto abort; } /* check if len is too large */ if (len >= ETH_BUFFER_SIZE) { LOG_ERR("Length of packet is too long\n"); goto abort; } /* Copy data to local buffer */ if (frag) { memcpy(&p->tx_buf[p->tx_current_desc_number * ETH_BUFFER_SIZE], frag->data, len); } /* Set the buffer size. 
*/ tx_desc->control_buffer_size = (frag->len & ETH_DMATXDESC_TBS1); LOG_DBG("Desc[%d] at address: 0x%08x: , Frag size: %d, Buffer Addr: %p", p->tx_current_desc_number, (unsigned int)&p->tx_desc_ring[p->tx_current_desc_number], frag->len, (void *)tx_desc->buffer1_addr); tx_desc->status = ETH_DMATXDESC_TCH; /* Set the Descriptor's FS bit. */ if (first) { tx_desc->status |= (ETH_DMATXDESC_FS | ETH_DMATXDESC_CIC_BYPASS); first = 0; } /* If Last: then (...) */ if (!frag->frags) { /* set the Descriptor's LS and IC bit. */ tx_desc->status |= (ETH_DMATXDESC_LS | ETH_DMATXDESC_IC); index = p->tx_current_desc_number; } /* Set the current index to the next descriptor. */ p->tx_current_desc_number = (p->tx_current_desc_number + 1); if (p->tx_current_desc_number >= NB_TX_DESCS) { p->tx_current_desc_number = 0; } if (!frag->frags) { while (1) { tx_desc = &p->tx_desc_ring[index]; if (tx_desc->status & ETH_DMATXDESC_OWN) { LOG_ERR("Send packet error!\n"); /* Restart DMA transmission and re-initialise * TX descriptors */ sys_clear_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR( p->base_addr), EMAC_DMAGRP_OPERATION_MODE_ST_SET_MSK); sys_set_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR( p->base_addr), EMAC_DMAGRP_OPERATION_MODE_FTF_SET_MSK); eth_cyclonev_setup_txdesc(p); sys_set_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR( p->base_addr), EMAC_DMAGRP_OPERATION_MODE_ST_SET_MSK); goto abort; } /* Set OWN bit. */ tx_desc->status |= ETH_DMATXDESC_OWN; if (tx_desc->status & ETH_DMATXDESC_FS) { break; } index--; if (index < 0) { index = NB_TX_DESCS - 1; } } LOG_DBG("Current Host Transmit Descriptor Register: 0x%08x", sys_read32( EMAC_DMA_CURR_HOST_TX_DESC_ADDR(p->base_addr))); LOG_DBG("Current Host Transmit Buffer Register: 0x%08x", sys_read32( EMAC_DMA_CURR_HOST_TX_BUFF_ADDR(p->base_addr))); /* If the DMA transmission is suspended, resume transmission. 
*/ if (sys_read32(EMAC_DMAGRP_STATUS_ADDR(p->base_addr)) & EMAC_DMAGRP_STATUS_TS_SET_MSK) { /* Clear TBUS ETHERNET DMA flag */ sys_write32(EMAC_DMAGRP_STATUS_TS_SET_MSK, EMAC_DMAGRP_STATUS_ADDR(p->base_addr)); /* Resume DMA transmission */ sys_write32(0, EMAC_DMA_TX_POLL_DEMAND_ADDR(p->base_addr)); } } frag = frag->frags; } while (frag); LOG_DBG("Sent"); return 0; abort: k_sem_give(&p->free_tx_descs); /* Multi-descriptor package release (?) */ return -1; } /** * @brief Interrupt handling function * Detects interrupt status, invokes necessary actions * and clears interrupt status register * * @param dev Pointer to device structure */ void eth_cyclonev_isr(const struct device *dev) { struct eth_cyclonev_priv *p = dev->data; const struct eth_cyclonev_config *config = dev->config; uint32_t irq_status = 0; uint32_t irq_status_emac = 0; irq_status = sys_read32(EMAC_DMAGRP_STATUS_ADDR(p->base_addr)) & p->interrupt_mask; irq_status_emac = sys_read32(EMAC_GMAC_INT_STAT_ADDR(p->base_addr)); LOG_DBG("DMA_IRQ_STATUS = 0x%08x, emac: 0x%08x", irq_status, irq_status_emac); if (irq_status & EMAC_DMA_INT_EN_NIE_SET_MSK) { sys_write32(EMAC_DMA_INT_EN_NIE_SET_MSK, EMAC_DMAGRP_STATUS_ADDR(p->base_addr)); } if (irq_status & EMAC_DMA_INT_EN_TIE_SET_MSK) { p->txints++; eth_cyclonev_tx_release(p); /* Clear the selected ETHERNET DMA bit(s) */ sys_write32(EMAC_DMA_INT_EN_TIE_SET_MSK, EMAC_DMAGRP_STATUS_ADDR(p->base_addr)); } if (irq_status & EMAC_DMA_INT_EN_RIE_SET_MSK) { p->rxints++; eth_cyclonev_receive(p); /* Clear the selected ETHERNET DMA bit(s) */ sys_write32(EMAC_DMA_INT_EN_RIE_SET_MSK, EMAC_DMAGRP_STATUS_ADDR(p->base_addr)); } if (irq_status_emac & EMAC_GMAC_INT_STAT_RGSMIIIS_SET_MSK) { /* Clear the selected ETHERNET GMAC bit(s) */ uint32_t regval = sys_read32(GMACGRP_CONTROL_STATUS_ADDR(p->base_addr)); if (EMAC_GMAC_MII_CTL_STAT_LNKSTS_GET(regval)) { LOG_INF("Link is up"); } else { LOG_INF("Link is down"); return; } if (EMAC_GMAC_MII_CTL_STAT_LNKMOD_GET(regval)) { LOG_INF("Full 
duplex"); } else { LOG_INF("Half duplex"); } switch (EMAC_GMAC_MII_CTL_STAT_LNKSPEED_GET(regval)) { case 0: LOG_INF("Link Speed 2.5MHz"); break; case 1: LOG_INF("Link Speed 25MHz"); break; case 2: LOG_INF("Link Speed 125MHz"); break; default: LOG_ERR("LNKSPEED_GET_ERROR"); break; } if (p->initialised) { uint32_t cfg_reg_set; cfg_reg_set = sys_read32(GMACGRP_MAC_CONFIG_ADDR(p->base_addr)); if (eth_cyclonev_stop(dev) == -1) { LOG_ERR("Couldn't stop device: %s", dev->name); return; } set_mac_conf_status(config->emac_index, &cfg_reg_set, p); sys_write32(cfg_reg_set, GMACGRP_MAC_CONFIG_ADDR(p->base_addr)); eth_cyclonev_start(dev); } } } /** * @brief Receive packet function (IRQ) * In the event of receive completion interrupt, this function * copies data from buffer to necessary net stack structures * performs general error checking and returns descriptor to hardware. * * @param p Pointer to device structure * */ static void eth_cyclonev_receive(struct eth_cyclonev_priv *p) { struct eth_cyclonev_dma_desc *rx_desc; struct net_pkt *pkt; uint32_t index, frame_length, rx_search, wrap, data_remaining, last_desc_index, buf_size; index = p->rx_current_desc_number; rx_desc = &p->rx_desc_ring[index]; while (!(rx_desc->status & ETH_DMARXDESC_OWN)) { LOG_DBG("RDES0[%d] = 0x%08x", index, rx_desc->status); /* Look for FS bit */ if (!(rx_desc->status & ETH_DMARXDESC_FS)) { LOG_ERR("Unexpected missing FS bit"); rx_desc->status |= ETH_DMARXDESC_OWN; goto cont; } /* Look for EOF bit, save frame length including multiple * buffers and index of last descriptor */ rx_search = index; wrap = index; do { rx_desc = &p->rx_desc_ring[rx_search]; /* Frame length */ frame_length = data_remaining = (ETH_DMARXDESC_FL & rx_desc->status) >> 16; last_desc_index = rx_search; if (!(rx_desc->status & ETH_DMARXDESC_LS)) { INC_WRAP(rx_search, NB_RX_DESCS); if (rx_search == wrap) { LOG_ERR("Couldn't find EOF bit!"); rx_desc = &p->rx_desc_ring[index]; rx_desc->status |= ETH_DMARXDESC_OWN; goto cont; } } } 
while (!(rx_desc->status & ETH_DMARXDESC_LS)); LOG_DBG("Frame length = %d, Last descriptor = %d", frame_length, last_desc_index); p->rx_current_desc_number = last_desc_index; /* Allocate packet with buffer */ pkt = net_pkt_rx_alloc_with_buffer(p->iface, frame_length, AF_UNSPEC, 0, K_NO_WAIT); if (!pkt) { LOG_ERR("net_pkt_rx_alloc_with_buffer() failed"); eth_stats_update_errors_rx(p->iface); } /* Copy data from multiple buffers and descriptors */ rx_search = index; wrap = index; do { rx_desc = &p->rx_desc_ring[rx_search]; if (data_remaining < ETH_BUFFER_SIZE) { buf_size = data_remaining; } else { buf_size = ETH_BUFFER_SIZE; } if (pkt) { net_pkt_write(pkt, &p->rx_buf[rx_search * ETH_BUFFER_SIZE], buf_size); } data_remaining -= buf_size; rx_desc->status |= ETH_DMARXDESC_OWN; if (last_desc_index != rx_search) { INC_WRAP(rx_search, NB_RX_DESCS); if (rx_search == wrap) { LOG_ERR("Couldn't find last descriptor! Data remaining: %d", data_remaining); goto cont; } if (rx_search == last_desc_index) { /* One more iteration */ rx_desc = &p->rx_desc_ring[rx_search]; if (data_remaining < ETH_BUFFER_SIZE) { buf_size = data_remaining; } else { buf_size = ETH_BUFFER_SIZE; } if (pkt) { net_pkt_write( pkt, &p->rx_buf[rx_search * ETH_BUFFER_SIZE], buf_size); } data_remaining -= buf_size; rx_desc->status |= ETH_DMARXDESC_OWN; } } } while (last_desc_index != rx_search); /* Hand-over packet into IP stack */ if (pkt) { if (net_recv_data(p->iface, pkt) < 0) { LOG_ERR("RX packet hand-over to IP stack failed"); net_pkt_unref(pkt); } LOG_DBG("Received packet %p, len %d", pkt, frame_length); } cont: p->rx_current_desc_number++; if (p->rx_current_desc_number == NB_RX_DESCS) { p->rx_current_desc_number = 0; } index = p->rx_current_desc_number; rx_desc = &p->rx_desc_ring[index]; } } /** * @brief Release tx function * Main purpose of its function is to track current descriptor number * and give back succeding tx semaphore when it have been used. 
* * @param p Pointer to device structure */ static void eth_cyclonev_tx_release(struct eth_cyclonev_priv *p) { unsigned int d_idx; struct eth_cyclonev_dma_desc *d; uint32_t des3_val; for (d_idx = p->tx_tail; d_idx != p->tx_current_desc_number; INC_WRAP(d_idx, NB_TX_DESCS), k_sem_give(&p->free_tx_descs)) { d = &p->tx_desc_ring[d_idx]; des3_val = d->status; LOG_DBG("TDES3[%d] = 0x%08x", d_idx, des3_val); /* stop here if hardware still owns it */ if (des3_val & ETH_DMATXDESC_OWN) { break; } /* last packet descriptor: */ if (des3_val & ETH_DMATXDESC_LS) { /* log any errors */ if (des3_val & ETH_DMATXDESC_ES) { LOG_ERR("tx error (DES3 = 0x%08x)", des3_val); eth_stats_update_errors_tx(p->iface); } } } p->tx_tail = d_idx; } /** * @brief Sets MAC Config Register (not implemented) * Detects PHY Mode and assigns MAC Config Register * * @param instance Number of instance (0 or 1 in Cyclone V HPS) * @param mac_config_reg_settings Mac_config register mask to set * @retval updated mac_config_reg mask (>=0), -1 otherwise */ /* Configure the MAC with the speed fixed by the auto-negotiation process */ int set_mac_conf_status(int instance, uint32_t *mac_config_reg_settings, struct eth_cyclonev_priv *p) { uint16_t phy_duplex_status, phy_speed; int ret; ret = alt_eth_phy_get_duplex_and_speed(&phy_duplex_status, &phy_speed, instance, p); if (ret != 0) { LOG_ERR("alt_eth_phy_get_duplex_and_speed failure!"); return ret; } /* Set Ethernet duplex mode to Full-duplex following the auto-negotiation */ if (phy_duplex_status != 0) { *mac_config_reg_settings |= EMAC_GMACGRP_MAC_CONFIGURATION_DM_SET_MSK; } /* Set Ethernet duplex mode to Half-duplex following the auto-negotiation */ else { *mac_config_reg_settings &= ~EMAC_GMACGRP_MAC_CONFIGURATION_DM_SET_MSK; } /* Set Ethernet speed to 10M following the auto-negotiation */ if (phy_speed == 10) { *mac_config_reg_settings &= ~EMAC_GMACGRP_MAC_CONFIGURATION_FES_SET_MSK; *mac_config_reg_settings |= EMAC_GMACGRP_MAC_CONFIGURATION_PS_SET_MSK; } /* Set 
Ethernet speed to 100M following the auto-negotiation */ if (phy_speed == 100) { *mac_config_reg_settings |= EMAC_GMACGRP_MAC_CONFIGURATION_FES_SET_MSK; *mac_config_reg_settings |= EMAC_GMACGRP_MAC_CONFIGURATION_PS_SET_MSK; } /* Set Ethernet speed to 1G following the auto-negotiation */ if (phy_speed == 1000) { *mac_config_reg_settings &= ~EMAC_GMACGRP_MAC_CONFIGURATION_PS_SET_MSK; } return 0; } /** * @brief Hardware initialisation function * Performs EMAC HPS interface initialisation, DMA initialisation, * EMAC initialisation and configuration. See: * path_to_url * www/programmable/us/en/pdfs/literature/hb/cyclone-v/cv_54001.pdf p.1252-54 * * @param dev Pointer to device structure * @retval 0 if successful, -1 otherwise */ int eth_cyclonev_probe(const struct device *dev) { struct eth_cyclonev_priv *p = dev->data; const struct eth_cyclonev_config *config = dev->config; uint32_t tmpreg = 0, interrupt_mask; uint32_t mac_config_reg_settings = 0; int ret; p->base_addr = (mem_addr_t)config->base; p->running = 0; p->initialised = 0; /* EMAC HPS Interface Initialization */ /* Reset the EMAC */ eth_cyclonev_reset(config->emac_index); /* Reset the PHY */ ret = alt_eth_phy_reset(config->emac_index, p); if (ret != 0) { LOG_ERR("alt_eth_phy_reset failure!\n"); return ret; } /* Configure the PHY */ ret = alt_eth_phy_config(config->emac_index, p); if (ret != 0) { LOG_ERR("alt_eth_phy_config failure!\n"); return ret; } /* Read HW feature register */ p->feature = sys_read32(EMAC_DMA_HW_FEATURE_ADDR(p->base_addr)); /* DMA Initialisation */ /* 1. Provide a software reset to reset all of the EMAC internal registers and *logic. (DMA Register 0 (BusMode Register) bit 0). * 2. Wait for the completion of the reset process (poll bit 0 of the DMA *Register 0 (Bus Mode Register), which is only cleared after the reset *operation is completed). */ ret = eth_cyclonev_software_reset(config->emac_index, p); if (ret != 0) { LOG_ERR("eth_cyclonev_software_reset failure!\n"); return ret; } /* 4. 
Program the following fields to initialize the Bus Mode Register by * setting values in DMA Register 0 (Bus Mode Register): */ sys_write32((tmpreg | EMAC_DMA_MODE_FB_SET_MSK /* Fixed Burst */ ), EMAC_DMAGRP_BUS_MODE_ADDR(p->base_addr)); /* 5. Program the interface options in Register 10 (AXI Bus Mode * Register). If fixed burst-length is enabled, then select the maximum * burst-length possible on the bus (bits[7:1]).(58) */ tmpreg = sys_read32(EMAC_DMAGRP_AXI_BUS_MODE_ADDR(p->base_addr)); sys_write32( tmpreg | EMAC_DMAGRP_AXI_BUS_MODE_BLEN16_SET_MSK, EMAC_DMAGRP_AXI_BUS_MODE_ADDR(p->base_addr)); /* Set Burst Length = 16 */ /* 6. Create a proper descriptor chain for transmit and receive. In addition, * ensure that the receive descriptors are owned by DMA (bit 31 of descriptor * should be set). * 7. Make sure that your software creates three or more different transmit or * receive descriptors in the chain before reusing any of the descriptors * 8. Initialize receive and transmit descriptor list address with the base * address of the transmit and receive descriptor (Register 3 (Receive * Descriptor List Address Register) and Register 4 (Transmit Descriptor List * Address Register) respectively). */ eth_cyclonev_setup_rxdesc(p); eth_cyclonev_setup_txdesc(p); /* 9. Program the following fields to initialize the mode of operation in * Register 6 (Operation Mode Register): */ sys_write32((0 | EMAC_DMAGRP_OPERATION_MODE_TSF_SET_MSK /* Transmit Store and Forward */ | EMAC_DMAGRP_OPERATION_MODE_RSF_SET_MSK /* Receive Store and Forward */ | EMAC_DMAGRP_OPERATION_MODE_FTF_SET_MSK /* Receive Store and Forward */ ), EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr)); /* 10.Clear the interrupt requests, by writing to those bits of the status * register (interrupt bits only) that are set. For example, by writing 1 into * bit 16, the normal interrupt summary clears this bit (DMA Register 5 * (Status Register)). 
*/ interrupt_mask = EMAC_DMA_INT_EN_NIE_SET_MSK | EMAC_DMA_INT_EN_RIE_SET_MSK | EMAC_DMA_INT_EN_TIE_SET_MSK; p->interrupt_mask = interrupt_mask; /* Clear the selected ETHERNET DMA bit(s) */ sys_write32(interrupt_mask, EMAC_DMAGRP_STATUS_ADDR(p->base_addr)); /* 11.Enable the interrupts by programming Register 7 (Interrupt Enable * Register). */ sys_set_bits(EMAC_DMA_INT_EN_ADDR(p->base_addr), interrupt_mask); /* 12.Read Register 11 (AHB or AXI Status) to confirm that * all previous transactions are complete. */ if (sys_read32(EMAC_DMAGRP_AHB_OR_AXI_STATUS_ADDR(p->base_addr)) != 0) { LOG_ERR("AHB_OR_AXI_STATUS Fail!\n"); return -1; } /* EMAC Initialization and Configuration */ /* 1. Program the GMII Address Register (offset 0x10) for controlling the * management cycles for theexternal PHY. Bits[15:11] of the GMII Address * Register are written with the Physical Layer Address of the PHY before * reading or writing. Bit 0 indicates if the PHY is busy and is set before * reading or writing to the PHY management interface. * 2. Read the 16-bit data of the GMII Data Register from the PHY for link up, * speed of operation, and mode of operation, by specifying the appropriate * address value in bits[15:11] of the GMII Address Register. */ mac_config_reg_settings = (EMAC_GMACGRP_MAC_CONFIGURATION_IPC_SET_MSK /* Checksum Offload */ | EMAC_GMACGRP_MAC_CONFIGURATION_JD_SET_MSK /* Jabber Disable */ | EMAC_GMACGRP_MAC_CONFIGURATION_BE_SET_MSK /* Frame Burst Enable */ | EMAC_GMACGRP_MAC_CONFIGURATION_WD_SET_MSK /* Watchdog Disable */ | EMAC_GMACGRP_MAC_CONFIGURATION_TC_SET_MSK /* Enable Transmission to PHY */ ); ret = set_mac_conf_status(config->emac_index, &mac_config_reg_settings, p); if (ret != 0) { return -1; } /* 3. Provide the MAC address registers (MAC Address0 High Register * through MAC Address15 High Register and MAC Address0 Low Register * through MAC Address15 Low Register). 
*/ memcpy(p->mac_addr, eth_cyclonev_mac_addr, sizeof(p->mac_addr)); eth_cyclonev_set_mac_addr(p->mac_addr, config->emac_index, 0, p); /* 5. Program the following fields to set the appropriate filters for the * incoming frames in the MAC Frame Filter Register: * Receive All * Promiscuous mode * Hash or Perfect Filter * Unicast, multicast, broadcast, and control frames filter settings */ sys_clear_bits(EMAC_GMACGRP_MAC_FRAME_FILTER_ADDR(p->base_addr), EMAC_GMACGRP_MAC_FRAME_FILTER_PR_SET_MSK); /* Disable promiscuous mode */ /* 7. Program the Interrupt Mask Register bits, * as required and if applicable for your configuration. */ sys_set_bits(EMAC_GMAC_INT_MSK_ADDR(p->base_addr), EMAC_GMAC_INT_STAT_LPIIS_SET_MSK | /* Disable Low Power IRQ */ EMAC_GMAC_INT_STAT_TSIS_SET_MSK); /* Disable Timestamp IRQ */ /* 8. Program the appropriate fields in MAC Configuration Register to * configure receive and transmit operation modes... */ sys_write32(mac_config_reg_settings, GMACGRP_MAC_CONFIG_ADDR(p->base_addr)); LOG_DBG("func_eth_cyclonev_probe Success!\n"); return 0; } /** * @brief Start device function * Starts DMA and EMAC transmitter and receiver. See: * path_to_url * www/programmable/us/en/pdfs/literature/hb/cyclone-v/cv_54001.pdf p.1255-56 * * @param dev Pointer to device structure * @retval 0 */ static int eth_cyclonev_start(const struct device *dev) { struct eth_cyclonev_priv *p = dev->data; if (p->running) { LOG_DBG("Device already running!"); return 0; } /*6. To re-start the operation, first start the DMA and then enable * the EMAC transmitter and receiver. 
*/ /* Start the DMA */ sys_set_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr), EMAC_DMAGRP_OPERATION_MODE_ST_SET_MSK); sys_set_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr), EMAC_DMAGRP_OPERATION_MODE_SR_SET_MSK); /* Enable the EMAC transmitter and receiver */ sys_set_bits(GMACGRP_MAC_CONFIG_ADDR(p->base_addr), EMAC_GMACGRP_MAC_CONFIGURATION_TE_SET_MSK); sys_set_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr), EMAC_DMAGRP_OPERATION_MODE_FTF_SET_MSK); /* Flush Transmit FIFO */ sys_set_bits(GMACGRP_MAC_CONFIG_ADDR(p->base_addr), EMAC_GMACGRP_MAC_CONFIGURATION_RE_SET_MSK); p->running = 1; LOG_DBG("Starting Device..."); return 0; } /** * @brief Stop device function * Stops DMA and EMAC transmitter and receiver. See: * path_to_url * programmable/us/en/pdfs/literature/hb/cyclone-v/cv_54001.pdf p.1255-56 * * @param dev Pointer to device structure * @retval 0 if successful, -1 otherwise */ static int eth_cyclonev_stop(const struct device *dev) { struct eth_cyclonev_priv *p = dev->data; if (!p->running) { LOG_DBG("Device is not running!"); return 0; } /* 1. Disable the transmit DMA (if applicable), by clearing bit 13 * (Start or Stop Transmission Command) of Register 6 (Operation Mode * Register). */ sys_clear_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr), EMAC_DMAGRP_OPERATION_MODE_ST_SET_MSK); /* 3. Disable the EMAC transmitter and EMAC receiver by clearing Bit 3 * (TE) and Bit 2 (RE) in Register 0 (MAC Configuration Register). */ sys_clear_bits(GMACGRP_MAC_CONFIG_ADDR(p->base_addr), EMAC_GMACGRP_MAC_CONFIGURATION_TE_SET_MSK); sys_set_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr), EMAC_DMAGRP_OPERATION_MODE_FTF_SET_MSK); /* Flush Transmit FIFO */ sys_clear_bits(GMACGRP_MAC_CONFIG_ADDR(p->base_addr), EMAC_GMACGRP_MAC_CONFIGURATION_RE_SET_MSK); /* 4. Disable the receive DMA (if applicable), after making sure that the data * in the RX FIFO buffer is transferred to the system memory * (by reading Register 9 (Debug Register). 
*/ sys_clear_bits(EMAC_DMAGRP_OPERATION_MODE_ADDR(p->base_addr), EMAC_DMAGRP_OPERATION_MODE_SR_SET_MSK); /* 5. Make sure that both the TX FIFO buffer and RX FIFO buffer are empty. */ if (EMAC_DMAGRP_DEBUG_RXFSTS_GET( sys_read32(EMAC_DMAGRP_DEBUG_ADDR(p->base_addr))) != 0x0) { return -1; } p->running = 0; LOG_DBG("Stopping Device..."); return 0; } const struct ethernet_api eth_cyclonev_api = {.iface_api.init = eth_cyclonev_iface_init, .get_capabilities = eth_cyclonev_caps, .send = eth_cyclonev_send, .start = eth_cyclonev_start, .stop = eth_cyclonev_stop, .set_config = eth_cyclonev_set_config}; #define CYCLONEV_ETH_INIT(inst) \ static struct eth_cyclonev_priv eth_cyclonev_##inst##_data; \ static void eth_cyclonev_##inst##_irq_config(void); \ \ static const struct eth_cyclonev_config eth_cyclonev_##inst##_cfg = { \ .base = (uint8_t *)(DT_INST_REG_ADDR(inst)), \ .size = DT_INST_REG_SIZE(inst), \ .emac_index = DT_INST_PROP(inst, emac_index), \ .irq_config = eth_cyclonev_##inst##_irq_config, \ }; \ ETH_NET_DEVICE_DT_INST_DEFINE(inst, eth_cyclonev_probe, NULL, \ &eth_cyclonev_##inst##_data, \ &eth_cyclonev_##inst##_cfg, \ CONFIG_ETH_INIT_PRIORITY, \ &eth_cyclonev_api, \ NET_ETH_MTU); \ \ static void eth_cyclonev_##inst##_irq_config(void) \ { \ IRQ_CONNECT(DT_INST_IRQN(inst), \ DT_INST_IRQ(inst, priority), eth_cyclonev_isr, \ DEVICE_DT_INST_GET(inst), \ 0); \ irq_enable(DT_INST_IRQN(inst)); \ DT_INST_FOREACH_STATUS_OKAY(CYCLONEV_ETH_INIT) ```
/content/code_sandbox/drivers/ethernet/eth_cyclonev.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,112
```c /* * Driver for Synopsys DesignWare MAC * * * * STM32H7X specific glue. */ #define LOG_MODULE_NAME dwmac_plat #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); /* be compatible with the HAL-based driver here */ #define DT_DRV_COMPAT st_stm32_ethernet #include <sys/types.h> #include <zephyr/kernel.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth.h> #include <zephyr/drivers/clock_control.h> #include <zephyr/drivers/clock_control/stm32_clock_control.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/irq.h> #include "eth_dwmac_priv.h" PINCTRL_DT_INST_DEFINE(0); static const struct pinctrl_dev_config *eth0_pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0); static const struct stm32_pclken pclken = { .bus = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bus), .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, stmmaceth, bits), }; static const struct stm32_pclken pclken_tx = { .bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bus), .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_tx, bits), }; static const struct stm32_pclken pclken_rx = { .bus = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bus), .enr = DT_INST_CLOCKS_CELL_BY_NAME(0, mac_clk_rx, bits), }; int dwmac_bus_init(struct dwmac_priv *p) { uint32_t reg_addr, reg_val; int ret; p->clock = DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE); if (!device_is_ready(p->clock)) { LOG_ERR("clock control device not ready"); return -ENODEV; } ret = clock_control_on(p->clock, (clock_control_subsys_t)&pclken); ret |= clock_control_on(p->clock, (clock_control_subsys_t)&pclken_tx); ret |= clock_control_on(p->clock, (clock_control_subsys_t)&pclken_rx); if (ret) { LOG_ERR("Failed to enable ethernet clock"); return -EIO; } ret = pinctrl_apply_state(eth0_pcfg, PINCTRL_STATE_DEFAULT); if (ret < 0) { LOG_ERR("Could not configure ethernet pins"); return ret; } /* set SYSCFGEN in RCC_APB4ENR */ reg_addr = DT_REG_ADDR(DT_INST(0, st_stm32h7_rcc)) + 0xf4; reg_val = sys_read32(reg_addr); 
sys_write32(reg_val | BIT(1), reg_addr); /* set RMII mode in SYSCFG_PMCR */ reg_addr = 0x58000404; /* no DT node? */ reg_val = sys_read32(reg_addr); sys_write32(reg_val | 0x03800000, reg_addr); p->base_addr = DT_INST_REG_ADDR(0); return 0; } #if defined(CONFIG_NOCACHE_MEMORY) #define __desc_mem __nocache __aligned(4) #else #error "missing memory attribute for descriptors" #endif /* Descriptor rings in uncached memory */ static struct dwmac_dma_desc dwmac_tx_descs[NB_TX_DESCS] __desc_mem; static struct dwmac_dma_desc dwmac_rx_descs[NB_RX_DESCS] __desc_mem; void dwmac_platform_init(struct dwmac_priv *p) { p->tx_descs = dwmac_tx_descs; p->rx_descs = dwmac_rx_descs; /* basic configuration for this platform */ REG_WRITE(MAC_CONF, MAC_CONF_PS | MAC_CONF_FES | MAC_CONF_DM); REG_WRITE(DMA_SYSBUS_MODE, DMA_SYSBUS_MODE_AAL | DMA_SYSBUS_MODE_FB); /* set up IRQs (still masked for now) */ IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), dwmac_isr, DEVICE_DT_INST_GET(0), 0); irq_enable(DT_INST_IRQN(0)); /* create MAC address */ gen_random_mac(p->mac_addr, 0x00, 0x80, 0xE1); } /* Our private device instance */ static struct dwmac_priv dwmac_instance; ETH_NET_DEVICE_DT_INST_DEFINE(0, dwmac_probe, NULL, &dwmac_instance, NULL, CONFIG_ETH_INIT_PRIORITY, &dwmac_api, NET_ETH_MTU); ```
/content/code_sandbox/drivers/ethernet/eth_dwmac_stm32h7x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,008
```c /* * */ #define DT_DRV_COMPAT nxp_s32_gmac #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(nxp_s32_eth, CONFIG_ETHERNET_LOG_LEVEL); #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/drivers/pinctrl.h> #include <zephyr/net/ethernet.h> #include <zephyr/net/net_if.h> #include <zephyr/net/net_pkt.h> #include <zephyr/net/phy.h> #include <ethernet/eth_stats.h> #include <soc.h> #include <Gmac_Ip.h> #include <Gmac_Ip_Hw_Access.h> #include <Gmac_Ip_Irq.h> #include <Clock_Ip.h> #include "eth.h" #define ETH_NXP_S32_BUF_TIMEOUT K_MSEC(20) #define ETH_NXP_S32_DMA_TX_TIMEOUT K_MSEC(20) #define ETH_NXP_S32_MAC_ADDR_LEN 6U #define FREESCALE_OUI_B0 0x00 #define FREESCALE_OUI_B1 0x04 #define FREESCALE_OUI_B2 0x9f struct eth_nxp_s32_config { uint8_t instance; uint8_t tx_ring_idx; uint8_t rx_ring_idx; uint32_t rx_irq; uint32_t tx_irq; void (*do_config)(void); const struct pinctrl_dev_config *pincfg; const struct device *phy_dev; const Gmac_CtrlConfigType ctrl_cfg; GMAC_Type *base; }; struct eth_nxp_s32_data { struct net_if *iface; uint8_t mac_addr[ETH_NXP_S32_MAC_ADDR_LEN]; uint8_t if_suspended; struct k_mutex tx_mutex; struct k_sem rx_sem; struct k_sem tx_sem; struct k_thread rx_thread; K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_NXP_S32_RX_THREAD_STACK_SIZE); }; static void eth_nxp_s32_rx_thread(void *arg1, void *unused1, void *unused2); static inline struct net_if *get_iface(struct eth_nxp_s32_data *ctx) { return ctx->iface; } static void convert_phy_to_mac_config(Gmac_Ip_ConfigType *gmac_cfg, enum phy_link_speed phy_speed) { switch (phy_speed) { case LINK_HALF_10BASE_T: gmac_cfg->Speed = GMAC_SPEED_10M; gmac_cfg->Duplex = GMAC_HALF_DUPLEX; break; case LINK_FULL_10BASE_T: gmac_cfg->Speed = GMAC_SPEED_10M; gmac_cfg->Duplex = GMAC_FULL_DUPLEX; break; case LINK_HALF_100BASE_T: gmac_cfg->Speed = GMAC_SPEED_100M; gmac_cfg->Duplex = GMAC_HALF_DUPLEX; break; case LINK_FULL_100BASE_T: gmac_cfg->Speed = GMAC_SPEED_100M; gmac_cfg->Duplex = 
GMAC_FULL_DUPLEX; break; case LINK_HALF_1000BASE_T: gmac_cfg->Speed = GMAC_SPEED_1G; gmac_cfg->Duplex = GMAC_HALF_DUPLEX; break; case LINK_FULL_1000BASE_T: __fallthrough; default: gmac_cfg->Speed = GMAC_SPEED_1G; gmac_cfg->Duplex = GMAC_FULL_DUPLEX; break; } } static void phy_link_state_changed(const struct device *pdev, struct phy_link_state *state, void *user_data) { const struct device *dev = (struct device *)user_data; const struct eth_nxp_s32_config *cfg = dev->config; struct eth_nxp_s32_data *ctx = dev->data; Gmac_Ip_ConfigType gmac_cfg; ARG_UNUSED(pdev); if (state->is_up) { /* Porting phy link config to mac */ convert_phy_to_mac_config(&gmac_cfg, state->speed); /* Set MAC configuration */ Gmac_Ip_SetSpeed(cfg->instance, gmac_cfg.Speed); cfg->base->MAC_CONFIGURATION |= GMAC_MAC_CONFIGURATION_DM(gmac_cfg.Duplex); /* net iface should be down even if PHY link state is up * till the upper network layers have suspended the iface. */ if (ctx->if_suspended) { return; } LOG_DBG("Link up"); net_eth_carrier_on(ctx->iface); } else { LOG_DBG("Link down"); net_eth_carrier_off(ctx->iface); } } static const struct device *eth_nxp_s32_get_phy(const struct device *dev) { const struct eth_nxp_s32_config *cfg = dev->config; return cfg->phy_dev; } #if defined(CONFIG_SOC_SERIES_S32K3) static int select_phy_interface(Gmac_Ip_MiiModeType mode) { uint32_t regval; switch (mode) { case GMAC_MII_MODE: regval = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(0U); break; case GMAC_RMII_MODE: regval = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(2U); break; #if (FEATURE_GMAC_RGMII_EN == 1U) case GMAC_RGMII_MODE: regval = DCM_GPR_DCMRWF1_EMAC_CONF_SEL(1U); break; #endif default: return -EINVAL; } IP_DCM_GPR->DCMRWF1 = (IP_DCM_GPR->DCMRWF1 & ~DCM_GPR_DCMRWF1_EMAC_CONF_SEL_MASK) | regval; return 0; } #else #error "SoC not supported" #endif /* CONFIG_SOC_SERIES_S32K3 */ static int eth_nxp_s32_init(const struct device *dev) { const struct eth_nxp_s32_config *cfg = dev->config; struct eth_nxp_s32_data *ctx = dev->data; 
Gmac_Ip_StatusType mac_status; Clock_Ip_StatusType clk_status; int err; err = pinctrl_apply_state(cfg->pincfg, PINCTRL_STATE_DEFAULT); if (err != 0) { return err; } /* * Currently, clock control shim driver does not support configuring clock * muxes individually, so use the HAL directly. */ clk_status = Clock_Ip_Init(&Clock_Ip_aClockConfig[CONFIG_ETH_NXP_S32_CLOCK_CONFIG_IDX]); if (clk_status != CLOCK_IP_SUCCESS) { LOG_ERR("Failed to configure clocks (%d)", clk_status); return -EIO; } /* * PHY mode selection must be done before the controller is reset, * because the interface type is latched at controller's reset */ err = select_phy_interface(cfg->ctrl_cfg.Gmac_pCtrlConfig->MiiMode); if (err != 0) { LOG_ERR("Failed to select PHY interface (%d)", err); return -EIO; } mac_status = Gmac_Ip_Init(cfg->instance, &cfg->ctrl_cfg); if (mac_status != GMAC_STATUS_SUCCESS) { LOG_ERR("Failed to initialize GMAC%d (%d)", cfg->instance, mac_status); return -EIO; } k_mutex_init(&ctx->tx_mutex); k_sem_init(&ctx->rx_sem, 0, 1); k_sem_init(&ctx->tx_sem, 0, 1); k_thread_create(&ctx->rx_thread, ctx->rx_thread_stack, K_KERNEL_STACK_SIZEOF(ctx->rx_thread_stack), eth_nxp_s32_rx_thread, (void *)dev, NULL, NULL, K_PRIO_COOP(CONFIG_ETH_NXP_S32_RX_THREAD_PRIO), 0, K_NO_WAIT); k_thread_name_set(&ctx->rx_thread, "eth_nxp_s32_rx"); if (cfg->do_config != NULL) { cfg->do_config(); } return 0; } static int eth_nxp_s32_start(const struct device *dev) { const struct eth_nxp_s32_config *cfg = dev->config; struct eth_nxp_s32_data *ctx = dev->data; struct phy_link_state state; Gmac_Ip_EnableController(cfg->instance); irq_enable(cfg->rx_irq); irq_enable(cfg->tx_irq); /* If upper layers enable the net iface then mark it as * not suspended so that PHY Link changes can have the impact */ ctx->if_suspended = false; if (cfg->phy_dev) { phy_get_link_state(cfg->phy_dev, &state); /* Enable net_iface only when Ethernet PHY link is up or else * if net_iface is enabled when link is down and tx happens * in this 
state then the used tx buffers will never be recovered back. */ if (state.is_up == true) { net_eth_carrier_on(ctx->iface); } } else { net_eth_carrier_on(ctx->iface); } LOG_DBG("GMAC%d started", cfg->instance); return 0; } static int eth_nxp_s32_stop(const struct device *dev) { const struct eth_nxp_s32_config *cfg = dev->config; struct eth_nxp_s32_data *ctx = dev->data; Gmac_Ip_StatusType status; int err = 0; irq_disable(cfg->rx_irq); irq_disable(cfg->tx_irq); /* If upper layers disable the net iface then mark it as suspended * in order to save it from the PHY link state changes */ ctx->if_suspended = true; net_eth_carrier_off(ctx->iface); status = Gmac_Ip_DisableController(cfg->instance); if (status != GMAC_STATUS_SUCCESS) { LOG_ERR("Failed to disable controller GMAC%d (%d)", cfg->instance, status); err = -EIO; } LOG_DBG("GMAC%d stopped", cfg->instance); return err; } static void eth_nxp_s32_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); const struct eth_nxp_s32_config *cfg = dev->config; struct eth_nxp_s32_data *ctx = dev->data; if (ctx->iface == NULL) { ctx->iface = iface; } ethernet_init(iface); net_if_set_link_addr(iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); LOG_INF("GMAC%d MAC address %02x:%02x:%02x:%02x:%02x:%02x", cfg->instance, ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2], ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]); /* Make sure that the net iface state is not suspended unless * upper layers explicitly stop the iface */ ctx->if_suspended = false; /* No PHY available, link is always up and MAC speed/duplex settings are fixed */ if (cfg->phy_dev == NULL) { net_if_carrier_on(iface); return; } /* * GMAC controls the PHY. If PHY is configured either as fixed * link or autoneg, the callback is executed at least once * immediately after setting it. 
*/ if (!device_is_ready(cfg->phy_dev)) { LOG_ERR("PHY device (%p) is not ready, cannot init iface", cfg->phy_dev); return; } phy_link_callback_set(cfg->phy_dev, &phy_link_state_changed, (void *)dev); } static int eth_nxp_s32_tx(const struct device *dev, struct net_pkt *pkt) { struct eth_nxp_s32_data *ctx = dev->data; const struct eth_nxp_s32_config *cfg = dev->config; size_t pkt_len = net_pkt_get_len(pkt); int res = 0; Gmac_Ip_BufferType buf; Gmac_Ip_TxInfoType tx_info; Gmac_Ip_StatusType status; Gmac_Ip_TxOptionsType tx_options = { .NoInt = FALSE, .CrcPadIns = GMAC_CRC_AND_PAD_INSERTION, .ChecksumIns = GMAC_CHECKSUM_INSERTION_PROTO_PSEUDOH }; __ASSERT(pkt, "Packet pointer is NULL"); k_mutex_lock(&ctx->tx_mutex, K_FOREVER); k_sem_reset(&ctx->tx_sem); buf.Length = (uint16_t)pkt_len; buf.Data = NULL; status = Gmac_Ip_GetTxBuff(cfg->instance, cfg->tx_ring_idx, &buf, NULL); if (status != GMAC_STATUS_SUCCESS) { LOG_ERR("Failed to get tx buffer (%d)", status); res = -ENOBUFS; goto error; } res = net_pkt_read(pkt, buf.Data, pkt_len); if (res) { LOG_ERR("Failed to copy packet to tx buffer (%d)", res); res = -ENOBUFS; goto error; } buf.Length = (uint16_t)pkt_len; status = Gmac_Ip_SendFrame(cfg->instance, cfg->tx_ring_idx, &buf, &tx_options); if (status != GMAC_STATUS_SUCCESS) { LOG_ERR("Failed to tx frame (%d)", status); res = -EIO; goto error; } /* Wait for the transmission to complete */ if (k_sem_take(&ctx->tx_sem, ETH_NXP_S32_DMA_TX_TIMEOUT) != 0) { LOG_ERR("Timeout transmitting frame"); res = -EIO; goto error; } /* Restore the buffer address pointer and clear the descriptor after the status is read */ status = Gmac_Ip_GetTransmitStatus(cfg->instance, cfg->tx_ring_idx, &buf, &tx_info); if (status != GMAC_STATUS_SUCCESS) { LOG_ERR("Failed to restore tx buffer: %s (%d) ", (status == GMAC_STATUS_BUSY ? 
"busy" : "buf not found"), status); res = -EIO; } else if (tx_info.ErrMask != 0U) { LOG_ERR("Tx frame has errors (error mask 0x%X)", tx_info.ErrMask); res = -EIO; } error: k_mutex_unlock(&ctx->tx_mutex); if (res != 0) { eth_stats_update_errors_tx(ctx->iface); } return res; } static struct net_pkt *eth_nxp_s32_get_pkt(const struct device *dev, Gmac_Ip_BufferType *buf, Gmac_Ip_RxInfoType *rx_info) { struct eth_nxp_s32_data *ctx = dev->data; struct net_pkt *pkt = NULL; int res = 0; /* Using root iface, it will be updated in net_recv_data() */ pkt = net_pkt_rx_alloc_with_buffer(ctx->iface, rx_info->PktLen, AF_UNSPEC, 0, ETH_NXP_S32_BUF_TIMEOUT); if (!pkt) { LOG_ERR("Failed to allocate rx buffer of length %u", rx_info->PktLen); goto exit; } res = net_pkt_write(pkt, buf->Data, rx_info->PktLen); if (res) { LOG_ERR("Failed to write rx frame into pkt buffer (%d)", res); net_pkt_unref(pkt); pkt = NULL; goto exit; } exit: if (!pkt) { eth_stats_update_errors_rx(get_iface(ctx)); } return pkt; } static void eth_nxp_s32_rx(const struct device *dev) { struct eth_nxp_s32_data *ctx = dev->data; const struct eth_nxp_s32_config *cfg = dev->config; struct net_pkt *pkt; int res = 0; Gmac_Ip_RxInfoType rx_info = {0}; Gmac_Ip_BufferType buf; Gmac_Ip_StatusType status; status = Gmac_Ip_ReadFrame(cfg->instance, cfg->rx_ring_idx, &buf, &rx_info); if (rx_info.ErrMask != 0U) { Gmac_Ip_ProvideRxBuff(cfg->instance, cfg->rx_ring_idx, &buf); LOG_ERR("Rx frame has errors (error mask 0x%X)", rx_info.ErrMask); } else if (status == GMAC_STATUS_SUCCESS) { pkt = eth_nxp_s32_get_pkt(dev, &buf, &rx_info); Gmac_Ip_ProvideRxBuff(cfg->instance, cfg->rx_ring_idx, &buf); if (pkt != NULL) { res = net_recv_data(get_iface(ctx), pkt); if (res < 0) { eth_stats_update_errors_rx(get_iface(ctx)); net_pkt_unref(pkt); LOG_ERR("Failed to enqueue frame into rx queue (%d)", res); } } } } static void eth_nxp_s32_rx_thread(void *arg1, void *unused1, void *unused2) { const struct device *dev = (const struct device *)arg1; 
struct eth_nxp_s32_data *ctx = dev->data; const struct eth_nxp_s32_config *cfg = dev->config; int res; int work; ARG_UNUSED(unused1); ARG_UNUSED(unused2); __ASSERT_NO_MSG(arg1 != NULL); __ASSERT_NO_MSG(ctx != NULL); while (1) { res = k_sem_take(&ctx->rx_sem, K_FOREVER); __ASSERT_NO_MSG(res == 0); work = 0; while (Gmac_Ip_IsFrameAvailable(cfg->instance, cfg->rx_ring_idx)) { eth_nxp_s32_rx(dev); if (++work == CONFIG_ETH_NXP_S32_RX_BUDGET) { /* More work to do, reschedule */ work = 0; k_yield(); } } /* All work done, re-enable rx interrupt and exit polling */ irq_enable(cfg->rx_irq); /* In case a frame arrived after last eth_nxp_s32_rx() and before irq_enable() */ if (Gmac_Ip_IsFrameAvailable(cfg->instance, cfg->rx_ring_idx)) { eth_nxp_s32_rx(dev); } } } static int eth_nxp_s32_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct eth_nxp_s32_data *ctx = dev->data; const struct eth_nxp_s32_config *cfg = dev->config; int res = 0; uint32_t regval; ARG_UNUSED(cfg); ARG_UNUSED(regval); switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: /* Set new Ethernet MAC address and register it with the upper layer */ memcpy(ctx->mac_addr, config->mac_address.addr, sizeof(ctx->mac_addr)); Gmac_Ip_SetMacAddr(cfg->instance, (const uint8_t *)ctx->mac_addr); net_if_set_link_addr(ctx->iface, ctx->mac_addr, sizeof(ctx->mac_addr), NET_LINK_ETHERNET); LOG_INF("MAC set to: %02x:%02x:%02x:%02x:%02x:%02x", ctx->mac_addr[0], ctx->mac_addr[1], ctx->mac_addr[2], ctx->mac_addr[3], ctx->mac_addr[4], ctx->mac_addr[5]); break; #if defined(CONFIG_NET_PROMISCUOUS_MODE) case ETHERNET_CONFIG_TYPE_PROMISC_MODE: regval = cfg->base->MAC_PACKET_FILTER; if (config->promisc_mode && !(regval & GMAC_MAC_PACKET_FILTER_PR_MASK)) { cfg->base->MAC_PACKET_FILTER |= GMAC_MAC_PACKET_FILTER_PR_MASK; } else if (!config->promisc_mode && (regval & GMAC_MAC_PACKET_FILTER_PR_MASK)) { cfg->base->MAC_PACKET_FILTER &= ~GMAC_MAC_PACKET_FILTER_PR_MASK; } else { res = 
-EALREADY; } break; #endif #if defined(CONFIG_ETH_NXP_S32_MULTICAST_FILTER) case ETHERNET_HW_FILTERING: if (config->filter.set) { Gmac_Ip_AddDstAddrToHashFilter(cfg->instance, config->filter.mac_address.addr); } else { Gmac_Ip_RemoveDstAddrFromHashFilter(cfg->instance, config->filter.mac_address.addr); } break; #endif default: res = -ENOTSUP; break; } return res; } static enum ethernet_hw_caps eth_nxp_s32_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return (ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T #if (FEATURE_GMAC_RGMII_EN == 1U) | ETHERNET_LINK_1000BASE_T #endif | ETHERNET_DUPLEX_SET | ETHERNET_HW_TX_CHKSUM_OFFLOAD | ETHERNET_HW_RX_CHKSUM_OFFLOAD #if defined(CONFIG_NET_VLAN) | ETHERNET_HW_VLAN #endif #if defined(CONFIG_NET_PROMISCUOUS_MODE) | ETHERNET_PROMISC_MODE #endif #if defined(CONFIG_ETH_NXP_S32_MULTICAST_FILTER) | ETHERNET_HW_FILTERING #endif ); } static void eth_nxp_s32_tx_irq(const struct device *dev) { const struct eth_nxp_s32_config *cfg = dev->config; GMAC_TxIRQHandler(cfg->instance, cfg->tx_ring_idx); } static void eth_nxp_s32_rx_irq(const struct device *dev) { const struct eth_nxp_s32_config *cfg = dev->config; GMAC_RxIRQHandler(cfg->instance, cfg->rx_ring_idx); } static const struct ethernet_api eth_api = { .iface_api.init = eth_nxp_s32_iface_init, .get_capabilities = eth_nxp_s32_get_capabilities, .get_phy = eth_nxp_s32_get_phy, .start = eth_nxp_s32_start, .stop = eth_nxp_s32_stop, .send = eth_nxp_s32_tx, .set_config = eth_nxp_s32_set_config, }; BUILD_ASSERT(((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_RX_RING_LEN) % FEATURE_GMAC_MTL_RX_FIFO_BLOCK_SIZE) == 0, "CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_RX_RING_LEN " "must be multiple of RX FIFO block size"); BUILD_ASSERT(((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_TX_RING_LEN) % FEATURE_GMAC_MTL_TX_FIFO_BLOCK_SIZE) == 0, "CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE * CONFIG_ETH_NXP_S32_TX_RING_LEN " "must be multiple of TX FIFO block 
size"); BUILD_ASSERT((CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE % FEATURE_GMAC_DATA_BUS_WIDTH_BYTES) == 0, "CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE must be multiple of the data bus width"); BUILD_ASSERT((CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE % FEATURE_GMAC_DATA_BUS_WIDTH_BYTES) == 0, "CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE must be multiple of the data bus width"); #define ETH_NXP_S32_MAC_MII(n) \ _CONCAT(_CONCAT(GMAC_, DT_INST_STRING_UPPER_TOKEN(n, phy_connection_type)), _MODE) #define ETH_NXP_S32_IRQ_INIT(n, name) \ IRQ_CONNECT(DT_INST_IRQ_BY_NAME(n, name, irq), \ DT_INST_IRQ_BY_NAME(n, name, priority), \ eth_nxp_s32_##name##_irq, \ DEVICE_DT_INST_GET(n), \ COND_CODE_1(DT_INST_IRQ_HAS_CELL(n, flags), \ (DT_INST_IRQ_BY_NAME(n, name, flags)), (0))); #define ETH_NXP_S32_INIT_CONFIG(n) \ static void eth_nxp_s32_init_config_##n(void) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ struct eth_nxp_s32_data *ctx = dev->data; \ const struct eth_nxp_s32_config *cfg = dev->config; \ \ ETH_NXP_S32_IRQ_INIT(n, tx); \ ETH_NXP_S32_IRQ_INIT(n, rx); \ \ COND_CODE_1(DT_INST_PROP(n, zephyr_random_mac_address), ( \ gen_random_mac(ctx->mac_addr, FREESCALE_OUI_B0, \ FREESCALE_OUI_B1, FREESCALE_OUI_B2); \ Gmac_Ip_SetMacAddr(cfg->instance, ctx->mac_addr); \ ), ( \ Gmac_Ip_GetMacAddr(cfg->instance, ctx->mac_addr); \ )) \ } #define ETH_NXP_S32_RX_CALLBACK(n) \ static void eth_nxp_s32_rx_callback_##n(uint8_t inst, uint8_t chan) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ struct eth_nxp_s32_data *ctx = dev->data; \ const struct eth_nxp_s32_config *cfg = dev->config; \ \ ARG_UNUSED(inst); \ ARG_UNUSED(chan); \ \ /* Rx irq will be re-enabled from Rx thread */ \ irq_disable(cfg->rx_irq); \ k_sem_give(&ctx->rx_sem); \ } #define ETH_NXP_S32_TX_CALLBACK(n) \ static void eth_nxp_s32_tx_callback_##n(uint8_t inst, uint8_t chan) \ { \ const struct device *dev = DEVICE_DT_INST_GET(n); \ struct eth_nxp_s32_data *ctx = dev->data; \ \ ARG_UNUSED(inst); \ ARG_UNUSED(chan); \ \ 
k_sem_give(&ctx->tx_sem); \ } #define _ETH_NXP_S32_RING(n, name, len, buf_size) \ static Gmac_Ip_BufferDescriptorType eth_nxp_s32_##name##ring_desc_##n[len] \ __nocache __aligned(FEATURE_GMAC_BUFFDESCR_ALIGNMENT_BYTES); \ static uint8_t eth_nxp_s32_##name##ring_buf_##n[len * buf_size] \ __nocache __aligned(FEATURE_GMAC_BUFF_ALIGNMENT_BYTES) #define ETH_NXP_S32_RX_RING(n) \ _ETH_NXP_S32_RING(n, rx, \ CONFIG_ETH_NXP_S32_RX_RING_LEN, \ CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE) #define ETH_NXP_S32_TX_RING(n) \ _ETH_NXP_S32_RING(n, tx, \ CONFIG_ETH_NXP_S32_TX_RING_LEN, \ CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE) #define ETH_NXP_S32_MAC_TXTIMESHAPER_CONFIG(n) \ static const Gmac_Ip_TxTimeAwareShaper eth_nxp_s32_mac_txtimeshaper_config_##n = {\ .GateControlList = NULL, \ } #define ETH_NXP_S32_MAC_RXRING_CONFIG(n) \ static const Gmac_Ip_RxRingConfigType eth_nxp_s32_mac_rxring_config_##n = { \ .RingDesc = eth_nxp_s32_rxring_desc_##n, \ .Callback = eth_nxp_s32_rx_callback_##n, \ .Buffer = eth_nxp_s32_rxring_buf_##n, \ .Interrupts = (uint32_t)GMAC_CH_INTERRUPT_RI, \ .BufferLen = CONFIG_ETH_NXP_S32_RX_RING_BUF_SIZE, \ .RingSize = CONFIG_ETH_NXP_S32_RX_RING_LEN, \ .PriorityMask = 0U, \ .DmaBurstLength = 32U, \ } #define ETH_NXP_S32_MAC_TXRING_CONFIG(n) \ static const Gmac_Ip_TxRingConfigType eth_nxp_s32_mac_txring_config_##n = { \ .Weight = 0U, \ .IdleSlopeCredit = 0U, \ .SendSlopeCredit = 0U, \ .HiCredit = 0U, \ .LoCredit = 0, \ .RingDesc = eth_nxp_s32_txring_desc_##n, \ .Callback = eth_nxp_s32_tx_callback_##n, \ .Buffer = eth_nxp_s32_txring_buf_##n, \ .Interrupts = (uint32_t)GMAC_CH_INTERRUPT_TI, \ .BufferLen = CONFIG_ETH_NXP_S32_TX_RING_BUF_SIZE, \ .RingSize = CONFIG_ETH_NXP_S32_TX_RING_LEN, \ .PriorityMask = 0U, \ .DmaBurstLength = 32U, \ .QueueOpMode = GMAC_OP_MODE_DCB_GEN, \ } #define ETH_NXP_S32_MAC_PKT_FILTER(n) \ ((uint32_t)(0U \ COND_CODE_1(CONFIG_ETH_NXP_S32_MULTICAST_FILTER, \ (|GMAC_PKT_FILTER_HASH_MULTICAST), \ (|GMAC_PKT_FILTER_PASS_ALL_MULTICAST)) \ )) #define 
ETH_NXP_S32_MAC_CONF(n) \ ((uint32_t)(GMAC_MAC_CONFIG_CRC_STRIPPING \ | GMAC_MAC_CONFIG_AUTO_PAD \ | GMAC_MAC_CONFIG_CHECKSUM_OFFLOAD \ IF_ENABLED(CONFIG_ETH_NXP_S32_LOOPBACK, \ (|GMAC_MAC_CONFIG_LOOPBACK)) \ )) #define ETH_NXP_S32_MAC_CONFIG(n) \ static const Gmac_Ip_ConfigType eth_nxp_s32_mac_config_##n = { \ .RxRingCount = 1U, \ .TxRingCount = 1U, \ .Interrupts = 0U, \ .Callback = NULL, \ .TxSchedAlgo = GMAC_SCHED_ALGO_SP, \ .MiiMode = ETH_NXP_S32_MAC_MII(n), \ .Speed = GMAC_SPEED_100M, \ .Duplex = GMAC_FULL_DUPLEX, \ .MacConfig = ETH_NXP_S32_MAC_CONF(n), \ .MacPktFilterConfig = ETH_NXP_S32_MAC_PKT_FILTER(n), \ .EnableCtrl = false, \ } #define ETH_NXP_S32_MAC_ADDR(n) \ BUILD_ASSERT(DT_INST_PROP(n, zephyr_random_mac_address) || \ NODE_HAS_VALID_MAC_ADDR(DT_DRV_INST(n)), \ "eth_nxp_s32_gmac requires either a fixed or random MAC address"); \ static const uint8_t eth_nxp_s32_mac_addr_##n[ETH_NXP_S32_MAC_ADDR_LEN] = \ DT_INST_PROP_OR(n, local_mac_address, {0U}) #define ETH_NXP_S32_MAC_STATE(n) Gmac_Ip_StateType eth_nxp_s32_mac_state_##n #define ETH_NXP_S32_CTRL_CONFIG(n) \ { \ .Gmac_pCtrlState = &eth_nxp_s32_mac_state_##n, \ .Gmac_pCtrlConfig = &eth_nxp_s32_mac_config_##n, \ .Gmac_paCtrlRxRingConfig = &eth_nxp_s32_mac_rxring_config_##n, \ .Gmac_paCtrlTxRingConfig = &eth_nxp_s32_mac_txring_config_##n, \ .Gmac_pau8CtrlPhysAddr = &eth_nxp_s32_mac_addr_##n[0], \ .Gmac_pCtrlTxTimeAwareShaper = &eth_nxp_s32_mac_txtimeshaper_config_##n,\ } #define ETH_NXP_S32_HW_INSTANCE_CHECK(i, n) \ ((DT_INST_REG_ADDR(n) == IP_GMAC_##i##_BASE) ? 
i : 0) #define ETH_NXP_S32_HW_INSTANCE(n) \ LISTIFY(__DEBRACKET FEATURE_GMAC_NUM_INSTANCES, \ ETH_NXP_S32_HW_INSTANCE_CHECK, (|), n) #define ETH_NXP_S32_PHY_DEV(n) \ (COND_CODE_1(DT_INST_NODE_HAS_PROP(n, phy_handle), \ (DEVICE_DT_GET(DT_INST_PHANDLE(n, phy_handle))), NULL)) #define ETH_NXP_S32_DEVICE(n) \ ETH_NXP_S32_TX_CALLBACK(n) \ ETH_NXP_S32_RX_CALLBACK(n) \ ETH_NXP_S32_INIT_CONFIG(n) \ ETH_NXP_S32_RX_RING(n); \ ETH_NXP_S32_TX_RING(n); \ ETH_NXP_S32_MAC_STATE(n); \ ETH_NXP_S32_MAC_TXTIMESHAPER_CONFIG(n); \ ETH_NXP_S32_MAC_RXRING_CONFIG(n); \ ETH_NXP_S32_MAC_TXRING_CONFIG(n); \ ETH_NXP_S32_MAC_CONFIG(n); \ ETH_NXP_S32_MAC_ADDR(n); \ PINCTRL_DT_INST_DEFINE(n); \ \ static const struct eth_nxp_s32_config eth_nxp_s32_config_##n = { \ .instance = ETH_NXP_S32_HW_INSTANCE(n), \ .base = (GMAC_Type *)DT_INST_REG_ADDR(n), \ .ctrl_cfg = ETH_NXP_S32_CTRL_CONFIG(n), \ .do_config = eth_nxp_s32_init_config_##n, \ .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \ .phy_dev = ETH_NXP_S32_PHY_DEV(n), \ .rx_irq = DT_INST_IRQ_BY_NAME(n, rx, irq), \ .tx_irq = DT_INST_IRQ_BY_NAME(n, tx, irq), \ .tx_ring_idx = 0U, \ .rx_ring_idx = 0U, \ }; \ \ static struct eth_nxp_s32_data eth_nxp_s32_data_##n; \ \ ETH_NET_DEVICE_DT_INST_DEFINE(n, \ eth_nxp_s32_init, \ NULL, \ &eth_nxp_s32_data_##n, \ &eth_nxp_s32_config_##n, \ CONFIG_ETH_INIT_PRIORITY, \ &eth_api, \ NET_ETH_MTU); DT_INST_FOREACH_STATUS_OKAY(ETH_NXP_S32_DEVICE) ```
/content/code_sandbox/drivers/ethernet/eth_nxp_s32_gmac.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
7,782
```objective-c /* * Xilinx Processor System Gigabit Ethernet controller (GEM) driver * * Driver private data declarations * */ #ifndef _ZEPHYR_DRIVERS_ETHERNET_ETH_XLNX_GEM_PRIV_H_ #define _ZEPHYR_DRIVERS_ETHERNET_ETH_XLNX_GEM_PRIV_H_ #define DT_DRV_COMPAT xlnx_gem #include <zephyr/kernel.h> #include <zephyr/types.h> #include <zephyr/net/net_pkt.h> #include <zephyr/irq.h> #include "phy_xlnx_gem.h" #define ETH_XLNX_BUFFER_ALIGNMENT 4 /* RX/TX buffer alignment (in bytes) */ /* Buffer descriptor (BD) related defines */ /* Receive Buffer Descriptor bits & masks: comp. Zynq-7000 TRM, Table 16-2. */ /* * Receive Buffer Descriptor address word: * [31 .. 02] Mask for effective buffer address -> excludes [1..0] * [01] Wrap bit, last BD in RX BD ring * [00] BD used bit */ #define ETH_XLNX_GEM_RXBD_WRAP_BIT 0x00000002 #define ETH_XLNX_GEM_RXBD_USED_BIT 0x00000001 #define ETH_XLNX_GEM_RXBD_BUFFER_ADDR_MASK 0xFFFFFFFC /* * Receive Buffer Descriptor control word: * [31] Broadcast detected * [30] Multicast hash match detected * [29] Unicast hash match detected * [27] Specific address match detected * [26 .. 25] Bits indicating which specific address register was matched * [24] this bit has different semantics depending on whether RX checksum * offloading is enabled or not * [23 .. 22] These bits have different semantics depending on whether RX check- * sum offloading is enabled or not * [21] VLAN tag (type ID 0x8100) detected * [20] Priority tag: VLAN tag (type ID 0x8100) and null VLAN identifier * detected * [19 .. 17] VLAN priority * [16] Canonical format indicator bit * [15] End-of-frame bit * [14] Start-of-frame bit * [13] FCS status bit for FCS ignore mode * [12 .. 
00] Data length of received frame */ #define ETH_XLNX_GEM_RXBD_BCAST_BIT 0x80000000 #define ETH_XLNX_GEM_RXBD_MCAST_HASH_MATCH_BIT 0x40000000 #define ETH_XLNX_GEM_RXBD_UCAST_HASH_MATCH_BIT 0x20000000 #define ETH_XLNX_GEM_RXBD_SPEC_ADDR_MATCH_BIT 0x08000000 #define ETH_XLNX_GEM_RXBD_SPEC_ADDR_MASK 0x00000003 #define ETH_XLNX_GEM_RXBD_SPEC_ADDR_SHIFT 25 #define ETH_XLNX_GEM_RXBD_BIT24 0x01000000 #define ETH_XLNX_GEM_RXBD_BITS23_22_MASK 0x00000003 #define ETH_XLNX_GEM_RXBD_BITS23_22_SHIFT 22 #define ETH_XLNX_GEM_RXBD_VLAN_TAG_DETECTED_BIT 0x00200000 #define ETH_XLNX_GEM_RXBD_PRIO_TAG_DETECTED_BIT 0x00100000 #define ETH_XLNX_GEM_RXBD_VLAN_PRIORITY_MASK 0x00000007 #define ETH_XLNX_GEM_RXBD_VLAN_PRIORITY_SHIFT 17 #define ETH_XLNX_GEM_RXBD_CFI_BIT 0x00010000 #define ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT 0x00008000 #define ETH_XLNX_GEM_RXBD_START_OF_FRAME_BIT 0x00004000 #define ETH_XLNX_GEM_RXBD_FCS_STATUS_BIT 0x00002000 #define ETH_XLNX_GEM_RXBD_FRAME_LENGTH_MASK 0x00001FFF /* Transmit Buffer Descriptor bits & masks: comp. Zynq-7000 TRM, Table 16-3. */ /* * Transmit Buffer Descriptor control word: * [31] BD used marker * [30] Wrap bit, last BD in TX BD ring * [29] Retry limit exceeded * [27] TX frame corruption due to AHB/AXI error, HRESP errors or buffers * exhausted mid-frame * [26] Late collision, TX error detected * [22 .. 20] Transmit IP/TCP/UDP checksum generation offload error bits * [16] No CRC appended by MAC * [15] Last buffer bit, indicates end of current TX frame * [13 .. 
00] Data length in the BD's associated buffer */ #define ETH_XLNX_GEM_TXBD_USED_BIT 0x80000000 #define ETH_XLNX_GEM_TXBD_WRAP_BIT 0x40000000 #define ETH_XLNX_GEM_TXBD_RETRY_BIT 0x20000000 #define ETH_XLNX_GEM_TXBD_TX_FRAME_CORRUPT_BIT 0x08000000 #define ETH_XLNX_GEM_TXBD_LATE_COLLISION_BIT 0x04000000 #define ETH_XLNX_GEM_TXBD_CKSUM_OFFLOAD_ERROR_MASK 0x00000007 #define ETH_XLNX_GEM_TXBD_CKSUM_OFFLOAD_ERROR_SHIFT 20 #define ETH_XLNX_GEM_TXBD_NO_CRC_BIT 0x00010000 #define ETH_XLNX_GEM_TXBD_LAST_BIT 0x00008000 #define ETH_XLNX_GEM_TXBD_LEN_MASK 0x00003FFF #define ETH_XLNX_GEM_TXBD_ERR_MASK 0x3C000000 #define ETH_XLNX_GEM_CKSUM_NO_ERROR 0x00000000 #define ETH_XLNX_GEM_CKSUM_VLAN_HDR_ERROR 0x00000001 #define ETH_XLNX_GEM_CKSUM_SNAP_HDR_ERROR 0x00000002 #define ETH_XLNX_GEM_CKSUM_IP_TYPE_OR_LEN_ERROR 0x00000003 #define ETH_XLNX_GEM_CKSUM_NOT_VLAN_SNAP_IP_ERROR 0x00000004 #define ETH_XLNX_GEM_CKSUM_UNSUPP_PKT_FRAG_ERROR 0x00000005 #define ETH_XLNX_GEM_CKSUM_NOT_TCP_OR_UDP_ERROR 0x00000006 #define ETH_XLNX_GEM_CKSUM_PREMATURE_END_ERROR 0x00000007 #if defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000) /* * Zynq-7000 TX clock configuration: * * GEMx_CLK_CTRL (SLCR) registers: * [25 .. 20] Reference clock divisor 1 * [13 .. 08] Reference clock divisor 0 * [00] Clock active bit */ #define ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK 0x0000003F #define ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT 20 #define ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT 8 #define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT 0x02000000 #elif defined(CONFIG_SOC_XILINX_ZYNQMP) /* * UltraScale TX clock configuration: comp. * path_to_url * * CRL_WPROT (CRL_APB) register: * [00] CRL APB register space write protection bit * * GEMx_REF_CTRL (CRL_APB) registers: * [30] RX channel clock active bit * [29] Clock active bit * [21 .. 16] Reference clock divisor 1 * [13 .. 
08] Reference clock divisor 0 */ #define ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS 0xFF5E001C #define ETH_XLNX_CRL_APB_WPROT_BIT 0x00000001 #define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK 0x0000003F #define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT 16 #define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT 8 #define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_RX_CLKACT_BIT 0x04000000 #define ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT 0x02000000 #endif /* CONFIG_SOC_FAMILY_XILINX_ZYNQ7000 || CONFIG_SOC_XILINX_ZYNQMP */ /* * Register offsets within the respective GEM's address space: * NWCTRL = gem.net_ctrl Network Control register * NWCFG = gem.net_cfg Network Configuration register * NWSR = gem.net_status Network Status register * DMACR = gem.dma_cfg DMA Control register * TXSR = gem.tx_status TX Status register * RXQBASE = gem.rx_qbar RXQ base address register * TXQBASE = gem.tx_qbar TXQ base address register * RXSR = gem.rx_status RX Status register * ISR = gem.intr_status Interrupt status register * IER = gem.intr_en Interrupt enable register * IDR = gem.intr_dis Interrupt disable register * IMR = gem.intr_mask Interrupt mask register * PHYMNTNC = gem.phy_maint PHY maintenance register * LADDR1L = gem.spec_addr1_bot Specific address 1 bottom register * LADDR1H = gem.spec_addr1_top Specific address 1 top register * LADDR2L = gem.spec_addr2_bot Specific address 2 bottom register * LADDR2H = gem.spec_addr2_top Specific address 2 top register * LADDR3L = gem.spec_addr3_bot Specific address 3 bottom register * LADDR3H = gem.spec_addr3_top Specific address 3 top register * LADDR4L = gem.spec_addr4_bot Specific address 4 bottom register * LADDR4H = gem.spec_addr4_top Specific address 4 top register */ #define ETH_XLNX_GEM_NWCTRL_OFFSET 0x00000000 #define ETH_XLNX_GEM_NWCFG_OFFSET 0x00000004 #define ETH_XLNX_GEM_NWSR_OFFSET 0x00000008 #define ETH_XLNX_GEM_DMACR_OFFSET 0x00000010 #define ETH_XLNX_GEM_TXSR_OFFSET 0x00000014 #define ETH_XLNX_GEM_RXQBASE_OFFSET 0x00000018 #define 
ETH_XLNX_GEM_TXQBASE_OFFSET 0x0000001C #define ETH_XLNX_GEM_RXSR_OFFSET 0x00000020 #define ETH_XLNX_GEM_ISR_OFFSET 0x00000024 #define ETH_XLNX_GEM_IER_OFFSET 0x00000028 #define ETH_XLNX_GEM_IDR_OFFSET 0x0000002C #define ETH_XLNX_GEM_IMR_OFFSET 0x00000030 #define ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET 0x00000034 #define ETH_XLNX_GEM_LADDR1L_OFFSET 0x00000088 #define ETH_XLNX_GEM_LADDR1H_OFFSET 0x0000008C #define ETH_XLNX_GEM_LADDR2L_OFFSET 0x00000090 #define ETH_XLNX_GEM_LADDR2H_OFFSET 0x00000094 #define ETH_XLNX_GEM_LADDR3L_OFFSET 0x00000098 #define ETH_XLNX_GEM_LADDR3H_OFFSET 0x0000009C #define ETH_XLNX_GEM_LADDR4L_OFFSET 0x000000A0 #define ETH_XLNX_GEM_LADDR4H_OFFSET 0x000000A4 /* * Masks for clearing registers during initialization: * gem.net_ctrl [clear_stat_regs] * gem.tx_status [7..0] * gem.rx_status [3..0] * gem.intr_dis [26..0] */ #define ETH_XLNX_GEM_STATCLR_MASK 0x00000020 #define ETH_XLNX_GEM_TXSRCLR_MASK 0x000000FF #define ETH_XLNX_GEM_RXSRCLR_MASK 0x0000000F #define ETH_XLNX_GEM_IDRCLR_MASK 0x07FFFFFF /* (Shift) masks for individual registers' bits / bitfields */ /* * gem.net_ctrl: * [15] Store 1588 receive timestamp in CRC field * [12] Transmit zero quantum pause frame * [11] Transmit pause frame * [10] Halt transmission after current frame * [09] Start transmission (tx_go) * [07] Enable writing to statistics counters * [06] Increment statistics registers - for testing purposes only * [05] Clear statistics registers * [04] Enable MDIO port * [03] Enable transmit * [02] Enable receive * [01] Local loopback mode */ #define ETH_XLNX_GEM_NWCTRL_RXTSTAMP_BIT 0x00008000 #define ETH_XLNX_GEM_NWCTRL_ZEROPAUSETX_BIT 0x00001000 #define ETH_XLNX_GEM_NWCTRL_PAUSETX_BIT 0x00000800 #define ETH_XLNX_GEM_NWCTRL_HALTTX_BIT 0x00000400 #define ETH_XLNX_GEM_NWCTRL_STARTTX_BIT 0x00000200 #define ETH_XLNX_GEM_NWCTRL_STATWEN_BIT 0x00000080 #define ETH_XLNX_GEM_NWCTRL_STATINC_BIT 0x00000040 #define ETH_XLNX_GEM_NWCTRL_STATCLR_BIT 0x00000020 #define ETH_XLNX_GEM_NWCTRL_MDEN_BIT 
0x00000010 #define ETH_XLNX_GEM_NWCTRL_TXEN_BIT 0x00000008 #define ETH_XLNX_GEM_NWCTRL_RXEN_BIT 0x00000004 #define ETH_XLNX_GEM_NWCTRL_LOOPEN_BIT 0x00000002 /* * gem.net_cfg: * [30] Ignore IPG RX Error * [29] Disable rejection of non-standard preamble * [28] Enable IPG stretch * [27] Enable SGMII mode * [26] Disable rejection of frames with FCS errors * [25] Enable frames to be received in HDX mode while transmitting * [24] Enable RX checksum offload to hardware * [23] Do not copy pause frames to memory * [22 .. 21] Data bus width * [20 .. 18] MDC clock division setting * [17] Discard FCS from received frames * [16] RX length field error frame discard enable * [15 .. 14] Receive buffer offset, # of bytes * [13] Enable pause TX upon 802.3 pause frame reception * [12] Retry test - for testing purposes only * [11] Use TBI instead of the GMII/MII interface * [10] Gigabit mode enable * [09] External address match enable * [08] Enable 1536 byte frames reception * [07] Receive unicast hash frames enable * [06] Receive multicast hash frames enable * [05] Disable broadcast frame reception * [04] Copy all frames = promiscuous mode * [02] Discard non-VLAN frames enable * [01] Full duplex enable * [00] Speed selection: 1 = 100Mbit/s, 0 = 10 Mbit/s, GBE mode is * set separately in bit [10] */ #define ETH_XLNX_GEM_NWCFG_IGNIPGRXERR_BIT 0x40000000 #define ETH_XLNX_GEM_NWCFG_BADPREAMBEN_BIT 0x20000000 #define ETH_XLNX_GEM_NWCFG_IPG_STRETCH_BIT 0x10000000 #define ETH_XLNX_GEM_NWCFG_SGMIIEN_BIT 0x08000000 #define ETH_XLNX_GEM_NWCFG_FCSIGNORE_BIT 0x04000000 #define ETH_XLNX_GEM_NWCFG_HDRXEN_BIT 0x02000000 #define ETH_XLNX_GEM_NWCFG_RXCHKSUMEN_BIT 0x01000000 #define ETH_XLNX_GEM_NWCFG_PAUSECOPYDI_BIT 0x00800000 #define ETH_XLNX_GEM_NWCFG_DBUSW_MASK 0x3 #define ETH_XLNX_GEM_NWCFG_DBUSW_SHIFT 21 #define ETH_XLNX_GEM_NWCFG_MDC_MASK 0x7 #define ETH_XLNX_GEM_NWCFG_MDC_SHIFT 18 #define ETH_XLNX_GEM_NWCFG_MDCCLKDIV_MASK 0x001C0000 #define ETH_XLNX_GEM_NWCFG_FCSREM_BIT 0x00020000 #define 
ETH_XLNX_GEM_NWCFG_LENGTHERRDSCRD_BIT 0x00010000 #define ETH_XLNX_GEM_NWCFG_RXOFFS_MASK 0x00000003 #define ETH_XLNX_GEM_NWCFG_RXOFFS_SHIFT 14 #define ETH_XLNX_GEM_NWCFG_PAUSEEN_BIT 0x00002000 #define ETH_XLNX_GEM_NWCFG_RETRYTESTEN_BIT 0x00001000 #define ETH_XLNX_GEM_NWCFG_TBIINSTEAD_BIT 0x00000800 #define ETH_XLNX_GEM_NWCFG_1000_BIT 0x00000400 #define ETH_XLNX_GEM_NWCFG_EXTADDRMATCHEN_BIT 0x00000200 #define ETH_XLNX_GEM_NWCFG_1536RXEN_BIT 0x00000100 #define ETH_XLNX_GEM_NWCFG_UCASTHASHEN_BIT 0x00000080 #define ETH_XLNX_GEM_NWCFG_MCASTHASHEN_BIT 0x00000040 #define ETH_XLNX_GEM_NWCFG_BCASTDIS_BIT 0x00000020 #define ETH_XLNX_GEM_NWCFG_COPYALLEN_BIT 0x00000010 #define ETH_XLNX_GEM_NWCFG_NVLANDISC_BIT 0x00000004 #define ETH_XLNX_GEM_NWCFG_FDEN_BIT 0x00000002 #define ETH_XLNX_GEM_NWCFG_100_BIT 0x00000001 /* * gem.dma_cfg: * [24] Discard packets when AHB resource is unavailable * [23 .. 16] RX buffer size, n * 64 bytes * [11] Enable/disable TCP|UDP/IP TX checksum offload * [10] TX buffer half/full memory size * [09 .. 08] Receiver packet buffer memory size select * [07] Endianness configuration * [06] Descriptor access endianness configuration * [04 .. 
00] AHB fixed burst length for DMA data operations */
#define ETH_XLNX_GEM_DMACR_DISCNOAHB_BIT		0x01000000
#define ETH_XLNX_GEM_DMACR_RX_BUF_MASK			0x000000FF
#define ETH_XLNX_GEM_DMACR_RX_BUF_SHIFT			16
#define ETH_XLNX_GEM_DMACR_TCP_CHKSUM_BIT		0x00000800
#define ETH_XLNX_GEM_DMACR_TX_SIZE_BIT			0x00000400
#define ETH_XLNX_GEM_DMACR_RX_SIZE_MASK			0x00000300
#define ETH_XLNX_GEM_DMACR_RX_SIZE_SHIFT		8
#define ETH_XLNX_GEM_DMACR_ENDIAN_BIT			0x00000080
#define ETH_XLNX_GEM_DMACR_DESCR_ENDIAN_BIT		0x00000040
#define ETH_XLNX_GEM_DMACR_AHB_BURST_LENGTH_MASK	0x0000001F

/*
 * gem.intr_* interrupt status/enable/disable bits:
 * [25] PTP pdelay_resp frame transmitted
 * [24] PTP pdelay_req frame transmitted
 * [23] PTP pdelay_resp frame received
 * [22] PTP delay_req frame received
 * [21] PTP sync frame transmitted
 * [20] PTP delay_req frame transmitted
 * [19] PTP sync frame received
 * [18] PTP delay_req frame received
 * [17] PCS link partner page mask
 * [16] Auto-negotiation completed
 * [15] External interrupt
 * [14] Pause frame transmitted
 * [13] Pause time has reached zero
 * [12] Pause frame received with non-zero pause quantum
 * [11] hresp not OK
 * [10] Receive overrun
 * [09] Link change
 * [07] Transmit complete
 * [06] Transmit frame corruption due to AHB/AXI error
 * [05] Retry limit exceeded or late collision
 * [04] Transmit buffer underrun
 * [03] Set 'used' bit in TX BD encountered
 * [02] Set 'used' bit in RX BD encountered
 * [01] Frame received
 * [00] PHY management done
 */
#define ETH_XLNX_GEM_IXR_PTPPSTX_BIT			0x02000000
#define ETH_XLNX_GEM_IXR_PTPPDRTX_BIT			0x01000000
#define ETH_XLNX_GEM_IXR_PTPSTX_BIT			0x00800000
#define ETH_XLNX_GEM_IXR_PTPDRTX_BIT			0x00400000
#define ETH_XLNX_GEM_IXR_PTPPSRX_BIT			0x00200000
#define ETH_XLNX_GEM_IXR_PTPPDRRX_BIT			0x00100000
#define ETH_XLNX_GEM_IXR_PTPSRX_BIT			0x00080000
#define ETH_XLNX_GEM_IXR_PTPDRRX_BIT			0x00040000
#define ETH_XLNX_GEM_IXR_PARTNER_PGRX_BIT		0x00020000
#define ETH_XLNX_GEM_IXR_AUTONEG_COMPLETE_BIT		0x00010000
#define ETH_XLNX_GEM_IXR_EXTERNAL_INT_BIT		0x00008000
#define ETH_XLNX_GEM_IXR_PAUSE_TX_BIT			0x00004000
#define ETH_XLNX_GEM_IXR_PAUSE_ZERO_BIT			0x00002000
#define ETH_XLNX_GEM_IXR_PAUSE_NONZERO_BIT		0x00001000
#define ETH_XLNX_GEM_IXR_HRESP_NOT_OK_BIT		0x00000800
#define ETH_XLNX_GEM_IXR_RX_OVERRUN_BIT			0x00000400
#define ETH_XLNX_GEM_IXR_LINK_CHANGE			0x00000200
#define ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT		0x00000080
#define ETH_XLNX_GEM_IXR_TX_CORRUPT_BIT			0x00000040
#define ETH_XLNX_GEM_IXR_RETRY_LIMIT_OR_LATE_COLL_BIT	0x00000020
#define ETH_XLNX_GEM_IXR_TX_UNDERRUN_BIT		0x00000010
#define ETH_XLNX_GEM_IXR_TX_USED_BIT			0x00000008
#define ETH_XLNX_GEM_IXR_RX_USED_BIT			0x00000004
#define ETH_XLNX_GEM_IXR_FRAME_RX_BIT			0x00000002
#define ETH_XLNX_GEM_IXR_PHY_MGMNT_BIT			0x00000001
#define ETH_XLNX_GEM_IXR_ALL_MASK			0x03FC7FFE
#define ETH_XLNX_GEM_IXR_ERRORS_MASK			0x00000C60

/* Bits / bit masks relating to the GEM's MDIO interface */

/*
 * gem.net_status:
 * [02] PHY management idle bit
 * [01] MDIO input status
 */
#define ETH_XLNX_GEM_MDIO_IDLE_BIT			0x00000004
#define ETH_XLNX_GEM_MDIO_IN_STATUS_BIT			0x00000002

/*
 * gem.phy_maint:
 * [31 .. 30] constant values
 * [17 .. 16] constant values
 * [29]       Read operation control bit
 * [28]       Write operation control bit
 * [27 .. 23] PHY address
 * [22 .. 18] Register address
 * [15 .. 00] 16-bit data word
 * NOTE: the address/register-ID masks below apply to the value BEFORE
 * it is shifted into place with the corresponding _SHIFT constant.
 */
#define ETH_XLNX_GEM_PHY_MAINT_CONST_BITS		0x40020000
#define ETH_XLNX_GEM_PHY_MAINT_READ_OP_BIT		0x20000000
#define ETH_XLNX_GEM_PHY_MAINT_WRITE_OP_BIT		0x10000000
#define ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_MASK		0x0000001F
#define ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_SHIFT	23
#define ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_MASK		0x0000001F
#define ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_SHIFT	18
#define ETH_XLNX_GEM_PHY_MAINT_DATA_MASK		0x0000FFFF

/* Device initialization macro */
#define ETH_XLNX_GEM_NET_DEV_INIT(port) \
ETH_NET_DEVICE_DT_INST_DEFINE(port,\
	eth_xlnx_gem_dev_init,\
	NULL,\
	&eth_xlnx_gem##port##_dev_data,\
	&eth_xlnx_gem##port##_dev_cfg,\
	CONFIG_ETH_INIT_PRIORITY,\
	&eth_xlnx_gem_apis,\
	NET_ETH_MTU);

/* Device configuration data declaration macro */
#define ETH_XLNX_GEM_DEV_CONFIG(port) \
static const struct eth_xlnx_gem_dev_cfg eth_xlnx_gem##port##_dev_cfg = {\
	.base_addr = DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 0),\
	.config_func = eth_xlnx_gem##port##_irq_config,\
	.pll_clock_frequency = DT_INST_PROP(port, clock_frequency),\
	.clk_ctrl_reg_address = DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 1),\
	.mdc_divider = (enum eth_xlnx_mdc_clock_divider)\
		(DT_INST_PROP(port, mdc_divider)),\
	.max_link_speed = (enum eth_xlnx_link_speed)\
		(DT_INST_PROP(port, link_speed)),\
	.init_phy = DT_INST_PROP(port, init_mdio_phy),\
	.phy_mdio_addr_fix = DT_INST_PROP(port, mdio_phy_address),\
	.phy_advertise_lower = DT_INST_PROP(port, advertise_lower_link_speeds),\
	.phy_poll_interval = DT_INST_PROP(port, phy_poll_interval),\
	.defer_rxp_to_queue = !DT_INST_PROP(port, handle_rx_in_isr),\
	.defer_txd_to_queue = DT_INST_PROP(port, handle_tx_in_workq),\
	.amba_dbus_width = (enum eth_xlnx_amba_dbus_width)\
		(DT_INST_PROP(port, amba_ahb_dbus_width)),\
	.ahb_burst_length = (enum eth_xlnx_ahb_burst_length)\
		(DT_INST_PROP(port, amba_ahb_burst_length)),\
	.hw_rx_buffer_size = (enum eth_xlnx_hwrx_buffer_size)\
		(DT_INST_PROP(port, hw_rx_buffer_size)),\
	.hw_rx_buffer_offset = (uint8_t)\
		(DT_INST_PROP(port, hw_rx_buffer_offset)),\
	.rxbd_count = (uint8_t)\
		(DT_INST_PROP(port, rx_buffer_descriptors)),\
	.txbd_count = (uint8_t)\
		(DT_INST_PROP(port, tx_buffer_descriptors)),\
	.rx_buffer_size = (((uint16_t)(DT_INST_PROP(port, rx_buffer_size)) +\
		(ETH_XLNX_BUFFER_ALIGNMENT-1)) & ~(ETH_XLNX_BUFFER_ALIGNMENT-1)),\
	.tx_buffer_size = (((uint16_t)(DT_INST_PROP(port, tx_buffer_size)) +\
		(ETH_XLNX_BUFFER_ALIGNMENT-1)) & ~(ETH_XLNX_BUFFER_ALIGNMENT-1)),\
	.ignore_ipg_rxer = DT_INST_PROP(port, ignore_ipg_rxer),\
	.disable_reject_nsp = DT_INST_PROP(port, disable_reject_nsp),\
	.enable_ipg_stretch = DT_INST_PROP(port, ipg_stretch),\
	.enable_sgmii_mode = DT_INST_PROP(port, sgmii_mode),\
	.disable_reject_fcs_crc_errors = DT_INST_PROP(port, disable_reject_fcs_crc_errors),\
	.enable_rx_halfdup_while_tx = DT_INST_PROP(port, rx_halfdup_while_tx),\
	.enable_rx_chksum_offload = DT_INST_PROP(port, rx_checksum_offload),\
	.disable_pause_copy = DT_INST_PROP(port, disable_pause_copy),\
	.discard_rx_fcs = DT_INST_PROP(port, discard_rx_fcs),\
	.discard_rx_length_errors = DT_INST_PROP(port, discard_rx_length_errors),\
	.enable_pause = DT_INST_PROP(port, pause_frame),\
	.enable_tbi = DT_INST_PROP(port, tbi),\
	.ext_addr_match = DT_INST_PROP(port, ext_address_match),\
	.enable_1536_frames = DT_INST_PROP(port, long_frame_rx_support),\
	.enable_ucast_hash = DT_INST_PROP(port, unicast_hash),\
	.enable_mcast_hash = DT_INST_PROP(port, multicast_hash),\
	.disable_bcast = DT_INST_PROP(port, reject_broadcast),\
	.copy_all_frames = DT_INST_PROP(port, promiscuous_mode),\
	.discard_non_vlan = DT_INST_PROP(port, discard_non_vlan),\
	.enable_fdx = DT_INST_PROP(port, full_duplex),\
	.disc_rx_ahb_unavail = DT_INST_PROP(port, discard_rx_frame_ahb_unavail),\
	.enable_tx_chksum_offload = DT_INST_PROP(port, tx_checksum_offload),\
	.tx_buffer_size_full = DT_INST_PROP(port, hw_tx_buffer_size_full),\
	.enable_ahb_packet_endian_swap = DT_INST_PROP(port, ahb_packet_endian_swap),\
	.enable_ahb_md_endian_swap = DT_INST_PROP(port, ahb_md_endian_swap)\
};

/* Device run-time data declaration macro */
#define ETH_XLNX_GEM_DEV_DATA(port) \
static struct eth_xlnx_gem_dev_data eth_xlnx_gem##port##_dev_data = {\
	.mac_addr = DT_INST_PROP(port, local_mac_address),\
	.started = 0,\
	.eff_link_speed = LINK_DOWN,\
	.phy_addr = 0,\
	.phy_id = 0,\
	.phy_access_api = NULL,\
	.first_rx_buffer = NULL,\
	.first_tx_buffer = NULL\
};

/* DMA memory area declaration macro */
#define ETH_XLNX_GEM_DMA_AREA_DECL(port) \
struct eth_xlnx_dma_area_gem##port {\
	struct eth_xlnx_gem_bd rx_bd[DT_INST_PROP(port, rx_buffer_descriptors)];\
	struct eth_xlnx_gem_bd tx_bd[DT_INST_PROP(port, tx_buffer_descriptors)];\
	uint8_t rx_buffer\
		[DT_INST_PROP(port, rx_buffer_descriptors)]\
		[((DT_INST_PROP(port, rx_buffer_size)\
		+ (ETH_XLNX_BUFFER_ALIGNMENT - 1))\
		& ~(ETH_XLNX_BUFFER_ALIGNMENT - 1))];\
	uint8_t tx_buffer\
		[DT_INST_PROP(port, tx_buffer_descriptors)]\
		[((DT_INST_PROP(port, tx_buffer_size)\
		+ (ETH_XLNX_BUFFER_ALIGNMENT - 1))\
		& ~(ETH_XLNX_BUFFER_ALIGNMENT - 1))];\
};

/* DMA memory area instantiation macro */
#define ETH_XLNX_GEM_DMA_AREA_INST(port) \
static struct eth_xlnx_dma_area_gem##port eth_xlnx_gem##port##_dma_area\
	__ocm_bss_section __aligned(4096);

/* Interrupt configuration function macro */
#define ETH_XLNX_GEM_CONFIG_IRQ_FUNC(port) \
static void eth_xlnx_gem##port##_irq_config(const struct device *dev)\
{\
	ARG_UNUSED(dev);\
	IRQ_CONNECT(DT_INST_IRQN(port), DT_INST_IRQ(port, priority),\
		    eth_xlnx_gem_isr, DEVICE_DT_INST_GET(port), 0);\
	irq_enable(DT_INST_IRQN(port));\
}

/* RX/TX BD Ring initialization macro */
#define ETH_XLNX_GEM_INIT_BD_RING(port) \
if (dev_conf->base_addr == DT_REG_ADDR_BY_IDX(DT_INST(port, xlnx_gem), 0)) {\
	dev_data->rxbd_ring.first_bd = &(eth_xlnx_gem##port##_dma_area.rx_bd[0]);\
	dev_data->txbd_ring.first_bd = &(eth_xlnx_gem##port##_dma_area.tx_bd[0]);\
	dev_data->first_rx_buffer = (uint8_t *)eth_xlnx_gem##port##_dma_area.rx_buffer;\
	dev_data->first_tx_buffer = (uint8_t *)eth_xlnx_gem##port##_dma_area.tx_buffer;\
}

/* Top-level device initialization macro - bundles all of the above */
#define ETH_XLNX_GEM_INITIALIZE(port) \
ETH_XLNX_GEM_CONFIG_IRQ_FUNC(port);\
ETH_XLNX_GEM_DEV_CONFIG(port);\
ETH_XLNX_GEM_DEV_DATA(port);\
ETH_XLNX_GEM_DMA_AREA_DECL(port);\
ETH_XLNX_GEM_DMA_AREA_INST(port);\
ETH_XLNX_GEM_NET_DEV_INIT(port);\

/* IRQ handler function type */
typedef void (*eth_xlnx_gem_config_irq_t)(const struct device *dev);

/* Enums for bitfields representing configuration settings */

/**
 * @brief Link speed configuration enumeration type.
 *
 * Enumeration type for link speed indication, contains 'link down'
 * plus all link speeds supported by the controller (10/100/1000).
 */
enum eth_xlnx_link_speed {
	/* The values of this enum are consecutively numbered */
	LINK_DOWN = 0,
	LINK_10MBIT,
	LINK_100MBIT,
	LINK_1GBIT
};

/**
 * @brief AMBA AHB data bus width configuration enumeration type.
 *
 * Enumeration type containing the supported width options for the
 * AMBA AHB data bus. This is a configuration item in the controller's
 * net_cfg register.
 */
enum eth_xlnx_amba_dbus_width {
	/* The values of this enum are consecutively numbered */
	AMBA_AHB_DBUS_WIDTH_32BIT = 0,
	AMBA_AHB_DBUS_WIDTH_64BIT,
	AMBA_AHB_DBUS_WIDTH_128BIT
};

/**
 * @brief MDC clock divider configuration enumeration type.
 *
 * Enumeration type containing the supported clock divider values
 * used to generate the MDIO interface clock (MDC) from either the
 * cpu_1x clock (Zynq-7000) or the LPD LSBUS clock (UltraScale).
 * This is a configuration item in the controller's net_cfg register.
 */
enum eth_xlnx_mdc_clock_divider {
	/* The values of this enum are consecutively numbered */
	MDC_DIVIDER_8 = 0,
	MDC_DIVIDER_16,
	MDC_DIVIDER_32,
	MDC_DIVIDER_48,
#ifdef CONFIG_SOC_FAMILY_XILINX_ZYNQ7000
	/* Dividers > 48 are only available in the Zynq-7000 */
	MDC_DIVIDER_64,
	MDC_DIVIDER_96,
	MDC_DIVIDER_128,
	MDC_DIVIDER_224
#endif
};

/**
 * @brief DMA RX buffer size configuration enumeration type.
 *
 * Enumeration type containing the supported size options for the
 * DMA receive buffer size in AHB system memory. This is a configuration
 * item in the controller's dma_cfg register.
 */
enum eth_xlnx_hwrx_buffer_size {
	/* The values of this enum are consecutively numbered */
	HWRX_BUFFER_SIZE_1KB = 0,
	HWRX_BUFFER_SIZE_2KB,
	HWRX_BUFFER_SIZE_4KB,
	HWRX_BUFFER_SIZE_8KB
};

/**
 * @brief AHB burst length configuration enumeration type.
 *
 * Enumeration type containing the supported burst length options
 * for the AHB fixed burst length for DMA data operations. This is a
 * configuration item in the controller's dma_cfg register.
 */
enum eth_xlnx_ahb_burst_length {
	/* The values of this enum are one-hot encoded */
	AHB_BURST_SINGLE = 1,
	/* 2 = also AHB_BURST_SINGLE */
	AHB_BURST_INCR4 = 4,
	AHB_BURST_INCR8 = 8,
	AHB_BURST_INCR16 = 16
};

/**
 * @brief DMA memory area buffer descriptor.
 *
 * An array of these descriptors for each RX and TX is used to
 * describe the respective DMA memory area. Each address word
 * points to the start of a RX or TX buffer within the DMA memory
 * area, while the control word is used for buffer status exchange
 * with the controller.
 */
struct eth_xlnx_gem_bd {
	/* TODO for Cortex-A53: 64-bit addressing */
	/* TODO: timestamping support */
	/* Buffer physical address (absolute address) */
	uint32_t addr;
	/* Buffer control word (different contents for RX and TX) */
	uint32_t ctrl;
};

/**
 * @brief DMA memory area buffer descriptor ring management structure.
 *
 * The DMA memory area buffer descriptor ring management structure
 * is used to manage either the RX or TX buffer descriptor array
 * (while the buffer descriptors are just an array from the software
 * point of view, the controller treats them as a ring, in which the
 * last descriptor's control word has a special last-in-ring bit set).
 * It contains a pointer to the start of the descriptor array, a
 * semaphore as a means of preventing concurrent access, a free entry
 * counter as well as indices used to determine which BD shall be used
 * or evaluated for the next RX/TX operation.
 */
struct eth_xlnx_gem_bdring {
	/* Concurrent modification protection */
	struct k_sem ring_sem;
	/* Pointer to the first BD in the list */
	struct eth_xlnx_gem_bd *first_bd;
	/* Index of the next BD to be used for TX */
	uint8_t next_to_use;
	/* Index of the next BD to be processed (both RX/TX) */
	uint8_t next_to_process;
	/* Number of currently available BDs in this ring */
	uint8_t free_bds;
};

/**
 * @brief Constant device configuration data structure.
 *
 * This struct contains all device configuration data for a GEM
 * controller instance which is constant. The data herein is
 * either acquired from the generated header file based on the
 * data from Kconfig, or from header file based on the device tree
 * data. Some of the data contained, in particular data relating
 * to clock sources, is specific to either the Zynq-7000 or the
 * UltraScale SoCs, which both contain the GEM.
 */
struct eth_xlnx_gem_dev_cfg {
	uint32_t base_addr;
	eth_xlnx_gem_config_irq_t config_func;

	uint32_t pll_clock_frequency;
	uint32_t clk_ctrl_reg_address;
	enum eth_xlnx_mdc_clock_divider mdc_divider;

	enum eth_xlnx_link_speed max_link_speed;
	bool init_phy;
	uint8_t phy_mdio_addr_fix;
	uint8_t phy_advertise_lower;
	uint32_t phy_poll_interval;
	uint8_t defer_rxp_to_queue;
	uint8_t defer_txd_to_queue;

	enum eth_xlnx_amba_dbus_width amba_dbus_width;
	enum eth_xlnx_ahb_burst_length ahb_burst_length;
	enum eth_xlnx_hwrx_buffer_size hw_rx_buffer_size;
	uint8_t hw_rx_buffer_offset;

	uint8_t rxbd_count;
	uint8_t txbd_count;
	uint16_t rx_buffer_size;
	uint16_t tx_buffer_size;

	bool ignore_ipg_rxer : 1;
	bool disable_reject_nsp : 1;
	bool enable_ipg_stretch : 1;
	bool enable_sgmii_mode : 1;
	bool disable_reject_fcs_crc_errors : 1;
	bool enable_rx_halfdup_while_tx : 1;
	bool enable_rx_chksum_offload : 1;
	bool disable_pause_copy : 1;
	bool discard_rx_fcs : 1;
	bool discard_rx_length_errors : 1;
	bool enable_pause : 1;
	bool enable_tbi : 1;
	bool ext_addr_match : 1;
	bool enable_1536_frames : 1;
	bool enable_ucast_hash : 1;
	bool enable_mcast_hash : 1;
	bool disable_bcast : 1;
	bool copy_all_frames : 1;
	bool discard_non_vlan : 1;
	bool enable_fdx : 1;
	bool disc_rx_ahb_unavail : 1;
	bool enable_tx_chksum_offload : 1;
	bool tx_buffer_size_full : 1;
	bool enable_ahb_packet_endian_swap : 1;
	bool enable_ahb_md_endian_swap : 1;
};

/**
 * @brief Run-time device configuration data structure.
 *
 * This struct contains all device configuration data for a GEM
 * controller instance which is modifyable at run-time, such as
 * data relating to the attached PHY or the auxiliary thread.
 */
struct eth_xlnx_gem_dev_data {
	struct net_if *iface;
	uint8_t mac_addr[6];
	enum eth_xlnx_link_speed eff_link_speed;

	struct k_work tx_done_work;
	struct k_work rx_pend_work;
	struct k_sem tx_done_sem;

	uint8_t phy_addr;
	uint32_t phy_id;
	struct k_work_delayable phy_poll_delayed_work;
	struct phy_xlnx_gem_api *phy_access_api;

	uint8_t *first_rx_buffer;
	uint8_t *first_tx_buffer;

	struct eth_xlnx_gem_bdring rxbd_ring;
	struct eth_xlnx_gem_bdring txbd_ring;

#ifdef CONFIG_NET_STATISTICS_ETHERNET
	struct net_stats_eth stats;
#endif

	bool started;
};

#endif /* _ZEPHYR_DRIVERS_ETHERNET_ETH_XLNX_GEM_PRIV_H_ */
```
/content/code_sandbox/drivers/ethernet/eth_xlnx_gem_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,625
```c /* * Lukasz Majewski <lukma@denx.de> */ #define LOG_MODULE_NAME dsa #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME, CONFIG_ETHERNET_LOG_LEVEL); #include <zephyr/device.h> #include <zephyr/drivers/gpio.h> #include <zephyr/kernel.h> #include <errno.h> #include <zephyr/sys/util.h> #include <zephyr/net/ethernet.h> #include <zephyr/linker/sections.h> #include <zephyr/toolchain/common.h> #if defined(CONFIG_DSA_SPI) #include <zephyr/drivers/spi.h> #else #error "No communication bus defined" #endif #if CONFIG_DSA_KSZ8863 #define DT_DRV_COMPAT microchip_ksz8863 #include "dsa_ksz8863.h" #elif CONFIG_DSA_KSZ8794 #define DT_DRV_COMPAT microchip_ksz8794 #include "dsa_ksz8794.h" #else #error "Unsupported KSZ chipset" #endif struct ksz8xxx_data { int iface_init_count; bool is_init; #if defined(CONFIG_DSA_SPI) struct spi_dt_spec spi; #endif }; #define PRV_DATA(ctx) ((struct ksz8xxx_data *const)(ctx)->prv_data) static void dsa_ksz8xxx_write_reg(const struct ksz8xxx_data *pdev, uint16_t reg_addr, uint8_t value) { #if defined(CONFIG_DSA_SPI) uint8_t buf[3]; const struct spi_buf tx_buf = { .buf = buf, .len = 3 }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; buf[0] = KSZ8XXX_SPI_CMD_WR | ((reg_addr >> 7) & 0x1F); buf[1] = (reg_addr << 1) & 0xFE; buf[2] = value; spi_write_dt(&pdev->spi, &tx); #endif } static void dsa_ksz8xxx_read_reg(const struct ksz8xxx_data *pdev, uint16_t reg_addr, uint8_t *value) { #if defined(CONFIG_DSA_SPI) uint8_t buf[3]; const struct spi_buf tx_buf = { .buf = buf, .len = 3 }; const struct spi_buf_set tx = { .buffers = &tx_buf, .count = 1 }; struct spi_buf rx_buf = { .buf = buf, .len = 3 }; const struct spi_buf_set rx = { .buffers = &rx_buf, .count = 1 }; buf[0] = KSZ8XXX_SPI_CMD_RD | ((reg_addr >> 7) & 0x1F); buf[1] = (reg_addr << 1) & 0xFE; buf[2] = 0x0; if (!spi_transceive_dt(&pdev->spi, &tx, &rx)) { *value = buf[2]; } else { LOG_DBG("Failure while reading register 0x%04x", reg_addr); *value = 0U; } #endif } 
static bool dsa_ksz8xxx_port_link_status(struct ksz8xxx_data *pdev, uint8_t port) { uint8_t tmp; if (port < KSZ8XXX_FIRST_PORT || port > KSZ8XXX_LAST_PORT || port == KSZ8XXX_CPU_PORT) { return false; } dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_STAT2_PORTn(port), &tmp); return tmp & KSZ8XXX_STAT2_LINK_GOOD; } #if !DT_INST_NODE_HAS_PROP(0, reset_gpios) static void dsa_ksz8xxx_soft_reset(struct ksz8xxx_data *pdev) { /* reset switch */ dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_RESET_REG, KSZ8XXX_RESET_SET); k_busy_wait(KSZ8XXX_SOFT_RESET_DURATION); dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_RESET_REG, KSZ8XXX_RESET_CLEAR); } #endif static int dsa_ksz8xxx_probe(struct ksz8xxx_data *pdev) { uint16_t timeout = 100; uint8_t val[2], tmp; /* * Wait for SPI of KSZ8794 being fully operational - up to 10 ms */ for (timeout = 100, tmp = 0; tmp != KSZ8XXX_CHIP_ID0_ID_DEFAULT && timeout > 0; timeout--) { dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_CHIP_ID0, &tmp); k_busy_wait(100); } if (timeout == 0) { LOG_ERR("KSZ8794: No SPI communication!"); return -ENODEV; } dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_CHIP_ID0, &val[0]); dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_CHIP_ID1, &val[1]); if (val[0] != KSZ8XXX_CHIP_ID0_ID_DEFAULT || val[1] != KSZ8XXX_CHIP_ID1_ID_DEFAULT) { LOG_ERR("Chip ID mismatch. " "Expected %02x%02x but found %02x%02x", KSZ8XXX_CHIP_ID0_ID_DEFAULT, KSZ8XXX_CHIP_ID1_ID_DEFAULT, val[0], val[1]); return -ENODEV; } LOG_DBG("KSZ8794: ID0: 0x%x ID1: 0x%x timeout: %d", val[1], val[0], timeout); return 0; } static int dsa_ksz8xxx_write_static_mac_table(struct ksz8xxx_data *pdev, uint16_t entry_addr, uint8_t *p) { /* * According to KSZ8794 manual - write to static mac address table * requires write to indirect registers: * Write register 0x71 (113) * .... 
	 * Write register 0x78 (120)
	 *
	 * Then:
	 * Write to Register 110 with 0x00 (write static table selected)
	 * Write to Register 111 with 0x0x (trigger the write operation, to
	 * table entry x)
	 */
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_7, p[7]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_6, p[6]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_5, p[5]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_4, p[4]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_3, p[3]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_2, p[2]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_1, p[1]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_DATA_0, p[0]);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_CTRL_0, 0x00);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_CTRL_1, entry_addr);

	return 0;
}

/*
 * Build a static MAC address table entry (VALID + OVRD, forwarding to
 * fw_port) and store it at entry_idx via the indirect register interface.
 */
static int dsa_ksz8xxx_set_static_mac_table(struct ksz8xxx_data *pdev,
					    const uint8_t *mac,
					    uint8_t fw_port,
					    uint16_t entry_idx)
{
	/*
	 * The data in uint8_t buf[] buffer is stored in the little endian
	 * format, as it eases programming proper KSZ8794 registers.
	 */
	uint8_t buf[8];

	buf[7] = 0;

	/* Prepare entry for static MAC address table */
	buf[5] = mac[0];
	buf[4] = mac[1];
	buf[3] = mac[2];
	buf[2] = mac[3];
	buf[1] = mac[4];
	buf[0] = mac[5];

	buf[6] = fw_port;
	buf[6] |= KSZ8XXX_STATIC_MAC_TABLE_VALID;
	buf[6] |= KSZ8XXX_STATIC_MAC_TABLE_OVRD;

	dsa_ksz8xxx_write_static_mac_table(pdev, entry_idx, buf);

	return 0;
}

static int dsa_ksz8xxx_read_static_mac_table(struct ksz8xxx_data *pdev,
					     uint16_t entry_addr, uint8_t *p)
{
	/*
	 * According to KSZ8794 manual - read from static mac address table
	 * requires reads from indirect registers:
	 *
	 * Write to Register 110 with 0x10 (read static table selected)
	 * Write to Register 111 with 0x0x (trigger the read operation, to
	 * table entry x)
	 *
	 * Then:
	 * Write register 0x71 (113)
	 * ....
	 * Write register 0x78 (120)
	 *
	 */
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_CTRL_0, 0x10);
	dsa_ksz8xxx_write_reg(pdev, KSZ8XXX_REG_IND_CTRL_1, entry_addr);

	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_7, &p[7]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_6, &p[6]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_5, &p[5]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_4, &p[4]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_3, &p[3]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_2, &p[2]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_1, &p[1]);
	dsa_ksz8xxx_read_reg(pdev, KSZ8XXX_REG_IND_DATA_0, &p[0]);

	return 0;
}

#if CONFIG_DSA_KSZ8863
/*
 * KSZ8863 variant: enable TX/RX and address learning on every port and
 * configure the tail-tag and legacy max-packet-size-check features.
 */
static int dsa_ksz8xxx_switch_setup(const struct ksz8xxx_data *pdev)
{
	uint8_t tmp, i;

	/*
	 * Loop through ports - The same setup when tail tagging is enabled or
	 * disabled.
	 */
	for (i = KSZ8XXX_FIRST_PORT; i <= KSZ8XXX_LAST_PORT; i++) {
		/* Enable transmission, reception and switch address learning */
		dsa_ksz8xxx_read_reg(pdev, KSZ8863_CTRL2_PORTn(i), &tmp);
		tmp |= KSZ8863_CTRL2_TRANSMIT_EN;
		tmp |= KSZ8863_CTRL2_RECEIVE_EN;
		tmp &= ~KSZ8863_CTRL2_LEARNING_DIS;
		dsa_ksz8xxx_write_reg(pdev, KSZ8863_CTRL2_PORTn(i), tmp);
	}

#if defined(CONFIG_DSA_KSZ_TAIL_TAGGING)
	/* Enable tail tag feature */
	dsa_ksz8xxx_read_reg(pdev, KSZ8863_GLOBAL_CTRL1, &tmp);
	tmp |= KSZ8863_GLOBAL_CTRL1_TAIL_TAG_EN;
	dsa_ksz8xxx_write_reg(pdev, KSZ8863_GLOBAL_CTRL1, tmp);
#else
	/* Disable tail tag feature */
	dsa_ksz8xxx_read_reg(pdev, KSZ8863_GLOBAL_CTRL1, &tmp);
	tmp &= ~KSZ8863_GLOBAL_CTRL1_TAIL_TAG_EN;
	dsa_ksz8xxx_write_reg(pdev, KSZ8863_GLOBAL_CTRL1, tmp);
#endif

	dsa_ksz8xxx_read_reg(pdev, KSZ8863_GLOBAL_CTRL2, &tmp);
	tmp &= ~KSZ8863_GLOBAL_CTRL2_LEG_MAX_PKT_SIZ_CHK_ENA;
	dsa_ksz8xxx_write_reg(pdev, KSZ8863_GLOBAL_CTRL2, tmp);

	return 0;
}
#endif

#if CONFIG_DSA_KSZ8794
/*
 * KSZ8794 variant of the switch setup (same high-level steps as the
 * KSZ8863 version, but with chip-specific registers).
 */
static int dsa_ksz8xxx_switch_setup(struct ksz8xxx_data *pdev)
{
	uint8_t tmp, i;

	/*
	 * Loop through ports - The same setup when tail tagging is enabled or
	 * disabled.
	 */
	for (i = KSZ8XXX_FIRST_PORT; i <= KSZ8XXX_LAST_PORT; i++) {
		/* Enable transmission, reception and switch address learning */
		dsa_ksz8xxx_read_reg(pdev, KSZ8794_CTRL2_PORTn(i), &tmp);
		tmp |= KSZ8794_CTRL2_TRANSMIT_EN;
		tmp |= KSZ8794_CTRL2_RECEIVE_EN;
		tmp &= ~KSZ8794_CTRL2_LEARNING_DIS;
		dsa_ksz8xxx_write_reg(pdev, KSZ8794_CTRL2_PORTn(i), tmp);
	}

#if defined(CONFIG_DSA_KSZ_TAIL_TAGGING)
	/* Enable tail tag feature */
	dsa_ksz8xxx_read_reg(pdev, KSZ8794_GLOBAL_CTRL10, &tmp);
	tmp |= KSZ8794_GLOBAL_CTRL10_TAIL_TAG_EN;
	dsa_ksz8xxx_write_reg(pdev, KSZ8794_GLOBAL_CTRL10, tmp);
#else
	/* Disable tail tag feature */
	dsa_ksz8xxx_read_reg(pdev, KSZ8794_GLOBAL_CTRL10, &tmp);
	tmp &= ~KSZ8794_GLOBAL_CTRL10_TAIL_TAG_EN;
	dsa_ksz8xxx_write_reg(pdev, KSZ8794_GLOBAL_CTRL10, tmp);
#endif

	dsa_ksz8xxx_read_reg(pdev, KSZ8794_PORT4_IF_CTRL6, &tmp);
	LOG_DBG("KSZ8794: CONTROL6: 0x%x port4", tmp);
	dsa_ksz8xxx_read_reg(pdev, KSZ8794_PORT4_CTRL2, &tmp);
	LOG_DBG("KSZ8794: CONTROL2: 0x%x port4", tmp);

	dsa_ksz8xxx_read_reg(pdev, KSZ8794_GLOBAL_CTRL2, &tmp);
	tmp |= KSZ8794_GLOBAL_CTRL2_LEG_MAX_PKT_SIZ_CHK_DIS;
	dsa_ksz8xxx_write_reg(pdev, KSZ8794_GLOBAL_CTRL2, tmp);

	return 0;
}

#if DT_INST_NODE_HAS_PROP(0, workaround)
/*
 * Workaround 0x01
 * Solution for Short Cable Problems with the KSZ8795 Family
 *
 * Title
 * Solution for Short Cable Problems with the KSZ8795 Family
 *
 * path_to_url
 * Problems-with-the-KSZ8795-Family
 *
 * Problem Description:
 * 1) The KSZ8795 family parts might be not link when connected through a few
 * type of short cable (<3m).
 * 2) There may be a link-up issue in the capacitor AC coupling mode for port
 * to port or board to board cases.
 *
 * Answer
 * Root Cause:
 * KSZ8795 family switches with integrated Ethernet PHY that has a DSP based
 * equalizer EQ that can balance the signal received to adapt various cable
 * length characteristics. The equalizer default settings amplify the signal
 * coming in to get more accurate readings from low amplitude signals.
* When using some type of short cable (for example, CAT-6 cable with low * attenuation to high frequencies signal vs. CAT-5 cable) or board to board * connection, or port to port with capacitor AC coupling connection, the signal * is amplified too much and cause the link-up failed with same boost setting in * the equalizer EQ. * * Solution/Workaround: * Write a DSP control register that is indirect register (0x3c) to optimize the * equalizer EQ to cover above corner cases. * w 6e a0 //write the indirect register * w 6f 3c //assign the indirect hidden register address (0x3c) * w a0 15 //write 0x15 to REG (0x3c) to optimize the EQ. The default is 0x0a. * Based on testing and practical application, this register setting above can * solve the issue for all type of the short cables and the capacitor AC * coupling mode. * * The indirect DSP register (0x3c) is an 8-bit register, the bits describe as * follows, * * Bits Bit Name Description Mode Default Setting * 0x0a 0x15 * 7-5 Reserved RO 000 000 * 4 Cpu_EQ_Done_Cond1 How to judge EQ is finished, * there are two ways to judge * if EQ is finished, can set * either way R/W 0 1 * 3-1 Cpu_EQ_CP_Points Control of EQ training is * over-boosted or * [2:0] under-boosted, that means to * compensate signal attenuation * more or less. R/W 101 010 * 0 Cpu_STOP_RUN after EQ training completed, * stop adaptation R/W 0 1 * * Explanation: * The above register change makes equalizers compensation range wider, and * therefore cables with various characteristics can be tolerated. Adjust * equalizer EQ training algorithm to cover a few type of short cables issue. * Also is appropriate for the board to board connection and port to port * connection with the capacitor AC coupling mode. * * Basically, it decides how much signal amplitude to compensate accurately * to the different type of short cables characteristics. 
The current default
 * value in the indirect register (0x3c) can cover all general standard
 * Ethernet short cables like CAT-5, CAT-5e without any problem.
 * Based on tests, a more optimized equalizer adjustment value 0x15 is better
 * for all corner cases of the short cable and short distance connection for
 * port to port or board to board cases.
 */
/*
 * Apply workaround 0x01: write 0x15 to the indirect DSP register 0x3c to
 * widen the equalizer compensation range (short-cable / AC-coupling link
 * issue). Always returns 0.
 */
static int dsa_ksz8794_phy_workaround_0x01(struct ksz8xxx_data *pdev)
{
	uint8_t indirect_type = 0x0a;
	uint8_t indirect_addr = 0x3c;
	uint8_t indirect_data = 0x15;

	dsa_ksz8xxx_write_reg(pdev, KSZ8794_REG_IND_CTRL_0, indirect_type);
	dsa_ksz8xxx_write_reg(pdev, KSZ8794_REG_IND_CTRL_1, indirect_addr);
	dsa_ksz8xxx_write_reg(pdev, KSZ8794_IND_BYTE, indirect_data);

	/* Fix typo in log message: "workarkound" -> "workaround" */
	LOG_INF("apply workaround 0x01 for short connections on KSZ8794");

	return 0;
}

/*
 * Workaround 0x02 and 0x4
 * Solution for Using CAT-5E or CAT-6 Short Cable with a Link Issue for the
 * KSZ8795 Family
 *
 * Title
 * Solution for Using CAT-5E or CAT-6 Short Cable with a Link Issue for the
 * KSZ8795 Family
 * path_to_url
 * -CAT-6-Short-Cable- with-a-Link-Issue-for-the-KSZ8795-Family
 *
 * Question
 * Possible Problem Description:
 * 1) KSZ8795 family includes KSZ8795CLX, KSZ8775CLX, KSZ8765CLX and KSZ8794CNX.
 * 2) The KSZ8795 family copper parts may not link well when connected through a
 * short CAT-5E or CAT-6 cable (about <=30 meter). The failure rate may be about
 * 2-5%.
 *
 * Answer
 * Root Cause:
 * Basically, KSZ8795 10/100 Ethernet switch family was designed based on CAT-5
 * cable. With the application of more type of cables, specially two types
 * cables of CAT-5E and CAT-6, both cables have wider bandwidth that has
 * different frequency characteristics than CAT-5 cable. More higher frequency
 * component of the CAT-5E or CAT-6 will be amplified in the receiving amplifier
 * and will cause the received signal distortion due to too much high frequency
 * components receiving signal amplitude and cause the link-up failure with
 * short cables.
* * Solution/Workaround: * 1) dsa_ksz8794_phy_workaround_0x02() * Based on the root cause above, adjust the receiver low pass filter to reduce * the high frequency component to keep the receive signal within a reasonable * range when using CAT-5E and CAT-6 cable. * * Set the indirect register as follows for the receiver low pass filter. * Format is w [Register address] [8-bit data] * w 6e a0 //write the indirect register * w 6f 4c //write/assign the internal used indirect register address (0x4c) * w a0 40 //write 0x40 to indirect register (0x4c) to reduce low pass filter * bandwidth. * * The register 0x4c bits [7:6] for receiver low pass filter bandwidth control. * * The default value is 00, change to 01. * Based on testing and practical application, this register setting above can * solve the link issue if using CAT-5E and CAT-6 short cables. * * The indirect register (0x4C) is an 8-bit register. The bits [7:6] are * described in the table below. * * * Bits Bit Name Description Mode Default Setting * 0x00 0x40 * 7-6 RX BW control Low pass filter bandwidth R/W 00 01 * 00 = 90MHz * 01 = 62MHz * 10 = 55MHz * 11 = 44MHz * 5 Enable Near-end loopback R/W 0 0 * 4-3 BTRT Additional reduce R/W 00 00 * 2 SD Ext register R/W 0 0 * 1-0 FXD reference setting 1.7V, 2V, * 1.4V * R/W 00 00 * * Solution/Workaround: * 2) dsa_ksz8794_phy_workaround_0x04() * For the wider bandwidth cables or on-board capacitor AC coupling * application, we recommend adding/setting the indirect register (0x08) from * default 0x0f to 0x00 that means to change register (0x08) bits [5:0] from * 0x0f to 0x00 to reduce equalizers (EQ) initial value to 0x00 for more * short cable or on-board capacitors AC coupling application. * * Set the indirect register as follows for EQ with 0x00 initial value. 
* Format is w [Register address] [8-bit data] * w 6e a0 //write the indirect register * w 6f 08 //write/assign the internal used indirect register address (0x08) * w a0 00 //write 0x00 to indirect register (0x08) to make EQ initial value * equal to 0x00 for very short cable (For example, 0.1m or less) * or connect two ports directly through capacitors for a capacitor * AC couple. * * The indirect DSP register (0x08) is an 8-bit register. The bits [5:0] are * described in the table below. * * Bits Bit Name Description Mode Default Setting * 0x0f 0x00 * 7 Park EQ Enable Park Equalizer function enable R/W 0 0 * 6 Reserved R 0 0 * 5-0 Cpu_EQ_Index Equalizer index control * interface R/W 001111 000000 * from 0 to 55, set EQ initial value * Conclusion: * Due to CAT-5E and CAT-6 cable having wider bandwidth, more high frequency * components will pass the low pass filter into the receiving amplifier and * cause the received signal amplitude to be too high. * Reducing the receiver low pass filter bandwidth will be the best way to * reduce the high frequency components to meet CAT-5E and CAT-6 short cable * link issue and doesnt affect CAT-5 cable because CAT-5 is not a wider * bandwidth cable. * * The DSP register (0X08) bits [5:0] are for EQ initial value. Its current * default value is 0x0F, which assumes the need to equalize regardless of the * cable length. This 0x0f initial equalize value in EQ isnt needed when * using very short cable or an on-board direct connection like capacitors AC * coupling mode. As the cable length increases, the device will equalize * automatic accordingly from 0x00 EQ initial value. * * So, it is better to set both register (0x4c) to 0x40 and register (0x08) to * 0x00 for compatibility with all Ethernet cable types and Ethernet cable * lengths. 
*/ static int dsa_ksz8794_phy_workaround_0x02(struct ksz8xxx_data *pdev) { uint8_t indirect_type = 0x0a; uint8_t indirect_addr = 0x4c; uint8_t indirect_data = 0x40; dsa_ksz8xxx_write_reg(pdev, KSZ8794_REG_IND_CTRL_0, indirect_type); dsa_ksz8xxx_write_reg(pdev, KSZ8794_REG_IND_CTRL_1, indirect_addr); dsa_ksz8xxx_write_reg(pdev, KSZ8794_IND_BYTE, indirect_data); LOG_INF("apply workarkound 0x02 link issue CAT-5E/6 on KSZ8794"); return 0; } static int dsa_ksz8794_phy_workaround_0x04(struct ksz8xxx_data *pdev) { uint8_t indirect_type = 0x0a; uint8_t indirect_addr = 0x08; uint8_t indirect_data = 0x00; dsa_ksz8xxx_write_reg(pdev, KSZ8794_REG_IND_CTRL_0, indirect_type); dsa_ksz8xxx_write_reg(pdev, KSZ8794_REG_IND_CTRL_1, indirect_addr); dsa_ksz8xxx_write_reg(pdev, KSZ8794_IND_BYTE, indirect_data); LOG_INF("apply workarkound 0x04 link issue CAT-5E/6 on KSZ8794"); return 0; } static int dsa_ksz8794_apply_workarounds(struct ksz8xxx_data *pdev) { int workaround = DT_INST_PROP(0, workaround); if (workaround & 0x01) { dsa_ksz8794_phy_workaround_0x01(pdev); } if (workaround & 0x02) { dsa_ksz8794_phy_workaround_0x02(pdev); } if (workaround & 0x04) { dsa_ksz8794_phy_workaround_0x04(pdev); } return 0; } #endif #if DT_INST_NODE_HAS_PROP(0, mii_lowspeed_drivestrength) static int dsa_ksz8794_set_lowspeed_drivestrength(struct ksz8xxx_data *pdev) { int mii_lowspeed_drivestrength = DT_INST_PROP(0, mii_lowspeed_drivestrength); uint8_t tmp, val; int ret = 0; switch (mii_lowspeed_drivestrength) { case 2: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_2MA; break; case 4: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_4MA; break; case 8: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_8MA; break; case 12: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_12MA; break; case 16: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_16MA; break; case 20: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_20MA; break; case 24: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_24MA; break; case 28: val = KSZ8794_GLOBAL_CTRL20_LOWSPEED_28MA; break; default: ret = -1; LOG_ERR("KSZ8794: 
unsupported drive strength %dmA", mii_lowspeed_drivestrength); break; } if (ret == 0) { /* set Low-Speed Interface Drive Strength for MII and RMMI */ dsa_ksz8xxx_read_reg(pdev, KSZ8794_GLOBAL_CTRL20, &tmp); tmp &= ~KSZ8794_GLOBAL_CTRL20_LOWSPEED_MASK; tmp |= val; dsa_ksz8xxx_write_reg(pdev, KSZ8794_GLOBAL_CTRL20, tmp); dsa_ksz8xxx_read_reg(pdev, KSZ8794_GLOBAL_CTRL20, &tmp); LOG_INF("KSZ8794: set drive strength %dmA", mii_lowspeed_drivestrength); } return ret; } #endif #endif #if DT_INST_NODE_HAS_PROP(0, reset_gpios) static int dsa_ksz8xxx_gpio_reset(void) { struct gpio_dt_spec reset_gpio = GPIO_DT_SPEC_INST_GET(0, reset_gpios); if (!gpio_is_ready_dt(&reset_gpio)) { LOG_ERR("Reset GPIO device not ready"); return -ENODEV; } gpio_pin_configure_dt(&reset_gpio, GPIO_OUTPUT_ACTIVE); k_msleep(10); gpio_pin_set_dt(&reset_gpio, 0); return 0; } #endif /* Low level initialization code for DSA PHY */ int dsa_hw_init(struct ksz8xxx_data *pdev) { int rc; if (pdev->is_init) { return 0; } /* Hard reset */ #if DT_INST_NODE_HAS_PROP(0, reset_gpios) dsa_ksz8xxx_gpio_reset(); /* Time needed for chip to completely power up (100ms) */ k_busy_wait(KSZ8XXX_HARD_RESET_WAIT); #endif #if defined(CONFIG_DSA_SPI) if (!spi_is_ready_dt(&pdev->spi)) { LOG_ERR("SPI bus %s is not ready", pdev->spi.bus->name); return -ENODEV; } #endif /* Probe attached PHY */ rc = dsa_ksz8xxx_probe(pdev); if (rc < 0) { return rc; } #if !DT_INST_NODE_HAS_PROP(0, reset_gpios) /* Soft reset */ dsa_ksz8xxx_soft_reset(pdev); #endif /* Setup KSZ8794 */ dsa_ksz8xxx_switch_setup(pdev); #if DT_INST_NODE_HAS_PROP(0, mii_lowspeed_drivestrength) dsa_ksz8794_set_lowspeed_drivestrength(pdev); #endif #if DT_INST_NODE_HAS_PROP(0, workaround) /* apply workarounds */ dsa_ksz8794_apply_workarounds(pdev); #endif pdev->is_init = true; return 0; } static void dsa_delayed_work(struct k_work *item) { struct k_work_delayable *dwork = k_work_delayable_from_work(item); struct dsa_context *context = CONTAINER_OF(dwork, struct dsa_context, 
dsa_work); struct ksz8xxx_data *pdev = PRV_DATA(context); bool link_state; uint8_t i; for (i = KSZ8XXX_FIRST_PORT; i <= KSZ8XXX_LAST_PORT; i++) { /* Skip Switch <-> CPU Port */ if (i == KSZ8XXX_CPU_PORT) { continue; } link_state = dsa_ksz8xxx_port_link_status(pdev, i); if (link_state && !context->link_up[i]) { LOG_INF("DSA port: %d link UP!", i); net_eth_carrier_on(context->iface_slave[i]); } else if (!link_state && context->link_up[i]) { LOG_INF("DSA port: %d link DOWN!", i); net_eth_carrier_off(context->iface_slave[i]); } context->link_up[i] = link_state; } k_work_reschedule(&context->dsa_work, DSA_STATUS_PERIOD_MS); } int dsa_port_init(const struct device *dev) { struct dsa_context *data = dev->data; struct ksz8xxx_data *pdev = PRV_DATA(data); dsa_hw_init(pdev); return 0; } /* Generic implementation of writing value to DSA register */ static int dsa_ksz8xxx_sw_write_reg(const struct device *dev, uint16_t reg_addr, uint8_t value) { struct dsa_context *data = dev->data; struct ksz8xxx_data *pdev = PRV_DATA(data); dsa_ksz8xxx_write_reg(pdev, reg_addr, value); return 0; } /* Generic implementation of reading value from DSA register */ static int dsa_ksz8xxx_sw_read_reg(const struct device *dev, uint16_t reg_addr, uint8_t *value) { struct dsa_context *data = dev->data; struct ksz8xxx_data *pdev = PRV_DATA(data); dsa_ksz8xxx_read_reg(pdev, reg_addr, value); return 0; } /** * @brief Set entry to DSA MAC address table * * @param dev DSA device * @param mac The MAC address to be set in the table * @param fw_port Port number to forward packets * @param tbl_entry_idx The index of entry in the table * @param flags Flags to be set in the entry * * @return 0 if ok, < 0 if error */ static int dsa_ksz8xxx_set_mac_table_entry(const struct device *dev, const uint8_t *mac, uint8_t fw_port, uint16_t tbl_entry_idx, uint16_t flags) { struct dsa_context *data = dev->data; struct ksz8xxx_data *pdev = PRV_DATA(data); if (flags != 0) { return -EINVAL; } 
	dsa_ksz8xxx_set_static_mac_table(pdev, mac, fw_port, tbl_entry_idx);

	return 0;
}

/**
 * @brief Get DSA MAC address table entry
 *
 * @param dev DSA device
 * @param buf The buffer for data read from the table
 * @param tbl_entry_idx The index of entry in the table
 *
 * @return 0 if ok, < 0 if error
 */
static int dsa_ksz8xxx_get_mac_table_entry(const struct device *dev,
					   uint8_t *buf,
					   uint16_t tbl_entry_idx)
{
	struct dsa_context *data = dev->data;
	struct ksz8xxx_data *pdev = PRV_DATA(data);

	dsa_ksz8xxx_read_static_mac_table(pdev, tbl_entry_idx, buf);

	return 0;
}

#if defined(CONFIG_DSA_KSZ_TAIL_TAGGING)
#define DSA_KSZ8795_TAIL_TAG_OVRD BIT(6)
#define DSA_KSZ8795_TAIL_TAG_LOOKUP BIT(7)

#define DSA_KSZ8794_EGRESS_TAG_LEN 1
#define DSA_KSZ8794_INGRESS_TAG_LEN 1

#define DSA_MIN_L2_FRAME_SIZE 64
#define DSA_L2_FCS_SIZE 4

/*
 * Append the KSZ tail tag (and any padding needed to reach the minimum L2
 * frame size) to an outgoing packet. Returns pkt, or NULL if a buffer for
 * the tag could not be allocated.
 */
struct net_pkt *dsa_ksz8xxx_xmit_pkt(struct net_if *iface,
				     struct net_pkt *pkt)
{
	struct ethernet_context *ctx = net_if_l2_data(iface);
	struct net_eth_hdr *hdr = NET_ETH_HDR(pkt);
	struct net_linkaddr lladst;
	uint8_t port_idx, *dbuf;
	struct net_buf *buf;
	size_t len, pad = 0;

	lladst.len = sizeof(hdr->dst.addr);
	lladst.addr = &hdr->dst.addr[0];

	len = net_pkt_get_len(pkt);

	/*
	 * For KSZ8794 one needs to 'pad' the L2 frame to its minimal size
	 * (64B) before appending TAIL TAG and FCS
	 */
	if (len < (DSA_MIN_L2_FRAME_SIZE - DSA_L2_FCS_SIZE)) {
		/* Calculate number of bytes needed for padding */
		pad = DSA_MIN_L2_FRAME_SIZE - DSA_L2_FCS_SIZE - len;
	}

	buf = net_buf_alloc_len(net_buf_pool_get(pkt->buffer->pool_id),
				pad + DSA_KSZ8794_INGRESS_TAG_LEN, K_NO_WAIT);
	if (!buf) {
		LOG_ERR("DSA cannot allocate new data buffer");
		return NULL;
	}

	/*
	 * Get the pointer to struct's net_buf_simple data and zero out the
	 * padding and tag byte placeholder
	 */
	dbuf = net_buf_simple_tail(&(buf->b));
	memset(dbuf, 0x0, pad + DSA_KSZ8794_INGRESS_TAG_LEN);

	/*
	 * For master port (eth0) set the bit 7 to use look-up table to pass
	 * packet to correct interface (bits [0..6] _are_ ignored.
	 *
	 * For slave ports (lan1..3) just set the tag properly:
	 * bit 0 -> eth1, bit 1 -> eth2. bit 2 -> eth3
	 * It may be also necessary to set bit 6 to "anyhow send packets to
	 * specified port in Bits[3:0]". This may be needed for RSTP
	 * implementation (when the switch port is disabled, but shall handle
	 * LLDP packets).
	 */
	if (dsa_is_port_master(iface)) {
		port_idx = DSA_KSZ8795_TAIL_TAG_LOOKUP;
	} else {
		port_idx = (1 << (ctx->dsa_port_idx));
	}

	NET_DBG("TT - port: 0x%x[%p] LEN: %d 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		port_idx, iface, len, lladst.addr[0], lladst.addr[1],
		lladst.addr[2], lladst.addr[3], lladst.addr[4],
		lladst.addr[5]);

	/* The tail tag shall be placed after the padding (if present) */
	dbuf[pad] = port_idx;

	/* Set proper len member for the actual struct net_buf_simple */
	net_buf_add(buf, pad + DSA_KSZ8794_INGRESS_TAG_LEN);

	/* Append struct net_buf to packet data */
	net_buf_frag_add(pkt->buffer, buf);

	return pkt;
}

/**
 * @brief DSA function to get proper interface
 *
 * This is the function for assigning proper slave interface after receiving
 * the packet on master.
 *
 * @param iface Network interface
 * @param pkt Network packet
 *
 * Returns:
 *  - Pointer to struct net_if
 */
static struct net_if *dsa_ksz8xxx_get_iface(struct net_if *iface,
					    struct net_pkt *pkt)
{
	struct ethernet_context *ctx;
	struct net_if *iface_sw;
	size_t plen;
	uint8_t pnum;

	if (!(net_eth_get_hw_capabilities(iface) &
	      (ETHERNET_DSA_SLAVE_PORT | ETHERNET_DSA_MASTER_PORT))) {
		return iface;
	}

	net_pkt_set_overwrite(pkt, true);
	net_pkt_cursor_init(pkt);

	/* Read the trailing tail-tag byte and strip it from the packet */
	plen = net_pkt_get_len(pkt);
	net_pkt_skip(pkt, plen - DSA_KSZ8794_EGRESS_TAG_LEN);
	net_pkt_read_u8(pkt, &pnum);
	net_pkt_update_length(pkt, plen - DSA_KSZ8794_EGRESS_TAG_LEN);

	/*
	 * NOTE:
	 * The below approach is only for ip_k66f board as we do know
	 * that eth0 is on position (index) 1, then we do have lan1 with
	 * index 2, lan2 with 3 and lan3 with 4.
	 *
	 * This is caused by eth interfaces placing order by linker and
	 * may vary on other boards, where are for example two eth
	 * interfaces available.
	 */
	iface_sw = net_if_get_by_index(pnum + 2);

	ctx = net_if_l2_data(iface);
	NET_DBG("TT - plen: %d pnum: %d pos: 0x%p dsa_port_idx: %d",
		plen - DSA_KSZ8794_EGRESS_TAG_LEN, pnum,
		net_pkt_cursor_get_pos(pkt), ctx->dsa_port_idx);

	return iface_sw;
}
#endif

/*
 * Slave interface init: wire this iface into the DSA context, set its MAC
 * address and port index, and (once all slaves are present) start the
 * periodic link-status polling work.
 */
static void dsa_iface_init(struct net_if *iface)
{
	struct dsa_slave_config *cfg = (struct dsa_slave_config *)
		net_if_get_device(iface)->config;
	struct ethernet_context *ctx = net_if_l2_data(iface);
	const struct device *dm, *dev = net_if_get_device(iface);
	struct dsa_context *context = dev->data;
	struct ksz8xxx_data *pdev = PRV_DATA(context);
	struct ethernet_context *ctx_master;
	int i = pdev->iface_init_count;

	/* Find master port for ksz8794 switch */
	if (context->iface_master == NULL) {
		dm = DEVICE_DT_GET(DT_INST_PHANDLE(0, dsa_master_port));
		context->iface_master = net_if_lookup_by_dev(dm);
		if (context->iface_master == NULL) {
			LOG_ERR("DSA: Master iface NOT found!");
			return;
		}

		/*
		 * Provide pointer to DSA context to master's eth interface
		 * struct ethernet_context
		 */
		ctx_master = net_if_l2_data(context->iface_master);
		ctx_master->dsa_ctx = context;
	}

	if (context->iface_slave[i] == NULL) {
		context->iface_slave[i] = iface;
		net_if_set_link_addr(iface, cfg->mac_addr,
				     sizeof(cfg->mac_addr),
				     NET_LINK_ETHERNET);
		ctx->dsa_port_idx = i;
		ctx->dsa_ctx = context;
		/*
		 * Initialize ethernet context 'work' for this iface to
		 * be able to monitor the carrier status.
		 */
		ethernet_init(iface);
	}

	pdev->iface_init_count++;

	net_if_carrier_off(iface);

	/*
	 * Start DSA work to monitor status of ports (read from switch IC)
	 * only when carrier_work is properly initialized for all slave
	 * interfaces.
	 */
	if (pdev->iface_init_count == context->num_slave_ports) {
		k_work_init_delayable(&context->dsa_work, dsa_delayed_work);
		k_work_reschedule(&context->dsa_work, DSA_STATUS_PERIOD_MS);
	}
}

/* Advertised capabilities of every DSA slave port. */
static enum ethernet_hw_caps dsa_port_get_capabilities(const struct device *dev)
{
	ARG_UNUSED(dev);

	return ETHERNET_DSA_SLAVE_PORT | ETHERNET_LINK_10BASE_T |
		ETHERNET_LINK_100BASE_T;
}

/* Ethernet L2 API exposed by each slave port device. */
const struct ethernet_api dsa_eth_api_funcs = {
	.iface_api.init		= dsa_iface_init,
	.get_capabilities	= dsa_port_get_capabilities,
	.send			= dsa_tx,
};

/* DSA switch-level operations table consumed by the DSA core. */
static struct dsa_api dsa_api_f = {
	.switch_read = dsa_ksz8xxx_sw_read_reg,
	.switch_write = dsa_ksz8xxx_sw_write_reg,
	.switch_set_mac_table_entry = dsa_ksz8xxx_set_mac_table_entry,
	.switch_get_mac_table_entry = dsa_ksz8xxx_get_mac_table_entry,
#if defined(CONFIG_DSA_KSZ_TAIL_TAGGING)
	.dsa_xmit_pkt = dsa_ksz8xxx_xmit_pkt,
	.dsa_get_iface = dsa_ksz8xxx_get_iface,
#endif
};

/*
 * The order of NET_DEVICE_INIT_INSTANCE() placement IS important.
 *
 * To make the code simpler - the special care needs to be put on
 * the proper placement of eth0, lan1, lan2, lan3, etc - to avoid
 * the need to search for proper interface when each packet is
 * received or sent.
 * The net_if.c has a very fast API to provide access to linked by
 * the linker struct net_if(s) via device or index. As it is already
 * available for use - let's use it.
 *
 * To do that one needs to check how linker places the interfaces.
 * To inspect:
 * objdump -dst ./zephyr/CMakeFiles/zephyr.dir/drivers/ethernet/eth_mcux.c.obj\
 * | grep "__net_if"
 * (The real problem is with eth0 and lanX order)
 *
 * If this approach is not enough for a simple system (like e.g. ip_k66f, one
 * can prepare dedicated linker script for the board to force the
 * order for complicated designs (like ones with eth0, eth1, and lanX).
 *
 * For simple cases it is just good enough.
 */
/* Instantiate one slave net device ("lanN") for a devicetree child node. */
#define NET_SLAVE_DEVICE_INIT_INSTANCE(slave, n)			\
	const struct dsa_slave_config dsa_0_slave_##slave##_config = {	\
		.mac_addr = DT_PROP_OR(slave, local_mac_address, {0})	\
	};								\
	NET_DEVICE_INIT_INSTANCE(CONCAT(dsa_slave_port_, slave),	\
			"lan" STRINGIFY(n),				\
			n,						\
			dsa_port_init,					\
			NULL,						\
			&dsa_context_##n,				\
			&dsa_0_slave_##slave##_config,			\
			CONFIG_ETH_INIT_PRIORITY,			\
			&dsa_eth_api_funcs,				\
			ETHERNET_L2,					\
			NET_L2_GET_CTX_TYPE(ETHERNET_L2),		\
			NET_ETH_MTU);

#define NET_SLAVE_DEVICE_0_INIT_INSTANCE(slave)	\
	NET_SLAVE_DEVICE_INIT_INSTANCE(slave, 0)
#define NET_SLAVE_DEVICE_1_INIT_INSTANCE(slave)	\
	NET_SLAVE_DEVICE_INIT_INSTANCE(slave, 1)
#define NET_SLAVE_DEVICE_2_INIT_INSTANCE(slave)	\
	NET_SLAVE_DEVICE_INIT_INSTANCE(slave, 2)
#define NET_SLAVE_DEVICE_3_INIT_INSTANCE(slave)	\
	NET_SLAVE_DEVICE_INIT_INSTANCE(slave, 3)
#define NET_SLAVE_DEVICE_4_INIT_INSTANCE(slave)	\
	NET_SLAVE_DEVICE_INIT_INSTANCE(slave, 4)

#if defined(CONFIG_DSA_SPI)
/* SPI bus descriptor initializer for instance n */
#define DSA_SPI_BUS_CONFIGURATION(n)			\
	.spi = SPI_DT_SPEC_INST_GET(n,			\
			SPI_WORD_SET(8),		\
			0U)
#else
#define DSA_SPI_BUS_CONFIGURATION(n)
#endif

/* Per-instance private data + DSA context + one net device per child. */
#define DSA_DEVICE(n)						\
	static struct ksz8xxx_data dsa_device_prv_data_##n = {	\
		.iface_init_count = 0,				\
		.is_init = false,				\
		DSA_SPI_BUS_CONFIGURATION(n),			\
	};							\
	static struct dsa_context dsa_context_##n = {		\
		.num_slave_ports = DT_INST_PROP(0, dsa_slave_ports), \
		.dapi = &dsa_api_f,				\
		.prv_data = (void *)&dsa_device_prv_data_##n,	\
	};							\
	DT_INST_FOREACH_CHILD_VARGS(n, NET_SLAVE_DEVICE_INIT_INSTANCE, n);

DT_INST_FOREACH_STATUS_OKAY(DSA_DEVICE);
```
/content/code_sandbox/drivers/ethernet/dsa_ksz8xxx.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
10,514
```objective-c /* * Lukasz Majewski <lukma@denx.de> */ #ifndef __DSA_KSZ8794_H__ #define __DSA_KSZ8794_H__ /* SPI commands */ #define KSZ8794_SPI_CMD_WR (BIT(6)) #define KSZ8794_SPI_CMD_RD (BIT(6) | BIT(5)) /* PHY registers */ #define KSZ8794_BMCR 0x00 #define KSZ8794_BMSR 0x01 #define KSZ8794_PHYID1 0x02 #define KSZ8794_PHYID2 0x03 #define KSZ8794_ANAR 0x04 #define KSZ8794_ANLPAR 0x05 #define KSZ8794_LINKMD 0x1D #define KSZ8794_PHYSCS 0x1F /* SWITCH registers */ #define KSZ8794_CHIP_ID0 0x00 #define KSZ8794_CHIP_ID1 0x01 #define KSZ8794_GLOBAL_CTRL0 0x02 #define KSZ8794_GLOBAL_CTRL1 0x03 #define KSZ8794_GLOBAL_CTRL2 0x04 #define KSZ8794_GLOBAL_CTRL3 0x05 #define KSZ8794_GLOBAL_CTRL4 0x06 #define KSZ8794_GLOBAL_CTRL5 0x07 #define KSZ8794_GLOBAL_CTRL6_MIB_CTRL 0x08 #define KSZ8794_GLOBAL_CTRL7 0x09 #define KSZ8794_GLOBAL_CTRL8 0x0A #define KSZ8794_GLOBAL_CTRL9 0x0B #define KSZ8794_GLOBAL_CTRL10 0x0C #define KSZ8794_GLOBAL_CTRL11 0x0D #define KSZ8794_PD_MGMT_CTRL1 0x0E #define KSZ8794_PD_MGMT_CTRL2 0x0F #define KSZ8794_PORT1_CTRL0 0x10 #define KSZ8794_PORT1_CTRL1 0x11 #define KSZ8794_PORT1_CTRL2 0x12 #define KSZ8794_PORT1_CTRL3 0x13 #define KSZ8794_PORT1_CTRL4 0x14 #define KSZ8794_PORT1_CTRL5 0x15 #define KSZ8794_PORT1_CTRL7 0x17 #define KSZ8794_PORT1_STAT0 0x18 #define KSZ8794_PORT1_STAT1 0x19 #define KSZ8794_PORT1_PHY_CTRL8 0x1A #define KSZ8794_PORT1_LINKMD 0x1B #define KSZ8794_PORT1_PHY_CTRL9 0x1C #define KSZ8794_PORT1_PHY_CTRL10 0x1D #define KSZ8794_PORT1_STAT2 0x1E #define KSZ8794_PORT1_CTRL11_STAT3 0x1F #define KSZ8794_PORT2_CTRL0 0x20 #define KSZ8794_PORT2_CTRL1 0x21 #define KSZ8794_PORT2_CTRL2 0x22 #define KSZ8794_PORT2_CTRL3 0x23 #define KSZ8794_PORT2_CTRL4 0x24 #define KSZ8794_PORT2_CTRL5 0x25 #define KSZ8794_PORT2_CTRL7 0x27 #define KSZ8794_PORT2_STAT0 0x28 #define KSZ8794_PORT2_STAT1 0x29 #define KSZ8794_PORT2_PHY_CTRL8 0x2A #define KSZ8794_PORT2_LINKMD 0x2B #define KSZ8794_PORT2_PHY_CTRL9 0x2C #define KSZ8794_PORT2_PHY_CTRL10 0x2D #define 
KSZ8794_PORT2_STAT2 0x2E #define KSZ8794_PORT2_CTRL11_STAT3 0x2F #define KSZ8794_PORT3_CTRL0 0x30 #define KSZ8794_PORT3_CTRL1 0x31 #define KSZ8794_PORT3_CTRL2 0x32 #define KSZ8794_PORT3_CTRL3 0x33 #define KSZ8794_PORT3_CTRL4 0x34 #define KSZ8794_PORT3_CTRL5 0x35 #define KSZ8794_PORT3_CTRL7 0x37 #define KSZ8794_PORT3_STAT0 0x38 #define KSZ8794_PORT3_STAT1 0x39 #define KSZ8794_PORT3_PHY_CTRL8 0x3A #define KSZ8794_PORT3_LINKMD 0x3B #define KSZ8794_PORT3_PHY_CTRL9 0x3C #define KSZ8794_PORT3_PHY_CTRL10 0x3D #define KSZ8794_PORT3_STAT2 0x3E #define KSZ8794_PORT3_CTRL11_STAT3 0x3F #define KSZ8794_PORT4_CTRL0 0x50 #define KSZ8794_PORT4_CTRL1 0x51 #define KSZ8794_PORT4_CTRL2 0x52 #define KSZ8794_PORT4_CTRL3 0x53 #define KSZ8794_PORT4_CTRL4 0x54 #define KSZ8794_PORT4_CTRL5 0x55 #define KSZ8794_PORT4_IF_CTRL6 0x56 #define KSZ8794_MAC_ADDR0 0x68 #define KSZ8794_MAC_ADDR1 0x69 #define KSZ8794_MAC_ADDR2 0x6A #define KSZ8794_MAC_ADDR3 0x6B #define KSZ8794_MAC_ADDR4 0x6C #define KSZ8794_MAC_ADDR5 0x6D #define KSZ8794_IND_ACCESS_CTRL0 0x6E #define KSZ8794_IND_ACCESS_CTRL1 0x6F #define KSZ8794_IND_DATA8 0x70 #define KSZ8794_IND_DATA7 0x71 #define KSZ8794_IND_DATA6 0x72 #define KSZ8794_IND_DATA5 0x73 #define KSZ8794_IND_DATA4 0x74 #define KSZ8794_IND_DATA3 0x75 #define KSZ8794_IND_DATA2 0x76 #define KSZ8794_IND_DATA1 0x77 #define KSZ8794_IND_DATA0 0x78 #define KSZ8794_INT_STAT 0x7C #define KSZ8794_INT_MASK 0x7D #define KSZ8794_ACL_INT_STAT 0x7E #define KSZ8794_ACL_CTRL 0x7F #define KSZ8794_GLOBAL_CTRL12 0x80 #define KSZ8794_GLOBAL_CTRL13 0x81 #define KSZ8794_GLOBAL_CTRL14 0x82 #define KSZ8794_GLOBAL_CTRL15 0x83 #define KSZ8794_GLOBAL_CTRL16 0x84 #define KSZ8794_GLOBAL_CTRL17 0x85 #define KSZ8794_GLOBAL_CTRL18 0x86 #define KSZ8794_GLOBAL_CTRL19 0x87 #define KSZ8794_TOS_PRIO_CTRL0 0x90 #define KSZ8794_TOS_PRIO_CTRL1 0x91 #define KSZ8794_TOS_PRIO_CTRL2 0x92 #define KSZ8794_TOS_PRIO_CTRL3 0x93 #define KSZ8794_TOS_PRIO_CTRL4 0x94 #define KSZ8794_TOS_PRIO_CTRL5 0x95 #define 
KSZ8794_TOS_PRIO_CTRL6 0x96 #define KSZ8794_TOS_PRIO_CTRL7 0x97 #define KSZ8794_TOS_PRIO_CTRL8 0x98 #define KSZ8794_TOS_PRIO_CTRL9 0x99 #define KSZ8794_TOS_PRIO_CTRL10 0x9A #define KSZ8794_TOS_PRIO_CTRL11 0x9B #define KSZ8794_TOS_PRIO_CTRL12 0x9C #define KSZ8794_TOS_PRIO_CTRL13 0x9D #define KSZ8794_TOS_PRIO_CTRL14 0x9E #define KSZ8794_TOS_PRIO_CTRL15 0x9F #define KSZ8794_IND_BYTE 0xA0 #define KSZ8794_GLOBAL_CTRL20 0xA3 #define KSZ8794_GLOBAL_CTRL21 0xA4 #define KSZ8794_PORT1_CTRL12 0xB0 #define KSZ8794_PORT1_CTRL13 0xB1 #define KSZ8794_PORT1_CTRL14 0xB2 #define KSZ8794_PORT1_CTRL15 0xB3 #define KSZ8794_PORT1_CTRL16 0xB4 #define KSZ8794_PORT1_CTRL17 0xB5 #define KSZ8794_PORT1_RATE_LIMIT_CTRL 0xB6 #define KSZ8794_PORT1_PRIO0_IG_LIMIT_CTRL1 0xB7 #define KSZ8794_PORT1_PRIO1_IG_LIMIT_CTRL2 0xB8 #define KSZ8794_PORT1_PRIO2_IG_LIMIT_CTRL3 0xB9 #define KSZ8794_PORT1_PRIO3_IG_LIMIT_CTRL4 0xBA #define KSZ8794_PORT1_QUEUE0_EG_LIMIT_CTRL1 0xBB #define KSZ8794_PORT1_QUEUE1_EG_LIMIT_CTRL2 0xBC #define KSZ8794_PORT1_QUEUE2_EG_LIMIT_CTRL3 0xBD #define KSZ8794_PORT1_QUEUE3_EG_LIMIT_CTRL4 0xBE #define KSZ8794_TEST 0xBF #define KSZ8794_PORT2_CTRL12 0xC0 #define KSZ8794_PORT2_CTRL13 0xC1 #define KSZ8794_PORT2_CTRL14 0xC2 #define KSZ8794_PORT2_CTRL15 0xC3 #define KSZ8794_PORT2_CTRL16 0xC4 #define KSZ8794_PORT2_CTRL17 0xC5 #define KSZ8794_PORT2_RATE_LIMIT_CTRL 0xC6 #define KSZ8794_PORT2_PRIO0_IG_LIMIT_CTRL1 0xC7 #define KSZ8794_PORT2_PRIO1_IG_LIMIT_CTRL2 0xC8 #define KSZ8794_PORT2_PRIO2_IG_LIMIT_CTRL3 0xC9 #define KSZ8794_PORT2_PRIO3_IG_LIMIT_CTRL4 0xCA #define KSZ8794_PORT2_QUEUE0_EG_LIMIT_CTRL1 0xCB #define KSZ8794_PORT2_QUEUE1_EG_LIMIT_CTRL2 0xCC #define KSZ8794_PORT2_QUEUE2_EG_LIMIT_CTRL3 0xCD #define KSZ8794_PORT2_QUEUE3_EG_LIMIT_CTRL4 0xCE #define KSZ8794_PORT3_CTRL12 0xD0 #define KSZ8794_PORT3_CTRL13 0xD1 #define KSZ8794_PORT3_CTRL14 0xD2 #define KSZ8794_PORT3_CTRL15 0xD3 #define KSZ8794_PORT3_CTRL16 0xD4 #define KSZ8794_PORT3_CTRL17 0xD5 #define KSZ8794_PORT3_RATE_LIMIT_CTRL 
0xD6 #define KSZ8794_PORT3_PRIO0_IG_LIMIT_CTRL1 0xD7 #define KSZ8794_PORT3_PRIO1_IG_LIMIT_CTRL2 0xD8 #define KSZ8794_PORT3_PRIO2_IG_LIMIT_CTRL3 0xD9 #define KSZ8794_PORT3_PRIO3_IG_LIMIT_CTRL4 0xDA #define KSZ8794_PORT3_QUEUE0_EG_LIMIT_CTRL1 0xDB #define KSZ8794_PORT3_QUEUE1_EG_LIMIT_CTRL2 0xDC #define KSZ8794_PORT3_QUEUE2_EG_LIMIT_CTRL3 0xDD #define KSZ8794_PORT3_QUEUE3_EG_LIMIT_CTRL4 0xDE #define KSZ8794_TEST2 0xDF #define KSZ8794_TEST3 0xEF #define KSZ8794_PORT4_CTRL12 0xF0 #define KSZ8794_PORT4_CTRL13 0xF1 #define KSZ8794_PORT4_CTRL14 0xF2 #define KSZ8794_PORT4_CTRL15 0xF3 #define KSZ8794_PORT4_CTRL16 0xF4 #define KSZ8794_PORT4_CTRL17 0xF5 #define KSZ8794_PORT4_RATE_LIMIT_CTRL 0xF6 #define KSZ8794_PORT4_PRIO0_IG_LIMIT_CTRL1 0xF7 #define KSZ8794_PORT4_PRIO1_IG_LIMIT_CTRL2 0xF8 #define KSZ8794_PORT4_PRIO2_IG_LIMIT_CTRL3 0xF9 #define KSZ8794_PORT4_PRIO3_IG_LIMIT_CTRL4 0xFA #define KSZ8794_PORT4_QUEUE0_EG_LIMIT_CTRL1 0xFB #define KSZ8794_PORT4_QUEUE1_EG_LIMIT_CTRL2 0xFC #define KSZ8794_PORT4_QUEUE2_EG_LIMIT_CTRL3 0xFD #define KSZ8794_PORT4_QUEUE3_EG_LIMIT_CTRL4 0xFE #define KSZ8794_TEST4 0xFF /* Basic Control register */ #define KSZ8794_BMCR_RESET 0x8000 #define KSZ8794_BMCR_LOOPBACK 0x4000 #define KSZ8794_BMCR_FORCE_100 0x2000 #define KSZ8794_BMCR_AN_EN 0x1000 #define KSZ8794_BMCR_POWER_DOWN 0x0800 #define KSZ8794_BMCR_ISOLATE 0x0400 #define KSZ8794_BMCR_RESTART_AN 0x0200 #define KSZ8794_BMCR_FORCE_FULL_DUPLEX 0x0100 #define KSZ8794_BMCR_HP_MDIX 0x0020 #define KSZ8794_BMCR_FORCE_MDI 0x0010 #define KSZ8794_BMCR_AUTO_MDIX_DIS 0x0008 #define KSZ8794_BMCR_FAR_END_FAULT_DIS 0x0004 #define KSZ8794_BMCR_TRANSMIT_DIS 0x0002 #define KSZ8794_BMCR_LED_DIS 0x0001 /* Basic Status register */ #define KSZ8794_BMSR_100BT4 0x8000 #define KSZ8794_BMSR_100BTX_FD 0x4000 #define KSZ8794_BMSR_100BTX_HD 0x2000 #define KSZ8794_BMSR_10BT_FD 0x1000 #define KSZ8794_BMSR_10BT_HD 0x0800 #define KSZ8794_BMSR_AN_COMPLETE 0x0020 #define KSZ8794_BMSR_FAR_END_FAULT 0x0010 #define 
KSZ8794_BMSR_AN_CAPABLE 0x0008 #define KSZ8794_BMSR_LINK_STATUS 0x0004 #define KSZ8794_BMSR_EXTENDED_CAPABLE 0x0001 #define KSZ8794_GLOBAL_CTRL10_TAIL_TAG_EN BIT(1) #define KSZ8794_GLOBAL_CTRL2_LEG_MAX_PKT_SIZ_CHK_DIS BIT(1) #define KSZ8794_CTRL2_PORTn(n) (0x12 + ((n) * 0x10)) #define KSZ8794_CTRL2_TRANSMIT_EN BIT(2) #define KSZ8794_CTRL2_RECEIVE_EN BIT(1) #define KSZ8794_CTRL2_LEARNING_DIS BIT(0) #define KSZ8794_STAT2_PORTn(n) (0x1E + ((n) * 0x10)) #define KSZ8794_STAT2_LINK_GOOD BIT(5) #define KSZ8794_CHIP_ID0_ID_DEFAULT 0x87 #define KSZ8794_CHIP_ID1_ID_DEFAULT 0x61 #define KSZ8794_PWR_MGNT_MODE_SOFT_DOWN BIT(4) #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_MASK 0x07 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_2MA 0x00 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_4MA 0x01 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_8MA 0x02 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_12MA 0x03 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_16MA 0x04 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_20MA 0x05 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_24MA 0x06 #define KSZ8794_GLOBAL_CTRL20_LOWSPEED_28MA 0x07 enum { /* * KSZ8794 register's MAP * (0x00 - 0x0F): Global Registers * Port registers (offsets): * (0x10): Port 1 * (0x20): Port 2 * (0x30): Port 3 * (0x40): Reserved * (0x50): Port 4 */ /* LAN ports for the ksz8794 switch */ KSZ8794_PORT1 = 0, KSZ8794_PORT2, KSZ8794_PORT3, /* * SWITCH <-> CPU port * * We also need to consider the "Reserved' offset * defined above. 
*/ KSZ8794_PORT4 = 4, }; #define KSZ8794_REG_IND_DATA_8 0x70 #define KSZ8794_REG_IND_DATA_7 0x71 #define KSZ8794_REG_IND_DATA_6 0x72 #define KSZ8794_REG_IND_DATA_5 0x73 #define KSZ8794_REG_IND_DATA_4 0x74 #define KSZ8794_REG_IND_DATA_3 0x75 #define KSZ8794_REG_IND_DATA_2 0x76 #define KSZ8794_REG_IND_DATA_1 0x77 #define KSZ8794_REG_IND_DATA_0 0x78 #define KSZ8794_REG_IND_CTRL_0 0x6E #define KSZ8794_REG_IND_CTRL_1 0x6F #define KSZ8794_STATIC_MAC_TABLE_VALID BIT(5) #define KSZ8794_STATIC_MAC_TABLE_OVRD BIT(6) #define KSZ8XXX_CHIP_ID0 KSZ8794_CHIP_ID0 #define KSZ8XXX_CHIP_ID1 KSZ8794_CHIP_ID1 #define KSZ8XXX_CHIP_ID0_ID_DEFAULT KSZ8794_CHIP_ID0_ID_DEFAULT #define KSZ8XXX_CHIP_ID1_ID_DEFAULT KSZ8794_CHIP_ID1_ID_DEFAULT #define KSZ8XXX_FIRST_PORT KSZ8794_PORT1 #define KSZ8XXX_LAST_PORT KSZ8794_PORT3 #define KSZ8XXX_CPU_PORT KSZ8794_PORT4 #define KSZ8XXX_REG_IND_CTRL_0 KSZ8794_REG_IND_CTRL_0 #define KSZ8XXX_REG_IND_CTRL_1 KSZ8794_REG_IND_CTRL_1 #define KSZ8XXX_REG_IND_DATA_8 KSZ8794_REG_IND_DATA_8 #define KSZ8XXX_REG_IND_DATA_7 KSZ8794_REG_IND_DATA_7 #define KSZ8XXX_REG_IND_DATA_6 KSZ8794_REG_IND_DATA_6 #define KSZ8XXX_REG_IND_DATA_5 KSZ8794_REG_IND_DATA_5 #define KSZ8XXX_REG_IND_DATA_4 KSZ8794_REG_IND_DATA_4 #define KSZ8XXX_REG_IND_DATA_3 KSZ8794_REG_IND_DATA_3 #define KSZ8XXX_REG_IND_DATA_2 KSZ8794_REG_IND_DATA_2 #define KSZ8XXX_REG_IND_DATA_1 KSZ8794_REG_IND_DATA_1 #define KSZ8XXX_REG_IND_DATA_0 KSZ8794_REG_IND_DATA_0 #define KSZ8XXX_STATIC_MAC_TABLE_VALID KSZ8794_STATIC_MAC_TABLE_VALID #define KSZ8XXX_STATIC_MAC_TABLE_OVRD KSZ8794_STATIC_MAC_TABLE_OVRD #define KSZ8XXX_STAT2_LINK_GOOD KSZ8794_STAT2_LINK_GOOD #define KSZ8XXX_RESET_REG KSZ8794_PD_MGMT_CTRL1 #define KSZ8XXX_RESET_SET KSZ8794_PWR_MGNT_MODE_SOFT_DOWN #define KSZ8XXX_RESET_CLEAR 0 #define KSZ8XXX_STAT2_PORTn KSZ8794_STAT2_PORTn #define KSZ8XXX_SPI_CMD_RD KSZ8794_SPI_CMD_RD #define KSZ8XXX_SPI_CMD_WR KSZ8794_SPI_CMD_WR #define KSZ8XXX_SOFT_RESET_DURATION 1000 #define KSZ8XXX_HARD_RESET_WAIT 10000 #endif /* 
__DSA_KSZ8794_H__ */ ```
/content/code_sandbox/drivers/ethernet/dsa_ksz8794.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,838
```objective-c /* * */ #ifndef ETH_E1000_PRIV_H #define ETH_E1000_PRIV_H #ifdef __cplusplus extern "C" { #endif #define CTRL_SLU (1 << 6) /* Set Link Up */ #define TCTL_EN (1 << 1) #define RCTL_EN (1 << 1) #define ICR_TXDW (1) /* Transmit Descriptor Written Back */ #define ICR_TXQE (1 << 1) /* Transmit Queue Empty */ #define ICR_RXO (1 << 6) /* Receiver Overrun */ #define IMS_RXO (1 << 6) /* Receiver FIFO Overrun */ #define RCTL_MPE (1 << 4) /* Multicast Promiscuous Enabled */ #define TDESC_EOP (1) /* End Of Packet */ #define TDESC_RS (1 << 3) /* Report Status */ #define RDESC_STA_DD (1) /* Descriptor Done */ #define TDESC_STA_DD (1) /* Descriptor Done */ #define ETH_ALEN 6 /* TODO: Add a global reusable definition in OS */ enum e1000_reg_t { CTRL = 0x0000, /* Device Control */ ICR = 0x00C0, /* Interrupt Cause Read */ ICS = 0x00C8, /* Interrupt Cause Set */ IMS = 0x00D0, /* Interrupt Mask Set */ RCTL = 0x0100, /* Receive Control */ TCTL = 0x0400, /* Transmit Control */ RDBAL = 0x2800, /* Rx Descriptor Base Address Low */ RDBAH = 0x2804, /* Rx Descriptor Base Address High */ RDLEN = 0x2808, /* Rx Descriptor Length */ RDH = 0x2810, /* Rx Descriptor Head */ RDT = 0x2818, /* Rx Descriptor Tail */ TDBAL = 0x3800, /* Tx Descriptor Base Address Low */ TDBAH = 0x3804, /* Tx Descriptor Base Address High */ TDLEN = 0x3808, /* Tx Descriptor Length */ TDH = 0x3810, /* Tx Descriptor Head */ TDT = 0x3818, /* Tx Descriptor Tail */ RAL = 0x5400, /* Receive Address Low */ RAH = 0x5404, /* Receive Address High */ }; /* Legacy TX Descriptor */ struct e1000_tx { uint64_t addr; uint16_t len; uint8_t cso; uint8_t cmd; uint8_t sta; uint8_t css; uint16_t special; }; /* Legacy RX Descriptor */ struct e1000_rx { uint64_t addr; uint16_t len; uint16_t csum; uint8_t sta; uint8_t err; uint16_t special; }; struct e1000_dev { volatile struct e1000_tx tx __aligned(16); volatile struct e1000_rx rx __aligned(16); mm_reg_t address; /* BDF & DID/VID */ struct pcie_dev *pcie; /* If VLAN is enabled, 
there can be multiple VLAN interfaces related to * this physical device. In that case, this iface pointer value is not * really used for anything. */ struct net_if *iface; uint8_t mac[ETH_ALEN]; uint8_t txb[NET_ETH_MTU]; uint8_t rxb[NET_ETH_MTU]; #if defined(CONFIG_ETH_E1000_PTP_CLOCK) const struct device *ptp_clock; double clk_ratio; #endif }; struct e1000_config { void (*config_func)(const struct e1000_dev *dev); }; static const char *e1000_reg_to_string(enum e1000_reg_t r) __attribute__((unused)); #define iow32(_dev, _reg, _val) do { \ LOG_DBG("iow32 %s 0x%08x", e1000_reg_to_string(_reg), (_val)); \ sys_write32(_val, (_dev)->address + (_reg)); \ } while (false) #define ior32(_dev, _reg) \ ({ \ uint32_t val = sys_read32((_dev)->address + (_reg)); \ LOG_DBG("ior32 %s 0x%08x", e1000_reg_to_string(_reg), val); \ val; \ }) #ifdef __cplusplus } #endif #endif /* ETH_E1000_PRIV_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth_e1000_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,001
```objective-c /* * */ #ifndef ZEPHYR_DRIVERS_ETHERNET_ETH_H_ #define ZEPHYR_DRIVERS_ETHERNET_ETH_H_ #include <zephyr/types.h> #include <zephyr/random/random.h> /* helper macro to return mac address octet from local_mac_address prop */ #define NODE_MAC_ADDR_OCTET(node, n) DT_PROP_BY_IDX(node, local_mac_address, n) /* Determine if a mac address is all 0's */ #define NODE_MAC_ADDR_NULL(node) \ ((NODE_MAC_ADDR_OCTET(node, 0) == 0) && \ (NODE_MAC_ADDR_OCTET(node, 1) == 0) && \ (NODE_MAC_ADDR_OCTET(node, 2) == 0) && \ (NODE_MAC_ADDR_OCTET(node, 3) == 0) && \ (NODE_MAC_ADDR_OCTET(node, 4) == 0) && \ (NODE_MAC_ADDR_OCTET(node, 5) == 0)) /* Given a device tree node for an ethernet controller will * returns false if there is no local-mac-address property or * the property is all zero's. Otherwise will return True */ #define NODE_HAS_VALID_MAC_ADDR(node) \ UTIL_AND(DT_NODE_HAS_PROP(node, local_mac_address),\ (!NODE_MAC_ADDR_NULL(node))) static inline void gen_random_mac(uint8_t *mac_addr, uint8_t b0, uint8_t b1, uint8_t b2) { mac_addr[0] = b0; mac_addr[1] = b1; mac_addr[2] = b2; /* Set MAC address locally administered, unicast (LAA) */ mac_addr[0] |= 0x02; sys_rand_get(&mac_addr[3], 3U); } #endif /* ZEPHYR_DRIVERS_ETHERNET_ETH_H_ */ ```
/content/code_sandbox/drivers/ethernet/eth.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
409
```unknown menuconfig ETH_LAN865X bool "LAN865X 10BASE-T1S Controller" default y depends on DT_HAS_MICROCHIP_LAN865X_ENABLED select SPI select NET_L2_ETHERNET_MGMT help The LAN865X is a low power, 10BASE-T1S transceiver compliant with the IEEE 802.3cg-2019 Ethernet standard for long reach, 10 Mbps single pair Ethernet (SPE). Featuring an integrated media access control (MAC) and a PHY, the LAN865X enables direct connectivity with a variety of controllers via a serial peripheral inter-face (SPI). if ETH_LAN865X config ETH_LAN865X_INIT_PRIORITY int "LAN865X driver init priority" default 72 help LAN865X device driver initialization priority. Must be initialized after SPI. config ETH_LAN865X_IRQ_THREAD_STACK_SIZE int "Stack size for a thread that processes IRQ" default 512 help Size of the stack used for internal thread which is ran to process raised INT IRQ. config ETH_LAN865X_IRQ_THREAD_PRIO int "Priority for internal incoming packet handler" default 2 help Priority level for internal thread which is ran for LAN INT IRQ processing. config ETH_LAN865X_TIMEOUT int "IP buffer timeout" default 100 help Given timeout in milliseconds. Maximum amount of time that the driver will wait from the IP stack to get a memory buffer. endif # ETH_LAN865X ```
/content/code_sandbox/drivers/ethernet/Kconfig.lan865x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
338
```objective-c /* * */ #ifndef ETH_ADIN2111_PRIV_H__ #define ETH_ADIN2111_PRIV_H__ #include <stdint.h> #include <stdbool.h> #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #include <zephyr/net/net_if.h> #include <ethernet/eth_stats.h> /* SPI frequency maximum, based on clock cycle time */ #define ADIN2111_SPI_MAX_FREQUENCY 25000000U #define ADIN2111_PHYID 0x01U /* PHY Identification Register Reset Value */ #define ADIN2111_PHYID_RST_VAL 0x0283BCA1U #define ADIN1110_PHYID_RST_VAL 0x0283BC91U /* Reset Control and Status Register */ #define ADIN2111_RESET 0x03U /* MACPHY software reset */ #define ADIN2111_RESET_SWRESET BIT(0) /* Configuration Register 0 */ #define ADIN2111_CONFIG0 0x04U /* Configuration Synchronization */ #define ADIN2111_CONFIG0_SYNC BIT(15) /* Transmit Frame Check Sequence Validation Enable */ #define ADIN2111_CONFIG0_TXFCSVE BIT(14) /* Zero Align Receive Frame Enable */ #define ADIN2111_CONFIG0_ZARFE BIT(12) /* New packet received only after a new CS assertion */ #define ADIN2111_CONFIG0_CSARFE BIT(13) /* Transmit Cut Through Enable */ #define ADIN2111_CONFIG0_TXCTE BIT(9) /* Receive Cut Through Enable. 
Must be 0 for Generic SPI */ #define ADIN2111_CONFIG0_RXCTE BIT(8) /* Configuration Register 2 */ #define ADIN2111_CONFIG2 0x06U /* Forward Frames from Port 2 Not Matching a MAC Address to Port 1 */ #define ADIN2111_CONFIG2_P2_FWD_UNK2P1 BIT(14) /* Forward Frames from Port 1 Not Matching a MAC Address to Port 2 */ #define ADIN2111_CONFIG2_P1_FWD_UNK2P2 BIT(13) /* Forward Frames Not Matching Any MAC Address to the Host */ #define ADIN2111_CONFIG2_P2_FWD_UNK2HOST BIT(12) /* Enable Cut Through from Port to Port */ #define ADIN2111_CONFIG2_PORT_CUT_THRU_EN BIT(11) /* Enable CRC Append */ #define ADIN2111_CONFIG2_CRC_APPEND BIT(5) /* Forward Frames Not Matching Any MAC Address to the Host */ #define ADIN2111_CONFIG2_P1_FWD_UNK2HOST BIT(2) /* Status Register 0 */ #define ADIN2111_STATUS0 0x08U /* PHY Interrupt for Port 1 */ #define ADIN2111_STATUS0_PHYINT BIT(7) /** * Reset Complete. * The bit is set when the MACPHY reset is complete * and ready for configuration. */ #define ADIN2111_STATUS0_RESETC BIT(6) /* Value to completely clear status register 0 */ #define ADIN2111_STATUS0_CLEAR 0x1F7FU /* Status Register 1 */ #define ADIN2111_STATUS1 0x09U /* PHY Interrupt for Port 2 */ #define ADIN2111_STATUS1_PHYINT BIT(19) /* Port 2 RX FIFO Contains Data */ #define ADIN2111_STATUS1_P2_RX_RDY BIT(17) /* Indicates that a CRC error was detected */ #define ADIN2111_STATUS1_SPI_ERR BIT(10) /* Port 1 RX FIFO Contains Data */ #define ADIN2111_STATUS1_P1_RX_RDY BIT(4) /* Frame transmitted */ #define ADIN2111_STATUS1_TX_RDY BIT(3) /* Value to completely clear status register 1 */ #define ADIN2111_STATUS1_CLEAR 0xFFF01F08U /* Buffer Status Register */ #define ADIN2111_BUFSTS 0x0BU /* Rx chunks available */ #define ADIN2111_BUFSTS_RCA_MASK GENMASK(7, 0) /* Tx credits */ #define ADIN2111_BUFSTS_TXC 8U #define ADIN2111_BUFSTS_TXC_MASK GENMASK(15, 8) /* Interrupt Mask Register 0 */ #define ADIN2111_IMASK0 0x0CU /* Physical Layer Interrupt Mask */ #define ADIN2111_IMASK0_PHYINTM BIT(7) /* 
Interrupt Mask Register 1 */ #define ADIN2111_IMASK1 0x0DU /* Mask Bit for P2_PHYINT */ #define ADIN2111_IMASK1_P2_PHYINT_MASK BIT(19) /*!< Mask Bit for P2_RX_RDY. Generic SPI only.*/ #define ADIN2111_IMASK1_P2_RX_RDY_MASK BIT(17) /*!< Mask Bit for SPI_ERR. Generic SPI only. */ #define ADIN2111_IMASK1_SPI_ERR_MASK BIT(10) /*!< Mask Bit for P1_RX_RDY. Generic SPI only.*/ #define ADIN2111_IMASK1_P1_RX_RDY_MASK BIT(4) /*!< Mask Bit for TX_FRM_DONE. Generic SPI only.*/ #define ADIN2111_IMASK1_TX_RDY_MASK BIT(3) /* MAC Tx Frame Size Register */ #define ADIN2111_TX_FSIZE 0x30U /* Tx FIFO Space Register */ #define ADIN2111_TX_SPACE 0x32U /* MAC Address Rule and DA Filter Upper 16 Bits Registers */ #define ADIN2111_ADDR_FILT_UPR 0x50U #define ADIN2111_ADDR_APPLY2PORT2 BIT(31) #define ADIN2111_ADDR_APPLY2PORT1 BIT(30) #define ADIN2111_ADDR_TO_OTHER_PORT BIT(17) #define ADIN2111_ADDR_TO_HOST BIT(16) /* MAC Address DA Filter Lower 32 Bits Registers */ #define ADIN2111_ADDR_FILT_LWR 0x51U /* Upper 16 Bits of the MAC Address Mask */ #define ADIN2111_ADDR_MSK_UPR 0x70U /* Lower 32 Bits of the MAC Address Mask */ #define ADIN2111_ADDR_MSK_LWR 0x71U /* P1 MAC Rx Frame Size Register */ #define ADIN2111_P1_RX_FSIZE 0x90U /* P1 MAC Receive Register */ #define ADIN2111_P1_RX 0x91U /* P2 MAC Rx Frame Size Register */ #define ADIN2111_P2_RX_FSIZE 0xC0U /* P2 MAC Receive Register */ #define ADIN2111_P2_RX 0xC1U /* MAC reset status */ #define ADIN1110_MAC_RST_STATUS_REG 0x3BU /* MAC reset */ #define ADIN2111_SOFT_RST_REG 0x3CU #define ADIN2111_SWRESET_KEY1 0x4F1CU #define ADIN2111_SWRESET_KEY2 0xC1F4U #define ADIN2111_SWRELEASE_KEY1 0x6F1AU #define ADIN2111_SWRELEASE_KEY2 0xA1F6U /* SPI header size in bytes */ #define ADIN2111_SPI_HEADER_SIZE 2U /* SPI header size for write transaction */ #define ADIN2111_WRITE_HEADER_SIZE ADIN2111_SPI_HEADER_SIZE /* SPI header size for read transaction (1 for TA) */ #define ADIN2111_READ_HEADER_SIZE (ADIN2111_SPI_HEADER_SIZE + 1U) /* SPI register write 
buffer size without CRC */ #define ADIN2111_REG_WRITE_BUF_SIZE (ADIN2111_WRITE_HEADER_SIZE + sizeof(uint32_t)) /* SPI register write buffer with appended CRC size (1 for header, 1 for register) */ #define ADIN2111_REG_WRITE_BUF_SIZE_CRC (ADIN2111_REG_WRITE_BUF_SIZE + 2U) /* SPI register read buffer size with TA without CRC */ #define ADIN2111_REG_READ_BUF_SIZE (ADIN2111_READ_HEADER_SIZE + sizeof(uint32_t)) /* SPI register read buffer with TA and appended CRC size (1 header, 1 for register) */ #define ADIN2111_REG_READ_BUF_SIZE_CRC (ADIN2111_REG_READ_BUF_SIZE + 2U) /* SPI read fifo cmd buffer size with TA without CRC */ #define ADIN2111_FIFO_READ_CMD_BUF_SIZE (ADIN2111_READ_HEADER_SIZE) /* SPI read fifo cmd buffer with TA and appended CRC size */ #define ADIN2111_FIFO_READ_CMD_BUF_SIZE_CRC (ADIN2111_FIFO_READ_CMD_BUF_SIZE + 1U) /* SPI Header for writing control transaction in half duplex mode */ #define ADIN2111_WRITE_TXN_CTRL 0xA000U /* SPI Header for writing control transaction with MAC TX register (!) 
in half duplex mode */ #define ADIN2111_TXN_CTRL_TX_REG 0xA031U /* SPI Header for reading control transaction in half duplex mode */ #define ADIN2111_READ_TXN_CTRL 0x8000U /* Frame header size in bytes */ #define ADIN2111_FRAME_HEADER_SIZE 2U #define ADIN2111_INTERNAL_HEADER_SIZE 2U /* Number of buffer bytes in TxFIFO to provide frame margin upon writes */ #define ADIN2111_TX_FIFO_BUFFER_MARGIN 4U /* Manufacturer unique ID */ #define ADIN2111_PHYID_OUI 0xa0ef /* Open Alliance definitions */ #define ADIN2111_OA_ALLOC_TIMEOUT K_MSEC(10) /* Max setting to a max RCA of 255 68-bytes ckunks */ #define ADIN2111_OA_BUF_SZ (255U * 64U) #define ADIN2111_OA_CTL_LEN_PROT 16U #define ADIN2111_OA_CTL_LEN 12U #define ADIN2111_OA_CTL_MMS BIT(24) #define ADIN2111_OA_CTL_WNR BIT(29) #define ADIN2111_OA_DATA_HDR_DNC BIT(31) #define ADIN2111_OA_DATA_HDR_NORX BIT(29) #define ADIN2111_OA_DATA_HDR_VS 22U #define ADIN2111_OA_DATA_HDR_DV BIT(21) #define ADIN2111_OA_DATA_HDR_SV BIT(20) #define ADIN2111_OA_DATA_HDR_EV BIT(14) #define ADIN2111_OA_DATA_HDR_EBO 8U #define ADIN2111_OA_DATA_FTR_SYNC BIT(29) #define ADIN2111_OA_DATA_FTR_EBO 8U #define ADIN2111_OA_DATA_FTR_DV BIT(21) #define ADIN2111_OA_DATA_FTR_SV BIT(20) #define ADIN2111_OA_DATA_FTR_EV BIT(14) #define ADIN2111_OA_DATA_FTR_SWO 16U #define ADIN2111_OA_DATA_FTR_SWO_MSK GENMASK(19, 16) #define ADIN2111_OA_DATA_FTR_EBO 8U #define ADIN2111_OA_DATA_FTR_EBO_MSK GENMASK(13, 8) enum adin2111_chips_id { ADIN2111_MAC = 0, ADIN1110_MAC, }; struct adin2111_config { enum adin2111_chips_id id; struct spi_dt_spec spi; struct gpio_dt_spec interrupt; struct gpio_dt_spec reset; }; struct adin2111_data { struct k_mutex lock; struct k_sem offload_sem; uint32_t imask0; uint32_t imask1; uint8_t *buf; /* Port 0: PHY 1, Port 1: PHY 2 */ const struct device *port[2]; uint8_t *oa_tx_buf; uint8_t *oa_rx_buf; uint16_t ifaces_left_to_init; uint16_t scur; struct gpio_callback gpio_int_callback; bool oa; bool oa_prot; uint8_t oa_cps; 
K_KERNEL_STACK_MEMBER(rx_thread_stack, CONFIG_ETH_ADIN2111_IRQ_THREAD_STACK_SIZE); struct k_thread rx_thread; }; struct adin2111_port_data { struct net_if *iface; uint8_t mac_addr[6]; #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct net_stats_eth stats; #endif /* CONFIG_NET_STATISTICS_ETHERNET */ }; struct adin2111_port_config { const struct device *adin; const struct device *phy; const uint16_t port_idx; const uint16_t phy_addr; }; #endif /* ETH_ADIN2111_PRIV_H__ */ ```
/content/code_sandbox/drivers/ethernet/eth_adin2111_priv.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,744
```c /* * Xilinx Processor System Gigabit Ethernet controller (GEM) driver * * PHY management interface implementation * Models currently supported: * - Marvell Alaska 88E1111 (QEMU simulated PHY) * - Marvell Alaska 88E1510/88E1518/88E1512/88E1514 (Zedboard) * - Texas Instruments TLK105 * - Texas Instruments DP83822 * */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include "eth_xlnx_gem_priv.h" #define LOG_MODULE_NAME phy_xlnx_gem #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); /* Basic MDIO read / write functions for PHY access */ /** * @brief Read PHY data via the MDIO interface * Reads data from a PHY attached to the respective GEM's MDIO interface * * @param base_addr Base address of the GEM's register space * @param phy_addr MDIO address of the PHY to be accessed * @param reg_addr Index of the PHY register to be read * @return 16-bit data word received from the PHY */ static uint16_t phy_xlnx_gem_mdio_read( uint32_t base_addr, uint8_t phy_addr, uint8_t reg_addr) { uint32_t reg_val; uint32_t poll_cnt = 0; /* * MDIO read operation as described in Zynq-7000 TRM, * chapter 16.3.4, p. 517. */ /* * Wait until gem.net_status[phy_mgmt_idle] == 1 before issuing the * current command. 
*/ do { if (poll_cnt++ > 0) { k_busy_wait(100); } reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET); } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10); if (poll_cnt == 10) { LOG_ERR("GEM@0x%08X read from PHY address %hhu, " "register address %hhu timed out", base_addr, phy_addr, reg_addr); return 0; } /* Assemble & write the read command to the gem.phy_maint register */ /* Set the bits constant for any operation */ reg_val = ETH_XLNX_GEM_PHY_MAINT_CONST_BITS; /* Indicate a read operation */ reg_val |= ETH_XLNX_GEM_PHY_MAINT_READ_OP_BIT; /* PHY address */ reg_val |= (((uint32_t)phy_addr & ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_MASK) << ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_SHIFT); /* Register address */ reg_val |= (((uint32_t)reg_addr & ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_MASK) << ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_SHIFT); sys_write32(reg_val, base_addr + ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET); /* * Wait until gem.net_status[phy_mgmt_idle] == 1 -> current command * completed. 
*/ poll_cnt = 0; do { if (poll_cnt++ > 0) { k_busy_wait(100); } reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET); } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10); if (poll_cnt == 10) { LOG_ERR("GEM@0x%08X read from PHY address %hhu, " "register address %hhu timed out", base_addr, phy_addr, reg_addr); return 0; } /* * Read the data returned by the PHY -> lower 16 bits of the PHY main- * tenance register */ reg_val = sys_read32(base_addr + ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET); return (uint16_t)reg_val; } /** * @brief Writes PHY data via the MDIO interface * Writes data to a PHY attached to the respective GEM's MDIO interface * * @param base_addr Base address of the GEM's register space * @param phy_addr MDIO address of the PHY to be accessed * @param reg_addr Index of the PHY register to be written to * @param value 16-bit data word to be written to the target register */ static void phy_xlnx_gem_mdio_write( uint32_t base_addr, uint8_t phy_addr, uint8_t reg_addr, uint16_t value) { uint32_t reg_val; uint32_t poll_cnt = 0; /* * MDIO write operation as described in Zynq-7000 TRM, * chapter 16.3.4, p. 517. */ /* * Wait until gem.net_status[phy_mgmt_idle] == 1 before issuing the * current command. 
*/ do { if (poll_cnt++ > 0) { k_busy_wait(100); } reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET); } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10); if (poll_cnt == 10) { LOG_ERR("GEM@0x%08X write to PHY address %hhu, " "register address %hhu timed out", base_addr, phy_addr, reg_addr); return; } /* Assemble & write the read command to the gem.phy_maint register */ /* Set the bits constant for any operation */ reg_val = ETH_XLNX_GEM_PHY_MAINT_CONST_BITS; /* Indicate a read operation */ reg_val |= ETH_XLNX_GEM_PHY_MAINT_WRITE_OP_BIT; /* PHY address */ reg_val |= (((uint32_t)phy_addr & ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_MASK) << ETH_XLNX_GEM_PHY_MAINT_PHY_ADDRESS_SHIFT); /* Register address */ reg_val |= (((uint32_t)reg_addr & ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_MASK) << ETH_XLNX_GEM_PHY_MAINT_REGISTER_ID_SHIFT); /* 16 bits of data for the destination register */ reg_val |= ((uint32_t)value & ETH_XLNX_GEM_PHY_MAINT_DATA_MASK); sys_write32(reg_val, base_addr + ETH_XLNX_GEM_PHY_MAINTENANCE_OFFSET); /* * Wait until gem.net_status[phy_mgmt_idle] == 1 -> current command * completed. */ poll_cnt = 0; do { if (poll_cnt++ > 0) { k_busy_wait(100); } reg_val = sys_read32(base_addr + ETH_XLNX_GEM_NWSR_OFFSET); } while ((reg_val & ETH_XLNX_GEM_MDIO_IDLE_BIT) == 0 && poll_cnt < 10); if (poll_cnt == 10) { LOG_ERR("GEM@0x%08X write to PHY address %hhu, " "register address %hhu timed out", base_addr, phy_addr, reg_addr); } } /* * Vendor-specific PHY management functions for: * Marvell Alaska 88E1111 (QEMU simulated PHY) * Marvell Alaska 88E1510/88E1518/88E1512/88E1514 (Zedboard) * Register IDs & procedures are based on the corresponding datasheets: * path_to_url * path_to_url * * NOTICE: Unless indicated otherwise, page/table source references refer to * the 88E151x datasheet. 
*/ /** * @brief Marvell Alaska PHY reset function * Reset function for the Marvell Alaska PHY series * * @param dev Pointer to the device data */ static void phy_xlnx_gem_marvell_alaska_reset(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; uint32_t retries = 0; /* * Page 0, register address 0 = Copper control register, * bit [15] = PHY reset. Register 0/0 access is R/M/W. Comp. * datasheet chapter 2.6 and table 64 "Copper Control Register". */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER); phy_data |= PHY_MRVL_COPPER_CONTROL_RESET_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER, phy_data); /* Bit [15] reverts to 0 once the reset is complete. */ while (((phy_data & PHY_MRVL_COPPER_CONTROL_RESET_BIT) != 0) && (retries++ < 10)) { phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER); } if (retries == 10) { LOG_ERR("%s reset PHY address %hhu (Marvell Alaska) timed out", dev->name, dev_data->phy_addr); } } /** * @brief Marvell Alaska PHY configuration function * Configuration function for the Marvell Alaska PHY series * * @param dev Pointer to the device data */ static void phy_xlnx_gem_marvell_alaska_cfg(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; uint16_t phy_data_gbit; uint32_t retries = 0; /* * Page 0, register address 0 = Copper control register, * bit [12] = auto-negotiation enable bit is to be cleared * for now, afterwards, trigger a PHY reset. * Register 0/0 access is R/M/W. Comp. datasheet chapter 2.6 * and table 64 "Copper Control Register". 
*/ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER); phy_data &= ~PHY_MRVL_COPPER_CONTROL_AUTONEG_ENABLE_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER, phy_data); phy_xlnx_gem_marvell_alaska_reset(dev); if ((dev_data->phy_id & PHY_MRVL_PHY_ID_MODEL_MASK) == PHY_MRVL_PHY_ID_MODEL_88E151X) { /* * 88E151x only: configure the system interface and media type * (i.e. "RGMII to Copper", 0x0). On the 88E1111, this setting * is configured using I/O pins on the device. * TODO: Make this value configurable via KConfig or DT? * Page 18, register address 20 = General Control Register 1, * bits [2..0] = mode configuration * Comp. datasheet table 129 "General Control Register 1" * NOTICE: a change of this value requires a subsequent software * reset command via the same register's bit [15]. */ phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_PAGE_SWITCH_REGISTER, PHY_MRVL_GENERAL_CONTROL_1_PAGE); phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_GENERAL_CONTROL_1_REGISTER); phy_data &= ~(PHY_MRVL_MODE_CONFIG_MASK << PHY_MRVL_MODE_CONFIG_SHIFT); phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_GENERAL_CONTROL_1_REGISTER, phy_data); /* * [15] Mode Software Reset bit, affecting pages 6 and 18 * Reset is performed immediately, bit [15] is self-clearing. * This reset bit is not to be confused with the actual PHY * reset in register 0/0! */ phy_data |= PHY_MRVL_GENERAL_CONTROL_1_RESET_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_GENERAL_CONTROL_1_REGISTER, phy_data); /* Bit [15] reverts to 0 once the reset is complete. 
*/ while (((phy_data & PHY_MRVL_GENERAL_CONTROL_1_RESET_BIT) != 0) && (retries++ < 10)) { phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_GENERAL_CONTROL_1_REGISTER); } if (retries == 10) { LOG_ERR("%s configure PHY address %hhu (Marvell Alaska) timed out", dev->name, dev_data->phy_addr); return; } /* Revert to register page 0 */ phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_PAGE_SWITCH_REGISTER, PHY_MRVL_BASE_REGISTERS_PAGE); } /* * Configure MDIX * TODO: Make this value configurable via KConfig or DT? * 88E151x: Page 0, register address 16 = Copper specific control register 1, * 88E1111: Page any, register address 16 = PHY specific control register, * bits [6..5] = MDIO crossover mode. Comp. datasheet table 76. * NOTICE: a change of this value requires a subsequent software * reset command via Copper Control Register's bit [15]. */ /* [6..5] 11 = Enable auto cross over detection */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_1_REGISTER); phy_data &= ~(PHY_MRVL_MDIX_CONFIG_MASK << PHY_MRVL_MDIX_CONFIG_SHIFT); phy_data |= (PHY_MRVL_MDIX_AUTO_CROSSOVER_ENABLE << PHY_MRVL_MDIX_CONFIG_SHIFT); phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_1_REGISTER, phy_data); /* * Configure the Copper Specific Interrupt Enable Register * (88E151x) / Interrupt Enable Register (88E1111). * The interrupt status register provides a convenient way to * detect relevant state changes, also, PHY management could * eventually be changed from polling to interrupt-driven. * There's just one big catch: at least on the Zedboard, the * PHY interrupt line isn't wired up, therefore, the GEM can * never trigger a PHY interrupt. Still, the PHY interrupts * are configured & enabled in order to obtain all relevant * status data from a single source. 
* * -> all bits contained herein will be retained during the * upcoming software reset operation. * Page 0, register address 18 = (Copper Specific) Interrupt * Enable Register, * bit [14] = Speed changed interrupt enable, * bit [13] = Duplex changed interrupt enable, * bit [11] = Auto-negotiation completed interrupt enable, * bit [10] = Link status changed interrupt enable. * Comp. datasheet table 78 */ phy_data = PHY_MRVL_COPPER_SPEED_CHANGED_INT_BIT | PHY_MRVL_COPPER_DUPLEX_CHANGED_INT_BIT | PHY_MRVL_COPPER_AUTONEG_COMPLETED_INT_BIT | PHY_MRVL_COPPER_LINK_STATUS_CHANGED_INT_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_INT_ENABLE_REGISTER, phy_data); /* Trigger a PHY Reset, affecting pages 0, 2, 3, 5, 7. */ phy_xlnx_gem_marvell_alaska_reset(dev); /* * Clear the interrupt status register before advertising the * supported link speed(s). */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_INT_STATUS_REGISTER); /* * Set which link speeds and duplex modes shall be advertised during * auto-negotiation, then re-enable auto-negotiation. PHY link speed * advertisement configuration as described in Zynq-7000 TRM, chapter * 16.3.4, p. 517. */ /* * Advertise the link speed from the device configuration & perform * auto-negotiation. This process involves: * * Page 0, register address 4 = * Copper Auto-Negotiation Advertisement Register, * Page 0, register address 0 = * Copper Control Register, bit [15] = Reset -> apply all changes * made regarding advertisement, * Page 0, register address 9 = * 1000BASE-T Control Register (if link speed = 1GBit/s), * Page 0, register address 1 = * Copper Status Register, bit [5] = Copper Auto-Negotiation * Complete. * * Comp. datasheet tables 68 & 73. */ /* * 88E151x only: * Register 4, bits [4..0] = Selector field, 00001 = 802.3. Those bits * are reserved in other Marvell PHYs. 
*/ if ((dev_data->phy_id & PHY_MRVL_PHY_ID_MODEL_MASK) == PHY_MRVL_PHY_ID_MODEL_88E151X) { phy_data = PHY_MRVL_ADV_SELECTOR_802_3; } else { phy_data = 0x0000; } /* * Clear the 1 GBit/s FDX/HDX advertisement bits from reg. 9's current * contents in case we're going to advertise anything below 1 GBit/s * as maximum / nominal link speed. */ phy_data_gbit = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_1000BASET_CONTROL_REGISTER); phy_data_gbit &= ~PHY_MRVL_ADV_1000BASET_FDX_BIT; phy_data_gbit &= ~PHY_MRVL_ADV_1000BASET_HDX_BIT; if (dev_conf->enable_fdx) { if (dev_conf->max_link_speed == LINK_1GBIT) { /* Advertise 1 GBit/s, full duplex */ phy_data_gbit |= PHY_MRVL_ADV_1000BASET_FDX_BIT; if (dev_conf->phy_advertise_lower) { /* + 100 MBit/s, full duplex */ phy_data |= PHY_MRVL_ADV_100BASET_FDX_BIT; /* + 10 MBit/s, full duplex */ phy_data |= PHY_MRVL_ADV_10BASET_FDX_BIT; } } else if (dev_conf->max_link_speed == LINK_100MBIT) { /* Advertise 100 MBit/s, full duplex */ phy_data |= PHY_MRVL_ADV_100BASET_FDX_BIT; if (dev_conf->phy_advertise_lower) { /* + 10 MBit/s, full duplex */ phy_data |= PHY_MRVL_ADV_10BASET_FDX_BIT; } } else if (dev_conf->max_link_speed == LINK_10MBIT) { /* Advertise 10 MBit/s, full duplex */ phy_data |= PHY_MRVL_ADV_10BASET_FDX_BIT; } } else { if (dev_conf->max_link_speed == LINK_1GBIT) { /* Advertise 1 GBit/s, half duplex */ phy_data_gbit = PHY_MRVL_ADV_1000BASET_HDX_BIT; if (dev_conf->phy_advertise_lower) { /* + 100 MBit/s, half duplex */ phy_data |= PHY_MRVL_ADV_100BASET_HDX_BIT; /* + 10 MBit/s, half duplex */ phy_data |= PHY_MRVL_ADV_10BASET_HDX_BIT; } } else if (dev_conf->max_link_speed == LINK_100MBIT) { /* Advertise 100 MBit/s, half duplex */ phy_data |= PHY_MRVL_ADV_100BASET_HDX_BIT; if (dev_conf->phy_advertise_lower) { /* + 10 MBit/s, half duplex */ phy_data |= PHY_MRVL_ADV_10BASET_HDX_BIT; } } else if (dev_conf->max_link_speed == LINK_10MBIT) { /* Advertise 10 MBit/s, half duplex */ phy_data |= 
PHY_MRVL_ADV_10BASET_HDX_BIT; } } phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_1000BASET_CONTROL_REGISTER, phy_data_gbit); phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_AUTONEG_ADV_REGISTER, phy_data); /* * Trigger a PHY reset, affecting pages 0, 2, 3, 5, 7. * Afterwards, set the auto-negotiation enable bit [12] in the * Copper Control Register. */ phy_xlnx_gem_marvell_alaska_reset(dev); phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER); phy_data |= PHY_MRVL_COPPER_CONTROL_AUTONEG_ENABLE_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_CONTROL_REGISTER, phy_data); /* * Set the link speed to 'link down' for now, once auto-negotiation * is complete, the result will be handled by the system work queue. */ dev_data->eff_link_speed = LINK_DOWN; } /** * @brief Marvell Alaska PHY status change polling function * Status change polling function for the Marvell Alaska PHY series * * @param dev Pointer to the device data * @return A set of bits indicating whether one or more of the following * events has occurred: auto-negotiation completed, link state * changed, link speed changed. */ static uint16_t phy_xlnx_gem_marvell_alaska_poll_sc(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; uint16_t phy_status = 0; /* * PHY status change detection is implemented by reading the * interrupt status register. * Page 0, register address 19 = Copper Interrupt Status Register * bit [14] = Speed changed interrupt, * bit [13] = Duplex changed interrupt, * bit [11] = Auto-negotiation completed interrupt, * bit [10] = Link status changed interrupt. * Comp. 
datasheet table 79 */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_INT_STATUS_REGISTER); if ((phy_data & PHY_MRVL_COPPER_AUTONEG_COMPLETED_INT_BIT) != 0) { phy_status |= PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE; } if (((phy_data & PHY_MRVL_COPPER_DUPLEX_CHANGED_INT_BIT) != 0) || ((phy_data & PHY_MRVL_COPPER_LINK_STATUS_CHANGED_INT_BIT) != 0)) { phy_status |= PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED; } if ((phy_data & PHY_MRVL_COPPER_SPEED_CHANGED_INT_BIT) != 0) { phy_status |= PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED; } /* * Clear the status register, preserve reserved bit [3] as indicated * by the datasheet */ phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_INT_STATUS_REGISTER, (phy_data & 0x8)); return phy_status; } /** * @brief Marvell Alaska PHY link status polling function * Link status polling function for the Marvell Alaska PHY series * * @param dev Pointer to the device data * @return 1 if the PHY indicates link up, 0 if the link is down */ static uint8_t phy_xlnx_gem_marvell_alaska_poll_lsts(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; /* * Current link status is obtained from: * Page 0, register address 1 = Copper Status Register * bit [2] = Copper Link Status */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_STATUS_REGISTER); return ((phy_data >> PHY_MRVL_COPPER_LINK_STATUS_BIT_SHIFT) & 0x0001); } /** * @brief Marvell Alaska PHY link speed polling function * Link speed polling function for the Marvell Alaska PHY series * * @param dev Pointer to the device data * @return Enum containing the current link speed reported by the PHY */ static enum eth_xlnx_link_speed phy_xlnx_gem_marvell_alaska_poll_lspd( const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = 
dev->data; enum eth_xlnx_link_speed link_speed; uint16_t phy_data; /* * Current link speed is obtained from: * Page 0, register address 17 = Copper Specific Status Register 1 * bits [15 .. 14] = Speed. */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_MRVL_COPPER_STATUS_1_REGISTER); phy_data >>= PHY_MRVL_LINK_SPEED_SHIFT; phy_data &= PHY_MRVL_LINK_SPEED_MASK; /* * Link speed bit masks: comp. datasheet, table 77 @ description * of the 'Speed' bits. */ switch (phy_data) { case PHY_MRVL_LINK_SPEED_10MBIT: link_speed = LINK_10MBIT; break; case PHY_MRVL_LINK_SPEED_100MBIT: link_speed = LINK_100MBIT; break; case PHY_MRVL_LINK_SPEED_1GBIT: link_speed = LINK_1GBIT; break; default: link_speed = LINK_DOWN; break; }; return link_speed; } /* * Vendor-specific PHY management functions for: * Texas Instruments TLK105 * Texas Instruments DP83822 * with the DP83822 being the successor to the deprecated TLK105. * Register IDs & procedures are based on the corresponding datasheets: * path_to_url * path_to_url */ /** * @brief TI TLK105 & DP83822 PHY reset function * Reset function for the TI TLK105 & DP83822 PHYs * * @param dev Pointer to the device data */ static void phy_xlnx_gem_ti_dp83822_reset(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; uint32_t retries = 0; phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_CONTROL_REGISTER); phy_data |= PHY_TI_BASIC_MODE_CONTROL_RESET_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_CONTROL_REGISTER, phy_data); while (((phy_data & PHY_TI_BASIC_MODE_CONTROL_RESET_BIT) != 0) && (retries++ < 10)) { phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_CONTROL_REGISTER); } if (retries == 10) { LOG_ERR("%s reset PHY address %hhu (TI TLK105/DP83822) timed out", dev->name, 
dev_data->phy_addr); } } /** * @brief TI TLK105 & DP83822 PHY configuration function * Configuration function for the TI TLK105 & DP83822 PHYs * * @param dev Pointer to the device data */ static void phy_xlnx_gem_ti_dp83822_cfg(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data = PHY_TI_ADV_SELECTOR_802_3; /* Configure link advertisement */ if (dev_conf->enable_fdx) { if (dev_conf->max_link_speed == LINK_100MBIT) { /* Advertise 100BASE-TX, full duplex */ phy_data |= PHY_TI_ADV_100BASET_FDX_BIT; if (dev_conf->phy_advertise_lower) { /* + 10BASE-TX, full duplex */ phy_data |= PHY_TI_ADV_10BASET_FDX_BIT; } } else if (dev_conf->max_link_speed == LINK_10MBIT) { /* Advertise 10BASE-TX, full duplex */ phy_data |= PHY_TI_ADV_10BASET_FDX_BIT; } } else { if (dev_conf->max_link_speed == LINK_100MBIT) { /* Advertise 100BASE-TX, half duplex */ phy_data |= PHY_TI_ADV_100BASET_HDX_BIT; if (dev_conf->phy_advertise_lower) { /* + 10BASE-TX, half duplex */ phy_data |= PHY_TI_ADV_10BASET_HDX_BIT; } } else if (dev_conf->max_link_speed == LINK_10MBIT) { /* Advertise 10BASE-TX, half duplex */ phy_data |= PHY_TI_ADV_10BASET_HDX_BIT; } } phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_AUTONEG_ADV_REGISTER, phy_data); /* Enable auto-negotiation */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_CONTROL_REGISTER); phy_data |= PHY_TI_BASIC_MODE_CONTROL_AUTONEG_ENABLE_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_CONTROL_REGISTER, phy_data); /* Robust Auto MDIX */ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_CONTROL_REGISTER_1); phy_data |= PHY_TI_CR1_ROBUST_AUTO_MDIX_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_CONTROL_REGISTER_1, phy_data); phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, 
dev_data->phy_addr, PHY_TI_PHY_CONTROL_REGISTER); /* Auto MDIX enable */ phy_data |= PHY_TI_PHY_CONTROL_AUTO_MDIX_ENABLE_BIT; /* Link LED shall only indicate link up or down, no RX/TX activity */ phy_data |= PHY_TI_PHY_CONTROL_LED_CONFIG_LINK_ONLY_BIT; /* Force MDIX disable */ phy_data &= ~PHY_TI_PHY_CONTROL_FORCE_MDIX_BIT; phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_PHY_CONTROL_REGISTER, phy_data); /* Set blink rate to 5 Hz */ phy_data = (PHY_TI_LED_CONTROL_BLINK_RATE_5HZ << PHY_TI_LED_CONTROL_BLINK_RATE_SHIFT); phy_xlnx_gem_mdio_write(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_LED_CONTROL_REGISTER, phy_data); /* * Set the link speed to 'link down' for now, once auto-negotiation * is complete, the result will be handled by the system work queue. */ dev_data->eff_link_speed = LINK_DOWN; } /** * @brief TI TLK105 & DP83822 PHY status change polling function * Status change polling function for the TI TLK105 & DP83822 PHYs * * @param dev Pointer to the device data * @return A set of bits indicating whether one or more of the following * events has occurred: auto-negotiation completed, link state * changed, link speed changed. */ static uint16_t phy_xlnx_gem_ti_dp83822_poll_sc(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; uint16_t phy_status = 0; /* * The relevant status bits are obtained from the MII Interrupt * Status Register 1. The upper byte of the register's data word * contains the status bits which are set regardless of whether * the corresponding interrupt enable bits are set in the lower * byte or not (comp. TLK105 documentation, chapter 8.1.16). 
*/ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_MII_INTERRUPT_STATUS_REGISTER_1); if ((phy_data & PHY_TI_AUTONEG_COMPLETED_INT_BIT) != 0) { phy_status |= PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE; } if ((phy_data & PHY_TI_DUPLEX_CHANGED_INT_BIT) != 0) { phy_status |= PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED; } if ((phy_data & PHY_TI_LINK_STATUS_CHANGED_INT_BIT) != 0) { phy_status |= PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED; } if ((phy_data & PHY_TI_SPEED_CHANGED_INT_BIT) != 0) { phy_status |= PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED; } return phy_status; } /** * @brief TI TLK105 & DP83822 PHY link status polling function * Link status polling function for the TI TLK105 & DP83822 PHYs * * @param dev Pointer to the device data * @return 1 if the PHY indicates link up, 0 if the link is down */ static uint8_t phy_xlnx_gem_ti_dp83822_poll_lsts(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t phy_data; /* * Double read of the BMSR is intentional - the relevant bit is latched * low so that after a link down -> link up transition, the first read * of the BMSR will still return the latched link down status rather * than the current status. 
*/ phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_STATUS_REGISTER); phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_BASIC_MODE_STATUS_REGISTER); return ((phy_data & PHY_TI_BASIC_MODE_STATUS_LINK_STATUS_BIT) != 0); } /** * @brief TI TLK105 & DP83822 PHY link speed polling function * Link speed polling function for the TI TLK105 & DP83822 PHYs * * @param dev Pointer to the device data * @return Enum containing the current link speed reported by the PHY */ static enum eth_xlnx_link_speed phy_xlnx_gem_ti_dp83822_poll_lspd( const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; enum eth_xlnx_link_speed link_speed; uint16_t phy_data; phy_data = phy_xlnx_gem_mdio_read(dev_conf->base_addr, dev_data->phy_addr, PHY_TI_PHY_STATUS_REGISTER); /* PHYSCR[0] is the link established indication bit */ if ((phy_data & PHY_TI_PHY_STATUS_LINK_BIT) != 0) { /* PHYSCR[1] is the speed status bit: 0 = 100 Mbps, 1 = 10 Mbps. 
*/ if ((phy_data & PHY_TI_PHY_STATUS_SPEED_BIT) != 0) { link_speed = LINK_10MBIT; } else { link_speed = LINK_100MBIT; } } else { link_speed = LINK_DOWN; } return link_speed; } /** * @brief Marvell Alaska PHY function pointer table * Function pointer table for the Marvell Alaska PHY series * specific management functions */ static struct phy_xlnx_gem_api phy_xlnx_gem_marvell_alaska_api = { .phy_reset_func = phy_xlnx_gem_marvell_alaska_reset, .phy_configure_func = phy_xlnx_gem_marvell_alaska_cfg, .phy_poll_status_change_func = phy_xlnx_gem_marvell_alaska_poll_sc, .phy_poll_link_status_func = phy_xlnx_gem_marvell_alaska_poll_lsts, .phy_poll_link_speed_func = phy_xlnx_gem_marvell_alaska_poll_lspd }; /** * @brief Texas Instruments TLK105 & DP83822 PHY function pointer table * Function pointer table for the Texas Instruments TLK105 / DP83822 PHY * series specific management functions */ static struct phy_xlnx_gem_api phy_xlnx_gem_ti_dp83822_api = { .phy_reset_func = phy_xlnx_gem_ti_dp83822_reset, .phy_configure_func = phy_xlnx_gem_ti_dp83822_cfg, .phy_poll_status_change_func = phy_xlnx_gem_ti_dp83822_poll_sc, .phy_poll_link_status_func = phy_xlnx_gem_ti_dp83822_poll_lsts, .phy_poll_link_speed_func = phy_xlnx_gem_ti_dp83822_poll_lspd }; /* * All vendor-specific API structs & code are located above * -> assemble the top-level list of supported devices the * upcoming function phy_xlnx_gem_detect will work with. */ /** * @brief Top-level table of supported PHYs * Top-level table of PHYs supported by the GEM driver. Contains 1..n * supported PHY specifications, consisting of the PHY ID plus a mask * for masking out variable parts of the PHY ID such as hardware revisions, * as well as a textual description of the PHY model and a pointer to * the corresponding PHY management function pointer table. 
*/ static struct phy_xlnx_gem_supported_dev phy_xlnx_gem_supported_devs[] = { { .phy_id = PHY_MRVL_PHY_ID_MODEL_88E1111, .phy_id_mask = PHY_MRVL_PHY_ID_MODEL_MASK, .api = &phy_xlnx_gem_marvell_alaska_api, .identifier = "Marvell Alaska 88E1111" }, { .phy_id = PHY_MRVL_PHY_ID_MODEL_88E151X, .phy_id_mask = PHY_MRVL_PHY_ID_MODEL_MASK, .api = &phy_xlnx_gem_marvell_alaska_api, .identifier = "Marvell Alaska 88E151x" }, { .phy_id = PHY_TI_PHY_ID_MODEL_DP83822, .phy_id_mask = PHY_TI_PHY_ID_MODEL_MASK, .api = &phy_xlnx_gem_ti_dp83822_api, .identifier = "Texas Instruments DP83822" }, { .phy_id = PHY_TI_PHY_ID_MODEL_TLK105, .phy_id_mask = PHY_TI_PHY_ID_MODEL_MASK, .api = &phy_xlnx_gem_ti_dp83822_api, .identifier = "Texas Instruments TLK105" } }; /** * @brief Top-level PHY detection function * Top-level PHY detection function called by the GEM driver if PHY management * is enabled for the current GEM device instance. This function is generic * and does not require any knowledge regarding PHY vendors, models etc. * * @param dev Pointer to the device data * @retval -ENOTSUP if PHY management is disabled for the current GEM * device instance * @retval -EIO if no (supported) PHY was detected * @retval 0 if a supported PHY has been detected */ int phy_xlnx_gem_detect(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint8_t phy_curr_addr; uint8_t phy_first_addr = dev_conf->phy_mdio_addr_fix; uint8_t phy_last_addr = (dev_conf->phy_mdio_addr_fix != 0) ? dev_conf->phy_mdio_addr_fix : 31; uint32_t phy_id; uint16_t phy_data; uint32_t list_iter; /* * Clear the PHY address & ID in the device data struct -> may be * pre-initialized with a non-zero address meaning auto detection * is disabled. If eventually a supported PHY is found, a non- * zero address will be written back to the data struct. 
*/ dev_data->phy_addr = 0; dev_data->phy_id = 0; dev_data->phy_access_api = NULL; if (!dev_conf->init_phy) { return -ENOTSUP; } /* * PHY detection as described in Zynq-7000 TRM, chapter 16.3.4, * p. 517 */ for (phy_curr_addr = phy_first_addr; phy_curr_addr <= phy_last_addr; phy_curr_addr++) { /* Read the upper & lower PHY ID 16-bit words */ phy_data = phy_xlnx_gem_mdio_read( dev_conf->base_addr, phy_curr_addr, PHY_IDENTIFIER_1_REGISTER); phy_id = (((uint32_t)phy_data << 16) & 0xFFFF0000); phy_data = phy_xlnx_gem_mdio_read( dev_conf->base_addr, phy_curr_addr, PHY_IDENTIFIER_2_REGISTER); phy_id |= ((uint32_t)phy_data & 0x0000FFFF); if (phy_id != 0x00000000 && phy_id != 0xFFFFFFFF) { LOG_DBG("%s detected PHY at address %hhu: " "ID 0x%08X", dev->name, phy_curr_addr, phy_id); /* * Iterate the list of all supported PHYs -> if the * current PHY is supported, store all related data * in the device's run-time data struct. */ for (list_iter = 0; list_iter < ARRAY_SIZE(phy_xlnx_gem_supported_devs); list_iter++) { if (phy_xlnx_gem_supported_devs[list_iter].phy_id == (phy_xlnx_gem_supported_devs[list_iter].phy_id_mask & phy_id)) { LOG_DBG("%s identified supported PHY: %s", dev->name, phy_xlnx_gem_supported_devs[list_iter].identifier); /* * Store the numeric values of the PHY ID and address * as well as the corresponding set of function pointers * in the device's run-time data struct. */ dev_data->phy_addr = phy_curr_addr; dev_data->phy_id = phy_id; dev_data->phy_access_api = phy_xlnx_gem_supported_devs[list_iter].api; return 0; } } } } LOG_ERR("%s PHY detection failed", dev->name); return -EIO; } ```
/content/code_sandbox/drivers/ethernet/phy_xlnx_gem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
9,940
```c /* * Driver for Synopsys DesignWare MAC * * */ #define LOG_MODULE_NAME dwmac_core #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <sys/types.h> #include <zephyr/kernel.h> #include <zephyr/cache.h> #include <zephyr/net/ethernet.h> #include <zephyr/sys/barrier.h> #include <ethernet/eth_stats.h> #include "eth_dwmac_priv.h" #include "eth.h" /* * This driver references network data fragments with a zero-copy approach. * Even though the hardware can store received packets with an arbitrary * offset in memory, the gap bytes in the first word will be overwritten, * and subsequent fragments have to be buswidth-aligned anyway. * This means CONFIG_NET_BUF_VARIABLE_DATA_SIZE requires special care due * to its refcount byte placement, so we take the easy way out for now. */ #ifdef CONFIG_NET_BUF_VARIABLE_DATA_SIZE #error "CONFIG_NET_BUF_VARIABLE_DATA_SIZE=y is not supported" #endif /* size of pre-allocated packet fragments */ #define RX_FRAG_SIZE CONFIG_NET_BUF_DATA_SIZE /* * Grace period to wait for TX descriptor/fragment availability. * Worst case estimate is 1514*8 bits at 10 mbps for an existing packet * to be sent and freed, therefore 1ms is far more than enough. * Beyond that we'll drop the packet. */ #define TX_AVAIL_WAIT K_MSEC(1) /* descriptor index iterators */ #define INC_WRAP(idx, size) ({ idx = (idx + 1) % size; }) #define DEC_WRAP(idx, size) ({ idx = (idx + size - 1) % size; }) /* * Descriptor physical location . * MMU is special here as we have a separate uncached mapping that is * different from the normal RAM virt_to_phys mapping. 
*/ #ifdef CONFIG_MMU #define TXDESC_PHYS_H(idx) hi32(p->tx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc)) #define TXDESC_PHYS_L(idx) lo32(p->tx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc)) #define RXDESC_PHYS_H(idx) hi32(p->rx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc)) #define RXDESC_PHYS_L(idx) lo32(p->rx_descs_phys + (idx) * sizeof(struct dwmac_dma_desc)) #else #define TXDESC_PHYS_H(idx) phys_hi32(&p->tx_descs[idx]) #define TXDESC_PHYS_L(idx) phys_lo32(&p->tx_descs[idx]) #define RXDESC_PHYS_H(idx) phys_hi32(&p->rx_descs[idx]) #define RXDESC_PHYS_L(idx) phys_lo32(&p->rx_descs[idx]) #endif static inline uint32_t hi32(uintptr_t val) { /* trickery to avoid compiler warnings on 32-bit build targets */ if (sizeof(uintptr_t) > 4) { uint64_t hi = val; return hi >> 32; } return 0; } static inline uint32_t lo32(uintptr_t val) { /* just a typecast return to be symmetric with hi32() */ return val; } static inline uint32_t phys_hi32(void *addr) { /* the default 1:1 mapping is assumed */ return hi32((uintptr_t)addr); } static inline uint32_t phys_lo32(void *addr) { /* the default 1:1 mapping is assumed */ return lo32((uintptr_t)addr); } static enum ethernet_hw_caps dwmac_caps(const struct device *dev) { struct dwmac_priv *p = dev->data; enum ethernet_hw_caps caps = 0; if (p->feature0 & MAC_HW_FEATURE0_GMIISEL) { caps |= ETHERNET_LINK_1000BASE_T; } if (p->feature0 & MAC_HW_FEATURE0_MIISEL) { caps |= ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T; } caps |= ETHERNET_PROMISC_MODE; return caps; } /* for debug logs */ static inline int net_pkt_get_nbfrags(struct net_pkt *pkt) { struct net_buf *frag; int nbfrags = 0; for (frag = pkt->buffer; frag; frag = frag->frags) { nbfrags++; } return nbfrags; } static int dwmac_send(const struct device *dev, struct net_pkt *pkt) { struct dwmac_priv *p = dev->data; struct net_buf *frag, *pinned; unsigned int pkt_len = net_pkt_get_len(pkt); unsigned int d_idx; struct dwmac_dma_desc *d; uint32_t des2_flags, des3_flags; 
LOG_DBG("pkt len/frags=%d/%d", pkt_len, net_pkt_get_nbfrags(pkt)); /* initial flag values */ des2_flags = 0; des3_flags = TDES3_FD | TDES3_OWN; /* map packet fragments */ d_idx = p->tx_desc_head; frag = pkt->buffer; do { LOG_DBG("desc sem/head/tail=%d/%d/%d", k_sem_count_get(&p->free_tx_descs), p->tx_desc_head, p->tx_desc_tail); /* reserve a free descriptor for this fragment */ if (k_sem_take(&p->free_tx_descs, TX_AVAIL_WAIT) != 0) { LOG_DBG("no more free tx descriptors"); goto abort; } /* pin this fragment */ pinned = net_buf_clone(frag, TX_AVAIL_WAIT); if (!pinned) { LOG_DBG("net_buf_clone() returned NULL"); k_sem_give(&p->free_tx_descs); goto abort; } sys_cache_data_flush_range(pinned->data, pinned->len); p->tx_frags[d_idx] = pinned; LOG_DBG("d[%d]: frag %p pinned %p len %d", d_idx, frag->data, pinned->data, pinned->len); /* if no more fragments after this one: */ if (!frag->frags) { /* set those flags on the last descriptor */ des2_flags |= TDES2_IOC; des3_flags |= TDES3_LD; } /* fill the descriptor */ d = &p->tx_descs[d_idx]; d->des0 = phys_lo32(pinned->data); d->des1 = phys_hi32(pinned->data); d->des2 = pinned->len | des2_flags; d->des3 = pkt_len | des3_flags; /* clear the FD flag on subsequent descriptors */ des3_flags &= ~TDES3_FD; INC_WRAP(d_idx, NB_TX_DESCS); frag = frag->frags; } while (frag); /* make sure all the above made it to memory */ barrier_dmem_fence_full(); /* update the descriptor index head */ p->tx_desc_head = d_idx; /* lastly notify the hardware */ REG_WRITE(DMA_CHn_TXDESC_TAIL_PTR(0), TXDESC_PHYS_L(d_idx)); return 0; abort: while (d_idx != p->tx_desc_head) { /* release already pinned fragments */ DEC_WRAP(d_idx, NB_TX_DESCS); frag = p->tx_frags[d_idx]; net_pkt_frag_unref(frag); k_sem_give(&p->free_tx_descs); } return -ENOMEM; } static void dwmac_tx_release(struct dwmac_priv *p) { unsigned int d_idx; struct dwmac_dma_desc *d; struct net_buf *frag; uint32_t des3_val; for (d_idx = p->tx_desc_tail; d_idx != p->tx_desc_head; INC_WRAP(d_idx, 
NB_TX_DESCS), k_sem_give(&p->free_tx_descs)) { LOG_DBG("desc sem/tail/head=%d/%d/%d", k_sem_count_get(&p->free_tx_descs), p->tx_desc_tail, p->tx_desc_head); d = &p->tx_descs[d_idx]; des3_val = d->des3; LOG_DBG("TDES3[%d] = 0x%08x", d_idx, des3_val); /* stop here if hardware still owns it */ if (des3_val & TDES3_OWN) { break; } /* release corresponding fragments */ frag = p->tx_frags[d_idx]; LOG_DBG("unref frag %p", frag->data); net_pkt_frag_unref(frag); /* last packet descriptor: */ if (des3_val & TDES3_LD) { /* log any errors */ if (des3_val & TDES3_ES) { LOG_ERR("tx error (DES3 = 0x%08x)", des3_val); eth_stats_update_errors_tx(p->iface); } } } p->tx_desc_tail = d_idx; } static void dwmac_receive(struct dwmac_priv *p) { struct dwmac_dma_desc *d; struct net_buf *frag; unsigned int d_idx, bytes_so_far; uint32_t des3_val; for (d_idx = p->rx_desc_tail; d_idx != p->rx_desc_head; INC_WRAP(d_idx, NB_RX_DESCS), k_sem_give(&p->free_rx_descs)) { LOG_DBG("desc sem/tail/head=%d/%d/%d", k_sem_count_get(&p->free_rx_descs), d_idx, p->rx_desc_head); d = &p->rx_descs[d_idx]; des3_val = d->des3; LOG_DBG("RDES3[%d] = 0x%08x", d_idx, des3_val); /* stop here if hardware still owns it */ if (des3_val & RDES3_OWN) { break; } /* we ignore those for now */ if (des3_val & RDES3_CTXT) { continue; } /* a packet's first descriptor: */ if (des3_val & RDES3_FD) { p->rx_bytes = 0; if (p->rx_pkt) { LOG_ERR("d[%d] first desc but pkt exists", d_idx); eth_stats_update_errors_rx(p->iface); net_pkt_unref(p->rx_pkt); } p->rx_pkt = net_pkt_rx_alloc_on_iface(p->iface, K_NO_WAIT); if (!p->rx_pkt) { LOG_ERR("net_pkt_rx_alloc_on_iface() failed"); eth_stats_update_errors_rx(p->iface); } } if (!p->rx_pkt) { LOG_ERR("no rx_pkt: skipping desc %d", d_idx); continue; } /* retrieve current fragment */ frag = p->rx_frags[d_idx]; p->rx_frags[d_idx] = NULL; bytes_so_far = FIELD_GET(RDES3_PL, des3_val); frag->len = bytes_so_far - p->rx_bytes; p->rx_bytes = bytes_so_far; net_pkt_frag_add(p->rx_pkt, frag); /* last 
descriptor: */ if (des3_val & RDES3_LD) { /* submit packet if no errors */ if (!(des3_val & RDES3_ES)) { LOG_DBG("pkt len/frags=%zd/%d", net_pkt_get_len(p->rx_pkt), net_pkt_get_nbfrags(p->rx_pkt)); net_recv_data(p->iface, p->rx_pkt); } else { LOG_ERR("rx error (DES3 = 0x%08x)", des3_val); eth_stats_update_errors_rx(p->iface); net_pkt_unref(p->rx_pkt); } p->rx_pkt = NULL; } } p->rx_desc_tail = d_idx; } static void dwmac_rx_refill_thread(void *arg1, void *unused1, void *unused2) { struct dwmac_priv *p = arg1; struct dwmac_dma_desc *d; struct net_buf *frag; unsigned int d_idx; ARG_UNUSED(unused1); ARG_UNUSED(unused2); d_idx = p->rx_desc_head; for (;;) { LOG_DBG("desc sem/head/tail=%d/%d/%d", k_sem_count_get(&p->free_rx_descs), p->rx_desc_head, p->rx_desc_tail); /* wait for an empty descriptor */ if (k_sem_take(&p->free_rx_descs, K_FOREVER) != 0) { LOG_ERR("can't get free RX desc to refill"); break; } d = &p->rx_descs[d_idx]; __ASSERT(!(d->des3 & RDES3_OWN), "desc[%d]=0x%x: still hw owned! (sem/head/tail=%d/%d/%d)", d_idx, d->des3, k_sem_count_get(&p->free_rx_descs), p->rx_desc_head, p->rx_desc_tail); frag = p->rx_frags[d_idx]; /* get a new fragment if the previous one was consumed */ if (!frag) { frag = net_pkt_get_reserve_rx_data(RX_FRAG_SIZE, K_FOREVER); if (!frag) { LOG_ERR("net_pkt_get_reserve_rx_data() returned NULL"); k_sem_give(&p->free_rx_descs); break; } LOG_DBG("new frag[%d] at %p", d_idx, frag->data); __ASSERT(frag->size == RX_FRAG_SIZE, ""); sys_cache_data_invd_range(frag->data, frag->size); p->rx_frags[d_idx] = frag; } else { LOG_DBG("reusing frag[%d] at %p", d_idx, frag->data); } /* all is good: initialize the descriptor */ d->des0 = phys_lo32(frag->data); d->des1 = phys_hi32(frag->data); d->des2 = 0; d->des3 = RDES3_BUF1V | RDES3_IOC | RDES3_OWN; /* commit the above to memory */ barrier_dmem_fence_full(); /* advance to the next descriptor */ p->rx_desc_head = INC_WRAP(d_idx, NB_RX_DESCS); /* lastly notify the hardware */ 
REG_WRITE(DMA_CHn_RXDESC_TAIL_PTR(0), RXDESC_PHYS_L(d_idx)); } } static void dwmac_dma_irq(struct dwmac_priv *p, unsigned int ch) { uint32_t status; status = REG_READ(DMA_CHn_STATUS(ch)); LOG_DBG("DMA_CHn_STATUS(%d) = 0x%08x", ch, status); REG_WRITE(DMA_CHn_STATUS(ch), status); __ASSERT(ch == 0, "only one DMA channel is currently supported"); if (status & DMA_CHn_STATUS_AIS) { LOG_ERR("Abnormal Interrupt Status received (0x%x)", status); } if (status & DMA_CHn_STATUS_TI) { dwmac_tx_release(p); } if (status & DMA_CHn_STATUS_RI) { dwmac_receive(p); } } static void dwmac_mac_irq(struct dwmac_priv *p) { uint32_t status; status = REG_READ(MAC_IRQ_STATUS); LOG_DBG("MAC_IRQ_STATUS = 0x%08x", status); __ASSERT(false, "unimplemented"); } static void dwmac_mtl_irq(struct dwmac_priv *p) { uint32_t status; status = REG_READ(MTL_IRQ_STATUS); LOG_DBG("MTL_IRQ_STATUS = 0x%08x", status); __ASSERT(false, "unimplemented"); } void dwmac_isr(const struct device *ddev) { struct dwmac_priv *p = ddev->data; uint32_t irq_status; unsigned int ch; irq_status = REG_READ(DMA_IRQ_STATUS); LOG_DBG("DMA_IRQ_STATUS = 0x%08x", irq_status); while (irq_status & 0xff) { ch = find_lsb_set(irq_status & 0xff) - 1; irq_status &= ~BIT(ch); dwmac_dma_irq(p, ch); } if (irq_status & DMA_IRQ_STATUS_MTLIS) { dwmac_mtl_irq(p); } if (irq_status & DMA_IRQ_STATUS_MACIS) { dwmac_mac_irq(p); } } static void dwmac_set_mac_addr(struct dwmac_priv *p, uint8_t *addr, int n) { uint32_t reg_val; reg_val = (addr[5] << 8) | addr[4]; REG_WRITE(MAC_ADDRESS_HIGH(n), reg_val | MAC_ADDRESS_HIGH_AE); reg_val = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; REG_WRITE(MAC_ADDRESS_LOW(n), reg_val); } static int dwmac_set_config(const struct device *dev, enum ethernet_config_type type, const struct ethernet_config *config) { struct dwmac_priv *p = dev->data; uint32_t reg_val; int ret = 0; (void) reg_val; /* silence the "unused variable" warning */ switch (type) { case ETHERNET_CONFIG_TYPE_MAC_ADDRESS: 
memcpy(p->mac_addr, config->mac_address.addr, sizeof(p->mac_addr)); dwmac_set_mac_addr(p, p->mac_addr, 0); net_if_set_link_addr(p->iface, p->mac_addr, sizeof(p->mac_addr), NET_LINK_ETHERNET); break; #if defined(CONFIG_NET_PROMISCUOUS_MODE) case ETHERNET_CONFIG_TYPE_PROMISC_MODE: reg_val = REG_READ(MAC_PKT_FILTER); if (config->promisc_mode && !(reg_val & MAC_PKT_FILTER_PR)) { REG_WRITE(MAC_PKT_FILTER, reg_val | MAC_PKT_FILTER_PR); } else if (!config->promisc_mode && (reg_val & MAC_PKT_FILTER_PR)) { REG_WRITE(MAC_PKT_FILTER, reg_val & ~MAC_PKT_FILTER_PR); } else { ret = -EALREADY; } break; #endif default: ret = -ENOTSUP; break; } return ret; } static void dwmac_iface_init(struct net_if *iface) { struct dwmac_priv *p = net_if_get_device(iface)->data; uint32_t reg_val; __ASSERT(!p->iface, "interface already initialized?"); p->iface = iface; ethernet_init(iface); net_if_set_link_addr(iface, p->mac_addr, sizeof(p->mac_addr), NET_LINK_ETHERNET); dwmac_set_mac_addr(p, p->mac_addr, 0); /* * Semaphores are used to represent number of available descriptors. * The total is one less than ring size in order to always have * at least one inactive slot for the hardware tail pointer to * stop at and to prevent our head indexes from looping back * onto our tail indexes. 
*/ k_sem_init(&p->free_tx_descs, NB_TX_DESCS - 1, NB_TX_DESCS - 1); k_sem_init(&p->free_rx_descs, NB_RX_DESCS - 1, NB_RX_DESCS - 1); /* set up RX buffer refill thread */ k_thread_create(&p->rx_refill_thread, p->rx_refill_thread_stack, K_KERNEL_STACK_SIZEOF(p->rx_refill_thread_stack), dwmac_rx_refill_thread, p, NULL, NULL, 0, K_PRIO_PREEMPT(0), K_NO_WAIT); k_thread_name_set(&p->rx_refill_thread, "dwmac_rx_refill"); /* start up TX/RX */ reg_val = REG_READ(DMA_CHn_TX_CTRL(0)); REG_WRITE(DMA_CHn_TX_CTRL(0), reg_val | DMA_CHn_TX_CTRL_St); reg_val = REG_READ(DMA_CHn_RX_CTRL(0)); REG_WRITE(DMA_CHn_RX_CTRL(0), reg_val | DMA_CHn_RX_CTRL_SR); reg_val = REG_READ(MAC_CONF); reg_val |= MAC_CONF_CST | MAC_CONF_TE | MAC_CONF_RE; REG_WRITE(MAC_CONF, reg_val); /* unmask IRQs */ REG_WRITE(DMA_CHn_IRQ_ENABLE(0), DMA_CHn_IRQ_ENABLE_TIE | DMA_CHn_IRQ_ENABLE_RIE | DMA_CHn_IRQ_ENABLE_NIE | DMA_CHn_IRQ_ENABLE_FBEE | DMA_CHn_IRQ_ENABLE_CDEE | DMA_CHn_IRQ_ENABLE_AIE); LOG_DBG("done"); } int dwmac_probe(const struct device *dev) { struct dwmac_priv *p = dev->data; int ret; uint32_t reg_val; k_timepoint_t timeout; ret = dwmac_bus_init(p); if (ret != 0) { return ret; } reg_val = REG_READ(MAC_VERSION); LOG_INF("HW version %u.%u0", (reg_val >> 4) & 0xf, reg_val & 0xf); __ASSERT(FIELD_GET(MAC_VERSION_SNPSVER, reg_val) >= 0x40, "This driver expects DWC-ETHERNET version >= 4.00"); /* resets all of the MAC internal registers and logic */ REG_WRITE(DMA_MODE, DMA_MODE_SWR); timeout = sys_timepoint_calc(K_MSEC(100)); while (REG_READ(DMA_MODE) & DMA_MODE_SWR) { if (sys_timepoint_expired(timeout)) { LOG_ERR("unable to reset hardware"); return -EIO; } } /* get configured hardware features */ p->feature0 = REG_READ(MAC_HW_FEATURE0); p->feature1 = REG_READ(MAC_HW_FEATURE1); p->feature2 = REG_READ(MAC_HW_FEATURE2); p->feature3 = REG_READ(MAC_HW_FEATURE3); LOG_DBG("hw_feature: 0x%08x 0x%08x 0x%08x 0x%08x", p->feature0, p->feature1, p->feature2, p->feature3); dwmac_platform_init(p); memset(p->tx_descs, 0, 
NB_TX_DESCS * sizeof(struct dwmac_dma_desc)); memset(p->rx_descs, 0, NB_RX_DESCS * sizeof(struct dwmac_dma_desc)); /* set up DMA */ REG_WRITE(DMA_CHn_TX_CTRL(0), 0); REG_WRITE(DMA_CHn_RX_CTRL(0), FIELD_PREP(DMA_CHn_RX_CTRL_PBL, 32) | FIELD_PREP(DMA_CHn_RX_CTRL_RBSZ, RX_FRAG_SIZE)); REG_WRITE(DMA_CHn_TXDESC_LIST_HADDR(0), TXDESC_PHYS_H(0)); REG_WRITE(DMA_CHn_TXDESC_LIST_ADDR(0), TXDESC_PHYS_L(0)); REG_WRITE(DMA_CHn_RXDESC_LIST_HADDR(0), RXDESC_PHYS_H(0)); REG_WRITE(DMA_CHn_RXDESC_LIST_ADDR(0), RXDESC_PHYS_L(0)); REG_WRITE(DMA_CHn_TXDESC_RING_LENGTH(0), NB_TX_DESCS - 1); REG_WRITE(DMA_CHn_RXDESC_RING_LENGTH(0), NB_RX_DESCS - 1); return 0; } const struct ethernet_api dwmac_api = { .iface_api.init = dwmac_iface_init, .get_capabilities = dwmac_caps, .set_config = dwmac_set_config, .send = dwmac_send, }; ```
/content/code_sandbox/drivers/ethernet/eth_dwmac.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
5,178
```objective-c /* ENC28J60 Stand-alone Ethernet Controller with SPI * * */ #include <zephyr/kernel.h> #include <zephyr/drivers/gpio.h> #include <zephyr/drivers/spi.h> #ifndef _ENC28J60_ #define _ENC28J60_ /* Any Bank Registers */ #define ENC28J60_REG_EIE 0x1B #define ENC28J60_REG_EIR 0x1C #define ENC28J60_REG_ESTAT 0x1D #define ENC28J60_REG_ECON2 0x1E #define ENC28J60_REG_ECON1 0x1F /* Register Encoding * Nibble 3 : 0x0 ETH Register * 0x1 MAC Register * 0x2 MII Register * Nibble 2 : Bank number * Nibble 1-0: Register address */ /* Bank 0 Registers */ #define ENC28J60_REG_ERDPTL 0x0000 #define ENC28J60_REG_ERDPTH 0x0001 #define ENC28J60_REG_EWRPTL 0x0002 #define ENC28J60_REG_EWRPTH 0x0003 #define ENC28J60_REG_ETXSTL 0x0004 #define ENC28J60_REG_ETXSTH 0x0005 #define ENC28J60_REG_ETXNDL 0x0006 #define ENC28J60_REG_ETXNDH 0x0007 #define ENC28J60_REG_ERXSTL 0x0008 #define ENC28J60_REG_ERXSTH 0x0009 #define ENC28J60_REG_ERXNDL 0x000A #define ENC28J60_REG_ERXNDH 0x000B #define ENC28J60_REG_ERXRDPTL 0x000C #define ENC28J60_REG_ERXRDPTH 0x000D #define ENC28J60_REG_ERXWRPTL 0x000E #define ENC28J60_REG_ERXWRPTH 0x000F #define ENC28J60_REG_EDMASTL 0x0010 #define ENC28J60_REG_EDMASTH 0x0011 #define ENC28J60_REG_EDMANDL 0x0012 #define ENC28J60_REG_EDMANDH 0x0013 #define ENC28J60_REG_EDMADSTL 0x0014 #define ENC28J60_REG_EDMADSTH 0x0015 #define ENC28J60_REG_EDMACSL 0x0016 #define ENC28J60_REG_EDMACSH 0x0017 /* Bank 1 Registers */ #define ENC28J60_REG_EHT0 0x0100 #define ENC28J60_REG_EHT1 0x0101 #define ENC28J60_REG_EHT2 0x0102 #define ENC28J60_REG_EHT3 0x0103 #define ENC28J60_REG_EHT4 0x0104 #define ENC28J60_REG_EHT5 0x0105 #define ENC28J60_REG_EHT6 0x0106 #define ENC28J60_REG_EHT7 0x0107 #define ENC28J60_REG_EPMM0 0x0108 #define ENC28J60_REG_EPMM1 0x0109 #define ENC28J60_REG_EPMM2 0x010A #define ENC28J60_REG_EPMM3 0x010B #define ENC28J60_REG_EPMM4 0x010C #define ENC28J60_REG_EPMM5 0x010D #define ENC28J60_REG_EPMM6 0x010E #define ENC28J60_REG_EPMM7 0x010F #define 
ENC28J60_REG_EPMCSL 0x0110 #define ENC28J60_REG_EPMCSH 0x0111 #define ENC28J60_REG_EPMOL 0x0114 #define ENC28J60_REG_EPMOH 0x0115 #define ENC28J60_REG_EWOLIE 0x0116 #define ENC28J60_REG_EWOLIR 0x0117 #define ENC28J60_REG_ERXFCON 0x0118 #define ENC28J60_REG_EPKTCNT 0x0119 /* Bank 2 Registers */ #define ENC28J60_REG_MACON1 0x1200 #define ENC28J60_REG_MACON3 0x1202 #define ENC28J60_REG_MACON4 0x1203 #define ENC28J60_REG_MABBIPG 0x1204 #define ENC28J60_REG_MAIPGL 0x1206 #define ENC28J60_REG_MAIPGH 0x1207 #define ENC28J60_REG_MACLCON1 0x1208 #define ENC28J60_REG_MACLCON2 0x1209 #define ENC28J60_REG_MAMXFLL 0x120A #define ENC28J60_REG_MAMXFLH 0x120B #define ENC28J60_REG_MAPHSUP 0x120C #define ENC28J60_REG_MICON 0x2211 #define ENC28J60_REG_MICMD 0x2212 #define ENC28J60_REG_MIREGADR 0x2214 #define ENC28J60_REG_MIWRL 0x2216 #define ENC28J60_REG_MIWRH 0x2217 #define ENC28J60_REG_MIRDL 0x2218 #define ENC28J60_REG_MIRDH 0x2219 /* Bank 3 Registers */ #define ENC28J60_REG_MAADR5 0x1300 #define ENC28J60_REG_MAADR6 0x1301 #define ENC28J60_REG_MAADR3 0x1302 #define ENC28J60_REG_MAADR4 0x1303 #define ENC28J60_REG_MAADR1 0x1304 #define ENC28J60_REG_MAADR2 0x1305 #define ENC28J60_REG_EBSTSD 0x0306 #define ENC28J60_REG_EBSTCON 0x0307 #define ENC28J60_REG_EBSTCSL 0x0308 #define ENC28J60_REG_EBSTCSH 0x0309 #define ENC28J60_REG_MISTAT 0x230A #define ENC28J60_REG_EREVID 0x0312 #define ENC28J60_REG_ECOCON 0x0315 #define ENC28J60_REG_EFLOCON 0x0317 #define ENC28J60_REG_EPAUSL 0x0318 #define ENC28J60_REG_EPAUSH 0x0319 /* PHY Registers */ #define ENC28J60_PHY_PHCON1 0x00 #define ENC28J60_PHY_PHSTAT1 0x01 #define ENC28J60_PHY_PHID1 0x02 #define ENC28J60_PHY_PHID2 0x03 #define ENC28J60_PHY_PHCON2 0x10 #define ENC28J60_PHY_PHSTAT2 0x11 #define ENC28J60_PHY_PHIE 0x12 #define ENC28J60_PHY_PHIR 0x13 #define ENC28J60_PHY_PHLCON 0x14 /* SPI Instruction Opcodes */ #define ENC28J60_SPI_RCR (0x0) #define ENC28J60_SPI_RBM (0x3A) #define ENC28J60_SPI_WCR (0x2 << 5) #define ENC28J60_SPI_WBM (0x7A) #define 
ENC28J60_SPI_BFS (0x4 << 5) #define ENC28J60_SPI_BFC (0x5 << 5) #define ENC28J60_SPI_SC (0xFF) /* Significant bits */ #define ENC28J60_BIT_MICMD_MIIRD (0x01) #define ENC28J60_BIT_MISTAT_BUSY (0x01) #define ENC28J60_BIT_ESTAT_CLKRDY (0x01) #define ENC28J60_BIT_MACON1_MARXEN (0x01) #define ENC28J60_BIT_MACON1_RXPAUS (0x04) #define ENC28J60_BIT_MACON1_TXPAUS (0x08) #define ENC28J60_BIT_MACON1_MARXEN (0x01) #define ENC28J60_BIT_MACON2_MARST (0x80) #define ENC28J60_BIT_MACON3_FULDPX (0x01) #define ENC28J60_BIT_ECON1_TXRST (0x80) #define ENC28J60_BIT_ECON1_TXRTS (0x08) #define ENC28J60_BIT_ECON1_RXEN (0x04) #define ENC28J60_BIT_ECON2_PKTDEC (0x40) #define ENC28J60_BIT_EIR_PKTIF (0x40) #define ENC28J60_BIT_EIE_TXIE (0x08) #define ENC28J60_BIT_EIE_PKTIE (0x40) #define ENC28J60_BIT_EIE_LINKIE (0x10) #define ENC28J60_BIT_EIE_INTIE (0x80) #define ENC28J60_BIT_EIR_PKTIF (0x40) #define ENC28J60_BIT_EIR_DMAIF (0x20) #define ENC28J60_BIT_EIR_LINKIF (0x10) #define ENC28J60_BIT_EIR_TXIF (0x08) #define ENC28J60_BIT_EIR_WOLIF (0x04) #define ENC28J60_BIT_EIR_TXERIF (0x02) #define ENC28J60_BIT_EIR_RXERIF (0x01) #define ENC28J60_BIT_ESTAT_TXABRT (0x02) #define ENC28J60_BIT_ESTAT_LATECOL (0x10) #define ENC28J60_BIT_PHCON1_PDPXMD (0x0100) #define ENC28J60_BIT_PHCON2_HDLDIS (0x0001) #define ENC28J60_BIT_PHSTAT2_LSTAT (0x0400) #define ENC28J60_BIT_PHIE_PGEIE (0x0002) #define ENC28J60_BIT_PHIE_PLNKIE (0x0010) /* Driver Static Configuration */ /* Receive filters enabled: * - Unicast * - Multicast * - Broadcast * - CRC Check * * Used as default if hw-rx-filter property * absent in DT */ #define ENC28J60_RECEIVE_FILTERS 0xA3 /* MAC configuration: * - Automatic Padding * - Automatic CRC * - Frame Length Checking */ #define ENC28J60_MAC_CONFIG 0x32 #define ENC28J60_MAC_BBIPG_HD 0x12 #define ENC28J60_MAC_BBIPG_FD 0x15 #define ENC28J60_MAC_NBBIPGL 0x12 #define ENC28J60_MAC_NBBIPGH 0x0C #define ENC28J60_PHY_LEDCONF 0x3422 /* Status Vector size plus per packet control byte: 8 bytes */ #define 
ENC28J60_SV_SIZE 8 /* Per Packet Control Byte configured to follow MACON3 configuration */ #define ENC28J60_PPCTL_BYTE 0x0 /* Start of RX buffer, (must be zero, Rev. B4 Errata point 5) */ #define ENC28J60_RXSTART 0x0000 /* End of RX buffer, room for 2 packets */ #define ENC28J60_RXEND 0x0BFF /* Start of TX buffer, room for 1 packet */ #define ENC28J60_TXSTART 0x0C00 /* End of TX buffer */ #define ENC28J60_TXEND 0x11FF /* Status vectors array size */ #define TSV_SIZE 7 #define RSV_SIZE 4 /* Microchip's OUI*/ #define MICROCHIP_OUI_B0 0x00 #define MICROCHIP_OUI_B1 0x04 #define MICROCHIP_OUI_B2 0xA3 #define MAX_BUFFER_LENGTH 128 struct eth_enc28j60_config { struct spi_dt_spec spi; struct gpio_dt_spec interrupt; uint8_t full_duplex; int32_t timeout; uint8_t hw_rx_filter; bool random_mac; }; struct eth_enc28j60_runtime { struct net_if *iface; K_KERNEL_STACK_MEMBER(thread_stack, CONFIG_ETH_ENC28J60_RX_THREAD_STACK_SIZE); struct k_thread thread; uint8_t mac_address[6]; struct gpio_callback gpio_cb; struct k_sem tx_rx_sem; struct k_sem int_sem; bool iface_initialized : 1; }; #endif /*_ENC28J60_*/ ```
/content/code_sandbox/drivers/ethernet/eth_enc28j60_priv.h
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
2,897
```kconfig
# ETH_STELLARIS Ethernet driver configuration options

menuconfig ETH_STELLARIS
	bool "TI Stellaris MCU family ethernet driver"
	default y
	depends on DT_HAS_TI_STELLARIS_ETHERNET_ENABLED
	help
	  Stellaris on-board Ethernet Controller

config ETH_NIC_MODEL
	string
	default "stellaris"
	depends on ETH_STELLARIS
	help
	  Tells which QEMU network model to use. This value is given as a
	  parameter to the -nic QEMU command line option.
```
/content/code_sandbox/drivers/ethernet/Kconfig.stellaris
kconfig
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
109
```c /* * Xilinx Processor System Gigabit Ethernet controller (GEM) driver * * * Known current limitations / TODOs: * - Only supports 32-bit addresses in buffer descriptors, therefore * the ZynqMP APU (Cortex-A53 cores) may not be fully supported. * - Hardware timestamps not considered. * - VLAN tags not considered. * - Wake-on-LAN interrupt not supported. * - Send function is not SMP-capable (due to single TX done semaphore). * - Interrupt-driven PHY management not supported - polling only. * - No explicit placement of the DMA memory area(s) in either a * specific memory section or at a fixed memory location yet. This * is not an issue as long as the controller is used in conjunction * with the Cortex-R5 QEMU target or an actual R5 running without the * MPU enabled. * - No detailed error handling when evaluating the Interrupt Status, * RX Status and TX Status registers. */ #include <zephyr/kernel.h> #include <zephyr/device.h> #include <zephyr/devicetree.h> #include <zephyr/sys/__assert.h> #include <zephyr/net/net_if.h> #include <zephyr/net/ethernet.h> #include <ethernet/eth_stats.h> #include "eth_xlnx_gem_priv.h" #define LOG_MODULE_NAME eth_xlnx_gem #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); static int eth_xlnx_gem_dev_init(const struct device *dev); static void eth_xlnx_gem_iface_init(struct net_if *iface); static void eth_xlnx_gem_isr(const struct device *dev); static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt); static int eth_xlnx_gem_start_device(const struct device *dev); static int eth_xlnx_gem_stop_device(const struct device *dev); static enum ethernet_hw_caps eth_xlnx_gem_get_capabilities(const struct device *dev); #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev); #endif static void eth_xlnx_gem_reset_hw(const struct device *dev); static void eth_xlnx_gem_configure_clocks(const struct device 
*dev); static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev); static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev); static void eth_xlnx_gem_set_mac_address(const struct device *dev); static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev); static void eth_xlnx_gem_init_phy(const struct device *dev); static void eth_xlnx_gem_poll_phy(struct k_work *item); static void eth_xlnx_gem_configure_buffers(const struct device *dev); static void eth_xlnx_gem_rx_pending_work(struct k_work *item); static void eth_xlnx_gem_handle_rx_pending(const struct device *dev); static void eth_xlnx_gem_tx_done_work(struct k_work *item); static void eth_xlnx_gem_handle_tx_done(const struct device *dev); static const struct ethernet_api eth_xlnx_gem_apis = { .iface_api.init = eth_xlnx_gem_iface_init, .get_capabilities = eth_xlnx_gem_get_capabilities, .send = eth_xlnx_gem_send, .start = eth_xlnx_gem_start_device, .stop = eth_xlnx_gem_stop_device, #if defined(CONFIG_NET_STATISTICS_ETHERNET) .get_stats = eth_xlnx_gem_stats, #endif }; /* * Insert the configuration & run-time data for all GEM instances which * are enabled in the device tree of the current target board. */ DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INITIALIZE) /** * @brief GEM device initialization function * Initializes the GEM itself, the DMA memory area used by the GEM and, * if enabled, an associated PHY attached to the GEM's MDIO interface. 
* * @param dev Pointer to the device data * @retval 0 if the device initialization completed successfully */ static int eth_xlnx_gem_dev_init(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; uint32_t reg_val; /* Precondition checks using assertions */ /* Valid PHY address and polling interval, if PHY is to be managed */ if (dev_conf->init_phy) { __ASSERT((dev_conf->phy_mdio_addr_fix >= 0 && dev_conf->phy_mdio_addr_fix <= 32), "%s invalid PHY address %u, must be in range " "1 to 32, or 0 for auto-detection", dev->name, dev_conf->phy_mdio_addr_fix); __ASSERT(dev_conf->phy_poll_interval > 0, "%s has an invalid zero PHY status polling " "interval", dev->name); } /* Valid max. / nominal link speed value */ __ASSERT((dev_conf->max_link_speed == LINK_10MBIT || dev_conf->max_link_speed == LINK_100MBIT || dev_conf->max_link_speed == LINK_1GBIT), "%s invalid max./nominal link speed value %u", dev->name, (uint32_t)dev_conf->max_link_speed); /* MDC clock divider validity check, SoC dependent */ #if defined(CONFIG_SOC_XILINX_ZYNQMP) __ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_48, "%s invalid MDC clock divider value %u, must be in " "range 0 to %u", dev->name, dev_conf->mdc_divider, (uint32_t)MDC_DIVIDER_48); #elif defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000) __ASSERT(dev_conf->mdc_divider <= MDC_DIVIDER_224, "%s invalid MDC clock divider value %u, must be in " "range 0 to %u", dev->name, dev_conf->mdc_divider, (uint32_t)MDC_DIVIDER_224); #endif /* AMBA AHB configuration options */ __ASSERT((dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_32BIT || dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_64BIT || dev_conf->amba_dbus_width == AMBA_AHB_DBUS_WIDTH_128BIT), "%s AMBA AHB bus width configuration is invalid", dev->name); __ASSERT((dev_conf->ahb_burst_length == AHB_BURST_SINGLE || dev_conf->ahb_burst_length == AHB_BURST_INCR4 || dev_conf->ahb_burst_length == AHB_BURST_INCR8 || dev_conf->ahb_burst_length == AHB_BURST_INCR16), "%s AMBA AHB 
burst length configuration is invalid", dev->name); /* HW RX buffer size */ __ASSERT((dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_8KB || dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_4KB || dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_2KB || dev_conf->hw_rx_buffer_size == HWRX_BUFFER_SIZE_1KB), "%s hardware RX buffer size configuration is invalid", dev->name); /* HW RX buffer offset */ __ASSERT(dev_conf->hw_rx_buffer_offset <= 3, "%s hardware RX buffer offset %u is invalid, must be in " "range 0 to 3", dev->name, dev_conf->hw_rx_buffer_offset); /* * RX & TX buffer sizes * RX Buffer size must be a multiple of 64, as the size of the * corresponding DMA receive buffer in AHB system memory is * expressed as n * 64 bytes in the DMA configuration register. */ __ASSERT(dev_conf->rx_buffer_size % 64 == 0, "%s RX buffer size %u is not a multiple of 64 bytes", dev->name, dev_conf->rx_buffer_size); __ASSERT((dev_conf->rx_buffer_size != 0 && dev_conf->rx_buffer_size <= 16320), "%s RX buffer size %u is invalid, should be >64, " "must be 16320 bytes maximum.", dev->name, dev_conf->rx_buffer_size); __ASSERT((dev_conf->tx_buffer_size != 0 && dev_conf->tx_buffer_size <= 16380), "%s TX buffer size %u is invalid, should be >64, " "must be 16380 bytes maximum.", dev->name, dev_conf->tx_buffer_size); /* Checksum offloading limitations of the QEMU GEM implementation */ #ifdef CONFIG_QEMU_TARGET __ASSERT(!dev_conf->enable_rx_chksum_offload, "TCP/UDP/IP hardware checksum offloading is not " "supported by the QEMU GEM implementation"); __ASSERT(!dev_conf->enable_tx_chksum_offload, "TCP/UDP/IP hardware checksum offloading is not " "supported by the QEMU GEM implementation"); #endif /* * Initialization procedure as described in the Zynq-7000 TRM, * chapter 16.3.x. 
*/ eth_xlnx_gem_reset_hw(dev); /* Chapter 16.3.1 */ eth_xlnx_gem_set_initial_nwcfg(dev); /* Chapter 16.3.2 */ eth_xlnx_gem_set_mac_address(dev); /* Chapter 16.3.2 */ eth_xlnx_gem_set_initial_dmacr(dev); /* Chapter 16.3.2 */ /* Enable MDIO -> set gem.net_ctrl[mgmt_port_en] */ if (dev_conf->init_phy) { reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); reg_val |= ETH_XLNX_GEM_NWCTRL_MDEN_BIT; sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); } eth_xlnx_gem_configure_clocks(dev); /* Chapter 16.3.3 */ if (dev_conf->init_phy) { eth_xlnx_gem_init_phy(dev); /* Chapter 16.3.4 */ } eth_xlnx_gem_configure_buffers(dev); /* Chapter 16.3.5 */ return 0; } /** * @brief GEM associated interface initialization function * Initializes the interface associated with a GEM device. * * @param iface Pointer to the associated interface data struct */ static void eth_xlnx_gem_iface_init(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; /* Set the initial contents of the current instance's run-time data */ dev_data->iface = iface; net_if_set_link_addr(iface, dev_data->mac_addr, 6, NET_LINK_ETHERNET); ethernet_init(iface); net_if_carrier_off(iface); /* * Initialize the (delayed) work items for RX pending, TX done * and PHY status polling handlers */ k_work_init(&dev_data->tx_done_work, eth_xlnx_gem_tx_done_work); k_work_init(&dev_data->rx_pend_work, eth_xlnx_gem_rx_pending_work); k_work_init_delayable(&dev_data->phy_poll_delayed_work, eth_xlnx_gem_poll_phy); /* Initialize TX completion semaphore */ k_sem_init(&dev_data->tx_done_sem, 0, 1); /* * Initialize semaphores in the RX/TX BD rings which have not * yet been initialized */ k_sem_init(&dev_data->txbd_ring.ring_sem, 1, 1); /* RX BD ring semaphore is not required at the time being */ /* Initialize the device's interrupt */ dev_conf->config_func(dev); /* Submit 
initial PHY status polling delayed work */ k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT); } /** * @brief GEM interrupt service routine * GEM interrupt service routine. Checks for indications of errors * and either immediately handles RX pending / TX complete notifications * or defers them to the system work queue. * * @param dev Pointer to the device data */ static void eth_xlnx_gem_isr(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint32_t reg_val; /* Read the interrupt status register */ reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); /* * TODO: handling if one or more error flag(s) are set in the * interrupt status register. -> For now, just log them */ if (reg_val & ETH_XLNX_GEM_IXR_ERRORS_MASK) { LOG_ERR("%s error bit(s) set in Interrupt Status Reg.: 0x%08X", dev->name, reg_val); } /* * Check for the following indications by the controller: * reg_val & 0x00000080 -> gem.intr_status bit [7] = Frame TX complete * reg_val & 0x00000002 -> gem.intr_status bit [1] = Frame received * comp. Zynq-7000 TRM, Chapter B.18, p. 1289/1290. * If the respective condition's handling is configured to be deferred * to the work queue thread, submit the corresponding job to the work * queue, otherwise, handle the condition immediately. 
*/ if ((reg_val & ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT) != 0) { sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); if (dev_conf->defer_txd_to_queue) { k_work_submit(&dev_data->tx_done_work); } else { eth_xlnx_gem_handle_tx_done(dev); } } if ((reg_val & ETH_XLNX_GEM_IXR_FRAME_RX_BIT) != 0) { sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT, dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT, dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); if (dev_conf->defer_rxp_to_queue) { k_work_submit(&dev_data->rx_pend_work); } else { eth_xlnx_gem_handle_rx_pending(dev); } } /* * Clear all interrupt status bits so that the interrupt is de-asserted * by the GEM. -> TXSR/RXSR are read/cleared by either eth_xlnx_gem_- * handle_tx_done or eth_xlnx_gem_handle_rx_pending if those actions * are not deferred to the system's work queue for the current inter- * face. If the latter is the case, those registers will be read/ * cleared whenever the corresponding work item submitted from within * this ISR is being processed. */ sys_write32((0xFFFFFFFF & ~(ETH_XLNX_GEM_IXR_FRAME_RX_BIT | ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT)), dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); } /** * @brief GEM data send function * GEM data send function. Blocks until a TX complete notification has been * received & processed. * * @param dev Pointer to the device data * @param pkt Pointer to the data packet to be sent * @retval -EINVAL in case of invalid parameters, e.g. 
zero data length * @retval -EIO in case of: * (1) the attempt to TX data while the device is stopped, * the interface is down or the link is down, * (2) the attempt to TX data while no free buffers are available * in the DMA memory area, * (3) the transmission completion notification timing out * @retval 0 if the packet was transmitted successfully */ static int eth_xlnx_gem_send(const struct device *dev, struct net_pkt *pkt) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint16_t tx_data_length; uint16_t tx_data_remaining; void *tx_buffer_offs; uint8_t bds_reqd; uint8_t curr_bd_idx; uint8_t first_bd_idx; uint32_t reg_ctrl; uint32_t reg_val; int sem_status; if (!dev_data->started || dev_data->eff_link_speed == LINK_DOWN || (!net_if_flag_is_set(dev_data->iface, NET_IF_UP))) { #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.tx_dropped++; #endif return -EIO; } tx_data_length = tx_data_remaining = net_pkt_get_len(pkt); if (tx_data_length == 0) { LOG_ERR("%s cannot TX, zero packet length", dev->name); #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.errors.tx++; #endif return -EINVAL; } /* * Check if enough buffer descriptors are available for the amount * of data to be transmitted, update the free BD count if this is * the case. Update the 'next to use' BD index in the TX BD ring if * sufficient space is available. If TX done handling, where the BD * ring's data is accessed as well, is performed via the system work * queue, protect against interruptions during the update of the BD * ring's data by taking the ring's semaphore. If TX done handling * is performed within the ISR, protect against interruptions by * disabling the TX done interrupt source. 
*/ bds_reqd = (uint8_t)((tx_data_length + (dev_conf->tx_buffer_size - 1)) / dev_conf->tx_buffer_size); if (dev_conf->defer_txd_to_queue) { k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER); } else { sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); } if (bds_reqd > dev_data->txbd_ring.free_bds) { LOG_ERR("%s cannot TX, packet length %hu requires " "%hhu BDs, current free count = %hhu", dev->name, tx_data_length, bds_reqd, dev_data->txbd_ring.free_bds); if (dev_conf->defer_txd_to_queue) { k_sem_give(&(dev_data->txbd_ring.ring_sem)); } else { sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); } #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.tx_dropped++; #endif return -EIO; } curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_use; reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); dev_data->txbd_ring.next_to_use = (first_bd_idx + bds_reqd) % dev_conf->txbd_count; dev_data->txbd_ring.free_bds -= bds_reqd; if (dev_conf->defer_txd_to_queue) { k_sem_give(&(dev_data->txbd_ring.ring_sem)); } else { sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); } /* * Scatter the contents of the network packet's buffer to * one or more DMA buffers. */ net_pkt_cursor_init(pkt); do { /* Calculate the base pointer of the target TX buffer */ tx_buffer_offs = (void *)(dev_data->first_tx_buffer + (dev_conf->tx_buffer_size * curr_bd_idx)); /* Copy packet data to DMA buffer */ net_pkt_read(pkt, (void *)tx_buffer_offs, (tx_data_remaining < dev_conf->tx_buffer_size) ? tx_data_remaining : dev_conf->tx_buffer_size); /* Update current BD's control word */ reg_val = sys_read32(reg_ctrl) & (ETH_XLNX_GEM_TXBD_WRAP_BIT | ETH_XLNX_GEM_TXBD_USED_BIT); reg_val |= (tx_data_remaining < dev_conf->tx_buffer_size) ? 
tx_data_remaining : dev_conf->tx_buffer_size; sys_write32(reg_val, reg_ctrl); if (tx_data_remaining > dev_conf->tx_buffer_size) { /* Switch to next BD */ curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count; reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); } tx_data_remaining -= (tx_data_remaining < dev_conf->tx_buffer_size) ? tx_data_remaining : dev_conf->tx_buffer_size; } while (tx_data_remaining > 0); /* Set the 'last' bit in the current BD's control word */ reg_val |= ETH_XLNX_GEM_TXBD_LAST_BIT; /* * Clear the 'used' bits of all BDs involved in the current * transmission. In accordance with chapter 16.3.8 of the * Zynq-7000 TRM, the 'used' bits shall be cleared in reverse * order, so that the 'used' bit of the first BD is cleared * last just before the transmission is started. */ reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT; sys_write32(reg_val, reg_ctrl); while (curr_bd_idx != first_bd_idx) { curr_bd_idx = (curr_bd_idx != 0) ? (curr_bd_idx - 1) : (dev_conf->txbd_count - 1); reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); reg_val = sys_read32(reg_ctrl); reg_val &= ~ETH_XLNX_GEM_TXBD_USED_BIT; sys_write32(reg_val, reg_ctrl); } /* Set the start TX bit in the gem.net_ctrl register */ reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); reg_val |= ETH_XLNX_GEM_NWCTRL_STARTTX_BIT; sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.bytes.sent += tx_data_length; dev_data->stats.pkts.tx++; #endif /* Block until TX has completed */ sem_status = k_sem_take(&dev_data->tx_done_sem, K_MSEC(100)); if (sem_status < 0) { LOG_ERR("%s TX confirmation timed out", dev->name); #ifdef CONFIG_NET_STATISTICS_ETHERNET dev_data->stats.tx_timeout_count++; #endif return -EIO; } return 0; } /** * @brief GEM device start function * GEM device start function. Clears all status registers and any * pending interrupts, enables RX and TX, enables interrupts. 
If * no PHY is managed by the current driver instance, this function * also declares the physical link up at the configured nominal * link speed. * * @param dev Pointer to the device data * @retval 0 upon successful completion */ static int eth_xlnx_gem_start_device(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint32_t reg_val; if (dev_data->started) { return 0; } dev_data->started = true; /* Disable & clear all the MAC interrupts */ sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET); sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET); /* Clear RX & TX status registers */ sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET); /* RX and TX enable */ reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); reg_val |= (ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT); sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET); /* Enable all the MAC interrupts */ sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK, dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); /* Submit the delayed work for polling the link state */ if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) == 0) { k_work_reschedule(&dev_data->phy_poll_delayed_work, K_NO_WAIT); } LOG_DBG("%s started", dev->name); return 0; } /** * @brief GEM device stop function * GEM device stop function. Disables all interrupts, disables * RX and TX, clears all status registers. If no PHY is managed * by the current driver instance, this function also declares * the physical link down. 
 *
 * @param dev Pointer to the device data
 * @retval 0 upon successful completion
 */
static int eth_xlnx_gem_stop_device(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	/* Idempotent: stopping an already stopped controller is a no-op */
	if (!dev_data->started) {
		return 0;
	}
	dev_data->started = false;

	/* Cancel the delayed work that polls the link state */
	if (k_work_delayable_remaining_get(&dev_data->phy_poll_delayed_work) != 0) {
		k_work_cancel_delayable(&dev_data->phy_poll_delayed_work);
	}

	/* RX and TX disable */
	reg_val = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);
	reg_val &= (~(ETH_XLNX_GEM_NWCTRL_RXEN_BIT | ETH_XLNX_GEM_NWCTRL_TXEN_BIT));
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Disable & clear all the MAC interrupts */
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);
	sys_write32(ETH_XLNX_GEM_IXR_ALL_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_ISR_OFFSET);

	/* Clear RX & TX status registers (write-one-to-clear) */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	LOG_DBG("%s stopped", dev->name);
	return 0;
}

/**
 * @brief GEM capability request function
 * Returns the capabilities of the GEM controller as an enumeration.
 * All of the data returned is derived from the device configuration
 * of the current GEM device instance.
 *
 * @param dev Pointer to the device data
 * @return Enumeration containing the current GEM device's capabilities
 */
static enum ethernet_hw_caps eth_xlnx_gem_get_capabilities(
	const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	enum ethernet_hw_caps caps = (enum ethernet_hw_caps)0;

	/*
	 * Link speed capabilities: when 'phy_advertise_lower' is set,
	 * all speeds below the configured maximum are reported as well.
	 */
	if (dev_conf->max_link_speed == LINK_1GBIT) {
		if (dev_conf->phy_advertise_lower) {
			caps |= (ETHERNET_LINK_1000BASE_T |
				 ETHERNET_LINK_100BASE_T |
				 ETHERNET_LINK_10BASE_T);
		} else {
			caps |= ETHERNET_LINK_1000BASE_T;
		}
	} else if (dev_conf->max_link_speed == LINK_100MBIT) {
		if (dev_conf->phy_advertise_lower) {
			caps |= (ETHERNET_LINK_100BASE_T |
				 ETHERNET_LINK_10BASE_T);
		} else {
			caps |= ETHERNET_LINK_100BASE_T;
		}
	} else {
		caps |= ETHERNET_LINK_10BASE_T;
	}

	if (dev_conf->enable_rx_chksum_offload) {
		caps |= ETHERNET_HW_RX_CHKSUM_OFFLOAD;
	}

	if (dev_conf->enable_tx_chksum_offload) {
		caps |= ETHERNET_HW_TX_CHKSUM_OFFLOAD;
	}

	if (dev_conf->enable_fdx) {
		caps |= ETHERNET_DUPLEX_SET;
	}

	if (dev_conf->copy_all_frames) {
		caps |= ETHERNET_PROMISC_MODE;
	}

	return caps;
}

#ifdef CONFIG_NET_STATISTICS_ETHERNET
/**
 * @brief GEM statistics data request function
 * Returns a pointer to the statistics data of the current GEM controller.
 *
 * @param dev Pointer to the device data
 * @return Pointer to the current GEM device's statistics data
 */
static struct net_stats_eth *eth_xlnx_gem_stats(const struct device *dev)
{
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	return &dev_data->stats;
}
#endif

/**
 * @brief GEM Hardware reset function
 * Resets the current GEM device. Called from within the device
 * initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_reset_hw(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	/*
	 * Controller reset sequence as described in the Zynq-7000 TRM,
	 * chapter 16.3.1. The order of the register writes below follows
	 * the TRM and must not be changed.
	 */

	/* Clear the NWCTRL register */
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Clear the statistics counters */
	sys_write32(ETH_XLNX_GEM_STATCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_NWCTRL_OFFSET);

	/* Clear the RX/TX status registers */
	sys_write32(ETH_XLNX_GEM_TXSRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET);
	sys_write32(ETH_XLNX_GEM_RXSRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);

	/* Disable all interrupts */
	sys_write32(ETH_XLNX_GEM_IDRCLR_MASK,
		    dev_conf->base_addr + ETH_XLNX_GEM_IDR_OFFSET);

	/* Clear the buffer queues */
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
	sys_write32(0x00000000,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
}

/**
 * @brief GEM clock configuration function
 * Calculates the pre-scalers for the TX clock to match the current
 * (if an associated PHY is managed) or nominal link speed. Called
 * from within the device initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_configure_clocks(const struct device *dev)
{
	/*
	 * Clock source configuration for the respective GEM as described
	 * in the Zynq-7000 TRM, chapter 16.3.3, is not tackled here. This
	 * is performed by the PS7Init code. Only the DIVISOR and DIVISOR1
	 * values for the respective GEM's TX clock are calculated here.
	 */

	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;

	uint32_t div0;
	uint32_t div1;
	uint32_t target = 2500000; /* default prevents 'may be uninitialized' warning */
	uint32_t tmp;
	uint32_t clk_ctrl_reg;

	if ((!dev_conf->init_phy) || dev_data->eff_link_speed == LINK_DOWN) {
		/*
		 * Run-time data indicates 'link down' or PHY management
		 * is disabled for the current device -> this indicates the
		 * initial device initialization. Once the PHY status polling
		 * delayed work handler has picked up the result of the auto-
		 * negotiation (if enabled), this if-statement will evaluate
		 * to false.
		 */
		if (dev_conf->max_link_speed == LINK_10MBIT) {
			target = 2500000;   /* Target frequency: 2.5 MHz */
		} else if (dev_conf->max_link_speed == LINK_100MBIT) {
			target = 25000000;  /* Target frequency: 25 MHz */
		} else if (dev_conf->max_link_speed == LINK_1GBIT) {
			target = 125000000; /* Target frequency: 125 MHz */
		}
	} else if (dev_data->eff_link_speed != LINK_DOWN) {
		/*
		 * Use the effective link speed instead of the maximum/nominal
		 * link speed for clock configuration.
		 */
		if (dev_data->eff_link_speed == LINK_10MBIT) {
			target = 2500000;   /* Target frequency: 2.5 MHz */
		} else if (dev_data->eff_link_speed == LINK_100MBIT) {
			target = 25000000;  /* Target frequency: 25 MHz */
		} else if (dev_data->eff_link_speed == LINK_1GBIT) {
			target = 125000000; /* Target frequency: 125 MHz */
		}
	}

	/*
	 * Calculate the divisors for the target frequency.
	 * The frequency of the PLL to which the divisors shall be applied are
	 * provided in the respective GEM's device tree data. A +/- 10 Hz
	 * tolerance around the target frequency is accepted.
	 */
	for (div0 = 1; div0 < 64; div0++) {
		for (div1 = 1; div1 < 64; div1++) {
			tmp = ((dev_conf->pll_clock_frequency / div0) / div1);
			if (tmp >= (target - 10) && tmp <= (target + 10)) {
				break;
			}
		}
		if (tmp >= (target - 10) && tmp <= (target + 10)) {
			break;
		}
	}

#if defined(CONFIG_SOC_XILINX_ZYNQMP)
	/*
	 * ZynqMP register crl_apb.GEMx_REF_CTRL:
	 * RX_CLKACT bit [26]
	 * CLKACT bit [25]
	 * div0 bits [13..8], div1 bits [21..16]
	 */
	clk_ctrl_reg  = sys_read32(dev_conf->clk_ctrl_reg_address);
	clk_ctrl_reg &= ~((ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) |
			(ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT));
	clk_ctrl_reg |= ((div0 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR0_SHIFT) |
			((div1 & ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_DIVISOR1_SHIFT);
	clk_ctrl_reg |= ETH_XLNX_CRL_APB_GEMX_REF_CTRL_RX_CLKACT_BIT |
			ETH_XLNX_CRL_APB_GEMX_REF_CTRL_CLKACT_BIT;

	/*
	 * Unlock CRL_APB write access if the write protect bit
	 * is currently set, restore it afterwards.
	 */
	tmp = sys_read32(ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) {
		sys_write32((tmp & ~ETH_XLNX_CRL_APB_WPROT_BIT),
			    ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	}
	sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
	if ((tmp & ETH_XLNX_CRL_APB_WPROT_BIT) > 0) {
		sys_write32(tmp, ETH_XLNX_CRL_APB_WPROT_REGISTER_ADDRESS);
	}
#elif defined(CONFIG_SOC_FAMILY_XILINX_ZYNQ7000)
	/* Zynq-7000: div0/div1 fields in the slcr.GEMx_CLK_CTRL register */
	clk_ctrl_reg  = sys_read32(dev_conf->clk_ctrl_reg_address);
	clk_ctrl_reg &= ~((ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT) |
			(ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT));
	clk_ctrl_reg |= ((div0 & ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR0_SHIFT) |
			((div1 & ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR_MASK) <<
			ETH_XLNX_SLCR_GEMX_CLK_CTRL_DIVISOR1_SHIFT);

	sys_write32(clk_ctrl_reg, dev_conf->clk_ctrl_reg_address);
#endif /* CONFIG_SOC_XILINX_ZYNQMP / CONFIG_SOC_FAMILY_XILINX_ZYNQ7000 */

	LOG_DBG("%s set clock dividers div0/1 %u/%u for target "
		"frequency %u Hz", dev->name, div0, div1, target);
}

/**
 * @brief GEM initial Network Configuration Register setup function
 * Writes the contents of the current GEM device's Network Configuration
 * Register
 (NWCFG / gem.net_cfg). Called from within the device
 * initialization function. Implementation differs depending on whether
 * the current target is a Zynq-7000 or a ZynqMP.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_initial_nwcfg(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val = 0;

	/* Assemble the register word bit by bit from the configuration data */
	if (dev_conf->ignore_ipg_rxer) {
		/* [30]     ignore IPG rx_er */
		reg_val |= ETH_XLNX_GEM_NWCFG_IGNIPGRXERR_BIT;
	}
	if (dev_conf->disable_reject_nsp) {
		/* [29]     disable rejection of non-standard preamble */
		reg_val |= ETH_XLNX_GEM_NWCFG_BADPREAMBEN_BIT;
	}
	if (dev_conf->enable_ipg_stretch) {
		/* [28]     enable IPG stretch */
		reg_val |= ETH_XLNX_GEM_NWCFG_IPG_STRETCH_BIT;
	}
	if (dev_conf->enable_sgmii_mode) {
		/* [27]     SGMII mode enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_SGMIIEN_BIT;
	}
	if (dev_conf->disable_reject_fcs_crc_errors) {
		/* [26]     disable rejection of FCS/CRC errors */
		reg_val |= ETH_XLNX_GEM_NWCFG_FCSIGNORE_BIT;
	}
	if (dev_conf->enable_rx_halfdup_while_tx) {
		/* [25]     RX half duplex while TX enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_HDRXEN_BIT;
	}
	if (dev_conf->enable_rx_chksum_offload) {
		/* [24]     enable RX IP/TCP/UDP checksum offload */
		reg_val |= ETH_XLNX_GEM_NWCFG_RXCHKSUMEN_BIT;
	}
	if (dev_conf->disable_pause_copy) {
		/* [23]     Do not copy pause Frames to memory */
		reg_val |= ETH_XLNX_GEM_NWCFG_PAUSECOPYDI_BIT;
	}
	/* [22..21] Data bus width */
	reg_val |= (((uint32_t)(dev_conf->amba_dbus_width) &
		    ETH_XLNX_GEM_NWCFG_DBUSW_MASK) <<
		    ETH_XLNX_GEM_NWCFG_DBUSW_SHIFT);
	/* [20..18] MDC clock divider */
	reg_val |= (((uint32_t)dev_conf->mdc_divider &
		    ETH_XLNX_GEM_NWCFG_MDC_MASK) <<
		    ETH_XLNX_GEM_NWCFG_MDC_SHIFT);
	if (dev_conf->discard_rx_fcs) {
		/* [17]     Discard FCS from received frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_FCSREM_BIT;
	}
	if (dev_conf->discard_rx_length_errors) {
		/* [16]     RX length error discard */
		reg_val |= ETH_XLNX_GEM_NWCFG_LENGTHERRDSCRD_BIT;
	}
	/* [15..14] RX buffer offset */
	reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_offset &
		    ETH_XLNX_GEM_NWCFG_RXOFFS_MASK) <<
		    ETH_XLNX_GEM_NWCFG_RXOFFS_SHIFT);
	if (dev_conf->enable_pause) {
		/* [13]     Enable pause TX */
		reg_val |= ETH_XLNX_GEM_NWCFG_PAUSEEN_BIT;
	}
	if (dev_conf->enable_tbi) {
		/* [11]     enable TBI instead of GMII/MII */
		reg_val |= ETH_XLNX_GEM_NWCFG_TBIINSTEAD_BIT;
	}
	if (dev_conf->ext_addr_match) {
		/* [09]     External address match enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_EXTADDRMATCHEN_BIT;
	}
	if (dev_conf->enable_1536_frames) {
		/* [08]     Enable 1536 byte frames reception */
		reg_val |= ETH_XLNX_GEM_NWCFG_1536RXEN_BIT;
	}
	if (dev_conf->enable_ucast_hash) {
		/* [07]     Receive unicast hash frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_UCASTHASHEN_BIT;
	}
	if (dev_conf->enable_mcast_hash) {
		/* [06]     Receive multicast hash frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_MCASTHASHEN_BIT;
	}
	if (dev_conf->disable_bcast) {
		/* [05]     Do not receive broadcast frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_BCASTDIS_BIT;
	}
	if (dev_conf->copy_all_frames) {
		/* [04]     Copy all frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_COPYALLEN_BIT;
	}
	if (dev_conf->discard_non_vlan) {
		/* [02]     Receive only VLAN frames */
		reg_val |= ETH_XLNX_GEM_NWCFG_NVLANDISC_BIT;
	}
	if (dev_conf->enable_fdx) {
		/* [01]     enable Full duplex */
		reg_val |= ETH_XLNX_GEM_NWCFG_FDEN_BIT;
	}
	if (dev_conf->max_link_speed == LINK_100MBIT) {
		/* [00]     10 or 100 Mbps */
		reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT;
	} else if (dev_conf->max_link_speed == LINK_1GBIT) {
		/* [10]     Gigabit mode enable */
		reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT;
	}
	/*
	 * No else-branch for 10Mbit/s mode:
	 * in 10 Mbit/s mode, both bits [00] and [10] remain 0
	 */

	/* Write the assembled register contents to gem.net_cfg */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
}

/**
 * @brief GEM Network Configuration Register link speed update function
 * Updates only the link speed-related bits of the Network Configuration
 * register. This is called from within #eth_xlnx_gem_poll_phy.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_nwcfg_link_speed(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_val;

	/*
	 * Read the current gem.net_cfg register contents and mask out
	 * the link speed-related bits
	 */
	reg_val  = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
	reg_val &= ~(ETH_XLNX_GEM_NWCFG_1000_BIT | ETH_XLNX_GEM_NWCFG_100_BIT);

	/* No bits to set for 10 Mbps. 100 Mbps and 1 Gbps set one bit each. */
	if (dev_data->eff_link_speed == LINK_100MBIT) {
		reg_val |= ETH_XLNX_GEM_NWCFG_100_BIT;
	} else if (dev_data->eff_link_speed == LINK_1GBIT) {
		reg_val |= ETH_XLNX_GEM_NWCFG_1000_BIT;
	}

	/* Write the assembled register contents to gem.net_cfg */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_NWCFG_OFFSET);
}

/**
 * @brief GEM MAC address setup function
 * Acquires the MAC address to be assigned to the current GEM device
 * from the device configuration data which in turn acquires it from
 * the device tree data, then writes it to the gem.spec_addr1_bot/LADDR1L
 * and gem.spec_addr1_top/LADDR1H registers. Called from within the device
 * initialization function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_mac_address(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t regval_top;
	uint32_t regval_bot;

	/* Bytes [0..3] -> bottom register, bytes [4..5] -> top register */
	regval_bot  = (dev_data->mac_addr[0] & 0xFF);
	regval_bot |= (dev_data->mac_addr[1] & 0xFF) << 8;
	regval_bot |= (dev_data->mac_addr[2] & 0xFF) << 16;
	regval_bot |= (dev_data->mac_addr[3] & 0xFF) << 24;

	regval_top  = (dev_data->mac_addr[4] & 0xFF);
	regval_top |= (dev_data->mac_addr[5] & 0xFF) << 8;

	sys_write32(regval_bot, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1L_OFFSET);
	sys_write32(regval_top, dev_conf->base_addr + ETH_XLNX_GEM_LADDR1H_OFFSET);

	LOG_DBG("%s MAC %02X:%02X:%02X:%02X:%02X:%02X",
		dev->name,
		dev_data->mac_addr[0],
		dev_data->mac_addr[1],
		dev_data->mac_addr[2],
		dev_data->mac_addr[3],
		dev_data->mac_addr[4],
		dev_data->mac_addr[5]);
}

/**
 * @brief GEM initial DMA Control Register setup function
 * Writes the contents of the current GEM device's DMA Control Register
 * (DMACR / gem.dma_cfg). Called from within the device initialization
 * function.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_set_initial_dmacr(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	uint32_t reg_val = 0;

	/*
	 * gem.dma_cfg register bit (field) definitions:
	 * comp. Zynq-7000 TRM, p. 1278 ff.
	 */

	if (dev_conf->disc_rx_ahb_unavail) {
		/* [24] Discard RX packet when AHB unavailable */
		reg_val |= ETH_XLNX_GEM_DMACR_DISCNOAHB_BIT;
	}
	/*
	 * [23..16] DMA RX buffer size in AHB system memory
	 *    e.g.: 0x02 = 128, 0x18 = 1536, 0xA0 = 10240
	 */
	reg_val |= (((dev_conf->rx_buffer_size / 64) &
		    ETH_XLNX_GEM_DMACR_RX_BUF_MASK) <<
		    ETH_XLNX_GEM_DMACR_RX_BUF_SHIFT);
	if (dev_conf->enable_tx_chksum_offload) {
		/* [11] TX TCP/UDP/IP checksum offload to GEM */
		reg_val |= ETH_XLNX_GEM_DMACR_TCP_CHKSUM_BIT;
	}
	if (dev_conf->tx_buffer_size_full) {
		/* [10] TX buffer memory size select */
		reg_val |= ETH_XLNX_GEM_DMACR_TX_SIZE_BIT;
	}
	/*
	 * [09..08] RX packet buffer memory size select
	 *          0 = 1kB, 1 = 2kB, 2 = 4kB, 3 = 8kB
	 */
	reg_val |= (((uint32_t)dev_conf->hw_rx_buffer_size <<
		    ETH_XLNX_GEM_DMACR_RX_SIZE_SHIFT) &
		    ETH_XLNX_GEM_DMACR_RX_SIZE_MASK);
	if (dev_conf->enable_ahb_packet_endian_swap) {
		/* [07] AHB packet data endian swap enable */
		reg_val |= ETH_XLNX_GEM_DMACR_ENDIAN_BIT;
	}
	if (dev_conf->enable_ahb_md_endian_swap) {
		/* [06] AHB mgmt descriptor endian swap enable */
		reg_val |= ETH_XLNX_GEM_DMACR_DESCR_ENDIAN_BIT;
	}
	/*
	 * [04..00] AHB fixed burst length for DMA ops.
	 *          00001 = single AHB bursts,
	 *          001xx = attempt to use INCR4  bursts,
	 *          01xxx = attempt to use INCR8  bursts,
	 *          1xxxx = attempt to use INCR16 bursts
	 */
	reg_val |= ((uint32_t)dev_conf->ahb_burst_length &
		   ETH_XLNX_GEM_DMACR_AHB_BURST_LENGTH_MASK);

	/* Write the assembled register contents */
	sys_write32(reg_val, dev_conf->base_addr + ETH_XLNX_GEM_DMACR_OFFSET);
}

/**
 * @brief GEM associated PHY detection and setup function
 * If the current GEM device shall manage an associated PHY, its detection
 * and configuration is performed from within this function. Called from
 * within the device initialization function. This function refers to
 * functionality implemented in the phy_xlnx_gem module.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_init_phy(const struct device *dev)
{
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	int detect_rc;

	LOG_DBG("%s attempting to initialize associated PHY", dev->name);

	/*
	 * The phy_xlnx_gem_detect function checks if a valid PHY
	 * ID is returned when reading the corresponding high / low
	 * ID registers for all valid MDIO addresses. If a compatible
	 * PHY is detected, the function writes a pointer to the
	 * vendor-specific implementations of the PHY management
	 * functions to the run-time device data struct, along with
	 * the ID and the MDIO address of the detected PHY (dev_data->
	 * phy_id, dev_data->phy_addr, dev_data->phy_access_api).
	 */
	detect_rc = phy_xlnx_gem_detect(dev);

	if (detect_rc == 0 && dev_data->phy_id != 0x00000000 &&
			dev_data->phy_id != 0xFFFFFFFF &&
			dev_data->phy_access_api != NULL) {
		/* A compatible PHY was detected -> reset & configure it */
		dev_data->phy_access_api->phy_reset_func(dev);
		dev_data->phy_access_api->phy_configure_func(dev);
	} else {
		LOG_WRN("%s no compatible PHY detected", dev->name);
	}
}

/**
 * @brief GEM associated PHY status polling function
 * This handler of a delayed work item is called from the context of
 * the system work queue. It is always scheduled at least once during the
 * interface initialization. If the current driver instance manages a
 * PHY, the delayed work item will be re-scheduled in order to continuously
 * monitor the link state and speed while the device is active. Link state
 * and link speed changes are polled, which may result in the link state
 * change being propagated (carrier on/off) and / or the TX clock being
 * reconfigured to match the current link speed. If PHY management is dis-
 * abled for the current driver instance or no compatible PHY was detected,
 * the work item will not be re-scheduled and default link speed and link
 * state values are applied. This function refers to functionality imple-
 * mented in the phy_xlnx_gem module.
 *
 * @param work Pointer to the delayed work item which facilitates
 *             access to the current device's configuration data
 */
static void eth_xlnx_gem_poll_phy(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(dwork,
		struct eth_xlnx_gem_dev_data, phy_poll_delayed_work);
	const struct device *dev = net_if_get_device(dev_data->iface);
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;

	uint16_t phy_status;
	uint8_t link_status;

	if (dev_data->phy_access_api != NULL) {
		/* A supported PHY is managed by the driver */
		phy_status = dev_data->phy_access_api->phy_poll_status_change_func(dev);

		if ((phy_status & (
			PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED |
			PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED |
			PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE)) != 0) {

			/*
			 * Get the PHY's link status. Handling a 'link down'
			 * event the simplest possible case.
			 */
			link_status = dev_data->phy_access_api->phy_poll_link_status_func(dev);

			if (link_status == 0) {
				/*
				 * Link is down -> propagate to the Ethernet
				 * layer that the link has gone down.
				 */
				dev_data->eff_link_speed = LINK_DOWN;
				net_eth_carrier_off(dev_data->iface);
				LOG_WRN("%s link down", dev->name);
			} else {
				/*
				 * A link has been detected, which, depending
				 * on the driver's configuration, might have
				 * a different speed than the previous link.
				 * Therefore, the clock dividers must be ad-
				 * justed accordingly.
				 */
				dev_data->eff_link_speed =
					dev_data->phy_access_api->phy_poll_link_speed_func(dev);
				eth_xlnx_gem_configure_clocks(dev);
				eth_xlnx_gem_set_nwcfg_link_speed(dev);
				net_eth_carrier_on(dev_data->iface);
				LOG_INF("%s link up, %s", dev->name,
					(dev_data->eff_link_speed == LINK_1GBIT)
					? "1 GBit/s"
					: (dev_data->eff_link_speed == LINK_100MBIT)
					? "100 MBit/s"
					: (dev_data->eff_link_speed == LINK_10MBIT)
					? "10 MBit/s" : "undefined / link down");
			}
		}

		/*
		 * Re-submit the delayed work using the interval from the device
		 * configuration data.
		 */
		k_work_reschedule(&dev_data->phy_poll_delayed_work,
				  K_MSEC(dev_conf->phy_poll_interval));
	} else {
		/*
		 * The current driver instance doesn't manage a PHY or no
		 * supported PHY was detected -> pretend the configured max.
		 * link speed is the effective link speed and that the link
		 * is up. The delayed work item won't be re-scheduled, as
		 * there isn't anything to poll for.
		 */
		dev_data->eff_link_speed = dev_conf->max_link_speed;
		eth_xlnx_gem_configure_clocks(dev);
		eth_xlnx_gem_set_nwcfg_link_speed(dev);
		net_eth_carrier_on(dev_data->iface);
		LOG_WRN("%s PHY not managed by the driver or no compatible "
			"PHY detected, assuming link up at %s", dev->name,
			(dev_conf->max_link_speed == LINK_1GBIT)
			? "1 GBit/s"
			: (dev_conf->max_link_speed == LINK_100MBIT)
			? "100 MBit/s"
			: (dev_conf->max_link_speed == LINK_10MBIT)
			? "10 MBit/s" : "undefined");
	}
}

/**
 * @brief GEM DMA memory area setup function
 * Sets up the DMA memory area to be used by the current GEM device.
 * Called from within the device initialization function or from within
 * the context of the PHY status polling delayed work handler.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_configure_buffers(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	struct eth_xlnx_gem_bd *bdptr;
	uint32_t buf_iter;

	/* Initial configuration of the RX/TX BD rings */
	DT_INST_FOREACH_STATUS_OKAY(ETH_XLNX_GEM_INIT_BD_RING)

	/*
	 * Set initial RX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
	 * "Receive Buffer Descriptor List". The BD ring data other than
	 * the base RX/TX buffer pointers will be set in eth_xlnx_gem_-
	 * iface_init()
	 */
	bdptr = dev_data->rxbd_ring.first_bd;

	for (buf_iter = 0; buf_iter < (dev_conf->rxbd_count - 1); buf_iter++) {
		/* Clear 'used' bit -> BD is owned by the controller */
		bdptr->ctrl = 0;
		bdptr->addr = (uint32_t)dev_data->first_rx_buffer +
			      (buf_iter * (uint32_t)dev_conf->rx_buffer_size);
		++bdptr;
	}

	/*
	 * For the last BD, bit [1] must be OR'ed in the buffer memory
	 * address -> this is the 'wrap' bit indicating that this is the
	 * last BD in the ring. This location is used as bits [1..0] can't
	 * be part of the buffer address due to alignment requirements
	 * anyways. Watch out: TX BDs handle this differently, their wrap
	 * bit is located in the BD's control word!
	 */
	bdptr->ctrl = 0; /* BD is owned by the controller */
	bdptr->addr = ((uint32_t)dev_data->first_rx_buffer +
		      (buf_iter * (uint32_t)dev_conf->rx_buffer_size)) |
		      ETH_XLNX_GEM_RXBD_WRAP_BIT;

	/*
	 * Set initial TX BD data -> comp. Zynq-7000 TRM, Chapter 16.3.5,
	 * "Transmit Buffer Descriptor List". TX BD ring data has already
	 * been set up in eth_xlnx_gem_iface_init()
	 */
	bdptr = dev_data->txbd_ring.first_bd;

	for (buf_iter = 0; buf_iter < (dev_conf->txbd_count - 1); buf_iter++) {
		/* Set up the control word -> 'used' flag must be set. */
		bdptr->ctrl = ETH_XLNX_GEM_TXBD_USED_BIT;
		bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
			      (buf_iter * (uint32_t)dev_conf->tx_buffer_size);
		++bdptr;
	}

	/*
	 * For the last BD, set the 'wrap' bit indicating to the controller
	 * that this BD is the last one in the ring. -> For TX BDs, the 'wrap'
	 * bit isn't located in the address word, but in the control word
	 * instead
	 */
	bdptr->ctrl = (ETH_XLNX_GEM_TXBD_WRAP_BIT | ETH_XLNX_GEM_TXBD_USED_BIT);
	bdptr->addr = (uint32_t)dev_data->first_tx_buffer +
		      (buf_iter * (uint32_t)dev_conf->tx_buffer_size);

	/* Set free count/current index in the RX/TX BD ring data */
	dev_data->rxbd_ring.next_to_process = 0;
	dev_data->rxbd_ring.next_to_use = 0;
	dev_data->rxbd_ring.free_bds = dev_conf->rxbd_count;
	dev_data->txbd_ring.next_to_process = 0;
	dev_data->txbd_ring.next_to_use = 0;
	dev_data->txbd_ring.free_bds = dev_conf->txbd_count;

	/* Write pointers to the first RX/TX BD to the controller */
	sys_write32((uint32_t)dev_data->rxbd_ring.first_bd,
		    dev_conf->base_addr + ETH_XLNX_GEM_RXQBASE_OFFSET);
	sys_write32((uint32_t)dev_data->txbd_ring.first_bd,
		    dev_conf->base_addr + ETH_XLNX_GEM_TXQBASE_OFFSET);
}

/**
 * @brief GEM RX data pending handler wrapper for the work queue
 * Wraps the RX data pending handler, eth_xlnx_gem_handle_rx_pending,
 * for the scenario in which the current GEM device is configured
 * to defer RX pending / TX done indication handling to the system
 * work queue. In this case, the work item received by this wrapper
 * function will be enqueued from within the ISR if the corresponding
 * bit is set within the controller's interrupt status register
 * (gem.intr_status).
 *
 * @param item Pointer to the work item enqueued by the ISR which
 *             facilitates access to the current device's data
 */
static void eth_xlnx_gem_rx_pending_work(struct k_work *item)
{
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item,
		struct eth_xlnx_gem_dev_data, rx_pend_work);
	const struct device *dev = net_if_get_device(dev_data->iface);

	eth_xlnx_gem_handle_rx_pending(dev);
}

/**
 * @brief GEM RX data pending handler
 * This handler is called either from within the ISR or from the
 * context of the system work queue whenever the RX data pending bit
 * is set in the controller's interrupt status register (gem.intr_status).
 * No further RX data pending interrupts will be triggered until this
 * handler has been executed, which eventually clears the corresponding
 * interrupt status bit. This function acquires the incoming packet
 * data from the DMA memory area via the RX buffer descriptors and copies
 * the data to a packet which will then be handed over to the network
 * stack.
 *
 * @param dev Pointer to the device data
 */
static void eth_xlnx_gem_handle_rx_pending(const struct device *dev)
{
	const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config;
	struct eth_xlnx_gem_dev_data *dev_data = dev->data;
	uint32_t reg_addr;
	uint32_t reg_ctrl;
	uint32_t reg_val;
	uint32_t reg_val_rxsr;
	uint8_t first_bd_idx;
	uint8_t last_bd_idx;
	uint8_t curr_bd_idx;
	uint32_t rx_data_length;
	uint32_t rx_data_remaining;
	struct net_pkt *pkt;

	/* Read the RX status register */
	reg_val_rxsr = sys_read32(dev_conf->base_addr +
				  ETH_XLNX_GEM_RXSR_OFFSET);

	/*
	 * TODO Evaluate error flags from RX status register word
	 * here for proper error handling.
	 */

	/* Iterate the RX BD ring until a BD without new data is found */
	while (1) {
		curr_bd_idx = dev_data->rxbd_ring.next_to_process;
		first_bd_idx = last_bd_idx = curr_bd_idx;
		reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].addr);
		reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[first_bd_idx].ctrl);

		/*
		 * Basic precondition checks for the current BD's
		 * address and control words
		 */
		reg_val = sys_read32(reg_addr);
		if ((reg_val & ETH_XLNX_GEM_RXBD_USED_BIT) == 0) {
			/*
			 * No new data contained in the current BD
			 * -> break out of the RX loop
			 */
			break;
		}
		reg_val = sys_read32(reg_ctrl);
		if ((reg_val & ETH_XLNX_GEM_RXBD_START_OF_FRAME_BIT) == 0) {
			/*
			 * Although the current BD is marked as 'used', it
			 * doesn't contain the SOF bit.
			 */
			LOG_ERR("%s unexpected missing SOF bit in RX BD [%u]",
				dev->name, first_bd_idx);
			break;
		}

		/*
		 * As long as the current BD doesn't have the EOF bit set,
		 * iterate forwards until the EOF bit is encountered. Only
		 * the BD containing the EOF bit also contains the length
		 * of the received packet which spans multiple buffers.
		 */
		do {
			reg_ctrl = (uint32_t)(&dev_data->rxbd_ring.first_bd[last_bd_idx].ctrl);
			reg_val = sys_read32(reg_ctrl);
			rx_data_length = rx_data_remaining =
					 (reg_val & ETH_XLNX_GEM_RXBD_FRAME_LENGTH_MASK);
			if ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0) {
				last_bd_idx = (last_bd_idx + 1) % dev_conf->rxbd_count;
			}
		} while ((reg_val & ETH_XLNX_GEM_RXBD_END_OF_FRAME_BIT) == 0);

		/*
		 * Store the position of the first BD behind the end of the
		 * frame currently being processed as 'next to process'
		 */
		dev_data->rxbd_ring.next_to_process = (last_bd_idx + 1) %
						      dev_conf->rxbd_count;

		/*
		 * Allocate a destination packet from the network stack
		 * now that the total frame length is known.
		 */
		pkt = net_pkt_rx_alloc_with_buffer(dev_data->iface,
						   rx_data_length,
						   AF_UNSPEC, 0, K_NO_WAIT);
		if (pkt == NULL) {
			LOG_ERR("RX packet buffer alloc failed: %u bytes",
				rx_data_length);
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			dev_data->stats.errors.rx++;
			dev_data->stats.error_details.rx_no_buffer_count++;
#endif
		}

		/*
		 * Copy data from all involved RX buffers into the allocated
		 * packet's data buffer. If we don't have a packet buffer be-
		 * cause none are available, we still have to iterate over all
		 * involved BDs in order to properly release them for re-use
		 * by the controller.
		 */
		do {
			if (pkt != NULL) {
				net_pkt_write(pkt, (const void *)
					      (dev_data->rxbd_ring.first_bd[curr_bd_idx].addr &
					      ETH_XLNX_GEM_RXBD_BUFFER_ADDR_MASK),
					      (rx_data_remaining < dev_conf->rx_buffer_size) ?
					      rx_data_remaining : dev_conf->rx_buffer_size);
			}
			rx_data_remaining -= (rx_data_remaining < dev_conf->rx_buffer_size) ?
					     rx_data_remaining : dev_conf->rx_buffer_size;

			/*
			 * The entire packet data of the current BD has been
			 * processed, on to the next BD -> preserve the RX BD's
			 * 'wrap' bit & address, but clear the 'used' bit.
			 */
			reg_addr = (uint32_t)(&dev_data->rxbd_ring.first_bd[curr_bd_idx].addr);
			reg_val	 = sys_read32(reg_addr);
			reg_val	&= ~ETH_XLNX_GEM_RXBD_USED_BIT;
			sys_write32(reg_val, reg_addr);

			curr_bd_idx = (curr_bd_idx + 1) % dev_conf->rxbd_count;
		} while (curr_bd_idx != ((last_bd_idx + 1) % dev_conf->rxbd_count));

		/* Propagate the received packet to the network stack */
		if (pkt != NULL) {
			if (net_recv_data(dev_data->iface, pkt) < 0) {
				LOG_ERR("%s RX packet hand-over to IP stack failed",
					dev->name);
				net_pkt_unref(pkt);
			}
#ifdef CONFIG_NET_STATISTICS_ETHERNET
			else {
				dev_data->stats.bytes.received += rx_data_length;
				dev_data->stats.pkts.rx++;
			}
#endif
		}
	}

	/* Clear the RX status register */
	sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_RXSR_OFFSET);
	/* Re-enable the frame received interrupt source */
	sys_write32(ETH_XLNX_GEM_IXR_FRAME_RX_BIT,
		    dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET);
}

/**
 * @brief GEM TX done handler wrapper for the work queue
 * Wraps the TX done handler, eth_xlnx_gem_handle_tx_done,
 * for the scenario in which the current GEM device is configured
 * to defer RX pending / TX done indication handling to the system
 * work queue. In this case, the work item received by this wrapper
 * function will be enqueued from within the ISR if the corresponding
 * bit is set within the controller's interrupt status register
 * (gem.intr_status).
 *
 * @param item Pointer to the work item enqueued by the ISR which
 *             facilitates access to the current device's data
 */
static void eth_xlnx_gem_tx_done_work(struct k_work *item)
{
	struct eth_xlnx_gem_dev_data *dev_data = CONTAINER_OF(item,
		struct eth_xlnx_gem_dev_data, tx_done_work);
	const struct device *dev = net_if_get_device(dev_data->iface);

	eth_xlnx_gem_handle_tx_done(dev);
}

/**
 * @brief GEM TX done handler
 * This handler is called either from within the ISR or from the
 * context of the system work queue whenever the TX done bit is set
 * in the controller's interrupt status register (gem.intr_status).
* No further TX done interrupts will be triggered until this handler * has been executed, which eventually clears the corresponding * interrupt status bit. Once this handler reaches the end of its * execution, the eth_xlnx_gem_send call which effectively triggered * it is unblocked by posting to the current GEM's TX done semaphore * on which the send function is blocking. * * @param dev Pointer to the device data */ static void eth_xlnx_gem_handle_tx_done(const struct device *dev) { const struct eth_xlnx_gem_dev_cfg *dev_conf = dev->config; struct eth_xlnx_gem_dev_data *dev_data = dev->data; uint32_t reg_ctrl; uint32_t reg_val; uint32_t reg_val_txsr; uint8_t curr_bd_idx; uint8_t first_bd_idx; uint8_t bds_processed = 0; uint8_t bd_is_last; /* Read the TX status register */ reg_val_txsr = sys_read32(dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); /* * TODO Evaluate error flags from TX status register word * here for proper error handling */ if (dev_conf->defer_txd_to_queue) { k_sem_take(&(dev_data->txbd_ring.ring_sem), K_FOREVER); } curr_bd_idx = first_bd_idx = dev_data->txbd_ring.next_to_process; reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); reg_val = sys_read32(reg_ctrl); do { ++bds_processed; /* * TODO Evaluate error flags from current BD control word * here for proper error handling */ /* * Check if the BD we're currently looking at is the last BD * of the current transmission */ bd_is_last = ((reg_val & ETH_XLNX_GEM_TXBD_LAST_BIT) != 0) ? 
1 : 0; /* * Reset control word of the current BD, clear everything but * the 'wrap' bit, then set the 'used' bit */ reg_val &= ETH_XLNX_GEM_TXBD_WRAP_BIT; reg_val |= ETH_XLNX_GEM_TXBD_USED_BIT; sys_write32(reg_val, reg_ctrl); /* Move on to the next BD or break out of the loop */ if (bd_is_last == 1) { break; } curr_bd_idx = (curr_bd_idx + 1) % dev_conf->txbd_count; reg_ctrl = (uint32_t)(&dev_data->txbd_ring.first_bd[curr_bd_idx].ctrl); reg_val = sys_read32(reg_ctrl); } while (bd_is_last == 0 && curr_bd_idx != first_bd_idx); if (curr_bd_idx == first_bd_idx && bd_is_last == 0) { LOG_WRN("%s TX done handling wrapped around", dev->name); } dev_data->txbd_ring.next_to_process = (dev_data->txbd_ring.next_to_process + bds_processed) % dev_conf->txbd_count; dev_data->txbd_ring.free_bds += bds_processed; if (dev_conf->defer_txd_to_queue) { k_sem_give(&(dev_data->txbd_ring.ring_sem)); } /* Clear the TX status register */ sys_write32(0xFFFFFFFF, dev_conf->base_addr + ETH_XLNX_GEM_TXSR_OFFSET); /* Re-enable the TX complete interrupt source */ sys_write32(ETH_XLNX_GEM_IXR_TX_COMPLETE_BIT, dev_conf->base_addr + ETH_XLNX_GEM_IER_OFFSET); /* Indicate completion to a blocking eth_xlnx_gem_send() call */ k_sem_give(&dev_data->tx_done_sem); } ```
/content/code_sandbox/drivers/ethernet/eth_xlnx_gem.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
16,672
```c /* * */ #define DT_DRV_COMPAT smsc_lan9220 /* SMSC911x/SMSC9220 driver. Partly based on mbedOS driver. */ #define LOG_MODULE_NAME eth_smsc911x #define LOG_LEVEL CONFIG_ETHERNET_LOG_LEVEL #include <zephyr/logging/log.h> LOG_MODULE_REGISTER(LOG_MODULE_NAME); #include <soc.h> #include <zephyr/device.h> #include <errno.h> #include <zephyr/init.h> #include <zephyr/kernel.h> #include <zephyr/sys/__assert.h> #include <zephyr/net/net_core.h> #include <zephyr/net/net_pkt.h> #include <stdbool.h> #include <stdio.h> #include <string.h> #include <zephyr/sys/sys_io.h> #include <zephyr/net/ethernet.h> #include <zephyr/irq.h> #include "ethernet/eth_stats.h" #ifdef CONFIG_SHARED_IRQ #include <zephyr/shared_irq.h> #endif #include "eth_smsc911x_priv.h" #define RESET_TIMEOUT 10 #define PHY_RESET_TIMEOUT K_MSEC(100) #define REG_WRITE_TIMEOUT 50 /* Controller has only one PHY with address 1 */ #define PHY_ADDR 1 struct eth_context { struct net_if *iface; uint8_t mac[6]; #if defined(CONFIG_NET_STATISTICS_ETHERNET) struct net_stats_eth stats; #endif }; /* SMSC911x helper functions */ static int smsc_mac_regread(uint8_t reg, uint32_t *val) { uint32_t cmd = MAC_CSR_CMD_BUSY | MAC_CSR_CMD_READ | reg; SMSC9220->MAC_CSR_CMD = cmd; while ((SMSC9220->MAC_CSR_CMD & MAC_CSR_CMD_BUSY) != 0) { } *val = SMSC9220->MAC_CSR_DATA; return 0; } static int smsc_mac_regwrite(uint8_t reg, uint32_t val) { uint32_t cmd = MAC_CSR_CMD_BUSY | MAC_CSR_CMD_WRITE | reg; SMSC9220->MAC_CSR_DATA = val; SMSC9220->MAC_CSR_CMD = cmd; while ((SMSC9220->MAC_CSR_CMD & MAC_CSR_CMD_BUSY) != 0) { } return 0; } int smsc_phy_regread(uint8_t regoffset, uint32_t *data) { uint32_t val = 0U; uint32_t phycmd = 0U; unsigned int time_out = REG_WRITE_TIMEOUT; if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &val) < 0) { return -1; } if (val & MAC_MII_ACC_MIIBZY) { *data = 0U; return -EBUSY; } phycmd = 0U; phycmd |= PHY_ADDR << 11; phycmd |= (regoffset & 0x1F) << 6; phycmd |= MAC_MII_ACC_READ; phycmd |= MAC_MII_ACC_MIIBZY; /* Operation 
start */ if (smsc_mac_regwrite(SMSC9220_MAC_MII_ACC, phycmd)) { return -1; } val = 0U; do { k_sleep(K_MSEC(1)); time_out--; if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &val)) { return -1; } } while (time_out != 0U && (val & MAC_MII_ACC_MIIBZY)); if (time_out == 0U) { return -ETIMEDOUT; } if (smsc_mac_regread(SMSC9220_MAC_MII_DATA, data) < 0) { return -1; } return 0; } int smsc_phy_regwrite(uint8_t regoffset, uint32_t data) { uint32_t val = 0U; uint32_t phycmd = 0U; unsigned int time_out = REG_WRITE_TIMEOUT; if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &val) < 0) { return -1; } if (val & MAC_MII_ACC_MIIBZY) { return -EBUSY; } if (smsc_mac_regwrite(SMSC9220_MAC_MII_DATA, data & 0xFFFF) < 0) { return -1; } phycmd |= PHY_ADDR << 11; phycmd |= (regoffset & 0x1F) << 6; phycmd |= MAC_MII_ACC_WRITE; phycmd |= MAC_MII_ACC_MIIBZY; /* Operation start */ if (smsc_mac_regwrite(SMSC9220_MAC_MII_ACC, phycmd) < 0) { return -1; } do { k_sleep(K_MSEC(1)); time_out--; if (smsc_mac_regread(SMSC9220_MAC_MII_ACC, &phycmd)) { return -1; } } while (time_out != 0U && (phycmd & MAC_MII_ACC_MIIBZY)); if (time_out == 0U) { return -ETIMEDOUT; } return 0; } static int smsc_read_mac_address(uint8_t *mac) { uint32_t tmp; int res; res = smsc_mac_regread(SMSC9220_MAC_ADDRL, &tmp); if (res < 0) { return res; } mac[0] = (uint8_t)(tmp >> 0); mac[1] = (uint8_t)(tmp >> 8); mac[2] = (uint8_t)(tmp >> 16); mac[3] = (uint8_t)(tmp >> 24); res = smsc_mac_regread(SMSC9220_MAC_ADDRH, &tmp); if (res < 0) { return res; } mac[4] = (uint8_t)(tmp >> 0); mac[5] = (uint8_t)(tmp >> 8); return 0; } static int smsc_check_id(void) { uint32_t id = SMSC9220->ID_REV; /* If bottom and top halves of the word are the same, * the hardware is (likely) not present. 
*/ if (((id >> 16) & 0xFFFF) == (id & 0xFFFF)) { return -1; } switch (((id >> 16) & 0xFFFF)) { case 0x9220: /* SMSC9220 on MPS2 */ case 0x0118: /* SMS9118 as emulated by QEMU */ break; default: return -1; } return 0; } static int smsc_soft_reset(void) { unsigned int time_out = RESET_TIMEOUT; SMSC9220->HW_CFG |= HW_CFG_SRST; do { k_sleep(K_MSEC(1)); time_out--; } while (time_out != 0U && (SMSC9220->HW_CFG & HW_CFG_SRST)); if (time_out == 0U) { return -1; } return 0; } void smsc_set_txfifo(unsigned int val) { /* 2kb minimum, 14kb maximum */ if (val >= 2U && val <= 14U) { SMSC9220->HW_CFG = val << 16; } } void smsc_init_irqs(void) { SMSC9220->INT_EN = 0; /* Clear all interrupts */ SMSC9220->INT_STS = 0xFFFFFFFF; /* Polarity config which works with QEMU */ /* IRQ deassertion at 220 usecs and master IRQ enable */ SMSC9220->IRQ_CFG = 0x22000111; } static int smsc_check_phy(void) { uint32_t phyid1, phyid2; if (smsc_phy_regread(SMSC9220_PHY_ID1, &phyid1)) { return -1; } if (smsc_phy_regread(SMSC9220_PHY_ID2, &phyid2)) { return -1; } return ((phyid1 == 0xFFFF && phyid2 == 0xFFFF) || (phyid1 == 0x0 && phyid2 == 0x0)); } int smsc_reset_phy(void) { uint32_t val; if (smsc_phy_regread(SMSC9220_PHY_BCONTROL, &val)) { return -1; } val |= 1 << 15; if (smsc_phy_regwrite(SMSC9220_PHY_BCONTROL, val)) { return -1; } return 0; } /** * Advertise all speeds and pause capabilities */ void smsc_advertise_caps(void) { uint32_t aneg_adv = 0U; smsc_phy_regread(SMSC9220_PHY_ANEG_ADV, &aneg_adv); aneg_adv |= 0xDE0; smsc_phy_regwrite(SMSC9220_PHY_ANEG_ADV, aneg_adv); smsc_phy_regread(SMSC9220_PHY_ANEG_ADV, &aneg_adv); } void smsc_establish_link(void) { uint32_t bcr = 0U; uint32_t hw_cfg = 0U; smsc_phy_regread(SMSC9220_PHY_BCONTROL, &bcr); bcr |= (1 << 12) | (1 << 9); smsc_phy_regwrite(SMSC9220_PHY_BCONTROL, bcr); smsc_phy_regread(SMSC9220_PHY_BCONTROL, &bcr); hw_cfg = SMSC9220->HW_CFG; hw_cfg &= 0xF0000; hw_cfg |= (1 << 20); SMSC9220->HW_CFG = hw_cfg; } static inline void smsc_enable_xmit(void) { 
SMSC9220->TX_CFG = 0x2 /*TX_CFG_TX_ON*/; } void smsc_enable_mac_xmit(void) { uint32_t mac_cr = 0U; smsc_mac_regread(SMSC9220_MAC_CR, &mac_cr); mac_cr |= (1 << 3); /* xmit enable */ mac_cr |= (1 << 28); /* Heartbeat disable */ smsc_mac_regwrite(SMSC9220_MAC_CR, mac_cr); } void smsc_enable_mac_recv(void) { uint32_t mac_cr = 0U; smsc_mac_regread(SMSC9220_MAC_CR, &mac_cr); mac_cr |= (1 << 2); /* Recv enable */ smsc_mac_regwrite(SMSC9220_MAC_CR, mac_cr); } int smsc_init(void) { unsigned int phyreset = 0U; if (smsc_check_id() < 0) { return -1; } if (smsc_soft_reset() < 0) { return -1; } smsc_set_txfifo(5); /* Sets automatic flow control thresholds, and backpressure */ /* threshold to defaults specified. */ SMSC9220->AFC_CFG = 0x006E3740; /* May need to initialize EEPROM/read MAC from it on real HW. */ /* Configure GPIOs as LED outputs. */ SMSC9220->GPIO_CFG = 0x70070000; smsc_init_irqs(); /* Configure MAC addresses here if needed. */ if (smsc_check_phy() < 0) { return -1; } if (smsc_reset_phy() < 0) { return -1; } k_sleep(PHY_RESET_TIMEOUT); /* Checking whether phy reset completed successfully.*/ if (smsc_phy_regread(SMSC9220_PHY_BCONTROL, &phyreset)) { return 1; } if (phyreset & (1 << 15)) { return 1; } smsc_advertise_caps(); /* bit [12] of BCONTROL seems self-clearing. */ /* Although it's not so in the manual. */ smsc_establish_link(); /* Interrupt threshold */ SMSC9220->FIFO_INT = 0xFF000000; smsc_enable_mac_xmit(); smsc_enable_xmit(); SMSC9220->RX_CFG = 0; smsc_enable_mac_recv(); /* Rx status FIFO level irq threshold */ SMSC9220->FIFO_INT &= ~(0xFF); /* Clear 2 bottom nibbles */ /* This sleep is compulsory otherwise txmit/receive will fail. 
*/ k_sleep(K_MSEC(2000)); return 0; } /* Driver functions */ static enum ethernet_hw_caps eth_smsc911x_get_capabilities(const struct device *dev) { ARG_UNUSED(dev); return ETHERNET_LINK_10BASE_T | ETHERNET_LINK_100BASE_T; } #if defined(CONFIG_NET_STATISTICS_ETHERNET) static struct net_stats_eth *get_stats(const struct device *dev) { struct eth_context *context = dev->data; return &context->stats; } #endif static void eth_initialize(struct net_if *iface) { const struct device *dev = net_if_get_device(iface); struct eth_context *context = dev->data; LOG_DBG("eth_initialize"); smsc_read_mac_address(context->mac); SMSC9220->INT_EN |= BIT(SMSC9220_INTERRUPT_RXSTATUS_FIFO_LEVEL); net_if_set_link_addr(iface, context->mac, sizeof(context->mac), NET_LINK_ETHERNET); context->iface = iface; ethernet_init(iface); } static int smsc_write_tx_fifo(const uint8_t *buf, uint32_t len, bool is_last) { uint32_t *buf32; __ASSERT_NO_MSG(((uintptr_t)buf & 3) == 0); if (is_last) { /* Last fragment may be not full */ len = (len + 3) & ~3; } if ((len & 3) != 0U || len == 0U) { LOG_ERR("Chunk size not aligned: %u", len); return -1; } buf32 = (uint32_t *)buf; len /= 4U; do { SMSC9220->TX_DATA_PORT = *buf32++; } while (--len); return 0; } static int eth_tx(const struct device *dev, struct net_pkt *pkt) { uint16_t total_len = net_pkt_get_len(pkt); static uint8_t tx_buf[NET_ETH_MAX_FRAME_SIZE] __aligned(4); uint32_t txcmd_a, txcmd_b; uint32_t tx_stat; int res; txcmd_a = (1/*is_first_segment*/ << 13) | (1/*is_last_segment*/ << 12) | total_len; /* Use len as a tag */ txcmd_b = total_len << 16 | total_len; SMSC9220->TX_DATA_PORT = txcmd_a; SMSC9220->TX_DATA_PORT = txcmd_b; if (net_pkt_read(pkt, tx_buf, total_len)) { goto error; } res = smsc_write_tx_fifo(tx_buf, total_len, true); if (res < 0) { goto error; } tx_stat = SMSC9220->TX_STAT_PORT; LOG_DBG("TX_STAT: %x", tx_stat); return 0; error: LOG_ERR("Writing pkt to FIFO failed"); return -1; } static const struct ethernet_api api_funcs = { 
	.iface_api.init = eth_initialize,
	.get_capabilities = eth_smsc911x_get_capabilities,
	.send = eth_tx,
#if defined(CONFIG_NET_STATISTICS_ETHERNET)
	.get_stats = get_stats,
#endif
};

/* Kick off a fast-forward discard of the packet at the RX FIFO head */
static void smsc_discard_pkt(void)
{
	/* TODO: */
	/* Datasheet p.43: */
	/* When performing a fast-forward, there must be at least 4 DWORDs
	 * of data in the RX data FIFO for the packet being discarded. For
	 * less than 4 DWORDs do not use RX_FFWD. In this case data must be
	 * read from the RX data FIFO and discarded using standard PIO read
	 * operations.
	 */
	SMSC9220->RX_DP_CTRL = RX_DP_CTRL_RX_FFWD;
}

/* Busy-wait until a previously started fast-forward discard finishes */
static inline void smsc_wait_discard_pkt(void)
{
	while ((SMSC9220->RX_DP_CTRL & RX_DP_CTRL_RX_FFWD) != 0) {
	}
}

/*
 * Drain 'len' bytes (must be a non-zero multiple of 4) from the RX
 * data FIFO into the given net_pkt, one DWORD at a time.
 * Returns 0 on success, -1 if the packet buffer ran out of space.
 */
static int smsc_read_rx_fifo(struct net_pkt *pkt, uint32_t len)
{
	uint32_t buf32;

	__ASSERT_NO_MSG((len & 3) == 0U && len >= 4U);
	len /= 4U;

	do {
		buf32 = SMSC9220->RX_DATA_PORT;
		if (net_pkt_write(pkt, &buf32, sizeof(uint32_t))) {
			return -1;
		}
	} while (--len);

	return 0;
}

/*
 * Receive one frame of 'pkt_size' bytes (including FCS) from the RX
 * FIFO into a freshly allocated net_pkt. On any failure the frame is
 * discarded from the FIFO and NULL is returned.
 * NOTE(review): rem_size = round_up(pkt_size) - 4 underflows for
 * pkt_size < 4; presumably the hardware never reports such runt
 * frames here — confirm against the datasheet.
 */
static struct net_pkt *smsc_recv_pkt(const struct device *dev, uint32_t pkt_size)
{
	struct eth_context *context = dev->data;
	struct net_pkt *pkt;
	uint32_t rem_size;

	/* Round up to next DWORD size */
	rem_size = (pkt_size + 3) & ~3;
	/* Don't account for FCS when filling net pkt */
	rem_size -= 4U;

	pkt = net_pkt_rx_alloc_with_buffer(context->iface, rem_size,
					   AF_UNSPEC, 0, K_NO_WAIT);
	if (!pkt) {
		LOG_ERR("Failed to obtain RX buffer");
		smsc_discard_pkt();
		return NULL;
	}

	if (smsc_read_rx_fifo(pkt, rem_size) < 0) {
		smsc_discard_pkt();
		net_pkt_unref(pkt);
		return NULL;
	}

	/* Discard FCS */
	{
		uint32_t __unused dummy = SMSC9220->RX_DATA_PORT;
	}

	/* Adjust len of the last buf down for DWORD alignment */
	if (pkt_size & 3) {
		net_pkt_update_length(pkt,
				      net_pkt_get_len(pkt) - (4 - (pkt_size & 3)));
	}

	return pkt;
}

/*
 * Interrupt service routine: serves at most one received frame per
 * invocation, acknowledging the RX status interrupt only once the RX
 * status FIFO is empty (see comment below).
 */
static void eth_smsc911x_isr(const struct device *dev)
{
	uint32_t int_status = SMSC9220->INT_STS;
	struct eth_context *context = dev->data;

	LOG_DBG("%s: INT_STS=%x INT_EN=%x", __func__,
		int_status, SMSC9220->INT_EN);

	if (int_status & BIT(SMSC9220_INTERRUPT_RXSTATUS_FIFO_LEVEL)) {
		struct net_pkt *pkt;
		uint32_t pkt_size, val;
		uint32_t rx_stat;

		val = SMSC9220->RX_FIFO_INF;
		uint32_t pkt_pending = BFIELD(val, RX_FIFO_INF_RXSUSED);

		LOG_DBG("in RX FIFO: pkts: %u, bytes: %u", pkt_pending,
			BFIELD(val, RX_FIFO_INF_RXDUSED));

		/* Ack rxstatus_fifo_level only when no packets pending. The
		 * idea is to serve 1 packet per interrupt (e.g. to allow
		 * higher priority interrupts to fire) by keeping interrupt
		 * pending for as long as there're packets in FIFO. And when
		 * there's none, finally acknowledge it.
		 */
		if (pkt_pending == 0U) {
			goto done;
		}
		int_status &= ~BIT(SMSC9220_INTERRUPT_RXSTATUS_FIFO_LEVEL);

		/* Make sure that any previously started discard op is
		 * finished.
		 */
		smsc_wait_discard_pkt();

		rx_stat = SMSC9220->RX_STAT_PORT;
		pkt_size = BFIELD(rx_stat, RX_STAT_PORT_PKT_LEN);
		LOG_DBG("pkt sz: %u", pkt_size);

		pkt = smsc_recv_pkt(dev, pkt_size);

		LOG_DBG("out RX FIFO: pkts: %u, bytes: %u",
			SMSC9220_BFIELD(RX_FIFO_INF, RXSUSED),
			SMSC9220_BFIELD(RX_FIFO_INF, RXDUSED));

		if (pkt != NULL) {
			int res = net_recv_data(context->iface, pkt);

			if (res < 0) {
				/* Stack rejected the packet; release it */
				LOG_ERR("net_recv_data: %d", res);
				net_pkt_unref(pkt);
			}
		}
	}

done:
	/* Ack pending interrupts */
	SMSC9220->INT_STS = int_status;
}

/* Bindings to the platform */

/* Device init: hook up the IRQ, bring up the controller, enable IRQ */
int eth_init(const struct device *dev)
{
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority),
		    eth_smsc911x_isr, DEVICE_DT_INST_GET(0), 0);

	int ret = smsc_init();

	if (ret != 0) {
		LOG_ERR("smsc911x failed to initialize");
		return -ENODEV;
	}

	irq_enable(DT_INST_IRQN(0));

	return ret;
}

static struct eth_context eth_0_context;

ETH_NET_DEVICE_DT_INST_DEFINE(0,
			      eth_init, NULL, &eth_0_context,
			      NULL /*&eth_config_0*/, CONFIG_ETH_INIT_PRIORITY,
			      &api_funcs, NET_ETH_MTU /*MTU*/);
```
/content/code_sandbox/drivers/ethernet/eth_smsc911x.c
c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
4,567
```objective-c /* * Xilinx Processor System Gigabit Ethernet controller (GEM) driver * * PHY management interface and related data * */ #ifndef _ZEPHYR_DRIVERS_ETHERNET_PHY_XLNX_GEM_H_ #define _ZEPHYR_DRIVERS_ETHERNET_PHY_XLNX_GEM_H_ #include <zephyr/kernel.h> #include <zephyr/types.h> /* Event codes used to indicate a particular state change to the driver */ #define PHY_XLNX_GEM_EVENT_LINK_SPEED_CHANGED (1 << 0) #define PHY_XLNX_GEM_EVENT_LINK_STATE_CHANGED (1 << 1) #define PHY_XLNX_GEM_EVENT_AUTONEG_COMPLETE (1 << 2) /* PHY register addresses & constants that are not vendor-specific */ #define PHY_IDENTIFIER_1_REGISTER 2 #define PHY_IDENTIFIER_2_REGISTER 3 /* PHY registers & constants -> Marvell Alaska specific */ /* Marvell PHY ID bits [3..0] = revision -> discard during ID check */ #define PHY_MRVL_PHY_ID_MODEL_MASK 0xFFFFFFF0 #define PHY_MRVL_PHY_ID_MODEL_88E1111 0x01410CC0 #define PHY_MRVL_PHY_ID_MODEL_88E151X 0x01410DD0 #define PHY_MRVL_BASE_REGISTERS_PAGE 0 #define PHY_MRVL_COPPER_CONTROL_REGISTER 0 #define PHY_MRVL_COPPER_STATUS_REGISTER 1 #define PHY_MRVL_COPPER_AUTONEG_ADV_REGISTER 4 #define PHY_MRVL_COPPER_LINK_PARTNER_ABILITY_REGISTER 5 #define PHY_MRVL_1000BASET_CONTROL_REGISTER 9 #define PHY_MRVL_COPPER_CONTROL_1_REGISTER 16 #define PHY_MRVL_COPPER_STATUS_1_REGISTER 17 #define PHY_MRVL_COPPER_INT_ENABLE_REGISTER 18 #define PHY_MRVL_COPPER_INT_STATUS_REGISTER 19 #define PHY_MRVL_COPPER_PAGE_SWITCH_REGISTER 22 #define PHY_MRVL_GENERAL_CONTROL_1_REGISTER 20 #define PHY_MRVL_GENERAL_CONTROL_1_PAGE 18 #define PHY_MRVL_GENERAL_CONTROL_1_RESET_BIT (1 << 15) #define PHY_MRVL_COPPER_CONTROL_RESET_BIT (1 << 15) #define PHY_MRVL_COPPER_CONTROL_AUTONEG_ENABLE_BIT (1 << 12) #define PHY_MRVL_ADV_1000BASET_FDX_BIT (1 << 9) #define PHY_MRVL_ADV_1000BASET_HDX_BIT (1 << 8) #define PHY_MRVL_ADV_100BASET_FDX_BIT (1 << 8) #define PHY_MRVL_ADV_100BASET_HDX_BIT (1 << 7) #define PHY_MRVL_ADV_10BASET_FDX_BIT (1 << 6) #define PHY_MRVL_ADV_10BASET_HDX_BIT (1 << 5) #define 
PHY_MRVL_ADV_SELECTOR_802_3 0x0001 #define PHY_MRVL_MDIX_CONFIG_MASK 0x0003 #define PHY_MRVL_MDIX_CONFIG_SHIFT 5 #define PHY_MRVL_MDIX_AUTO_CROSSOVER_ENABLE 0x0003 #define PHY_MRVL_MODE_CONFIG_MASK 0x0007 #define PHY_MRVL_MODE_CONFIG_SHIFT 0 #define PHY_MRVL_COPPER_SPEED_CHANGED_INT_BIT (1 << 14) #define PHY_MRVL_COPPER_DUPLEX_CHANGED_INT_BIT (1 << 13) #define PHY_MRVL_COPPER_AUTONEG_COMPLETED_INT_BIT (1 << 11) #define PHY_MRVL_COPPER_LINK_STATUS_CHANGED_INT_BIT (1 << 10) #define PHY_MRVL_COPPER_LINK_STATUS_BIT_SHIFT 5 #define PHY_MRVL_LINK_SPEED_SHIFT 14 #define PHY_MRVL_LINK_SPEED_MASK 0x3 #define PHY_MRVL_LINK_SPEED_10MBIT 0 #define PHY_MRVL_LINK_SPEED_100MBIT 1 #define PHY_MRVL_LINK_SPEED_1GBIT 2 /*TI TLK105 & DP83822*/ /* TI PHY ID bits [3..0] = revision -> discard during ID check */ #define PHY_TI_PHY_ID_MODEL_MASK 0xFFFFFFF0 #define PHY_TI_PHY_ID_MODEL_DP83822 0x2000A240 #define PHY_TI_PHY_ID_MODEL_TLK105 0x2000A210 #define PHY_TI_PHY_SPECIFIC_CONTROL_REGISTER 0x0010 #define PHY_TI_BASIC_MODE_CONTROL_REGISTER 0x0000 #define PHY_TI_BASIC_MODE_STATUS_REGISTER 0x0001 #define PHY_TI_AUTONEG_ADV_REGISTER 0x0004 #define PHY_TI_CONTROL_REGISTER_1 0x0009 #define PHY_TI_PHY_STATUS_REGISTER 0x0010 #define PHY_TI_MII_INTERRUPT_STATUS_REGISTER_1 0x0012 #define PHY_TI_LED_CONTROL_REGISTER 0x0018 #define PHY_TI_PHY_CONTROL_REGISTER 0x0019 #define PHY_TI_BASIC_MODE_CONTROL_RESET_BIT (1 << 15) #define PHY_TI_BASIC_MODE_CONTROL_AUTONEG_ENABLE_BIT (1 << 12) #define PHY_TI_BASIC_MODE_STATUS_LINK_STATUS_BIT (1 << 2) #define PHY_TI_LINK_STATUS_CHANGED_INT_BIT (1 << 13) #define PHY_TI_SPEED_CHANGED_INT_BIT (1 << 12) #define PHY_TI_DUPLEX_CHANGED_INT_BIT (1 << 11) #define PHY_TI_AUTONEG_COMPLETED_INT_BIT (1 << 10) #define PHY_TI_ADV_SELECTOR_802_3 0x0001 #define PHY_TI_ADV_100BASET_FDX_BIT (1 << 8) #define PHY_TI_ADV_100BASET_HDX_BIT (1 << 7) #define PHY_TI_ADV_10BASET_FDX_BIT (1 << 6) #define PHY_TI_ADV_10BASET_HDX_BIT (1 << 5) #define PHY_TI_CR1_ROBUST_AUTO_MDIX_BIT (1 << 5) 
#define PHY_TI_PHY_CONTROL_AUTO_MDIX_ENABLE_BIT (1 << 15) #define PHY_TI_PHY_CONTROL_FORCE_MDIX_BIT (1 << 14) #define PHY_TI_PHY_CONTROL_LED_CONFIG_LINK_ONLY_BIT (1 << 5) #define PHY_TI_LED_CONTROL_BLINK_RATE_SHIFT 9 #define PHY_TI_LED_CONTROL_BLINK_RATE_20HZ 0 #define PHY_TI_LED_CONTROL_BLINK_RATE_10HZ 1 #define PHY_TI_LED_CONTROL_BLINK_RATE_5HZ 2 #define PHY_TI_LED_CONTROL_BLINK_RATE_2HZ 3 #define PHY_TI_PHY_STATUS_LINK_BIT (1 << 0) #define PHY_TI_PHY_STATUS_SPEED_BIT (1 << 1) /** * @brief Vendor-specific PHY management function pointer table struct * * Contains the PHY management function pointers for a specific PHY * make or model. */ struct phy_xlnx_gem_api { void (*phy_reset_func)(const struct device *dev); void (*phy_configure_func)(const struct device *dev); uint16_t (*phy_poll_status_change_func)(const struct device *dev); uint8_t (*phy_poll_link_status_func)(const struct device *dev); enum eth_xlnx_link_speed (*phy_poll_link_speed_func)(const struct device *dev); }; /** * @brief Supported PHY list entry struct * * Contains the PHY management function pointers for a specific PHY * make or model. */ struct phy_xlnx_gem_supported_dev { uint32_t phy_id; uint32_t phy_id_mask; struct phy_xlnx_gem_api *api; const char *identifier; }; /* PHY identification function -> generic, not vendor-specific */ int phy_xlnx_gem_detect(const struct device *dev); #endif /* _ZEPHYR_DRIVERS_ETHERNET_PHY_XLNX_GEM_H_ */ ```
/content/code_sandbox/drivers/ethernet/phy_xlnx_gem.h
objective-c
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
1,699
```unknown
menuconfig ETH_SMSC911X
	bool "SMSC911x/9220 Ethernet driver"
	default y
	depends on DT_HAS_SMSC_LAN9220_ENABLED
	help
	  Enable driver for SMSC/LAN911x/9220 family of chips.

config ETH_NIC_MODEL
	string
	default "lan9118"
	depends on ETH_SMSC911X
	help
	  Tells which QEMU network model to use. This value is given as
	  a parameter to the -nic QEMU command line option.
```
/content/code_sandbox/drivers/ethernet/Kconfig.smsc911x
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
109
```unknown
# Distributed Switch Architecture [DSA] configuration options

# Lukasz Majewski <lukma@denx.de>

config ETH_DSA_SUPPORT
	bool
	help
	  Set by an ethernet driver that supports DSA.

menuconfig NET_DSA
	bool "Distributed Switch Architecture support"
	depends on ETH_DSA_SUPPORT
	help
	  Enable Distributed Switch Architecture support. For now it only
	  supports Kinetis and STM32 ENET drivers.

if NET_DSA

config DSA_KSZ8XXX
	bool

config DSA_KSZ8794
	bool "Support for KSZ8794"
	default y
	depends on DT_HAS_MICROCHIP_KSZ8794_ENABLED
	select DSA_KSZ8XXX
	select SPI if $(dt_compat_on_bus,$(DT_COMPAT_MICROCHIP_KSZ8794),spi)
	help
	  Add support for KSZ8794 DSA device driver.

config DSA_KSZ8863
	bool "Support for KSZ8863"
	default y
	depends on DT_HAS_MICROCHIP_KSZ8863_ENABLED
	select DSA_KSZ8XXX
	select SPI if $(dt_compat_on_bus,$(DT_COMPAT_MICROCHIP_KSZ8863),spi)
	help
	  Add support for KSZ8863 DSA device driver.

config DSA_KSZ_TAIL_TAGGING
	bool "Support for tail tagging"
	depends on DSA_KSZ8794 || DSA_KSZ8863
	help
	  Add support for tail tagging on DSA device.

config DSA_SPI
	bool "Support for PHY SPI interface"
	depends on SPI && (DSA_KSZ8794 || DSA_KSZ8863)
	help
	  Use the SPI bus to communicate with the PHY.

module = NET_DSA
module-dep = NET_LOG
module-str = Log level for DSA
module-help = Enables core DSA code to output debug messages.
source "subsys/net/Kconfig.template.log_config.net"

endif # NET_DSA
```
/content/code_sandbox/drivers/ethernet/Kconfig.dsa
unknown
2016-05-26T17:54:19
2024-08-16T18:09:06
zephyr
zephyrproject-rtos/zephyr
10,307
410