commit_title
stringlengths
13
124
commit_body
stringlengths
0
1.9k
release_summary
stringclasses
52 values
changes_summary
stringlengths
1
758
release_affected_domains
stringclasses
33 values
release_affected_drivers
stringclasses
51 values
domain_of_changes
stringlengths
2
571
language_set
stringclasses
983 values
diffstat_files
int64
1
300
diffstat_insertions
int64
0
309k
diffstat_deletions
int64
0
168k
commit_diff
stringlengths
92
23.4M
category
stringclasses
108 values
commit_hash
stringlengths
34
40
related_people
stringlengths
0
370
domain
stringclasses
21 values
subdomain
stringclasses
241 values
leaf_module
stringlengths
0
912
ath11k: add support for sta to handle beacon miss
when ap goes down without any indication to sta, firmware detects missing beacon, and sends wmi roam event with reason beacon_miss to the host.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for sta to handle beacon miss
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ath11k ']
['h', 'c']
4
81
13
--- diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h +#define ath11k_connection_loss_hz (3 * hz) + struct delayed_work connection_loss_work; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c +static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct sk_buff *skb = data; + struct ieee80211_mgmt *mgmt = (void *)skb->data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + + if (vif->type != nl80211_iftype_station) + return; + + if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) + return; + + cancel_delayed_work(&arvif->connection_loss_work); +} + +void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iface_iter_normal, + ath11k_mac_handle_beacon_iter, + skb); +} + +static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + u32 *vdev_id = data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k *ar = arvif->ar; + struct ieee80211_hw *hw = ar->hw; + + if (arvif->vdev_id != *vdev_id) + return; + + if (!arvif->is_up) + return; + + ieee80211_beacon_loss(vif); + + /* firmware doesn't report beacon loss events repeatedly. if ap probe + * (done by mac80211) succeeds but beacons do not resume then it + * doesn't make sense to continue operation. queue connection loss work + * which can be cancelled when beacon is received. 
+ */ + ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, + ath11k_connection_loss_hz); +} + +void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id) +{ + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iface_iter_normal, + ath11k_mac_handle_beacon_miss_iter, + &vdev_id); +} + +static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work) +{ + struct ath11k_vif *arvif = container_of(work, struct ath11k_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->vif; + + if (!arvif->is_up) + return; + + ieee80211_connection_loss(vif); +} + - /* todo: cancel connection_loss_work */ + cancel_delayed_work_sync(&arvif->connection_loss_work); - - /* should we initialize any worker to handle connection loss indication - * from firmware in sta mode? - */ + init_delayed_work(&arvif->connection_loss_work, + ath11k_mac_vif_sta_connection_loss_work); + cancel_delayed_work_sync(&arvif->connection_loss_work); + diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h --- a/drivers/net/wireless/ath/ath11k/mac.h +++ b/drivers/net/wireless/ath/ath11k/mac.h +void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); +void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c - /* todo: pending handle beacon implementation - *if (ieee80211_is_beacon(hdr->frame_control)) - * ath11k_mac_handle_beacon(ar, skb); - */ + if (ieee80211_is_beacon(hdr->frame_control)) + ath11k_mac_handle_beacon(ar, skb); - /* todo: pending beacon miss and connection_loss_work - * implementation - * ath11k_mac_handle_beacon_miss(ar, vdev_id); - */ + ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
Networking
26f6979237293e93d3f165a0f3af9d967596b2c4
lavanya suresh
drivers
net
ath, ath11k, wireless
bnxt_en: add pci ids for hyper-v vf devices.
support vf device ids used by the hyper-v hypervisor.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add pci ids for hyper-v vf devices
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['c']
1
18
1
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c + netxtreme_c_vf_hv, + netxtreme_e_vf_hv, + netxtreme_e_p5_vf_hv, + [netxtreme_c_vf_hv] = { "broadcom netxtreme-c virtual function for hyper-v" }, + [netxtreme_e_vf_hv] = { "broadcom netxtreme-e virtual function for hyper-v" }, + [netxtreme_e_p5_vf_hv] = { "broadcom bcm5750x netxtreme-e virtual function for hyper-v" }, + { pci_vdevice(broadcom, 0x1607), .driver_data = netxtreme_e_vf_hv }, + { pci_vdevice(broadcom, 0x1608), .driver_data = netxtreme_e_vf_hv }, + { pci_vdevice(broadcom, 0x16bd), .driver_data = netxtreme_e_vf_hv }, + { pci_vdevice(broadcom, 0x16c2), .driver_data = netxtreme_c_vf_hv }, + { pci_vdevice(broadcom, 0x16c3), .driver_data = netxtreme_c_vf_hv }, + { pci_vdevice(broadcom, 0x16c4), .driver_data = netxtreme_e_vf_hv }, + { pci_vdevice(broadcom, 0x16c5), .driver_data = netxtreme_e_vf_hv }, + { pci_vdevice(broadcom, 0x16e6), .driver_data = netxtreme_c_vf_hv }, + { pci_vdevice(broadcom, 0x1808), .driver_data = netxtreme_e_p5_vf_hv }, + { pci_vdevice(broadcom, 0x1809), .driver_data = netxtreme_e_p5_vf_hv }, - idx == netxtreme_s_vf || idx == netxtreme_e_p5_vf); + idx == netxtreme_s_vf || idx == netxtreme_c_vf_hv || + idx == netxtreme_e_vf_hv || idx == netxtreme_e_p5_vf);
Networking
7fbf359bb2c19c824cbb1954020680824f6ee5a5
michael chan vasundhara volam vasundhara v volam broadcom com andy gospodarek gospo broadcom com
drivers
net
bnxt, broadcom, ethernet
bnxt_en: add support for fw managed link down feature.
in the current code, the driver will not shutdown the link during ifdown if there are still vfs sharing the port. newer firmware will manage the link down decision when the port is shared by vfs, so we can just call firmware to shutdown the port unconditionally and let firmware make the final decision.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for fw managed link down feature
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['h', 'c']
2
3
1
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c - if (pci_num_vf(bp->pdev)) + if (pci_num_vf(bp->pdev) && + !(bp->phy_flags & bnxt_phy_fl_fw_managed_lkdn)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +#define bnxt_phy_fl_fw_managed_lkdn port_phy_qcaps_resp_flags_fw_managed_link_down
Networking
d5ca99054f8e25384390d41c0123d930eed510b6
michael chan edwin peer edwin peer broadcom com
drivers
net
bnxt, broadcom, ethernet
bnxt_en: implement .ndo_features_check().
for udp encapsultions, we only support the offloaded vxlan port and geneve port. all other ports included fou and gue are not supported so we need to turn off tso and checksum features.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
implement .ndo_features_check()
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['h', 'c']
2
42
2
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +static netdev_features_t bnxt_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct bnxt *bp; + __be16 udp_port; + u8 l4_proto = 0; + + features = vlan_features_check(skb, features); + if (!skb->encapsulation) + return features; + + switch (vlan_get_protocol(skb)) { + case htons(eth_p_ip): + l4_proto = ip_hdr(skb)->protocol; + break; + case htons(eth_p_ipv6): + l4_proto = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + if (l4_proto != ipproto_udp) + return features; + + bp = netdev_priv(dev); + /* for udp, we can only handle 1 vxlan port and 1 geneve port. */ + udp_port = udp_hdr(skb)->dest; + if (udp_port == bp->vxlan_port || udp_port == bp->nge_port) + return features; + return features & ~(netif_f_csum_mask | netif_f_gso_mask); +} + - if (ti.type == udp_tunnel_type_vxlan) + if (ti.type == udp_tunnel_type_vxlan) { + bp->vxlan_port = ti.port; - else + } else { + bp->nge_port = ti.port; + } + .ndo_features_check = bnxt_features_check, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h + __be16 vxlan_port; + __be16 nge_port;
Networking
1698d600b361915fbe5eda63a613da55c435bd34
michael chan
drivers
net
bnxt, broadcom, ethernet
bnxt_en: support iff_supp_nofcs feature to transmit without ethernet fcs.
if firmware is capable, set the iff_supp_nofcs flag to support the sockets option to transmit packets without fcs. this is mainly used for testing.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support iff_supp_nofcs feature to transmit without ethernet fcs
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['h', 'c']
2
14
3
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c + __le32 lflags = 0; + if (unlikely(skb->no_fcs)) { + lflags |= cpu_to_le32(tx_bd_flags_no_crc); + goto normal_tx; + } + - txbd1->tx_bd_hsize_lflags = 0; + txbd1->tx_bd_hsize_lflags = lflags; - txbd1->tx_bd_hsize_lflags = cpu_to_le32(tx_bd_flags_lso | + txbd1->tx_bd_hsize_lflags |= cpu_to_le32(tx_bd_flags_lso | - txbd1->tx_bd_hsize_lflags = + txbd1->tx_bd_hsize_lflags |= + if (bp->phy_flags & bnxt_phy_fl_no_fcs) + bp->dev->priv_flags |= iff_supp_nofcs; + else + bp->dev->priv_flags &= ~iff_supp_nofcs; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h +#define bnxt_phy_fl_no_fcs port_phy_qcaps_resp_flags_no_fcs
Networking
dade5e15fade59a789c30bc47abfe926ddd856d6
michael chan edwin peer edwin peer broadcom com
drivers
net
bnxt, broadcom, ethernet
bnxt_en: allow promiscuous mode for trusted vfs
firmware previously only allowed promiscuous mode for vfs associated with a default vlan. it is now possible to enable promiscuous mode for a vf having no vlan configured provided that it is trusted. in such cases the vf will see all packets received by the pf, irrespective of destination mac or vlan.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
allow promiscuous mode for trusted vfs
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['h', 'c']
3
11
7
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c -/* allow pf and vf with default vlan to be in promiscuous mode */ +/* allow pf, trusted vfs and vfs with default vlan to be in promiscuous mode */ - if (bnxt_vf(bp) && !bp->vf.vlan) + if (bnxt_vf(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) - if ((bp->dev->flags & iff_promisc) && bnxt_promisc_ok(bp)) + if (bp->dev->flags & iff_promisc) - if ((dev->flags & iff_promisc) && bnxt_promisc_ok(bp)) + if (dev->flags & iff_promisc) + if ((vnic->rx_mask & cfa_l2_set_rx_mask_req_mask_promiscuous) && + !bnxt_promisc_ok(bp)) + vnic->rx_mask &= ~cfa_l2_set_rx_mask_req_mask_promiscuous; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c - req.fid = cpu_to_le16(vf->fw_fid); + req.fid = cpu_to_le16(bnxt_pf(bp) ? vf->fw_fid : 0xffff); -static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) - if (!(bp->fw_cap & bnxt_fw_cap_trusted_vf)) + if (bnxt_pf(bp) && !(bp->fw_cap & bnxt_fw_cap_trusted_vf)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf);
Networking
dd85fc0ab5b4daa496bd3e2832b51963022182d0
edwin peer
drivers
net
bnxt, broadcom, ethernet
bnxt: add more ethtool standard stats
michael suggest a few more stats we can expose.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add more ethtool standard stats
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['c']
1
6
0
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c + mac_stats->framechecksequenceerrors = + bnxt_get_rx_port_stats64(rx, rx_fcs_err_frames); + mac_stats->alignmenterrors = + bnxt_get_rx_port_stats64(rx, rx_align_err_frames); + mac_stats->outofrangelengthfield = + bnxt_get_rx_port_stats64(rx, rx_oor_len_frames);
Networking
37434782d63f89de5b9c383a449b6a82dc3fa4fb
jakub kicinski
drivers
net
bnxt, broadcom, ethernet
bnxt: implement ethtool standard stats
most of the names seem to strongly correlate with names from the standard and rfc. whether ..+good_frames are indeed frames..ok i'm the least sure of.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
implement ethtool standard stats
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['c']
1
125
0
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +static void bnxt_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (bnxt_vf(bp) || !(bp->flags & bnxt_flag_port_stats_ext)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + phy_stats->symbolerrorduringcarrier = + *(rx + bnxt_rx_stats_ext_offset(rx_pcs_symbol_err)); +} + +static void bnxt_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (bnxt_vf(bp) || !(bp->flags & bnxt_flag_port_stats)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + bnxt_tx_port_stats_byte_offset / 8; + + mac_stats->framesreceivedok = + bnxt_get_rx_port_stats64(rx, rx_good_frames); + mac_stats->framestransmittedok = + bnxt_get_tx_port_stats64(tx, tx_good_frames); +} + +static void bnxt_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (bnxt_vf(bp) || !(bp->flags & bnxt_flag_port_stats)) + return; + + rx = bp->port_stats.sw_stats; + ctrl_stats->maccontrolframesreceived = + bnxt_get_rx_port_stats64(rx, rx_ctrl_frames); +} + +static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 9216 }, + { 9217, 16383 }, + {} +}; + +static void bnxt_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (bnxt_vf(bp) || !(bp->flags & bnxt_flag_port_stats)) + return; + + rx = 
bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + bnxt_tx_port_stats_byte_offset / 8; + + rmon_stats->jabbers = + bnxt_get_rx_port_stats64(rx, rx_jbr_frames); + rmon_stats->oversize_pkts = + bnxt_get_rx_port_stats64(rx, rx_ovrsz_frames); + rmon_stats->undersize_pkts = + bnxt_get_rx_port_stats64(rx, rx_undrsz_frames); + + rmon_stats->hist[0] = bnxt_get_rx_port_stats64(rx, rx_64b_frames); + rmon_stats->hist[1] = bnxt_get_rx_port_stats64(rx, rx_65b_127b_frames); + rmon_stats->hist[2] = bnxt_get_rx_port_stats64(rx, rx_128b_255b_frames); + rmon_stats->hist[3] = bnxt_get_rx_port_stats64(rx, rx_256b_511b_frames); + rmon_stats->hist[4] = + bnxt_get_rx_port_stats64(rx, rx_512b_1023b_frames); + rmon_stats->hist[5] = + bnxt_get_rx_port_stats64(rx, rx_1024b_1518b_frames); + rmon_stats->hist[6] = + bnxt_get_rx_port_stats64(rx, rx_1519b_2047b_frames); + rmon_stats->hist[7] = + bnxt_get_rx_port_stats64(rx, rx_2048b_4095b_frames); + rmon_stats->hist[8] = + bnxt_get_rx_port_stats64(rx, rx_4096b_9216b_frames); + rmon_stats->hist[9] = + bnxt_get_rx_port_stats64(rx, rx_9217b_16383b_frames); + + rmon_stats->hist_tx[0] = + bnxt_get_tx_port_stats64(tx, tx_64b_frames); + rmon_stats->hist_tx[1] = + bnxt_get_tx_port_stats64(tx, tx_65b_127b_frames); + rmon_stats->hist_tx[2] = + bnxt_get_tx_port_stats64(tx, tx_128b_255b_frames); + rmon_stats->hist_tx[3] = + bnxt_get_tx_port_stats64(tx, tx_256b_511b_frames); + rmon_stats->hist_tx[4] = + bnxt_get_tx_port_stats64(tx, tx_512b_1023b_frames); + rmon_stats->hist_tx[5] = + bnxt_get_tx_port_stats64(tx, tx_1024b_1518b_frames); + rmon_stats->hist_tx[6] = + bnxt_get_tx_port_stats64(tx, tx_1519b_2047b_frames); + rmon_stats->hist_tx[7] = + bnxt_get_tx_port_stats64(tx, tx_2048b_4095b_frames); + rmon_stats->hist_tx[8] = + bnxt_get_tx_port_stats64(tx, tx_4096b_9216b_frames); + rmon_stats->hist_tx[9] = + bnxt_get_tx_port_stats64(tx, tx_9217b_16383b_frames); + + *ranges = bnxt_rmon_ranges; +} + + .get_eth_phy_stats = bnxt_get_eth_phy_stats, + 
.get_eth_mac_stats = bnxt_get_eth_mac_stats, + .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, + .get_rmon_stats = bnxt_get_rmon_stats,
Networking
782bc00affcd63dacaa34e9ab6da588605423312
jakub kicinski
drivers
net
bnxt, broadcom, ethernet
bnxt: implement ethtool::get_fec_stats
report corrected bits.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
implement ethtool::get_fec_stats
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ']
['c']
1
15
0
--- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +static void bnxt_get_fec_stats(struct net_device *dev, + struct ethtool_fec_stats *fec_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (bnxt_vf(bp) || !(bp->flags & bnxt_flag_port_stats_ext)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + fec_stats->corrected_bits.total = + *(rx + bnxt_rx_stats_ext_offset(rx_corrected_bits)); +} + + .get_fec_stats = bnxt_get_fec_stats,
Networking
c9ca5c3aabafcaa934731b8a841f28f8df990b7f
jakub kicinski michael chan michael chan broadcom com
drivers
net
bnxt, broadcom, ethernet
net: broadcom: bcm4908_enet: support tx interrupt
it appears that each dma channel has its own interrupt and both rings can be configured (the same way) to handle interrupts.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support tx interrupt
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['bnxt_en ', 'broadcom', 'bcm4908_enet']
['c']
1
103
35
--- diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c + struct napi_struct napi; - struct napi_struct napi; + int irq_tx; -static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet) +static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu) - enet_write(enet, enet_dma_ch_rx_cfg + enet_dma_ch_cfg_int_mask, enet_dma_int_defaults); + enet_umac_write(enet, umac_max_frame_len, mtu + enet_max_eth_overhead); -static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet) +/*** + * dma ring ops + */ + +static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) - enet_write(enet, enet_dma_ch_rx_cfg + enet_dma_ch_cfg_int_mask, 0); + enet_write(enet, ring->cfg_block + enet_dma_ch_cfg_int_mask, enet_dma_int_defaults); -static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet) +static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) - enet_write(enet, enet_dma_ch_rx_cfg + enet_dma_ch_cfg_int_stat, enet_dma_int_defaults); + enet_write(enet, ring->cfg_block + enet_dma_ch_cfg_int_mask, 0); -static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu) +static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet, + struct bcm4908_enet_dma_ring *ring) - enet_umac_write(enet, umac_max_frame_len, mtu + enet_max_eth_overhead); + enet_write(enet, ring->cfg_block + enet_dma_ch_cfg_int_stat, enet_dma_int_defaults); + struct bcm4908_enet_dma_ring *ring; - bcm4908_enet_intrs_off(enet); - bcm4908_enet_intrs_ack(enet); + ring = (irq == enet->irq_tx) ? 
&enet->tx_ring : &enet->rx_ring; - napi_schedule(&enet->napi); + bcm4908_enet_dma_ring_intrs_off(enet, ring); + bcm4908_enet_dma_ring_intrs_ack(enet, ring); + + napi_schedule(&ring->napi); + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; + if (enet->irq_tx > 0) { + err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0, + "tx", enet); + if (err) { + dev_err(dev, "failed to request irq %d: %d ", + enet->irq_tx, err); + free_irq(netdev->irq, enet); + return err; + } + } + - bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring); - napi_enable(&enet->napi); + if (enet->irq_tx > 0) { + napi_enable(&tx_ring->napi); + bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring); + bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); + } + + bcm4908_enet_dma_rx_ring_enable(enet, rx_ring); + napi_enable(&rx_ring->napi); - - bcm4908_enet_intrs_ack(enet); - bcm4908_enet_intrs_on(enet); + bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring); + bcm4908_enet_dma_ring_intrs_on(enet, rx_ring); + struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring; + struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring; - napi_disable(&enet->napi); + napi_disable(&rx_ring->napi); + napi_disable(&tx_ring->napi); + free_irq(enet->irq_tx, enet); - while (ring->read_idx != ring->write_idx) { - buf_desc = &ring->buf_desc[ring->read_idx]; - if (le32_to_cpu(buf_desc->ctl) & dma_ctl_status_own) - break; - slot = &ring->slots[ring->read_idx]; - - dma_unmap_single(dev, slot->dma_addr, slot->len, dma_to_device); - dev_kfree_skb(slot->skb); - if (++ring->read_idx == ring->length) - ring->read_idx = 0; - } + if (enet->irq_tx < 0 && + !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & dma_ctl_status_own)) + napi_schedule(&enet->tx_ring.napi); - if (free_buf_descs < 2) + if (free_buf_descs < 2) { + netif_stop_queue(netdev); + } -static int bcm4908_enet_poll(struct napi_struct *napi, int weight) +static int bcm4908_enet_poll_rx(struct napi_struct *napi, int 
weight) - struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi); + struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi); + struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring); - bcm4908_enet_intrs_on(enet); + bcm4908_enet_dma_ring_intrs_on(enet, rx_ring); +static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) +{ + struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi); + struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring); + struct bcm4908_enet_dma_ring_bd *buf_desc; + struct bcm4908_enet_dma_ring_slot *slot; + struct device *dev = enet->dev; + unsigned int bytes = 0; + int handled = 0; + + while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) { + buf_desc = &tx_ring->buf_desc[tx_ring->read_idx]; + if (le32_to_cpu(buf_desc->ctl) & dma_ctl_status_own) + break; + slot = &tx_ring->slots[tx_ring->read_idx]; + + dma_unmap_single(dev, slot->dma_addr, slot->len, dma_to_device); + dev_kfree_skb(slot->skb); + bytes += slot->len; + if (++tx_ring->read_idx == tx_ring->length) + tx_ring->read_idx = 0; + + handled++; + } + + if (handled < weight) { + napi_complete_done(napi, handled); + bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); + } + + if (netif_queue_stopped(enet->netdev)) + netif_wake_queue(enet->netdev); + + return handled; +} + + enet->irq_tx = platform_get_irq_byname(pdev, "tx"); + - netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64); + netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, napi_poll_weight); + netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, napi_poll_weight); - netif_napi_del(&enet->napi); + netif_napi_del(&enet->rx_ring.napi); + netif_napi_del(&enet->tx_ring.napi);
Networking
12bb508bfe5a564c36864b12253db23cac83bfa1
rafa mi ecki
drivers
net
broadcom, ethernet
can: etas_es58x: add core support for etas es58x can usb interfaces
this patch adds the core support for various usb can interfaces from etas gmbh (https://www.etas.com/en/products/es58x.php). the next patches add the glue code drivers for the individual interfaces.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
introducing etas es58x can usb interfaces
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ']
['h', 'kconfig', 'c', 'makefile']
5
2,988
0
--- diff --git a/drivers/net/can/usb/kconfig b/drivers/net/can/usb/kconfig --- a/drivers/net/can/usb/kconfig +++ b/drivers/net/can/usb/kconfig +config can_etas_es58x + tristate "etas es58x can/usb interfaces" + select crc16 + help + this driver supports the es581.4, es582.1 and es584.1 interfaces + from etas gmbh (https://www.etas.com/en/products/es58x.php). + + to compile this driver as a module, choose m here: the module + will be called etas_es58x. + diff --git a/drivers/net/can/usb/makefile b/drivers/net/can/usb/makefile --- a/drivers/net/can/usb/makefile +++ b/drivers/net/can/usb/makefile +obj-$(config_can_etas_es58x) += etas_es58x/ diff --git a/drivers/net/can/usb/etas_es58x/makefile b/drivers/net/can/usb/etas_es58x/makefile --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/makefile +# spdx-license-identifier: gpl-2.0 +obj-$(config_can_etas_es58x) += etas_es58x.o +etas_es58x-y = es58x_core.o diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c +// spdx-license-identifier: gpl-2.0 + +/* driver for etas gmbh es58x usb can(-fd) bus interfaces. + * + * file es58x_core.c: core logic to manage the network devices and the + * usb interface. + * + * copyright (c) 2019 robert bosch engineering and business solutions. all rights reserved. + * copyright (c) 2020 etas k.k.. all rights reserved. 
+ * copyright (c) 2020, 2021 vincent mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/crc16.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" + +#define drv_version "1.00" +module_author("mailhol vincent <mailhol.vincent@wanadoo.fr>"); +module_author("arunachalam santhanam <arunachalam.santhanam@in.bosch.com>"); +module_description("socket can driver for etas es58x usb adapters"); +module_version(drv_version); +module_license("gpl v2"); + +#define es58x_module_name "etas_es58x" +#define es58x_vendor_id 0x108c + +/* table of devices which work with this driver. */ +static const struct usb_device_id es58x_id_table[] = { + { + /* terminating entry */ + } +}; + +module_device_table(usb, es58x_id_table); + +#define es58x_print_hex_dump(buf, len) \ + print_hex_dump(kern_debug, \ + es58x_module_name " " __stringify(buf) ": ", \ + dump_prefix_none, 16, 1, buf, len, false) + +#define es58x_print_hex_dump_debug(buf, len) \ + print_hex_dump_debug(es58x_module_name " " __stringify(buf) ": ",\ + dump_prefix_none, 16, 1, buf, len, false) + +/* the last two bytes of an es58x command is a crc16. the first two + * bytes (the start of frame) are skipped and the crc calculation + * starts on the third byte. + */ +#define es58x_crc_calc_offset 2 + +/** + * es58x_calculate_crc() - compute the crc16 of a given urb. + * @urb_cmd: the urb command for which we want to calculate the crc. + * @urb_len: length of @urb_cmd. must be at least bigger than 4 + * (es58x_crc_calc_offset + sizeof(crc)) + * + * return: crc16 value. + */ +static u16 es58x_calculate_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + ssize_t len = urb_len - es58x_crc_calc_offset - sizeof(crc); + + crc = crc16(0, &urb_cmd->raw_cmd[es58x_crc_calc_offset], len); + return crc; +} + +/** + * es58x_get_crc() - get the crc value of a given urb. + * @urb_cmd: the urb command for which we want to get the crc. 
+ * @urb_len: length of @urb_cmd. must be at least bigger than 4 + * (es58x_crc_calc_offset + sizeof(crc)) + * + * return: crc16 value. + */ +static u16 es58x_get_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + const __le16 *crc_addr; + + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + crc = get_unaligned_le16(crc_addr); + return crc; +} + +/** + * es58x_set_crc() - set the crc value of a given urb. + * @urb_cmd: the urb command for which we want to get the crc. + * @urb_len: length of @urb_cmd. must be at least bigger than 4 + * (es58x_crc_calc_offset + sizeof(crc)) + */ +static void es58x_set_crc(union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 crc; + __le16 *crc_addr; + + crc = es58x_calculate_crc(urb_cmd, urb_len); + crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; + put_unaligned_le16(crc, crc_addr); +} + +/** + * es58x_check_crc() - validate the crc value of a given urb. + * @es58x_dev: es58x device. + * @urb_cmd: the urb command for which we want to check the crc. + * @urb_len: length of @urb_cmd. must be at least bigger than 4 + * (es58x_crc_calc_offset + sizeof(crc)) + * + * return: zero on success, -ebadmsg if the crc check fails. + */ +static int es58x_check_crc(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, u16 urb_len) +{ + u16 calculated_crc = es58x_calculate_crc(urb_cmd, urb_len); + u16 expected_crc = es58x_get_crc(urb_cmd, urb_len); + + if (expected_crc != calculated_crc) { + dev_err_ratelimited(es58x_dev->dev, + "%s: bad crc, urb_len: %d ", + __func__, urb_len); + return -ebadmsg; + } + + return 0; +} + +/** + * es58x_timestamp_to_ns() - convert a timestamp value received from a + * es58x device to nanoseconds. + * @timestamp: timestamp received from a es58x device. + * + * the timestamp received from es58x is expressed in multiples of 0.5 + * micro seconds. this function converts it in to nanoseconds. + * + * return: timestamp value in nanoseconds. 
+ */ +static u64 es58x_timestamp_to_ns(u64 timestamp) +{ + const u64 es58x_timestamp_ns_mult_coef = 500ull; + + return es58x_timestamp_ns_mult_coef * timestamp; +} + +/** + * es58x_set_skb_timestamp() - set the hardware timestamp of an skb. + * @netdev: can network device. + * @skb: socket buffer of a can message. + * @timestamp: timestamp received from an es58x device. + * + * used for both received and echo messages. + */ +static void es58x_set_skb_timestamp(struct net_device *netdev, + struct sk_buff *skb, u64 timestamp) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + struct skb_shared_hwtstamps *hwts; + + hwts = skb_hwtstamps(skb); + /* ignoring overflow (overflow on 64 bits timestamp with nano + * second precision would occur after more than 500 years). + */ + hwts->hwtstamp = ns_to_ktime(es58x_timestamp_to_ns(timestamp) + + es58x_dev->realtime_diff_ns); +} + +/** + * es58x_rx_timestamp() - handle a received timestamp. + * @es58x_dev: es58x device. + * @timestamp: timestamp received from a es58x device. + * + * calculate the difference between the es58x device and the kernel + * internal clocks. this difference will be later used as an offset to + * convert the timestamps of rx and echo messages to match the kernel + * system time (e.g. convert to unix time). 
+ */ +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp) +{ + u64 ktime_real_ns = ktime_get_real_ns(); + u64 device_timestamp = es58x_timestamp_to_ns(timestamp); + + dev_dbg(es58x_dev->dev, "%s: request round-trip time: %llu ns ", + __func__, ktime_real_ns - es58x_dev->ktime_req_ns); + + es58x_dev->realtime_diff_ns = + (es58x_dev->ktime_req_ns + ktime_real_ns) / 2 - device_timestamp; + es58x_dev->ktime_req_ns = 0; + + dev_dbg(es58x_dev->dev, + "%s: device timestamp: %llu, diff with kernel: %llu ", + __func__, device_timestamp, es58x_dev->realtime_diff_ns); +} + +/** + * es58x_set_realtime_diff_ns() - calculate difference between the + * clocks of the es58x device and the kernel + * @es58x_dev: es58x device. + * + * request a timestamp from the es58x device. once the answer is + * received, the timestamp difference will be set by the callback + * function es58x_rx_timestamp(). + * + * return: zero on success, errno when any error occurs. + */ +static int es58x_set_realtime_diff_ns(struct es58x_device *es58x_dev) +{ + if (es58x_dev->ktime_req_ns) { + dev_warn(es58x_dev->dev, + "%s: previous request to set timestamp has not completed yet ", + __func__); + return -ebusy; + } + + es58x_dev->ktime_req_ns = ktime_get_real_ns(); + return es58x_dev->ops->get_timestamp(es58x_dev); +} + +/** + * es58x_is_can_state_active() - is the network device in an active + * can state? + * @netdev: can network device. + * + * the device is considered active if it is able to send or receive + * can frames, that is to say if it is in any of + * can_state_error_active, can_state_error_warning or + * can_state_error_passive states. + * + * caution: when recovering from a bus-off, + * net/core/dev.c#can_restart() will call + * net/core/dev.c#can_flush_echo_skb() without using any kind of + * locks. for this reason, it is critical to guarantee that no tx or + * echo operations (i.e. any access to priv->echo_skb[]) can be done + * while this function is returning false. 
+ * + * return: true if the device is active, else returns false. + */ +static bool es58x_is_can_state_active(struct net_device *netdev) +{ + return es58x_priv(netdev)->can.state < can_state_bus_off; +} + +/** + * es58x_is_echo_skb_threshold_reached() - determine the limit of how + * many skb slots can be taken before we should stop the network + * queue. + * @priv: es58x private parameters related to the network device. + * + * we need to save enough free skb slots in order to be able to do + * bulk send. this function can be used to determine when to wake or + * stop the network queue in regard to the number of skb slots already + * taken if the echo fifo. + * + * return: boolean. + */ +static bool es58x_is_echo_skb_threshold_reached(struct es58x_priv *priv) +{ + u32 num_echo_skb = priv->tx_head - priv->tx_tail; + u32 threshold = priv->can.echo_skb_max - + priv->es58x_dev->param->tx_bulk_max + 1; + + return num_echo_skb >= threshold; +} + +/** + * es58x_can_free_echo_skb_tail() - remove the oldest echo skb of the + * echo fifo. + * @netdev: can network device. + * + * naming convention: the tail is the beginning of the fifo, i.e. the + * first skb to have entered the fifo. + */ +static void es58x_can_free_echo_skb_tail(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + unsigned int frame_len = 0; + + can_free_echo_skb(netdev, priv->tx_tail & fifo_mask, &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + + priv->tx_tail++; + + netdev->stats.tx_dropped++; +} + +/** + * es58x_can_get_echo_skb_recovery() - try to re-sync the echo fifo. + * @netdev: can network device. + * @rcv_packet_idx: index + * + * this function should not be called under normal circumstances. in + * the unlikely case that one or several urb packages get dropped by + * the device, the index will get out of sync. try to recover by + * dropping the echo skb packets with older indexes. 
+ * + * return: zero if recovery was successful, -einval otherwise. + */ +static int es58x_can_get_echo_skb_recovery(struct net_device *netdev, + u32 rcv_packet_idx) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret = 0; + + netdev->stats.tx_errors++; + + if (net_ratelimit()) + netdev_warn(netdev, + "bad echo packet index: %u. first index: %u, end index %u, num_echo_skb: %02u/%02u ", + rcv_packet_idx, priv->tx_tail, priv->tx_head, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + + if ((s32)(rcv_packet_idx - priv->tx_tail) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "received echo index is from the past. ignoring it "); + ret = -einval; + } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) { + if (net_ratelimit()) + netdev_err(netdev, + "received echo index is from the future. ignoring it "); + ret = -einval; + } else { + if (net_ratelimit()) + netdev_warn(netdev, + "recovery: dropping %u echo skb from index %u to %u ", + rcv_packet_idx - priv->tx_tail, + priv->tx_tail, rcv_packet_idx - 1); + while (priv->tx_tail != rcv_packet_idx) { + if (priv->tx_tail == priv->tx_head) + return -einval; + es58x_can_free_echo_skb_tail(netdev); + } + } + return ret; +} + +/** + * es58x_can_get_echo_skb() - get the skb from the echo fifo and loop + * it back locally. + * @netdev: can network device. + * @rcv_packet_idx: index of the first packet received from the device. + * @tstamps: array of hardware timestamps received from a es58x device. + * @pkts: number of packets (and so, length of @tstamps). + * + * callback function for when we receive a self reception + * acknowledgment. retrieves the skb from the echo fifo, sets its + * hardware timestamp (the actual time it was sent) and loops it back + * locally. + * + * the device has to be active (i.e. network interface up and not in + * bus off state or restarting). + * + * packet indexes must be consecutive (i.e. 
index of first packet is + * @rcv_packet_idx, index of second packet is @rcv_packet_idx + 1 and + * index of last packet is @rcv_packet_idx + @pkts - 1). + * + * return: zero on success. + */ +int es58x_can_get_echo_skb(struct net_device *netdev, u32 rcv_packet_idx, + u64 *tstamps, unsigned int pkts) +{ + struct es58x_priv *priv = es58x_priv(netdev); + unsigned int rx_total_frame_len = 0; + unsigned int num_echo_skb = priv->tx_head - priv->tx_tail; + int i; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d echo packets ", + __func__, netdev->name, pkts); + netdev->stats.tx_dropped += pkts; + return 0; + } else if (!es58x_is_can_state_active(netdev)) { + if (net_ratelimit()) + netdev_dbg(netdev, + "bus is off or device is restarting. ignoring %u echo packets from index %u ", + pkts, rcv_packet_idx); + /* stats.tx_dropped will be (or was already) + * incremented by + * drivers/net/can/net/dev.c:can_flush_echo_skb(). + */ + return 0; + } else if (num_echo_skb == 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "received %u echo packets from index: %u but echo skb queue is empty. ", + pkts, rcv_packet_idx); + netdev->stats.tx_dropped += pkts; + return 0; + } + + if (priv->tx_tail != rcv_packet_idx) { + if (es58x_can_get_echo_skb_recovery(netdev, rcv_packet_idx) < 0) { + if (net_ratelimit()) + netdev_warn(netdev, + "could not find echo skb for echo packet index: %u ", + rcv_packet_idx); + return 0; + } + } + if (num_echo_skb < pkts) { + int pkts_drop = pkts - num_echo_skb; + + if (net_ratelimit()) + netdev_err(netdev, + "received %u echo packets but have only %d echo skb. 
dropping %d echo skb ", + pkts, num_echo_skb, pkts_drop); + netdev->stats.tx_dropped += pkts_drop; + pkts -= pkts_drop; + } + + for (i = 0; i < pkts; i++) { + unsigned int skb_idx = priv->tx_tail & fifo_mask; + struct sk_buff *skb = priv->can.echo_skb[skb_idx]; + unsigned int frame_len = 0; + + if (skb) + es58x_set_skb_timestamp(netdev, skb, tstamps[i]); + + netdev->stats.tx_bytes += can_get_echo_skb(netdev, skb_idx, + &frame_len); + rx_total_frame_len += frame_len; + + priv->tx_tail++; + } + + netdev_completed_queue(netdev, pkts, rx_total_frame_len); + netdev->stats.tx_packets += pkts; + + priv->err_passive_before_rtx_success = 0; + if (!es58x_is_echo_skb_threshold_reached(priv)) + netif_wake_queue(netdev); + + return 0; +} + +/** + * es58x_can_reset_echo_fifo() - reset the echo fifo. + * @netdev: can network device. + * + * the echo_skb array of struct can_priv will be flushed by + * drivers/net/can/dev.c:can_flush_echo_skb(). this function resets + * the parameters of the struct es58x_priv of our device and reset the + * queue (c.f. bql). + */ +static void es58x_can_reset_echo_fifo(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + priv->tx_tail = 0; + priv->tx_head = 0; + priv->tx_urb = null; + priv->err_passive_before_rtx_success = 0; + netdev_reset_queue(netdev); +} + +/** + * es58x_flush_pending_tx_msg() - reset the buffer for transmission messages. + * @netdev: can network device. + * + * es58x_start_xmit() will queue up to tx_bulk_max messages in + * &tx_urb buffer and do a bulk send of all messages in one single urb + * (c.f. xmit_more flag). when the device recovers from a bus off + * state or when the device stops, the tx_urb buffer might still have + * pending messages in it and thus need to be flushed. 
+ */ +static void es58x_flush_pending_tx_msg(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + + if (priv->tx_urb) { + netdev_warn(netdev, "%s: dropping %d tx messages ", + __func__, priv->tx_can_msg_cnt); + netdev->stats.tx_dropped += priv->tx_can_msg_cnt; + while (priv->tx_can_msg_cnt > 0) { + unsigned int frame_len = 0; + u16 fifo_mask = priv->es58x_dev->param->fifo_mask; + + priv->tx_head--; + priv->tx_can_msg_cnt--; + can_free_echo_skb(netdev, priv->tx_head & fifo_mask, + &frame_len); + netdev_completed_queue(netdev, 1, frame_len); + } + usb_anchor_urb(priv->tx_urb, &priv->es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); + usb_free_urb(priv->tx_urb); + } + priv->tx_urb = null; +} + +/** + * es58x_tx_ack_msg() - handle acknowledgment messages. + * @netdev: can network device. + * @tx_free_entries: number of free entries in the device transmit fifo. + * @rx_cmd_ret_u32: error code as returned by the es58x device. + * + * es58x sends an acknowledgment message after a transmission request + * is done. this is mandatory for the es581.4 but is optional (and + * deactivated in this driver) for the es58x_fd family. + * + * under normal circumstances, this function should never throw an + * error message. + * + * return: zero on success, errno when any error occurs. + */ +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + if (tx_free_entries <= priv->es58x_dev->param->tx_bulk_max) { + if (net_ratelimit()) + netdev_err(netdev, + "only %d entries left in device queue, num_echo_skb: %d/%d ", + tx_free_entries, + priv->tx_head - priv->tx_tail, + priv->can.echo_skb_max); + netif_stop_queue(netdev); + } + + return es58x_rx_cmd_ret_u32(netdev, es58x_ret_type_tx_msg, + rx_cmd_ret_u32); +} + +/** + * es58x_rx_can_msg() - handle a received a can message. 
+ * @netdev: can network device. + * @timestamp: hardware time stamp (only relevant in rx branches). + * @data: can payload. + * @can_id: can id. + * @es58x_flags: please refer to enum es58x_flag. + * @dlc: data length code (raw value). + * + * fill up a can skb and post it. + * + * this function handles the case where the dlc of a classical can + * frame is greater than can_max_dlen (c.f. the len8_dlc field of + * struct can_frame). + * + * return: zero on success. + */ +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc) +{ + struct canfd_frame *cfd; + struct can_frame *ccf; + struct sk_buff *skb; + u8 len; + bool is_can_fd = !!(es58x_flags & es58x_flag_fd_data); + + if (dlc > can_max_raw_dlc) { + netdev_err(netdev, + "%s: dlc is %d but maximum should be %d ", + __func__, dlc, can_max_raw_dlc); + return -emsgsize; + } + + if (is_can_fd) { + len = can_fd_dlc2len(dlc); + skb = alloc_canfd_skb(netdev, &cfd); + } else { + len = can_cc_dlc2len(dlc); + skb = alloc_can_skb(netdev, &ccf); + cfd = (struct canfd_frame *)ccf; + } + if (!skb) { + netdev->stats.rx_dropped++; + return 0; + } + + cfd->can_id = can_id; + if (es58x_flags & es58x_flag_eff) + cfd->can_id |= can_eff_flag; + if (is_can_fd) { + cfd->len = len; + if (es58x_flags & es58x_flag_fd_brs) + cfd->flags |= canfd_brs; + if (es58x_flags & es58x_flag_fd_esi) + cfd->flags |= canfd_esi; + } else { + can_frame_set_cc_len(ccf, dlc, es58x_priv(netdev)->can.ctrlmode); + if (es58x_flags & es58x_flag_rtr) { + ccf->can_id |= can_rtr_flag; + len = 0; + } + } + memcpy(cfd->data, data, len); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + + es58x_priv(netdev)->err_passive_before_rtx_success = 0; + + return 0; +} + +/** + * es58x_rx_err_msg() - handle a received can event or error message. + * @netdev: can network device. + * @error: error code. 
+ * @event: event code. + * @timestamp: timestamp received from a es58x device. + * + * handle the errors and events received by the es58x device, create + * a can error skb and post it. + * + * in some rare cases the devices might get stuck alternating between + * can_state_error_passive and can_state_error_warning. to prevent + * this behavior, we force a bus off state if the device goes in + * can_state_error_warning for es58x_max_consecutive_warn consecutive + * times with no successful transmission or reception in between. + * + * once the device is in bus off state, the only way to restart it is + * through the drivers/net/can/dev.c:can_restart() function. the + * device is technically capable to recover by itself under certain + * circumstances, however, allowing self recovery would create + * complex race conditions with drivers/net/can/dev.c:can_restart() + * and thus was not implemented. to activate automatic restart, please + * set the restart-ms parameter (e.g. ip link set can0 type can + * restart-ms 100). + * + * if the bus is really instable, this function would try to send a + * lot of log messages. those are rate limited (i.e. you will see + * messages such as "net_ratelimit: xxx callbacks suppressed" in + * dmesg). + * + * return: zero on success, errno when any error occurs. 
+ */ +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum es58x_event event, u64 timestamp) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct can_priv *can = netdev_priv(netdev); + struct can_device_stats *can_stats = &can->can_stats; + struct can_frame *cf = null; + struct sk_buff *skb; + int ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, "%s: %s is down, dropping packet ", + __func__, netdev->name); + netdev->stats.rx_dropped++; + return 0; + } + + if (error == es58x_err_ok && event == es58x_event_ok) { + netdev_err(netdev, "%s: both error and event are zero ", + __func__); + return -einval; + } + + skb = alloc_can_err_skb(netdev, &cf); + + switch (error) { + case es58x_err_ok: /* 0: no error */ + break; + + case es58x_err_prot_stuff: + if (net_ratelimit()) + netdev_dbg(netdev, "error bitsuff "); + if (cf) + cf->data[2] |= can_err_prot_stuff; + break; + + case es58x_err_prot_form: + if (net_ratelimit()) + netdev_dbg(netdev, "error format "); + if (cf) + cf->data[2] |= can_err_prot_form; + break; + + case es58x_err_ack: + if (net_ratelimit()) + netdev_dbg(netdev, "error ack "); + if (cf) + cf->can_id |= can_err_ack; + break; + + case es58x_err_prot_bit: + if (net_ratelimit()) + netdev_dbg(netdev, "error bit "); + if (cf) + cf->data[2] |= can_err_prot_bit; + break; + + case es58x_err_prot_crc: + if (net_ratelimit()) + netdev_dbg(netdev, "error crc "); + if (cf) + cf->data[3] |= can_err_prot_loc_crc_seq; + break; + + case es58x_err_prot_bit1: + if (net_ratelimit()) + netdev_dbg(netdev, + "error: expected a recessive bit but monitored a dominant one "); + if (cf) + cf->data[2] |= can_err_prot_bit1; + break; + + case es58x_err_prot_bit0: + if (net_ratelimit()) + netdev_dbg(netdev, + "error expected a dominant bit but monitored a recessive one "); + if (cf) + cf->data[2] |= can_err_prot_bit0; + break; + + case es58x_err_prot_overload: + if (net_ratelimit()) + netdev_dbg(netdev, "error overload "); 
+ if (cf) + cf->data[2] |= can_err_prot_overload; + break; + + case es58x_err_prot_unspec: + if (net_ratelimit()) + netdev_dbg(netdev, "unspecified error "); + if (cf) + cf->can_id |= can_err_prot; + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: unspecified error code 0x%04x ", + __func__, (int)error); + if (cf) + cf->can_id |= can_err_prot; + break; + } + + switch (event) { + case es58x_event_ok: /* 0: no event */ + break; + + case es58x_event_crtl_active: + if (can->state == can_state_bus_off) { + netdev_err(netdev, + "%s: state transition: bus off -> active ", + __func__); + } + if (net_ratelimit()) + netdev_dbg(netdev, "event can bus active "); + if (cf) + cf->data[1] |= can_err_crtl_active; + can->state = can_state_error_active; + break; + + case es58x_event_crtl_passive: + if (net_ratelimit()) + netdev_dbg(netdev, "event can bus passive "); + /* either tx or rx error count reached passive state + * but we do not know which. setting both flags by + * default. + */ + if (cf) { + cf->data[1] |= can_err_crtl_rx_passive; + cf->data[1] |= can_err_crtl_tx_passive; + } + if (can->state < can_state_bus_off) + can->state = can_state_error_passive; + can_stats->error_passive++; + if (priv->err_passive_before_rtx_success < u8_max) + priv->err_passive_before_rtx_success++; + break; + + case es58x_event_crtl_warning: + if (net_ratelimit()) + netdev_dbg(netdev, "event can bus warning "); + /* either tx or rx error count reached warning state + * but we do not know which. setting both flags by + * default. 
+ */ + if (cf) { + cf->data[1] |= can_err_crtl_rx_warning; + cf->data[1] |= can_err_crtl_tx_warning; + } + if (can->state < can_state_bus_off) + can->state = can_state_error_warning; + can_stats->error_warning++; + break; + + case es58x_event_busoff: + if (net_ratelimit()) + netdev_dbg(netdev, "event can bus off "); + if (cf) + cf->can_id |= can_err_busoff; + can_stats->bus_off++; + netif_stop_queue(netdev); + if (can->state != can_state_bus_off) { + can->state = can_state_bus_off; + can_bus_off(netdev); + ret = can->do_set_mode(netdev, can_mode_stop); + if (ret) + return ret; + } + break; + + case es58x_event_single_wire: + if (net_ratelimit()) + netdev_warn(netdev, + "lost connection on either can high or can low "); + /* lost connection on either can high or can + * low. setting both flags by default. + */ + if (cf) { + cf->data[4] |= can_err_trx_canh_no_wire; + cf->data[4] |= can_err_trx_canl_no_wire; + } + break; + + default: + if (net_ratelimit()) + netdev_err(netdev, + "%s: unspecified event code 0x%04x ", + __func__, (int)event); + if (cf) + cf->can_id |= can_err_crtl; + break; + } + + /* driver/net/can/dev.c:can_restart() takes in account error + * messages in the rx stats. doing the same here for + * consistency. + */ + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += cf->can_dlc; + + if (cf) { + if (cf->data[1]) + cf->can_id |= can_err_crtl; + if (cf->data[2] || cf->data[3]) { + cf->can_id |= can_err_prot; + can_stats->bus_error++; + } + if (cf->data[4]) + cf->can_id |= can_err_trx; + + es58x_set_skb_timestamp(netdev, skb, timestamp); + netif_rx(skb); + } + + if ((event & es58x_event_crtl_passive) && + priv->err_passive_before_rtx_success == es58x_consecutive_err_passive_max) { + netdev_info(netdev, + "got %d consecutive warning events with no successful rx or tx. 
forcing bus-off ", + priv->err_passive_before_rtx_success); + return es58x_rx_err_msg(netdev, es58x_err_ok, + es58x_event_busoff, timestamp); + } + + return 0; +} + +/** + * es58x_cmd_ret_desc() - convert a command type to a string. + * @cmd_ret_type: type of the command which triggered the return code. + * + * the final line (return "<unknown>") should not be reached. if this + * is the case, there is an implementation bug. + * + * return: a readable description of the @cmd_ret_type. + */ +static const char *es58x_cmd_ret_desc(enum es58x_ret_type cmd_ret_type) +{ + switch (cmd_ret_type) { + case es58x_ret_type_set_bittiming: + return "set bittiming"; + case es58x_ret_type_enable_channel: + return "enable channel"; + case es58x_ret_type_disable_channel: + return "disable channel"; + case es58x_ret_type_tx_msg: + return "transmit message"; + case es58x_ret_type_reset_rx: + return "reset rx"; + case es58x_ret_type_reset_tx: + return "reset tx"; + case es58x_ret_type_device_err: + return "device error"; + } + + return "<unknown>"; +}; + +/** + * es58x_rx_cmd_ret_u8() - handle the command's return code received + * from the es58x device. + * @dev: device, only used for the dev_xxx() print functions. + * @cmd_ret_type: type of the command which triggered the return code. + * @rx_cmd_ret_u8: command error code as returned by the es58x device. + * + * handles the 8 bits command return code. those are specific to the + * es581.4 device. the return value will eventually be used by + * es58x_handle_urb_cmd() function which will take proper actions in + * case of critical issues such and memory errors or bad crc values. + * + * in contrast with es58x_rx_cmd_ret_u32(), the network device is + * unknown. + * + * return: zero on success, return errno when any error occurs. 
+ */ +int es58x_rx_cmd_ret_u8(struct device *dev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8) +{ + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u8) { + case es58x_ret_u8_ok: + dev_dbg_ratelimited(dev, "%s: ok ", ret_desc); + return 0; + + case es58x_ret_u8_err_unspecified_failure: + dev_err(dev, "%s: unspecified failure ", ret_desc); + return -ebadmsg; + + case es58x_ret_u8_err_no_mem: + dev_err(dev, "%s: device ran out of memory ", ret_desc); + return -enomem; + + case es58x_ret_u8_err_bad_crc: + dev_err(dev, "%s: crc of previous command is incorrect ", + ret_desc); + return -eio; + + default: + dev_err(dev, "%s: returned unknown value: 0x%02x ", + ret_desc, rx_cmd_ret_u8); + return -ebadmsg; + } +} + +/** + * es58x_rx_cmd_ret_u32() - handle the command return code received + * from the es58x device. + * @netdev: can network device. + * @cmd_ret_type: type of the command which triggered the return code. + * @rx_cmd_ret_u32: error code as returned by the es58x device. + * + * handles the 32 bits command return code. the return value will + * eventually be used by es58x_handle_urb_cmd() function which will + * take proper actions in case of critical issues such and memory + * errors or bad crc values. + * + * return: zero on success, errno when any error occurs. 
+ */ +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_operators *ops = priv->es58x_dev->ops; + const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); + + switch (rx_cmd_ret_u32) { + case es58x_ret_u32_ok: + switch (cmd_ret_type) { + case es58x_ret_type_enable_channel: + es58x_can_reset_echo_fifo(netdev); + priv->can.state = can_state_error_active; + netif_wake_queue(netdev); + netdev_info(netdev, + "%s: %s (serial number %s): can%d channel becomes ready ", + ret_desc, priv->es58x_dev->udev->product, + priv->es58x_dev->udev->serial, + priv->channel_idx + 1); + break; + + case es58x_ret_type_tx_msg: + if (is_enabled(config_verbose_debug) && net_ratelimit()) + netdev_vdbg(netdev, "%s: ok ", ret_desc); + break; + + default: + netdev_dbg(netdev, "%s: ok ", ret_desc); + break; + } + return 0; + + case es58x_ret_u32_err_unspecified_failure: + if (cmd_ret_type == es58x_ret_type_enable_channel) { + int ret; + + netdev_warn(netdev, + "%s: channel is already opened, closing and re-openning it to reflect new configuration ", + ret_desc); + ret = ops->disable_channel(es58x_priv(netdev)); + if (ret) + return ret; + return ops->enable_channel(es58x_priv(netdev)); + } + if (cmd_ret_type == es58x_ret_type_disable_channel) { + netdev_info(netdev, + "%s: channel is already closed ", ret_desc); + return 0; + } + netdev_err(netdev, + "%s: unspecified failure ", ret_desc); + return -ebadmsg; + + case es58x_ret_u32_err_no_mem: + netdev_err(netdev, "%s: device ran out of memory ", ret_desc); + return -enomem; + + case es58x_ret_u32_warn_param_adjusted: + netdev_warn(netdev, + "%s: some incompatible parameters have been adjusted ", + ret_desc); + return 0; + + case es58x_ret_u32_warn_tx_maybe_reorder: + netdev_warn(netdev, + "%s: tx messages might have been reordered ", + ret_desc); + return 0; + + case es58x_ret_u32_err_timedout: + 
netdev_err(netdev, "%s: command timed out ", ret_desc); + return -etimedout; + + case es58x_ret_u32_err_fifo_full: + netdev_warn(netdev, "%s: fifo is full ", ret_desc); + return 0; + + case es58x_ret_u32_err_bad_config: + netdev_err(netdev, "%s: bad configuration ", ret_desc); + return -einval; + + case es58x_ret_u32_err_no_resource: + netdev_err(netdev, "%s: no resource available ", ret_desc); + return -ebusy; + + default: + netdev_err(netdev, "%s returned unknown value: 0x%08x ", + ret_desc, rx_cmd_ret_u32); + return -ebadmsg; + } +} + +/** + * es58x_increment_rx_errors() - increment the network devices' error + * count. + * @es58x_dev: es58x device. + * + * if an error occurs on the early stages on receiving an urb command, + * we might not be able to figure out on which network device the + * error occurred. in such case, we arbitrarily increment the error + * count of all the network devices attached to our es58x device. + */ +static void es58x_increment_rx_errors(struct es58x_device *es58x_dev) +{ + int i; + + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + es58x_dev->netdev[i]->stats.rx_errors++; +} + +/** + * es58x_handle_urb_cmd() - handle the urb command + * @es58x_dev: es58x device. + * @urb_cmd: the urb command received from the es58x device, might not + * be aligned. + * + * sends the urb command to the device specific function. manages the + * errors thrown back by those functions. 
+ */ +static void es58x_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_operators *ops = es58x_dev->ops; + size_t cmd_len; + int i, ret; + + ret = ops->handle_urb_cmd(es58x_dev, urb_cmd); + switch (ret) { + case 0: /* ok */ + return; + + case -enodev: + dev_err_ratelimited(es58x_dev->dev, "device is not ready "); + break; + + case -einval: + case -emsgsize: + case -ebadrqc: + case -ebadmsg: + case -echrng: + case -etimedout: + cmd_len = es58x_get_urb_cmd_len(es58x_dev, + ops->get_msg_len(urb_cmd)); + dev_err(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe", + err_ptr(ret)); + es58x_print_hex_dump(urb_cmd, cmd_len); + break; + + case -efault: + case -enomem: + case -eio: + default: + dev_crit(es58x_dev->dev, + "ops->handle_urb_cmd() returned error %pe, detaching all network devices ", + err_ptr(ret)); + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + if (es58x_dev->ops->reset_device) + es58x_dev->ops->reset_device(es58x_dev); + break; + } + + /* because the urb command could not fully be parsed, + * channel_id is not confirmed. incrementing rx_errors count + * of all channels. + */ + es58x_increment_rx_errors(es58x_dev); +} + +/** + * es58x_check_rx_urb() - check the length and format of the urb command. + * @es58x_dev: es58x device. + * @urb_cmd: the urb command received from the es58x device, might not + * be aligned. + * @urb_actual_len: the actual length of the urb command. + * + * check if the first message of the received urb is valid, that is to + * say that both the header and the length are coherent. + * + * return: + * the length of the first message of the urb on success. + * + * -enodata if the urb command is incomplete (in which case, the urb + * command should be buffered and combined with the next urb to try to + * reconstitute the urb command). 
+ * + * -eoverflow if the length is bigger than the maximum expected one. + * + * -ebadrqc if the start of frame does not match the expected value. + */ +static signed int es58x_check_rx_urb(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd, + u32 urb_actual_len) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + u16 sof, msg_len; + signed int urb_cmd_len, ret; + + if (urb_actual_len < param->urb_cmd_header_len) { + dev_vdbg(dev, + "%s: received %d bytes [%*ph]: header incomplete ", + __func__, urb_actual_len, urb_actual_len, + urb_cmd->raw_cmd); + return -enodata; + } + + sof = get_unaligned_le16(&urb_cmd->sof); + if (sof != param->rx_start_of_frame) { + dev_err_ratelimited(es58x_dev->dev, + "%s: expected sequence 0x%04x for start of frame but got 0x%04x. ", + __func__, param->rx_start_of_frame, sof); + return -ebadrqc; + } + + msg_len = es58x_dev->ops->get_msg_len(urb_cmd); + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > param->rx_urb_cmd_max_len) { + dev_err_ratelimited(es58x_dev->dev, + "%s: biggest expected size for rx urb_cmd is %u but receive a command of size %d ", + __func__, + param->rx_urb_cmd_max_len, urb_cmd_len); + return -eoverflow; + } else if (urb_actual_len < urb_cmd_len) { + dev_vdbg(dev, "%s: received %02d/%02d bytes ", + __func__, urb_actual_len, urb_cmd_len); + return -enodata; + } + + ret = es58x_check_crc(es58x_dev, urb_cmd, urb_cmd_len); + if (ret) + return ret; + + return urb_cmd_len; +} + +/** + * es58x_copy_to_cmd_buf() - copy an array to the urb command buffer. + * @es58x_dev: es58x device. + * @raw_cmd: the buffer we want to copy. + * @raw_cmd_len: length of @raw_cmd. + * + * concatenates @raw_cmd_len bytes of @raw_cmd to the end of the urb + * command buffer. + * + * return: zero on success, -emsgsize if not enough space is available + * to do the copy. 
+ */ +static int es58x_copy_to_cmd_buf(struct es58x_device *es58x_dev, + u8 *raw_cmd, int raw_cmd_len) +{ + if (es58x_dev->rx_cmd_buf_len + raw_cmd_len > + es58x_dev->param->rx_urb_cmd_max_len) + return -emsgsize; + + memcpy(&es58x_dev->rx_cmd_buf.raw_cmd[es58x_dev->rx_cmd_buf_len], + raw_cmd, raw_cmd_len); + es58x_dev->rx_cmd_buf_len += raw_cmd_len; + + return 0; +} + +/** + * es58x_split_urb_try_recovery() - try to recover bad urb sequences. + * @es58x_dev: es58x device. + * @raw_cmd: pointer to the buffer we want to copy. + * @raw_cmd_len: length of @raw_cmd. + * + * under some rare conditions, we might get incorrect urbs from the + * device. from our observations, one of the valid urb gets replaced + * by one from the past. the full root cause is not identified. + * + * this function looks for the next start of frame in the urb buffer + * in order to try to recover. + * + * such behavior was not observed on the devices of the es58x fd + * family and only seems to impact the es581.4. + * + * return: the number of bytes dropped on success, -ebadmsg if recovery failed. + */ +static int es58x_split_urb_try_recovery(struct es58x_device *es58x_dev, + u8 *raw_cmd, size_t raw_cmd_len) +{ + union es58x_urb_cmd *urb_cmd; + signed int urb_cmd_len; + u16 sof; + int dropped_bytes = 0; + + es58x_increment_rx_errors(es58x_dev); + + while (raw_cmd_len > sizeof(sof)) { + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + sof = get_unaligned_le16(&urb_cmd->sof); + + if (sof == es58x_dev->param->rx_start_of_frame) { + urb_cmd_len = es58x_check_rx_urb(es58x_dev, + urb_cmd, raw_cmd_len); + if ((urb_cmd_len == -enodata) || urb_cmd_len > 0) { + dev_info_ratelimited(es58x_dev->dev, + "recovery successful! 
dropped %d bytes (urb_cmd_len: %d) ", + dropped_bytes, + urb_cmd_len); + return dropped_bytes; + } + } + raw_cmd++; + raw_cmd_len--; + dropped_bytes++; + } + + dev_warn_ratelimited(es58x_dev->dev, "%s: recovery failed ", __func__); + return -ebadmsg; +} + +/** + * es58x_handle_incomplete_cmd() - reconstitute an urb command from + * different urb pieces. + * @es58x_dev: es58x device. + * @urb: last urb buffer received. + * + * the device might split the urb commands in an arbitrary amount of + * pieces. this function concatenates those in an urb buffer until a + * full urb command is reconstituted and consume it. + * + * return: + * number of bytes consumed from @urb if successful. + * + * -enodata if the urb command is still incomplete. + * + * -ebadmsg if the urb command is incorrect. + */ +static signed int es58x_handle_incomplete_cmd(struct es58x_device *es58x_dev, + struct urb *urb) +{ + size_t cpy_len; + signed int urb_cmd_len, tmp_cmd_buf_len, ret; + + tmp_cmd_buf_len = es58x_dev->rx_cmd_buf_len; + cpy_len = min_t(int, es58x_dev->param->rx_urb_cmd_max_len - + es58x_dev->rx_cmd_buf_len, urb->actual_length); + ret = es58x_copy_to_cmd_buf(es58x_dev, urb->transfer_buffer, cpy_len); + if (ret < 0) + return ret; + + urb_cmd_len = es58x_check_rx_urb(es58x_dev, &es58x_dev->rx_cmd_buf, + es58x_dev->rx_cmd_buf_len); + if (urb_cmd_len == -enodata) { + return -enodata; + } else if (urb_cmd_len < 0) { + dev_err_ratelimited(es58x_dev->dev, + "could not reconstitute incomplete command from previous urb, dropping %d bytes ", + tmp_cmd_buf_len + urb->actual_length); + dev_err_ratelimited(es58x_dev->dev, + "error code: %pe, es58x_dev->rx_cmd_buf_len: %d, urb->actual_length: %u ", + err_ptr(urb_cmd_len), + tmp_cmd_buf_len, urb->actual_length); + es58x_print_hex_dump(&es58x_dev->rx_cmd_buf, tmp_cmd_buf_len); + es58x_print_hex_dump(urb->transfer_buffer, urb->actual_length); + return urb->actual_length; + } + + es58x_handle_urb_cmd(es58x_dev, &es58x_dev->rx_cmd_buf); + return 
urb_cmd_len - tmp_cmd_buf_len; /* consumed length */ +} + +/** + * es58x_split_urb() - cut the received urb in individual urb commands. + * @es58x_dev: es58x device. + * @urb: last urb buffer received. + * + * the device might send urb in bulk format (i.e. several urb commands + * concatenated together). this function will split all the commands + * contained in the urb. + * + * return: + * number of bytes consumed from @urb if successful. + * + * -enodata if the urb command is incomplete. + * + * -ebadmsg if the urb command is incorrect. + */ +static signed int es58x_split_urb(struct es58x_device *es58x_dev, + struct urb *urb) +{ + union es58x_urb_cmd *urb_cmd; + u8 *raw_cmd = urb->transfer_buffer; + s32 raw_cmd_len = urb->actual_length; + int ret; + + if (es58x_dev->rx_cmd_buf_len != 0) { + ret = es58x_handle_incomplete_cmd(es58x_dev, urb); + if (ret != -enodata) + es58x_dev->rx_cmd_buf_len = 0; + if (ret < 0) + return ret; + + raw_cmd += ret; + raw_cmd_len -= ret; + } + + while (raw_cmd_len > 0) { + if (raw_cmd[0] == es58x_heartbeat) { + raw_cmd++; + raw_cmd_len--; + continue; + } + urb_cmd = (union es58x_urb_cmd *)raw_cmd; + ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len); + if (ret > 0) { + es58x_handle_urb_cmd(es58x_dev, urb_cmd); + } else if (ret == -enodata) { + es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len); + return -enodata; + } else if (ret < 0) { + ret = es58x_split_urb_try_recovery(es58x_dev, raw_cmd, + raw_cmd_len); + if (ret < 0) + return ret; + } + raw_cmd += ret; + raw_cmd_len -= ret; + } + + return 0; +} + +/** + * es58x_read_bulk_callback() - callback for reading data from device. + * @urb: last urb buffer received. + * + * this function gets eventually called each time an urb is received + * from the es58x device. + * + * checks urb status, calls read function and resubmits urb read + * operation. 
+ */ +static void es58x_read_bulk_callback(struct urb *urb) +{ + struct es58x_device *es58x_dev = urb->context; + const struct device *dev = es58x_dev->dev; + int i, ret; + + switch (urb->status) { + case 0: /* success */ + break; + + case -eoverflow: + dev_err_ratelimited(dev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->transfer_buffer_length); + goto resubmit_urb; + + case -eproto: + dev_warn_ratelimited(dev, "%s: error %pe. device unplugged? ", + __func__, err_ptr(urb->status)); + goto free_urb; + + case -enoent: + case -epipe: + dev_err_ratelimited(dev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + goto free_urb; + + case -eshutdown: + dev_dbg_ratelimited(dev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + goto free_urb; + + default: + dev_err_ratelimited(dev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + goto resubmit_urb; + } + + ret = es58x_split_urb(es58x_dev, urb); + if ((ret != -enodata) && ret < 0) { + dev_err(es58x_dev->dev, "es58x_split_urb() returned error %pe", + err_ptr(ret)); + es58x_print_hex_dump_debug(urb->transfer_buffer, + urb->actual_length); + + /* because the urb command could not be parsed, + * channel_id is not confirmed. incrementing rx_errors + * count of all channels. 
+ */ + es58x_increment_rx_errors(es58x_dev); + } + + resubmit_urb: + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + urb->transfer_buffer, urb->transfer_buffer_length, + es58x_read_bulk_callback, es58x_dev); + + ret = usb_submit_urb(urb, gfp_atomic); + if (ret == -enodev) { + for (i = 0; i < es58x_dev->num_can_ch; i++) + if (es58x_dev->netdev[i]) + netif_device_detach(es58x_dev->netdev[i]); + } else if (ret) + dev_err_ratelimited(dev, + "failed resubmitting read bulk urb: %pe ", + err_ptr(ret)); + return; + + free_urb: + usb_free_coherent(urb->dev, urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma); +} + +/** + * es58x_write_bulk_callback() - callback after writing data to the device. + * @urb: urb buffer which was previously submitted. + * + * this function gets eventually called each time an urb was sent to + * the es58x device. + * + * puts the @urb back to the urbs idle anchor and tries to restart the + * network queue. + */ +static void es58x_write_bulk_callback(struct urb *urb) +{ + struct net_device *netdev = urb->context; + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + + switch (urb->status) { + case 0: /* success */ + break; + + case -eoverflow: + if (net_ratelimit()) + netdev_err(netdev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + es58x_print_hex_dump(urb->transfer_buffer, + urb->transfer_buffer_length); + break; + + case -enoent: + if (net_ratelimit()) + netdev_dbg(netdev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + return; + + default: + if (net_ratelimit()) + netdev_info(netdev, "%s: error %pe ", + __func__, err_ptr(urb->status)); + break; + } + + usb_anchor_urb(urb, &es58x_dev->tx_urbs_idle); + atomic_inc(&es58x_dev->tx_urbs_idle_cnt); +} + +/** + * es58x_alloc_urb() - allocate memory for an urb and its transfer + * buffer. + * @es58x_dev: es58x device. 
+ * @urb: urb to be allocated. + * @buf: used to return dma address of buffer. + * @buf_len: requested buffer size. + * @mem_flags: affect whether allocation may block. + * + * allocates an urb and its @transfer_buffer and set its @transfer_dma + * address. + * + * this function is used at start-up to allocate all rx urbs at once + * and during run time for tx urbs. + * + * return: zero on success, -enomem if no memory is available. + */ +static int es58x_alloc_urb(struct es58x_device *es58x_dev, struct urb **urb, + u8 **buf, size_t buf_len, gfp_t mem_flags) +{ + *urb = usb_alloc_urb(0, mem_flags); + if (!*urb) { + dev_err(es58x_dev->dev, "no memory left for urbs "); + return -enomem; + } + + *buf = usb_alloc_coherent(es58x_dev->udev, buf_len, + mem_flags, &(*urb)->transfer_dma); + if (!*buf) { + dev_err(es58x_dev->dev, "no memory left for usb buffer "); + usb_free_urb(*urb); + return -enomem; + } + + (*urb)->transfer_flags |= urb_no_transfer_dma_map; + + return 0; +} + +/** + * es58x_get_tx_urb() - get an urb for transmission. + * @es58x_dev: es58x device. + * + * gets an urb from the idle urbs anchor or allocate a new one if the + * anchor is empty. + * + * if there are more than es58x_tx_urbs_max in the idle anchor, do + * some garbage collection. the garbage collection is done here + * instead of within es58x_write_bulk_callback() because + * usb_free_coherent() should not be used in irq context: + * c.f. warn_on(irqs_disabled()) in dma_free_attrs(). + * + * return: a pointer to an urb on success, null if no memory is + * available. 
+ */ +static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev) +{ + atomic_t *idle_cnt = &es58x_dev->tx_urbs_idle_cnt; + struct urb *urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!urb) { + size_t tx_buf_len; + u8 *buf; + + tx_buf_len = es58x_dev->param->tx_urb_cmd_max_len; + if (es58x_alloc_urb(es58x_dev, &urb, &buf, tx_buf_len, + gfp_atomic)) + return null; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + buf, tx_buf_len, null, null); + return urb; + } + + while (atomic_dec_return(idle_cnt) > es58x_tx_urbs_max) { + /* garbage collector */ + struct urb *tmp = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); + + if (!tmp) + break; + usb_free_coherent(tmp->dev, + es58x_dev->param->tx_urb_cmd_max_len, + tmp->transfer_buffer, tmp->transfer_dma); + usb_free_urb(tmp); + } + + return urb; +} + +/** + * es58x_submit_urb() - send data to the device. + * @es58x_dev: es58x device. + * @urb: urb to be sent. + * @netdev: can network device. + * + * return: zero on success, errno when any error occurs. + */ +static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb, + struct net_device *netdev) +{ + int ret; + + es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length); + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, + urb->transfer_buffer, urb->transfer_buffer_length, + es58x_write_bulk_callback, netdev); + usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy); + ret = usb_submit_urb(urb, gfp_atomic); + if (ret) { + netdev_err(netdev, "%s: usb send urb failure: %pe ", + __func__, err_ptr(ret)); + usb_unanchor_urb(urb); + usb_free_coherent(urb->dev, + es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + } + usb_free_urb(urb); + + return ret; +} + +/** + * es58x_send_msg() - prepare an urb and submit it. + * @es58x_dev: es58x device. + * @cmd_type: command type. + * @cmd_id: command id. + * @msg: es58x message to be sent. + * @msg_len: length of @msg. 
+ * @channel_idx: index of the network device. + * + * creates an urb command from a given message, sets the header and the + * crc and then submits it. + * + * return: zero on success, errno when any error occurs. + */ +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 msg_len, int channel_idx) +{ + struct net_device *netdev; + union es58x_urb_cmd *urb_cmd; + struct urb *urb; + int urb_cmd_len; + + if (channel_idx == es58x_channel_idx_na) + netdev = es58x_dev->netdev[0]; /* default to first channel */ + else + netdev = es58x_dev->netdev[channel_idx]; + + urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); + if (urb_cmd_len > es58x_dev->param->tx_urb_cmd_max_len) + return -eoverflow; + + urb = es58x_get_tx_urb(es58x_dev); + if (!urb) + return -enomem; + + urb_cmd = urb->transfer_buffer; + es58x_dev->ops->fill_urb_header(urb_cmd, cmd_type, cmd_id, + channel_idx, msg_len); + memcpy(&urb_cmd->raw_cmd[es58x_dev->param->urb_cmd_header_len], + msg, msg_len); + urb->transfer_buffer_length = urb_cmd_len; + + return es58x_submit_urb(es58x_dev, urb, netdev); +} + +/** + * es58x_alloc_rx_urbs() - allocate rx urbs. + * @es58x_dev: es58x device. + * + * allocate urbs for reception and anchor them. + * + * return: zero on success, errno when any error occurs. 
+ */ +static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev) +{ + const struct device *dev = es58x_dev->dev; + const struct es58x_parameters *param = es58x_dev->param; + size_t rx_buf_len = es58x_dev->rx_max_packet_size; + struct urb *urb; + u8 *buf; + int i; + int ret = -einval; + + for (i = 0; i < param->rx_urb_max; i++) { + ret = es58x_alloc_urb(es58x_dev, &urb, &buf, rx_buf_len, + gfp_kernel); + if (ret) + break; + + usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, + buf, rx_buf_len, es58x_read_bulk_callback, + es58x_dev); + usb_anchor_urb(urb, &es58x_dev->rx_urbs); + + ret = usb_submit_urb(urb, gfp_kernel); + if (ret) { + usb_unanchor_urb(urb); + usb_free_coherent(es58x_dev->udev, rx_buf_len, + buf, urb->transfer_dma); + usb_free_urb(urb); + break; + } + usb_free_urb(urb); + } + + if (i == 0) { + dev_err(dev, "%s: could not setup any rx urbs ", __func__); + return ret; + } + dev_dbg(dev, "%s: allocated %d rx urbs each of size %zu ", + __func__, i, rx_buf_len); + + return ret; +} + +/** + * es58x_free_urbs() - free all the tx and rx urbs. + * @es58x_dev: es58x device. + */ +static void es58x_free_urbs(struct es58x_device *es58x_dev) +{ + struct urb *urb; + + if (!usb_wait_anchor_empty_timeout(&es58x_dev->tx_urbs_busy, 1000)) { + dev_err(es58x_dev->dev, "%s: timeout, some tx urbs still remain ", + __func__); + usb_kill_anchored_urbs(&es58x_dev->tx_urbs_busy); + } + + while ((urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle)) != null) { + usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, + urb->transfer_buffer, urb->transfer_dma); + usb_free_urb(urb); + atomic_dec(&es58x_dev->tx_urbs_idle_cnt); + } + if (atomic_read(&es58x_dev->tx_urbs_idle_cnt)) + dev_err(es58x_dev->dev, + "all idle urbs were freed but tx_urb_idle_cnt is %d ", + atomic_read(&es58x_dev->tx_urbs_idle_cnt)); + + usb_kill_anchored_urbs(&es58x_dev->rx_urbs); +} + +/** + * es58x_open() - enable the network device. + * @netdev: can network device. 
+ * + * called when the network transitions to the up state. allocate the + * urb resources if needed and open the channel. + * + * return: zero on success, errno when any error occurs. + */ +static int es58x_open(struct net_device *netdev) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + int ret; + + if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) { + ret = es58x_alloc_rx_urbs(es58x_dev); + if (ret) + return ret; + + ret = es58x_set_realtime_diff_ns(es58x_dev); + if (ret) + goto free_urbs; + } + + ret = open_candev(netdev); + if (ret) + goto free_urbs; + + ret = es58x_dev->ops->enable_channel(es58x_priv(netdev)); + if (ret) + goto free_urbs; + + netif_start_queue(netdev); + + return ret; + + free_urbs: + if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_free_urbs(es58x_dev); + netdev_err(netdev, "%s: could not open the network device: %pe ", + __func__, err_ptr(ret)); + + return ret; +} + +/** + * es58x_stop() - disable the network device. + * @netdev: can network device. + * + * called when the network transitions to the down state. if all the + * channels of the device are closed, free the urb resources which are + * not needed anymore. + * + * return: zero on success, errno when any error occurs. + */ +static int es58x_stop(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + int ret; + + netif_stop_queue(netdev); + ret = es58x_dev->ops->disable_channel(priv); + if (ret) + return ret; + + priv->can.state = can_state_stopped; + es58x_can_reset_echo_fifo(netdev); + close_candev(netdev); + + es58x_flush_pending_tx_msg(netdev); + + if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt)) + es58x_free_urbs(es58x_dev); + + return 0; +} + +/** + * es58x_xmit_commit() - send the bulk urb. + * @netdev: can network device. + * + * do the bulk send. this function should be called only once by bulk + * transmission. 
+ * + * return: zero on success, errno when any error occurs. + */ +static int es58x_xmit_commit(struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + int ret; + + if (!es58x_is_can_state_active(netdev)) + return -enetdown; + + if (es58x_is_echo_skb_threshold_reached(priv)) + netif_stop_queue(netdev); + + ret = es58x_submit_urb(priv->es58x_dev, priv->tx_urb, netdev); + if (ret == 0) + priv->tx_urb = null; + + return ret; +} + +/** + * es58x_xmit_more() - can we put more packets? + * @priv: es58x private parameters related to the network device. + * + * return: true if we can put more, false if it is time to send. + */ +static bool es58x_xmit_more(struct es58x_priv *priv) +{ + unsigned int free_slots = + priv->can.echo_skb_max - (priv->tx_head - priv->tx_tail); + + return netdev_xmit_more() && free_slots > 0 && + priv->tx_can_msg_cnt < priv->es58x_dev->param->tx_bulk_max; +} + +/** + * es58x_start_xmit() - transmit an skb. + * @skb: socket buffer of a can message. + * @netdev: can network device. + * + * called when a packet needs to be transmitted. + * + * this function relies on byte queue limits (bql). the main benefit + * is to increase the throughput by allowing bulk transfers + * (c.f. xmit_more flag). + * + * queues up to tx_bulk_max messages in &tx_urb buffer and does + * a bulk send of all messages in one single urb. + * + * return: netdev_tx_ok regardless of if we could transmit the @skb or + * had to drop it. + */ +static netdev_tx_t es58x_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct es58x_priv *priv = es58x_priv(netdev); + struct es58x_device *es58x_dev = priv->es58x_dev; + unsigned int frame_len; + int ret; + + if (can_dropped_invalid_skb(netdev, skb)) { + if (priv->tx_urb) + goto xmit_commit; + return netdev_tx_ok; + } + + if (priv->tx_urb && priv->tx_can_msg_is_fd != can_is_canfd_skb(skb)) { + /* can not do bulk send with mixed can and can fd frames. 
*/ + ret = es58x_xmit_commit(netdev); + if (ret) + goto drop_skb; + } + + if (!priv->tx_urb) { + priv->tx_urb = es58x_get_tx_urb(es58x_dev); + if (!priv->tx_urb) { + ret = -enomem; + goto drop_skb; + } + priv->tx_can_msg_cnt = 0; + priv->tx_can_msg_is_fd = can_is_canfd_skb(skb); + } + + ret = es58x_dev->ops->tx_can_msg(priv, skb); + if (ret) + goto drop_skb; + + frame_len = can_skb_get_frame_len(skb); + ret = can_put_echo_skb(skb, netdev, + priv->tx_head & es58x_dev->param->fifo_mask, + frame_len); + if (ret) + goto xmit_failure; + netdev_sent_queue(netdev, frame_len); + + priv->tx_head++; + priv->tx_can_msg_cnt++; + + xmit_commit: + if (!es58x_xmit_more(priv)) { + ret = es58x_xmit_commit(netdev); + if (ret) + goto xmit_failure; + } + + return netdev_tx_ok; + + drop_skb: + dev_kfree_skb(skb); + netdev->stats.tx_dropped++; + xmit_failure: + netdev_warn(netdev, "%s: send message failure: %pe ", + __func__, err_ptr(ret)); + netdev->stats.tx_errors++; + es58x_flush_pending_tx_msg(netdev); + return netdev_tx_ok; +} + +static const struct net_device_ops es58x_netdev_ops = { + .ndo_open = es58x_open, + .ndo_stop = es58x_stop, + .ndo_start_xmit = es58x_start_xmit +}; + +/** + * es58x_set_mode() - change network device mode. + * @netdev: can network device. + * @mode: either %can_mode_start, %can_mode_stop or %can_mode_sleep + * + * currently, this function is only used to stop and restart the + * channel during a bus off event (c.f. es58x_rx_err_msg() and + * drivers/net/can/dev.c:can_restart() which are the two only + * callers). + * + * return: zero on success, errno when any error occurs. 
+ */ +static int es58x_set_mode(struct net_device *netdev, enum can_mode mode) +{ + struct es58x_priv *priv = es58x_priv(netdev); + + switch (mode) { + case can_mode_start: + switch (priv->can.state) { + case can_state_bus_off: + return priv->es58x_dev->ops->enable_channel(priv); + + case can_state_stopped: + return es58x_open(netdev); + + case can_state_error_active: + case can_state_error_warning: + case can_state_error_passive: + default: + return 0; + } + + case can_mode_stop: + switch (priv->can.state) { + case can_state_stopped: + return 0; + + case can_state_error_active: + case can_state_error_warning: + case can_state_error_passive: + case can_state_bus_off: + default: + return priv->es58x_dev->ops->disable_channel(priv); + } + + case can_mode_sleep: + default: + return -eopnotsupp; + } +} + +/** + * es58x_init_priv() - initialize private parameters. + * @es58x_dev: es58x device. + * @priv: es58x private parameters related to the network device. + * @channel_idx: index of the network device. + */ +static void es58x_init_priv(struct es58x_device *es58x_dev, + struct es58x_priv *priv, int channel_idx) +{ + const struct es58x_parameters *param = es58x_dev->param; + struct can_priv *can = &priv->can; + + priv->es58x_dev = es58x_dev; + priv->channel_idx = channel_idx; + priv->tx_urb = null; + priv->tx_can_msg_cnt = 0; + + can->bittiming_const = param->bittiming_const; + if (param->ctrlmode_supported & can_ctrlmode_fd) { + can->data_bittiming_const = param->data_bittiming_const; + can->tdc_const = param->tdc_const; + } + can->bitrate_max = param->bitrate_max; + can->clock = param->clock; + can->state = can_state_stopped; + can->ctrlmode_supported = param->ctrlmode_supported; + can->do_set_mode = es58x_set_mode; +} + +/** + * es58x_init_netdev() - initialize the network device. + * @es58x_dev: es58x device. + * @channel_idx: index of the network device. + * + * return: zero on success, errno when any error occurs. 
+ */ +static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx) +{ + struct net_device *netdev; + struct device *dev = es58x_dev->dev; + int ret; + + netdev = alloc_candev(sizeof(struct es58x_priv), + es58x_dev->param->fifo_mask + 1); + if (!netdev) { + dev_err(dev, "could not allocate candev "); + return -enomem; + } + set_netdev_dev(netdev, dev); + es58x_dev->netdev[channel_idx] = netdev; + es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx); + + netdev->netdev_ops = &es58x_netdev_ops; + netdev->flags |= iff_echo; /* we support local echo */ + + ret = register_candev(netdev); + if (ret) + return ret; + + netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0), + es58x_dev->param->dql_min_limit); + + return ret; +} + +/** + * es58x_get_product_info() - get the product information and print them. + * @es58x_dev: es58x device. + * + * do a synchronous call to get the product information. + * + * return: zero on success, errno when any error occurs. + */ +static int es58x_get_product_info(struct es58x_device *es58x_dev) +{ + struct usb_device *udev = es58x_dev->udev; + const int es58x_prod_info_idx = 6; + /* empirical tests show a prod_info length of maximum 83, + * below should be more than enough. + */ + const size_t prod_info_len = 127; + char *prod_info; + int ret; + + prod_info = kmalloc(prod_info_len, gfp_kernel); + if (!prod_info) + return -enomem; + + ret = usb_string(udev, es58x_prod_info_idx, prod_info, prod_info_len); + if (ret < 0) { + dev_err(es58x_dev->dev, + "%s: could not read the product info: %pe ", + __func__, err_ptr(ret)); + goto out_free; + } + if (ret >= prod_info_len - 1) { + dev_warn(es58x_dev->dev, + "%s: buffer is too small, result might be truncated ", + __func__); + } + dev_info(es58x_dev->dev, "product info: %s ", prod_info); + + out_free: + kfree(prod_info); + return ret < 0 ? ret : 0; +} + +/** + * es58x_init_es58x_dev() - initialize the es58x device. + * @intf: usb interface. 
+ * @p_es58x_dev: pointer to the address of the es58x device. + * @driver_info: quirks of the device. + * + * return: zero on success, errno when any error occurs. + */ +static int es58x_init_es58x_dev(struct usb_interface *intf, + struct es58x_device **p_es58x_dev, + kernel_ulong_t driver_info) +{ + struct device *dev = &intf->dev; + struct es58x_device *es58x_dev; + const struct es58x_parameters *param; + const struct es58x_operators *ops; + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; + int ret; + + dev_info(dev, + "starting %s %s (serial number %s) driver version %s ", + udev->manufacturer, udev->product, udev->serial, drv_version); + + ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out, + null, null); + if (ret) + return ret; + + if (driver_info & es58x_fd_family) { + return -enodev; + /* place holder for es58x_fd glue code. */ + } else { + return -enodev; + /* place holder for es581_4 glue code. */ + } + + es58x_dev = kzalloc(es58x_sizeof_es58x_device(param), gfp_kernel); + if (!es58x_dev) + return -enomem; + + es58x_dev->param = param; + es58x_dev->ops = ops; + es58x_dev->dev = dev; + es58x_dev->udev = udev; + + if (driver_info & es58x_dual_channel) + es58x_dev->num_can_ch = 2; + else + es58x_dev->num_can_ch = 1; + + init_usb_anchor(&es58x_dev->rx_urbs); + init_usb_anchor(&es58x_dev->tx_urbs_idle); + init_usb_anchor(&es58x_dev->tx_urbs_busy); + atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); + atomic_set(&es58x_dev->opened_channel_cnt, 0); + usb_set_intfdata(intf, es58x_dev); + + es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, + ep_in->bendpointaddress); + es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev, + ep_out->bendpointaddress); + es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wmaxpacketsize); + + *p_es58x_dev = es58x_dev; + + return 0; +} + +/** + * es58x_probe() - initialize the usb device. + * @intf: usb interface. + * @id: usb device id. 
+ * + * return: zero on success, -enodev if the interface is not supported + * or errno when any other error occurs. + */ +static int es58x_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct es58x_device *es58x_dev; + int ch_idx, ret; + + ret = es58x_init_es58x_dev(intf, &es58x_dev, id->driver_info); + if (ret) + return ret; + + ret = es58x_get_product_info(es58x_dev); + if (ret) + goto cleanup_es58x_dev; + + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { + ret = es58x_init_netdev(es58x_dev, ch_idx); + if (ret) + goto cleanup_candev; + } + + return ret; + + cleanup_candev: + for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) + if (es58x_dev->netdev[ch_idx]) { + unregister_candev(es58x_dev->netdev[ch_idx]); + free_candev(es58x_dev->netdev[ch_idx]); + } + cleanup_es58x_dev: + kfree(es58x_dev); + + return ret; +} + +/** + * es58x_disconnect() - disconnect the usb device. + * @intf: usb interface + * + * called by the usb core when driver is unloaded or device is + * removed. 
+ */ +static void es58x_disconnect(struct usb_interface *intf) +{ + struct es58x_device *es58x_dev = usb_get_intfdata(intf); + struct net_device *netdev; + int i; + + dev_info(&intf->dev, "disconnecting %s %s ", + es58x_dev->udev->manufacturer, es58x_dev->udev->product); + + for (i = 0; i < es58x_dev->num_can_ch; i++) { + netdev = es58x_dev->netdev[i]; + if (!netdev) + continue; + unregister_candev(netdev); + es58x_dev->netdev[i] = null; + free_candev(netdev); + } + + es58x_free_urbs(es58x_dev); + + kfree(es58x_dev); + usb_set_intfdata(intf, null); +} + +static struct usb_driver es58x_driver = { + .name = es58x_module_name, + .probe = es58x_probe, + .disconnect = es58x_disconnect, + .id_table = es58x_id_table +}; + +module_usb_driver(es58x_driver); diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h +/* spdx-license-identifier: gpl-2.0 */ + +/* driver for etas gmbh es58x usb can(-fd) bus interfaces. + * + * file es58x_core.h: all common definitions and declarations. + * + * copyright (c) 2019 robert bosch engineering and business solutions. all rights reserved. + * copyright (c) 2020 etas k.k.. all rights reserved. + * copyright (c) 2020, 2021 vincent mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __es58x_common_h__ +#define __es58x_common_h__ + +#include <linux/types.h> +#include <linux/usb.h> +#include <linux/netdevice.h> +#include <linux/can.h> +#include <linux/can/dev.h> + +/* driver constants */ +#define es58x_rx_urbs_max 5 /* empirical value */ +#define es58x_tx_urbs_max 6 /* empirical value */ + +#define es58x_max(param) 0 +#define es58x_tx_bulk_max es58x_max(tx_bulk_max) +#define es58x_rx_bulk_max es58x_max(rx_bulk_max) +#define es58x_echo_bulk_max es58x_max(echo_bulk_max) +#define es58x_num_can_ch_max es58x_max(num_can_ch) + +/* use this when channel index is irrelevant (e.g. device + * timestamp). 
+ */ +#define es58x_channel_idx_na 0xff +#define es58x_empty_msg null + +/* threshold on consecutive can_state_error_passive. if we receive + * es58x_consecutive_err_passive_max times the event + * es58x_err_crtl_passive in a row without any successful rx or tx, + * we force the device to switch to can_state_bus_off state. + */ +#define es58x_consecutive_err_passive_max 254 + +/* a magic number sent by the es581.4 to inform it is alive. */ +#define es58x_heartbeat 0x11 + +/** + * enum es58x_driver_info - quirks of the device. + * @es58x_dual_channel: device has two can channels. if this flag is + * not set, it is implied that the device has only one can + * channel. + * @es58x_fd_family: device is can-fd capable. if this flag is not + * set, the device only supports classical can. + */ +enum es58x_driver_info { + es58x_dual_channel = bit(0), + es58x_fd_family = bit(1) +}; + +enum es58x_echo { + es58x_echo_off = 0, + es58x_echo_on = 1 +}; + +/** + * enum es58x_physical_layer - type of the physical layer. + * @es58x_physical_layer_high_speed: high-speed can (c.f. iso + * 11898-2). + * + * some products of the etas portfolio also support low-speed can + * (c.f. iso 11898-3). however, all the devices in scope of this + * driver do not support the option, thus, the enum has only one + * member. + */ +enum es58x_physical_layer { + es58x_physical_layer_high_speed = 1 +}; + +enum es58x_samples_per_bit { + es58x_samples_per_bit_one = 1, + es58x_samples_per_bit_three = 2 +}; + +/** + * enum es58x_sync_edge - synchronization method. + * @es58x_sync_edge_single: iso can specification defines the use of a + * single edge synchronization. the synchronization should be + * done on recessive to dominant level change. + * + * for information, es582.1 and es584.1 also support a double + * synchronization, requiring both recessive to dominant then dominant + * to recessive level change. however, this is not supported in + * socketcan framework, thus, the enum has only one member. 
+ */ +enum es58x_sync_edge { + es58x_sync_edge_single = 1 +}; + +/** + * enum es58x_flag - can flags for rx/tx messages. + * @es58x_flag_eff: extended frame format (eff). + * @es58x_flag_rtr: remote transmission request (rtr). + * @es58x_flag_fd_brs: bit rate switch (brs): second bitrate for + * payload data. + * @es58x_flag_fd_esi: error state indicator (esi): tell if the + * transmitting node is in error passive mode. + * @es58x_flag_fd_data: can fd frame. + */ +enum es58x_flag { + es58x_flag_eff = bit(0), + es58x_flag_rtr = bit(1), + es58x_flag_fd_brs = bit(3), + es58x_flag_fd_esi = bit(5), + es58x_flag_fd_data = bit(6) +}; + +/** + * enum es58x_err - can error detection. + * @es58x_err_ok: no errors. + * @es58x_err_prot_stuff: bit stuffing error: more than 5 consecutive + * equal bits. + * @es58x_err_prot_form: frame format error. + * @es58x_err_ack: received no ack on transmission. + * @es58x_err_prot_bit: single bit error. + * @es58x_err_prot_crc: incorrect 15, 17 or 21 bits crc. + * @es58x_err_prot_bit1: unable to send recessive bit: tried to send + * recessive bit 1 but monitored dominant bit 0. + * @es58x_err_prot_bit0: unable to send dominant bit: tried to send + * dominant bit 0 but monitored recessive bit 1. + * @es58x_err_prot_overload: bus overload. + * @es58x_err_prot_unspec: unspecified. + * + * please refer to iso 11898-1:2015, section 10.11 "error detection" + * and section 10.13 "overload signaling" for additional details. + */ +enum es58x_err { + es58x_err_ok = 0, + es58x_err_prot_stuff = bit(0), + es58x_err_prot_form = bit(1), + es58x_err_ack = bit(2), + es58x_err_prot_bit = bit(3), + es58x_err_prot_crc = bit(4), + es58x_err_prot_bit1 = bit(5), + es58x_err_prot_bit0 = bit(6), + es58x_err_prot_overload = bit(7), + es58x_err_prot_unspec = bit(31) +}; + +/** + * enum es58x_event - can error codes returned by the device. + * @es58x_event_ok: no errors. + * @es58x_event_crtl_active: active state: both tr and rx error count + * is less than 128. 
+ * @es58x_event_crtl_passive: passive state: either tx or rx error + * count is greater than 127. + * @es58x_event_crtl_warning: warning state: either tx or rx error + * count is greater than 96. + * @es58x_event_busoff: bus off. + * @es58x_event_single_wire: lost connection on either can high or can + * low. + * + * please refer to iso 11898-1:2015, section 12.1.4 "rules of fault + * confinement" for additional details. + */ +enum es58x_event { + es58x_event_ok = 0, + es58x_event_crtl_active = bit(0), + es58x_event_crtl_passive = bit(1), + es58x_event_crtl_warning = bit(2), + es58x_event_busoff = bit(3), + es58x_event_single_wire = bit(4) +}; + +/* enum es58x_ret_u8 - device return error codes, 8 bit format. + * + * specific to es581.4. + */ +enum es58x_ret_u8 { + es58x_ret_u8_ok = 0x00, + es58x_ret_u8_err_unspecified_failure = 0x80, + es58x_ret_u8_err_no_mem = 0x81, + es58x_ret_u8_err_bad_crc = 0x99 +}; + +/* enum es58x_ret_u32 - device return error codes, 32 bit format. + */ +enum es58x_ret_u32 { + es58x_ret_u32_ok = 0x00000000ul, + es58x_ret_u32_err_unspecified_failure = 0x80000000ul, + es58x_ret_u32_err_no_mem = 0x80004001ul, + es58x_ret_u32_warn_param_adjusted = 0x40004000ul, + es58x_ret_u32_warn_tx_maybe_reorder = 0x40004001ul, + es58x_ret_u32_err_timedout = 0x80000008ul, + es58x_ret_u32_err_fifo_full = 0x80003002ul, + es58x_ret_u32_err_bad_config = 0x80004000ul, + es58x_ret_u32_err_no_resource = 0x80004002ul +}; + +/* enum es58x_ret_type - type of the command returned by the es58x + * device. + */ +enum es58x_ret_type { + es58x_ret_type_set_bittiming, + es58x_ret_type_enable_channel, + es58x_ret_type_disable_channel, + es58x_ret_type_tx_msg, + es58x_ret_type_reset_rx, + es58x_ret_type_reset_tx, + es58x_ret_type_device_err +}; + +union es58x_urb_cmd { + struct { /* common header parts of all variants */ + __le16 sof; + u8 cmd_type; + u8 cmd_id; + } __packed; + u8 raw_cmd[0]; +}; + +/** + * struct es58x_priv - all information specific to a can channel. 
+ * @can: struct can_priv must be the first member (socket can relies + * on the fact that function netdev_priv() returns a pointer to + * a struct can_priv). + * @es58x_dev: pointer to the corresponding es58x device. + * @tx_urb: used as a buffer to concatenate the tx messages and to do + * a bulk send. please refer to es58x_start_xmit() for more + * details. + * @tx_tail: index of the oldest packet still pending for + * completion. @tx_tail & echo_skb_mask represents the beginning + * of the echo skb fifo, i.e. index of the first element. + * @tx_head: index of the next packet to be sent to the + * device. @tx_head & echo_skb_mask represents the end of the + * echo skb fifo plus one, i.e. the first free index. + * @tx_can_msg_cnt: number of messages in @tx_urb. + * @tx_can_msg_is_fd: false: all messages in @tx_urb are classical + * can, true: all messages in @tx_urb are can fd. rationale: + * es58x fd devices do not allow to mix classical can and fd can + * frames in one single bulk transmission. + * @err_passive_before_rtx_success: the es58x device might enter in a + * state in which it keeps alternating between error passive + * and active states. this counter keeps track of the number of + * error passive and if it gets bigger than + * es58x_consecutive_err_passive_max, es58x_rx_err_msg() will + * force the status to bus-off. + * @channel_idx: channel index, starts at zero. + */ +struct es58x_priv { + struct can_priv can; + struct es58x_device *es58x_dev; + struct urb *tx_urb; + + u32 tx_tail; + u32 tx_head; + + u8 tx_can_msg_cnt; + bool tx_can_msg_is_fd; + + u8 err_passive_before_rtx_success; + + u8 channel_idx; +}; + +/** + * struct es58x_parameters - constant parameters of a given hardware + * variant. + * @bittiming_const: nominal bittimming constant parameters. + * @data_bittiming_const: data bittiming constant parameters. + * @tdc_const: transmission delay compensation constant parameters. + * @bitrate_max: maximum bitrate supported by the device. 
+ * @clock: can clock parameters. + * @ctrlmode_supported: list of supported modes. please refer to + * can/netlink.h file for additional details. + * @tx_start_of_frame: magic number at the beginning of each tx urb + * command. + * @rx_start_of_frame: magic number at the beginning of each rx urb + * command. + * @tx_urb_cmd_max_len: maximum length of a tx urb command. + * @rx_urb_cmd_max_len: maximum length of a rx urb command. + * @fifo_mask: bit mask to quickly convert the tx_tail and tx_head + * field of the struct es58x_priv into echo_skb + * indexes. properties: @fifo_mask = echos_skb_max - 1 where + * echo_skb_max must be a power of two. also, echo_skb_max must + * not exceed the maximum size of the device internal tx fifo + * length. this parameter is used to control the network queue + * wake/stop logic. + * @dql_min_limit: dynamic queue limits (dql) absolute minimum limit + * of bytes allowed to be queued on this network device transmit + * queue. used by the byte queue limits (bql) to determine how + * frequently the xmit_more flag will be set to true in + * es58x_start_xmit(). set this value higher to optimize for + * throughput but be aware that it might have a negative impact + * on the latency! this value can also be set dynamically. please + * refer to documentation/abi/testing/sysfs-class-net-queues for + * more details. + * @tx_bulk_max: maximum number of tx messages that can be sent in one + * single urb packet. + * @urb_cmd_header_len: length of the urb command header. + * @rx_urb_max: number of rx urb to be allocated during device probe. + * @tx_urb_max: number of tx urb to be allocated during device probe. 
+ */ +struct es58x_parameters { + const struct can_bittiming_const *bittiming_const; + const struct can_bittiming_const *data_bittiming_const; + const struct can_tdc_const *tdc_const; + u32 bitrate_max; + struct can_clock clock; + u32 ctrlmode_supported; + u16 tx_start_of_frame; + u16 rx_start_of_frame; + u16 tx_urb_cmd_max_len; + u16 rx_urb_cmd_max_len; + u16 fifo_mask; + u16 dql_min_limit; + u8 tx_bulk_max; + u8 urb_cmd_header_len; + u8 rx_urb_max; + u8 tx_urb_max; +}; + +/** + * struct es58x_operators - function pointers used to encode/decode + * the tx/rx messages. + * @get_msg_len: get field msg_len of the urb_cmd. the offset of + * msg_len inside urb_cmd depends of the device model. + * @handle_urb_cmd: decode the urb command received from the device + * and dispatch it to the relevant sub function. + * @fill_urb_header: fill the header of urb_cmd. + * @tx_can_msg: encode a tx can message and add it to the bulk buffer + * cmd_buf of es58x_dev. + * @enable_channel: start the can channel. + * @disable_channel: stop the can channel. + * @reset_device: full reset of the device. n.b: this feature is only + * present on the es581.4. for es58x fd devices, this field is + * set to null. + * @get_timestamp: request a timestamp from the es58x device. + */ +struct es58x_operators { + u16 (*get_msg_len)(const union es58x_urb_cmd *urb_cmd); + int (*handle_urb_cmd)(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd); + void (*fill_urb_header)(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 cmd_len); + int (*tx_can_msg)(struct es58x_priv *priv, const struct sk_buff *skb); + int (*enable_channel)(struct es58x_priv *priv); + int (*disable_channel)(struct es58x_priv *priv); + int (*reset_device)(struct es58x_device *es58x_dev); + int (*get_timestamp)(struct es58x_device *es58x_dev); +}; + +/** + * struct es58x_device - all information specific to an es58x device. + * @dev: device information. + * @udev: usb device information. 
+ * @netdev: array of our can channels. + * @param: the constant parameters. + * @ops: operators. + * @rx_pipe: usb reception pipe. + * @tx_pipe: usb transmission pipe. + * @rx_urbs: anchor for received urbs. + * @tx_urbs_busy: anchor for tx urbs which were send to the device. + * @tx_urbs_idle: anchor for tx usb which are idle. this driver + * allocates the memory for the urbs during the probe. when a tx + * urb is needed, it can be taken from this anchor. the network + * queue wake/stop logic should prevent this urb from getting + * empty. please refer to es58x_get_tx_urb() for more details. + * @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle. + * @opened_channel_cnt: number of channels opened (c.f. es58x_open() + * and es58x_stop()). + * @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns() + * was called. + * @realtime_diff_ns: difference in nanoseconds between the clocks of + * the es58x device and the kernel. + * @timestamps: a temporary buffer to store the time stamps before + * feeding them to es58x_can_get_echo_skb(). can only be used + * in rx branches. + * @rx_max_packet_size: maximum length of bulk-in urb. + * @num_can_ch: number of can channel (i.e. number of elements of @netdev). + * @rx_cmd_buf_len: length of @rx_cmd_buf. + * @rx_cmd_buf: the device might split the urb commands in an + * arbitrary amount of pieces. this buffer is used to concatenate + * all those pieces. can only be used in rx branches. this field + * has to be the last one of the structure because it is has a + * flexible size (c.f. es58x_sizeof_es58x_device() function). 
+ */ +struct es58x_device { + struct device *dev; + struct usb_device *udev; + struct net_device *netdev[es58x_num_can_ch_max]; + + const struct es58x_parameters *param; + const struct es58x_operators *ops; + + int rx_pipe; + int tx_pipe; + + struct usb_anchor rx_urbs; + struct usb_anchor tx_urbs_busy; + struct usb_anchor tx_urbs_idle; + atomic_t tx_urbs_idle_cnt; + atomic_t opened_channel_cnt; + + u64 ktime_req_ns; + s64 realtime_diff_ns; + + u64 timestamps[es58x_echo_bulk_max]; + + u16 rx_max_packet_size; + u8 num_can_ch; + + u16 rx_cmd_buf_len; + union es58x_urb_cmd rx_cmd_buf; +}; + +/** + * es58x_sizeof_es58x_device() - calculate the maximum length of + * struct es58x_device. + * @es58x_dev_param: the constant parameters of the device. + * + * the length of struct es58x_device depends on the length of its last + * field: rx_cmd_buf. this macro allows to optimize the memory + * allocation. + * + * return: length of struct es58x_device. + */ +static inline size_t es58x_sizeof_es58x_device(const struct es58x_parameters + *es58x_dev_param) +{ + return offsetof(struct es58x_device, rx_cmd_buf) + + es58x_dev_param->rx_urb_cmd_max_len; +} + +static inline int __es58x_check_msg_len(const struct device *dev, + const char *stringified_msg, + size_t actual_len, size_t expected_len) +{ + if (expected_len != actual_len) { + dev_err(dev, + "length of %s is %zu but received command is %zu. ", + stringified_msg, expected_len, actual_len); + return -emsgsize; + } + return 0; +} + +/** + * es58x_check_msg_len() - check the size of a received message. + * @dev: device, used to print error messages. + * @msg: received message, must not be a pointer. + * @actual_len: length of the message as advertised in the command header. + * + * must be a macro in order to accept the different types of messages + * as an input. can be use with any of the messages which have a fixed + * length. check for an exact match of the size. 
+ * + * return: zero on success, -emsgsize if @actual_len differs from the + * expected length. + */ +#define es58x_check_msg_len(dev, msg, actual_len) \ + __es58x_check_msg_len(dev, __stringify(msg), \ + actual_len, sizeof(msg)) + +static inline int __es58x_check_msg_max_len(const struct device *dev, + const char *stringified_msg, + size_t actual_len, + size_t expected_len) +{ + if (actual_len > expected_len) { + dev_err(dev, + "maximum length for %s is %zu but received command is %zu. ", + stringified_msg, expected_len, actual_len); + return -eoverflow; + } + return 0; +} + +/** + * es58x_check_msg_max_len() - check the maximum size of a received message. + * @dev: device, used to print error messages. + * @msg: received message, must not be a pointer. + * @actual_len: length of the message as advertised in the command header. + * + * must be a macro in order to accept the different types of messages + * as an input. to be used with the messages of variable sizes. only + * check that the message is not bigger than the maximum expected + * size. + * + * return: zero on success, -eoverflow if @actual_len is greater than + * the expected length. + */ +#define es58x_check_msg_max_len(dev, msg, actual_len) \ + __es58x_check_msg_max_len(dev, __stringify(msg), \ + actual_len, sizeof(msg)) + +static inline int __es58x_msg_num_element(const struct device *dev, + const char *stringified_msg, + size_t actual_len, size_t msg_len, + size_t elem_len) +{ + size_t actual_num_elem = actual_len / elem_len; + size_t expected_num_elem = msg_len / elem_len; + + if (actual_num_elem == 0) { + dev_err(dev, + "minimum length for %s is %zu but received command is %zu. 
", + stringified_msg, elem_len, actual_len); + return -emsgsize; + } else if ((actual_len % elem_len) != 0) { + dev_err(dev, + "received command length: %zu is not a multiple of %s[0]: %zu ", + actual_len, stringified_msg, elem_len); + return -emsgsize; + } else if (actual_num_elem > expected_num_elem) { + dev_err(dev, + "array %s is supposed to have %zu elements each of size %zu... ", + stringified_msg, expected_num_elem, elem_len); + dev_err(dev, + "... but received command has %zu elements (total length %zu). ", + actual_num_elem, actual_len); + return -eoverflow; + } + return actual_num_elem; +} + +/** + * es58x_msg_num_element() - check size and give the number of + * elements in a message of array type. + * @dev: device, used to print error messages. + * @msg: received message, must be an array. + * @actual_len: length of the message as advertised in the command + * header. + * + * must be a macro in order to accept the different types of messages + * as an input. to be used on message of array type. array's element + * has to be of fixed size (else use es58x_check_msg_max_len()). check + * that the total length is an exact multiple of the length of a + * single element. + * + * return: number of elements in the array on success, -eoverflow if + * @actual_len is greater than the expected length, -emsgsize if + * @actual_len is not a multiple of a single element. + */ +#define es58x_msg_num_element(dev, msg, actual_len) \ +({ \ + size_t __elem_len = sizeof((msg)[0]) + __must_be_array(msg); \ + __es58x_msg_num_element(dev, __stringify(msg), actual_len, \ + sizeof(msg), __elem_len); \ +}) + +/** + * es58x_priv() - get the priv member and cast it to struct es58x_priv. + * @netdev: can network device. + * + * return: es58x device. 
+ */ +static inline struct es58x_priv *es58x_priv(struct net_device *netdev) +{ + return (struct es58x_priv *)netdev_priv(netdev); +} + +/** + * es58x_sizeof_urb_cmd() - calculate the maximum length of an urb + * command for a given message field name. + * @es58x_urb_cmd_type: type (either "struct es581_4_urb_cmd" or + * "struct es58x_fd_urb_cmd"). + * @msg_field: name of the message field. + * + * must be a macro in order to accept the different command types as + * an input. + * + * return: length of the urb command. + */ +#define es58x_sizeof_urb_cmd(es58x_urb_cmd_type, msg_field) \ + (offsetof(es58x_urb_cmd_type, raw_msg) \ + + sizeof_field(es58x_urb_cmd_type, msg_field) \ + + sizeof_field(es58x_urb_cmd_type, \ + reserved_for_crc16_do_not_use)) + +/** + * es58x_get_urb_cmd_len() - calculate the actual length of an urb + * command for a given message length. + * @es58x_dev: es58x device. + * @msg_len: length of the message. + * + * add the header and crc lengths to the message length. + * + * return: length of the urb command. + */ +static inline size_t es58x_get_urb_cmd_len(struct es58x_device *es58x_dev, + u16 msg_len) +{ + return es58x_dev->param->urb_cmd_header_len + msg_len + sizeof(u16); +} + +/** + * es58x_get_netdev() - get the network device. + * @es58x_dev: es58x device. + * @channel_no: the channel number as advertised in the urb command. + * @channel_idx_offset: some of the es58x starts channel numbering + * from 0 (es58x fd), others from 1 (es581.4). + * @netdev: can network device. + * + * do a sanity check on the index provided by the device. + * + * return: zero on success, -echrng if the received channel number is + * out of range and -enodev if the network device is not yet + * configured. 
+ */ +static inline int es58x_get_netdev(struct es58x_device *es58x_dev, + int channel_no, int channel_idx_offset, + struct net_device **netdev) +{ + int channel_idx = channel_no - channel_idx_offset; + + *netdev = null; + if (channel_idx < 0 || channel_idx >= es58x_dev->num_can_ch) + return -echrng; + + *netdev = es58x_dev->netdev[channel_idx]; + if (!netdev || !netif_device_present(*netdev)) + return -enodev; + + return 0; +} + +/** + * es58x_get_raw_can_id() - get the can id. + * @cf: can frame. + * + * mask the can id in order to only keep the significant bits. + * + * return: the raw value of the can id. + */ +static inline int es58x_get_raw_can_id(const struct can_frame *cf) +{ + if (cf->can_id & can_eff_flag) + return cf->can_id & can_eff_mask; + else + return cf->can_id & can_sff_mask; +} + +/** + * es58x_get_flags() - get the can flags. + * @skb: socket buffer of a can message. + * + * return: the can flag as an enum es58x_flag. + */ +static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb) +{ + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + enum es58x_flag es58x_flags = 0; + + if (cf->can_id & can_eff_flag) + es58x_flags |= es58x_flag_eff; + + if (can_is_canfd_skb(skb)) { + es58x_flags |= es58x_flag_fd_data; + if (cf->flags & canfd_brs) + es58x_flags |= es58x_flag_fd_brs; + if (cf->flags & canfd_esi) + es58x_flags |= es58x_flag_fd_esi; + } else if (cf->can_id & can_rtr_flag) + /* remote frames are only defined in classical can frames */ + es58x_flags |= es58x_flag_rtr; + + return es58x_flags; +} + +int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx, + u64 *tstamps, unsigned int pkts); +int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, + enum es58x_ret_u32 rx_cmd_ret_u32); +int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, + canid_t can_id, enum es58x_flag es58x_flags, u8 dlc); +int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, + enum 
es58x_event event, u64 timestamp); +void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp); +int es58x_rx_cmd_ret_u8(struct device *dev, enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u8 rx_cmd_ret_u8); +int es58x_rx_cmd_ret_u32(struct net_device *netdev, + enum es58x_ret_type cmd_ret_type, + enum es58x_ret_u32 rx_cmd_ret_u32); +int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, + const void *msg, u16 cmd_len, int channel_idx); + +extern const struct es58x_parameters es581_4_param; +extern const struct es58x_operators es581_4_ops; + +extern const struct es58x_parameters es58x_fd_param; +extern const struct es58x_operators es58x_fd_ops; + +#endif /* __es58x_common_h__ */
Networking
8537257874e949a59c834cecfd5a063e11b64b0b
vincent mailhol
drivers
net
can, etas_es58x, usb
can: etas_es58x: add support for etas es581.4 can usb interface
this patch adds support for the es581.4 interface from etas gmbh (https://www.etas.com/en/products/es58x.php).
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
introducing etas es58x can usb interfaces
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ']
['h', 'c', 'makefile']
5
726
4
--- diff --git a/drivers/net/can/usb/etas_es58x/makefile b/drivers/net/can/usb/etas_es58x/makefile --- a/drivers/net/can/usb/etas_es58x/makefile +++ b/drivers/net/can/usb/etas_es58x/makefile -etas_es58x-y = es58x_core.o +etas_es58x-y = es58x_core.o es581_4.o diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.c +// spdx-license-identifier: gpl-2.0 + +/* driver for etas gmbh es58x usb can(-fd) bus interfaces. + * + * file es581_4.c: adds support to etas es581.4. + * + * copyright (c) 2019 robert bosch engineering and business solutions. all rights reserved. + * copyright (c) 2020 etas k.k.. all rights reserved. + * copyright (c) 2020, 2021 vincent mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" +#include "es581_4.h" + +/** + * es581_4_sizeof_rx_tx_msg() - calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc field. + * + * even if rtr frames have actually no payload, the es58x devices + * still expect it. must be a macro in order to accept several types + * (struct es581_4_tx_can_msg and struct es581_4_rx_can_msg) as an + * input. + * + * return: length of the message. 
+ */ +#define es581_4_sizeof_rx_tx_msg(msg) \ + offsetof(typeof(msg), data[can_cc_dlc2len((msg).dlc)]) + +static u16 es581_4_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es581_4_urb_cmd.msg_len); +} + +static int es581_4_echo_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_bulk_echo_msg *bulk_echo_msg; + const struct es581_4_echo_msg *echo_msg; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len; + u32 first_packet_idx, packet_idx; + unsigned int dropped = 0; + int i, num_element, ret; + + bulk_echo_msg = &es581_4_urb_cmd->bulk_echo_msg; + msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len) - + sizeof(bulk_echo_msg->channel_no); + num_element = es58x_msg_num_element(es58x_dev->dev, + bulk_echo_msg->echo_msg, msg_len); + if (num_element <= 0) + return num_element; + + ret = es58x_get_netdev(es58x_dev, bulk_echo_msg->channel_no, + es581_4_channel_idx_offset, &netdev); + if (ret) + return ret; + + echo_msg = &bulk_echo_msg->echo_msg[0]; + first_packet_idx = get_unaligned_le32(&echo_msg->packet_idx); + packet_idx = first_packet_idx; + for (i = 0; i < num_element; i++) { + u32 tmp_idx; + + echo_msg = &bulk_echo_msg->echo_msg[i]; + tmp_idx = get_unaligned_le32(&echo_msg->packet_idx); + if (tmp_idx == packet_idx - 1) { + if (net_ratelimit()) + netdev_warn(netdev, + "received echo packet idx %u twice ", + packet_idx - 1); + dropped++; + continue; + } + if (tmp_idx != packet_idx) { + netdev_err(netdev, "echo packet idx jumped from %u to %u ", + packet_idx - 1, echo_msg->packet_idx); + return -ebadmsg; + } + + tstamps[i] = get_unaligned_le64(&echo_msg->timestamp); + packet_idx++; + } + + netdev->stats.tx_dropped += dropped; + return es58x_can_get_echo_skb(netdev, first_packet_idx, + tstamps, num_element - dropped); +} + +static int es581_4_rx_can_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + u16 
msg_len) +{ + const struct device *dev = es58x_dev->dev; + struct net_device *netdev; + int pkts, num_element, channel_no, ret; + + num_element = es58x_msg_num_element(dev, es581_4_urb_cmd->rx_can_msg, + msg_len); + if (num_element <= 0) + return num_element; + + channel_no = es581_4_urb_cmd->rx_can_msg[0].channel_no; + ret = es58x_get_netdev(es58x_dev, channel_no, + es581_4_channel_idx_offset, &netdev); + if (ret) + return ret; + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets ", + __func__, netdev->name, num_element); + netdev->stats.rx_dropped += num_element; + return 0; + } + + for (pkts = 0; pkts < num_element; pkts++) { + const struct es581_4_rx_can_msg *rx_can_msg = + &es581_4_urb_cmd->rx_can_msg[pkts]; + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + + if (channel_no != rx_can_msg->channel_no) + return -ebadmsg; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, + rx_can_msg->dlc); + if (ret) + break; + } + + return ret; +} + +static int es581_4_rx_err_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_err_msg *rx_err_msg) +{ + struct net_device *netdev; + enum es58x_err error = get_unaligned_le32(&rx_err_msg->error); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_err_msg->channel_no, + es581_4_channel_idx_offset, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, error, 0, + get_unaligned_le64(&rx_err_msg->timestamp)); +} + +static int es581_4_rx_event_msg(struct es58x_device *es58x_dev, + const struct es581_4_rx_event_msg *rx_event_msg) +{ + struct net_device *netdev; + enum es58x_event event = get_unaligned_le32(&rx_event_msg->event); + int ret; + + ret = es58x_get_netdev(es58x_dev, rx_event_msg->channel_no, + es581_4_channel_idx_offset, &netdev); + if (ret) + return ret; + + return es58x_rx_err_msg(netdev, 0, event, + 
get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es581_4_rx_cmd_ret_u32(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd, + enum es58x_ret_type ret_type) +{ + struct net_device *netdev; + const struct es581_4_rx_cmd_ret *rx_cmd_ret; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es581_4_urb_cmd->rx_cmd_ret, msg_len); + if (ret) + return ret; + + rx_cmd_ret = &es581_4_urb_cmd->rx_cmd_ret; + + ret = es58x_get_netdev(es58x_dev, rx_cmd_ret->channel_no, + es581_4_channel_idx_offset, &netdev); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, ret_type, + get_unaligned_le32(&rx_cmd_ret->rx_cmd_ret_le32)); +} + +static int es581_4_tx_ack_msg(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + struct net_device *netdev; + const struct es581_4_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es581_4_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + if (tx_ack_msg->rx_cmd_ret_u8 != es58x_ret_u8_ok) + return es58x_rx_cmd_ret_u8(es58x_dev->dev, + es58x_ret_type_tx_msg, + tx_ack_msg->rx_cmd_ret_u8); + + ret = es58x_get_netdev(es58x_dev, tx_ack_msg->channel_no, + es581_4_channel_idx_offset, &netdev); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + es58x_ret_u32_ok); +} + +static int es581_4_dispatch_rx_cmd(struct es58x_device *es58x_dev, + const struct es581_4_urb_cmd *es581_4_urb_cmd) +{ + const struct device *dev = es58x_dev->dev; + u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); + enum es581_4_rx_type rx_type = es581_4_urb_cmd->rx_can_msg[0].rx_type; + int ret = 0; + + switch (rx_type) { + case es581_4_rx_type_message: + return es581_4_rx_can_msg(es58x_dev, es581_4_urb_cmd, msg_len); + + case 
es581_4_rx_type_error: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_err_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_err_msg(es58x_dev, + &es581_4_urb_cmd->rx_err_msg); + + case es581_4_rx_type_event: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_event_msg, + msg_len); + if (ret < 0) + return ret; + return es581_4_rx_event_msg(es58x_dev, + &es581_4_urb_cmd->rx_event_msg); + + default: + dev_err(dev, "%s: unknown rx_type 0x%02x ", __func__, rx_type); + return -ebadrqc; + } +} + +static int es581_4_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es581_4_urb_cmd *es581_4_urb_cmd; + struct device *dev = es58x_dev->dev; + u16 msg_len = es581_4_get_msg_len(urb_cmd); + int ret; + + es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + if (es581_4_urb_cmd->cmd_type != es581_4_can_command_type) { + dev_err(dev, "%s: unknown command type (0x%02x) ", + __func__, es581_4_urb_cmd->cmd_type); + return -ebadrqc; + } + + switch ((enum es581_4_cmd_id)es581_4_urb_cmd->cmd_id) { + case es581_4_cmd_id_set_bittiming: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + es58x_ret_type_set_bittiming); + + case es581_4_cmd_id_enable_channel: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + es58x_ret_type_enable_channel); + + case es581_4_cmd_id_tx_msg: + return es581_4_tx_ack_msg(es58x_dev, es581_4_urb_cmd); + + case es581_4_cmd_id_rx_msg: + return es581_4_dispatch_rx_cmd(es58x_dev, es581_4_urb_cmd); + + case es581_4_cmd_id_reset_rx: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + es58x_ret_type_reset_rx); + return ret; + + case es581_4_cmd_id_reset_tx: + ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + es58x_ret_type_reset_tx); + return ret; + + case es581_4_cmd_id_disable_channel: + return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, + es58x_ret_type_disable_channel); + + case es581_4_cmd_id_timestamp: + ret = es58x_check_msg_len(dev, 
es581_4_urb_cmd->timestamp, + msg_len); + if (ret < 0) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es581_4_urb_cmd->timestamp)); + return 0; + + case es581_4_cmd_id_echo: + return es581_4_echo_msg(es58x_dev, es581_4_urb_cmd); + + case es581_4_cmd_id_device_err: + ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_cmd_ret_u8, + msg_len); + if (ret) + return ret; + return es58x_rx_cmd_ret_u8(dev, es58x_ret_type_device_err, + es581_4_urb_cmd->rx_cmd_ret_u8); + + default: + dev_warn(dev, "%s: unexpected command id: 0x%02x ", + __func__, es581_4_urb_cmd->cmd_id); + return -ebadrqc; + } +} + +static void es581_4_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + + es581_4_urb_cmd->sof = cpu_to_le16(es581_4_param.tx_start_of_frame); + es581_4_urb_cmd->cmd_type = cmd_type; + es581_4_urb_cmd->cmd_id = cmd_id; + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); +} + +static int es581_4_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es581_4_tx_can_msg *tx_can_msg; + u16 msg_len; + int ret; + + if (can_is_canfd_skb(skb)) + return -emsgsize; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 1; /* struct es581_4_bulk_tx_can_msg:num_can_msg */ + es581_4_fill_urb_header(urb_cmd, es581_4_can_command_type, + es581_4_cmd_id_tx_msg, + priv->channel_idx, msg_len); + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg = 0; + } else { + msg_len = es581_4_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es581_4_urb_cmd->bulk_tx_can_msg, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* fill message contents. 
*/ + tx_can_msg = (struct es581_4_tx_can_msg *) + &es581_4_urb_cmd->bulk_tx_can_msg.tx_can_msg_buf[msg_len - 1]; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx); + put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags); + tx_can_msg->channel_no = priv->channel_idx + es581_4_channel_idx_offset; + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* calculate new sizes. */ + es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg++; + msg_len += es581_4_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); + + return 0; +} + +static int es581_4_set_bittiming(struct es58x_priv *priv) +{ + struct es581_4_tx_conf_msg tx_conf_msg = { 0 }; + struct can_bittiming *bt = &priv->can.bittiming; + + tx_conf_msg.bitrate = cpu_to_le32(bt->bitrate); + /* bt->sample_point is in tenth of percent. convert it to percent. 
*/ + tx_conf_msg.sample_point = cpu_to_le32(bt->sample_point / 10u); + tx_conf_msg.samples_per_bit = cpu_to_le32(es58x_samples_per_bit_one); + tx_conf_msg.bit_time = cpu_to_le32(can_bit_time(bt)); + tx_conf_msg.sjw = cpu_to_le32(bt->sjw); + tx_conf_msg.sync_edge = cpu_to_le32(es58x_sync_edge_single); + tx_conf_msg.physical_layer = + cpu_to_le32(es58x_physical_layer_high_speed); + tx_conf_msg.echo_mode = cpu_to_le32(es58x_echo_on); + tx_conf_msg.channel_no = priv->channel_idx + es581_4_channel_idx_offset; + + return es58x_send_msg(priv->es58x_dev, es581_4_can_command_type, + es581_4_cmd_id_set_bittiming, &tx_conf_msg, + sizeof(tx_conf_msg), priv->channel_idx); +} + +static int es581_4_enable_channel(struct es58x_priv *priv) +{ + int ret; + u8 msg = priv->channel_idx + es581_4_channel_idx_offset; + + ret = es581_4_set_bittiming(priv); + if (ret) + return ret; + + return es58x_send_msg(priv->es58x_dev, es581_4_can_command_type, + es581_4_cmd_id_enable_channel, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_disable_channel(struct es58x_priv *priv) +{ + u8 msg = priv->channel_idx + es581_4_channel_idx_offset; + + return es58x_send_msg(priv->es58x_dev, es581_4_can_command_type, + es581_4_cmd_id_disable_channel, &msg, sizeof(msg), + priv->channel_idx); +} + +static int es581_4_reset_device(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, es581_4_can_command_type, + es581_4_cmd_id_reset_device, + es58x_empty_msg, 0, es58x_channel_idx_na); +} + +static int es581_4_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, es581_4_can_command_type, + es581_4_cmd_id_timestamp, + es58x_empty_msg, 0, es58x_channel_idx_na); +} + +/* nominal bittiming constants for es581.4 as specified in the + * microcontroller datasheet: "stellaris(r) lm3s5b91 microcontroller" + * table 17-4 "can protocol ranges" from texas instruments. 
+ */ +static const struct can_bittiming_const es581_4_bittiming_const = { + .name = "es581.4", + .tseg1_min = 1, + .tseg1_max = 8, + .tseg2_min = 1, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 128, + .brp_inc = 1 +}; + +const struct es58x_parameters es581_4_param = { + .bittiming_const = &es581_4_bittiming_const, + .data_bittiming_const = null, + .tdc_const = null, + .bitrate_max = 1 * can_mbps, + .clock = {.freq = 50 * can_mhz}, + .ctrlmode_supported = can_ctrlmode_cc_len8_dlc, + .tx_start_of_frame = 0xafaf, + .rx_start_of_frame = 0xfafa, + .tx_urb_cmd_max_len = es581_4_tx_urb_cmd_max_len, + .rx_urb_cmd_max_len = es581_4_rx_urb_cmd_max_len, + /* size of internal device tx queue is 330. + * + * however, we witnessed some es58x_err_prot_crc errors from + * the device and thus, echo_skb_max was lowered to the + * empirical value of 75 which seems stable and then rounded + * down to become a power of two. + * + * root cause of those es58x_err_prot_crc errors is still + * unclear. + */ + .fifo_mask = 63, /* echo_skb_max = 64 */ + .dql_min_limit = can_frame_len_max * 50, /* empirical value. */ + .tx_bulk_max = es581_4_tx_bulk_max, + .urb_cmd_header_len = es581_4_urb_cmd_header_len, + .rx_urb_max = es58x_rx_urbs_max, + .tx_urb_max = es58x_tx_urbs_max +}; + +const struct es58x_operators es581_4_ops = { + .get_msg_len = es581_4_get_msg_len, + .handle_urb_cmd = es581_4_handle_urb_cmd, + .fill_urb_header = es581_4_fill_urb_header, + .tx_can_msg = es581_4_tx_can_msg, + .enable_channel = es581_4_enable_channel, + .disable_channel = es581_4_disable_channel, + .reset_device = es581_4_reset_device, + .get_timestamp = es581_4_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es581_4.h +/* spdx-license-identifier: gpl-2.0 */ + +/* driver for etas gmbh es58x usb can(-fd) bus interfaces. 
+ * + * file es581_4.h: definitions and declarations specific to etas + * es581.4. + * + * copyright (c) 2019 robert bosch engineering and business solutions. all rights reserved. + * copyright (c) 2020 etas k.k.. all rights reserved. + * copyright (c) 2020, 2021 vincent mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __es581_4_h__ +#define __es581_4_h__ + +#include <linux/types.h> + +#define es581_4_num_can_ch 2 +#define es581_4_channel_idx_offset 1 + +#define es581_4_tx_bulk_max 25 +#define es581_4_rx_bulk_max 30 +#define es581_4_echo_bulk_max 30 + +enum es581_4_cmd_type { + es581_4_can_command_type = 0x45 +}; + +enum es581_4_cmd_id { + es581_4_cmd_id_open_channel = 0x01, + es581_4_cmd_id_close_channel = 0x02, + es581_4_cmd_id_set_bittiming = 0x03, + es581_4_cmd_id_enable_channel = 0x04, + es581_4_cmd_id_tx_msg = 0x05, + es581_4_cmd_id_rx_msg = 0x06, + es581_4_cmd_id_reset_rx = 0x0a, + es581_4_cmd_id_reset_tx = 0x0b, + es581_4_cmd_id_disable_channel = 0x0c, + es581_4_cmd_id_timestamp = 0x0e, + es581_4_cmd_id_reset_device = 0x28, + es581_4_cmd_id_echo = 0x71, + es581_4_cmd_id_device_err = 0x72 +}; + +enum es581_4_rx_type { + es581_4_rx_type_message = 1, + es581_4_rx_type_error = 3, + es581_4_rx_type_event = 4 +}; + +/** + * struct es581_4_tx_conf_msg - channel configuration. + * @bitrate: bitrate. + * @sample_point: sample point is in percent [0..100]. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @bit_time: number of time quanta in one bit. + * @sjw: synchronization jump width. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @channel_no: channel number, starting from 1. not to be confused + * with channed_idx of the es58x fd which starts from 0. 
+ */ +struct es581_4_tx_conf_msg { + __le32 bitrate; + __le32 sample_point; + __le32 samples_per_bit; + __le32 bit_time; + __le32 sjw; + __le32 sync_edge; + __le32 physical_layer; + __le32 echo_mode; + u8 channel_no; +} __packed; + +struct es581_4_tx_can_msg { + __le32 can_id; + __le32 packet_idx; + __le16 flags; + u8 channel_no; + u8 dlc; + u8 data[can_max_dlen]; +} __packed; + +/* the es581.4 allows bulk transfer. */ +struct es581_4_bulk_tx_can_msg { + u8 num_can_msg; + /* using type "u8[]" instead of "struct es581_4_tx_can_msg[]" + * for tx_msg_buf because each member has a flexible size. + */ + u8 tx_can_msg_buf[es581_4_tx_bulk_max * + sizeof(struct es581_4_tx_can_msg)]; +} __packed; + +struct es581_4_echo_msg { + __le64 timestamp; + __le32 packet_idx; +} __packed; + +struct es581_4_bulk_echo_msg { + u8 channel_no; + struct es581_4_echo_msg echo_msg[es581_4_echo_bulk_max]; +} __packed; + +/* normal rx can message */ +struct es581_4_rx_can_msg { + __le64 timestamp; + u8 rx_type; /* type enum es581_4_rx_type */ + u8 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 dlc; + __le32 can_id; + u8 data[can_max_dlen]; +} __packed; + +struct es581_4_rx_err_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + __le16 flags; /* type enum es58x_flag */ + u8 channel_no; + u8 __padding[2]; + u8 dlc; + __le32 tag; /* related to the can filtering. unused in this module */ + __le32 can_id; + __le32 error; /* type enum es58x_error */ + __le32 destination; /* unused in this module */ +} __packed; + +struct es581_4_rx_event_msg { + __le64 timestamp; + __le16 rx_type; /* type enum es581_4_rx_type */ + u8 channel_no; + u8 __padding; + __le32 tag; /* related to the can filtering. 
unused in this module */ + __le32 event; /* type enum es58x_event */ + __le32 destination; /* unused in this module */ +} __packed; + +struct es581_4_tx_ack_msg { + __le16 tx_free_entries; /* number of remaining free entries in the device tx queue */ + u8 channel_no; + u8 rx_cmd_ret_u8; /* type enum es58x_cmd_ret_code_u8 */ +} __packed; + +struct es581_4_rx_cmd_ret { + __le32 rx_cmd_ret_le32; + u8 channel_no; + u8 __padding[3]; +} __packed; + +/** + * struct es581_4_urb_cmd - commands received from or sent to the + * es581.4 device. + * @sof: start of frame. + * @cmd_type: command type (type: enum es581_4_cmd_type). the crc + * calculation starts at this position. + * @cmd_id: command id (type: enum es581_4_cmd_id). + * @msg_len: length of the message, excluding crc (i.e. length of the + * union). + * @tx_conf_msg: channel configuration. + * @bulk_tx_can_msg: tx messages. + * @rx_can_msg: array of rx messages. + * @bulk_echo_msg: tx message being looped back. + * @rx_err_msg: error message. + * @rx_event_msg: event message. + * @tx_ack_msg: tx acknowledgment message. + * @rx_cmd_ret: command return code. + * @timestamp: timestamp reply. + * @rx_cmd_ret_u8: rx 8 bits return code (type: enum + * es58x_cmd_ret_code_u8). + * @raw_msg: message raw payload. + * @reserved_for_crc16_do_not_use: the structure ends with a + * crc16. because the structures in above union are of variable + * lengths, we can not predict the offset of the crc in + * advance. use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. 
+ */ +struct es581_4_urb_cmd { + __le16 sof; + u8 cmd_type; + u8 cmd_id; + __le16 msg_len; + + union { + struct es581_4_tx_conf_msg tx_conf_msg; + struct es581_4_bulk_tx_can_msg bulk_tx_can_msg; + struct es581_4_rx_can_msg rx_can_msg[es581_4_rx_bulk_max]; + struct es581_4_bulk_echo_msg bulk_echo_msg; + struct es581_4_rx_err_msg rx_err_msg; + struct es581_4_rx_event_msg rx_event_msg; + struct es581_4_tx_ack_msg tx_ack_msg; + struct es581_4_rx_cmd_ret rx_cmd_ret; + __le64 timestamp; + u8 rx_cmd_ret_u8; + u8 raw_msg[0]; + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define es581_4_urb_cmd_header_len (offsetof(struct es581_4_urb_cmd, raw_msg)) +#define es581_4_tx_urb_cmd_max_len \ + es58x_sizeof_urb_cmd(struct es581_4_urb_cmd, bulk_tx_can_msg) +#define es581_4_rx_urb_cmd_max_len \ + es58x_sizeof_urb_cmd(struct es581_4_urb_cmd, rx_can_msg) + +#endif /* __es581_4_h__ */ diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c +#define es581_4_product_id 0x0159 + /* etas gmbh es581.4 usb dual-channel can bus interface module. */ + usb_device(es58x_vendor_id, es581_4_product_id), + .driver_info = es58x_dual_channel + }, { - return -enodev; - /* place holder for es581_4 glue code. */ + param = &es581_4_param; + ops = &es581_4_ops; diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h +#include "es581_4.h" + -#define es58x_max(param) 0 +#define es58x_max(param) (es581_4_##param) + struct es581_4_urb_cmd es581_4_urb_cmd;
Networking
1dfb6005a60b13d18aacf190b853bf6f89d31820
vincent mailhol
drivers
net
can, etas_es58x, usb
can: etas_es58x: add support for the etas es58x_fd can usb interfaces
this patch add support for the es582.1 and es584.1 interfaces from etas gmbh (https://www.etas.com/en/products/es58x.php).
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
introducing etas es58x can usb interfaces
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ']
['h', 'c', 'makefile']
5
828
4
--- diff --git a/drivers/net/can/usb/etas_es58x/makefile b/drivers/net/can/usb/etas_es58x/makefile --- a/drivers/net/can/usb/etas_es58x/makefile +++ b/drivers/net/can/usb/etas_es58x/makefile -etas_es58x-y = es58x_core.o es581_4.o +etas_es58x-y = es58x_core.o es581_4.o es58x_fd.o diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c +#define es582_1_product_id 0x0168 +#define es584_1_product_id 0x0169 + +/* es58x fd has some interface protocols unsupported by this driver. */ +#define es58x_fd_interface_protocol 0 + }, { + /* etas gmbh es582.1 usb dual-channel can fd bus interface module. */ + usb_device_interface_protocol(es58x_vendor_id, es582_1_product_id, + es58x_fd_interface_protocol), + .driver_info = es58x_dual_channel | es58x_fd_family + }, { + /* etas gmbh es584.1 usb single-channel can fd bus interface module. */ + usb_device_interface_protocol(es58x_vendor_id, es584_1_product_id, + es58x_fd_interface_protocol), + .driver_info = es58x_fd_family - return -enodev; - /* place holder for es58x_fd glue code. */ + param = &es58x_fd_param; + ops = &es58x_fd_ops; diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h --- a/drivers/net/can/usb/etas_es58x/es58x_core.h +++ b/drivers/net/can/usb/etas_es58x/es58x_core.h +#include "es58x_fd.h" -#define es58x_max(param) (es581_4_##param) +#define es58x_max(param) \ + (es581_4_##param > es58x_fd_##param ? \ + es581_4_##param : es58x_fd_##param) + struct es58x_fd_urb_cmd es58x_fd_urb_cmd; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c +// spdx-license-identifier: gpl-2.0 + +/* driver for etas gmbh es58x usb can(-fd) bus interfaces. 
+ * + * file es58x_fd.c: adds support to etas es582.1 and es584.1 (naming + * convention: we use the term "es58x fd" when referring to those two + * variants together). + * + * copyright (c) 2019 robert bosch engineering and business solutions. all rights reserved. + * copyright (c) 2020 etas k.k.. all rights reserved. + * copyright (c) 2020, 2021 vincent mailhol <mailhol.vincent@wanadoo.fr> + */ + +#include <linux/kernel.h> +#include <asm/unaligned.h> + +#include "es58x_core.h" +#include "es58x_fd.h" + +/** + * es58x_fd_sizeof_rx_tx_msg() - calculate the actual length of the + * structure of a rx or tx message. + * @msg: message of variable length, must have a dlc and a len fields. + * + * even if rtr frames have actually no payload, the es58x devices + * still expect it. must be a macro in order to accept several types + * (struct es58x_fd_tx_can_msg and struct es58x_fd_rx_can_msg) as an + * input. + * + * return: length of the message. + */ +#define es58x_fd_sizeof_rx_tx_msg(msg) \ +({ \ + typeof(msg) __msg = (msg); \ + size_t __msg_len; \ + \ + if (__msg.flags & es58x_flag_fd_data) \ + __msg_len = canfd_sanitize_len(__msg.len); \ + else \ + __msg_len = can_cc_dlc2len(__msg.dlc); \ + \ + offsetof(typeof(__msg), data[__msg_len]); \ +}) + +static enum es58x_fd_cmd_type es58x_fd_cmd_type(struct net_device *netdev) +{ + u32 ctrlmode = es58x_priv(netdev)->can.ctrlmode; + + if (ctrlmode & (can_ctrlmode_fd | can_ctrlmode_fd_non_iso)) + return es58x_fd_cmd_type_canfd; + else + return es58x_fd_cmd_type_can; +} + +static u16 es58x_fd_get_msg_len(const union es58x_urb_cmd *urb_cmd) +{ + return get_unaligned_le16(&urb_cmd->es58x_fd_urb_cmd.msg_len); +} + +static int es58x_fd_echo_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_priv *priv = es58x_priv(netdev); + const struct es58x_fd_echo_msg *echo_msg; + struct es58x_device *es58x_dev = priv->es58x_dev; + u64 *tstamps = es58x_dev->timestamps; + u16 msg_len = 
get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int i, num_element; + u32 rcv_packet_idx; + + const u32 mask = genmask(31, sizeof(echo_msg->packet_idx) * 8); + + num_element = es58x_msg_num_element(es58x_dev->dev, + es58x_fd_urb_cmd->echo_msg, + msg_len); + if (num_element < 0) + return num_element; + echo_msg = es58x_fd_urb_cmd->echo_msg; + + rcv_packet_idx = (priv->tx_tail & mask) | echo_msg[0].packet_idx; + for (i = 0; i < num_element; i++) { + if ((u8)rcv_packet_idx != echo_msg[i].packet_idx) { + netdev_err(netdev, "packet idx jumped from %u to %u ", + (u8)rcv_packet_idx - 1, + echo_msg[i].packet_idx); + return -ebadmsg; + } + + tstamps[i] = get_unaligned_le64(&echo_msg[i].timestamp); + rcv_packet_idx++; + } + + return es58x_can_get_echo_skb(netdev, priv->tx_tail, tstamps, num_element); +} + +static int es58x_fd_rx_can_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const u8 *rx_can_msg_buf = es58x_fd_urb_cmd->rx_can_msg_buf; + u16 rx_can_msg_buf_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int pkts, ret; + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_can_msg_buf, + rx_can_msg_buf_len); + if (ret) + return ret; + + for (pkts = 0; rx_can_msg_buf_len > 0; pkts++) { + const struct es58x_fd_rx_can_msg *rx_can_msg = + (const struct es58x_fd_rx_can_msg *)rx_can_msg_buf; + bool is_can_fd = !!(rx_can_msg->flags & es58x_flag_fd_data); + /* rx_can_msg_len is the length of the rx_can_msg + * buffer. not to be confused with rx_can_msg->len + * which is the length of the can payload + * rx_can_msg->data. 
+ */ + u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg); + + if (rx_can_msg_len > rx_can_msg_buf_len) { + netdev_err(netdev, + "%s: expected a rx_can_msg of size %d but only %d bytes are left in rx_can_msg_buf ", + __func__, + rx_can_msg_len, rx_can_msg_buf_len); + return -emsgsize; + } + if (rx_can_msg->len > canfd_max_dlen) { + netdev_err(netdev, + "%s: data length is %d but maximum should be %d ", + __func__, rx_can_msg->len, canfd_max_dlen); + return -emsgsize; + } + + if (netif_running(netdev)) { + u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); + canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); + u8 dlc; + + if (is_can_fd) + dlc = can_fd_len2dlc(rx_can_msg->len); + else + dlc = rx_can_msg->dlc; + + ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, + can_id, rx_can_msg->flags, dlc); + if (ret) + break; + } + + rx_can_msg_buf_len -= rx_can_msg_len; + rx_can_msg_buf += rx_can_msg_len; + } + + if (!netif_running(netdev)) { + if (net_ratelimit()) + netdev_info(netdev, + "%s: %s is down, dropping %d rx packets ", + __func__, netdev->name, pkts); + netdev->stats.rx_dropped += pkts; + } + + return ret; +} + +static int es58x_fd_rx_event_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + const struct es58x_fd_rx_event_msg *rx_event_msg; + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); + if (ret) + return ret; + + rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; + + return es58x_rx_err_msg(netdev, rx_event_msg->error_code, + rx_event_msg->event_code, + get_unaligned_le64(&rx_event_msg->timestamp)); +} + +static int es58x_fd_rx_cmd_ret_u32(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd, + enum es58x_ret_type cmd_ret_type) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + u16 msg_len = 
get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->rx_cmd_ret_le32, msg_len); + if (ret) + return ret; + + return es58x_rx_cmd_ret_u32(netdev, cmd_ret_type, + get_unaligned_le32(&es58x_fd_urb_cmd->rx_cmd_ret_le32)); +} + +static int es58x_fd_tx_ack_msg(struct net_device *netdev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; + const struct es58x_fd_tx_ack_msg *tx_ack_msg; + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + tx_ack_msg = &es58x_fd_urb_cmd->tx_ack_msg; + ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); + if (ret) + return ret; + + return es58x_tx_ack_msg(netdev, + get_unaligned_le16(&tx_ack_msg->tx_free_entries), + get_unaligned_le32(&tx_ack_msg->rx_cmd_ret_le32)); +} + +static int es58x_fd_can_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + struct net_device *netdev; + int ret; + + ret = es58x_get_netdev(es58x_dev, es58x_fd_urb_cmd->channel_idx, + es58x_fd_channel_idx_offset, &netdev); + if (ret) + return ret; + + switch ((enum es58x_fd_can_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case es58x_fd_can_cmd_id_enable_channel: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + es58x_ret_type_enable_channel); + + case es58x_fd_can_cmd_id_disable_channel: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + es58x_ret_type_disable_channel); + + case es58x_fd_can_cmd_id_tx_msg: + return es58x_fd_tx_ack_msg(netdev, es58x_fd_urb_cmd); + + case es58x_fd_can_cmd_id_echo_msg: + return es58x_fd_echo_msg(netdev, es58x_fd_urb_cmd); + + case es58x_fd_can_cmd_id_rx_msg: + return es58x_fd_rx_can_msg(netdev, es58x_fd_urb_cmd); + + case es58x_fd_can_cmd_id_reset_rx: + return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + es58x_ret_type_reset_rx); + + case es58x_fd_can_cmd_id_reset_tx: + return 
es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, + es58x_ret_type_reset_tx); + + case es58x_fd_can_cmd_id_error_or_event_msg: + return es58x_fd_rx_event_msg(netdev, es58x_fd_urb_cmd); + + default: + return -ebadrqc; + } +} + +static int es58x_fd_device_cmd_id(struct es58x_device *es58x_dev, + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) +{ + u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); + int ret; + + switch ((enum es58x_fd_dev_cmd_id)es58x_fd_urb_cmd->cmd_id) { + case es58x_fd_dev_cmd_id_timestamp: + ret = es58x_check_msg_len(es58x_dev->dev, + es58x_fd_urb_cmd->timestamp, msg_len); + if (ret) + return ret; + es58x_rx_timestamp(es58x_dev, + get_unaligned_le64(&es58x_fd_urb_cmd->timestamp)); + return 0; + + default: + return -ebadrqc; + } +} + +static int es58x_fd_handle_urb_cmd(struct es58x_device *es58x_dev, + const union es58x_urb_cmd *urb_cmd) +{ + const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd; + int ret; + + es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + switch ((enum es58x_fd_cmd_type)es58x_fd_urb_cmd->cmd_type) { + case es58x_fd_cmd_type_can: + case es58x_fd_cmd_type_canfd: + ret = es58x_fd_can_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + case es58x_fd_cmd_type_device: + ret = es58x_fd_device_cmd_id(es58x_dev, es58x_fd_urb_cmd); + break; + + default: + ret = -ebadrqc; + break; + } + + if (ret == -ebadrqc) + dev_err(es58x_dev->dev, + "%s: unknown command type (0x%02x) and command id (0x%02x) combination ", + __func__, es58x_fd_urb_cmd->cmd_type, + es58x_fd_urb_cmd->cmd_id); + + return ret; +} + +static void es58x_fd_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, + u8 cmd_id, u8 channel_idx, u16 msg_len) +{ + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + + es58x_fd_urb_cmd->sof = cpu_to_le16(es58x_fd_param.tx_start_of_frame); + es58x_fd_urb_cmd->cmd_type = cmd_type; + es58x_fd_urb_cmd->cmd_id = cmd_id; + es58x_fd_urb_cmd->channel_idx = channel_idx; + es58x_fd_urb_cmd->msg_len = 
cpu_to_le16(msg_len); +} + +static int es58x_fd_tx_can_msg(struct es58x_priv *priv, + const struct sk_buff *skb) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; + struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; + struct can_frame *cf = (struct can_frame *)skb->data; + struct es58x_fd_tx_can_msg *tx_can_msg; + bool is_fd = can_is_canfd_skb(skb); + u16 msg_len; + int ret; + + if (priv->tx_can_msg_cnt == 0) { + msg_len = 0; + es58x_fd_fill_urb_header(urb_cmd, + is_fd ? es58x_fd_cmd_type_canfd + : es58x_fd_cmd_type_can, + es58x_fd_can_cmd_id_tx_msg_no_ack, + priv->channel_idx, msg_len); + } else { + msg_len = es58x_fd_get_msg_len(urb_cmd); + } + + ret = es58x_check_msg_max_len(es58x_dev->dev, + es58x_fd_urb_cmd->tx_can_msg_buf, + msg_len + sizeof(*tx_can_msg)); + if (ret) + return ret; + + /* fill message contents. */ + tx_can_msg = (struct es58x_fd_tx_can_msg *) + &es58x_fd_urb_cmd->tx_can_msg_buf[msg_len]; + tx_can_msg->packet_idx = (u8)priv->tx_head; + put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); + tx_can_msg->flags = (u8)es58x_get_flags(skb); + if (is_fd) + tx_can_msg->len = cf->len; + else + tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); + memcpy(tx_can_msg->data, cf->data, cf->len); + + /* calculate new sizes */ + msg_len += es58x_fd_sizeof_rx_tx_msg(*tx_can_msg); + priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, + msg_len); + put_unaligned_le16(msg_len, &es58x_fd_urb_cmd->msg_len); + + return 0; +} + +static void es58x_fd_convert_bittiming(struct es58x_fd_bittiming *es58x_fd_bt, + struct can_bittiming *bt) +{ + /* the actual value set in the hardware registers is one less + * than the functional value. 
+ */ + const int offset = 1; + + es58x_fd_bt->bitrate = cpu_to_le32(bt->bitrate); + es58x_fd_bt->tseg1 = + cpu_to_le16(bt->prop_seg + bt->phase_seg1 - offset); + es58x_fd_bt->tseg2 = cpu_to_le16(bt->phase_seg2 - offset); + es58x_fd_bt->brp = cpu_to_le16(bt->brp - offset); + es58x_fd_bt->sjw = cpu_to_le16(bt->sjw - offset); +} + +static int es58x_fd_enable_channel(struct es58x_priv *priv) +{ + struct es58x_device *es58x_dev = priv->es58x_dev; + struct net_device *netdev = es58x_dev->netdev[priv->channel_idx]; + struct es58x_fd_tx_conf_msg tx_conf_msg = { 0 }; + u32 ctrlmode; + size_t conf_len = 0; + + es58x_fd_convert_bittiming(&tx_conf_msg.nominal_bittiming, + &priv->can.bittiming); + ctrlmode = priv->can.ctrlmode; + + if (ctrlmode & can_ctrlmode_3_samples) + tx_conf_msg.samples_per_bit = es58x_samples_per_bit_three; + else + tx_conf_msg.samples_per_bit = es58x_samples_per_bit_one; + tx_conf_msg.sync_edge = es58x_sync_edge_single; + tx_conf_msg.physical_layer = es58x_physical_layer_high_speed; + tx_conf_msg.echo_mode = es58x_echo_on; + if (ctrlmode & can_ctrlmode_listenonly) + tx_conf_msg.ctrlmode |= es58x_fd_ctrlmode_passive; + else + tx_conf_msg.ctrlmode |= es58x_fd_ctrlmode_active; + + if (ctrlmode & can_ctrlmode_fd_non_iso) { + tx_conf_msg.ctrlmode |= es58x_fd_ctrlmode_fd_non_iso; + tx_conf_msg.canfd_enabled = 1; + } else if (ctrlmode & can_ctrlmode_fd) { + tx_conf_msg.ctrlmode |= es58x_fd_ctrlmode_fd; + tx_conf_msg.canfd_enabled = 1; + } + + if (tx_conf_msg.canfd_enabled) { + es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, + &priv->can.data_bittiming); + + if (priv->can.tdc.tdco) { + tx_conf_msg.tdc_enabled = 1; + tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco); + tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf); + } + + conf_len = es58x_fd_canfd_conf_len; + } else { + conf_len = es58x_fd_can_conf_len; + } + + return es58x_send_msg(es58x_dev, es58x_fd_cmd_type(netdev), + es58x_fd_can_cmd_id_enable_channel, + &tx_conf_msg, conf_len, 
priv->channel_idx); +} + +static int es58x_fd_disable_channel(struct es58x_priv *priv) +{ + /* the type (es58x_fd_cmd_type_can or es58x_fd_cmd_type_canfd) does + * not matter here. + */ + return es58x_send_msg(priv->es58x_dev, es58x_fd_cmd_type_can, + es58x_fd_can_cmd_id_disable_channel, + es58x_empty_msg, 0, priv->channel_idx); +} + +static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev) +{ + return es58x_send_msg(es58x_dev, es58x_fd_cmd_type_device, + es58x_fd_dev_cmd_id_timestamp, es58x_empty_msg, + 0, es58x_channel_idx_na); +} + +/* nominal bittiming constants for es582.1 and es584.1 as specified in + * the microcontroller datasheet: "sam e701/s70/v70/v71 family" + * section 49.6.8 "mcan nominal bit timing and prescaler register" + * from microchip. + * + * the values from the specification are the hardware register + * values. to convert them to the functional values, all ranges were + * incremented by 1 (e.g. range [0..n-1] changed to [1..n]). + */ +static const struct can_bittiming_const es58x_fd_nom_bittiming_const = { + .name = "es582.1/es584.1", + .tseg1_min = 2, + .tseg1_max = 256, + .tseg2_min = 2, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 512, + .brp_inc = 1 +}; + +/* data bittiming constants for es582.1 and es584.1 as specified in + * the microcontroller datasheet: "sam e701/s70/v70/v71 family" + * section 49.6.4 "mcan data bit timing and prescaler register" from + * microchip. + */ +static const struct can_bittiming_const es58x_fd_data_bittiming_const = { + .name = "es582.1/es584.1", + .tseg1_min = 2, + .tseg1_max = 32, + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1 +}; + +/* transmission delay compensation constants for es582.1 and es584.1 + * as specified in the microcontroller datasheet: "sam + * e701/s70/v70/v71 family" section 49.6.15 "mcan transmitter delay + * compensation register" from microchip. 
+ */ +static const struct can_tdc_const es58x_tdc_const = { + .tdcv_max = 0, /* manual mode not supported. */ + .tdco_max = 127, + .tdcf_max = 127 +}; + +const struct es58x_parameters es58x_fd_param = { + .bittiming_const = &es58x_fd_nom_bittiming_const, + .data_bittiming_const = &es58x_fd_data_bittiming_const, + .tdc_const = &es58x_tdc_const, + /* the devices use nxp tja1044g transievers which guarantee + * the timing for data rates up to 5 mbps. bitrates up to 8 + * mbps work in an optimal environment but are not recommended + * for production environment. + */ + .bitrate_max = 8 * can_mbps, + .clock = {.freq = 80 * can_mhz}, + .ctrlmode_supported = can_ctrlmode_loopback | can_ctrlmode_listenonly | + can_ctrlmode_3_samples | can_ctrlmode_fd | can_ctrlmode_fd_non_iso | + can_ctrlmode_cc_len8_dlc, + .tx_start_of_frame = 0xcefa, /* face in little endian */ + .rx_start_of_frame = 0xfeca, /* cafe in little endian */ + .tx_urb_cmd_max_len = es58x_fd_tx_urb_cmd_max_len, + .rx_urb_cmd_max_len = es58x_fd_rx_urb_cmd_max_len, + /* size of internal device tx queue is 500. + * + * however, when reaching value around 278, the device's busy + * led turns on and thus maximum value of 500 is never reached + * in practice. also, when this value is too high, some error + * on the echo_msg were witnessed when the device is + * recovering from bus off. + * + * for above reasons, a value that would prevent the device + * from becoming busy was chosen. in practice, bql would + * prevent the value from even getting closer to below + * maximum, so no impact on performance was measured. + */ + .fifo_mask = 255, /* echo_skb_max = 256 */ + .dql_min_limit = can_frame_len_max * 15, /* empirical value. 
*/ + .tx_bulk_max = es58x_fd_tx_bulk_max, + .urb_cmd_header_len = es58x_fd_urb_cmd_header_len, + .rx_urb_max = es58x_rx_urbs_max, + .tx_urb_max = es58x_tx_urbs_max +}; + +const struct es58x_operators es58x_fd_ops = { + .get_msg_len = es58x_fd_get_msg_len, + .handle_urb_cmd = es58x_fd_handle_urb_cmd, + .fill_urb_header = es58x_fd_fill_urb_header, + .tx_can_msg = es58x_fd_tx_can_msg, + .enable_channel = es58x_fd_enable_channel, + .disable_channel = es58x_fd_disable_channel, + .reset_device = null, /* not implemented in the device firmware. */ + .get_timestamp = es58x_fd_get_timestamp +}; diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h --- /dev/null +++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h +/* spdx-license-identifier: gpl-2.0 */ + +/* driver for etas gmbh es58x usb can(-fd) bus interfaces. + * + * file es58x_fd.h: definitions and declarations specific to etas + * es582.1 and es584.1 (naming convention: we use the term "es58x fd" + * when referring to those two variants together). + * + * copyright (c) 2019 robert bosch engineering and business solutions. all rights reserved. + * copyright (c) 2020 etas k.k.. all rights reserved. + * copyright (c) 2020, 2021 vincent mailhol <mailhol.vincent@wanadoo.fr> + */ + +#ifndef __es58x_fd_h__ +#define __es58x_fd_h__ + +#include <linux/types.h> + +#define es582_1_num_can_ch 2 +#define es584_1_num_can_ch 1 +#define es58x_fd_num_can_ch 2 +#define es58x_fd_channel_idx_offset 0 + +#define es58x_fd_tx_bulk_max 100 +#define es58x_fd_rx_bulk_max 100 +#define es58x_fd_echo_bulk_max 100 + +enum es58x_fd_cmd_type { + es58x_fd_cmd_type_can = 0x03, + es58x_fd_cmd_type_canfd = 0x04, + es58x_fd_cmd_type_device = 0xff +}; + +/* command ids for es58x_fd_cmd_type_{can,canfd}. 
*/ +enum es58x_fd_can_cmd_id { + es58x_fd_can_cmd_id_enable_channel = 0x01, + es58x_fd_can_cmd_id_disable_channel = 0x02, + es58x_fd_can_cmd_id_tx_msg = 0x05, + es58x_fd_can_cmd_id_echo_msg = 0x07, + es58x_fd_can_cmd_id_rx_msg = 0x10, + es58x_fd_can_cmd_id_error_or_event_msg = 0x11, + es58x_fd_can_cmd_id_reset_rx = 0x20, + es58x_fd_can_cmd_id_reset_tx = 0x21, + es58x_fd_can_cmd_id_tx_msg_no_ack = 0x55 +}; + +/* command ids for es58x_fd_cmd_type_device. */ +enum es58x_fd_dev_cmd_id { + es58x_fd_dev_cmd_id_gettimeticks = 0x01, + es58x_fd_dev_cmd_id_timestamp = 0x02 +}; + +/** + * enum es58x_fd_ctrlmode - controller mode. + * @es58x_fd_ctrlmode_active: send and receive messages. + * @es58x_fd_ctrlmode_passive: only receive messages (monitor). do not + * send anything, not even the acknowledgment bit. + * @es58x_fd_ctrlmode_fd: can fd according to iso11898-1. + * @es58x_fd_ctrlmode_fd_non_iso: follow bosch can fd specification + * v1.0 + * @es58x_fd_ctrlmode_disable_protocol_exception_handling: how to + * behave when can fd reserved bit is monitored as + * dominant. (c.f. iso 11898-1:2015, section 10.4.2.4 "control + * field", paragraph "r0 bit"). 0 (not disable = enable): send + * error frame. 1 (disable): goes into bus integration mode + * (c.f. below). + * @es58x_fd_ctrlmode_edge_filter_during_bus_integration: 0: edge + * filtering is disabled. 1: edge filtering is enabled. two + * consecutive dominant bits required to detect an edge for hard + * synchronization. 
+ */ +enum es58x_fd_ctrlmode { + es58x_fd_ctrlmode_active = 0, + es58x_fd_ctrlmode_passive = bit(0), + es58x_fd_ctrlmode_fd = bit(4), + es58x_fd_ctrlmode_fd_non_iso = bit(5), + es58x_fd_ctrlmode_disable_protocol_exception_handling = bit(6), + es58x_fd_ctrlmode_edge_filter_during_bus_integration = bit(7) +}; + +struct es58x_fd_bittiming { + __le32 bitrate; + __le16 tseg1; /* range: [tseg1_min-1..tseg1_max-1] */ + __le16 tseg2; /* range: [tseg2_min-1..tseg2_max-1] */ + __le16 brp; /* range: [brp_min-1..brp_max-1] */ + __le16 sjw; /* range: [0..sjw_max-1] */ +} __packed; + +/** + * struct es58x_fd_tx_conf_msg - channel configuration. + * @nominal_bittiming: nominal bittiming. + * @samples_per_bit: type enum es58x_samples_per_bit. + * @sync_edge: type enum es58x_sync_edge. + * @physical_layer: type enum es58x_physical_layer. + * @echo_mode: type enum es58x_echo_mode. + * @ctrlmode: type enum es58x_fd_ctrlmode. + * @canfd_enabled: boolean (0: classical can, 1: can and/or canfd). + * @data_bittiming: bittiming for flexible data-rate transmission. + * @tdc_enabled: transmitter delay compensation switch (0: disabled, + * 1: enabled). on very high bitrates, the delay between when the + * bit is sent and received on the cantx and canrx pins of the + * transceiver start to be significant enough for errors to occur + * and thus need to be compensated. + * @tdco: transmitter delay compensation offset. offset value, in time + * quanta, defining the delay between the start of the bit + * reception on the canrx pin of the transceiver and the ssp + * (secondary sample point). valid values: 0 to 127. + * @tdcf: transmitter delay compensation filter window. defines the + * minimum value for the ssp position, in time quanta. the + * feature is enabled when tdcf is configured to a value greater + * than tdco. valid values: 0 to 127. 
+ * + * please refer to the microcontroller datasheet: "sam + * e701/s70/v70/v71 family" section 49 "controller area network + * (mcan)" for additional information. + */ +struct es58x_fd_tx_conf_msg { + struct es58x_fd_bittiming nominal_bittiming; + u8 samples_per_bit; + u8 sync_edge; + u8 physical_layer; + u8 echo_mode; + u8 ctrlmode; + u8 canfd_enabled; + struct es58x_fd_bittiming data_bittiming; + u8 tdc_enabled; + __le16 tdco; + __le16 tdcf; +} __packed; + +#define es58x_fd_can_conf_len \ + (offsetof(struct es58x_fd_tx_conf_msg, canfd_enabled)) +#define es58x_fd_canfd_conf_len (sizeof(struct es58x_fd_tx_conf_msg)) + +struct es58x_fd_tx_can_msg { + u8 packet_idx; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* only if cmd_id is es58x_fd_cmd_type_can */ + u8 len; /* only if cmd_id is es58x_fd_cmd_type_canfd */ + } __packed; + u8 data[canfd_max_dlen]; +} __packed; + +#define es58x_fd_can_tx_len \ + (offsetof(struct es58x_fd_tx_can_msg, data[can_max_dlen])) +#define es58x_fd_canfd_tx_len (sizeof(struct es58x_fd_tx_can_msg)) + +struct es58x_fd_rx_can_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; + union { + u8 dlc; /* only if cmd_id is es58x_fd_cmd_type_can */ + u8 len; /* only if cmd_id is es58x_fd_cmd_type_canfd */ + } __packed; + u8 data[canfd_max_dlen]; +} __packed; + +#define es58x_fd_can_rx_len \ + (offsetof(struct es58x_fd_rx_can_msg, data[can_max_dlen])) +#define es58x_fd_canfd_rx_len (sizeof(struct es58x_fd_rx_can_msg)) + +struct es58x_fd_echo_msg { + __le64 timestamp; + u8 packet_idx; +} __packed; + +struct es58x_fd_rx_event_msg { + __le64 timestamp; + __le32 can_id; + u8 flags; /* type enum es58x_flag */ + u8 error_type; /* 0: event, 1: error */ + u8 error_code; + u8 event_code; +} __packed; + +struct es58x_fd_tx_ack_msg { + __le32 rx_cmd_ret_le32; /* type enum es58x_cmd_ret_code_u32 */ + __le16 tx_free_entries; /* number of remaining free entries in the device tx queue */ +} __packed; + +/** + * struct es58x_fd_urb_cmd - commands received 
from or sent to the + * es58x fd device. + * @sof: start of frame. + * @cmd_type: command type (type: enum es58x_fd_cmd_type). the crc + * calculation starts at this position. + * @cmd_id: command id (type: enum es58x_fd_cmd_id). + * @channel_idx: channel index starting at 0. + * @msg_len: length of the message, excluding crc (i.e. length of the + * union). + * @tx_conf_msg: channel configuration. + * @tx_can_msg_buf: concatenation of tx messages. type is "u8[]" + * instead of "struct es58x_fd_tx_msg[]" because the structure + * has a flexible size. + * @rx_can_msg_buf: concatenation rx messages. type is "u8[]" instead + * of "struct es58x_fd_rx_msg[]" because the structure has a + * flexible size. + * @echo_msg: array of echo messages (e.g. tx messages being looped + * back). + * @rx_event_msg: error or event message. + * @tx_ack_msg: tx acknowledgment message. + * @timestamp: timestamp reply. + * @rx_cmd_ret_le32: rx 32 bits return code (type: enum + * es58x_cmd_ret_code_u32). + * @raw_msg: message raw payload. + * @reserved_for_crc16_do_not_use: the structure ends with a + * crc16. because the structures in above union are of variable + * lengths, we can not predict the offset of the crc in + * advance. use functions es58x_get_crc() and es58x_set_crc() to + * manipulate it. 
+ */ +struct es58x_fd_urb_cmd { + __le16 sof; + u8 cmd_type; + u8 cmd_id; + u8 channel_idx; + __le16 msg_len; + + union { + struct es58x_fd_tx_conf_msg tx_conf_msg; + u8 tx_can_msg_buf[es58x_fd_tx_bulk_max * es58x_fd_canfd_tx_len]; + u8 rx_can_msg_buf[es58x_fd_rx_bulk_max * es58x_fd_canfd_rx_len]; + struct es58x_fd_echo_msg echo_msg[es58x_fd_echo_bulk_max]; + struct es58x_fd_rx_event_msg rx_event_msg; + struct es58x_fd_tx_ack_msg tx_ack_msg; + __le64 timestamp; + __le32 rx_cmd_ret_le32; + u8 raw_msg[0]; + } __packed; + + __le16 reserved_for_crc16_do_not_use; +} __packed; + +#define es58x_fd_urb_cmd_header_len (offsetof(struct es58x_fd_urb_cmd, raw_msg)) +#define es58x_fd_tx_urb_cmd_max_len \ + es58x_sizeof_urb_cmd(struct es58x_fd_urb_cmd, tx_can_msg_buf) +#define es58x_fd_rx_urb_cmd_max_len \ + es58x_sizeof_urb_cmd(struct es58x_fd_urb_cmd, rx_can_msg_buf) + +#endif /* __es58x_fd_h__ */
Networking
c664e2137a27680922d8aeb64fb10313416b254f
vincent mailhol
drivers
net
can, etas_es58x, usb
can: mcp251xfd: add bql support
this patch re-adds bql support to the driver. support for netdev_xmit_more() will be added in a separate patch series.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add bql support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ', 'mcp251xfd']
['c']
1
18
5
--- diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c + netdev_reset_queue(priv->ndev); + - const struct mcp251xfd_hw_tef_obj *hw_tef_obj) + const struct mcp251xfd_hw_tef_obj *hw_tef_obj, + unsigned int *frame_len_ptr) - tef_tail, - hw_tef_obj->ts, null); + tef_tail, hw_tef_obj->ts, + frame_len_ptr); + unsigned int total_frame_len = 0; - err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]); + unsigned int frame_len = 0; + + err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); + + total_frame_len += frame_len; + netdev_completed_queue(priv->ndev, len, total_frame_len); + unsigned int frame_len; - can_put_echo_skb(skb, ndev, tx_head, 0); + frame_len = can_skb_get_frame_len(skb); + err = can_put_echo_skb(skb, ndev, tx_head, frame_len); + if (!err) + netdev_sent_queue(priv->ndev, frame_len);
Networking
0084e298acfe643c68daf0a316bb27618ec2a852
marc kleine budde
drivers
net
can, mcp251xfd, spi
can: mcp251xfd: add hw timestamp infrastructure
this patch add the hw timestamping infrastructure. the mcp251xfd has a free running timer of 32 bit width, running at max 40mhz, which wraps around every 107 seconds. the current timestamp is latched into rx and tef objects automatically be the can controller.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add hw timestamp infrastructure
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ', 'mcp251xfd']
['h', 'c', 'makefile']
4
88
0
--- diff --git a/drivers/net/can/spi/mcp251xfd/makefile b/drivers/net/can/spi/mcp251xfd/makefile --- a/drivers/net/can/spi/mcp251xfd/makefile +++ b/drivers/net/can/spi/mcp251xfd/makefile +mcp251xfd-objs += mcp251xfd-timestamp.o diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c + mcp251xfd_timestamp_init(priv); + mcp251xfd_timestamp_stop(priv); + mcp251xfd_timestamp_stop(priv); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c +// spdx-license-identifier: gpl-2.0 +// +// mcp251xfd - microchip mcp251xfd family can controller driver +// +// copyright (c) 2021 pengutronix, +// marc kleine-budde <kernel@pengutronix.de> +// + +#include <linux/clocksource.h> +#include <linux/workqueue.h> + +#include "mcp251xfd.h" + +static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) +{ + struct mcp251xfd_priv *priv; + u32 timestamp = 0; + int err; + + priv = container_of(cc, struct mcp251xfd_priv, cc); + err = mcp251xfd_get_timestamp(priv, &timestamp); + if (err) + netdev_err(priv->ndev, + "error %d while reading timestamp. 
hw timestamps may be inaccurate.", + err); + + return timestamp; +} + +static void mcp251xfd_timestamp_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct mcp251xfd_priv *priv; + + priv = container_of(delayed_work, struct mcp251xfd_priv, timestamp); + timecounter_read(&priv->tc); + + schedule_delayed_work(&priv->timestamp, + mcp251xfd_timestamp_work_delay_sec * hz); +} + +void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 timestamp) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, timestamp); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) +{ + struct cyclecounter *cc = &priv->cc; + + cc->read = mcp251xfd_timestamp_read; + cc->mask = cyclecounter_mask(32); + cc->shift = 1; + cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); + + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); + + init_delayed_work(&priv->timestamp, mcp251xfd_timestamp_work); + schedule_delayed_work(&priv->timestamp, + mcp251xfd_timestamp_work_delay_sec * hz); +} + +void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv) +{ + cancel_delayed_work_sync(&priv->timestamp); +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +#include <linux/timecounter.h> +#include <linux/workqueue.h> +#define mcp251xfd_timestamp_work_delay_sec 45 +static_assert(mcp251xfd_timestamp_work_delay_sec < + cyclecounter_mask(32) / mcp251xfd_sysclock_hz_max / 2); + struct cyclecounter cc; + struct timecounter tc; + struct delayed_work timestamp; + +void mcp251xfd_skb_set_timestamp(struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 timestamp); +void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); +void 
mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
Networking
efd8d98dfb900f96370cc7722ccb7959e58557c7
marc kleine budde
drivers
net
can, mcp251xfd, spi
can: mcp251xfd: add hw timestamp to rx, tx and error can frames
this patch uses the previously added mcp251xfd_skb_set_timestamp() function to convert the timestamp done by the can controller into a proper skb hw timestamp.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add hw timestamp to rx, tx and error can frames
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ', 'mcp251xfd']
['c']
1
18
5
--- diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c - u32 seq, seq_masked, tef_tail_masked; + struct sk_buff *skb; + u32 seq, seq_masked, tef_tail_masked, tef_tail; + tef_tail = mcp251xfd_get_tef_tail(priv); + skb = priv->can.echo_skb[tef_tail]; + if (skb) + mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); - mcp251xfd_get_tef_tail(priv), + tef_tail, -mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, +mcp251xfd_hw_rx_obj_to_skb(struct mcp251xfd_priv *priv, + + mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); -mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv, +mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, + struct sk_buff *skb; - return alloc_can_err_skb(priv->ndev, cf); + skb = alloc_can_err_skb(priv->ndev, cf); + if (skb) + mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); + + return skb; + mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
Networking
5f02a49c6605fbd85c00acd19a10e149bba5c162
marc kleine budde
drivers
net
can, mcp251xfd, spi
can: mcp251xfd: add dev coredump support
for easier debugging this patch adds dev coredump support to the driver. a dev coredump is generated in case the chip fails to start or an error in the interrupt handler is detected.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add dev coredump support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ', 'mcp251xfd']
['h', 'kconfig', 'c', 'makefile']
6
343
0
--- diff --git a/drivers/net/can/spi/mcp251xfd/kconfig b/drivers/net/can/spi/mcp251xfd/kconfig --- a/drivers/net/can/spi/mcp251xfd/kconfig +++ b/drivers/net/can/spi/mcp251xfd/kconfig + select want_dev_coredump diff --git a/drivers/net/can/spi/mcp251xfd/makefile b/drivers/net/can/spi/mcp251xfd/makefile --- a/drivers/net/can/spi/mcp251xfd/makefile +++ b/drivers/net/can/spi/mcp251xfd/makefile + +mcp251xfd-$(config_dev_coredump) += mcp251xfd-dump.o diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c + mcp251xfd_dump(priv); + mcp251xfd_dump(priv); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c +// spdx-license-identifier: gpl-2.0 +// +// mcp251xfd - microchip mcp251xfd family can controller driver +// +// copyright (c) 2020, 2021 pengutronix, +// marc kleine-budde <kernel@pengutronix.de> +// copyright (c) 2015-2018 etnaviv project +// + +#include <linux/devcoredump.h> + +#include "mcp251xfd.h" +#include "mcp251xfd-dump.h" + +struct mcp251xfd_dump_iter { + void *start; + struct mcp251xfd_dump_object_header *hdr; + void *data; +}; + +struct mcp251xfd_dump_reg_space { + u16 base; + u16 size; +}; + +struct mcp251xfd_dump_ring { + enum mcp251xfd_dump_object_ring_key key; + u32 val; +}; + +static const struct mcp251xfd_dump_reg_space mcp251xfd_dump_reg_space[] = { + { + .base = mcp251xfd_reg_con, + .size = mcp251xfd_reg_fltobj(32) - mcp251xfd_reg_con, + }, { + .base = mcp251xfd_ram_start, + .size = mcp251xfd_ram_size, + }, { + .base = mcp251xfd_reg_osc, + .size = mcp251xfd_reg_devid - mcp251xfd_reg_osc, + }, +}; + +static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter, + enum mcp251xfd_dump_object_type object_type, + const void *data_end) +{ + struct 
mcp251xfd_dump_object_header *hdr = iter->hdr; + unsigned int len; + + len = data_end - iter->data; + if (!len) + return; + + hdr->magic = cpu_to_le32(mcp251xfd_dump_magic); + hdr->type = cpu_to_le32(object_type); + hdr->offset = cpu_to_le32(iter->data - iter->start); + hdr->len = cpu_to_le32(len); + + iter->hdr++; + iter->data += len; +} + +static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const int val_bytes = regmap_get_val_bytes(priv->map_rx); + struct mcp251xfd_dump_object_reg *reg = iter->data; + unsigned int i, j; + int err; + + for (i = 0; i < array_size(mcp251xfd_dump_reg_space); i++) { + const struct mcp251xfd_dump_reg_space *reg_space; + void *buf; + + reg_space = &mcp251xfd_dump_reg_space[i]; + + buf = kmalloc(reg_space->size, gfp_kernel); + if (!buf) + goto out; + + err = regmap_bulk_read(priv->map_reg, reg_space->base, + buf, reg_space->size / val_bytes); + if (err) { + kfree(buf); + continue; + } + + for (j = 0; j < reg_space->size; j += sizeof(u32), reg++) { + reg->reg = cpu_to_le32(reg_space->base + j); + reg->val = cpu_to_le32p(buf + j); + } + + kfree(buf); + } + + out: + mcp251xfd_dump_header(iter, mcp251xfd_dump_object_type_reg, reg); +} + +static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter, + enum mcp251xfd_dump_object_type object_type, + const struct mcp251xfd_dump_ring *dump_ring, + unsigned int len) +{ + struct mcp251xfd_dump_object_reg *reg = iter->data; + unsigned int i; + + for (i = 0; i < len; i++, reg++) { + reg->reg = cpu_to_le32(dump_ring[i].key); + reg->val = cpu_to_le32(dump_ring[i].val); + } + + mcp251xfd_dump_header(iter, object_type, reg); +} + +static void mcp251xfd_dump_tef_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const struct mcp251xfd_tef_ring *tef = priv->tef; + const struct mcp251xfd_tx_ring *tx = priv->tx; + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = mcp251xfd_dump_object_ring_key_head, + 
.val = tef->head, + }, { + .key = mcp251xfd_dump_object_ring_key_tail, + .val = tef->tail, + }, { + .key = mcp251xfd_dump_object_ring_key_base, + .val = 0, + }, { + .key = mcp251xfd_dump_object_ring_key_nr, + .val = 0, + }, { + .key = mcp251xfd_dump_object_ring_key_fifo_nr, + .val = 0, + }, { + .key = mcp251xfd_dump_object_ring_key_obj_num, + .val = tx->obj_num, + }, { + .key = mcp251xfd_dump_object_ring_key_obj_size, + .val = sizeof(struct mcp251xfd_hw_tef_obj), + }, + }; + + mcp251xfd_dump_ring(iter, mcp251xfd_dump_object_type_tef, + dump_ring, array_size(dump_ring)); +} + +static void mcp251xfd_dump_rx_ring_one(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter, + const struct mcp251xfd_rx_ring *rx) +{ + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = mcp251xfd_dump_object_ring_key_head, + .val = rx->head, + }, { + .key = mcp251xfd_dump_object_ring_key_tail, + .val = rx->tail, + }, { + .key = mcp251xfd_dump_object_ring_key_base, + .val = rx->base, + }, { + .key = mcp251xfd_dump_object_ring_key_nr, + .val = rx->nr, + }, { + .key = mcp251xfd_dump_object_ring_key_fifo_nr, + .val = rx->fifo_nr, + }, { + .key = mcp251xfd_dump_object_ring_key_obj_num, + .val = rx->obj_num, + }, { + .key = mcp251xfd_dump_object_ring_key_obj_size, + .val = rx->obj_size, + }, + }; + + mcp251xfd_dump_ring(iter, mcp251xfd_dump_object_type_rx, + dump_ring, array_size(dump_ring)); +} + +static void mcp251xfd_dump_rx_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + struct mcp251xfd_rx_ring *rx_ring; + unsigned int i; + + mcp251xfd_for_each_rx_ring(priv, rx_ring, i) + mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring); +} + +static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + const struct mcp251xfd_tx_ring *tx = priv->tx; + const struct mcp251xfd_dump_ring dump_ring[] = { + { + .key = mcp251xfd_dump_object_ring_key_head, + .val = tx->head, + }, { + .key = 
mcp251xfd_dump_object_ring_key_tail, + .val = tx->tail, + }, { + .key = mcp251xfd_dump_object_ring_key_base, + .val = tx->base, + }, { + .key = mcp251xfd_dump_object_ring_key_nr, + .val = 0, + }, { + .key = mcp251xfd_dump_object_ring_key_fifo_nr, + .val = mcp251xfd_tx_fifo, + }, { + .key = mcp251xfd_dump_object_ring_key_obj_num, + .val = tx->obj_num, + }, { + .key = mcp251xfd_dump_object_ring_key_obj_size, + .val = tx->obj_size, + }, + }; + + mcp251xfd_dump_ring(iter, mcp251xfd_dump_object_type_tx, + dump_ring, array_size(dump_ring)); +} + +static void mcp251xfd_dump_end(const struct mcp251xfd_priv *priv, + struct mcp251xfd_dump_iter *iter) +{ + struct mcp251xfd_dump_object_header *hdr = iter->hdr; + + hdr->magic = cpu_to_le32(mcp251xfd_dump_magic); + hdr->type = cpu_to_le32(mcp251xfd_dump_object_type_end); + hdr->offset = cpu_to_le32(0); + hdr->len = cpu_to_le32(0); + + /* provoke null pointer access, if used after end object */ + iter->hdr = null; +} + +void mcp251xfd_dump(const struct mcp251xfd_priv *priv) +{ + struct mcp251xfd_dump_iter iter; + unsigned int rings_num, obj_num; + unsigned int file_size = 0; + unsigned int i; + + /* register space + end marker */ + obj_num = 2; + + /* register space */ + for (i = 0; i < array_size(mcp251xfd_dump_reg_space); i++) + file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) * + sizeof(struct mcp251xfd_dump_object_reg); + + /* tef ring, rx ring, tx rings */ + rings_num = 1 + priv->rx_ring_num + 1; + obj_num += rings_num; + file_size += rings_num * __mcp251xfd_dump_object_ring_key_max * + sizeof(struct mcp251xfd_dump_object_reg); + + /* size of the headers */ + file_size += sizeof(*iter.hdr) * obj_num; + + /* allocate the file in vmalloc memory, it's likely to be big */ + iter.start = __vmalloc(file_size, gfp_kernel | __gfp_nowarn | + __gfp_zero | __gfp_noretry); + if (!iter.start) { + netdev_warn(priv->ndev, "failed to allocate devcoredump file. 
"); + return; + } + + /* point the data member after the headers */ + iter.hdr = iter.start; + iter.data = &iter.hdr[obj_num]; + + mcp251xfd_dump_registers(priv, &iter); + mcp251xfd_dump_tef_ring(priv, &iter); + mcp251xfd_dump_rx_ring(priv, &iter); + mcp251xfd_dump_tx_ring(priv, &iter); + mcp251xfd_dump_end(priv, &iter); + + dev_coredumpv(&priv->spi->dev, iter.start, + iter.data - iter.start, gfp_kernel); +} diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h --- /dev/null +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.h +/* spdx-license-identifier: gpl-2.0 + * + * mcp251xfd - microchip mcp251xfd family can controller driver + * + * copyright (c) 2019, 2020, 2021 pengutronix, + * marc kleine-budde <kernel@pengutronix.de> + */ + +#ifndef _mcp251xfd_dump_h +#define _mcp251xfd_dump_h + +#define mcp251xfd_dump_magic 0x1825434d + +enum mcp251xfd_dump_object_type { + mcp251xfd_dump_object_type_reg, + mcp251xfd_dump_object_type_tef, + mcp251xfd_dump_object_type_rx, + mcp251xfd_dump_object_type_tx, + mcp251xfd_dump_object_type_end = -1, +}; + +enum mcp251xfd_dump_object_ring_key { + mcp251xfd_dump_object_ring_key_head, + mcp251xfd_dump_object_ring_key_tail, + mcp251xfd_dump_object_ring_key_base, + mcp251xfd_dump_object_ring_key_nr, + mcp251xfd_dump_object_ring_key_fifo_nr, + mcp251xfd_dump_object_ring_key_obj_num, + mcp251xfd_dump_object_ring_key_obj_size, + __mcp251xfd_dump_object_ring_key_max, +}; + +struct mcp251xfd_dump_object_header { + __le32 magic; + __le32 type; + __le32 offset; + __le32 len; +}; + +struct mcp251xfd_dump_object_reg { + __le32 reg; + __le32 val; +}; + +#endif diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +#if is_enabled(config_dev_coredump) +void mcp251xfd_dump(const struct mcp251xfd_priv *priv); +#else +static inline void mcp251xfd_dump(const 
struct mcp251xfd_priv *priv) +{ +} +#endif +
Networking
e0ab3dd5f98fcca95a8290578833552e496fabaf
marc kleine budde
drivers
net
can, mcp251xfd, spi
can: peak_usb: add support of one_shot mode
this patch adds "one-shot" mode support to the following can-usb peak-system gmbh interfaces: - pcan-usb x6 - pcan-usb fd - pcan-usb pro fd - pcan-chip usb - pcan-usb pro
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support of one_shot mode
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ', 'peak_usb']
['c']
2
15
5
- pcan-usb x6 - pcan-usb fd - pcan-usb pro fd - pcan-chip usb - pcan-usb pro --- diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c + /* single-shot frame */ + if (dev->can.ctrlmode & can_ctrlmode_one_shot) + tx_msg_flags |= pucan_msg_single_shot; + - can_ctrlmode_cc_len8_dlc, + can_ctrlmode_one_shot | can_ctrlmode_cc_len8_dlc, - can_ctrlmode_cc_len8_dlc, + can_ctrlmode_one_shot | can_ctrlmode_cc_len8_dlc, - can_ctrlmode_cc_len8_dlc, + can_ctrlmode_one_shot | can_ctrlmode_cc_len8_dlc, - can_ctrlmode_cc_len8_dlc, + can_ctrlmode_one_shot | can_ctrlmode_cc_len8_dlc, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +#define pcan_usbpro_ss 0x08 + /* single-shot frame */ + if (dev->can.ctrlmode & can_ctrlmode_one_shot) + flags |= pcan_usbpro_ss; + - .ctrlmode_supported = can_ctrlmode_3_samples | can_ctrlmode_listenonly, + .ctrlmode_supported = can_ctrlmode_3_samples | can_ctrlmode_listenonly | + can_ctrlmode_one_shot,
Networking
58b29aa9d47128ec6ee8fd731b0f137a82f0b9ea
stephane grosjean
drivers
net
can, peak_usb, usb
can: peak_usb: add support of ethtool set_phys_id()
this patch makes it possible to specifically flash the led of a can port of the can-usb interfaces of peak-system.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support of ethtool set_phys_id()
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['can ', 'peak_usb']
['h', 'c']
6
126
1
--- diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c +#include <linux/ethtool.h> +#define pcan_usb_cmd_led 12 +static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff) +{ + u8 args[pcan_usb_cmd_args_len] = { + [0] = !!onoff, + }; + + return pcan_usb_send_cmd(dev, pcan_usb_cmd_led, pcan_usb_set, args); +} + +static int pcan_usb_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ethtool_id_active: + /* call on/off twice a second */ + return 2; + + case ethtool_id_off: + err = pcan_usb_set_led(dev, 0); + break; + + case ethtool_id_on: + fallthrough; + + case ethtool_id_inactive: + /* restore led default */ + err = pcan_usb_set_led(dev, 1); + break; + + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_ethtool_ops = { + .set_phys_id = pcan_usb_set_phys_id, +}; + + .ethtool_ops = &pcan_usb_ethtool_ops, + diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +#include <linux/ethtool.h> + /* add ethtool support */ + netdev->ethtool_ops = peak_usb_adapter->ethtool_ops; + diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h + const struct ethtool_ops *ethtool_ops; + diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +#include <linux/ethtool.h> +/* blink led's */ +static int pcan_usb_fd_set_phys_id(struct net_device *netdev, + 
enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ethtool_id_active: + err = pcan_usb_fd_set_can_led(dev, pcan_ufd_led_fast); + break; + case ethtool_id_inactive: + err = pcan_usb_fd_set_can_led(dev, pcan_ufd_led_def); + break; + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_fd_ethtool_ops = { + .set_phys_id = pcan_usb_fd_set_phys_id, +}; + + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + + .ethtool_ops = &pcan_usb_fd_ethtool_ops, + diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +#include <linux/ethtool.h> - pcan_usb_pro_set_led(dev, 0, 1); + pcan_usb_pro_set_led(dev, pcan_usbpro_led_device, 1); +static int pcan_usb_pro_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct peak_usb_device *dev = netdev_priv(netdev); + int err = 0; + + switch (state) { + case ethtool_id_active: + /* fast blinking forever */ + err = pcan_usb_pro_set_led(dev, pcan_usbpro_led_blink_fast, + 0xffffffff); + break; + + case ethtool_id_inactive: + /* restore led default */ + err = pcan_usb_pro_set_led(dev, pcan_usbpro_led_device, 1); + break; + + default: + break; + } + + return err; +} + +static const struct ethtool_ops pcan_usb_pro_ethtool_ops = { + .set_phys_id = pcan_usb_pro_set_phys_id, +}; + + .ethtool_ops = &pcan_usb_pro_ethtool_ops, + diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h +#define pcan_usbpro_led_device 0x00 +#define pcan_usbpro_led_blink_fast 0x01 +#define pcan_usbpro_led_blink_slow 0x02 +#define pcan_usbpro_led_on 0x03 +#define 
pcan_usbpro_led_off 0x04 +
Networking
a7e8511ffda6a81ba6b49c22d0ed296caeff438c
stephane grosjean
drivers
net
can, peak_usb, usb
dpaa2-eth: add rx copybreak support
dma unmapping, allocating a new buffer and dma mapping it back on the refill path is really not that efficient. proper buffer recycling (page pool, flipping the page and using the other half) cannot be done for dpaa2 since it's not a ring based controller but it rather deals with multiple queues which all get their buffers from the same buffer pool on rx.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add rx copybreak support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-eth ']
['h', 'c']
2
36
3
--- diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + u16 fd_offset = dpaa2_fd_get_offset(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + struct sk_buff *skb = null; + unsigned int skb_len; + + if (fd_length > dpaa2_eth_default_copybreak) + return null; + + skb_len = fd_length + dpaa2_eth_needed_headroom(null); + + skb = napi_alloc_skb(&ch->napi, skb_len); + if (!skb) + return null; + + skb_reserve(skb, dpaa2_eth_needed_headroom(null)); + skb_put(skb, fd_length); + + memcpy(skb->data, fd_vaddr + fd_offset, fd_length); + + dpaa2_eth_recycle_buf(ch->priv, ch, dpaa2_fd_get_addr(fd)); + + return skb; +} + - dma_unmap_page(dev, addr, priv->rx_buf_size, - dma_bidirectional); - skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + skb = dpaa2_eth_copybreak(ch, fd, vaddr); + if (!skb) { + dma_unmap_page(dev, addr, priv->rx_buf_size, + dma_bidirectional); + skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +#define dpaa2_eth_default_copybreak 512 +
Networking
50f826999a80a100218b0cbf4f14057bc0edb3a3
ioana ciornei
drivers
net
dpaa2, ethernet, freescale
dpaa2-eth: export the rx copybreak value as an ethtool tunable
it's useful, especially for debugging purposes, to have the rx copybreak value changeable at runtime. export it as an ethtool tunable.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
export the rx copybreak value as an ethtool tunable
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-eth ']
['h', 'c']
3
47
2
--- diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c + struct dpaa2_eth_priv *priv = ch->priv; - if (fd_length > dpaa2_eth_default_copybreak) + if (fd_length > priv->rx_copybreak) - dpaa2_eth_recycle_buf(ch->priv, ch, dpaa2_fd_get_addr(fd)); + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); + priv->rx_copybreak = dpaa2_eth_default_copybreak; + diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h + + u32 rx_copybreak; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +static int dpaa2_eth_get_tunable(struct net_device *net_dev, + const struct ethtool_tunable *tuna, + void *data) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + switch (tuna->id) { + case ethtool_rx_copybreak: + *(u32 *)data = priv->rx_copybreak; + break; + default: + err = -eopnotsupp; + break; + } + + return err; +} + +static int dpaa2_eth_set_tunable(struct net_device *net_dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + switch (tuna->id) { + case ethtool_rx_copybreak: + priv->rx_copybreak = *(u32 *)data; + break; + default: + err = -eopnotsupp; + break; + } + + return err; +} + + .get_tunable = dpaa2_eth_get_tunable, + .set_tunable = dpaa2_eth_set_tunable,
Networking
8ed3cefc260e2ef2107cbd9484e4025f60c37bb5
ioana ciornei andrew lunn andrew lunn ch
drivers
net
dpaa2, ethernet, freescale
dpaa2-switch: add support for configuring learning state per port
add support for configuring the learning state of a switch port. when the user requests the hw learning to be disabled, a fast-age procedure on that specific port is run so that previously learnt addresses do not linger.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for configuring learning state per port
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-switch ']
['h', 'c']
4
121
11
--- diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + enum dpsw_learning_mode learn_mode; + int err; + + if (enable) + learn_mode = dpsw_learning_mode_hw; + else + learn_mode = dpsw_learning_mode_dis; + + err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle, + port_priv->idx, learn_mode); + if (err) + netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d ", err); + + if (!enable) + dpaa2_switch_port_fast_age(port_priv); + + return err; +} + +static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(br_learning)) + return -einval; + + return 0; +} + +static int dpaa2_switch_port_bridge_flags(struct net_device *netdev, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int err; + + if (flags.mask & br_learning) { + bool learn_ena = !!(flags.val & br_learning); + + err = dpaa2_switch_port_set_learning(port_priv, learn_ena); + if (err) + return err; + } + + return 0; +} + + case switchdev_attr_id_port_pre_bridge_flags: + err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack); + break; + case switchdev_attr_id_port_bridge_flags: + err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack); + break; + bool learn_ena; + /* inherit the initial bridge port learning state */ + learn_ena = br_port_flag_is_set(netdev, br_learning); + err = dpaa2_switch_port_set_learning(port_priv, learn_ena); + + /* no hw learning when not under a bridge */ + err = dpaa2_switch_port_set_learning(port_priv, 
false); + if (err) + return err; + + err = dpaa2_switch_port_set_learning(port_priv, false); + if (err) + goto err_port_probe; + diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h --- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +#define dpsw_cmdid_if_set_learning_mode dpsw_cmd_id(0x0ad) + +#define dpsw_learning_mode_shift 0 +#define dpsw_learning_mode_size 4 + +struct dpsw_cmd_if_set_learning_mode { + __le16 if_id; + /* only the first 4 bits from lsb */ + u8 mode; +}; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c + +/** + * dpsw_if_set_learning_mode() - configure the learning mode on an interface. + * if this api is used, it will take precedence over the fdb configuration. + * @mc_io: pointer to mc portal's i/o object + * @cmd_flags: command flags; one or more of 'mc_cmd_flag_' + * @token: token of dpsw object + * @if_id: interfaceid + * @mode: learning mode + * + * return: completion status. '0' on success; error code otherwise. 
+ */ +int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_learning_mode mode) +{ + struct dpsw_cmd_if_set_learning_mode *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(dpsw_cmdid_if_set_learning_mode, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_set_learning_mode *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpsw_set_field(cmd_params->mode, learning_mode, mode); + + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h - * enum dpsw_fdb_learning_mode - auto-learning modes - * @dpsw_fdb_learning_mode_dis: disable auto-learning - * @dpsw_fdb_learning_mode_hw: enable hw auto-learning - * @dpsw_fdb_learning_mode_non_secure: enable none secure learning by cpu - * @dpsw_fdb_learning_mode_secure: enable secure learning by cpu + * enum dpsw_learning_mode - auto-learning modes + * @dpsw_learning_mode_dis: disable auto-learning + * @dpsw_learning_mode_hw: enable hw auto-learning + * @dpsw_learning_mode_non_secure: enable none secure learning by cpu + * @dpsw_learning_mode_secure: enable secure learning by cpu -enum dpsw_fdb_learning_mode { - dpsw_fdb_learning_mode_dis = 0, - dpsw_fdb_learning_mode_hw = 1, - dpsw_fdb_learning_mode_non_secure = 2, - dpsw_fdb_learning_mode_secure = 3 +enum dpsw_learning_mode { + dpsw_learning_mode_dis = 0, + dpsw_learning_mode_hw = 1, + dpsw_learning_mode_non_secure = 2, + dpsw_learning_mode_secure = 3 - enum dpsw_fdb_learning_mode learning_mode; + enum dpsw_learning_mode learning_mode; +int dpsw_if_set_learning_mode(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, enum dpsw_learning_mode mode); +
Networking
1e7cbabfdb12aa944ae0cb03871f8b55ede1341a
ioana ciornei
drivers
net
dpaa2, ethernet, freescale
dpaa2-switch: add support for configuring per port broadcast flooding
the br_bcast_flood bridge port flag is now accepted by the driver and a change in its state will determine a reconfiguration of the broadcast egress flooding list on the fdb associated with the port.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for configuring per port broadcast flooding
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-switch ']
['h', 'c']
2
31
3
--- diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c - cfg->if_id[i++] = ethsw->ports[j]->idx; + if (type == dpsw_broadcast && ethsw->ports[j]->bcast_flood) + cfg->if_id[i++] = ethsw->ports[j]->idx; + else if (type == dpsw_flooding) + cfg->if_id[i++] = ethsw->ports[j]->idx; +static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv, + struct switchdev_brport_flags flags) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + + if (flags.mask & br_bcast_flood) + port_priv->bcast_flood = !!(flags.val & br_bcast_flood); + + return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id); +} + - if (flags.mask & ~(br_learning)) + if (flags.mask & ~(br_learning | br_bcast_flood)) + if (flags.mask & br_bcast_flood) { + err = dpaa2_switch_port_flood(port_priv, flags); + if (err) + return err; + } + + /* reset the flooding state to denote that this port can send any + * packet in standalone mode. with this, we are also ensuring that any + * later bridge join will have the flooding flag on. + */ + port_priv->bcast_flood = true; + + port_priv->bcast_flood = true; + diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h - bool flood; + bool bcast_flood;
Networking
b54eb093f5ce784ca00170d4512c47cdc755397e
ioana ciornei
drivers
net
dpaa2, ethernet, freescale
dpaa2-switch: add support for configuring per port unknown flooding
add support for configuring per port unknown flooding by accepting both br_flood and br_mcast_flood as offloadable bridge port flags.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for configuring per port unknown flooding
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-switch ']
['h', 'c']
2
21
3
--- diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c - else if (type == dpsw_flooding) + else if (type == dpsw_flooding && ethsw->ports[j]->ucast_flood) + if (flags.mask & br_flood) + port_priv->ucast_flood = !!(flags.val & br_flood); + - if (flags.mask & ~(br_learning | br_bcast_flood)) + if (flags.mask & ~(br_learning | br_bcast_flood | br_flood | + br_mcast_flood)) + if (flags.mask & (br_flood | br_mcast_flood)) { + bool multicast = !!(flags.val & br_mcast_flood); + bool unicast = !!(flags.val & br_flood); + + if (unicast != multicast) { + nl_set_err_msg_mod(extack, + "cannot configure multicast flooding independently of unicast"); + return -einval; + } + } + - if (flags.mask & br_bcast_flood) { + if (flags.mask & (br_bcast_flood | br_flood | br_mcast_flood)) { + port_priv->ucast_flood = true; + port_priv->ucast_flood = true; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h + bool ucast_flood;
Networking
6253d5e39ce2bba13c601274768c481846526a80
ioana ciornei
drivers
net
dpaa2, ethernet, freescale
dpaa2-switch: add tc flower hardware offload on ingress traffic
this patch adds support for tc flower hardware offload on the ingress path. shared filter blocks are supported by sharing a single acl table between multiple ports.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add tc flower hardware offload on ingress traffic
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-switch ']
['h', 'c', 'makefile']
7
768
14
- ethernet: dst_mac/src_mac - ipv4: dst_ip/src_ip/ip_proto/tos - vlan: vlan_id/vlan_prio/vlan_tpid/vlan_dei - l4: dst_port/src_port - drop - mirred egress redirect - trap --- diff --git a/drivers/net/ethernet/freescale/dpaa2/makefile b/drivers/net/ethernet/freescale/dpaa2/makefile --- a/drivers/net/ethernet/freescale/dpaa2/makefile +++ b/drivers/net/ethernet/freescale/dpaa2/makefile -fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +// spdx-license-identifier: gpl-2.0 +/* + * dpaa2 ethernet switch flower support + * + * copyright 2021 nxp + * + */ + +#include "dpaa2-switch.h" + +static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, + struct dpsw_acl_key *acl_key) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = cls->common.extack; + struct dpsw_acl_fields *acl_h, *acl_m; + + if (dissector->used_keys & + ~(bit(flow_dissector_key_basic) | + bit(flow_dissector_key_control) | + bit(flow_dissector_key_eth_addrs) | + bit(flow_dissector_key_vlan) | + bit(flow_dissector_key_ports) | + bit(flow_dissector_key_ip) | + bit(flow_dissector_key_ipv6_addrs) | + bit(flow_dissector_key_ipv4_addrs))) { + nl_set_err_msg_mod(extack, + "unsupported keys used"); + return -eopnotsupp; + } + + acl_h = &acl_key->match; + acl_m = &acl_key->mask; + + if (flow_rule_match_key(rule, flow_dissector_key_basic)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + acl_h->l3_protocol = match.key->ip_proto; + acl_h->l2_ether_type = be16_to_cpu(match.key->n_proto); + acl_m->l3_protocol = match.mask->ip_proto; + acl_m->l2_ether_type = 
be16_to_cpu(match.mask->n_proto); + } + + if (flow_rule_match_key(rule, flow_dissector_key_eth_addrs)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + ether_addr_copy(acl_h->l2_dest_mac, &match.key->dst[0]); + ether_addr_copy(acl_h->l2_source_mac, &match.key->src[0]); + ether_addr_copy(acl_m->l2_dest_mac, &match.mask->dst[0]); + ether_addr_copy(acl_m->l2_source_mac, &match.mask->src[0]); + } + + if (flow_rule_match_key(rule, flow_dissector_key_vlan)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + acl_h->l2_vlan_id = match.key->vlan_id; + acl_h->l2_tpid = be16_to_cpu(match.key->vlan_tpid); + acl_h->l2_pcp_dei = match.key->vlan_priority << 1 | + match.key->vlan_dei; + + acl_m->l2_vlan_id = match.mask->vlan_id; + acl_m->l2_tpid = be16_to_cpu(match.mask->vlan_tpid); + acl_m->l2_pcp_dei = match.mask->vlan_priority << 1 | + match.mask->vlan_dei; + } + + if (flow_rule_match_key(rule, flow_dissector_key_ipv4_addrs)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + acl_h->l3_source_ip = be32_to_cpu(match.key->src); + acl_h->l3_dest_ip = be32_to_cpu(match.key->dst); + acl_m->l3_source_ip = be32_to_cpu(match.mask->src); + acl_m->l3_dest_ip = be32_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, flow_dissector_key_ports)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + acl_h->l4_source_port = be16_to_cpu(match.key->src); + acl_h->l4_dest_port = be16_to_cpu(match.key->dst); + acl_m->l4_source_port = be16_to_cpu(match.mask->src); + acl_m->l4_dest_port = be16_to_cpu(match.mask->dst); + } + + if (flow_rule_match_key(rule, flow_dissector_key_ip)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + if (match.mask->ttl != 0) { + nl_set_err_msg_mod(extack, + "matching on ttl not supported"); + return -eopnotsupp; + } + + if ((match.mask->tos & 0x3) != 0) { + nl_set_err_msg_mod(extack, + "matching on ecn not supported, 
only dscp"); + return -eopnotsupp; + } + + acl_h->l3_dscp = match.key->tos >> 2; + acl_m->l3_dscp = match.mask->tos >> 2; + } + + return 0; +} + +static int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpsw_acl_key *acl_key = &entry->key; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(dpaa2_ethsw_port_acl_cmd_buf_size, gfp_kernel); + if (!cmd_buff) + return -enomem; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + dpaa2_ethsw_port_acl_cmd_buf_size, + dma_to_device); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "dma mapping failed "); + return -efault; + } + + err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + dma_to_device); + if (err) { + dev_err(dev, "dpsw_acl_add_entry() failed %d ", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; + struct dpsw_acl_key *acl_key = &entry->key; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct device *dev = ethsw->dev; + u8 *cmd_buff; + int err; + + cmd_buff = kzalloc(dpaa2_ethsw_port_acl_cmd_buf_size, gfp_kernel); + if (!cmd_buff) + return -enomem; + + dpsw_acl_prepare_entry_cfg(acl_key, cmd_buff); + + acl_entry_cfg->key_iova = dma_map_single(dev, cmd_buff, + dpaa2_ethsw_port_acl_cmd_buf_size, + dma_to_device); + if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) { + dev_err(dev, "dma mapping failed "); + return -efault; + } + + err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, + 
acl_tbl->id, acl_entry_cfg); + + dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + dma_to_device); + if (err) { + dev_err(dev, "dpsw_acl_remove_entry() failed %d ", err); + return err; + } + + kfree(cmd_buff); + + return 0; +} + +static int +dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + struct list_head *pos, *n; + int index = 0; + + if (list_empty(&acl_tbl->entries)) { + list_add(&entry->list, &acl_tbl->entries); + return index; + } + + list_for_each_safe(pos, n, &acl_tbl->entries) { + tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list); + if (entry->prio < tmp->prio) + break; + index++; + } + list_add(&entry->list, pos->prev); + return index; +} + +static struct dpaa2_switch_acl_entry* +dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, + int index) +{ + struct dpaa2_switch_acl_entry *tmp; + int i = 0; + + list_for_each_entry(tmp, &acl_tbl->entries, list) { + if (i == index) + return tmp; + ++i; + } + + return null; +} + +static int +dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry, + int precedence) +{ + int err; + + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + entry->cfg.precedence = precedence; + return dpaa2_switch_acl_entry_add(acl_tbl, entry); +} + +static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + /* add the new acl entry to the linked list and get its index */ + index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry); + + /* move up in priority the acl entries to make space + * for the new filter. 
+ */ + precedence = dpaa2_ethsw_port_max_acl_entries - acl_tbl->num_rules - 1; + for (i = 0; i < index; i++) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence++; + } + + /* add the new entry to hardware */ + entry->cfg.precedence = precedence; + err = dpaa2_switch_acl_entry_add(acl_tbl, entry); + acl_tbl->num_rules++; + + return err; +} + +static struct dpaa2_switch_acl_entry * +dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, + unsigned long cookie) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == cookie) + return tmp; + } + return null; +} + +static int +dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp, *n; + int index = 0; + + list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + if (tmp->cookie == entry->cookie) + return index; + index++; + } + return -enoent; +} + +static int +dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, + struct dpaa2_switch_acl_entry *entry) +{ + struct dpaa2_switch_acl_entry *tmp; + int index, i, precedence, err; + + index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry); + + /* remove from hardware the acl entry */ + err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + if (err) + return err; + + acl_tbl->num_rules--; + + /* remove it from the list also */ + list_del(&entry->list); + + /* move down in priority the entries over the deleted one */ + precedence = entry->cfg.precedence; + for (i = index - 1; i >= 0; i--) { + tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + precedence); + if (err) + return err; + + precedence--; + } + + kfree(entry); + + return 0; +} + +static int 
dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw, + struct flow_action_entry *cls_act, + struct dpsw_acl_result *dpsw_act, + struct netlink_ext_ack *extack) +{ + int err = 0; + + switch (cls_act->id) { + case flow_action_trap: + dpsw_act->action = dpsw_acl_action_redirect_to_ctrl_if; + break; + case flow_action_redirect: + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + nl_set_err_msg_mod(extack, + "destination not a dpaa2 switch port"); + return -eopnotsupp; + } + + dpsw_act->if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + dpsw_act->action = dpsw_acl_action_redirect; + break; + case flow_action_drop: + dpsw_act->action = dpsw_acl_action_drop; + break; + default: + nl_set_err_msg_mod(extack, + "action not supported"); + err = -eopnotsupp; + goto out; + } + +out: + return err; +} + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&rule->action)) { + nl_set_err_msg(extack, "only singular actions are supported"); + return -eopnotsupp; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + nl_set_err_msg(extack, "maximum filter capacity reached"); + return -enomem; + } + + acl_entry = kzalloc(sizeof(*acl_entry), gfp_kernel); + if (!acl_entry) + return -enomem; + + err = dpaa2_switch_flower_parse_key(cls, &acl_entry->key); + if (err) + goto free_acl_entry; + + act = &rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + 
kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +#include <net/pkt_cls.h> +static int +dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *f) +{ + switch (f->command) { + case flow_cls_replace: + return dpaa2_switch_cls_flower_replace(acl_tbl, f); + case flow_cls_destroy: + return dpaa2_switch_cls_flower_destroy(acl_tbl, f); + default: + return -eopnotsupp; + } +} + +static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + switch (type) { + case tc_setup_clsflower: + return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data); + default: + return -eopnotsupp; + } +} + +static list_head(dpaa2_switch_block_cb_list); + +static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl) + return -einval; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d ", err); + return err; + } + + acl_tbl->ports |= bit(port_priv->idx); + port_priv->acl_tbl = acl_tbl; + + return 0; +} + +static int +dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv 
*port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct net_device *netdev = port_priv->netdev; + struct dpsw_acl_if_cfg acl_if_cfg; + int err; + + if (port_priv->acl_tbl != acl_tbl) + return -einval; + + acl_if_cfg.if_id[0] = port_priv->idx; + acl_if_cfg.num_ifs = 1; + err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, + acl_tbl->id, &acl_if_cfg); + if (err) { + netdev_err(netdev, "dpsw_acl_add_if err %d ", err); + return err; + } + + acl_tbl->ports &= ~bit(port_priv->idx); + port_priv->acl_tbl = null; + return 0; +} + +static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl; + int err; + + /* if the port is already bound to this acl table then do nothing. this + * can happen when this port is the first one to join a tc block + */ + if (port_priv->acl_tbl == acl_tbl) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl); + if (err) + return err; + + /* mark the previous acl table as being unused if this was the last + * port that was using it. + */ + if (old_acl_tbl->ports == 0) + old_acl_tbl->in_use = false; + + return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); +} + +static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_acl_tbl *acl_tbl) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *new_acl_tbl; + int err; + + /* we are the last port that leaves a block (an acl table). + * we'll continue to use this table. 
+ */ + if (acl_tbl->ports == bit(port_priv->idx)) + return 0; + + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl); + if (err) + return err; + + if (acl_tbl->ports == 0) + acl_tbl->in_use = false; + + new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); + new_acl_tbl->in_use = true; + return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl); +} + +static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + bool register_block = false; + int err; + + block_cb = flow_block_cb_lookup(f->block, + dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + + if (!block_cb) { + /* if the acl table is not already known, then this port must + * be the first to join it. in this case, we can just continue + * to use our private table + */ + acl_tbl = port_priv->acl_tbl; + + block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw, acl_tbl, null); + if (is_err(block_cb)) + return ptr_err(block_cb); + + register_block = true; + } else { + acl_tbl = flow_block_cb_priv(block_cb); + } + + flow_block_cb_incref(block_cb); + err = dpaa2_switch_port_block_bind(port_priv, acl_tbl); + if (err) + goto err_block_bind; + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, + &dpaa2_switch_block_cb_list); + } + + return 0; + +err_block_bind: + if (!flow_block_cb_decref(block_cb)) + flow_block_cb_free(block_cb); + return err; +} + +static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, + struct flow_block_offload *f) +{ + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_acl_tbl *acl_tbl; + struct flow_block_cb *block_cb; + int err; + + block_cb = flow_block_cb_lookup(f->block, + 
dpaa2_switch_port_setup_tc_block_cb_ig, + ethsw); + if (!block_cb) + return; + + acl_tbl = flow_block_cb_priv(block_cb); + err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl); + if (!err && !flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +} + +static int dpaa2_switch_setup_tc_block(struct net_device *netdev, + struct flow_block_offload *f) +{ + if (f->binder_type != flow_block_binder_type_clsact_ingress) + return -eopnotsupp; + + f->driver_block_list = &dpaa2_switch_block_cb_list; + + switch (f->command) { + case flow_block_bind: + return dpaa2_switch_setup_tc_block_bind(netdev, f); + case flow_block_unbind: + dpaa2_switch_setup_tc_block_unbind(netdev, f); + return 0; + default: + return -eopnotsupp; + } +} + +static int dpaa2_switch_port_setup_tc(struct net_device *netdev, + enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case tc_setup_block: { + return dpaa2_switch_setup_tc_block(netdev, type_data); + } + default: + return -eopnotsupp; + } + + return 0; +} + + .ndo_setup_tc = dpaa2_switch_port_setup_tc, - struct dpsw_acl_if_cfg acl_if_cfg; - acl_if_cfg.if_id[0] = port_priv->idx; - acl_if_cfg.num_ifs = 1; - err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl_id, &acl_if_cfg); - if (err) { - netdev_err(netdev, "dpsw_acl_add_if err %d ", err); - dpsw_acl_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl_id); - } - + acl_tbl->ethsw = ethsw; - port_priv->acl_tbl = acl_tbl; + init_list_head(&acl_tbl->entries); + + err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + if (err) + return err; - port_netdev->features = netif_f_hw_vlan_ctag_filter | netif_f_hw_vlan_stag_filter; + port_netdev->features = netif_f_hw_vlan_ctag_filter | + netif_f_hw_vlan_stag_filter | + netif_f_hw_tc; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +#define dpaa2_ethsw_port_default_traps 1 + +struct dpaa2_switch_acl_entry { + struct list_head list; + u16 prio; + unsigned long cookie; + + struct dpsw_acl_entry_cfg cfg; + struct dpsw_acl_key key; +}; + + struct list_head entries; + struct ethsw_core *ethsw; + u64 ports; + +static inline bool +dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl) +{ + if ((acl_tbl->num_rules + dpaa2_ethsw_port_default_traps) >= + dpaa2_ethsw_port_max_acl_entries) + return true; + return false; +} + +static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw, + struct net_device *netdev) +{ + int i; + + for (i = 0; i < ethsw->sw_attr.num_ifs; i++) + if (ethsw->ports[i]->netdev == netdev) + return ethsw->ports[i]->idx; + + return -einval; +} + + +/* tc offload */ + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct flow_cls_offload *cls); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h --- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +#define dpsw_cmdid_acl_remove_entry dpsw_cmd_id(0x093) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c + +/** + * dpsw_acl_remove_entry() - removes an entry from acl. + * @mc_io: pointer to mc portal's i/o object + * @cmd_flags: command flags; one or more of 'mc_cmd_flag_' + * @token: token of dpsw object + * @acl_id: acl id + * @cfg: entry configuration + * + * warning: this function has to be called after dpsw_acl_set_entry_cfg() + * + * return: '0' on success; error code otherwise. 
+ */ +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg) +{ + struct dpsw_cmd_acl_entry *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(dpsw_cmdid_acl_remove_entry, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_acl_entry *)cmd.params; + cmd_params->acl_id = cpu_to_le16(acl_id); + cmd_params->result_if_id = cpu_to_le16(cfg->result.if_id); + cmd_params->precedence = cpu_to_le32(cfg->precedence); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + dpsw_set_field(cmd_params->result_action, + result_action, + cfg->result.action); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h + +int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 acl_id, const struct dpsw_acl_entry_cfg *cfg);
Networking
1110318d83e8011c4dfcb2f7dd343bcfb1623c5f
ioana ciornei
drivers
net
dpaa2, ethernet, freescale
dpaa2-switch: add tc matchall filter support
add support tc_setup_clsmatchall by using the same acl table entries framework as for tc flower. adding a matchall rule is done by installing an entry which has a mask of all zeroes, thus matching on any packet.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add tc matchall filter support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dpaa2-switch ']
['h', 'c']
3
79
0
--- diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = acl_tbl->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (!flow_offload_has_one_action(&cls->rule->action)) { + nl_set_err_msg(extack, "only singular actions are supported"); + return -eopnotsupp; + } + + if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + nl_set_err_msg(extack, "maximum filter capacity reached"); + return -enomem; + } + + acl_entry = kzalloc(sizeof(*acl_entry), gfp_kernel); + if (!acl_entry) + return -enomem; + + act = &cls->rule->action.entries[0]; + err = dpaa2_switch_tc_parse_action(ethsw, act, + &acl_entry->cfg.result, extack); + if (err) + goto free_acl_entry; + + acl_entry->prio = cls->common.prio; + acl_entry->cookie = cls->cookie; + + err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + if (err) + goto free_acl_entry; + + return 0; + +free_acl_entry: + kfree(acl_entry); + + return err; +} + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls) +{ + struct dpaa2_switch_acl_entry *entry; + + entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); + if (!entry) + return 0; + + return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +static int +dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl 
*acl_tbl, + struct tc_cls_matchall_offload *f) +{ + switch (f->command) { + case tc_clsmatchall_replace: + return dpaa2_switch_cls_matchall_replace(acl_tbl, f); + case tc_clsmatchall_destroy: + return dpaa2_switch_cls_matchall_destroy(acl_tbl, f); + default: + return -eopnotsupp; + } +} + + case tc_setup_clsmatchall: + return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +#include <net/pkt_cls.h> + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls); + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, + struct tc_cls_matchall_offload *cls);
Networking
4ba28c1a1aff053e6471151cffee860668ead786
ioana ciornei
drivers
net
dpaa2, ethernet, freescale
net: dsa: b53: mmap: add device tree support
add device tree support to b53_mmap.c while keeping platform devices support.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add device tree support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'b53', 'mmap']
['c']
1
55
0
--- diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c +#include <linux/bits.h> +static int b53_mmap_probe_of(struct platform_device *pdev, + struct b53_platform_data **ppdata) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *of_ports, *of_port; + struct device *dev = &pdev->dev; + struct b53_platform_data *pdata; + void __iomem *mem; + + mem = devm_platform_ioremap_resource(pdev, 0); + if (is_err(mem)) + return ptr_err(mem); + + pdata = devm_kzalloc(dev, sizeof(struct b53_platform_data), + gfp_kernel); + if (!pdata) + return -enomem; + + pdata->regs = mem; + pdata->chip_id = bcm63xx_device_id; + pdata->big_endian = of_property_read_bool(np, "big-endian"); + + of_ports = of_get_child_by_name(np, "ports"); + if (!of_ports) { + dev_err(dev, "no ports child node found "); + return -einval; + } + + for_each_available_child_of_node(of_ports, of_port) { + u32 reg; + + if (of_property_read_u32(of_port, "reg", &reg)) + continue; + + if (reg < b53_cpu_port) + pdata->enabled_ports |= bit(reg); + } + + of_node_put(of_ports); + *ppdata = pdata; + + return 0; +} + + struct device_node *np = pdev->dev.of_node; + int ret; + + if (!pdata && np) { + ret = b53_mmap_probe_of(pdev, &pdata); + if (ret) { + dev_err(&pdev->dev, "of probe error "); + return ret; + } + }
Networking
a5538a777b73b35750ed1ffff8c1ef539e861624
lvaro fern ndez rojas
drivers
net
b53, dsa
net: dsa: b53: support legacy tags
these tags are used on bcm5325, bcm5365 and bcm63xx switches.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support legacy tags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'b53']
['kconfig', 'c']
2
8
5
--- diff --git a/drivers/net/dsa/b53/kconfig b/drivers/net/dsa/b53/kconfig --- a/drivers/net/dsa/b53/kconfig +++ b/drivers/net/dsa/b53/kconfig + select net_dsa_tag_brcm_legacy diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c - /* older models (5325, 5365) support a different tag format that we do - * not support in net/dsa/tag_brcm.c yet. - */ - if (is5325(dev) || is5365(dev) || - !b53_can_enable_brcm_tags(ds, port, mprot)) { + if (!b53_can_enable_brcm_tags(ds, port, mprot)) { + /* older models require a different 6 byte tag */ + if (is5325(dev) || is5365(dev) || is63xx(dev)) { + dev->tag_protocol = dsa_tag_proto_brcm_legacy; + goto out; + } +
Networking
46c5176c586c81306bf9e7024c13b95da775490f
lvaro fern ndez rojas florian fainelli f fainelli gmail com
drivers
net
b53, dsa
net: dsa: hellcreek: add devlink vlan region
allow to dump the vlan table via devlink. this especially useful, because the driver internally leverages vlans for the port separation. these are not visible via the bridge utility.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for dumping tables
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'hellcreek']
['h', 'c']
2
80
0
--- diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c +static int hellcreek_devlink_region_vlan_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct hellcreek_devlink_vlan_entry *table, *entry; + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct hellcreek *hellcreek = ds->priv; + int i; + + table = kcalloc(vlan_n_vid, sizeof(*entry), gfp_kernel); + if (!table) + return -enomem; + + entry = table; + + mutex_lock(&hellcreek->reg_lock); + for (i = 0; i < vlan_n_vid; ++i, ++entry) { + entry->member = hellcreek->vidmbrcfg[i]; + entry->vid = i; + } + mutex_unlock(&hellcreek->reg_lock); + + *data = (u8 *)table; + + return 0; +} + +static struct devlink_region_ops hellcreek_region_vlan_ops = { + .name = "vlan", + .snapshot = hellcreek_devlink_region_vlan_snapshot, + .destructor = kfree, +}; + +static int hellcreek_setup_devlink_regions(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + struct devlink_region_ops *ops; + struct devlink_region *region; + u64 size; + + /* vlan table */ + size = vlan_n_vid * sizeof(struct hellcreek_devlink_vlan_entry); + ops = &hellcreek_region_vlan_ops; + + region = dsa_devlink_region_create(ds, ops, 1, size); + if (is_err(region)) + return ptr_err(region); + + hellcreek->vlan_region = region; + + return 0; +} + +static void hellcreek_teardown_devlink_regions(struct dsa_switch *ds) +{ + struct hellcreek *hellcreek = ds->priv; + + dsa_devlink_region_destroy(hellcreek->vlan_region); +} + + ret = hellcreek_setup_devlink_regions(ds); + if (ret) { + dev_err(hellcreek->dev, + "failed to setup devlink regions! 
"); + goto err_regions; + } + + +err_regions: + dsa_devlink_resources_unregister(ds); + + return ret; + hellcreek_teardown_devlink_regions(ds); diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h + struct devlink_region *vlan_region; +struct hellcreek_devlink_vlan_entry { + u16 vid; + u16 member; +}; +
Networking
ba2d1c28886ceacd7da96466529f7929eaf3a498
kurt kanzenbach vladimir oltean olteanv gmail com
drivers
net
dsa, hirschmann
net: dsa: hellcreek: use boolean value
hellcreek_select_vlan() takes a boolean instead of an integer. so, use false accordingly.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for dumping tables
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'hellcreek']
['c']
1
1
1
--- diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c - hellcreek_select_vlan(hellcreek, vid, 0); + hellcreek_select_vlan(hellcreek, vid, false);
Networking
e81813fb56350641d8d3ba6bb6811ecaab934f10
kurt kanzenbach andrew lunn andrew lunn ch vladimir oltean olteanv gmail com
drivers
net
dsa, hirschmann
net: dsa: hellcreek: move common code to helper
there are two functions which need to populate fdb entries. move that to a helper function.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for dumping tables
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'hellcreek']
['c']
1
43
42
--- diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c +static void hellcreek_populate_fdb_entry(struct hellcreek *hellcreek, + struct hellcreek_fdb_entry *entry, + size_t idx) +{ + unsigned char addr[eth_alen]; + u16 meta, mac; + + /* read values */ + meta = hellcreek_read(hellcreek, hr_fdbmdrd); + mac = hellcreek_read(hellcreek, hr_fdbrdl); + addr[5] = mac & 0xff; + addr[4] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, hr_fdbrdm); + addr[3] = mac & 0xff; + addr[2] = (mac & 0xff00) >> 8; + mac = hellcreek_read(hellcreek, hr_fdbrdh); + addr[1] = mac & 0xff; + addr[0] = (mac & 0xff00) >> 8; + + /* populate @entry */ + memcpy(entry->mac, addr, sizeof(addr)); + entry->idx = idx; + entry->portmask = (meta & hr_fdbmdrd_portmask_mask) >> + hr_fdbmdrd_portmask_shift; + entry->age = (meta & hr_fdbmdrd_age_mask) >> + hr_fdbmdrd_age_shift; + entry->is_obt = !!(meta & hr_fdbmdrd_obt); + entry->pass_blocked = !!(meta & hr_fdbmdrd_pass_blocked); + entry->is_static = !!(meta & hr_fdbmdrd_static); + entry->reprio_tc = (meta & hr_fdbmdrd_reprio_tc_mask) >> + hr_fdbmdrd_reprio_tc_shift; + entry->reprio_en = !!(meta & hr_fdbmdrd_reprio_en); +} + - unsigned char addr[eth_alen]; - u16 meta, mac; - - meta = hellcreek_read(hellcreek, hr_fdbmdrd); - mac = hellcreek_read(hellcreek, hr_fdbrdl); - addr[5] = mac & 0xff; - addr[4] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, hr_fdbrdm); - addr[3] = mac & 0xff; - addr[2] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, hr_fdbrdh); - addr[1] = mac & 0xff; - addr[0] = (mac & 0xff00) >> 8; + struct hellcreek_fdb_entry tmp = { 0 }; + + /* read entry */ + hellcreek_populate_fdb_entry(hellcreek, &tmp, i); - if (memcmp(addr, dest, eth_alen)) + if (memcmp(tmp.mac, dest, eth_alen)) - entry->idx = i; - entry->portmask = (meta & hr_fdbmdrd_portmask_mask) >> - hr_fdbmdrd_portmask_shift; - 
entry->age = (meta & hr_fdbmdrd_age_mask) >> - hr_fdbmdrd_age_shift; - entry->is_obt = !!(meta & hr_fdbmdrd_obt); - entry->pass_blocked = !!(meta & hr_fdbmdrd_pass_blocked); - entry->is_static = !!(meta & hr_fdbmdrd_static); - entry->reprio_tc = (meta & hr_fdbmdrd_reprio_tc_mask) >> - hr_fdbmdrd_reprio_tc_shift; - entry->reprio_en = !!(meta & hr_fdbmdrd_reprio_en); - memcpy(entry->mac, addr, sizeof(addr)); + memcpy(entry, &tmp, sizeof(*entry)); - u16 meta, mac; - - meta = hellcreek_read(hellcreek, hr_fdbmdrd); - mac = hellcreek_read(hellcreek, hr_fdbrdl); - entry.mac[5] = mac & 0xff; - entry.mac[4] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, hr_fdbrdm); - entry.mac[3] = mac & 0xff; - entry.mac[2] = (mac & 0xff00) >> 8; - mac = hellcreek_read(hellcreek, hr_fdbrdh); - entry.mac[1] = mac & 0xff; - entry.mac[0] = (mac & 0xff00) >> 8; + + /* read entry */ + hellcreek_populate_fdb_entry(hellcreek, &entry, i); - entry.portmask = (meta & hr_fdbmdrd_portmask_mask) >> - hr_fdbmdrd_portmask_shift; - entry.is_static = !!(meta & hr_fdbmdrd_static); -
Networking
eb5f3d3141805fd22b2fb49a23536cc3f30dd752
kurt kanzenbach andrew lunn andrew lunn ch vladimir oltean olteanv gmail com
drivers
net
dsa, hirschmann
net: dsa: hellcreek: add devlink fdb region
allow to dump the fdb table via devlink. this is a useful debugging feature.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for dumping tables
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'hellcreek']
['h', 'c']
2
63
0
--- diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c +static int hellcreek_devlink_region_fdb_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + struct hellcreek_fdb_entry *table, *entry; + struct hellcreek *hellcreek = ds->priv; + size_t i; + + table = kcalloc(hellcreek->fdb_entries, sizeof(*entry), gfp_kernel); + if (!table) + return -enomem; + + entry = table; + + mutex_lock(&hellcreek->reg_lock); + + /* start table read */ + hellcreek_read(hellcreek, hr_fdbmax); + hellcreek_write(hellcreek, 0x00, hr_fdbmax); + + for (i = 0; i < hellcreek->fdb_entries; ++i, ++entry) { + /* read current entry */ + hellcreek_populate_fdb_entry(hellcreek, entry, i); + + /* advance read pointer */ + hellcreek_write(hellcreek, 0x00, hr_fdbrdh); + } + + mutex_unlock(&hellcreek->reg_lock); + + *data = (u8 *)table; + + return 0; +} + +static struct devlink_region_ops hellcreek_region_fdb_ops = { + .name = "fdb", + .snapshot = hellcreek_devlink_region_fdb_snapshot, + .destructor = kfree, +}; + + int ret; + /* fdb table */ + size = hellcreek->fdb_entries * sizeof(struct hellcreek_fdb_entry); + ops = &hellcreek_region_fdb_ops; + + region = dsa_devlink_region_create(ds, ops, 1, size); + if (is_err(region)) { + ret = ptr_err(region); + goto err_fdb; + } + + hellcreek->fdb_region = region; + + +err_fdb: + dsa_devlink_region_destroy(hellcreek->vlan_region); + + return ret; + dsa_devlink_region_destroy(hellcreek->fdb_region); diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h + struct devlink_region *fdb_region;
Networking
292cd449fee3a67541fab2626efb8af6a72b4c69
kurt kanzenbach andrew lunn andrew lunn ch vladimir oltean olteanv gmail com
drivers
net
dsa, hirschmann
net: dsa: hellcreek: offload bridge port flags
the switch implements unicast and multicast filtering per port. add support for it. by default filtering is disabled.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['c']
1
104
25
--- diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c +static void hellcreek_port_set_ucast_flood(struct hellcreek *hellcreek, + int port, bool enable) +{ + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "%s unicast flooding on port %d ", + enable ? "enable" : "disable", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + if (enable) + val &= ~hr_ptcfg_uuc_flt; + else + val |= hr_ptcfg_uuc_flt; + hellcreek_write(hellcreek, val, hr_ptcfg); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static void hellcreek_port_set_mcast_flood(struct hellcreek *hellcreek, + int port, bool enable) +{ + struct hellcreek_port *hellcreek_port; + u16 val; + + hellcreek_port = &hellcreek->ports[port]; + + dev_dbg(hellcreek->dev, "%s multicast flooding on port %d ", + enable ? 
"enable" : "disable", port); + + mutex_lock(&hellcreek->reg_lock); + + hellcreek_select_port(hellcreek, port); + val = hellcreek_port->ptcfg; + if (enable) + val &= ~hr_ptcfg_umc_flt; + else + val |= hr_ptcfg_umc_flt; + hellcreek_write(hellcreek, val, hr_ptcfg); + hellcreek_port->ptcfg = val; + + mutex_unlock(&hellcreek->reg_lock); +} + +static int hellcreek_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(br_flood | br_mcast_flood)) + return -einval; + + return 0; +} + +static int hellcreek_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct hellcreek *hellcreek = ds->priv; + + if (flags.mask & br_flood) + hellcreek_port_set_ucast_flood(hellcreek, port, + !!(flags.val & br_flood)); + + if (flags.mask & br_mcast_flood) + hellcreek_port_set_mcast_flood(hellcreek, port, + !!(flags.val & br_mcast_flood)); + + return 0; +} + - .get_ethtool_stats = hellcreek_get_ethtool_stats, - .get_sset_count = hellcreek_get_sset_count, - .get_strings = hellcreek_get_strings, - .get_tag_protocol = hellcreek_get_tag_protocol, - .get_ts_info = hellcreek_get_ts_info, - .phylink_validate = hellcreek_phylink_validate, - .port_bridge_join = hellcreek_port_bridge_join, - .port_bridge_leave = hellcreek_port_bridge_leave, - .port_disable = hellcreek_port_disable, - .port_enable = hellcreek_port_enable, - .port_fdb_add = hellcreek_fdb_add, - .port_fdb_del = hellcreek_fdb_del, - .port_fdb_dump = hellcreek_fdb_dump, - .port_hwtstamp_set = hellcreek_port_hwtstamp_set, - .port_hwtstamp_get = hellcreek_port_hwtstamp_get, - .port_prechangeupper = hellcreek_port_prechangeupper, - .port_rxtstamp = hellcreek_port_rxtstamp, - .port_setup_tc = hellcreek_port_setup_tc, - .port_stp_state_set = hellcreek_port_stp_state_set, - .port_txtstamp = hellcreek_port_txtstamp, - .port_vlan_add = hellcreek_vlan_add, - .port_vlan_del = 
hellcreek_vlan_del, - .port_vlan_filtering = hellcreek_vlan_filtering, - .setup = hellcreek_setup, - .teardown = hellcreek_teardown, + .get_ethtool_stats = hellcreek_get_ethtool_stats, + .get_sset_count = hellcreek_get_sset_count, + .get_strings = hellcreek_get_strings, + .get_tag_protocol = hellcreek_get_tag_protocol, + .get_ts_info = hellcreek_get_ts_info, + .phylink_validate = hellcreek_phylink_validate, + .port_bridge_flags = hellcreek_bridge_flags, + .port_bridge_join = hellcreek_port_bridge_join, + .port_bridge_leave = hellcreek_port_bridge_leave, + .port_disable = hellcreek_port_disable, + .port_enable = hellcreek_port_enable, + .port_fdb_add = hellcreek_fdb_add, + .port_fdb_del = hellcreek_fdb_del, + .port_fdb_dump = hellcreek_fdb_dump, + .port_hwtstamp_set = hellcreek_port_hwtstamp_set, + .port_hwtstamp_get = hellcreek_port_hwtstamp_get, + .port_pre_bridge_flags = hellcreek_pre_bridge_flags, + .port_prechangeupper = hellcreek_port_prechangeupper, + .port_rxtstamp = hellcreek_port_rxtstamp, + .port_setup_tc = hellcreek_port_setup_tc, + .port_stp_state_set = hellcreek_port_stp_state_set, + .port_txtstamp = hellcreek_port_txtstamp, + .port_vlan_add = hellcreek_vlan_add, + .port_vlan_del = hellcreek_vlan_del, + .port_vlan_filtering = hellcreek_vlan_filtering, + .setup = hellcreek_setup, + .teardown = hellcreek_teardown,
Networking
db7284a6ccc4a6d7714645141f7dcee0fcb4e57d
kurt kanzenbach
drivers
net
dsa, hirschmann
net: dsa: hellcreek: report switch name and id
report the driver name, asic id and the switch name via devlink. this is a useful information for user space tooling.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
report switch name and id
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['h', 'c']
2
19
0
--- diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c +static int hellcreek_devlink_info_get(struct dsa_switch *ds, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct hellcreek *hellcreek = ds->priv; + int ret; + + ret = devlink_info_driver_name_put(req, "hellcreek"); + if (ret) + return ret; + + return devlink_info_version_fixed_put(req, + devlink_info_version_generic_asic_id, + hellcreek->pdata->name); +} + + .devlink_info_get = hellcreek_devlink_info_get, + .name = "r4c30", diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h --- a/include/linux/platform_data/hirschmann-hellcreek.h +++ b/include/linux/platform_data/hirschmann-hellcreek.h + const char *name; /* switch name */
Networking
1ab568e92bf8f6a359c977869dc546a23a6b5f13
kurt kanzenbach florian fainelli f fainelli gmail com andrew lunn andrew lunn ch vladimir oltean olteanv gmail com
drivers
net
dsa, hirschmann, platform_data
net: dsa: microchip: ksz8795: change drivers prefix to be generic
the driver can be used on other chips of this type. to reflect this we rename the drivers prefix from ksz8795 to ksz8.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['h', 'c']
3
111
117
--- diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c -static int ksz8795_reset_switch(struct ksz_device *dev) +static int ksz8_reset_switch(struct ksz_device *dev) -static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, - u64 *cnt) +static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt) -static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt) +static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) -static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze) +static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze) -static void ksz8795_port_init_cnt(struct ksz_device *dev, int port) +static void ksz8_port_init_cnt(struct ksz_device *dev, int port) -static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr, - u64 *data) +static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data) -static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr, - u64 data) +static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data) -static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data) +static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data) -static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr, - u8 *mac_addr, u8 *fid, u8 *src_port, - u8 *timestamp, u16 *entries) +static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, + u8 *mac_addr, u8 *fid, u8 *src_port, + u8 *timestamp, u16 *entries) - rc = ksz8795_valid_dyn_entry(dev, &data); + rc = ksz8_valid_dyn_entry(dev, &data); -static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr, - struct alu_struct *alu) +static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) - ksz8795_r_table(dev, 
table_static_mac, addr, &data); + ksz8_r_table(dev, table_static_mac, addr, &data); -static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr, - struct alu_struct *alu) +static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr, + struct alu_struct *alu) - ksz8795_w_table(dev, table_static_mac, addr, data); + ksz8_w_table(dev, table_static_mac, addr, data); -static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid) +static void ksz8_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid) -static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan) +static void ksz8_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan) -static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr) +static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr) - ksz8795_r_table(dev, table_vlan, addr, &data); + ksz8_r_table(dev, table_vlan, addr, &data); -static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) +static void ksz8_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan) - ksz8795_r_table(dev, table_vlan, addr, &buf); + ksz8_r_table(dev, table_vlan, addr, &buf); -static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) +static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan) - ksz8795_r_table(dev, table_vlan, addr, &buf); + ksz8_r_table(dev, table_vlan, addr, &buf); - ksz8795_w_table(dev, table_vlan, addr, buf); + ksz8_w_table(dev, table_vlan, addr, buf); -static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) +static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val) -static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) +static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val) -static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds, - int port, - enum dsa_tag_protocol mp) +static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds, + int port, + enum 
dsa_tag_protocol mp) -static void ksz8795_get_strings(struct dsa_switch *ds, int port, - u32 stringset, uint8_t *buf) +static void ksz8_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) -static void ksz8795_cfg_port_member(struct ksz_device *dev, int port, - u8 member) +static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member) -static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, - u8 state) +static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) - ksz8795_cfg_port_member(dev, port, (u8)member); + ksz8_cfg_port_member(dev, port, (u8)member); -static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port) +static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port) -static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port, - bool flag, - struct netlink_ext_ack *extack) +static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag, + struct netlink_ext_ack *extack) -static int ksz8795_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan, - struct netlink_ext_ack *extack) +static int ksz8_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) - ksz8795_r_vlan_table(dev, vlan->vid, &data); - ksz8795_from_vlan(data, &fid, &member, &valid); + ksz8_r_vlan_table(dev, vlan->vid, &data); + ksz8_from_vlan(data, &fid, &member, &valid); - ksz8795_to_vlan(fid, member, valid, &data); - ksz8795_w_vlan_table(dev, vlan->vid, data); + ksz8_to_vlan(fid, member, valid, &data); + ksz8_w_vlan_table(dev, vlan->vid, data); -static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +static int ksz8_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) - ksz8795_r_vlan_table(dev, vlan->vid, &data); - ksz8795_from_vlan(data, &fid, &member, &valid); + 
ksz8_r_vlan_table(dev, vlan->vid, &data); + ksz8_from_vlan(data, &fid, &member, &valid); - ksz8795_to_vlan(fid, member, valid, &data); - ksz8795_w_vlan_table(dev, vlan->vid, data); + ksz8_to_vlan(fid, member, valid, &data); + ksz8_w_vlan_table(dev, vlan->vid, data); -static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror, - bool ingress) +static int ksz8_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) -static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror) +static void ksz8_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) -static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) +static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) - ksz8795_cfg_port_member(dev, port, member); + ksz8_cfg_port_member(dev, port, member); -static void ksz8795_config_cpu_port(struct dsa_switch *ds) +static void ksz8_config_cpu_port(struct dsa_switch *ds) - ksz8795_port_setup(dev, dev->cpu_port, true); + ksz8_port_setup(dev, dev->cpu_port, true); - ksz8795_port_stp_state_set(ds, i, br_state_disabled); + ksz8_port_stp_state_set(ds, i, br_state_disabled); -static int ksz8795_setup(struct dsa_switch *ds) +static int ksz8_setup(struct dsa_switch *ds) - ret = ksz8795_reset_switch(dev); + ret = ksz8_reset_switch(dev); - ksz8795_config_cpu_port(ds); + ksz8_config_cpu_port(ds); - ksz8795_r_vlan_entries(dev, i); + ksz8_r_vlan_entries(dev, i); - ksz8795_w_sta_mac_table(dev, 0, &alu); + ksz8_w_sta_mac_table(dev, 0, &alu); -static const struct dsa_switch_ops ksz8795_switch_ops = { - .get_tag_protocol = ksz8795_get_tag_protocol, - .setup = ksz8795_setup, +static const struct dsa_switch_ops ksz8_switch_ops = { + .get_tag_protocol = ksz8_get_tag_protocol, + .setup = ksz8_setup, - .get_strings = ksz8795_get_strings, + .get_strings = 
ksz8_get_strings, - .port_stp_state_set = ksz8795_port_stp_state_set, + .port_stp_state_set = ksz8_port_stp_state_set, - .port_vlan_filtering = ksz8795_port_vlan_filtering, - .port_vlan_add = ksz8795_port_vlan_add, - .port_vlan_del = ksz8795_port_vlan_del, + .port_vlan_filtering = ksz8_port_vlan_filtering, + .port_vlan_add = ksz8_port_vlan_add, + .port_vlan_del = ksz8_port_vlan_del, - .port_mirror_add = ksz8795_port_mirror_add, - .port_mirror_del = ksz8795_port_mirror_del, + .port_mirror_add = ksz8_port_mirror_add, + .port_mirror_del = ksz8_port_mirror_del, -static u32 ksz8795_get_port_addr(int port, int offset) +static u32 ksz8_get_port_addr(int port, int offset) -static int ksz8795_switch_detect(struct ksz_device *dev) +static int ksz8_switch_detect(struct ksz_device *dev) -static const struct ksz_chip_data ksz8795_switch_chips[] = { +static const struct ksz_chip_data ksz8_switch_chips[] = { -static int ksz8795_switch_init(struct ksz_device *dev) +static int ksz8_switch_init(struct ksz_device *dev) - dev->ds->ops = &ksz8795_switch_ops; + dev->ds->ops = &ksz8_switch_ops; - for (i = 0; i < array_size(ksz8795_switch_chips); i++) { - const struct ksz_chip_data *chip = &ksz8795_switch_chips[i]; + for (i = 0; i < array_size(ksz8_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz8_switch_chips[i]; -static void ksz8795_switch_exit(struct ksz_device *dev) +static void ksz8_switch_exit(struct ksz_device *dev) - ksz8795_reset_switch(dev); + ksz8_reset_switch(dev); -static const struct ksz_dev_ops ksz8795_dev_ops = { - .get_port_addr = ksz8795_get_port_addr, - .cfg_port_member = ksz8795_cfg_port_member, - .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table, - .port_setup = ksz8795_port_setup, - .r_phy = ksz8795_r_phy, - .w_phy = ksz8795_w_phy, - .r_dyn_mac_table = ksz8795_r_dyn_mac_table, - .r_sta_mac_table = ksz8795_r_sta_mac_table, - .w_sta_mac_table = ksz8795_w_sta_mac_table, - .r_mib_cnt = ksz8795_r_mib_cnt, - .r_mib_pkt = ksz8795_r_mib_pkt, - .freeze_mib = 
ksz8795_freeze_mib, - .port_init_cnt = ksz8795_port_init_cnt, - .shutdown = ksz8795_reset_switch, - .detect = ksz8795_switch_detect, - .init = ksz8795_switch_init, - .exit = ksz8795_switch_exit, +static const struct ksz_dev_ops ksz8_dev_ops = { + .get_port_addr = ksz8_get_port_addr, + .cfg_port_member = ksz8_cfg_port_member, + .flush_dyn_mac_table = ksz8_flush_dyn_mac_table, + .port_setup = ksz8_port_setup, + .r_phy = ksz8_r_phy, + .w_phy = ksz8_w_phy, + .r_dyn_mac_table = ksz8_r_dyn_mac_table, + .r_sta_mac_table = ksz8_r_sta_mac_table, + .w_sta_mac_table = ksz8_w_sta_mac_table, + .r_mib_cnt = ksz8_r_mib_cnt, + .r_mib_pkt = ksz8_r_mib_pkt, + .freeze_mib = ksz8_freeze_mib, + .port_init_cnt = ksz8_port_init_cnt, + .shutdown = ksz8_reset_switch, + .detect = ksz8_switch_detect, + .init = ksz8_switch_init, + .exit = ksz8_switch_exit, -int ksz8795_switch_register(struct ksz_device *dev) +int ksz8_switch_register(struct ksz_device *dev) - return ksz_switch_register(dev, &ksz8795_dev_ops); + return ksz_switch_register(dev, &ksz8_dev_ops); -export_symbol(ksz8795_switch_register); +export_symbol(ksz8_switch_register); diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c - ret = ksz8795_switch_register(dev); + ret = ksz8_switch_register(dev); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h -int ksz8795_switch_register(struct ksz_device *dev); +int ksz8_switch_register(struct ksz_device *dev);
Networking
4b5baca0403e2b6308e68938dc4d94912f5b8e28
michael grzeschik andrew lunn andrew lunn ch florian fainelli f fainelli gmail com
drivers
net
dsa, microchip
net: dsa: microchip: ksz8795: move cpu_select_interface to extra function
this patch moves the cpu interface selection code to a individual function specific for ksz8795. it will make it simpler to customize the code path for different switches supported by this driver.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['c']
1
50
42
--- diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c +static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port) +{ + struct ksz_port *p = &dev->ports[port]; + u8 data8; + + if (!p->interface && dev->compat_interface) { + dev_warn(dev->dev, + "using legacy switch "phy-mode" property, because it is missing on port %d node. " + "please update your device tree. ", + port); + p->interface = dev->compat_interface; + } + + /* configure mii interface for proper network communication. */ + ksz_read8(dev, reg_port_5_ctrl_6, &data8); + data8 &= ~port_interface_type; + data8 &= ~port_gmii_1gps_mode; + switch (p->interface) { + case phy_interface_mode_mii: + p->phydev.speed = speed_100; + break; + case phy_interface_mode_rmii: + data8 |= port_interface_rmii; + p->phydev.speed = speed_100; + break; + case phy_interface_mode_gmii: + data8 |= port_gmii_1gps_mode; + data8 |= port_interface_gmii; + p->phydev.speed = speed_1000; + break; + default: + data8 &= ~port_rgmii_id_in_enable; + data8 &= ~port_rgmii_id_out_enable; + if (p->interface == phy_interface_mode_rgmii_id || + p->interface == phy_interface_mode_rgmii_rxid) + data8 |= port_rgmii_id_in_enable; + if (p->interface == phy_interface_mode_rgmii_id || + p->interface == phy_interface_mode_rgmii_txid) + data8 |= port_rgmii_id_out_enable; + data8 |= port_gmii_1gps_mode; + data8 |= port_interface_rgmii; + p->phydev.speed = speed_1000; + break; + } + ksz_write8(dev, reg_port_5_ctrl_6, data8); + p->phydev.duplex = 1; +} + - u8 data8, member; + u8 member; - if (!p->interface && dev->compat_interface) { - dev_warn(dev->dev, - "using legacy switch "phy-mode" property, because it is missing on port %d node. " - "please update your device tree. ", - port); - p->interface = dev->compat_interface; - } - - /* configure mii interface for proper network communication. 
*/ - ksz_read8(dev, reg_port_5_ctrl_6, &data8); - data8 &= ~port_interface_type; - data8 &= ~port_gmii_1gps_mode; - switch (p->interface) { - case phy_interface_mode_mii: - p->phydev.speed = speed_100; - break; - case phy_interface_mode_rmii: - data8 |= port_interface_rmii; - p->phydev.speed = speed_100; - break; - case phy_interface_mode_gmii: - data8 |= port_gmii_1gps_mode; - data8 |= port_interface_gmii; - p->phydev.speed = speed_1000; - break; - default: - data8 &= ~port_rgmii_id_in_enable; - data8 &= ~port_rgmii_id_out_enable; - if (p->interface == phy_interface_mode_rgmii_id || - p->interface == phy_interface_mode_rgmii_rxid) - data8 |= port_rgmii_id_in_enable; - if (p->interface == phy_interface_mode_rgmii_id || - p->interface == phy_interface_mode_rgmii_txid) - data8 |= port_rgmii_id_out_enable; - data8 |= port_gmii_1gps_mode; - data8 |= port_interface_rgmii; - p->phydev.speed = speed_1000; - break; - } - ksz_write8(dev, reg_port_5_ctrl_6, data8); - p->phydev.duplex = 1; + ksz8795_cpu_interface_select(dev, port);
Networking
c2ac4d2ac5347a0d2aaabf3eca5ba2478d0617a9
michael grzeschik andrew lunn andrew lunn ch
drivers
net
dsa, microchip
net: dsa: microchip: ksz8795: move register offsets and shifts to separate struct
in order to get this driver used with other switches the functions need to use different offsets and register shifts. this patch changes the direct use of the register defines to register description structures, which can be set depending on the chips register layout.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['h', 'c']
3
281
160
--- diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8.h +/* spdx-license-identifier: gpl-2.0 */ +/* + * microchip ksz8xxx series register access + * + * copyright (c) 2020 pengutronix, michael grzeschik <kernel@pengutronix.de> + */ + +#ifndef __ksz8xxx_h +#define __ksz8xxx_h +#include <linux/kernel.h> + +enum ksz_regs { + reg_ind_ctrl_0, + reg_ind_data_8, + reg_ind_data_check, + reg_ind_data_hi, + reg_ind_data_lo, + reg_ind_mib_check, + p_force_ctrl, + p_link_status, + p_local_ctrl, + p_neg_restart_ctrl, + p_remote_status, + p_speed_status, + s_tail_tag_ctrl, +}; + +enum ksz_masks { + port_802_1p_remapping, + sw_tail_tag_enable, + mib_counter_overflow, + mib_counter_valid, + vlan_table_fid, + vlan_table_membership, + vlan_table_valid, + static_mac_table_valid, + static_mac_table_use_fid, + static_mac_table_fid, + static_mac_table_override, + static_mac_table_fwd_ports, + dynamic_mac_table_entries_h, + dynamic_mac_table_mac_empty, + dynamic_mac_table_not_ready, + dynamic_mac_table_entries, + dynamic_mac_table_fid, + dynamic_mac_table_src_port, + dynamic_mac_table_timestamp, +}; + +enum ksz_shifts { + vlan_table_membership_s, + vlan_table, + static_mac_fwd_ports, + static_mac_fid, + dynamic_mac_entries_h, + dynamic_mac_entries, + dynamic_mac_fid, + dynamic_mac_timestamp, + dynamic_mac_src_port, +}; + +struct ksz8 { + const u8 *regs; + const u32 *masks; + const u8 *shifts; + void *priv; +}; + +#endif diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c +#include "ksz8.h" + +static const u8 ksz8795_regs[] = { + [reg_ind_ctrl_0] = 0x6e, + [reg_ind_data_8] = 0x70, + [reg_ind_data_check] = 0x72, + [reg_ind_data_hi] = 0x71, + [reg_ind_data_lo] = 0x75, + [reg_ind_mib_check] = 0x74, + [p_force_ctrl] = 0x0c, + [p_link_status] = 0x0e, + [p_local_ctrl] = 0x07, + 
[p_neg_restart_ctrl] = 0x0d, + [p_remote_status] = 0x08, + [p_speed_status] = 0x09, + [s_tail_tag_ctrl] = 0x0c, +}; + +static const u32 ksz8795_masks[] = { + [port_802_1p_remapping] = bit(7), + [sw_tail_tag_enable] = bit(1), + [mib_counter_overflow] = bit(6), + [mib_counter_valid] = bit(5), + [vlan_table_fid] = genmask(6, 0), + [vlan_table_membership] = genmask(11, 7), + [vlan_table_valid] = bit(12), + [static_mac_table_valid] = bit(21), + [static_mac_table_use_fid] = bit(23), + [static_mac_table_fid] = genmask(30, 24), + [static_mac_table_override] = bit(26), + [static_mac_table_fwd_ports] = genmask(24, 20), + [dynamic_mac_table_entries_h] = genmask(6, 0), + [dynamic_mac_table_mac_empty] = bit(8), + [dynamic_mac_table_not_ready] = bit(7), + [dynamic_mac_table_entries] = genmask(31, 29), + [dynamic_mac_table_fid] = genmask(26, 20), + [dynamic_mac_table_src_port] = genmask(26, 24), + [dynamic_mac_table_timestamp] = genmask(28, 27), +}; + +static const u8 ksz8795_shifts[] = { + [vlan_table_membership_s] = 7, + [vlan_table] = 16, + [static_mac_fwd_ports] = 16, + [static_mac_fid] = 24, + [dynamic_mac_entries_h] = 3, + [dynamic_mac_entries] = 29, + [dynamic_mac_fid] = 16, + [dynamic_mac_timestamp] = 27, + [dynamic_mac_src_port] = 24, +}; + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + const u8 *regs; + masks = ksz8->masks; + regs = ksz8->regs; + - ksz_write16(dev, reg_ind_ctrl_0, ctrl_addr); + ksz_write16(dev, regs[reg_ind_ctrl_0], ctrl_addr); - ksz_read8(dev, reg_ind_mib_check, &check); + ksz_read8(dev, regs[reg_ind_mib_check], &check); - if (check & mib_counter_valid) { - ksz_read32(dev, reg_ind_data_lo, &data); - if (check & mib_counter_overflow) + if (check & masks[mib_counter_valid]) { + ksz_read32(dev, regs[reg_ind_data_lo], &data); + if (check & masks[mib_counter_overflow]) + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + const u8 *regs; + masks = ksz8->masks; + regs = ksz8->regs; + - ksz_write16(dev, reg_ind_ctrl_0, ctrl_addr); + ksz_write16(dev, 
regs[reg_ind_ctrl_0], ctrl_addr); - ksz_read8(dev, reg_ind_mib_check, &check); + ksz_read8(dev, regs[reg_ind_mib_check], &check); - if (check & mib_counter_valid) { - ksz_read32(dev, reg_ind_data_lo, &data); + if (check & masks[mib_counter_valid]) { + ksz_read32(dev, regs[reg_ind_data_lo], &data); - if (check & mib_counter_overflow) { + if (check & masks[mib_counter_overflow]) { - if (check & mib_counter_overflow) + if (check & masks[mib_counter_overflow]) + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; - ksz_write16(dev, reg_ind_ctrl_0, ctrl_addr); - ksz_read64(dev, reg_ind_data_hi, data); + ksz_write16(dev, regs[reg_ind_ctrl_0], ctrl_addr); + ksz_read64(dev, regs[reg_ind_data_hi], data); + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; - ksz_write64(dev, reg_ind_data_hi, data); - ksz_write16(dev, reg_ind_ctrl_0, ctrl_addr); + ksz_write64(dev, regs[reg_ind_data_hi], data); + ksz_write16(dev, regs[reg_ind_ctrl_0], ctrl_addr); + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + const u8 *regs; + + masks = ksz8->masks; + regs = ksz8->regs; - ksz_read8(dev, reg_ind_data_check, data); + ksz_read8(dev, regs[reg_ind_data_check], data); - } while ((*data & dynamic_mac_table_not_ready) && timeout); + } while ((*data & masks[dynamic_mac_table_not_ready]) && timeout); - if (*data & dynamic_mac_table_not_ready) { + if (*data & masks[dynamic_mac_table_not_ready]) { - ksz_read8(dev, reg_ind_data_8, data); + ksz_read8(dev, regs[reg_ind_data_8], data); - if (*data & dynamic_mac_table_mac_empty) + if (*data & masks[dynamic_mac_table_mac_empty]) + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + const u8 *regs; + shifts = ksz8->shifts; + masks = ksz8->masks; + regs = ksz8->regs; + - ksz_write16(dev, reg_ind_ctrl_0, ctrl_addr); + ksz_write16(dev, regs[reg_ind_ctrl_0], ctrl_addr); - ksz_read64(dev, reg_ind_data_hi, &buf); + ksz_read64(dev, regs[reg_ind_data_hi], &buf); - cnt = data & dynamic_mac_table_entries_h; - cnt <<= 
dynamic_mac_entries_h_s; - cnt |= (data_hi & dynamic_mac_table_entries) >> - dynamic_mac_entries_s; + cnt = data & masks[dynamic_mac_table_entries_h]; + cnt <<= shifts[dynamic_mac_entries_h]; + cnt |= (data_hi & masks[dynamic_mac_table_entries]) >> + shifts[dynamic_mac_entries]; - *fid = (data_hi & dynamic_mac_table_fid) >> - dynamic_mac_fid_s; - *src_port = (data_hi & dynamic_mac_table_src_port) >> - dynamic_mac_src_port_s; - *timestamp = (data_hi & dynamic_mac_table_timestamp) >> - dynamic_mac_timestamp_s; + *fid = (data_hi & masks[dynamic_mac_table_fid]) >> + shifts[dynamic_mac_fid]; + *src_port = (data_hi & masks[dynamic_mac_table_src_port]) >> + shifts[dynamic_mac_src_port]; + *timestamp = (data_hi & masks[dynamic_mac_table_timestamp]) >> + shifts[dynamic_mac_timestamp]; + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + shifts = ksz8->shifts; + masks = ksz8->masks; + - if (data_hi & (static_mac_table_valid | static_mac_table_override)) { + if (data_hi & (masks[static_mac_table_valid] | + masks[static_mac_table_override])) { - alu->port_forward = (data_hi & static_mac_table_fwd_ports) >> - static_mac_fwd_ports_s; + alu->port_forward = + (data_hi & masks[static_mac_table_fwd_ports]) >> + shifts[static_mac_fwd_ports]; - (data_hi & static_mac_table_override) ? 1 : 0; + (data_hi & masks[static_mac_table_override]) ? 1 : 0; - alu->is_use_fid = (data_hi & static_mac_table_use_fid) ? 1 : 0; - alu->fid = (data_hi & static_mac_table_fid) >> - static_mac_fid_s; + alu->is_static = true; + alu->is_use_fid = + (data_hi & masks[static_mac_table_use_fid]) ? 
1 : 0; + alu->fid = (data_hi & masks[static_mac_table_fid]) >> + shifts[static_mac_fid]; + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + shifts = ksz8->shifts; + masks = ksz8->masks; + - data_hi |= (u32)alu->port_forward << static_mac_fwd_ports_s; + data_hi |= (u32)alu->port_forward << shifts[static_mac_fwd_ports]; - data_hi |= static_mac_table_override; + data_hi |= masks[static_mac_table_override]; - data_hi |= static_mac_table_use_fid; - data_hi |= (u32)alu->fid << static_mac_fid_s; + data_hi |= masks[static_mac_table_use_fid]; + data_hi |= (u32)alu->fid << shifts[static_mac_fid]; - data_hi |= static_mac_table_valid; + data_hi |= masks[static_mac_table_valid]; - data_hi &= ~static_mac_table_override; + data_hi &= ~masks[static_mac_table_override]; -static void ksz8_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid) +static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid, + u8 *member, u8 *valid) - *fid = vlan & vlan_table_fid; - *member = (vlan & vlan_table_membership) >> vlan_table_membership_s; - *valid = !!(vlan & vlan_table_valid); + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + + shifts = ksz8->shifts; + masks = ksz8->masks; + + *fid = vlan & masks[vlan_table_fid]; + *member = (vlan & masks[vlan_table_membership]) >> + shifts[vlan_table_membership_s]; + *valid = !!(vlan & masks[vlan_table_valid]); -static void ksz8_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan) +static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid, + u16 *vlan) + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + const u32 *masks; + + shifts = ksz8->shifts; + masks = ksz8->masks; + - *vlan |= (u16)member << vlan_table_membership_s; + *vlan |= (u16)member << shifts[vlan_table_membership_s]; - *vlan |= vlan_table_valid; + *vlan |= masks[vlan_table_valid]; + struct ksz8 *ksz8 = dev->priv; + const u8 *shifts; + shifts = ksz8->shifts; + - data >>= vlan_table_s; + data >>= shifts[vlan_table]; + struct 
ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; - ksz_pread8(dev, p, p_neg_restart_ctrl, &restart); - ksz_pread8(dev, p, p_speed_status, &speed); - ksz_pread8(dev, p, p_force_ctrl, &ctrl); + ksz_pread8(dev, p, regs[p_neg_restart_ctrl], &restart); + ksz_pread8(dev, p, regs[p_speed_status], &speed); + ksz_pread8(dev, p, regs[p_force_ctrl], &ctrl); - ksz_pread8(dev, p, p_link_status, &link); + ksz_pread8(dev, p, regs[p_link_status], &link); - ksz_pread8(dev, p, p_local_ctrl, &ctrl); + ksz_pread8(dev, p, regs[p_local_ctrl], &ctrl); - ksz_pread8(dev, p, p_remote_status, &link); + ksz_pread8(dev, p, regs[p_remote_status], &link); - u8 p = phy; + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; + u8 p = phy; - ksz_pread8(dev, p, p_speed_status, &speed); + ksz_pread8(dev, p, regs[p_speed_status], &speed); - ksz_pwrite8(dev, p, p_speed_status, data); - ksz_pread8(dev, p, p_force_ctrl, &ctrl); + ksz_pwrite8(dev, p, regs[p_speed_status], data); + ksz_pread8(dev, p, regs[p_force_ctrl], &ctrl); - ksz_pwrite8(dev, p, p_force_ctrl, data); - ksz_pread8(dev, p, p_neg_restart_ctrl, &restart); + ksz_pwrite8(dev, p, regs[p_force_ctrl], data); + ksz_pread8(dev, p, regs[p_neg_restart_ctrl], &restart); - ksz_pwrite8(dev, p, p_neg_restart_ctrl, data); + ksz_pwrite8(dev, p, regs[p_neg_restart_ctrl], data); - ksz_pread8(dev, p, p_local_ctrl, &ctrl); + ksz_pread8(dev, p, regs[p_local_ctrl], &ctrl); - ksz_pwrite8(dev, p, p_local_ctrl, data); + ksz_pwrite8(dev, p, regs[p_local_ctrl], data); - ksz8_from_vlan(data, &fid, &member, &valid); + ksz8_from_vlan(dev, data, &fid, &member, &valid); - ksz8_to_vlan(fid, member, valid, &data); + ksz8_to_vlan(dev, fid, member, valid, &data); - ksz8_from_vlan(data, &fid, &member, &valid); + ksz8_from_vlan(dev, data, &fid, &member, &valid); - ksz8_to_vlan(fid, member, valid, &data); + ksz8_to_vlan(dev, fid, member, valid, &data); + struct ksz8 *ksz8 = dev->priv; + const u32 *masks; + masks = ksz8->masks; + - ksz_port_cfg(dev, port, 
p_802_1p_ctrl, port_802_1p_remapping, false); + ksz_port_cfg(dev, port, p_802_1p_ctrl, + masks[port_802_1p_remapping], false); + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; + const u32 *masks; + masks = ksz8->masks; + - ksz_cfg(dev, s_tail_tag_ctrl, sw_tail_tag_enable, true); + ksz_cfg(dev, regs[s_tail_tag_ctrl], masks[sw_tail_tag_enable], true); - ksz_pread8(dev, i, p_remote_status, &remote); + ksz_pread8(dev, i, regs[p_remote_status], &remote); + struct ksz8 *ksz8 = dev->priv; + ksz8->regs = ksz8795_regs; + ksz8->masks = ksz8795_masks; + ksz8->shifts = ksz8795_shifts; + diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h -#define sw_tail_tag_enable bit(1) -#define port_802_1p_remapping bit(7) -#define reg_port_ctrl_7 0x07 -#define reg_port_status_2 0x0e -#define reg_ind_ctrl_0 0x6e - -#define reg_ind_data_8 0x70 -#define reg_ind_data_7 0x71 -#define reg_ind_data_6 0x72 -#define reg_ind_data_4 0x74 -#define reg_ind_data_3 0x75 -#define reg_ind_data_check reg_ind_data_6 -#define reg_ind_mib_check reg_ind_data_4 -#define reg_ind_data_hi reg_ind_data_7 -#define reg_ind_data_lo reg_ind_data_3 - -#define p_local_ctrl reg_port_ctrl_7 -#define p_remote_status reg_port_status_0 -#define p_force_ctrl reg_port_ctrl_9 -#define p_neg_restart_ctrl reg_port_ctrl_10 -#define p_speed_status reg_port_status_1 -#define p_link_status reg_port_status_2 -#define s_tail_tag_ctrl reg_sw_ctrl_10 -/** - * static_mac_table_addr 00-0000ffff-ffffffff - * static_mac_table_fwd_ports 00-001f0000-00000000 - * static_mac_table_valid 00-00200000-00000000 - * static_mac_table_override 00-00400000-00000000 - * static_mac_table_use_fid 00-00800000-00000000 - * static_mac_table_fid 00-7f000000-00000000 - */ - -#define static_mac_table_addr 0x0000ffff -#define static_mac_table_fwd_ports 0x001f0000 -#define static_mac_table_valid 0x00200000 -#define 
static_mac_table_override 0x00400000 -#define static_mac_table_use_fid 0x00800000 -#define static_mac_table_fid 0x7f000000 - -#define static_mac_fwd_ports_s 16 -#define static_mac_fid_s 24 - -/** - * vlan_table_fid 00-007f007f-007f007f - * vlan_table_membership 00-0f800f80-0f800f80 - * vlan_table_valid 00-10001000-10001000 - */ - -#define vlan_table_fid 0x007f -#define vlan_table_membership 0x0f80 -#define vlan_table_valid 0x1000 - -#define vlan_table_membership_s 7 -#define vlan_table_s 16 - -/** - * dynamic_mac_table_addr 00-0000ffff-ffffffff - * dynamic_mac_table_fid 00-007f0000-00000000 - * dynamic_mac_table_not_ready 00-00800000-00000000 - * dynamic_mac_table_src_port 00-07000000-00000000 - * dynamic_mac_table_timestamp 00-18000000-00000000 - * dynamic_mac_table_entries 7f-e0000000-00000000 - * dynamic_mac_table_mac_empty 80-00000000-00000000 - */ - -#define dynamic_mac_table_addr 0x0000ffff -#define dynamic_mac_table_fid 0x007f0000 -#define dynamic_mac_table_src_port 0x07000000 -#define dynamic_mac_table_timestamp 0x18000000 -#define dynamic_mac_table_entries 0xe0000000 - -#define dynamic_mac_table_not_ready 0x80 - -#define dynamic_mac_table_entries_h 0x7f -#define dynamic_mac_table_mac_empty 0x80 - -#define dynamic_mac_fid_s 16 -#define dynamic_mac_src_port_s 24 -#define dynamic_mac_timestamp_s 27 -#define dynamic_mac_entries_s 29 -#define dynamic_mac_entries_h_s 3 - -#define mib_counter_overflow bit(6) -#define mib_counter_valid bit(5) -
Networking
9f73e11250fb3948a8599d72318951d5e93b1eaf
michael grzeschik andrew lunn andrew lunn ch florian fainelli f fainelli gmail com
drivers
net
dsa, microchip
net: dsa: microchip: ksz8795: add support for ksz88xx chips
we add support for the ksz8863 and ksz8873 chips which are using the same register patterns but other offsets as the ksz8795.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['h', 'c']
3
281
71
--- diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c -static const struct { +static const u8 ksz8863_regs[] = { + [reg_ind_ctrl_0] = 0x79, + [reg_ind_data_8] = 0x7b, + [reg_ind_data_check] = 0x7b, + [reg_ind_data_hi] = 0x7c, + [reg_ind_data_lo] = 0x80, + [reg_ind_mib_check] = 0x80, + [p_force_ctrl] = 0x0c, + [p_link_status] = 0x0e, + [p_local_ctrl] = 0x0c, + [p_neg_restart_ctrl] = 0x0d, + [p_remote_status] = 0x0e, + [p_speed_status] = 0x0f, + [s_tail_tag_ctrl] = 0x03, +}; + +static const u32 ksz8863_masks[] = { + [port_802_1p_remapping] = bit(3), + [sw_tail_tag_enable] = bit(6), + [mib_counter_overflow] = bit(7), + [mib_counter_valid] = bit(6), + [vlan_table_fid] = genmask(15, 12), + [vlan_table_membership] = genmask(18, 16), + [vlan_table_valid] = bit(19), + [static_mac_table_valid] = bit(19), + [static_mac_table_use_fid] = bit(21), + [static_mac_table_fid] = genmask(29, 26), + [static_mac_table_override] = bit(20), + [static_mac_table_fwd_ports] = genmask(18, 16), + [dynamic_mac_table_entries_h] = genmask(5, 0), + [dynamic_mac_table_mac_empty] = bit(7), + [dynamic_mac_table_not_ready] = bit(7), + [dynamic_mac_table_entries] = genmask(31, 28), + [dynamic_mac_table_fid] = genmask(19, 16), + [dynamic_mac_table_src_port] = genmask(21, 20), + [dynamic_mac_table_timestamp] = genmask(23, 22), +}; + +static u8 ksz8863_shifts[] = { + [vlan_table_membership_s] = 16, + [static_mac_fwd_ports] = 16, + [static_mac_fid] = 22, + [dynamic_mac_entries_h] = 3, + [dynamic_mac_entries] = 24, + [dynamic_mac_fid] = 16, + [dynamic_mac_timestamp] = 24, + [dynamic_mac_src_port] = 20, +}; + +struct mib_names { -} mib_names[] = { +}; + +static const struct mib_names ksz87xx_mib_names[] = { +static const struct mib_names ksz88xx_mib_names[] = { + { "rx" }, + { "rx_hi" }, + { "rx_undersize" }, + { "rx_fragments" }, + { "rx_oversize" }, + { "rx_jabbers" }, + { 
"rx_symbol_err" }, + { "rx_crc_err" }, + { "rx_align_err" }, + { "rx_mac_ctrl" }, + { "rx_pause" }, + { "rx_bcast" }, + { "rx_mcast" }, + { "rx_ucast" }, + { "rx_64_or_less" }, + { "rx_65_127" }, + { "rx_128_255" }, + { "rx_256_511" }, + { "rx_512_1023" }, + { "rx_1024_1522" }, + { "tx" }, + { "tx_hi" }, + { "tx_late_col" }, + { "tx_pause" }, + { "tx_bcast" }, + { "tx_mcast" }, + { "tx_ucast" }, + { "tx_deferred" }, + { "tx_total_col" }, + { "tx_exc_col" }, + { "tx_single_col" }, + { "tx_mult_col" }, + { "rx_discards" }, + { "tx_discards" }, +}; + +static bool ksz_is_ksz88x3(struct ksz_device *dev) +{ + return dev->chip_id == 0x8830; +} + - /* reset switch */ - ksz_write8(dev, reg_power_management_1, - sw_software_power_down << sw_power_management_mode_s); - ksz_write8(dev, reg_power_management_1, 0); + if (ksz_is_ksz88x3(dev)) { + /* reset switch */ + ksz_cfg(dev, ksz8863_reg_sw_reset, + ksz8863_global_software_reset | ksz8863_pcs_reset, true); + ksz_cfg(dev, ksz8863_reg_sw_reset, + ksz8863_global_software_reset | ksz8863_pcs_reset, false); + } else { + /* reset switch */ + ksz_write8(dev, reg_power_management_1, + sw_software_power_down << sw_power_management_mode_s); + ksz_write8(dev, reg_power_management_1, 0); + } -static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, - u64 *dropped, u64 *cnt) +static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) - ctrl_addr = (ks_mib_total_rx_1 - ks_mib_total_rx_0) * port; - ctrl_addr += addr + ks_mib_total_rx_0; + ctrl_addr = (ksz8795_mib_total_rx_1 - ksz8795_mib_total_rx_0) * port; + ctrl_addr += addr + ksz8795_mib_total_rx_0; +static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + struct ksz8 *ksz8 = dev->priv; + const u8 *regs = ksz8->regs; + u32 *last = (u32 *)dropped; + u16 ctrl_addr; + u32 data; + u32 cur; + + addr -= dev->reg_mib_cnt; + ctrl_addr = addr ? 
ksz8863_mib_packet_dropped_tx_0 : + ksz8863_mib_packet_dropped_rx_0; + ctrl_addr += port; + ctrl_addr |= ind_acc_table(table_mib | table_read); + + mutex_lock(&dev->alu_mutex); + ksz_write16(dev, regs[reg_ind_ctrl_0], ctrl_addr); + ksz_read32(dev, regs[reg_ind_data_lo], &data); + mutex_unlock(&dev->alu_mutex); + + data &= mib_packet_dropped; + cur = last[addr]; + if (data != cur) { + last[addr] = data; + if (data < cur) + data += mib_packet_dropped + 1; + data -= cur; + *cnt += data; + } +} + +static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + if (ksz_is_ksz88x3(dev)) + ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt); + else + ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt); +} + + if (ksz_is_ksz88x3(dev)) + return; + + u64 *dropped; - /* flush all enabled port mib counters */ - ksz_cfg(dev, reg_sw_ctrl_6, bit(port), true); - ksz_cfg(dev, reg_sw_ctrl_6, sw_mib_counter_flush, true); - ksz_cfg(dev, reg_sw_ctrl_6, bit(port), false); + if (!ksz_is_ksz88x3(dev)) { + /* flush all enabled port mib counters */ + ksz_cfg(dev, reg_sw_ctrl_6, bit(port), true); + ksz_cfg(dev, reg_sw_ctrl_6, sw_mib_counter_flush, true); + ksz_cfg(dev, reg_sw_ctrl_6, bit(port), false); + } ++mib->cnt_ptr; + /* last one in storage */ + dropped = &mib->counters[dev->mib_cnt]; + - null, &mib->counters[mib->cnt_ptr]); + dropped, &mib->counters[mib->cnt_ptr]); ++mib->cnt_ptr; - if (!(ctrl & port_auto_neg_disable)) - data |= phy_auto_neg_enable; + if (ksz_is_ksz88x3(dev)) { + if ((ctrl & port_auto_neg_enable)) + data |= phy_auto_neg_enable; + } else { + if (!(ctrl & port_auto_neg_disable)) + data |= phy_auto_neg_enable; + } - data = ksz8795_id_lo; + if (ksz_is_ksz88x3(dev)) + data = ksz8863_id_lo; + else + data = ksz8795_id_lo; - if (!(val & phy_auto_neg_enable)) - data |= port_auto_neg_disable; - else - data &= ~port_auto_neg_disable; + if (ksz_is_ksz88x3(dev)) { + if ((val & phy_auto_neg_enable)) + data |= port_auto_neg_enable; + else + data &= 
~port_auto_neg_enable; + } else { + if (!(val & phy_auto_neg_enable)) + data |= port_auto_neg_disable; + else + data &= ~port_auto_neg_disable; + + /* fiber port does not support auto-negotiation. */ + if (dev->ports[p].fiber) + data |= port_auto_neg_disable; + } - /* fiber port does not support auto-negotiation. */ - if (dev->ports[p].fiber) - data |= port_auto_neg_disable; - return dsa_tag_proto_ksz8795; + struct ksz_device *dev = ds->priv; + + /* ksz88x3 uses the same tag schema as ksz9893 */ + return ksz_is_ksz88x3(dev) ? + dsa_tag_proto_ksz9893 : dsa_tag_proto_ksz8795; - memcpy(buf + i * eth_gstring_len, mib_names[i].string, - eth_gstring_len); + memcpy(buf + i * eth_gstring_len, + dev->mib_names[i].string, eth_gstring_len); + if (ksz_is_ksz88x3(dev)) + return -enotsupp; + + if (ksz_is_ksz88x3(dev)) + return -enotsupp; + + if (ksz_is_ksz88x3(dev)) + return -enotsupp; + - ksz8795_set_prio_queue(dev, port, 4); + if (!ksz_is_ksz88x3(dev)) + ksz8795_set_prio_queue(dev, port, 4); - ksz8795_cpu_interface_select(dev, port); + if (!ksz_is_ksz88x3(dev)) + ksz8795_cpu_interface_select(dev, port); - ksz_pread8(dev, i, regs[p_remote_status], &remote); - if (remote & port_fiber_mode) - p->fiber = 1; + if (!ksz_is_ksz88x3(dev)) { + ksz_pread8(dev, i, regs[p_remote_status], &remote); + if (remote & port_fiber_mode) + p->fiber = 1; + } - if (id1 != family_id || - (id2 != chip_id_94 && id2 != chip_id_95)) - return -enodev; - if (id2 == chip_id_95) { - u8 val; + switch (id1) { + case ksz87_family_id: + if ((id2 != chip_id_94 && id2 != chip_id_95)) + return -enodev; + + if (id2 == chip_id_95) { + u8 val; - id2 = 0x95; - ksz_read8(dev, reg_port_1_status_0, &val); - if (val & port_fiber_mode) - id2 = 0x65; - } else if (id2 == chip_id_94) { - id2 = 0x94; + id2 = 0x95; + ksz_read8(dev, reg_port_status_0, &val); + if (val & port_fiber_mode) + id2 = 0x65; + } else if (id2 == chip_id_94) { + id2 = 0x94; + } + break; + case ksz88_family_id: + if (id2 != chip_id_63) + return -enodev; + 
break; + default: + dev_err(dev->dev, "invalid family id: %d ", id1); + return -enodev; + { + .chip_id = 0x8830, + .dev_name = "ksz8863/ksz8873", + .num_vlans = 16, + .num_alus = 0, + .num_statics = 8, + .cpu_ports = 0x4, /* can be configured as cpu port */ + .port_cnt = 3, + }, - ksz8->regs = ksz8795_regs; - ksz8->masks = ksz8795_masks; - ksz8->shifts = ksz8795_shifts; + if (ksz_is_ksz88x3(dev)) { + ksz8->regs = ksz8863_regs; + ksz8->masks = ksz8863_masks; + ksz8->shifts = ksz8863_shifts; + dev->mib_cnt = array_size(ksz88xx_mib_names); + dev->mib_names = ksz88xx_mib_names; + } else { + ksz8->regs = ksz8795_regs; + ksz8->masks = ksz8795_masks; + ksz8->shifts = ksz8795_shifts; + dev->mib_cnt = array_size(ksz87xx_mib_names); + dev->mib_names = ksz87xx_mib_names; + } - dev->reg_mib_cnt = ksz8795_counter_num; - dev->mib_cnt = array_size(mib_names); + dev->reg_mib_cnt = mib_counter_num; diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h --- a/drivers/net/dsa/microchip/ksz8795_reg.h +++ b/drivers/net/dsa/microchip/ksz8795_reg.h -#define family_id 0x87 +#define ksz87_family_id 0x87 +#define ksz88_family_id 0x88 +#define chip_id_63 0x30 + +#define ksz8863_reg_sw_reset 0x43 + +#define ksz8863_global_software_reset bit(4) +#define ksz8863_pcs_reset bit(0) +#define port_auto_neg_enable bit(7) +#define ksz8863_id_lo 0x1430 -#define ksz8795_counter_num 0x20 +#define mib_counter_num 0x20 -#define ks_mib_total_rx_0 0x100 -#define ks_mib_total_tx_0 0x101 -#define ks_mib_packet_dropped_rx_0 0x102 -#define ks_mib_packet_dropped_tx_0 0x103 -#define ks_mib_total_rx_1 0x104 -#define ks_mib_total_tx_1 0x105 -#define ks_mib_packet_dropped_tx_1 0x106 -#define ks_mib_packet_dropped_rx_1 0x107 -#define ks_mib_total_rx_2 0x108 -#define ks_mib_total_tx_2 0x109 -#define ks_mib_packet_dropped_tx_2 0x10a -#define ks_mib_packet_dropped_rx_2 0x10b -#define ks_mib_total_rx_3 0x10c -#define ks_mib_total_tx_3 0x10d -#define ks_mib_packet_dropped_tx_3 0x10e 
-#define ks_mib_packet_dropped_rx_3 0x10f -#define ks_mib_total_rx_4 0x110 -#define ks_mib_total_tx_4 0x111 -#define ks_mib_packet_dropped_tx_4 0x112 -#define ks_mib_packet_dropped_rx_4 0x113 +#define ksz8795_mib_total_rx_0 0x100 +#define ksz8795_mib_total_tx_0 0x101 +#define ksz8795_mib_total_rx_1 0x104 +#define ksz8795_mib_total_tx_1 0x105 + +#define ksz8863_mib_packet_dropped_tx_0 0x100 +#define ksz8863_mib_packet_dropped_rx_0 0x105 diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h + const struct mib_names *mib_names;
Networking
4b20a07e103f0b38b376b4b45c7c082202a876ff
oleksij rempel andrew lunn andrew lunn ch
drivers
net
dsa, microchip
net: dsa: microchip: add microchip ksz8863 spi based driver support
add ksz88x3 driver support. we add support for the kxz88x3 three port switches using the spi interface.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['c']
1
32
12
--- diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c --- a/drivers/net/dsa/microchip/ksz8795_spi.c +++ b/drivers/net/dsa/microchip/ksz8795_spi.c +#include "ksz8.h" -#define spi_addr_shift 12 -#define spi_addr_align 3 -#define spi_turnaround_shift 1 +#define ksz8795_spi_addr_shift 12 +#define ksz8795_spi_addr_align 3 +#define ksz8795_spi_turnaround_shift 1 -ksz_regmap_table(ksz8795, 16, spi_addr_shift, - spi_turnaround_shift, spi_addr_align); +#define ksz8863_spi_addr_shift 8 +#define ksz8863_spi_addr_align 8 +#define ksz8863_spi_turnaround_shift 0 + +ksz_regmap_table(ksz8795, 16, ksz8795_spi_addr_shift, + ksz8795_spi_turnaround_shift, ksz8795_spi_addr_align); + +ksz_regmap_table(ksz8863, 16, ksz8863_spi_addr_shift, + ksz8863_spi_turnaround_shift, ksz8863_spi_addr_align); + const struct regmap_config *regmap_config; + struct device *ddev = &spi->dev; - int i, ret; + struct ksz8 *ksz8; + int i, ret = 0; - dev = ksz_switch_alloc(&spi->dev, spi); + ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), gfp_kernel); + ksz8->priv = spi; + + dev = ksz_switch_alloc(&spi->dev, ksz8); + regmap_config = device_get_match_data(ddev); + if (!regmap_config) + return -einval; + - rc = ksz8795_regmap_config[i]; + rc = regmap_config[i]; - ksz8795_regmap_config[i].val_bits, ret); + regmap_config[i].val_bits, ret); - { .compatible = "microchip,ksz8765" }, - { .compatible = "microchip,ksz8794" }, - { .compatible = "microchip,ksz8795" }, + { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config }, + { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config }, + { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config }, + { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config }, + { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config },
Networking
cc13e52c3a894e407f5b95052b0012b07101ebec
michael grzeschik florian fainelli f fainelli gmail com andrew lunn andrew lunn ch
drivers
net
dsa, microchip
dt-bindings: net: dsa: document additional microchip ksz8863/8873 switch
it is a 3-port 10/100 ethernet switch. one cpu-port and two switch-ports.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['yaml']
1
2
0
--- diff --git a/documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml --- a/documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml +++ b/documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml - microchip,ksz8765 - microchip,ksz8794 - microchip,ksz8795 + - microchip,ksz8863 + - microchip,ksz8873 - microchip,ksz9477 - microchip,ksz9897 - microchip,ksz9896
Networking
61df0e7bbb90fac8c77203e0fa570804617f137d
michael grzeschik rob herring robh kernel org andrew lunn andrew lunn ch florian fainelli f fainelli gmail com
documentation
devicetree
bindings, dsa, net
net: phy: add support for microchip smi0 mdio bus
smi0 is a mangled version of mdio. the main low level difference is the mdio c22 op code is always 0, not 0x2 or 0x1 for read/write. the read/write information is instead encoded in the phy address.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['h', 'c']
3
17
2
--- diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c - mdiobb_cmd(ctrl, mdio_read, phy, reg); + mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg); - mdiobb_cmd(ctrl, mdio_write, phy, reg); + mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg); + if (!ctrl->override_op_c22) { + ctrl->op_c22_read = mdio_read; + ctrl->op_c22_write = mdio_write; + } diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c --- a/drivers/net/mdio/mdio-gpio.c +++ b/drivers/net/mdio/mdio-gpio.c + if (dev->of_node && + of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) { + bitbang->ctrl.op_c22_read = 0; + bitbang->ctrl.op_c22_write = 0; + bitbang->ctrl.override_op_c22 = 1; + } + + { .compatible = "microchip,mdio-smi0" }, diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h --- a/include/linux/mdio-bitbang.h +++ b/include/linux/mdio-bitbang.h + unsigned int override_op_c22; + u8 op_c22_read; + u8 op_c22_write;
Networking
800fcab8230f622544a12403977b5b7259a076f8
andrew lunn
include
linux
mdio
net: dsa: microchip: add microchip ksz8863 smi based driver support
add ksz88x3 driver support. we add support for the kxz88x3 three port switches using the microchip smi interface. they are supported using the mdio-bitbang interface.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for ksz88x3 driver family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ']
['kconfig', 'c', 'makefile']
3
223
1
--- diff --git a/drivers/net/dsa/microchip/kconfig b/drivers/net/dsa/microchip/kconfig --- a/drivers/net/dsa/microchip/kconfig +++ b/drivers/net/dsa/microchip/kconfig - this driver adds support for microchip ksz8795 switch chips. + this driver adds support for microchip ksz8795/ksz88x3 switch chips. + +config net_dsa_microchip_ksz8863_smi + tristate "ksz series smi connected switch driver" + depends on net_dsa_microchip_ksz8795 + select mdio_bitbang + help + select to enable support for registering switches configured through + microchip smi. it supports the ksz8863 and ksz8873 switch. diff --git a/drivers/net/dsa/microchip/makefile b/drivers/net/dsa/microchip/makefile --- a/drivers/net/dsa/microchip/makefile +++ b/drivers/net/dsa/microchip/makefile +obj-$(config_net_dsa_microchip_ksz8863_smi) += ksz8863_smi.o diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c --- /dev/null +++ b/drivers/net/dsa/microchip/ksz8863_smi.c +// spdx-license-identifier: gpl-2.0 +/* + * microchip ksz8863 series register access through smi + * + * copyright (c) 2019 pengutronix, michael grzeschik <kernel@pengutronix.de> + */ + +#include "ksz8.h" +#include "ksz_common.h" + +/* serial management interface (smi) uses the following frame format: + * + * preamble|start|read/write| phy | reg |ta| data bits | idle + * |frame| op code |address |address| | | + * read | 32x1's | 01 | 00 | 1xrrr | rrrrr |z0| 00000000dddddddd | z + * write| 32x1's | 01 | 00 | 0xrrr | rrrrr |10| xxxxxxxxdddddddd | z + * + */ + +#define smi_ksz88xx_read_phy bit(4) + +static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len, + void *val_buf, size_t val_len) +{ + struct ksz_device *dev = ctx; + struct mdio_device *mdev; + u8 reg = *(u8 *)reg_buf; + u8 *val = val_buf; + struct ksz8 *ksz8; + int i, ret = 0; + + ksz8 = dev->priv; + mdev = ksz8->priv; + + mutex_lock_nested(&mdev->bus->mdio_lock, mdio_mutex_nested); + for (i = 0; i < val_len; i++) { + int tmp = reg 
+ i; + + ret = __mdiobus_read(mdev->bus, ((tmp & 0xe0) >> 5) | + smi_ksz88xx_read_phy, tmp); + if (ret < 0) + goto out; + + val[i] = ret; + } + ret = 0; + + out: + mutex_unlock(&mdev->bus->mdio_lock); + + return ret; +} + +static int ksz8863_mdio_write(void *ctx, const void *data, size_t count) +{ + struct ksz_device *dev = ctx; + struct mdio_device *mdev; + struct ksz8 *ksz8; + int i, ret = 0; + u32 reg; + u8 *val; + + ksz8 = dev->priv; + mdev = ksz8->priv; + + val = (u8 *)(data + 4); + reg = *(u32 *)data; + + mutex_lock_nested(&mdev->bus->mdio_lock, mdio_mutex_nested); + for (i = 0; i < (count - 4); i++) { + int tmp = reg + i; + + ret = __mdiobus_write(mdev->bus, ((tmp & 0xe0) >> 5), + tmp, val[i]); + if (ret < 0) + goto out; + } + + out: + mutex_unlock(&mdev->bus->mdio_lock); + + return ret; +} + +static const struct regmap_bus regmap_smi[] = { + { + .read = ksz8863_mdio_read, + .write = ksz8863_mdio_write, + .max_raw_read = 1, + .max_raw_write = 1, + }, + { + .read = ksz8863_mdio_read, + .write = ksz8863_mdio_write, + .val_format_endian_default = regmap_endian_big, + .max_raw_read = 2, + .max_raw_write = 2, + }, + { + .read = ksz8863_mdio_read, + .write = ksz8863_mdio_write, + .val_format_endian_default = regmap_endian_big, + .max_raw_read = 4, + .max_raw_write = 4, + } +}; + +static const struct regmap_config ksz8863_regmap_config[] = { + { + .name = "#8", + .reg_bits = 8, + .pad_bits = 24, + .val_bits = 8, + .cache_type = regcache_none, + .use_single_read = 1, + .lock = ksz_regmap_lock, + .unlock = ksz_regmap_unlock, + }, + { + .name = "#16", + .reg_bits = 8, + .pad_bits = 24, + .val_bits = 16, + .cache_type = regcache_none, + .use_single_read = 1, + .lock = ksz_regmap_lock, + .unlock = ksz_regmap_unlock, + }, + { + .name = "#32", + .reg_bits = 8, + .pad_bits = 24, + .val_bits = 32, + .cache_type = regcache_none, + .use_single_read = 1, + .lock = ksz_regmap_lock, + .unlock = ksz_regmap_unlock, + } +}; + +static int ksz8863_smi_probe(struct mdio_device 
*mdiodev) +{ + struct regmap_config rc; + struct ksz_device *dev; + struct ksz8 *ksz8; + int ret; + int i; + + ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), gfp_kernel); + ksz8->priv = mdiodev; + + dev = ksz_switch_alloc(&mdiodev->dev, ksz8); + if (!dev) + return -einval; + + for (i = 0; i < array_size(ksz8863_regmap_config); i++) { + rc = ksz8863_regmap_config[i]; + rc.lock_arg = &dev->regmap_mutex; + dev->regmap[i] = devm_regmap_init(&mdiodev->dev, + &regmap_smi[i], dev, + &rc); + if (is_err(dev->regmap[i])) { + ret = ptr_err(dev->regmap[i]); + dev_err(&mdiodev->dev, + "failed to initialize regmap%i: %d ", + ksz8863_regmap_config[i].val_bits, ret); + return ret; + } + } + + if (mdiodev->dev.platform_data) + dev->pdata = mdiodev->dev.platform_data; + + ret = ksz8_switch_register(dev); + + /* main dsa driver may not be started yet. */ + if (ret) + return ret; + + dev_set_drvdata(&mdiodev->dev, dev); + + return 0; +} + +static void ksz8863_smi_remove(struct mdio_device *mdiodev) +{ + struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev); + + if (dev) + ksz_switch_remove(dev); +} + +static const struct of_device_id ksz8863_dt_ids[] = { + { .compatible = "microchip,ksz8863" }, + { .compatible = "microchip,ksz8873" }, + { }, +}; +module_device_table(of, ksz8863_dt_ids); + +static struct mdio_driver ksz8863_driver = { + .probe = ksz8863_smi_probe, + .remove = ksz8863_smi_remove, + .mdiodrv.driver = { + .name = "ksz8863-switch", + .of_match_table = ksz8863_dt_ids, + }, +}; + +mdio_module_driver(ksz8863_driver); + +module_author("michael grzeschik <m.grzeschik@pengutronix.de>"); +module_description("microchip ksz8863 smi switch driver"); +module_license("gpl v2");
Networking
60a3647600027cbd54eb21997af3e175fbfa5592
michael grzeschik andrew lunn andrew lunn ch
drivers
net
dsa, microchip
net: dsa: mt7530: add support for eee features
this patch adds eee support.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for eee features
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mt7530']
['h', 'c']
2
56
1
--- diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c + if (mode == mlo_an_phy && phydev && phy_init_eee(phydev, 0) >= 0) { + switch (speed) { + case speed_1000: + mcr |= pmcr_force_eee1g; + break; + case speed_100: + mcr |= pmcr_force_eee100; + break; + } + } + +static int mt753x_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mt7530_priv *priv = ds->priv; + u32 eeecr = mt7530_read(priv, mt7530_pmeeecr_p(port)); + + e->tx_lpi_enabled = !(eeecr & lpi_mode_en); + e->tx_lpi_timer = get_lpi_thresh(eeecr); + + return 0; +} + +static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mt7530_priv *priv = ds->priv; + u32 set, mask = lpi_thresh_mask | lpi_mode_en; + + if (e->tx_lpi_timer > 0xfff) + return -einval; + + set = set_lpi_thresh(e->tx_lpi_timer); + if (!e->tx_lpi_enabled) + /* force lpi mode without a delay */ + set |= lpi_mode_en; + mt7530_rmw(priv, mt7530_pmeeecr_p(port), mask, set); + + return 0; +} + + .get_mac_eee = mt753x_get_mac_eee, + .set_mac_eee = mt753x_set_mac_eee, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h +#define pmcr_force_eee1g bit(7) +#define pmcr_force_eee100 bit(6) - pmcr_force_fdx | pmcr_force_lnk) + pmcr_force_fdx | pmcr_force_lnk | \ + pmcr_force_eee1g | pmcr_force_eee100) +#define mt7530_pmeeecr_p(x) (0x3004 + (x) * 0x100) +#define wakeup_time_1000(x) (((x) & 0xff) << 24) +#define wakeup_time_100(x) (((x) & 0xff) << 16) +#define lpi_thresh_mask genmask(15, 4) +#define lpi_thresh_sht 4 +#define set_lpi_thresh(x) (((x) << lpi_thresh_sht) & lpi_thresh_mask) +#define get_lpi_thresh(x) (((x) & lpi_thresh_mask) >> lpi_thresh_sht) +#define lpi_mode_en bit(0) +
Networking
40b5d2f15c091fa9c854acde91ad2acb504027d7
ren van dorst
drivers
net
dsa
net: dsa: mt7530: support mdb and bridge flag operations
support port mdb and bridge flag operations.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support mdb and bridge flag operations
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mt7530']
['h', 'c']
3
122
17
--- diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c - /* unknown multicast frame forwarding to the cpu port */ - mt7530_rmw(priv, mt7530_mfc, unm_ffp_mask, unm_ffp(bit(port))); + /* disable flooding by default */ + mt7530_rmw(priv, mt7530_mfc, bc_ffp_mask | unm_ffp_mask | unu_ffp_mask, + bc_ffp(bit(port)) | unm_ffp(bit(port)) | unu_ffp(bit(port))); +static int +mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~(br_learning | br_flood | br_mcast_flood | + br_bcast_flood)) + return -einval; + + return 0; +} + +static int +mt7530_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + struct mt7530_priv *priv = ds->priv; + + if (flags.mask & br_learning) + mt7530_rmw(priv, mt7530_psc_p(port), sa_dis, + flags.val & br_learning ? 0 : sa_dis); + + if (flags.mask & br_flood) + mt7530_rmw(priv, mt7530_mfc, unu_ffp(bit(port)), + flags.val & br_flood ? unu_ffp(bit(port)) : 0); + + if (flags.mask & br_mcast_flood) + mt7530_rmw(priv, mt7530_mfc, unm_ffp(bit(port)), + flags.val & br_mcast_flood ? unm_ffp(bit(port)) : 0); + + if (flags.mask & br_bcast_flood) + mt7530_rmw(priv, mt7530_mfc, bc_ffp(bit(port)), + flags.val & br_bcast_flood ? bc_ffp(bit(port)) : 0); + + return 0; +} + +static int +mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter, + struct netlink_ext_ack *extack) +{ + struct mt7530_priv *priv = ds->priv; + + mt7530_rmw(priv, mt7530_mfc, unm_ffp(bit(port)), + mrouter ? 
unm_ffp(bit(port)) : 0); + + return 0; +} + +static int +mt7530_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mt7530_priv *priv = ds->priv; + const u8 *addr = mdb->addr; + u16 vid = mdb->vid; + u8 port_mask = 0; + int ret; + + mutex_lock(&priv->reg_mutex); + + mt7530_fdb_write(priv, vid, 0, addr, 0, static_emp); + if (!mt7530_fdb_cmd(priv, mt7530_fdb_read, null)) + port_mask = (mt7530_read(priv, mt7530_atrd) >> port_map) + & port_map_mask; + + port_mask |= bit(port); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, static_ent); + ret = mt7530_fdb_cmd(priv, mt7530_fdb_write, null); + + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int +mt7530_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct mt7530_priv *priv = ds->priv; + const u8 *addr = mdb->addr; + u16 vid = mdb->vid; + u8 port_mask = 0; + int ret; + + mutex_lock(&priv->reg_mutex); + + mt7530_fdb_write(priv, vid, 0, addr, 0, static_emp); + if (!mt7530_fdb_cmd(priv, mt7530_fdb_read, null)) + port_mask = (mt7530_read(priv, mt7530_atrd) >> port_map) + & port_map_mask; + + port_mask &= ~bit(port); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, + port_mask ? 
static_ent : static_emp); + ret = mt7530_fdb_cmd(priv, mt7530_fdb_write, null); + + mutex_unlock(&priv->reg_mutex); + + return ret; +} + - } else + } else { + /* disable learning by default on all user ports */ + mt7530_set(priv, mt7530_psc_p(i), sa_dis); + } - } else + } else { + /* disable learning by default on all user ports */ + mt7530_set(priv, mt7530_psc_p(i), sa_dis); + } + + .port_pre_bridge_flags = mt7530_port_pre_bridge_flags, + .port_bridge_flags = mt7530_port_bridge_flags, + .port_set_mrouter = mt7530_port_set_mrouter, + .port_mdb_add = mt7530_port_mdb_add, + .port_mdb_del = mt7530_port_mdb_del, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h +#define bc_ffp_mask bc_ffp(~0) diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c --- a/net/dsa/tag_mtk.c +++ b/net/dsa/tag_mtk.c - unsigned char *dest = eth_hdr(skb)->h_dest; - bool is_multicast_skb = is_multicast_ether_addr(dest) && - !is_broadcast_ether_addr(dest); - /* disable sa learning for multicast frames */ - if (unlikely(is_multicast_skb)) - mtk_tag[1] |= mtk_hdr_xmit_sa_dis; - - unsigned char *dest = eth_hdr(skb)->h_dest; - bool is_multicast_skb = is_multicast_ether_addr(dest) && - !is_broadcast_ether_addr(dest); - /* only unicast or broadcast frames are offloaded */ - if (likely(!is_multicast_skb)) - skb->offload_fwd_mark = 1; + skb->offload_fwd_mark = 1;
Networking
5a30833b9a16f8d1aa15de06636f9317ca51f9df
deng qingfang
net
dsa
dsa
net: dsa: add helper to resolve bridge port from dsa port
in order for a driver to be able to query a bridge for information about itself, e.g. reading out port flags, it has to use a netdev that is known to the bridge. in the simple case, that is just the netdev representing the port, e.g. swp0 or swp1 in this example:
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['h']
2
15
13
--- diff --git a/include/net/dsa.h b/include/net/dsa.h --- a/include/net/dsa.h +++ b/include/net/dsa.h +static inline +struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) +{ + if (!dp->bridge_dev) + return null; + + if (dp->lag_dev) + return dp->lag_dev; + else if (dp->hsr_dev) + return dp->hsr_dev; + + return dp->slave; +} + diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h - /* switchdev offloading can be configured on: */ - - if (dev == dp->slave) - /* dsa ports directly connected to a bridge, and event - * was emitted for the ports themselves. - */ - return true; - - if (dp->lag_dev == dev) - /* dsa ports connected to a bridge via a lag */ - return true; - - return false; + return dsa_port_to_bridge_port(dp) == dev;
Networking
cc76ce9e8dc659561ee62876da2cffc03fb58cc5
tobias waldekranz
include
net
net: dsa: mv88e6xxx: avoid useless attempts to fast-age lags
when a port is a part of a lag, the atu will create dynamic entries belonging to the lag id when learning is enabled. so trying to fast-age those out using the constituent port will have no effect. unfortunately the hardware does not support move operations on lags so there is no obvious way to transform the request to target the lag instead.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['c']
1
7
0
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c + if (dsa_to_port(ds, port)->lag_dev) + /* hardware is incapable of fast-aging a lag through a + * regular atu move operation. until we have something + * more fancy in place this is a no-op. + */ + return; +
Networking
ffcec3f257ccc2bf27642b9b1d97d2141f9cfcec
tobias waldekranz
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: provide generic vtu iterator
move the intricacies of correctly iterating over the vtu to a common implementation.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['c']
1
64
36
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c +static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip, + int (*cb)(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *priv), + void *priv) +{ + struct mv88e6xxx_vtu_entry entry = { + .vid = mv88e6xxx_max_vid(chip), + .valid = false, + }; + int err; + + if (!chip->info->ops->vtu_getnext) + return -eopnotsupp; + + do { + err = chip->info->ops->vtu_getnext(chip, &entry); + if (err) + return err; + + if (!entry.valid) + break; + + err = cb(chip, &entry, priv); + if (err) + return err; + } while (entry.vid < mv88e6xxx_max_vid(chip)); + + return 0; +} + +static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *_fid_bitmap) +{ + unsigned long *fid_bitmap = _fid_bitmap; + + set_bit(entry->fid, fid_bitmap); + return 0; +} + - struct mv88e6xxx_vtu_entry vlan; - vlan.vid = mv88e6xxx_max_vid(chip); - vlan.valid = false; - - do { - err = mv88e6xxx_vtu_getnext(chip, &vlan); - if (err) - return err; - - if (!vlan.valid) - break; - - set_bit(vlan.fid, fid_bitmap); - } while (vlan.vid < mv88e6xxx_max_vid(chip)); - - return 0; + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap); +struct mv88e6xxx_port_db_dump_vlan_ctx { + int port; + dsa_fdb_dump_cb_t *cb; + void *data; +}; + +static int mv88e6xxx_port_db_dump_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *entry, + void *_data) +{ + struct mv88e6xxx_port_db_dump_vlan_ctx *ctx = _data; + + return mv88e6xxx_port_db_dump_fid(chip, entry->fid, entry->vid, + ctx->port, ctx->cb, ctx->data); +} + - struct mv88e6xxx_vtu_entry vlan; + struct mv88e6xxx_port_db_dump_vlan_ctx ctx = { + .port = port, + .cb = cb, + .data = data, + }; - /* dump vlans' filtering information databases */ - vlan.vid = mv88e6xxx_max_vid(chip); - vlan.valid = false; - - do { - 
err = mv88e6xxx_vtu_getnext(chip, &vlan); - if (err) - return err; - - if (!vlan.valid) - break; - - err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port, - cb, data); - if (err) - return err; - } while (vlan.vid < mv88e6xxx_max_vid(chip)); - - return err; + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_db_dump_vlan, &ctx);
Networking
d89ef4b8b39cdb88675b2629b35dc9ffdf5ca347
tobias waldekranz andrew lunn andrew lunn ch vladimir oltean olteanv gmail com florian fainelli f fainelli gmail com
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: remove some bureaucracy around querying the vtu
the hardware has a somewhat quirky protocol for reading out the vtu entry for a particular vid. but there is no reason why we cannot create a better api for ourselves in the driver.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['c']
1
20
25
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c -static int mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, - struct mv88e6xxx_vtu_entry *entry) +static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_entry *entry) + int err; + - return chip->info->ops->vtu_getnext(chip, entry); + entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip); + entry->valid = false; + + err = chip->info->ops->vtu_getnext(chip, entry); + + if (entry->vid != vid) + entry->valid = false; + + return err; - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); - if (vlan.vid != vid) - return 0; - - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); - if (vlan.vid != vid || !vlan.valid) + if (!vlan.valid) - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); - if (vlan.vid != vid || !vlan.valid) { + if (!vlan.valid) { - vlan.vid = vid - 1; - vlan.valid = false; - - err = mv88e6xxx_vtu_getnext(chip, &vlan); + err = mv88e6xxx_vtu_get(chip, vid, &vlan); - if (vlan.vid != vid || !vlan.valid || + if (!vlan.valid ||
Networking
34065c58306dab883deb323f2edf6074f2225c19
tobias waldekranz vladimir oltean olteanv gmail com florian fainelli f fainelli gmail com
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: use standard helper for broadcast address
use the conventional declaration style of a mac address in the kernel (u8 addr[eth_alen]) for the broadcast address, then set it using the existing helper.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['c']
1
3
1
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c - const char broadcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + u8 broadcast[eth_alen]; + + eth_broadcast_addr(broadcast);
Networking
0806dd4654145e70e4a4c5b06ddad4cd7a121fdf
tobias waldekranz vladimir oltean olteanv gmail com florian fainelli f fainelli gmail com
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: flood all traffic classes on standalone ports
in accordance with the comment in dsa_port_bridge_leave, standalone ports shall be configured to flood all types of traffic. this change aligns the mv88e6xxx driver with that policy.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['c']
1
2
6
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c - struct dsa_switch *ds = chip->ds; - bool flood; - /* upstream ports flood frames with unknown unicast or multicast da */ - flood = dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port); - err = chip->info->ops->port_set_ucast_flood(chip, port, flood); + err = chip->info->ops->port_set_ucast_flood(chip, port, true); - err = chip->info->ops->port_set_mcast_flood(chip, port, flood); + err = chip->info->ops->port_set_mcast_flood(chip, port, true);
Networking
7b9f16fe401c98cd3e1cb92d02bb7184a6d9e4c1
tobias waldekranz
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: offload bridge learning flag
allow a user to control automatic learning per port.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['h', 'c']
3
52
8
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c - /* port association vector: when learning source addresses - * of packets, add the address to the address database using - * a port bitmap that has only the bit for this port set and - * the other bits clear. + /* port association vector: disable automatic address learning + * on all user ports since they start out in standalone + * mode. when joining a bridge, learning will be configured to + * match the bridge port settings. enable learning on all + * dsa/cpu ports. note: from_cpu frames always bypass the + * learning process. + * + * disable holdat1, intonageout, lockedport, ignorewrongdata, + * and refreshlocked. i.e. setup standard automatic learning. - reg = 1 << port; - /* disable learning for cpu port */ - if (dsa_is_cpu_port(ds, port)) + if (dsa_is_user_port(ds, port)) + else + reg = 1 << port; - if (flags.mask & ~(br_flood | br_mcast_flood)) + if (flags.mask & ~(br_learning | br_flood | br_mcast_flood)) + bool do_fast_age = false; + if (flags.mask & br_learning) { + bool learning = !!(flags.val & br_learning); + u16 pav = learning ? 
(1 << port) : 0; + + err = mv88e6xxx_port_set_assoc_vector(chip, port, pav); + if (err) + goto out; + + if (!learning) + do_fast_age = true; + } + + if (do_fast_age) + mv88e6xxx_port_fast_age(ds, port); + diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c +/* offset 0x0b: port association vector */ + +int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port, + u16 pav) +{ + u16 reg, mask; + int err; + + err = mv88e6xxx_port_read(chip, port, mv88e6xxx_port_assoc_vector, + &reg); + if (err) + return err; + + mask = mv88e6xxx_port_mask(chip); + reg &= ~mask; + reg |= pav & mask; + + return mv88e6xxx_port_write(chip, port, mv88e6xxx_port_assoc_vector, + reg); +} + diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h +int mv88e6xxx_port_set_assoc_vector(struct mv88e6xxx_chip *chip, int port, + u16 pav);
Networking
041bd545e1249906b997645ee71f40df21417f17
tobias waldekranz
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: offload bridge broadcast flooding flag
these switches have two modes of classifying broadcast:
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['c']
1
70
1
--- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c + struct dsa_port *dp = dsa_to_port(chip->ds, port); + struct net_device *brport; + + if (dsa_is_unused_port(chip->ds, port)) + continue; + + brport = dsa_port_to_bridge_port(dp); + if (brport && !br_port_flag_is_set(brport, br_bcast_flood)) + /* skip bridged user ports where broadcast + * flooding is disabled. + */ + continue; + +struct mv88e6xxx_port_broadcast_sync_ctx { + int port; + bool flood; +}; + +static int +mv88e6xxx_port_broadcast_sync_vlan(struct mv88e6xxx_chip *chip, + const struct mv88e6xxx_vtu_entry *vlan, + void *_ctx) +{ + struct mv88e6xxx_port_broadcast_sync_ctx *ctx = _ctx; + u8 broadcast[eth_alen]; + u8 state; + + if (ctx->flood) + state = mv88e6xxx_g1_atu_data_state_mc_static; + else + state = mv88e6xxx_g1_atu_data_state_mc_unused; + + eth_broadcast_addr(broadcast); + + return mv88e6xxx_port_db_load_purge(chip, ctx->port, broadcast, + vlan->vid, state); +} + +static int mv88e6xxx_port_broadcast_sync(struct mv88e6xxx_chip *chip, int port, + bool flood) +{ + struct mv88e6xxx_port_broadcast_sync_ctx ctx = { + .port = port, + .flood = flood, + }; + struct mv88e6xxx_vtu_entry vid0 = { + .vid = 0, + }; + int err; + + /* update the port's private database... */ + err = mv88e6xxx_port_broadcast_sync_vlan(chip, &vid0, &ctx); + if (err) + return err; + + /* ...and the database for all vlans. */ + return mv88e6xxx_vtu_walk(chip, mv88e6xxx_port_broadcast_sync_vlan, + &ctx); +} + - if (flags.mask & ~(br_learning | br_flood | br_mcast_flood)) + if (flags.mask & ~(br_learning | br_flood | br_mcast_flood | + br_bcast_flood)) + if (flags.mask & br_bcast_flood) { + bool broadcast = !!(flags.val & br_bcast_flood); + + err = mv88e6xxx_port_broadcast_sync(chip, port, broadcast); + if (err) + goto out; + } +
Networking
8d1d8298eb00756cc525e12a133a5cc37cfdf992
tobias waldekranz
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: add support for mv88e6393x family
the marvell 88e6393x device is a single-chip integration of a 11-port ethernet switch with eight integrated gigabit ethernet (gbe) transceivers and three 10-gigabit interfaces.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for mv88e6393x family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['h', 'c']
8
771
0
- port 0 can be a serdes port - all serdeses are one-lane, eg. no xaui nor rxaui - on the other hand the serdeses can do usxgmii, 10gbaser and 5gbaser - port policy ctl register is changed to port policy mgmt ctl register, - egress monitor port is configured differently - ingress monitor/cpu/mirror ports are configured differently and can be - port speed altbit works differently than previously - phy registers can be also accessed via mdio address 0x18 and 0x19 --- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c +static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (port == 0 || port == 9 || port == 10) { + phylink_set(mask, 10000baset_full); + phylink_set(mask, 10000basekr_full); + phylink_set(mask, 10000basecr_full); + phylink_set(mask, 10000basesr_full); + phylink_set(mask, 10000baselr_full); + phylink_set(mask, 10000baselrm_full); + phylink_set(mask, 10000baseer_full); + phylink_set(mask, 5000baset_full); + phylink_set(mask, 2500basex_full); + phylink_set(mask, 2500baset_full); + } + + phylink_set(mask, 1000baset_full); + phylink_set(mask, 1000basex_full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + +static const struct mv88e6xxx_ops mv88e6393x_ops = { + /* mv88e6xxx_family_6393 */ + .setup_errata = mv88e6393x_serdes_setup_errata, + .irl_init_all = mv88e6390_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom8, + .set_eeprom = mv88e6xxx_g2_set_eeprom8, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_sync_link = mv88e6xxx_port_sync_link, + .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, + .port_set_speed_duplex = mv88e6393x_port_set_speed_duplex, + .port_max_speed_mode = 
mv88e6393x_port_max_speed_mode, + .port_tag_remap = mv88e6390_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, + .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, + .port_set_ether_type = mv88e6393x_port_set_ether_type, + .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_limit = mv88e6390_port_pause_limit, + .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6393x_port_set_cmode, + .port_setup_message_port = mv88e6xxx_setup_message_port, + .port_set_upstream_port = mv88e6393x_port_set_upstream_port, + .stats_snapshot = mv88e6390_g1_stats_snapshot, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, + .stats_get_sset_count = mv88e6320_stats_get_sset_count, + .stats_get_strings = mv88e6320_stats_get_strings, + .stats_get_stats = mv88e6390_stats_get_stats, + /* .set_cpu_port is missing because this family does not support a global + * cpu port, only per port cpu port which is set via + * .port_set_upstream_port method. 
+ */ + .set_egress_port = mv88e6393x_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, + .mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, + .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, + .vtu_getnext = mv88e6390_g1_vtu_getnext, + .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, + .serdes_power = mv88e6393x_serdes_power, + .serdes_get_lane = mv88e6393x_serdes_get_lane, + .serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state, + .serdes_pcs_config = mv88e6390_serdes_pcs_config, + .serdes_pcs_an_restart = mv88e6390_serdes_pcs_an_restart, + .serdes_pcs_link_up = mv88e6390_serdes_pcs_link_up, + .serdes_irq_mapping = mv88e6390_serdes_irq_mapping, + .serdes_irq_enable = mv88e6393x_serdes_irq_enable, + .serdes_irq_status = mv88e6393x_serdes_irq_status, + /* todo: serdes stats */ + .gpio_ops = &mv88e6352_gpio_ops, + .avb_ops = &mv88e6390_avb_ops, + .ptp_ops = &mv88e6352_ptp_ops, + .phylink_validate = mv88e6393x_phylink_validate, +}; + + [mv88e6191x] = { + .prod_num = mv88e6xxx_port_switch_id_prod_6191x, + .family = mv88e6xxx_family_6393, + .name = "marvell 88e6191x", + .num_databases = 4096, + .num_ports = 11, /* 10 + z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .tag_protocol = dsa_tag_proto_dsa, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, + + [mv88e6193x] = { + .prod_num = mv88e6xxx_port_switch_id_prod_6193x, + .family = mv88e6xxx_family_6393, + .name = "marvell 88e6193x", + .num_databases = 4096, + .num_ports = 11, /* 10 + z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + 
.age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .tag_protocol = dsa_tag_proto_dsa, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, + + + [mv88e6393x] = { + .prod_num = mv88e6xxx_port_switch_id_prod_6393x, + .family = mv88e6xxx_family_6393, + .name = "marvell 88e6393x", + .num_databases = 4096, + .num_ports = 11, /* 10 + z80 */ + .num_internal_phys = 9, + .max_vid = 8191, + .port_base_addr = 0x0, + .phy_base_addr = 0x0, + .global1_addr = 0x1b, + .global2_addr = 0x1c, + .age_time_coeff = 3750, + .g1_irqs = 10, + .g2_irqs = 14, + .atu_move_port_mask = 0x1f, + .pvt = true, + .multi_chip = true, + .tag_protocol = dsa_tag_proto_dsa, + .ptp_support = true, + .ops = &mv88e6393x_ops, + }, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h + mv88e6191x, + mv88e6193x, + mv88e6393x, + mv88e6xxx_family_6393, /* 6191x 6193x 6393x */ diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h +#define mv88e6393x_g1_sts_irq_device_2 9 +#define mv88e6393x_g1_ctl1_device2_en 0x0200 diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h +/* offset 0x02: mac link change irq register for mv88e6393x */ +#define mv88e6393x_g2_maclink_int_src 0x02 + +/* offset 0x03: mac link change irq mask register for mv88e6393x */ +#define mv88e6393x_g2_maclink_int_mask 0x03 + +#define mv88e6393x_g2_egress_monitor_dest 0x05 + diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c +#include "global2.h" +int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg, + int bit, 
int val) +{ + int addr = chip->info->port_base_addr + port; + + return mv88e6xxx_wait_bit(chip, addr, reg, bit, val); +} + +/* support 10, 100, 200, 1000, 2500, 5000, 10000 mbps (e.g. 88e6393x) + * function mv88e6xxx_port_set_speed_duplex() can't be used as the register + * values for speeds 2500 & 5000 conflict. + */ +int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, + int speed, int duplex) +{ + u16 reg, ctrl; + int err; + + if (speed == speed_max) + speed = (port > 0 && port < 9) ? 1000 : 10000; + + if (speed == 200 && port != 0) + return -eopnotsupp; + + if (speed >= 2500 && port > 0 && port < 9) + return -eopnotsupp; + + switch (speed) { + case 10: + ctrl = mv88e6xxx_port_mac_ctl_speed_10; + break; + case 100: + ctrl = mv88e6xxx_port_mac_ctl_speed_100; + break; + case 200: + ctrl = mv88e6xxx_port_mac_ctl_speed_100 | + mv88e6390_port_mac_ctl_altspeed; + break; + case 1000: + ctrl = mv88e6xxx_port_mac_ctl_speed_1000; + break; + case 2500: + ctrl = mv88e6xxx_port_mac_ctl_speed_1000 | + mv88e6390_port_mac_ctl_altspeed; + break; + case 5000: + ctrl = mv88e6390_port_mac_ctl_speed_10000 | + mv88e6390_port_mac_ctl_altspeed; + break; + case 10000: + case speed_unforced: + ctrl = mv88e6xxx_port_mac_ctl_speed_unforced; + break; + default: + return -eopnotsupp; + } + + switch (duplex) { + case duplex_half: + ctrl |= mv88e6xxx_port_mac_ctl_force_duplex; + break; + case duplex_full: + ctrl |= mv88e6xxx_port_mac_ctl_force_duplex | + mv88e6xxx_port_mac_ctl_duplex_full; + break; + case duplex_unforced: + /* normal duplex detection */ + break; + default: + return -eopnotsupp; + } + + err = mv88e6xxx_port_read(chip, port, mv88e6xxx_port_mac_ctl, &reg); + if (err) + return err; + + reg &= ~(mv88e6xxx_port_mac_ctl_speed_mask | + mv88e6390_port_mac_ctl_altspeed | + mv88e6390_port_mac_ctl_force_speed); + + if (speed != speed_unforced) + reg |= mv88e6390_port_mac_ctl_force_speed; + + reg |= ctrl; + + err = mv88e6xxx_port_write(chip, port, 
mv88e6xxx_port_mac_ctl, reg); + if (err) + return err; + + if (speed) + dev_dbg(chip->dev, "p%d: speed set to %d mbps ", port, speed); + else + dev_dbg(chip->dev, "p%d: speed unforced ", port); + dev_dbg(chip->dev, "p%d: %s %s duplex ", port, + reg & mv88e6xxx_port_mac_ctl_force_duplex ? "force" : "unforce", + reg & mv88e6xxx_port_mac_ctl_duplex_full ? "full" : "half"); + + return 0; +} + +phy_interface_t mv88e6393x_port_max_speed_mode(int port) +{ + if (port == 0 || port == 9 || port == 10) + return phy_interface_mode_10gbaser; + + return phy_interface_mode_na; +} + + case phy_interface_mode_5gbaser: + cmode = mv88e6393x_port_sts_cmode_5gbaser; + break; + case phy_interface_mode_10gbaser: + cmode = mv88e6393x_port_sts_cmode_10gbaser; + break; +int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + int err; + u16 reg; + + if (port != 0 && port != 9 && port != 10) + return -eopnotsupp; + + /* mv88e6393x errata 4.5: eee should be disabled on serdes ports */ + err = mv88e6xxx_port_read(chip, port, mv88e6xxx_port_mac_ctl, &reg); + if (err) + return err; + + reg &= ~mv88e6xxx_port_mac_ctl_eee; + reg |= mv88e6xxx_port_mac_ctl_force_eee; + err = mv88e6xxx_port_write(chip, port, mv88e6xxx_port_mac_ctl, reg); + if (err) + return err; + + return mv88e6xxx_port_set_cmode(chip, port, mode, false); +} + +/* offset 0x0e: policy & mgmt control register for family 6191x 6193x 6393x */ + +static int mv88e6393x_port_policy_write(struct mv88e6xxx_chip *chip, int port, + u16 pointer, u8 data) +{ + u16 reg; + + reg = mv88e6393x_port_policy_mgmt_ctl_update | pointer | data; + + return mv88e6xxx_port_write(chip, port, mv88e6393x_port_policy_mgmt_ctl, + reg); +} + +static int mv88e6393x_port_policy_write_all(struct mv88e6xxx_chip *chip, + u16 pointer, u8 data) +{ + int err, port; + + for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { + if (dsa_is_unused_port(chip->ds, port)) + continue; + + err = mv88e6393x_port_policy_write(chip, port, 
pointer, data); + if (err) + return err; + } + + return 0; +} + +int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port) +{ + u16 ptr; + int err; + + switch (direction) { + case mv88e6xxx_egress_dir_ingress: + ptr = mv88e6393x_port_policy_mgmt_ctl_ptr_ingress_dest; + err = mv88e6393x_port_policy_write_all(chip, ptr, port); + if (err) + return err; + break; + case mv88e6xxx_egress_dir_egress: + ptr = mv88e6393x_g2_egress_monitor_dest; + err = mv88e6xxx_g2_write(chip, ptr, port); + if (err) + return err; + break; + } + + return 0; +} + +int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port) +{ + u16 ptr = mv88e6393x_port_policy_mgmt_ctl_ptr_cpu_dest; + u8 data = mv88e6393x_port_policy_mgmt_ctl_cpu_dest_mgmtpri | + upstream_port; + + return mv88e6393x_port_policy_write(chip, port, ptr, data); +} + +int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + u16 ptr; + int err; + + /* consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:00 and + * 01:80:c2:00:00:02 as mgmt. 
+ */ + ptr = mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000000xlo; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000000xhi; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000002xlo; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + ptr = mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000002xhi; + err = mv88e6393x_port_policy_write_all(chip, ptr, 0xff); + if (err) + return err; + + return 0; +} + +/* offset 0x10 & 0x11: epc */ + +static int mv88e6393x_port_epc_wait_ready(struct mv88e6xxx_chip *chip, int port) +{ + int bit = __bf_shf(mv88e6393x_port_epc_cmd_busy); + + return mv88e6xxx_port_wait_bit(chip, port, mv88e6393x_port_epc_cmd, bit, 0); +} + +/* port ether type for 6393x family */ + +int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, + u16 etype) +{ + u16 val; + int err; + + err = mv88e6393x_port_epc_wait_ready(chip, port); + if (err) + return err; + + err = mv88e6xxx_port_write(chip, port, mv88e6393x_port_epc_data, etype); + if (err) + return err; + + val = mv88e6393x_port_epc_cmd_busy | + mv88e6393x_port_epc_cmd_write | + mv88e6393x_port_epc_index_port_etype; + + return mv88e6xxx_port_write(chip, port, mv88e6393x_port_epc_cmd, val); +} + diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h +#define mv88e6393x_port_sts_cmode_5gbaser 0x000c +#define mv88e6393x_port_sts_cmode_10gbaser 0x000d +#define mv88e6393x_port_sts_cmode_usxgmii 0x000e +#define mv88e6xxx_port_mac_ctl_eee 0x0200 +#define mv88e6xxx_port_mac_ctl_force_eee 0x0100 +#define mv88e6xxx_port_switch_id_prod_6191x 0x1920 +#define mv88e6xxx_port_switch_id_prod_6193x 0x1930 +#define mv88e6xxx_port_switch_id_prod_6393x 0x3930 +/* offset 0x0e: policy & mgmt control 
register (family_6393x) */ +#define mv88e6393x_port_policy_mgmt_ctl 0x0e +#define mv88e6393x_port_policy_mgmt_ctl_update 0x8000 +#define mv88e6393x_port_policy_mgmt_ctl_ptr_mask 0x3f00 +#define mv88e6393x_port_policy_mgmt_ctl_data_mask 0x00ff +#define mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000000xlo 0x2000 +#define mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000000xhi 0x2100 +#define mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000002xlo 0x2400 +#define mv88e6393x_port_policy_mgmt_ctl_ptr_01c280000002xhi 0x2500 +#define mv88e6393x_port_policy_mgmt_ctl_ptr_ingress_dest 0x3000 +#define mv88e6393x_port_policy_mgmt_ctl_ptr_cpu_dest 0x3800 +#define mv88e6393x_port_policy_mgmt_ctl_cpu_dest_mgmtpri 0x00e0 + +/* offset 0x10: extended port control command */ +#define mv88e6393x_port_epc_cmd 0x10 +#define mv88e6393x_port_epc_cmd_busy 0x8000 +#define mv88e6393x_port_epc_cmd_write 0x0300 +#define mv88e6393x_port_epc_index_port_etype 0x02 + +/* offset 0x11: extended port control data */ +#define mv88e6393x_port_epc_data 0x11 + +int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg, + int bit, int val); +int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port, + int speed, int duplex); +phy_interface_t mv88e6393x_port_max_speed_mode(int port); +int mv88e6393x_set_egress_port(struct mv88e6xxx_chip *chip, + enum mv88e6xxx_egress_direction direction, + int port); +int mv88e6393x_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, + int upstream_port); +int mv88e6393x_port_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6393x_port_set_ether_type(struct mv88e6xxx_chip *chip, int port, + u16 etype); +int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c +/* only ports 0, 9 and 10 have serdes lanes. 
return the serdes lane address + * a port is using else returns -enodev. + */ +int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port) +{ + u8 cmode = chip->ports[port].cmode; + int lane = -enodev; + + if (port != 0 && port != 9 && port != 10) + return -eopnotsupp; + + if (cmode == mv88e6xxx_port_sts_cmode_1000basex || + cmode == mv88e6xxx_port_sts_cmode_sgmii || + cmode == mv88e6xxx_port_sts_cmode_2500basex || + cmode == mv88e6393x_port_sts_cmode_5gbaser || + cmode == mv88e6393x_port_sts_cmode_10gbaser) + lane = port; + + return lane; +} + +static int mv88e6393x_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, + int port, int lane, + struct phylink_link_state *state) +{ + u16 status; + int err; + + err = mv88e6390_serdes_read(chip, lane, mdio_mmd_phyxs, + mv88e6390_10g_stat1, &status); + if (err) + return err; + + state->link = !!(status & mdio_stat1_lstatus); + if (state->link) { + if (state->interface == phy_interface_mode_5gbaser) + state->speed = speed_5000; + else + state->speed = speed_10000; + state->duplex = duplex_full; + } + + return 0; +} + +int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + int lane, struct phylink_link_state *state) +{ + switch (state->interface) { + case phy_interface_mode_sgmii: + case phy_interface_mode_1000basex: + case phy_interface_mode_2500basex: + return mv88e6390_serdes_pcs_get_state_sgmii(chip, port, lane, + state); + case phy_interface_mode_5gbaser: + case phy_interface_mode_10gbaser: + return mv88e6393x_serdes_pcs_get_state_10g(chip, port, lane, + state); + + default: + return -eopnotsupp; + } +} + +static void mv88e6393x_serdes_irq_link_10g(struct mv88e6xxx_chip *chip, + int port, u8 lane) +{ + u16 status; + int err; + + /* if the link has dropped, we want to know about it. 
*/ + err = mv88e6390_serdes_read(chip, lane, mdio_mmd_phyxs, + mv88e6390_10g_stat1, &status); + if (err) { + dev_err(chip->dev, "can't read serdes stat1: %d ", err); + return; + } + + dsa_port_phylink_mac_change(chip->ds, port, !!(status & mdio_stat1_lstatus)); +} + +static int mv88e6393x_serdes_irq_enable_10g(struct mv88e6xxx_chip *chip, + u8 lane, bool enable) +{ + u16 val = 0; + + if (enable) + val |= mv88e6393x_10g_int_link_change; + + return mv88e6390_serdes_write(chip, lane, mdio_mmd_phyxs, + mv88e6393x_10g_int_enable, val); +} + +int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane, bool enable) +{ + u8 cmode = chip->ports[port].cmode; + + switch (cmode) { + case mv88e6xxx_port_sts_cmode_sgmii: + case mv88e6xxx_port_sts_cmode_1000basex: + case mv88e6xxx_port_sts_cmode_2500basex: + return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable); + case mv88e6393x_port_sts_cmode_5gbaser: + case mv88e6393x_port_sts_cmode_10gbaser: + return mv88e6393x_serdes_irq_enable_10g(chip, lane, enable); + } + + return 0; +} + +static int mv88e6393x_serdes_irq_status_10g(struct mv88e6xxx_chip *chip, + u8 lane, u16 *status) +{ + int err; + + err = mv88e6390_serdes_read(chip, lane, mdio_mmd_phyxs, + mv88e6393x_10g_int_status, status); + + return err; +} + +irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + int lane) +{ + u8 cmode = chip->ports[port].cmode; + irqreturn_t ret = irq_none; + u16 status; + int err; + + switch (cmode) { + case mv88e6xxx_port_sts_cmode_sgmii: + case mv88e6xxx_port_sts_cmode_1000basex: + case mv88e6xxx_port_sts_cmode_2500basex: + err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status); + if (err) + return ret; + if (status & (mv88e6390_sgmii_int_link_down | + mv88e6390_sgmii_int_link_up)) { + ret = irq_handled; + mv88e6390_serdes_irq_link_sgmii(chip, port, lane); + } + break; + case mv88e6393x_port_sts_cmode_5gbaser: + case mv88e6393x_port_sts_cmode_10gbaser: + err = 
mv88e6393x_serdes_irq_status_10g(chip, lane, &status); + if (err) + return err; + if (status & mv88e6393x_10g_int_link_change) { + ret = irq_handled; + mv88e6393x_serdes_irq_link_10g(chip, port, lane); + } + break; + } + + return ret; +} + + +static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane) +{ + u16 reg, pcs; + int err; + + /* mv88e6393x family errata 4.6: + * cannot clear pwrdn bit on serdes on port 0 if device is configured + * cpu_mgd mode or p0_mode is configured for [x]mii. + * workaround: set port0 serdes register 4.f002 bit 5=0 and bit 15=1. + * + * it seems that after this workaround the serdes is automatically + * powered up (the bit is cleared), so power it down. + */ + if (lane == mv88e6393x_port0_lane) { + err = mv88e6390_serdes_read(chip, mv88e6393x_port0_lane, + mdio_mmd_phyxs, + mv88e6393x_serdes_poc, &reg); + if (err) + return err; + + reg &= ~mv88e6393x_serdes_poc_pdown; + reg |= mv88e6393x_serdes_poc_reset; + + err = mv88e6390_serdes_write(chip, lane, mdio_mmd_phyxs, + mv88e6393x_serdes_poc, reg); + if (err) + return err; + + err = mv88e6390_serdes_power_sgmii(chip, lane, false); + if (err) + return err; + } + + /* mv88e6393x family errata 4.8: + * when a serdes port is operating in 1000base-x or sgmii mode link may + * not come up after hardware reset or software reset of serdes core. + * workaround is to write serdes register 4.f074.14=1 for only those + * modes and 0 in all other modes. 
+ */ + err = mv88e6390_serdes_read(chip, lane, mdio_mmd_phyxs, + mv88e6393x_serdes_poc, &pcs); + if (err) + return err; + + pcs &= mv88e6393x_serdes_poc_pcs_mask; + + err = mv88e6390_serdes_read(chip, lane, mdio_mmd_phyxs, + mv88e6393x_errata_4_8_reg, &reg); + if (err) + return err; + + if (pcs == mv88e6393x_serdes_poc_pcs_1000basex || + pcs == mv88e6393x_serdes_poc_pcs_sgmii_phy || + pcs == mv88e6393x_serdes_poc_pcs_sgmii_mac) + reg |= mv88e6393x_errata_4_8_bit; + else + reg &= ~mv88e6393x_errata_4_8_bit; + + return mv88e6390_serdes_write(chip, lane, mdio_mmd_phyxs, + mv88e6393x_errata_4_8_reg, reg); +} + +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip) +{ + int err; + + err = mv88e6393x_serdes_port_errata(chip, mv88e6393x_port0_lane); + if (err) + return err; + + err = mv88e6393x_serdes_port_errata(chip, mv88e6393x_port9_lane); + if (err) + return err; + + return mv88e6393x_serdes_port_errata(chip, mv88e6393x_port10_lane); +} + +int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, + bool on) +{ + u8 cmode = chip->ports[port].cmode; + + if (port != 0 && port != 9 && port != 10) + return -eopnotsupp; + + switch (cmode) { + case mv88e6xxx_port_sts_cmode_sgmii: + case mv88e6xxx_port_sts_cmode_1000basex: + case mv88e6xxx_port_sts_cmode_2500basex: + return mv88e6390_serdes_power_sgmii(chip, lane, on); + case mv88e6393x_port_sts_cmode_5gbaser: + case mv88e6393x_port_sts_cmode_10gbaser: + return mv88e6390_serdes_power_10g(chip, lane, on); + } + + return 0; +} diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h +#define mv88e6393x_10g_int_enable 0x9000 +#define mv88e6393x_10g_int_link_change bit(2) +#define mv88e6393x_10g_int_status 0x9001 +#define mv88e6393x_port0_lane 0x00 +#define mv88e6393x_port9_lane 0x09 +#define mv88e6393x_port10_lane 0x0a + +/* port operational configuration */ +#define mv88e6393x_serdes_poc 
0xf002 +#define mv88e6393x_serdes_poc_pcs_1000basex 0x0000 +#define mv88e6393x_serdes_poc_pcs_2500basex 0x0001 +#define mv88e6393x_serdes_poc_pcs_sgmii_phy 0x0002 +#define mv88e6393x_serdes_poc_pcs_sgmii_mac 0x0003 +#define mv88e6393x_serdes_poc_pcs_5gbaser 0x0004 +#define mv88e6393x_serdes_poc_pcs_10gbaser 0x0005 +#define mv88e6393x_serdes_poc_pcs_usxgmii_phy 0x0006 +#define mv88e6393x_serdes_poc_pcs_usxgmii_mac 0x0007 +#define mv88e6393x_serdes_poc_pcs_mask 0x0007 +#define mv88e6393x_serdes_poc_reset bit(15) +#define mv88e6393x_serdes_poc_pdown bit(5) + +#define mv88e6393x_errata_4_8_reg 0xf074 +#define mv88e6393x_errata_4_8_bit bit(14) + +int mv88e6393x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); +int mv88e6393x_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, + int lane, struct phylink_link_state *state); +int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane, + bool on); +int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip); +int mv88e6393x_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, + int lane, bool enable); +irqreturn_t mv88e6393x_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, + int lane);
Networking
de776d0d316f7230d96ac1aa1df354d880476c1f
pavana sharma
drivers
net
dsa, mv88e6xxx
net: dsa: mv88e6xxx: implement .port_set_policy for amethyst
the 16-bit port policy ctl register from older chips is on 6393x changed to port policy mgmt ctl, which can access more data, but indirectly and via 8-bit registers.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for mv88e6393x family
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['dsa ', 'mv88e6xxx']
['h', 'c']
3
99
27
- if 0 <= shift < 8, we access register 0 in port policy mgmt ctl - if 8 <= shift < 16, we access register 1 in port policy mgmt ctl --- diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c + .port_set_policy = mv88e6393x_port_set_policy, diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c +static int mv88e6393x_port_policy_read(struct mv88e6xxx_chip *chip, int port, + u16 pointer, u8 *data) +{ + u16 reg; + int err; + + err = mv88e6xxx_port_write(chip, port, mv88e6393x_port_policy_mgmt_ctl, + pointer); + if (err) + return err; + + err = mv88e6xxx_port_read(chip, port, mv88e6393x_port_policy_mgmt_ctl, + &reg); + if (err) + return err; + + *data = reg; + + return 0; +} + -int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, - enum mv88e6xxx_policy_mapping mapping, - enum mv88e6xxx_policy_action action) +static int +mv88e6xxx_port_policy_mapping_get_pos(enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action, + u16 *mask, u16 *val, int *shift) - u16 reg, mask, val; - int shift; - int err; - - shift = __bf_shf(mv88e6xxx_port_policy_ctl_da_mask); - mask = mv88e6xxx_port_policy_ctl_da_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_da_mask); + *mask = mv88e6xxx_port_policy_ctl_da_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_sa_mask); - mask = mv88e6xxx_port_policy_ctl_sa_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_sa_mask); + *mask = mv88e6xxx_port_policy_ctl_sa_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_vtu_mask); - mask = mv88e6xxx_port_policy_ctl_vtu_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_vtu_mask); + *mask = mv88e6xxx_port_policy_ctl_vtu_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_etype_mask); - mask = mv88e6xxx_port_policy_ctl_etype_mask; + *shift = 
__bf_shf(mv88e6xxx_port_policy_ctl_etype_mask); + *mask = mv88e6xxx_port_policy_ctl_etype_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_pppoe_mask); - mask = mv88e6xxx_port_policy_ctl_pppoe_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_pppoe_mask); + *mask = mv88e6xxx_port_policy_ctl_pppoe_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_vbas_mask); - mask = mv88e6xxx_port_policy_ctl_vbas_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_vbas_mask); + *mask = mv88e6xxx_port_policy_ctl_vbas_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_opt82_mask); - mask = mv88e6xxx_port_policy_ctl_opt82_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_opt82_mask); + *mask = mv88e6xxx_port_policy_ctl_opt82_mask; - shift = __bf_shf(mv88e6xxx_port_policy_ctl_udp_mask); - mask = mv88e6xxx_port_policy_ctl_udp_mask; + *shift = __bf_shf(mv88e6xxx_port_policy_ctl_udp_mask); + *mask = mv88e6xxx_port_policy_ctl_udp_mask; - val = mv88e6xxx_port_policy_ctl_normal; + *val = mv88e6xxx_port_policy_ctl_normal; - val = mv88e6xxx_port_policy_ctl_mirror; + *val = mv88e6xxx_port_policy_ctl_mirror; - val = mv88e6xxx_port_policy_ctl_trap; + *val = mv88e6xxx_port_policy_ctl_trap; - val = mv88e6xxx_port_policy_ctl_discard; + *val = mv88e6xxx_port_policy_ctl_discard; + return 0; +} + +int mv88e6352_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 reg, mask, val; + int shift; + int err; + + err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask, + &val, &shift); + if (err) + return err; + + +int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action) +{ + u16 mask, val; + int shift; + int err; + u16 ptr; + u8 reg; + + err = mv88e6xxx_port_policy_mapping_get_pos(mapping, action, &mask, + &val, &shift); + if (err) + return err; + + /* the 16-bit port policy ctl register from older chips is on 
6393x + * changed to port policy mgmt ctl, which can access more data, but + * indirectly. the original 16-bit value is divided into two 8-bit + * registers. + */ + ptr = shift / 8; + shift %= 8; + mask >>= ptr * 8; + + err = mv88e6393x_port_policy_read(chip, port, ptr, &reg); + if (err) + return err; + + reg &= ~mask; + reg |= (val << shift) & mask; + + return mv88e6393x_port_policy_write(chip, port, ptr, reg); +} diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h +int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port, + enum mv88e6xxx_policy_mapping mapping, + enum mv88e6xxx_policy_action action);
Networking
6584b26020fc5bb586d6e9f621eb8a7343a6ed33
marek behún
drivers
net
dsa, mv88e6xxx
net: enetc: consume the error rx buffer descriptors in a dedicated function
we can and should check the rx bd errors before starting to build the skb. the only apparent reason why things are done in this backwards order is to spare one call to enetc_rxbd_next.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['c']
1
27
16
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c +static bool enetc_check_bd_errors_and_consume(struct enetc_bdr *rx_ring, + u32 bd_status, + union enetc_rx_bd **rxbd, int *i) +{ + if (likely(!(bd_status & enetc_rxbd_lstatus(enetc_rxbd_err_mask)))) + return false; + + enetc_rxbd_next(rx_ring, rxbd, i); + + while (!(bd_status & enetc_rxbd_lstatus_f)) { + dma_rmb(); + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + + enetc_rxbd_next(rx_ring, rxbd, i); + } + + rx_ring->ndev->stats.rx_dropped++; + rx_ring->ndev->stats.rx_errors++; + + return true; +} + + + if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, + &rxbd, &i)) + break; + - if (unlikely(bd_status & - enetc_rxbd_lstatus(enetc_rxbd_err_mask))) { - dev_kfree_skb(skb); - while (!(bd_status & enetc_rxbd_lstatus_f)) { - dma_rmb(); - bd_status = le32_to_cpu(rxbd->r.lstatus); - - enetc_rxbd_next(rx_ring, &rxbd, &i); - } - - rx_ring->ndev->stats.rx_dropped++; - rx_ring->ndev->stats.rx_errors++; - - break; - } -
Networking
2fa423f5f0c6891effd4d5c8bdb91d418001da11
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: move skb creation into enetc_build_skb
we need to build an skb from two code paths now: from the plain rx data path and from the xdp data path when the verdict is xdp_pass.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['c']
1
44
37
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c -static void enetc_process_skb(struct enetc_bdr *rx_ring, - struct sk_buff *skb) -{ - skb_record_rx_queue(skb, rx_ring->index); - skb->protocol = eth_type_trans(skb, rx_ring->ndev); -} - +static struct sk_buff *enetc_build_skb(struct enetc_bdr *rx_ring, + u32 bd_status, union enetc_rx_bd **rxbd, + int *i, int *cleaned_cnt) +{ + struct sk_buff *skb; + u16 size; + + size = le16_to_cpu((*rxbd)->r.buf_len); + skb = enetc_map_rx_buff_to_skb(rx_ring, *i, size); + if (!skb) + return null; + + enetc_get_offloads(rx_ring, *rxbd, skb); + + (*cleaned_cnt)++; + + enetc_rxbd_next(rx_ring, rxbd, i); + + /* not last bd in frame? */ + while (!(bd_status & enetc_rxbd_lstatus_f)) { + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + size = enetc_rxb_dma_size; + + if (bd_status & enetc_rxbd_lstatus_f) { + dma_rmb(); + size = le16_to_cpu((*rxbd)->r.buf_len); + } + + enetc_add_rx_buff_to_skb(rx_ring, *i, size, skb); + + (*cleaned_cnt)++; + + enetc_rxbd_next(rx_ring, rxbd, i); + } + + skb_record_rx_queue(skb, rx_ring->index); + skb->protocol = eth_type_trans(skb, rx_ring->ndev); + + return skb; +} + - u16 size; - size = le16_to_cpu(rxbd->r.buf_len); - skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); + skb = enetc_build_skb(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt); - enetc_get_offloads(rx_ring, rxbd, skb); - - cleaned_cnt++; - - enetc_rxbd_next(rx_ring, &rxbd, &i); - - /* not last bd in frame? 
*/ - while (!(bd_status & enetc_rxbd_lstatus_f)) { - bd_status = le32_to_cpu(rxbd->r.lstatus); - size = enetc_rxb_dma_size; - - if (bd_status & enetc_rxbd_lstatus_f) { - dma_rmb(); - size = le16_to_cpu(rxbd->r.buf_len); - } - - enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); - - cleaned_cnt++; - - enetc_rxbd_next(rx_ring, &rxbd, &i); - } - - - enetc_process_skb(rx_ring, skb); + rx_frm_cnt++; - - rx_frm_cnt++;
Networking
a800abd3ecb9acc55821f7ac9bba6c956b36a595
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: add a dedicated is_eof bit in the tx software bd
in the transmit path, if we have a scatter/gather frame, it is put into multiple software buffer descriptors, the last of which has the skb pointer populated (which is necessary for rearming the tx msi vector and for collecting the two-step tx timestamp from the tx confirmation path).
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
2
4
4
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c + tx_ring->tx_swbd[i].is_eof = true; - bool is_eof = !!tx_swbd->skb; - - if (is_eof) { + if (tx_swbd->skb) { - if (is_eof) { + if (tx_swbd->is_eof) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h + u8 is_eof:1;
Networking
d504498d2eb3bfcbef4ddf3f51eb9f1391c8149f
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: clean the tx software bd on the tx confirmation path
with the future introduction of some new fields into enetc_tx_swbd such as is_xdp_tx, is_xdp_redirect etc, we need not only to set these bits to true from the xdp_tx/xdp_redirect code path, but also to false from the old code paths.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['c']
1
4
0
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c + /* scrub the swbd here so we don't have to do that + * when we reuse it during xmit + */ + memset(tx_swbd, 0, sizeof(*tx_swbd));
Networking
1ee8d6f3bebbdaa7692732c91685b27ae4c612be
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: move up enetc_reuse_page and enetc_page_reusable
for xdp_tx, we need to call enetc_reuse_page from enetc_clean_tx_ring, so we need to avoid a forward declaration.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['c']
1
19
19
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c +static bool enetc_page_reusable(struct page *page) +{ + return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); +} + +static void enetc_reuse_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *old) +{ + struct enetc_rx_swbd *new; + + new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; + + /* next buf that may reuse a page */ + enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); + + /* copy page reference */ + *new = *old; +} + -static bool enetc_page_reusable(struct page *page) -{ - return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); -} - -static void enetc_reuse_page(struct enetc_bdr *rx_ring, - struct enetc_rx_swbd *old) -{ - struct enetc_rx_swbd *new; - - new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; - - /* next buf that may reuse a page */ - enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); - - /* copy page reference */ - *new = *old; -} -
Networking
65d0cbb414cee012ceee9991d09f5e7c30b49fcc
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: add support for xdp_drop and xdp_pass
for the rx ring, enetc uses an allocation scheme based on pages split into two buffers, which is already very efficient in terms of preventing reallocations / maximizing reuse, so i see no reason why i would change that.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
4
281
20
+--------+--------+--------+--------+--------+--------+--------+ +--------+--------+--------+--------+--------+--------+--------+ +--------+--------+--------+--------+--------+--------+--------+ +--------+--------+--------+--------+--------+ +--------+--------+--------+--------+--------+--------+--------+ +--------+--------+--------+--------+--------+--------+--------+ +--------+--------+ --- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c +#include <linux/bpf_trace.h> - rx_swbd->page_offset = enetc_rxb_pad; + rx_swbd->page_offset = rx_ring->buffer_offset; + size_t buffer_size = enetc_rxb_truesize - rx_ring->buffer_offset; + - enetc_rxb_dma_size, - dma_from_device); + buffer_size, dma_from_device); - skb = build_skb(ba - enetc_rxb_pad, enetc_rxb_truesize); + skb = build_skb(ba - rx_ring->buffer_offset, enetc_rxb_truesize); - skb_reserve(skb, enetc_rxb_pad); + skb_reserve(skb, rx_ring->buffer_offset); - int *i, int *cleaned_cnt) + int *i, int *cleaned_cnt, int buffer_size) - size = enetc_rxb_dma_size; + size = buffer_size; - &cleaned_cnt); + &cleaned_cnt, enetc_rxb_dma_size); +static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, + struct xdp_buff *xdp_buff, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset; + struct skb_shared_info *shinfo; + + xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset, + rx_ring->buffer_offset, size, false); + + shinfo = xdp_get_shared_info_from_buff(xdp_buff); + shinfo->nr_frags = 0; +} + +static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, + u16 size, struct xdp_buff *xdp_buff) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff); + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + 
skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags]; + + skb_frag_off_set(frag, rx_swbd->page_offset); + skb_frag_size_set(frag, size); + __skb_frag_set_page(frag, rx_swbd->page); + + shinfo->nr_frags++; +} + +static void enetc_build_xdp_buff(struct enetc_bdr *rx_ring, u32 bd_status, + union enetc_rx_bd **rxbd, int *i, + int *cleaned_cnt, struct xdp_buff *xdp_buff) +{ + u16 size = le16_to_cpu((*rxbd)->r.buf_len); + + xdp_init_buff(xdp_buff, enetc_rxb_truesize, &rx_ring->xdp.rxq); + + enetc_map_rx_buff_to_xdp(rx_ring, *i, xdp_buff, size); + (*cleaned_cnt)++; + enetc_rxbd_next(rx_ring, rxbd, i); + + /* not last bd in frame? */ + while (!(bd_status & enetc_rxbd_lstatus_f)) { + bd_status = le32_to_cpu((*rxbd)->r.lstatus); + size = enetc_rxb_dma_size_xdp; + + if (bd_status & enetc_rxbd_lstatus_f) { + dma_rmb(); + size = le16_to_cpu((*rxbd)->r.buf_len); + } + + enetc_add_rx_buff_to_xdp(rx_ring, *i, size, xdp_buff); + (*cleaned_cnt)++; + enetc_rxbd_next(rx_ring, rxbd, i); + } +} + +/* reuse the current page without performing half-page buffer flipping */ +static void enetc_put_xdp_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + enetc_reuse_page(rx_ring, rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + enetc_rxb_dma_size_xdp, + dma_from_device); + + rx_swbd->page = null; +} + +static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_put_xdp_buff(rx_ring, + &rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } + rx_ring->stats.xdp_drops++; +} + +static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit, + struct bpf_prog *prog) +{ + int rx_frm_cnt = 0, rx_byte_cnt = 0; + int cleaned_cnt, i; + u32 xdp_act; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = 
rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd, *orig_rxbd; + int orig_i, orig_cleaned_cnt; + struct xdp_buff xdp_buff; + struct sk_buff *skb; + u32 bd_status; + + if (cleaned_cnt >= enetc_rxbd_bundle) + cleaned_cnt -= enetc_refill_rx_ring(rx_ring, + cleaned_cnt); + + rxbd = enetc_rxbd(rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg_hot(rx_ring->idr, bit(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + + if (enetc_check_bd_errors_and_consume(rx_ring, bd_status, + &rxbd, &i)) + break; + + orig_rxbd = rxbd; + orig_cleaned_cnt = cleaned_cnt; + orig_i = i; + + enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, + &cleaned_cnt, &xdp_buff); + + xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); + + switch (xdp_act) { + case xdp_aborted: + trace_xdp_exception(rx_ring->ndev, prog, xdp_act); + fallthrough; + case xdp_drop: + enetc_xdp_drop(rx_ring, orig_i, i); + break; + case xdp_pass: + rxbd = orig_rxbd; + cleaned_cnt = orig_cleaned_cnt; + i = orig_i; + + skb = enetc_build_skb(rx_ring, bd_status, &rxbd, + &i, &cleaned_cnt, + enetc_rxb_dma_size_xdp); + if (unlikely(!skb)) + /* exit the switch/case, not the loop */ + break; + + napi_gro_receive(napi, skb); + break; + default: + bpf_warn_invalid_xdp_action(xdp_act); + } + + rx_frm_cnt++; + } + + rx_ring->next_to_clean = i; + + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + + return rx_frm_cnt; +} + + struct enetc_bdr *rx_ring = &v->rx_ring; + struct bpf_prog *prog; - work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); + prog = rx_ring->xdp.prog; + if (prog) + work_done = enetc_clean_rx_ring_xdp(rx_ring, napi, budget, prog); + else + work_done = enetc_clean_rx_ring(rx_ring, napi, budget); - enetc_rxbdr_wr(hw, idx, enetc_rbbsr, enetc_rxb_dma_size); + if (rx_ring->xdp.prog) + enetc_rxbdr_wr(hw, idx, enetc_rbbsr, enetc_rxb_dma_size_xdp); + else + enetc_rxbdr_wr(hw, idx, 
enetc_rbbsr, enetc_rxb_dma_size); +static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + struct bpf_prog *old_prog; + bool is_up; + int i; + + /* the buffer layout is changing, so we need to drain the old + * rx buffers and seed new ones. + */ + is_up = netif_running(dev); + if (is_up) + dev_close(dev); + + old_prog = xchg(&priv->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + for (i = 0; i < priv->num_rx_rings; i++) { + struct enetc_bdr *rx_ring = priv->rx_ring[i]; + + rx_ring->xdp.prog = prog; + + if (prog) + rx_ring->buffer_offset = xdp_packet_headroom; + else + rx_ring->buffer_offset = enetc_rxb_pad; + } + + if (is_up) + return dev_open(dev, extack); + + return 0; +} + +int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case xdp_setup_prog: + return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack); + default: + return -einval; + } + + return 0; +} + + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + bdr->buffer_offset = enetc_rxb_pad; + priv->rx_ring[i] = bdr; + + err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0); + if (err) { + kfree(v); + goto fail; + } + + err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq, + mem_type_page_shared, null); + if (err) { + xdp_rxq_info_unreg(&bdr->xdp.rxq); + kfree(v); + goto fail; + } + - - bdr = &v->rx_ring; - bdr->index = i; - bdr->ndev = priv->ndev; - bdr->dev = priv->dev; - bdr->bd_count = priv->rx_bd_count; - priv->rx_ring[i] = bdr; - netif_napi_del(&priv->int_vector[i]->napi); - cancel_work_sync(&priv->int_vector[i]->rx_dim.work); - kfree(priv->int_vector[i]); + struct enetc_int_vector *v = priv->int_vector[i]; + struct enetc_bdr *rx_ring = &v->rx_ring; + + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); + 
netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); + kfree(v); + struct enetc_bdr *rx_ring = &v->rx_ring; + xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq); + xdp_rxq_info_unreg(&rx_ring->xdp.rxq); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h +#define enetc_rxb_dma_size_xdp \ + (skb_with_overhead(enetc_rxb_truesize) - xdp_packet_headroom) + unsigned int xdp_drops; +}; + +struct enetc_xdp_data { + struct xdp_rxq_info rxq; + struct bpf_prog *prog; + int buffer_offset; + struct enetc_xdp_data xdp; + + + struct bpf_prog *xdp_prog; +int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c + "rx ring %2d xdp drops", + data[o++] = priv->rx_ring[i]->stats.xdp_drops; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c + .ndo_bpf = enetc_setup_bpf,
Networking
d1b15102dd16adc17fd5e4db8a485e6459f98906
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: add support for xdp_tx
for reflecting packets back into the interface they came from, we create an array of tx software bds derived from the rx software bds. therefore, we need to extend the tx software bd structure to contain most of the stuff that's already present in the rx software bd structure, for reasons that will become evident in a moment.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
3
228
25
-> enetc_clean_rx_ring_xdp -> enetc_xdp_tx -> enetc_refill_rx_ring -> enetc_clean_tx_ring -> enetc_recycle_xdp_tx_buff --- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c -/* enetc overhead: optional extension bd + 1 bd gap */ -#define enetc_txbds_needed(val) ((val) + 2) -/* max # of chained tx bds is 15, including head and extension bd */ -#define enetc_max_skb_frags 13 -#define enetc_txbds_max_needed enetc_txbds_needed(enetc_max_skb_frags + 1) - + /* for xdp_tx, pages come from rx, whereas for the other contexts where + * we have is_dma_page_set, those come from skb_frag_dma_map. we need + * to match the dma mapping length, so we need to differentiate those. + */ - tx_swbd->len, dma_to_device); + tx_swbd->is_xdp_tx ? page_size : tx_swbd->len, + tx_swbd->dir); - tx_swbd->len, dma_to_device); + tx_swbd->len, tx_swbd->dir); +/* let h/w know bd ring has been updated */ +static void enetc_update_tx_ring_tail(struct enetc_bdr *tx_ring) +{ + /* includes wmb() */ + enetc_wr_reg_hot(tx_ring->tpir, tx_ring->next_to_use); +} + + tx_swbd->dir = dma_to_device; + tx_swbd->dir = dma_to_device; - /* let h/w know bd ring has been updated */ - enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */ + enetc_update_tx_ring_tail(tx_ring); +static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); + struct enetc_bdr *rx_ring = priv->rx_ring[tx_ring->index]; + struct enetc_rx_swbd rx_swbd = { + .dma = tx_swbd->dma, + .page = tx_swbd->page, + .page_offset = tx_swbd->page_offset, + .dir = tx_swbd->dir, + .len = tx_swbd->len, + }; + + if (likely(enetc_swbd_unused(rx_ring))) { + enetc_reuse_page(rx_ring, &rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd.dma, + 
rx_swbd.page_offset, + enetc_rxb_dma_size_xdp, + rx_swbd.dir); + + rx_ring->stats.recycles++; + } else { + /* rx ring is already full, we need to unmap and free the + * page, since there's nothing useful we can do with it. + */ + rx_ring->stats.recycle_failures++; + + dma_unmap_page(rx_ring->dev, rx_swbd.dma, page_size, + rx_swbd.dir); + __free_page(rx_swbd.page); + } + + rx_ring->xdp.xdp_tx_in_flight--; +} + - if (likely(tx_swbd->dma)) + if (tx_swbd->is_xdp_tx) + enetc_recycle_xdp_tx_buff(tx_ring, tx_swbd); + else if (likely(tx_swbd->dma)) + bool xdp = !!(rx_ring->xdp.prog); - addr = dma_map_page(rx_ring->dev, page, 0, page_size, dma_from_device); + /* for xdp_tx, we forgo dma_unmap -> dma_map */ + rx_swbd->dir = xdp ? dma_bidirectional : dma_from_device; + + addr = dma_map_page(rx_ring->dev, page, 0, page_size, rx_swbd->dir); +/* this gets called during the non-xdp napi poll cycle as well as on xdp_pass, + * so it needs to work with both dma_from_device as well as dma_bidirectional + * mapped buffers. + */ - size, dma_from_device); + size, rx_swbd->dir); - buffer_size, dma_from_device); + buffer_size, rx_swbd->dir); - dma_unmap_page(rx_ring->dev, rx_swbd->dma, - page_size, dma_from_device); + dma_unmap_page(rx_ring->dev, rx_swbd->dma, page_size, + rx_swbd->dir); +static void enetc_xdp_map_tx_buff(struct enetc_bdr *tx_ring, int i, + struct enetc_tx_swbd *tx_swbd, + int frm_len) +{ + union enetc_tx_bd *txbd = enetc_txbd(*tx_ring, i); + + prefetchw(txbd); + + enetc_clear_tx_bd(txbd); + txbd->addr = cpu_to_le64(tx_swbd->dma + tx_swbd->page_offset); + txbd->buf_len = cpu_to_le16(tx_swbd->len); + txbd->frm_len = cpu_to_le16(frm_len); + + memcpy(&tx_ring->tx_swbd[i], tx_swbd, sizeof(*tx_swbd)); +} + +/* puts in the tx ring one xdp frame, mapped as an array of tx software buffer + * descriptors. 
+ */ +static bool enetc_xdp_tx(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *xdp_tx_arr, int num_tx_swbd) +{ + struct enetc_tx_swbd *tmp_tx_swbd = xdp_tx_arr; + int i, k, frm_len = tmp_tx_swbd->len; + + if (unlikely(enetc_bd_unused(tx_ring) < enetc_txbds_needed(num_tx_swbd))) + return false; + + while (unlikely(!tmp_tx_swbd->is_eof)) { + tmp_tx_swbd++; + frm_len += tmp_tx_swbd->len; + } + + i = tx_ring->next_to_use; + + for (k = 0; k < num_tx_swbd; k++) { + struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[k]; + + enetc_xdp_map_tx_buff(tx_ring, i, xdp_tx_swbd, frm_len); + + /* last bd needs 'f' bit set */ + if (xdp_tx_swbd->is_eof) { + union enetc_tx_bd *txbd = enetc_txbd(*tx_ring, i); + + txbd->flags = enetc_txbd_flags_f; + } + + enetc_bdr_idx_inc(tx_ring, &i); + } + + tx_ring->next_to_use = i; + + return true; +} + + /* to be used for xdp_tx */ + rx_swbd->len = size; + + /* to be used for xdp_tx */ + rx_swbd->len = size; + - /* sync for use by the device */ - dma_from_device); + rx_swbd->dir); +/* convert rx buffer descriptors to tx buffer descriptors. these will be + * recycled back into the rx ring in enetc_clean_tx_ring. we need to scrub the + * rx software bds because the ownership of the buffer no longer belongs to the + * rx ring, so enetc_refill_rx_ring may not reuse rx_swbd->page. 
+ */ +static int enetc_rx_swbd_to_xdp_tx_swbd(struct enetc_tx_swbd *xdp_tx_arr, + struct enetc_bdr *rx_ring, + int rx_ring_first, int rx_ring_last) +{ + int n = 0; + + for (; rx_ring_first != rx_ring_last; + n++, enetc_bdr_idx_inc(rx_ring, &rx_ring_first)) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; + struct enetc_tx_swbd *tx_swbd = &xdp_tx_arr[n]; + + /* no need to dma_map, we already have dma_bidirectional */ + tx_swbd->dma = rx_swbd->dma; + tx_swbd->dir = rx_swbd->dir; + tx_swbd->page = rx_swbd->page; + tx_swbd->page_offset = rx_swbd->page_offset; + tx_swbd->len = rx_swbd->len; + tx_swbd->is_dma_page = true; + tx_swbd->is_xdp_tx = true; + tx_swbd->is_eof = false; + memset(rx_swbd, 0, sizeof(*rx_swbd)); + } + + /* we rely on caller providing an rx_ring_last > rx_ring_first */ + xdp_tx_arr[n - 1].is_eof = true; + + return n; +} + + struct enetc_tx_swbd xdp_tx_arr[enetc_max_skb_frags] = {0}; + struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev); + struct enetc_bdr *tx_ring = priv->tx_ring[rx_ring->index]; + int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0; - if (cleaned_cnt >= enetc_rxbd_bundle) - cleaned_cnt -= enetc_refill_rx_ring(rx_ring, - cleaned_cnt); - + case xdp_tx: + xdp_tx_bd_cnt = enetc_rx_swbd_to_xdp_tx_swbd(xdp_tx_arr, + rx_ring, + orig_i, i); + + if (!enetc_xdp_tx(tx_ring, xdp_tx_arr, xdp_tx_bd_cnt)) { + enetc_xdp_drop(rx_ring, orig_i, i); + tx_ring->stats.xdp_tx_drops++; + } else { + tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; + xdp_tx_frm_cnt++; + } + break; + if (xdp_tx_frm_cnt) + enetc_update_tx_ring_tail(tx_ring); + + if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - + rx_ring->xdp.xdp_tx_in_flight); + - dma_unmap_page(rx_ring->dev, rx_swbd->dma, - page_size, dma_from_device); + dma_unmap_page(rx_ring->dev, rx_swbd->dma, page_size, + rx_swbd->dir); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h 
b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h + struct page *page; /* valid only if is_xdp_tx */ + u16 page_offset; /* valid only if is_xdp_tx */ + enum dma_data_direction dir; + u8 is_xdp_tx:1; + enum dma_data_direction dir; + u16 len; +/* enetc overhead: optional extension bd + 1 bd gap */ +#define enetc_txbds_needed(val) ((val) + 2) +/* max # of chained tx bds is 15, including head and extension bd */ +#define enetc_max_skb_frags 13 +#define enetc_txbds_max_needed enetc_txbds_needed(enetc_max_skb_frags + 1) + + unsigned int xdp_tx; + unsigned int xdp_tx_drops; + unsigned int recycles; + unsigned int recycle_failures; + int xdp_tx_in_flight; +static inline int enetc_swbd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_alloc) + return bdr->next_to_clean - bdr->next_to_alloc - 1; + + return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1; +} + diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c + "rx ring %2d recycles", + "rx ring %2d recycle failures", + "tx ring %2d xdp frames", + "tx ring %2d xdp drops", - for (i = 0; i < priv->num_tx_rings; i++) + for (i = 0; i < priv->num_tx_rings; i++) { + data[o++] = priv->tx_ring[i]->stats.xdp_tx; + data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops; + } + data[o++] = priv->rx_ring[i]->stats.recycles; + data[o++] = priv->rx_ring[i]->stats.recycle_failures;
Networking
7ed2bc80074ed4ed30e0cab323305bde851f7a87
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: increase rx ring default size
as explained in the xdp_tx patch, when receiving a burst of frames with the xdp_tx verdict, there is a momentary dip in the number of available rx buffers. the system will eventually recover as tx completions will start kicking in and refilling our rx bd ring again. but until that happens, we need to survive with as few out-of-buffer discards as possible.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h']
1
1
1
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h -#define enetc_rx_ring_default_size 512 +#define enetc_rx_ring_default_size 2048
Networking
d6a2829e82cff9e5ec10b8ee293488b57399ed01
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: add support for xdp_redirect
the driver implementation of the xdp_redirect action reuses parts from xdp_tx, most notably the enetc_xdp_tx function which transmits an array of tx software bds. only this time, the buffers don't have dma mappings, we need to create them.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
xdp support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
4
218
12
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c +static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect) + return null; + + return tx_swbd->skb; +} + +static struct xdp_frame * +enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->is_xdp_redirect) + return tx_swbd->xdp_frame; + + return null; +} + -static void enetc_free_tx_skb(struct enetc_bdr *tx_ring, - struct enetc_tx_swbd *tx_swbd) +static void enetc_free_tx_frame(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) + struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); + struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); + - if (tx_swbd->skb) { - dev_kfree_skb_any(tx_swbd->skb); + if (xdp_frame) { + xdp_return_frame(tx_swbd->xdp_frame); + tx_swbd->xdp_frame = null; + } else if (skb) { + dev_kfree_skb_any(skb); - enetc_free_tx_skb(tx_ring, tx_swbd); + enetc_free_tx_frame(tx_ring, tx_swbd); + struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd); + struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd); + - if (tx_swbd->skb) { + if (xdp_frame) { + xdp_return_frame(xdp_frame); + tx_swbd->xdp_frame = null; + } else if (skb) { - enetc_tstamp_tx(tx_swbd->skb, tstamp); + enetc_tstamp_tx(skb, tstamp); - napi_consume_skb(tx_swbd->skb, napi_budget); + napi_consume_skb(skb, napi_budget); +static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *xdp_tx_arr, + struct xdp_frame *xdp_frame) +{ + struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0]; + struct skb_shared_info *shinfo; + void *data = xdp_frame->data; + int len = xdp_frame->len; + skb_frag_t *frag; + dma_addr_t dma; + unsigned int f; + int n = 0; + + dma = dma_map_single(tx_ring->dev, data, len, dma_to_device); + if 
(unlikely(dma_mapping_error(tx_ring->dev, dma))) { + netdev_err(tx_ring->ndev, "dma map error "); + return -1; + } + + xdp_tx_swbd->dma = dma; + xdp_tx_swbd->dir = dma_to_device; + xdp_tx_swbd->len = len; + xdp_tx_swbd->is_xdp_redirect = true; + xdp_tx_swbd->is_eof = false; + xdp_tx_swbd->xdp_frame = null; + + n++; + xdp_tx_swbd = &xdp_tx_arr[n]; + + shinfo = xdp_get_shared_info_from_frame(xdp_frame); + + for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags; + f++, frag++) { + data = skb_frag_address(frag); + len = skb_frag_size(frag); + + dma = dma_map_single(tx_ring->dev, data, len, dma_to_device); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) { + /* undo the dma mapping for all fragments */ + while (n-- >= 0) + enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]); + + netdev_err(tx_ring->ndev, "dma map error "); + return -1; + } + + xdp_tx_swbd->dma = dma; + xdp_tx_swbd->dir = dma_to_device; + xdp_tx_swbd->len = len; + xdp_tx_swbd->is_xdp_redirect = true; + xdp_tx_swbd->is_eof = false; + xdp_tx_swbd->xdp_frame = null; + + n++; + xdp_tx_swbd = &xdp_tx_arr[n]; + } + + xdp_tx_arr[n - 1].is_eof = true; + xdp_tx_arr[n - 1].xdp_frame = xdp_frame; + + return n; +} + +int enetc_xdp_xmit(struct net_device *ndev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct enetc_tx_swbd xdp_redirect_arr[enetc_max_skb_frags] = {0}; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int xdp_tx_bd_cnt, i, k; + int xdp_tx_frm_cnt = 0; + + tx_ring = priv->tx_ring[smp_processor_id()]; + + prefetchw(enetc_txbd(*tx_ring, tx_ring->next_to_use)); + + for (k = 0; k < num_frames; k++) { + xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring, + xdp_redirect_arr, + frames[k]); + if (unlikely(xdp_tx_bd_cnt < 0)) + break; + + if (unlikely(!enetc_xdp_tx(tx_ring, xdp_redirect_arr, + xdp_tx_bd_cnt))) { + for (i = 0; i < xdp_tx_bd_cnt; i++) + enetc_unmap_tx_buff(tx_ring, + &xdp_redirect_arr[i]); + tx_ring->stats.xdp_tx_drops++; + break; + } + + 
xdp_tx_frm_cnt++; + } + + if (unlikely((flags & xdp_xmit_flush) || k != xdp_tx_frm_cnt)) + enetc_update_tx_ring_tail(tx_ring); + + tx_ring->stats.xdp_tx += xdp_tx_frm_cnt; + + return xdp_tx_frm_cnt; +} + +static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first]; + + if (rx_swbd->page) { + dma_unmap_page(rx_ring->dev, rx_swbd->dma, page_size, + rx_swbd->dir); + __free_page(rx_swbd->page); + rx_swbd->page = null; + } + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } + rx_ring->stats.xdp_redirect_failures++; +} + + int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0, xdp_redirect_frm_cnt = 0; - int xdp_tx_bd_cnt, xdp_tx_frm_cnt = 0; + int tmp_orig_i, err; + break; + case xdp_redirect: + /* xdp_return_frame does not support s/g in the sense + * that it leaks the fragments (__xdp_return should not + * call page_frag_free only for the initial buffer). + * until xdp_redirect gains support for s/g let's keep + * the code structure in place, but dead. we drop the + * s/g frames ourselves to avoid memory leaks which + * would otherwise leave the kernel oom. 
+ */ + if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) { + enetc_xdp_drop(rx_ring, orig_i, i); + rx_ring->stats.xdp_redirect_sg++; + break; + } + + tmp_orig_i = orig_i; + + while (orig_i != i) { + enetc_put_rx_buff(rx_ring, + &rx_ring->rx_swbd[orig_i]); + enetc_bdr_idx_inc(rx_ring, &orig_i); + } + + err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); + if (unlikely(err)) { + enetc_xdp_free(rx_ring, tmp_orig_i, i); + } else { + xdp_redirect_frm_cnt++; + rx_ring->stats.xdp_redirect++; + } + + if (unlikely(xdp_redirect_frm_cnt > enetc_default_tx_work)) { + xdp_do_flush_map(); + xdp_redirect_frm_cnt = 0; + } + + if (xdp_redirect_frm_cnt) + xdp_do_flush_map(); + - enetc_free_tx_skb(txr, &txr->tx_swbd[i]); + enetc_free_tx_frame(txr, &txr->tx_swbd[i]); - enetc_free_tx_skb(tx_ring, tx_swbd); + enetc_free_tx_frame(tx_ring, tx_swbd); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct xdp_frame *xdp_frame; + }; + u8 is_xdp_redirect:1; + unsigned int xdp_redirect; + unsigned int xdp_redirect_failures; + unsigned int xdp_redirect_sg; +int enetc_xdp_xmit(struct net_device *ndev, int num_frames, + struct xdp_frame **frames, u32 flags); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c + "rx ring %2d redirects", + "rx ring %2d redirect failures", + "rx ring %2d redirect s/g", + data[o++] = priv->rx_ring[i]->stats.xdp_redirect; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures; + data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c --- 
a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c + .ndo_xdp_xmit = enetc_xdp_xmit,
Networking
9d2b68cc108db2fdb35022ed2d88cfb305c441a6
vladimir oltean
drivers
net
enetc, ethernet, freescale
net: enetc: create a common enetc_pf_to_port helper
even though enetc interfaces are exposed as individual pcie pfs with their own driver instances, the enetc is still fundamentally a multi-port ethernet controller, and some parts of the ip take a port number (as can be seen in the psfp implementation).
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
flow control for nxp enetc
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
2
22
10
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h +static inline int enetc_pf_to_port(struct pci_dev *pf_pdev) +{ + switch (pf_pdev->devfn) { + case 0: + return 0; + case 1: + return 1; + case 2: + return 2; + case 6: + return 3; + default: + return -1; + } +} + diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c -static inline int enetc_get_port(struct enetc_ndev_priv *priv) -{ - return priv->si->pdev->devfn & 0x7; -} - - si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); + si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); - si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv)); + si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); - sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv)); + sfi_config->input_ports = + cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev)); - set_bit(enetc_get_port(priv), &epsfp.dev_bitmap); + set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap); - clear_bit(enetc_get_port(priv), &epsfp.dev_bitmap); + clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
Networking
87614b931c24d9dfc934ef9deaaf55d1cbdc2ac2
vladimir oltean
drivers
net
enetc, ethernet, freescale
dt-bindings: net: fsl: enetc: add the ierb documentation
mention the required compatible string and base address for the integrated endpoint register block node.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
flow control for nxp enetc
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['txt']
1
15
0
--- diff --git a/documentation/devicetree/bindings/net/fsl-enetc.txt b/documentation/devicetree/bindings/net/fsl-enetc.txt --- a/documentation/devicetree/bindings/net/fsl-enetc.txt +++ b/documentation/devicetree/bindings/net/fsl-enetc.txt + +* integrated endpoint register block bindings + +optionally, the fsl_enetc driver can probe on the integrated endpoint register +block, which preconfigures the fifo limits for the enetc ports. this is a node +with the following properties: + +- reg : specifies the address in the soc memory space. +- compatible : must be "fsl,ls1028a-enetc-ierb". + +example: + ierb@1f0800000 { + compatible = "fsl,ls1028a-enetc-ierb"; + reg = <0x01 0xf0800000 0x0 0x10000>; + };
Networking
4ac7acc67f29927975e2493a9f4ede0c631bb87a
vladimir oltean
documentation
devicetree
bindings, net
net: enetc: add a mini driver for the integrated endpoint register block
the nxp enetc is a 4-port ethernet controller which 'smells' to operating systems like 4 distinct pcie pfs with sr-iov, each pf having its own driver instance, but in fact there are some hardware resources which are shared between all ports, like for example the 256 kb sram fifo between the macs and the host transfer agent which dmas frames to dram.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
flow control for nxp enetc
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'kconfig', 'c', 'makefile']
5
221
1
--- diff --git a/drivers/net/ethernet/freescale/enetc/kconfig b/drivers/net/ethernet/freescale/enetc/kconfig --- a/drivers/net/ethernet/freescale/enetc/kconfig +++ b/drivers/net/ethernet/freescale/enetc/kconfig + depends on fsl_enetc_ierb || fsl_enetc_ierb=n +config fsl_enetc_ierb + tristate "enetc ierb driver" + help + this driver configures the integrated endpoint register block on nxp + ls1028a. + + if compiled as module (m), the module name is fsl-enetc-ierb. + diff --git a/drivers/net/ethernet/freescale/enetc/makefile b/drivers/net/ethernet/freescale/enetc/makefile --- a/drivers/net/ethernet/freescale/enetc/makefile +++ b/drivers/net/ethernet/freescale/enetc/makefile +obj-$(config_fsl_enetc_ierb) += fsl-enetc-ierb.o +fsl-enetc-ierb-y := enetc_ierb.o + diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c +// spdx-license-identifier: (gpl-2.0+ or bsd-3-clause) +/* copyright 2021 nxp semiconductors + * + * the integrated endpoint register block (ierb) is configured by pre-boot + * software and is supposed to be to enetc what a nvram is to a 'real' pcie + * card. upon flr, values from the ierb are transferred to the enetc pfs, and + * are read-only in the pf memory space. + * + * this driver fixes up the power-on reset values for the enetc shared fifo, + * such that the tx and rx allocations are sufficient for jumbo frames, and + * that intelligent fifo dropping is enabled before the internal data + * structures are corrupted. + * + * even though not all ports might be used on a given board, we are not + * concerned with partitioning the fifo, because the default values configure + * no strict reservations, so the entire fifo can be used by the rx of a single + * port, or the tx of a single port. 
+ */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include "enetc.h" +#include "enetc_ierb.h" + +/* ierb registers */ +#define enetc_ierb_txmbar(port) (((port) * 0x100) + 0x8080) +#define enetc_ierb_rxmber(port) (((port) * 0x100) + 0x8090) +#define enetc_ierb_rxmblr(port) (((port) * 0x100) + 0x8094) +#define enetc_ierb_rxbcr(port) (((port) * 0x100) + 0x80a0) +#define enetc_ierb_txbcr(port) (((port) * 0x100) + 0x80a8) +#define enetc_ierb_fmbdtr 0xa000 + +#define enetc_reserved_for_icm 1024 + +struct enetc_ierb { + void __iomem *regs; +}; + +static void enetc_ierb_write(struct enetc_ierb *ierb, u32 offset, u32 val) +{ + iowrite32(val, ierb->regs + offset); +} + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + struct enetc_ierb *ierb = platform_get_drvdata(pdev); + int port = enetc_pf_to_port(pf_pdev); + u16 tx_credit, rx_credit, tx_alloc; + + if (port < 0) + return -enodev; + + if (!ierb) + return -eprobe_defer; + + /* by default, it is recommended to set the host transfer agent + * per port transmit byte credit to "1000 + max_frame_size/2". + * the power-on reset value (1800 bytes) is rounded up to the nearest + * 100 assuming a maximum frame size of 1536 bytes. + */ + tx_credit = roundup(1000 + enetc_mac_maxfrm_size / 2, 100); + + /* internal memory allocated for transmit buffering is guaranteed but + * not reserved; i.e. if the total transmit allocation is not used, + * then the unused portion is not left idle, it can be used for receive + * buffering but it will be reclaimed, if required, from receive by + * intelligently dropping already stored receive frames in the internal + * memory to ensure that the transmit allocation is respected. 
+ * + * patxmbar must be set to a value larger than + * patxbcr + 2 * max_frame_size + 32 + * if frame preemption is not enabled, or to + * 2 * patxbcr + 2 * p_max_frame_size (pmac maximum frame size) + + * 2 * np_max_frame_size (emac maximum frame size) + 64 + * if frame preemption is enabled. + */ + tx_alloc = roundup(2 * tx_credit + 4 * enetc_mac_maxfrm_size + 64, 16); + + /* initial credits, in units of 8 bytes, to the ingress congestion + * manager for the maximum amount of bytes the port is allocated for + * pending traffic. + * it is recommended to set the initial credits to 2 times the maximum + * frame size (2 frames of maximum size). + */ + rx_credit = div_round_up(enetc_mac_maxfrm_size * 2, 8); + + enetc_ierb_write(ierb, enetc_ierb_txbcr(port), tx_credit); + enetc_ierb_write(ierb, enetc_ierb_txmbar(port), tx_alloc); + enetc_ierb_write(ierb, enetc_ierb_rxbcr(port), rx_credit); + + return 0; +} +export_symbol(enetc_ierb_register_pf); + +static int enetc_ierb_probe(struct platform_device *pdev) +{ + struct enetc_ierb *ierb; + struct resource *res; + void __iomem *regs; + + ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), gfp_kernel); + if (!ierb) + return -enomem; + + res = platform_get_resource(pdev, ioresource_mem, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (is_err(regs)) + return ptr_err(regs); + + ierb->regs = regs; + + /* free buffer depletion threshold in bytes. + * this sets the minimum amount of free buffer memory that should be + * maintained in the datapath sub system, and when the amount of free + * buffer memory falls below this threshold, a depletion indication is + * asserted, which may trigger "intelligent drop" frame releases from + * the ingress queues in the icm. + * it is recommended to set the free buffer depletion threshold to 1024 + * bytes, since the icm needs some fifo memory for its own use. 
+ */ + enetc_ierb_write(ierb, enetc_ierb_fmbdtr, enetc_reserved_for_icm); + + platform_set_drvdata(pdev, ierb); + + return 0; +} + +static int enetc_ierb_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id enetc_ierb_match[] = { + { .compatible = "fsl,ls1028a-enetc-ierb", }, + {}, +}; +module_device_table(of, enetc_ierb_match); + +static struct platform_driver enetc_ierb_driver = { + .driver = { + .name = "fsl-enetc-ierb", + .of_match_table = enetc_ierb_match, + }, + .probe = enetc_ierb_probe, + .remove = enetc_ierb_remove, +}; + +module_platform_driver(enetc_ierb_driver); + +module_description("nxp enetc ierb"); +module_license("dual bsd/gpl"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h +/* spdx-license-identifier: (gpl-2.0+ or bsd-3-clause) */ +/* copyright 2021 nxp semiconductors */ + +#include <linux/pci.h> +#include <linux/platform_device.h> + +#if is_enabled(config_fsl_enetc_ierb) + +int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev); + +#else + +static inline int enetc_ierb_register_pf(struct platform_device *pdev, + struct pci_dev *pf_pdev) +{ + return -eopnotsupp; +} + +#endif diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c +#include <linux/of_platform.h> +#include "enetc_ierb.h" - enetc_port_wr(hw, enetc_ptxmbar, 2 * enetc_mac_maxfrm_size); +static int enetc_pf_register_with_ierb(struct pci_dev *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct platform_device *ierb_pdev; + struct device_node *ierb_node; + + /* don't register with the ierb if the pf itself is disabled */ + if (!node || !of_device_is_available(node)) + return 0; + + ierb_node = 
of_find_compatible_node(null, null, + "fsl,ls1028a-enetc-ierb"); + if (!ierb_node || !of_device_is_available(ierb_node)) + return -enodev; + + ierb_pdev = of_find_device_by_node(ierb_node); + of_node_put(ierb_node); + + if (!ierb_pdev) + return -eprobe_defer; + + return enetc_ierb_register_pf(ierb_pdev, pdev); +} + + err = enetc_pf_register_with_ierb(pdev); + if (err == -eprobe_defer) + return err; + if (err) + dev_warn(&pdev->dev, + "could not register with ierb driver: %pe, please update the device tree ", + err_ptr(err)); +
Networking
e7d48e5fbf30f85c89d83683c3d2dbdaa8884103
vladimir oltean
drivers
net
enetc, ethernet, freescale
arm64: dts: ls1028a: declare the integrated endpoint register block node
add a node describing the address in the soc memory space for the ierb.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
flow control for nxp enetc
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['dtsi']
1
6
0
--- diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi + /* integrated endpoint register block */ + ierb@1f0800000 { + compatible = "fsl,ls1028a-enetc-ierb"; + reg = <0x01 0xf0800000 0x0 0x10000>; + }; +
Networking
b764dc6cc1ba8b82d844bbcfe97e1d432a2dca5b
vladimir oltean
arch
arm64
boot, dts, freescale
net: enetc: add support for flow control
in the enetc receive path, a frame received by the mac is first stored in a 256kb 'fifo' memory, then transferred to dram when enqueuing it to the rx ring. the fifo is a shared resource for all enetc ports, but every port keeps track of its own memory utilization, on rx and on tx.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
flow control for nxp enetc
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
3
85
2
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +static void enetc_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(priv->phylink, pause); +} + +static int enetc_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct enetc_ndev_priv *priv = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(priv->phylink, pause); +} + + .get_pauseparam = enetc_get_pauseparam, + .set_pauseparam = enetc_set_pauseparam, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h +#define enetc_rbmr_cm bit(4) +#define enetc_ppauontr 0x0410 +#define enetc_ppauofftr 0x0414 +#define enetc_pm0_pause_ign bit(8) +#define enetc_pm0_pause_quanta 0x8054 +#define enetc_pm0_pause_thresh 0x8064 +#define enetc_pm1_pause_quanta 0x9054 +#define enetc_pm1_pause_thresh 0x9064 + diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c + u32 pause_off_thresh = 0, pause_on_thresh = 0; + u32 init_quanta = 0, refresh_quanta = 0; + struct enetc_hw *hw = &pf->si->hw; + u32 rbmr, cmd_cfg; + int idx; - enetc_force_rgmii_mac(&pf->si->hw, speed, duplex); + enetc_force_rgmii_mac(hw, speed, duplex); + + /* flow control */ + for (idx = 0; idx < priv->num_rx_rings; idx++) { + rbmr = enetc_rxbdr_rd(hw, idx, enetc_rbmr); + + if (tx_pause) + rbmr |= enetc_rbmr_cm; + else + rbmr &= ~enetc_rbmr_cm; + + enetc_rxbdr_wr(hw, idx, enetc_rbmr, rbmr); + } + + if 
(tx_pause) { + /* when the port first enters congestion, send a pause request + * with the maximum number of quanta. when the port exits + * congestion, it will automatically send a pause frame with + * zero quanta. + */ + init_quanta = 0xffff; + + /* also, set up the refresh timer to send follow-up pause + * frames at half the quanta value, in case the congestion + * condition persists. + */ + refresh_quanta = 0xffff / 2; + + /* start emitting pause frames when 3 large frames (or more + * smaller frames) have accumulated in the fifo waiting to be + * dmaed to the rx ring. + */ + pause_on_thresh = 3 * enetc_mac_maxfrm_size; + pause_off_thresh = 1 * enetc_mac_maxfrm_size; + } + + enetc_port_wr(hw, enetc_pm0_pause_quanta, init_quanta); + enetc_port_wr(hw, enetc_pm1_pause_quanta, init_quanta); + enetc_port_wr(hw, enetc_pm0_pause_thresh, refresh_quanta); + enetc_port_wr(hw, enetc_pm1_pause_thresh, refresh_quanta); + enetc_port_wr(hw, enetc_ppauontr, pause_on_thresh); + enetc_port_wr(hw, enetc_ppauofftr, pause_off_thresh); + + cmd_cfg = enetc_port_rd(hw, enetc_pm0_cmd_cfg); + + if (rx_pause) + cmd_cfg &= ~enetc_pm0_pause_ign; + else + cmd_cfg |= enetc_pm0_pause_ign; + + enetc_port_wr(hw, enetc_pm0_cmd_cfg, cmd_cfg); + enetc_port_wr(hw, enetc_pm1_cmd_cfg, cmd_cfg); - enetc_mac_enable(&pf->si->hw, true); + enetc_mac_enable(hw, true);
Networking
a8648887880f90137f0893aeb1a0abef30858c01
vladimir oltean
drivers
net
enetc, ethernet, freescale
enetc: support ptp sync packet one-step timestamping
this patch is to add support for ptp sync packet one-step timestamping. since enetc single-step register has to be configured dynamically per packet for correctionfield offeset and udp checksum update, current one-step timestamping packet has to be sent only when the last one completes transmitting on hardware. so, on the tx, this patch handles one-step timestamping packet as below:
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support ptp sync packet one-step timestamping
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
4
195
26
- trasmit packet immediately if no other one in transfer, or queue to - start a work when complete transfer on hardware, to release the bit - set one-step timestamping flag in extension bd. - write 30 bits current timestamp in tstamp field of extension bd. - update ptp sync packet origintimestamp field with current timestamp. - configure single-step register for correctionfield offeset and udp --- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c +#include <linux/ptp_classify.h> +static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, + u8 *msgtype, u8 *twostep, + u16 *correction_offset, u16 *body_offset) +{ + unsigned int ptp_class; + struct ptp_header *hdr; + unsigned int type; + u8 *base; + + ptp_class = ptp_classify_raw(skb); + if (ptp_class == ptp_class_none) + return -einval; + + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + return -einval; + + type = ptp_class & ptp_class_pmask; + if (type == ptp_class_ipv4 || type == ptp_class_ipv6) + *udp = 1; + else + *udp = 0; + + *msgtype = ptp_get_msgtype(hdr, ptp_class); + *twostep = hdr->flag_field[0] & 0x2; + + base = skb_mac_header(skb); + *correction_offset = (u8 *)&hdr->correction - base; + *body_offset = (u8 *)hdr + sizeof(struct ptp_header) - base; + + return 0; +} + + bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; + struct enetc_ndev_priv *priv = netdev_priv(tx_ring->ndev); + struct enetc_hw *hw = &priv->si->hw; - skb_frag_t *frag; + u8 msgtype, twostep, udp; - bool do_vlan, do_tstamp; + u16 offset1, offset2; + skb_frag_t *frag; - do_tstamp = (skb->cb[0] & enetc_f_tx_tstamp) && - (skb_shinfo(skb)->tx_flags & skbtx_hw_tstamp); - tx_swbd->do_tstamp = do_tstamp; - tx_swbd->check_wb = tx_swbd->do_tstamp; + if (skb->cb[0] & enetc_f_tx_onestep_sync_tstamp) { + if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, &offset1, + &offset2) 
|| + msgtype != ptp_msgtype_sync || twostep) + warn_once(1, "bad packet for one-step timestamping "); + else + do_onestep_tstamp = true; + } else if (skb->cb[0] & enetc_f_tx_tstamp) { + do_twostep_tstamp = true; + } + + tx_swbd->do_twostep_tstamp = do_twostep_tstamp; + tx_swbd->check_wb = tx_swbd->do_twostep_tstamp; - if (do_vlan || do_tstamp) + if (do_vlan || do_onestep_tstamp || do_twostep_tstamp) - if (do_tstamp) { + if (do_onestep_tstamp) { + u32 lo, hi, val; + u64 sec, nsec; + u8 *data; + + lo = enetc_rd_hot(hw, enetc_sictr0); + hi = enetc_rd_hot(hw, enetc_sictr1); + sec = (u64)hi << 32 | lo; + nsec = do_div(sec, 1000000000); + + /* configure extension bd */ + temp_bd.ext.tstamp = cpu_to_le32(lo & 0x3fffffff); + e_flags |= enetc_txbd_e_flags_one_step_ptp; + + /* update origintimestamp field of sync packet + * - 48 bits seconds field + * - 32 bits nanseconds field + */ + data = skb_mac_header(skb); + *(__be16 *)(data + offset2) = + htons((sec >> 32) & 0xffff); + *(__be32 *)(data + offset2 + 2) = + htonl(sec & 0xffffffff); + *(__be32 *)(data + offset2 + 6) = htonl(nsec); + + /* configure single-step register */ + val = enetc_pm0_single_step_en; + val |= enetc_set_single_step_offset(offset1); + if (udp) + val |= enetc_pm0_single_step_ch; + + enetc_port_wr(hw, enetc_pm0_single_step, val); + enetc_port_wr(hw, enetc_pm1_single_step, val); + } else if (do_twostep_tstamp) { -netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t enetc_start_xmit(struct sk_buff *skb, + struct net_device *ndev) - /* cb[0] used for tx timestamp type */ - skb->cb[0] = priv->active_offloads & enetc_f_tx_tstamp_mask; - +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 udp, msgtype, twostep; + u16 offset1, offset2; + + /* mark tx timestamp type on skb->cb[0] if requires */ + if ((skb_shinfo(skb)->tx_flags & skbtx_hw_tstamp) && + (priv->active_offloads & enetc_f_tx_tstamp_mask)) { 
+ skb->cb[0] = priv->active_offloads & enetc_f_tx_tstamp_mask; + } else { + skb->cb[0] = 0; + } + + /* fall back to two-step timestamp if not one-step sync packet */ + if (skb->cb[0] & enetc_f_tx_onestep_sync_tstamp) { + if (enetc_ptp_parse(skb, &udp, &msgtype, &twostep, + &offset1, &offset2) || + msgtype != ptp_msgtype_sync || twostep != 0) + skb->cb[0] = enetc_f_tx_tstamp; + } + + /* queue one-step sync packet if already locked */ + if (skb->cb[0] & enetc_f_tx_onestep_sync_tstamp) { + if (test_and_set_bit_lock(enetc_tx_onestep_tstamp_in_progress, + &priv->flags)) { + skb_queue_tail(&priv->tx_skbs, skb); + return netdev_tx_ok; + } + } + + return enetc_start_xmit(skb, ndev); +} + + struct enetc_ndev_priv *priv = netdev_priv(ndev); - bool do_tstamp; + bool do_twostep_tstamp; - do_tstamp = false; + do_twostep_tstamp = false; - tx_swbd->do_tstamp) { + tx_swbd->do_twostep_tstamp) { - do_tstamp = true; + do_twostep_tstamp = true; - if (unlikely(do_tstamp)) { + if (unlikely(tx_swbd->skb->cb[0] & + enetc_f_tx_onestep_sync_tstamp)) { + /* start work to release lock for next one-step + * timestamping packet. and send one skb in + * tx_skbs queue if has. 
+ */ + queue_work(system_wq, &priv->tx_onestep_tstamp); + } else if (unlikely(do_twostep_tstamp)) { - do_tstamp = false; + do_twostep_tstamp = false; +static void enetc_tx_onestep_tstamp(struct work_struct *work) +{ + struct enetc_ndev_priv *priv; + struct sk_buff *skb; + + priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); + + netif_tx_lock(priv->ndev); + + clear_bit_unlock(enetc_tx_onestep_tstamp_in_progress, &priv->flags); + skb = skb_dequeue(&priv->tx_skbs); + if (skb) + enetc_start_xmit(skb, priv->ndev); + + netif_tx_unlock(priv->ndev); +} + +static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv) +{ + init_work(&priv->tx_onestep_tstamp, enetc_tx_onestep_tstamp); + skb_queue_head_init(&priv->tx_skbs); +} + + enetc_tx_onestep_tstamp_init(priv); - priv->active_offloads &= ~enetc_f_tx_tstamp; + priv->active_offloads &= ~enetc_f_tx_tstamp_mask; + priv->active_offloads &= ~enetc_f_tx_tstamp_mask; + case hwtstamp_tx_onestep_sync: + priv->active_offloads &= ~enetc_f_tx_tstamp_mask; + priv->active_offloads |= enetc_f_tx_onestep_sync_tstamp; + break; - if (priv->active_offloads & enetc_f_tx_tstamp) + if (priv->active_offloads & enetc_f_tx_onestep_sync_tstamp) + config.tx_type = hwtstamp_tx_onestep_sync; + else if (priv->active_offloads & enetc_f_tx_tstamp) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h - u8 do_tstamp:1; + u8 do_twostep_tstamp:1; - enetc_f_tx_tstamp = bit(0), + enetc_f_tx_tstamp = bit(0), + enetc_f_tx_onestep_sync_tstamp = bit(1), - enetc_f_rx_tstamp = bit(8), - enetc_f_qbv = bit(9), - enetc_f_qci = bit(10), + enetc_f_rx_tstamp = bit(8), + enetc_f_qbv = bit(9), + enetc_f_qci = bit(10), +}; + +enum enetc_flags_bit { + enetc_tx_onestep_tstamp_in_progress = 0, + + unsigned long flags; + + struct work_struct tx_onestep_tstamp; + struct sk_buff_head tx_skbs; diff 
--git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c - (1 << hwtstamp_tx_on); + (1 << hwtstamp_tx_on) | + (1 << hwtstamp_tx_onestep_sync); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h +#define enetc_pm0_single_step 0x80c0 +#define enetc_pm1_single_step 0x90c0 +#define enetc_pm0_single_step_ch bit(7) +#define enetc_pm0_single_step_en bit(31) +#define enetc_set_single_step_offset(v) (((v) & 0xff) << 8) + +#define enetc_txbd_e_flags_one_step_ptp bit(1)
Networking
7294380c5211687aa4d66166984b152ee84caf5f
yangbo lu
drivers
net
enetc, ethernet, freescale
enetc: mark tx timestamp type per skb
mark tx timestamp type per skb on skb->cb[0], instead of global variable for all skbs. this is a preparation for one step timestamp support.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support ptp sync packet one-step timestamping
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['enetc ']
['h', 'c']
2
13
8
--- diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c -static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb, - int active_offloads) +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) - do_tstamp = (active_offloads & enetc_f_tx_tstamp) && + do_tstamp = (skb->cb[0] & enetc_f_tx_tstamp) && + /* cb[0] used for tx timestamp type */ + skb->cb[0] = priv->active_offloads & enetc_f_tx_tstamp_mask; + - count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads); + count = enetc_map_tx_buffs(tx_ring, skb); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h +#define enetc_f_tx_tstamp_mask 0xff - enetc_f_rx_tstamp = bit(0), - enetc_f_tx_tstamp = bit(1), - enetc_f_qbv = bit(2), - enetc_f_qci = bit(3), + /* 8 bits reserved for tx timestamp types (hwtstamp_tx_types) */ + enetc_f_tx_tstamp = bit(0), + + enetc_f_rx_tstamp = bit(8), + enetc_f_qbv = bit(9), + enetc_f_qci = bit(10),
Networking
f768e75130159b5444ee691bbdd201bef8e5bb24
yangbo lu
drivers
net
enetc, ethernet, freescale
net: ethernet: actions: add actions semi owl ethernet mac driver
add new driver for the ethernet mac used on the actions semi owl family of socs.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add actions semi owl ethernet mac driver
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
[]
['h', 'kconfig', 'c', 'makefile']
6
1,939
0
--- diff --git a/drivers/net/ethernet/kconfig b/drivers/net/ethernet/kconfig --- a/drivers/net/ethernet/kconfig +++ b/drivers/net/ethernet/kconfig +source "drivers/net/ethernet/actions/kconfig" diff --git a/drivers/net/ethernet/makefile b/drivers/net/ethernet/makefile --- a/drivers/net/ethernet/makefile +++ b/drivers/net/ethernet/makefile +obj-$(config_net_vendor_actions) += actions/ diff --git a/drivers/net/ethernet/actions/kconfig b/drivers/net/ethernet/actions/kconfig --- /dev/null +++ b/drivers/net/ethernet/actions/kconfig +# spdx-license-identifier: gpl-2.0-only + +config net_vendor_actions + bool "actions semi devices" + default y + depends on arch_actions + help + if you have a network (ethernet) card belonging to this class, say y. + + note that the answer to this question doesn't directly affect the + kernel: saying n will just cause the configurator to skip all the + questions about actions semi devices. if you say y, you will be + asked for your specific card in the following questions. + +if net_vendor_actions + +config owl_emac + tristate "actions semi owl ethernet mac support" + select phylib + help + this driver supports the actions semi ethernet media access + controller (emac) found on the s500 and s900 socs. the controller + is compliant with the ieee 802.3 csma/cd standard and supports + both half-duplex and full-duplex operation modes at 10/100 mb/s. 
+ +endif # net_vendor_actions diff --git a/drivers/net/ethernet/actions/makefile b/drivers/net/ethernet/actions/makefile --- /dev/null +++ b/drivers/net/ethernet/actions/makefile +# spdx-license-identifier: gpl-2.0-only +# +# makefile for the actions semi owl socs built-in ethernet macs +# + +obj-$(config_owl_emac) += owl-emac.o diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c --- /dev/null +++ b/drivers/net/ethernet/actions/owl-emac.c +// spdx-license-identifier: gpl-2.0-or-later +/* + * actions semi owl socs ethernet mac driver + * + * copyright (c) 2012 actions semi inc. + * copyright (c) 2021 cristian ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#include <linux/circ_buf.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/reset.h> + +#include "owl-emac.h" + +#define owl_emac_default_msg_enable (netif_msg_drv | \ + netif_msg_probe | \ + netif_msg_link) + +static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg) +{ + return readl(priv->base + reg); +} + +static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data) +{ + writel(data, priv->base + reg); +} + +static u32 owl_emac_reg_update(struct owl_emac_priv *priv, + u32 reg, u32 mask, u32 val) +{ + u32 data, old_val; + + data = owl_emac_reg_read(priv, reg); + old_val = data & mask; + + data &= ~mask; + data |= val & mask; + + owl_emac_reg_write(priv, reg, data); + + return old_val; +} + +static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits) +{ + owl_emac_reg_update(priv, reg, bits, bits); +} + +static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits) +{ + owl_emac_reg_update(priv, reg, bits, 0); +} + +static struct device *owl_emac_get_dev(struct owl_emac_priv *priv) +{ + return priv->netdev->dev.parent; +} + +static void 
owl_emac_irq_enable(struct owl_emac_priv *priv) +{ + /* enable all interrupts except tu. + * + * note the nie and aie bits shall also be set in order to actually + * enable the selected interrupts. + */ + owl_emac_reg_write(priv, owl_emac_reg_mac_csr7, + owl_emac_bit_mac_csr7_nie | + owl_emac_bit_mac_csr7_aie | + owl_emac_bit_mac_csr7_all_not_tue); +} + +static void owl_emac_irq_disable(struct owl_emac_priv *priv) +{ + /* disable all interrupts. + * + * warning: unset only the nie and aie bits in csr7 to workaround an + * unexpected side effect (mac hardware bug?!) where some bits in the + * status register (csr5) are cleared automatically before being able + * to read them via owl_emac_irq_clear(). + */ + owl_emac_reg_write(priv, owl_emac_reg_mac_csr7, + owl_emac_bit_mac_csr7_all_not_tue); +} + +static u32 owl_emac_irq_status(struct owl_emac_priv *priv) +{ + return owl_emac_reg_read(priv, owl_emac_reg_mac_csr5); +} + +static u32 owl_emac_irq_clear(struct owl_emac_priv *priv) +{ + u32 val = owl_emac_irq_status(priv); + + owl_emac_reg_write(priv, owl_emac_reg_mac_csr5, val); + + return val; +} + +static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = owl_emac_get_dev(priv); + + /* buffer pointer for the rx dma descriptor must be word aligned. 
*/ + return dma_map_single(dev, skb_tail_pointer(skb), + skb_tailroom(skb), dma_from_device); +} + +static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + struct device *dev = owl_emac_get_dev(priv); + + dma_unmap_single(dev, dma_addr, skb_tailroom(skb), dma_from_device); +} + +static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = owl_emac_get_dev(priv); + + return dma_map_single(dev, skb->data, skb_headlen(skb), dma_to_device); +} + +static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv, + struct sk_buff *skb, dma_addr_t dma_addr) +{ + struct device *dev = owl_emac_get_dev(priv); + + dma_unmap_single(dev, dma_addr, skb_headlen(skb), dma_to_device); +} + +static unsigned int owl_emac_ring_num_unused(struct owl_emac_ring *ring) +{ + return circ_space(ring->head, ring->tail, ring->size); +} + +static unsigned int owl_emac_ring_get_next(struct owl_emac_ring *ring, + unsigned int cur) +{ + return (cur + 1) & (ring->size - 1); +} + +static void owl_emac_ring_push_head(struct owl_emac_ring *ring) +{ + ring->head = owl_emac_ring_get_next(ring, ring->head); +} + +static void owl_emac_ring_pop_tail(struct owl_emac_ring *ring) +{ + ring->tail = owl_emac_ring_get_next(ring, ring->tail); +} + +static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev) +{ + struct sk_buff *skb; + int offset; + + skb = netdev_alloc_skb(netdev, owl_emac_rx_frame_max_len + + owl_emac_skb_reserve); + if (unlikely(!skb)) + return null; + + /* ensure 4 bytes dma alignment. 
*/ + offset = ((uintptr_t)skb->data) & (owl_emac_skb_align - 1); + if (unlikely(offset)) + skb_reserve(skb, owl_emac_skb_align - offset); + + return skb; +} + +static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + struct device *dev = owl_emac_get_dev(priv); + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + int i; + + for (i = 0; i < ring->size; i++) { + skb = owl_emac_alloc_skb(netdev); + if (!skb) + return -enomem; + + dma_addr = owl_emac_dma_map_rx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_kfree_skb(skb); + return -enomem; + } + + desc = &ring->descs[i]; + desc->status = owl_emac_bit_rdes0_own; + desc->control = skb_tailroom(skb) & owl_emac_msk_rdes1_rbs1; + desc->buf_addr = dma_addr; + desc->reserved = 0; + + ring->skbs[i] = skb; + ring->skbs_dma[i] = dma_addr; + } + + desc->control |= owl_emac_bit_rdes1_rer; + + ring->head = 0; + ring->tail = 0; + + return 0; +} + +static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct owl_emac_ring_desc *desc; + int i; + + for (i = 0; i < ring->size; i++) { + desc = &ring->descs[i]; + + desc->status = 0; + desc->control = owl_emac_bit_tdes1_ic; + desc->buf_addr = 0; + desc->reserved = 0; + } + + desc->control |= owl_emac_bit_tdes1_ter; + + memset(ring->skbs_dma, 0, sizeof(dma_addr_t) * ring->size); + + ring->head = 0; + ring->tail = 0; +} + +static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + int i; + + for (i = 0; i < ring->size; i++) { + ring->descs[i].status = 0; + + if (!ring->skbs_dma[i]) + continue; + + owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]); + ring->skbs_dma[i] = 0; + + dev_kfree_skb(ring->skbs[i]); + ring->skbs[i] = null; + } +} + +static void owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv) +{ + 
struct owl_emac_ring *ring = &priv->tx_ring; + int i; + + for (i = 0; i < ring->size; i++) { + ring->descs[i].status = 0; + + if (!ring->skbs_dma[i]) + continue; + + owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]); + ring->skbs_dma[i] = 0; + + dev_kfree_skb(ring->skbs[i]); + ring->skbs[i] = null; + } +} + +static int owl_emac_ring_alloc(struct device *dev, struct owl_emac_ring *ring, + unsigned int size) +{ + ring->descs = dmam_alloc_coherent(dev, + sizeof(struct owl_emac_ring_desc) * size, + &ring->descs_dma, gfp_kernel); + if (!ring->descs) + return -enomem; + + ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *), + gfp_kernel); + if (!ring->skbs) + return -enomem; + + ring->skbs_dma = devm_kcalloc(dev, size, sizeof(dma_addr_t), + gfp_kernel); + if (!ring->skbs_dma) + return -enomem; + + ring->size = size; + + return 0; +} + +static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv) +{ + owl_emac_reg_write(priv, owl_emac_reg_mac_csr2, + owl_emac_val_mac_csr2_rpd); +} + +static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv) +{ + owl_emac_reg_write(priv, owl_emac_reg_mac_csr1, + owl_emac_val_mac_csr1_tpd); +} + +static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status) +{ + return owl_emac_reg_update(priv, owl_emac_reg_mac_csr6, + owl_emac_bit_mac_csr6_st, status); +} + +static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set_tx(priv, ~0); +} + +static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status) +{ + return owl_emac_reg_update(priv, owl_emac_reg_mac_csr6, + owl_emac_msk_mac_csr6_stsr, status); +} + +static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set(priv, ~0); +} + +static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv) +{ + return owl_emac_dma_cmd_set(priv, 0); +} + +static void owl_emac_set_hw_mac_addr(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + u8 
*mac_addr = netdev->dev_addr; + u32 addr_high, addr_low; + + addr_high = mac_addr[0] << 8 | mac_addr[1]; + addr_low = mac_addr[2] << 24 | mac_addr[3] << 16 | + mac_addr[4] << 8 | mac_addr[5]; + + owl_emac_reg_write(priv, owl_emac_reg_mac_csr17, addr_high); + owl_emac_reg_write(priv, owl_emac_reg_mac_csr16, addr_low); +} + +static void owl_emac_update_link_state(struct owl_emac_priv *priv) +{ + u32 val, status; + + if (priv->pause) { + val = owl_emac_bit_mac_csr20_fce | owl_emac_bit_mac_csr20_tue; + val |= owl_emac_bit_mac_csr20_tpe | owl_emac_bit_mac_csr20_rpe; + val |= owl_emac_bit_mac_csr20_bpe; + } else { + val = 0; + } + + /* update flow control. */ + owl_emac_reg_write(priv, owl_emac_reg_mac_csr20, val); + + val = (priv->speed == speed_100) ? owl_emac_val_mac_csr6_speed_100m : + owl_emac_val_mac_csr6_speed_10m; + val <<= owl_emac_off_mac_csr6_speed; + + if (priv->duplex == duplex_full) + val |= owl_emac_bit_mac_csr6_fd; + + spin_lock_bh(&priv->lock); + + /* temporarily stop dma tx & rx. */ + status = owl_emac_dma_cmd_stop(priv); + + /* update operation modes. */ + owl_emac_reg_update(priv, owl_emac_reg_mac_csr6, + owl_emac_msk_mac_csr6_speed | + owl_emac_bit_mac_csr6_fd, val); + + /* restore dma tx & rx status. 
*/ + owl_emac_dma_cmd_set(priv, status); + + spin_unlock_bh(&priv->lock); +} + +static void owl_emac_adjust_link(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + bool state_changed = false; + + if (phydev->link) { + if (!priv->link) { + priv->link = phydev->link; + state_changed = true; + } + + if (priv->speed != phydev->speed) { + priv->speed = phydev->speed; + state_changed = true; + } + + if (priv->duplex != phydev->duplex) { + priv->duplex = phydev->duplex; + state_changed = true; + } + + if (priv->pause != phydev->pause) { + priv->pause = phydev->pause; + state_changed = true; + } + } else { + if (priv->link) { + priv->link = phydev->link; + state_changed = true; + } + } + + if (state_changed) { + if (phydev->link) + owl_emac_update_link_state(priv); + + if (netif_msg_link(priv)) + phy_print_status(phydev); + } +} + +static irqreturn_t owl_emac_handle_irq(int irq, void *data) +{ + struct net_device *netdev = data; + struct owl_emac_priv *priv = netdev_priv(netdev); + + if (netif_running(netdev)) { + owl_emac_irq_disable(priv); + napi_schedule(&priv->napi); + } + + return irq_handled; +} + +static void owl_emac_ether_addr_push(u8 **dst, const u8 *src) +{ + u32 *a = (u32 *)(*dst); + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; + + *dst += 12; +} + +static void +owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb) +{ + const u8 bcast_addr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + const u8 *mac_addr = priv->netdev->dev_addr; + u8 *frame; + int i; + + skb_put(skb, owl_emac_setup_frame_len); + + frame = skb->data; + memset(frame, 0, skb->len); + + owl_emac_ether_addr_push(&frame, mac_addr); + owl_emac_ether_addr_push(&frame, bcast_addr); + + /* fill multicast addresses. 
*/ + warn_on(priv->mcaddr_list.count >= owl_emac_max_multicast_addrs); + for (i = 0; i < priv->mcaddr_list.count; i++) { + mac_addr = priv->mcaddr_list.addrs[i]; + owl_emac_ether_addr_push(&frame, mac_addr); + } +} + +/* the setup frame is a special descriptor which is used to provide physical + * addresses (i.e. mac, broadcast and multicast) to the mac hardware for + * filtering purposes. to be recognized as a setup frame, the tdes1_set bit + * must be set in the tx descriptor control field. + */ +static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + unsigned int tx_head; + u32 status, control; + dma_addr_t dma_addr; + int ret; + + skb = owl_emac_alloc_skb(netdev); + if (!skb) + return -enomem; + + owl_emac_setup_frame_prepare(priv, skb); + + dma_addr = owl_emac_dma_map_tx(priv, skb); + if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) { + ret = -enomem; + goto err_free_skb; + } + + spin_lock_bh(&priv->lock); + + tx_head = ring->head; + desc = &ring->descs[tx_head]; + + status = read_once(desc->status); + control = read_once(desc->control); + dma_rmb(); /* ensure data has been read before used. */ + + if (unlikely(status & owl_emac_bit_tdes0_own) || + !owl_emac_ring_num_unused(ring)) { + spin_unlock_bh(&priv->lock); + owl_emac_dma_unmap_tx(priv, skb, dma_addr); + ret = -ebusy; + goto err_free_skb; + } + + ring->skbs[tx_head] = skb; + ring->skbs_dma[tx_head] = dma_addr; + + control &= owl_emac_bit_tdes1_ic | owl_emac_bit_tdes1_ter; /* maintain bits */ + control |= owl_emac_bit_tdes1_set; + control |= owl_emac_msk_tdes1_tbs1 & skb->len; + + write_once(desc->control, control); + write_once(desc->buf_addr, dma_addr); + dma_wmb(); /* flush descriptor before changing ownership. */ + write_once(desc->status, owl_emac_bit_tdes0_own); + + owl_emac_ring_push_head(ring); + + /* temporarily enable dma tx. 
*/ + status = owl_emac_dma_cmd_start_tx(priv); + + /* trigger setup frame processing. */ + owl_emac_dma_cmd_resume_tx(priv); + + /* restore dma tx status. */ + owl_emac_dma_cmd_set_tx(priv, status); + + /* stop regular tx until setup frame is processed. */ + netif_stop_queue(netdev); + + spin_unlock_bh(&priv->lock); + + return 0; + +err_free_skb: + dev_kfree_skb(skb); + return ret; +} + +static netdev_tx_t owl_emac_ndo_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct owl_emac_ring *ring = &priv->tx_ring; + struct owl_emac_ring_desc *desc; + unsigned int tx_head; + u32 status, control; + dma_addr_t dma_addr; + + dma_addr = owl_emac_dma_map_tx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_err_ratelimited(&netdev->dev, "tx dma mapping failed "); + dev_kfree_skb(skb); + netdev->stats.tx_dropped++; + return netdev_tx_ok; + } + + spin_lock_bh(&priv->lock); + + tx_head = ring->head; + desc = &ring->descs[tx_head]; + + status = read_once(desc->status); + control = read_once(desc->control); + dma_rmb(); /* ensure data has been read before used. */ + + if (!owl_emac_ring_num_unused(ring) || + unlikely(status & owl_emac_bit_tdes0_own)) { + netif_stop_queue(netdev); + spin_unlock_bh(&priv->lock); + + dev_dbg_ratelimited(&netdev->dev, "tx buffer full, status=0x%08x ", + owl_emac_irq_status(priv)); + owl_emac_dma_unmap_tx(priv, skb, dma_addr); + netdev->stats.tx_dropped++; + return netdev_tx_busy; + } + + ring->skbs[tx_head] = skb; + ring->skbs_dma[tx_head] = dma_addr; + + control &= owl_emac_bit_tdes1_ic | owl_emac_bit_tdes1_ter; /* maintain bits */ + control |= owl_emac_bit_tdes1_fs | owl_emac_bit_tdes1_ls; + control |= owl_emac_msk_tdes1_tbs1 & skb->len; + + write_once(desc->control, control); + write_once(desc->buf_addr, dma_addr); + dma_wmb(); /* flush descriptor before changing ownership. 
*/ + write_once(desc->status, owl_emac_bit_tdes0_own); + + owl_emac_dma_cmd_resume_tx(priv); + owl_emac_ring_push_head(ring); + + /* fixme: the transmission is currently restricted to a single frame + * at a time as a workaround for a mac hardware bug that causes random + * freeze of the tx queue processor. + */ + netif_stop_queue(netdev); + + spin_unlock_bh(&priv->lock); + + return netdev_tx_ok; +} + +static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *skb; + unsigned int tx_tail; + u32 status; + + tx_tail = ring->tail; + desc = &ring->descs[tx_tail]; + + status = read_once(desc->status); + dma_rmb(); /* ensure data has been read before used. */ + + if (status & owl_emac_bit_tdes0_own) + return false; + + /* check for errors. */ + if (status & owl_emac_bit_tdes0_es) { + dev_dbg_ratelimited(&netdev->dev, + "tx complete error status: 0x%08x ", + status); + + netdev->stats.tx_errors++; + + if (status & owl_emac_bit_tdes0_uf) + netdev->stats.tx_fifo_errors++; + + if (status & owl_emac_bit_tdes0_ec) + netdev->stats.tx_aborted_errors++; + + if (status & owl_emac_bit_tdes0_lc) + netdev->stats.tx_window_errors++; + + if (status & owl_emac_bit_tdes0_nc) + netdev->stats.tx_heartbeat_errors++; + + if (status & owl_emac_bit_tdes0_lo) + netdev->stats.tx_carrier_errors++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += ring->skbs[tx_tail]->len; + } + + /* some collisions occurred, but pkt has been transmitted. 
*/ + if (status & owl_emac_bit_tdes0_de) + netdev->stats.collisions++; + + skb = ring->skbs[tx_tail]; + owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]); + dev_kfree_skb(skb); + + ring->skbs[tx_tail] = null; + ring->skbs_dma[tx_tail] = 0; + + owl_emac_ring_pop_tail(ring); + + if (unlikely(netif_queue_stopped(netdev))) + netif_wake_queue(netdev); + + return true; +} + +static void owl_emac_tx_complete(struct owl_emac_priv *priv) +{ + struct owl_emac_ring *ring = &priv->tx_ring; + struct net_device *netdev = priv->netdev; + unsigned int tx_next; + u32 status; + + spin_lock(&priv->lock); + + while (ring->tail != ring->head) { + if (!owl_emac_tx_complete_tail(priv)) + break; + } + + /* fixme: this is a workaround for a mac hardware bug not clearing + * (sometimes) the own bit for a transmitted frame descriptor. + * + * at this point, when tx queue is full, the tail descriptor has the + * own bit set, which normally means the frame has not been processed + * or transmitted yet. but if there is at least one descriptor in the + * queue having the own bit cleared, we can safely assume the tail + * frame has been also processed by the mac hardware. + * + * if that's the case, let's force the frame completion by manually + * clearing the own bit. + */ + if (unlikely(!owl_emac_ring_num_unused(ring))) { + tx_next = ring->tail; + + while ((tx_next = owl_emac_ring_get_next(ring, tx_next)) != ring->head) { + status = read_once(ring->descs[tx_next].status); + dma_rmb(); /* ensure data has been read before used. */ + + if (status & owl_emac_bit_tdes0_own) + continue; + + netdev_dbg(netdev, "found uncleared tx desc own bit "); + + status = read_once(ring->descs[ring->tail].status); + dma_rmb(); /* ensure data has been read before used. 
*/ + status &= ~owl_emac_bit_tdes0_own; + write_once(ring->descs[ring->tail].status, status); + + owl_emac_tx_complete_tail(priv); + break; + } + } + + spin_unlock(&priv->lock); +} + +static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget) +{ + struct owl_emac_ring *ring = &priv->rx_ring; + struct device *dev = owl_emac_get_dev(priv); + struct net_device *netdev = priv->netdev; + struct owl_emac_ring_desc *desc; + struct sk_buff *curr_skb, *new_skb; + dma_addr_t curr_dma, new_dma; + unsigned int rx_tail, len; + u32 status; + int recv = 0; + + while (recv < budget) { + spin_lock(&priv->lock); + + rx_tail = ring->tail; + desc = &ring->descs[rx_tail]; + + status = read_once(desc->status); + dma_rmb(); /* ensure data has been read before used. */ + + if (status & owl_emac_bit_rdes0_own) { + spin_unlock(&priv->lock); + break; + } + + curr_skb = ring->skbs[rx_tail]; + curr_dma = ring->skbs_dma[rx_tail]; + owl_emac_ring_pop_tail(ring); + + spin_unlock(&priv->lock); + + if (status & (owl_emac_bit_rdes0_de | owl_emac_bit_rdes0_rf | + owl_emac_bit_rdes0_tl | owl_emac_bit_rdes0_cs | + owl_emac_bit_rdes0_db | owl_emac_bit_rdes0_ce | + owl_emac_bit_rdes0_zero)) { + dev_dbg_ratelimited(&netdev->dev, + "rx desc error status: 0x%08x ", + status); + + if (status & owl_emac_bit_rdes0_de) + netdev->stats.rx_over_errors++; + + if (status & (owl_emac_bit_rdes0_rf | owl_emac_bit_rdes0_db)) + netdev->stats.rx_frame_errors++; + + if (status & owl_emac_bit_rdes0_tl) + netdev->stats.rx_length_errors++; + + if (status & owl_emac_bit_rdes0_cs) + netdev->stats.collisions++; + + if (status & owl_emac_bit_rdes0_ce) + netdev->stats.rx_crc_errors++; + + if (status & owl_emac_bit_rdes0_zero) + netdev->stats.rx_fifo_errors++; + + goto drop_skb; + } + + len = (status & owl_emac_msk_rdes0_fl) >> owl_emac_off_rdes0_fl; + if (unlikely(len > owl_emac_rx_frame_max_len)) { + netdev->stats.rx_length_errors++; + netdev_err(netdev, "invalid rx frame len: %u ", len); + goto drop_skb; + } + + /* 
prepare new skb before receiving the current one. */ + new_skb = owl_emac_alloc_skb(netdev); + if (unlikely(!new_skb)) + goto drop_skb; + + new_dma = owl_emac_dma_map_rx(priv, new_skb); + if (dma_mapping_error(dev, new_dma)) { + dev_kfree_skb(new_skb); + netdev_err(netdev, "rx dma mapping failed "); + goto drop_skb; + } + + owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma); + + skb_put(curr_skb, len - eth_fcs_len); + curr_skb->ip_summed = checksum_none; + curr_skb->protocol = eth_type_trans(curr_skb, netdev); + curr_skb->dev = netdev; + + netif_receive_skb(curr_skb); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + recv++; + goto push_skb; + +drop_skb: + netdev->stats.rx_dropped++; + netdev->stats.rx_errors++; + /* reuse the current skb. */ + new_skb = curr_skb; + new_dma = curr_dma; + +push_skb: + spin_lock(&priv->lock); + + ring->skbs[ring->head] = new_skb; + ring->skbs_dma[ring->head] = new_dma; + + write_once(desc->buf_addr, new_dma); + dma_wmb(); /* flush descriptor before changing ownership. */ + write_once(desc->status, owl_emac_bit_rdes0_own); + + owl_emac_ring_push_head(ring); + + spin_unlock(&priv->lock); + } + + return recv; +} + +static int owl_emac_poll(struct napi_struct *napi, int budget) +{ + int work_done = 0, ru_cnt = 0, recv; + static int tx_err_cnt, rx_err_cnt; + struct owl_emac_priv *priv; + u32 status, proc_status; + + priv = container_of(napi, struct owl_emac_priv, napi); + + while ((status = owl_emac_irq_clear(priv)) & + (owl_emac_bit_mac_csr5_nis | owl_emac_bit_mac_csr5_ais)) { + recv = 0; + + /* tx setup frame raises eti instead of ti. */ + if (status & (owl_emac_bit_mac_csr5_ti | owl_emac_bit_mac_csr5_eti)) { + owl_emac_tx_complete(priv); + tx_err_cnt = 0; + + /* count mac internal rx errors. 
*/ + proc_status = status & owl_emac_msk_mac_csr5_rs; + proc_status >>= owl_emac_off_mac_csr5_rs; + if (proc_status == owl_emac_val_mac_csr5_rs_data || + proc_status == owl_emac_val_mac_csr5_rs_cdes || + proc_status == owl_emac_val_mac_csr5_rs_fdes) + rx_err_cnt++; + } + + if (status & owl_emac_bit_mac_csr5_ri) { + recv = owl_emac_rx_process(priv, budget - work_done); + rx_err_cnt = 0; + + /* count mac internal tx errors. */ + proc_status = status & owl_emac_msk_mac_csr5_ts; + proc_status >>= owl_emac_off_mac_csr5_ts; + if (proc_status == owl_emac_val_mac_csr5_ts_data || + proc_status == owl_emac_val_mac_csr5_ts_cdes) + tx_err_cnt++; + } else if (status & owl_emac_bit_mac_csr5_ru) { + /* mac ahb is in suspended state, will return to rx + * descriptor processing when the host changes ownership + * of the descriptor and either an rx poll demand cmd is + * issued or a new frame is recognized by the mac ahb. + */ + if (++ru_cnt == 2) + owl_emac_dma_cmd_resume_rx(priv); + + recv = owl_emac_rx_process(priv, budget - work_done); + + /* guard against too many ru interrupts. */ + if (ru_cnt > 3) + break; + } + + work_done += recv; + if (work_done >= budget) + break; + } + + if (work_done < budget) { + napi_complete_done(napi, work_done); + owl_emac_irq_enable(priv); + } + + /* reset mac when getting too many internal tx or rx errors. */ + if (tx_err_cnt > 10 || rx_err_cnt > 10) { + netdev_dbg(priv->netdev, "%s error status: 0x%08x ", + tx_err_cnt > 10 ? "tx" : "rx", status); + rx_err_cnt = 0; + tx_err_cnt = 0; + schedule_work(&priv->mac_reset_task); + } + + return work_done; +} + +static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv) +{ + u32 val; + + /* enable mdc clock generation by adjusting clkdiv according to + * the vendor implementation of the original driver. 
+ */ + val = owl_emac_reg_read(priv, owl_emac_reg_mac_csr10); + val &= owl_emac_msk_mac_csr10_clkdiv; + val |= owl_emac_val_mac_csr10_clkdiv_128 << owl_emac_off_mac_csr10_clkdiv; + + val |= owl_emac_bit_mac_csr10_sb; + val |= owl_emac_val_mac_csr10_opcode_cds << owl_emac_off_mac_csr10_opcode; + owl_emac_reg_write(priv, owl_emac_reg_mac_csr10, val); +} + +static void owl_emac_core_hw_reset(struct owl_emac_priv *priv) +{ + /* trigger hardware reset. */ + reset_control_assert(priv->reset); + usleep_range(10, 20); + reset_control_deassert(priv->reset); + usleep_range(100, 200); +} + +static int owl_emac_core_sw_reset(struct owl_emac_priv *priv) +{ + u32 val; + int ret; + + /* trigger software reset. */ + owl_emac_reg_set(priv, owl_emac_reg_mac_csr0, owl_emac_bit_mac_csr0_swr); + ret = readl_poll_timeout(priv->base + owl_emac_reg_mac_csr0, + val, !(val & owl_emac_bit_mac_csr0_swr), + owl_emac_poll_delay_usec, + owl_emac_reset_poll_timeout_usec); + if (ret) + return ret; + + if (priv->phy_mode == phy_interface_mode_rmii) { + /* enable rmii and use the 50mhz rmii clk as output to phy. */ + val = 0; + } else { + /* enable smii and use the 125mhz rmii clk as output to phy. + * additionally set smii sync delay to 4 half cycle. + */ + val = 0x04 << owl_emac_off_mac_ctrl_ssdc; + val |= owl_emac_bit_mac_ctrl_rsis; + } + owl_emac_reg_write(priv, owl_emac_reg_mac_ctrl, val); + + /* mdc is disabled after reset. */ + owl_emac_mdio_clock_enable(priv); + + /* set fifo pause & restart threshold levels. */ + val = 0x40 << owl_emac_off_mac_csr19_fptl; + val |= 0x10 << owl_emac_off_mac_csr19_frtl; + owl_emac_reg_write(priv, owl_emac_reg_mac_csr19, val); + + /* set flow control pause quanta time to ~100 ms. */ + val = 0x4fff << owl_emac_off_mac_csr18_pqt; + owl_emac_reg_write(priv, owl_emac_reg_mac_csr18, val); + + /* setup interrupt mitigation. 
*/ + val = 7 << owl_emac_off_mac_csr11_nrp; + val |= 4 << owl_emac_off_mac_csr11_rt; + owl_emac_reg_write(priv, owl_emac_reg_mac_csr11, val); + + /* set rx/tx rings base addresses. */ + owl_emac_reg_write(priv, owl_emac_reg_mac_csr3, + (u32)(priv->rx_ring.descs_dma)); + owl_emac_reg_write(priv, owl_emac_reg_mac_csr4, + (u32)(priv->tx_ring.descs_dma)); + + /* setup initial operation mode. */ + val = owl_emac_val_mac_csr6_speed_100m << owl_emac_off_mac_csr6_speed; + val |= owl_emac_bit_mac_csr6_fd; + owl_emac_reg_update(priv, owl_emac_reg_mac_csr6, + owl_emac_msk_mac_csr6_speed | + owl_emac_bit_mac_csr6_fd, val); + owl_emac_reg_clear(priv, owl_emac_reg_mac_csr6, + owl_emac_bit_mac_csr6_pr | owl_emac_bit_mac_csr6_pm); + + priv->link = 0; + priv->speed = speed_unknown; + priv->duplex = duplex_unknown; + priv->pause = 0; + priv->mcaddr_list.count = 0; + + return 0; +} + +static int owl_emac_enable(struct net_device *netdev, bool start_phy) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + int ret; + + owl_emac_dma_cmd_stop(priv); + owl_emac_irq_disable(priv); + owl_emac_irq_clear(priv); + + owl_emac_ring_prepare_tx(priv); + ret = owl_emac_ring_prepare_rx(priv); + if (ret) + goto err_unprep; + + ret = owl_emac_core_sw_reset(priv); + if (ret) { + netdev_err(netdev, "failed to soft reset mac core: %d ", ret); + goto err_unprep; + } + + owl_emac_set_hw_mac_addr(netdev); + owl_emac_setup_frame_xmit(priv); + + netdev_reset_queue(netdev); + napi_enable(&priv->napi); + + owl_emac_irq_enable(priv); + owl_emac_dma_cmd_start(priv); + + if (start_phy) + phy_start(netdev->phydev); + + netif_start_queue(netdev); + + return 0; + +err_unprep: + owl_emac_ring_unprepare_rx(priv); + owl_emac_ring_unprepare_tx(priv); + + return ret; +} + +static void owl_emac_disable(struct net_device *netdev, bool stop_phy) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + owl_emac_dma_cmd_stop(priv); + owl_emac_irq_disable(priv); + + netif_stop_queue(netdev); + napi_disable(&priv->napi); 
+ + if (stop_phy) + phy_stop(netdev->phydev); + + owl_emac_ring_unprepare_rx(priv); + owl_emac_ring_unprepare_tx(priv); +} + +static int owl_emac_ndo_open(struct net_device *netdev) +{ + return owl_emac_enable(netdev, true); +} + +static int owl_emac_ndo_stop(struct net_device *netdev) +{ + owl_emac_disable(netdev, true); + + return 0; +} + +static void owl_emac_set_multicast(struct net_device *netdev, int count) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct netdev_hw_addr *ha; + int index = 0; + + if (count <= 0) { + priv->mcaddr_list.count = 0; + return; + } + + netdev_for_each_mc_addr(ha, netdev) { + if (!is_multicast_ether_addr(ha->addr)) + continue; + + warn_on(index >= owl_emac_max_multicast_addrs); + ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr); + } + + priv->mcaddr_list.count = index; + + owl_emac_setup_frame_xmit(priv); +} + +static void owl_emac_ndo_set_rx_mode(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + u32 status, val = 0; + int mcast_count = 0; + + if (netdev->flags & iff_promisc) { + val = owl_emac_bit_mac_csr6_pr; + } else if (netdev->flags & iff_allmulti) { + val = owl_emac_bit_mac_csr6_pm; + } else if (netdev->flags & iff_multicast) { + mcast_count = netdev_mc_count(netdev); + + if (mcast_count > owl_emac_max_multicast_addrs) { + val = owl_emac_bit_mac_csr6_pm; + mcast_count = 0; + } + } + + spin_lock_bh(&priv->lock); + + /* temporarily stop dma tx & rx. */ + status = owl_emac_dma_cmd_stop(priv); + + /* update operation modes. */ + owl_emac_reg_update(priv, owl_emac_reg_mac_csr6, + owl_emac_bit_mac_csr6_pr | owl_emac_bit_mac_csr6_pm, + val); + + /* restore dma tx & rx status. */ + owl_emac_dma_cmd_set(priv, status); + + spin_unlock_bh(&priv->lock); + + /* set/reset multicast addr list. 
*/ + owl_emac_set_multicast(netdev, mcast_count); +} + +static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct sockaddr *skaddr = addr; + + if (!is_valid_ether_addr(skaddr->sa_data)) + return -eaddrnotavail; + + if (netif_running(netdev)) + return -ebusy; + + memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len); + owl_emac_set_hw_mac_addr(netdev); + + return owl_emac_setup_frame_xmit(netdev_priv(netdev)); +} + +static int owl_emac_ndo_do_ioctl(struct net_device *netdev, + struct ifreq *req, int cmd) +{ + if (!netif_running(netdev)) + return -einval; + + return phy_mii_ioctl(netdev->phydev, req, cmd); +} + +static void owl_emac_ndo_tx_timeout(struct net_device *netdev, + unsigned int txqueue) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + schedule_work(&priv->mac_reset_task); +} + +static void owl_emac_reset_task(struct work_struct *work) +{ + struct owl_emac_priv *priv; + + priv = container_of(work, struct owl_emac_priv, mac_reset_task); + + netdev_dbg(priv->netdev, "resetting mac "); + owl_emac_disable(priv->netdev, false); + owl_emac_enable(priv->netdev, false); +} + +static struct net_device_stats * +owl_emac_ndo_get_stats(struct net_device *netdev) +{ + /* fixme: if possible, try to get stats from mac hardware registers + * instead of tracking them manually in the driver. 
+ */ + + return &netdev->stats; +} + +static const struct net_device_ops owl_emac_netdev_ops = { + .ndo_open = owl_emac_ndo_open, + .ndo_stop = owl_emac_ndo_stop, + .ndo_start_xmit = owl_emac_ndo_start_xmit, + .ndo_set_rx_mode = owl_emac_ndo_set_rx_mode, + .ndo_set_mac_address = owl_emac_ndo_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = owl_emac_ndo_do_ioctl, + .ndo_tx_timeout = owl_emac_ndo_tx_timeout, + .ndo_get_stats = owl_emac_ndo_get_stats, +}; + +static void owl_emac_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, owl_emac_drvname, sizeof(info->driver)); +} + +static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + + return priv->msg_enable; +} + +static void owl_emac_ethtool_set_msglevel(struct net_device *ndev, u32 val) +{ + struct owl_emac_priv *priv = netdev_priv(ndev); + + priv->msg_enable = val; +} + +static const struct ethtool_ops owl_emac_ethtool_ops = { + .get_drvinfo = owl_emac_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_msglevel = owl_emac_ethtool_get_msglevel, + .set_msglevel = owl_emac_ethtool_set_msglevel, +}; + +static int owl_emac_mdio_wait(struct owl_emac_priv *priv) +{ + u32 val; + + /* wait while data transfer is in progress. 
*/ + return readl_poll_timeout(priv->base + owl_emac_reg_mac_csr10, + val, !(val & owl_emac_bit_mac_csr10_sb), + owl_emac_poll_delay_usec, + owl_emac_mdio_poll_timeout_usec); +} + +static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct owl_emac_priv *priv = bus->priv; + u32 data, tmp; + int ret; + + if (regnum & mii_addr_c45) + return -eopnotsupp; + + data = owl_emac_bit_mac_csr10_sb; + data |= owl_emac_val_mac_csr10_opcode_rd << owl_emac_off_mac_csr10_opcode; + + tmp = addr << owl_emac_off_mac_csr10_phyadd; + data |= tmp & owl_emac_msk_mac_csr10_phyadd; + + tmp = regnum << owl_emac_off_mac_csr10_regadd; + data |= tmp & owl_emac_msk_mac_csr10_regadd; + + owl_emac_reg_write(priv, owl_emac_reg_mac_csr10, data); + + ret = owl_emac_mdio_wait(priv); + if (ret) + return ret; + + data = owl_emac_reg_read(priv, owl_emac_reg_mac_csr10); + data &= owl_emac_msk_mac_csr10_data; + + return data; +} + +static int +owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + struct owl_emac_priv *priv = bus->priv; + u32 data, tmp; + + if (regnum & mii_addr_c45) + return -eopnotsupp; + + data = owl_emac_bit_mac_csr10_sb; + data |= owl_emac_val_mac_csr10_opcode_wr << owl_emac_off_mac_csr10_opcode; + + tmp = addr << owl_emac_off_mac_csr10_phyadd; + data |= tmp & owl_emac_msk_mac_csr10_phyadd; + + tmp = regnum << owl_emac_off_mac_csr10_regadd; + data |= tmp & owl_emac_msk_mac_csr10_regadd; + + data |= val & owl_emac_msk_mac_csr10_data; + + owl_emac_reg_write(priv, owl_emac_reg_mac_csr10, data); + + return owl_emac_mdio_wait(priv); +} + +static int owl_emac_mdio_init(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct device_node *mdio_node; + int ret; + + mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + if (!mdio_node) + return -enodev; + + if (!of_device_is_available(mdio_node)) { + ret = -enodev; + goto err_put_node; + } + + priv->mii = 
devm_mdiobus_alloc(dev); + if (!priv->mii) { + ret = -enomem; + goto err_put_node; + } + + snprintf(priv->mii->id, mii_bus_id_size, "%s", dev_name(dev)); + priv->mii->name = "owl-emac-mdio"; + priv->mii->parent = dev; + priv->mii->read = owl_emac_mdio_read; + priv->mii->write = owl_emac_mdio_write; + priv->mii->phy_mask = ~0; /* mask out all phys from auto probing. */ + priv->mii->priv = priv; + + ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node); + +err_put_node: + of_node_put(mdio_node); + return ret; +} + +static int owl_emac_phy_init(struct net_device *netdev) +{ + struct owl_emac_priv *priv = netdev_priv(netdev); + struct device *dev = owl_emac_get_dev(priv); + struct phy_device *phy; + + phy = of_phy_get_and_connect(netdev, dev->of_node, + owl_emac_adjust_link); + if (!phy) + return -enodev; + + phy_set_sym_pause(phy, true, true, true); + + if (netif_msg_link(priv)) + phy_attached_info(phy); + + return 0; +} + +static void owl_emac_get_mac_addr(struct net_device *netdev) +{ + struct device *dev = netdev->dev.parent; + int ret; + + ret = eth_platform_get_mac_address(dev, netdev->dev_addr); + if (!ret && is_valid_ether_addr(netdev->dev_addr)) + return; + + eth_hw_addr_random(netdev); + dev_warn(dev, "using random mac address %pm ", netdev->dev_addr); +} + +static __maybe_unused int owl_emac_suspend(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct owl_emac_priv *priv = netdev_priv(netdev); + + disable_irq(netdev->irq); + + if (netif_running(netdev)) { + owl_emac_disable(netdev, true); + netif_device_detach(netdev); + } + + clk_bulk_disable_unprepare(owl_emac_nclks, priv->clks); + + return 0; +} + +static __maybe_unused int owl_emac_resume(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct owl_emac_priv *priv = netdev_priv(netdev); + int ret; + + ret = clk_bulk_prepare_enable(owl_emac_nclks, priv->clks); + if (ret) + return ret; + + if (netif_running(netdev)) { + 
owl_emac_core_hw_reset(priv); + owl_emac_core_sw_reset(priv); + + ret = owl_emac_enable(netdev, true); + if (ret) { + clk_bulk_disable_unprepare(owl_emac_nclks, priv->clks); + return ret; + } + + netif_device_attach(netdev); + } + + enable_irq(netdev->irq); + + return 0; +} + +static void owl_emac_clk_disable_unprepare(void *data) +{ + struct owl_emac_priv *priv = data; + + clk_bulk_disable_unprepare(owl_emac_nclks, priv->clks); +} + +static int owl_emac_clk_set_rate(struct owl_emac_priv *priv) +{ + struct device *dev = owl_emac_get_dev(priv); + unsigned long rate; + int ret; + + switch (priv->phy_mode) { + case phy_interface_mode_rmii: + rate = 50000000; + break; + + case phy_interface_mode_smii: + rate = 125000000; + break; + + default: + dev_err(dev, "unsupported phy interface mode %d ", + priv->phy_mode); + return -eopnotsupp; + } + + ret = clk_set_rate(priv->clks[owl_emac_clk_rmii].clk, rate); + if (ret) + dev_err(dev, "failed to set rmii clock rate: %d ", ret); + + return ret; +} + +static int owl_emac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *netdev; + struct owl_emac_priv *priv; + int ret, i; + + netdev = devm_alloc_etherdev(dev, sizeof(*priv)); + if (!netdev) + return -enomem; + + platform_set_drvdata(pdev, netdev); + set_netdev_dev(netdev, dev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->msg_enable = netif_msg_init(-1, owl_emac_default_msg_enable); + + ret = of_get_phy_mode(dev->of_node, &priv->phy_mode); + if (ret) { + dev_err(dev, "failed to get phy mode: %d ", ret); + return ret; + } + + spin_lock_init(&priv->lock); + + ret = dma_set_mask_and_coherent(dev, dma_bit_mask(32)); + if (ret) { + dev_err(dev, "unsupported dma mask "); + return ret; + } + + ret = owl_emac_ring_alloc(dev, &priv->rx_ring, owl_emac_rx_ring_size); + if (ret) + return ret; + + ret = owl_emac_ring_alloc(dev, &priv->tx_ring, owl_emac_tx_ring_size); + if (ret) + return ret; + + priv->base = 
devm_platform_ioremap_resource(pdev, 0); + if (is_err(priv->base)) + return ptr_err(priv->base); + + netdev->irq = platform_get_irq(pdev, 0); + if (netdev->irq < 0) + return netdev->irq; + + ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq, + irqf_shared, netdev->name, netdev); + if (ret) { + dev_err(dev, "failed to request irq: %d ", netdev->irq); + return ret; + } + + for (i = 0; i < owl_emac_nclks; i++) + priv->clks[i].id = owl_emac_clk_names[i]; + + ret = devm_clk_bulk_get(dev, owl_emac_nclks, priv->clks); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(owl_emac_nclks, priv->clks); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv); + if (ret) + return ret; + + ret = owl_emac_clk_set_rate(priv); + if (ret) + return ret; + + priv->reset = devm_reset_control_get_exclusive(dev, null); + if (is_err(priv->reset)) + return dev_err_probe(dev, ptr_err(priv->reset), + "failed to get reset control"); + + owl_emac_get_mac_addr(netdev); + + owl_emac_core_hw_reset(priv); + owl_emac_mdio_clock_enable(priv); + + ret = owl_emac_mdio_init(netdev); + if (ret) { + dev_err(dev, "failed to initialize mdio bus "); + return ret; + } + + ret = owl_emac_phy_init(netdev); + if (ret) { + dev_err(dev, "failed to initialize phy "); + return ret; + } + + init_work(&priv->mac_reset_task, owl_emac_reset_task); + + netdev->min_mtu = owl_emac_mtu_min; + netdev->max_mtu = owl_emac_mtu_max; + netdev->watchdog_timeo = owl_emac_tx_timeout; + netdev->netdev_ops = &owl_emac_netdev_ops; + netdev->ethtool_ops = &owl_emac_ethtool_ops; + netif_napi_add(netdev, &priv->napi, owl_emac_poll, napi_poll_weight); + + ret = devm_register_netdev(dev, netdev); + if (ret) { + netif_napi_del(&priv->napi); + phy_disconnect(netdev->phydev); + return ret; + } + + return 0; +} + +static int owl_emac_remove(struct platform_device *pdev) +{ + struct owl_emac_priv *priv = platform_get_drvdata(pdev); + + netif_napi_del(&priv->napi); + 
phy_disconnect(priv->netdev->phydev); + cancel_work_sync(&priv->mac_reset_task); + + return 0; +} + +static const struct of_device_id owl_emac_of_match[] = { + { .compatible = "actions,owl-emac", }, + { } +}; +module_device_table(of, owl_emac_of_match); + +static simple_dev_pm_ops(owl_emac_pm_ops, + owl_emac_suspend, owl_emac_resume); + +static struct platform_driver owl_emac_driver = { + .driver = { + .name = owl_emac_drvname, + .of_match_table = owl_emac_of_match, + .pm = &owl_emac_pm_ops, + }, + .probe = owl_emac_probe, + .remove = owl_emac_remove, +}; +module_platform_driver(owl_emac_driver); + +module_description("actions semi owl socs ethernet mac driver"); +module_author("actions semi inc."); +module_author("cristian ciocaltea <cristian.ciocaltea@gmail.com>"); +module_license("gpl"); diff --git a/drivers/net/ethernet/actions/owl-emac.h b/drivers/net/ethernet/actions/owl-emac.h --- /dev/null +++ b/drivers/net/ethernet/actions/owl-emac.h +/* spdx-license-identifier: gpl-2.0-or-later */ +/* + * actions semi owl socs ethernet mac driver + * + * copyright (c) 2012 actions semi inc. 
+ * copyright (c) 2021 cristian ciocaltea <cristian.ciocaltea@gmail.com> + */ + +#ifndef __owl_emac_h__ +#define __owl_emac_h__ + +#define owl_emac_drvname "owl-emac" + +#define owl_emac_poll_delay_usec 5 +#define owl_emac_mdio_poll_timeout_usec 1000 +#define owl_emac_reset_poll_timeout_usec 2000 +#define owl_emac_tx_timeout (2 * hz) + +#define owl_emac_mtu_min eth_min_mtu +#define owl_emac_mtu_max eth_data_len +#define owl_emac_rx_frame_max_len (eth_frame_len + eth_fcs_len) +#define owl_emac_skb_align 4 +#define owl_emac_skb_reserve 18 + +#define owl_emac_max_multicast_addrs 14 +#define owl_emac_setup_frame_len 192 + +#define owl_emac_rx_ring_size 64 +#define owl_emac_tx_ring_size 32 + +/* bus mode register */ +#define owl_emac_reg_mac_csr0 0x0000 +#define owl_emac_bit_mac_csr0_swr bit(0) /* software reset */ + +/* transmit/receive poll demand registers */ +#define owl_emac_reg_mac_csr1 0x0008 +#define owl_emac_val_mac_csr1_tpd 0x01 +#define owl_emac_reg_mac_csr2 0x0010 +#define owl_emac_val_mac_csr2_rpd 0x01 + +/* receive/transmit descriptor list base address registers */ +#define owl_emac_reg_mac_csr3 0x0018 +#define owl_emac_reg_mac_csr4 0x0020 + +/* status register */ +#define owl_emac_reg_mac_csr5 0x0028 +#define owl_emac_msk_mac_csr5_ts genmask(22, 20) /* transmit process state */ +#define owl_emac_off_mac_csr5_ts 20 +#define owl_emac_val_mac_csr5_ts_data 0x03 /* transferring data host -> fifo */ +#define owl_emac_val_mac_csr5_ts_cdes 0x07 /* closing transmit descriptor */ +#define owl_emac_msk_mac_csr5_rs genmask(19, 17) /* receive process state */ +#define owl_emac_off_mac_csr5_rs 17 +#define owl_emac_val_mac_csr5_rs_fdes 0x01 /* fetching receive descriptor */ +#define owl_emac_val_mac_csr5_rs_cdes 0x05 /* closing receive descriptor */ +#define owl_emac_val_mac_csr5_rs_data 0x07 /* transferring data fifo -> host */ +#define owl_emac_bit_mac_csr5_nis bit(16) /* normal interrupt summary */ +#define owl_emac_bit_mac_csr5_ais bit(15) /* abnormal interrupt 
summary */ +#define owl_emac_bit_mac_csr5_eri bit(14) /* early receive interrupt */ +#define owl_emac_bit_mac_csr5_gte bit(11) /* general-purpose timer expiration */ +#define owl_emac_bit_mac_csr5_eti bit(10) /* early transmit interrupt */ +#define owl_emac_bit_mac_csr5_rps bit(8) /* receive process stopped */ +#define owl_emac_bit_mac_csr5_ru bit(7) /* receive buffer unavailable */ +#define owl_emac_bit_mac_csr5_ri bit(6) /* receive interrupt */ +#define owl_emac_bit_mac_csr5_unf bit(5) /* transmit underflow */ +#define owl_emac_bit_mac_csr5_lcis bit(4) /* link change status */ +#define owl_emac_bit_mac_csr5_lciq bit(3) /* link change interrupt */ +#define owl_emac_bit_mac_csr5_tu bit(2) /* transmit buffer unavailable */ +#define owl_emac_bit_mac_csr5_tps bit(1) /* transmit process stopped */ +#define owl_emac_bit_mac_csr5_ti bit(0) /* transmit interrupt */ + +/* operation mode register */ +#define owl_emac_reg_mac_csr6 0x0030 +#define owl_emac_bit_mac_csr6_ra bit(30) /* receive all */ +#define owl_emac_bit_mac_csr6_ttm bit(22) /* transmit threshold mode */ +#define owl_emac_bit_mac_csr6_sf bit(21) /* store and forward */ +#define owl_emac_msk_mac_csr6_speed genmask(17, 16) /* eth speed selection */ +#define owl_emac_off_mac_csr6_speed 16 +#define owl_emac_val_mac_csr6_speed_100m 0x00 +#define owl_emac_val_mac_csr6_speed_10m 0x02 +#define owl_emac_bit_mac_csr6_st bit(13) /* start/stop transmit command */ +#define owl_emac_bit_mac_csr6_lp bit(10) /* loopback mode */ +#define owl_emac_bit_mac_csr6_fd bit(9) /* full duplex mode */ +#define owl_emac_bit_mac_csr6_pm bit(7) /* pass all multicast */ +#define owl_emac_bit_mac_csr6_pr bit(6) /* promiscuous mode */ +#define owl_emac_bit_mac_csr6_if bit(4) /* inverse filtering */ +#define owl_emac_bit_mac_csr6_pb bit(3) /* pass bad frames */ +#define owl_emac_bit_mac_csr6_ho bit(2) /* hash only filtering mode */ +#define owl_emac_bit_mac_csr6_sr bit(1) /* start/stop receive command */ +#define owl_emac_bit_mac_csr6_hp bit(0) 
/* hash/perfect receive filtering mode */ +#define owl_emac_msk_mac_csr6_stsr (owl_emac_bit_mac_csr6_st | \ + owl_emac_bit_mac_csr6_sr) + +/* interrupt enable register */ +#define owl_emac_reg_mac_csr7 0x0038 +#define owl_emac_bit_mac_csr7_nie bit(16) /* normal interrupt summary enable */ +#define owl_emac_bit_mac_csr7_aie bit(15) /* abnormal interrupt summary enable */ +#define owl_emac_bit_mac_csr7_ere bit(14) /* early receive interrupt enable */ +#define owl_emac_bit_mac_csr7_gte bit(11) /* general-purpose timer overflow */ +#define owl_emac_bit_mac_csr7_ete bit(10) /* early transmit interrupt enable */ +#define owl_emac_bit_mac_csr7_rse bit(8) /* receive stopped enable */ +#define owl_emac_bit_mac_csr7_rue bit(7) /* receive buffer unavailable enable */ +#define owl_emac_bit_mac_csr7_rie bit(6) /* receive interrupt enable */ +#define owl_emac_bit_mac_csr7_une bit(5) /* underflow interrupt enable */ +#define owl_emac_bit_mac_csr7_tue bit(2) /* transmit buffer unavailable enable */ +#define owl_emac_bit_mac_csr7_tse bit(1) /* transmit stopped enable */ +#define owl_emac_bit_mac_csr7_tie bit(0) /* transmit interrupt enable */ +#define owl_emac_bit_mac_csr7_all_not_tue (owl_emac_bit_mac_csr7_ere | \ + owl_emac_bit_mac_csr7_gte | \ + owl_emac_bit_mac_csr7_ete | \ + owl_emac_bit_mac_csr7_rse | \ + owl_emac_bit_mac_csr7_rue | \ + owl_emac_bit_mac_csr7_rie | \ + owl_emac_bit_mac_csr7_une | \ + owl_emac_bit_mac_csr7_tse | \ + owl_emac_bit_mac_csr7_tie) + +/* missed frames and overflow counter register */ +#define owl_emac_reg_mac_csr8 0x0040 +/* mii management and serial rom register */ +#define owl_emac_reg_mac_csr9 0x0048 + +/* mii serial management register */ +#define owl_emac_reg_mac_csr10 0x0050 +#define owl_emac_bit_mac_csr10_sb bit(31) /* start transfer or busy */ +#define owl_emac_msk_mac_csr10_clkdiv genmask(30, 28) /* clock divider */ +#define owl_emac_off_mac_csr10_clkdiv 28 +#define owl_emac_val_mac_csr10_clkdiv_128 0x04 +#define 
owl_emac_val_mac_csr10_opcode_wr 0x01 /* register write command */ +#define owl_emac_off_mac_csr10_opcode 26 /* operation mode */ +#define owl_emac_val_mac_csr10_opcode_dcg 0x00 /* disable clock generation */ +#define owl_emac_val_mac_csr10_opcode_wr 0x01 /* register write command */ +#define owl_emac_val_mac_csr10_opcode_rd 0x02 /* register read command */ +#define owl_emac_val_mac_csr10_opcode_cds 0x03 /* clock divider set */ +#define owl_emac_msk_mac_csr10_phyadd genmask(25, 21) /* physical layer address */ +#define owl_emac_off_mac_csr10_phyadd 21 +#define owl_emac_msk_mac_csr10_regadd genmask(20, 16) /* register address */ +#define owl_emac_off_mac_csr10_regadd 16 +#define owl_emac_msk_mac_csr10_data genmask(15, 0) /* register data */ + +/* general-purpose timer and interrupt mitigation control register */ +#define owl_emac_reg_mac_csr11 0x0058 +#define owl_emac_off_mac_csr11_tt 27 /* transmit timer */ +#define owl_emac_off_mac_csr11_ntp 24 /* no. of transmit packets */ +#define owl_emac_off_mac_csr11_rt 20 /* receive timer */ +#define owl_emac_off_mac_csr11_nrp 17 /* no. 
of receive packets */ + +/* mac address low/high registers */ +#define owl_emac_reg_mac_csr16 0x0080 +#define owl_emac_reg_mac_csr17 0x0088 + +/* pause time & cache thresholds register */ +#define owl_emac_reg_mac_csr18 0x0090 +#define owl_emac_off_mac_csr18_cptl 24 /* cache pause threshold level */ +#define owl_emac_off_mac_csr18_crtl 16 /* cache restart threshold level */ +#define owl_emac_off_mac_csr18_pqt 0 /* flow control pause quanta time */ + +/* fifo pause & restart threshold register */ +#define owl_emac_reg_mac_csr19 0x0098 +#define owl_emac_off_mac_csr19_fptl 16 /* fifo pause threshold level */ +#define owl_emac_off_mac_csr19_frtl 0 /* fifo restart threshold level */ + +/* flow control setup & status register */ +#define owl_emac_reg_mac_csr20 0x00a0 +#define owl_emac_bit_mac_csr20_fce bit(31) /* flow control enable */ +#define owl_emac_bit_mac_csr20_tue bit(30) /* transmit un-pause frames enable */ +#define owl_emac_bit_mac_csr20_tpe bit(29) /* transmit pause frames enable */ +#define owl_emac_bit_mac_csr20_rpe bit(28) /* receive pause frames enable */ +#define owl_emac_bit_mac_csr20_bpe bit(27) /* back pressure (half-duplex) enable */ + +/* mii control register */ +#define owl_emac_reg_mac_ctrl 0x00b0 +#define owl_emac_bit_mac_ctrl_rrsb bit(8) /* rmii_refclk select bit */ +#define owl_emac_off_mac_ctrl_ssdc 4 /* smii sync delay cycle */ +#define owl_emac_bit_mac_ctrl_rcps bit(1) /* ref_clk phase select */ +#define owl_emac_bit_mac_ctrl_rsis bit(0) /* rmii/smii interface select */ + +/* receive descriptor status field */ +#define owl_emac_bit_rdes0_own bit(31) /* ownership bit */ +#define owl_emac_bit_rdes0_ff bit(30) /* filtering fail */ +#define owl_emac_msk_rdes0_fl genmask(29, 16) /* frame length */ +#define owl_emac_off_rdes0_fl 16 +#define owl_emac_bit_rdes0_es bit(15) /* error summary */ +#define owl_emac_bit_rdes0_de bit(14) /* descriptor error */ +#define owl_emac_bit_rdes0_rf bit(11) /* runt frame */ +#define owl_emac_bit_rdes0_mf bit(10) /* 
multicast frame */ +#define owl_emac_bit_rdes0_fs bit(9) /* first descriptor */ +#define owl_emac_bit_rdes0_ls bit(8) /* last descriptor */ +#define owl_emac_bit_rdes0_tl bit(7) /* frame too long */ +#define owl_emac_bit_rdes0_cs bit(6) /* collision seen */ +#define owl_emac_bit_rdes0_ft bit(5) /* frame type */ +#define owl_emac_bit_rdes0_re bit(3) /* report on mii error */ +#define owl_emac_bit_rdes0_db bit(2) /* dribbling bit */ +#define owl_emac_bit_rdes0_ce bit(1) /* crc error */ +#define owl_emac_bit_rdes0_zero bit(0) /* legal frame length indicator */ + +/* receive descriptor control and count field */ +#define owl_emac_bit_rdes1_rer bit(25) /* receive end of ring */ +#define owl_emac_msk_rdes1_rbs1 genmask(10, 0) /* buffer 1 size */ + +/* transmit descriptor status field */ +#define owl_emac_bit_tdes0_own bit(31) /* ownership bit */ +#define owl_emac_bit_tdes0_es bit(15) /* error summary */ +#define owl_emac_bit_tdes0_lo bit(11) /* loss of carrier */ +#define owl_emac_bit_tdes0_nc bit(10) /* no carrier */ +#define owl_emac_bit_tdes0_lc bit(9) /* late collision */ +#define owl_emac_bit_tdes0_ec bit(8) /* excessive collisions */ +#define owl_emac_msk_tdes0_cc genmask(6, 3) /* collision count */ +#define owl_emac_bit_tdes0_uf bit(1) /* underflow error */ +#define owl_emac_bit_tdes0_de bit(0) /* deferred */ + +/* transmit descriptor control and count field */ +#define owl_emac_bit_tdes1_ic bit(31) /* interrupt on completion */ +#define owl_emac_bit_tdes1_ls bit(30) /* last descriptor */ +#define owl_emac_bit_tdes1_fs bit(29) /* first descriptor */ +#define owl_emac_bit_tdes1_ft1 bit(28) /* filtering type */ +#define owl_emac_bit_tdes1_set bit(27) /* setup packet */ +#define owl_emac_bit_tdes1_ac bit(26) /* add crc disable */ +#define owl_emac_bit_tdes1_ter bit(25) /* transmit end of ring */ +#define owl_emac_bit_tdes1_dpd bit(23) /* disabled padding */ +#define owl_emac_bit_tdes1_ft0 bit(22) /* filtering type */ +#define owl_emac_msk_tdes1_tbs1 genmask(10, 0) /* 
buffer 1 size */ + +static const char *const owl_emac_clk_names[] = { "eth", "rmii" }; +#define owl_emac_nclks array_size(owl_emac_clk_names) + +enum owl_emac_clk_map { + owl_emac_clk_eth = 0, + owl_emac_clk_rmii +}; + +struct owl_emac_addr_list { + u8 addrs[owl_emac_max_multicast_addrs][eth_alen]; + int count; +}; + +/* tx/rx descriptors */ +struct owl_emac_ring_desc { + u32 status; + u32 control; + u32 buf_addr; + u32 reserved; /* 2nd buffer address is not used */ +}; + +struct owl_emac_ring { + struct owl_emac_ring_desc *descs; + dma_addr_t descs_dma; + struct sk_buff **skbs; + dma_addr_t *skbs_dma; + unsigned int size; + unsigned int head; + unsigned int tail; +}; + +struct owl_emac_priv { + struct net_device *netdev; + void __iomem *base; + + struct clk_bulk_data clks[owl_emac_nclks]; + struct reset_control *reset; + + struct owl_emac_ring rx_ring; + struct owl_emac_ring tx_ring; + + struct mii_bus *mii; + struct napi_struct napi; + + phy_interface_t phy_mode; + unsigned int link; + int speed; + int duplex; + int pause; + struct owl_emac_addr_list mcaddr_list; + + struct work_struct mac_reset_task; + + u32 msg_enable; /* debug message level */ + spinlock_t lock; /* sync concurrent ring access */ +}; + +#endif /* __owl_emac_h__ */
Networking
de6e0b198239857943db395377dc1d2ddd6c05df
cristian ciocaltea
drivers
net
actions, ethernet
gianfar: drop gfar_mq_polling support
gianfar used to enable all 8 rx queues (dma rings) per ethernet device, even though the controller can only support 2 interrupt lines at most. this meant that multiple rx queues would have to be grouped per napi poll routine, and the cpu would have to split the budget and service them in a round robin manner. the overhead of this scheme proved to outweight the potential benefits. the alternative was to introduce the "single queue" polling mode, supporting one rx queue per napi, which became the default packet processing option and helped improve the performance of the driver. mq_polling also relies on undocumeted device tree properties to specify how to map the 8 rx and tx queues to a given interrupt line (aka "interrupt group"). using module parameters to enable this mode wasn't an option either. long story short, mq_polling became obsolete, now it is just dead code, and no one asked for it so far. for the tx queues, multi-queue support (more than 1 tx queue per cpu) could be revisited by adding tc mqprio support, but again, one has to consider that there are only 2 interrupt lines. so the napi poll routine would have to service multiple tx rings.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
drop gfar_mq_polling support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['gianfar']
['h', 'c']
2
11
176
--- diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c - if (priv->poll_mode == gfar_sq_polling) - gfar_write(&regs->rir0, default_2rxq_rir0); - else /* gfar_mq_polling */ - gfar_write(&regs->rir0, default_8rxq_rir0); + gfar_write(&regs->rir0, default_2rxq_rir0); - u32 rxq_mask, txq_mask; - int ret; - + /* one q per interrupt group: q0 to g0, q1 to g1 */ - - ret = of_property_read_u32(np, "fsl,rx-bit-map", &rxq_mask); - if (!ret) { - grp->rx_bit_map = rxq_mask ? - rxq_mask : (default_mapping >> priv->num_grps); - } - - ret = of_property_read_u32(np, "fsl,tx-bit-map", &txq_mask); - if (!ret) { - grp->tx_bit_map = txq_mask ? - txq_mask : (default_mapping >> priv->num_grps); - } - - if (priv->poll_mode == gfar_sq_polling) { - /* one q per interrupt group: q0 to g0, q1 to g1 */ - grp->rx_bit_map = (default_mapping >> priv->num_grps); - grp->tx_bit_map = (default_mapping >> priv->num_grps); - } - unsigned short mode, poll_mode; + unsigned short mode; - if (of_device_is_compatible(np, "fsl,etsec2")) { + if (of_device_is_compatible(np, "fsl,etsec2")) - poll_mode = gfar_sq_polling; - } else { + else - poll_mode = gfar_sq_polling; - } - if (poll_mode == gfar_sq_polling) { - num_tx_qs = num_grps; /* one txq per int group */ - num_rx_qs = num_grps; /* one rxq per int group */ - } else { /* gfar_mq_polling */ - u32 tx_queues, rx_queues; - int ret; - - /* parse the num of hw tx and rx queues */ - ret = of_property_read_u32(np, "fsl,num_tx_queues", - &tx_queues); - num_tx_qs = ret ? 1 : tx_queues; - - ret = of_property_read_u32(np, "fsl,num_rx_queues", - &rx_queues); - num_rx_qs = ret ? 
1 : rx_queues; - } + num_tx_qs = num_grps; /* one txq per int group */ + num_rx_qs = num_grps; /* one rxq per int group */ - priv->poll_mode = poll_mode; -static int gfar_poll_rx(struct napi_struct *napi, int budget) -{ - struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi_rx); - struct gfar_private *priv = gfargrp->priv; - struct gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_rx_q *rx_queue = null; - int work_done = 0, work_done_per_q = 0; - int i, budget_per_q = 0; - unsigned long rstat_rxf; - int num_act_queues; - - /* clear ievent, so interrupts aren't called again - * because of the packets that have already arrived - */ - gfar_write(&regs->ievent, ievent_rx_mask); - - rstat_rxf = gfar_read(&regs->rstat) & rstat_rxf_mask; - - num_act_queues = bitmap_weight(&rstat_rxf, max_rx_qs); - if (num_act_queues) - budget_per_q = budget/num_act_queues; - - for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { - /* skip queue if not active */ - if (!(rstat_rxf & (rstat_clear_rxf0 >> i))) - continue; - - rx_queue = priv->rx_queue[i]; - work_done_per_q = - gfar_clean_rx_ring(rx_queue, budget_per_q); - work_done += work_done_per_q; - - /* finished processing this queue */ - if (work_done_per_q < budget_per_q) { - /* clear active queue hw indication */ - gfar_write(&regs->rstat, - rstat_clear_rxf0 >> i); - num_act_queues--; - - if (!num_act_queues) - break; - } - } - - if (!num_act_queues) { - u32 imask; - napi_complete_done(napi, work_done); - - /* clear the halt bit in rstat */ - gfar_write(&regs->rstat, gfargrp->rstat); - - spin_lock_irq(&gfargrp->grplock); - imask = gfar_read(&regs->imask); - imask |= imask_rx_default; - gfar_write(&regs->imask, imask); - spin_unlock_irq(&gfargrp->grplock); - } - - return work_done; -} - -static int gfar_poll_tx(struct napi_struct *napi, int budget) -{ - struct gfar_priv_grp *gfargrp = - container_of(napi, struct gfar_priv_grp, napi_tx); - struct gfar_private *priv = gfargrp->priv; - struct 
gfar __iomem *regs = gfargrp->regs; - struct gfar_priv_tx_q *tx_queue = null; - int has_tx_work = 0; - int i; - - /* clear ievent, so interrupts aren't called again - * because of the packets that have already arrived - */ - gfar_write(&regs->ievent, ievent_tx_mask); - - for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) { - tx_queue = priv->tx_queue[i]; - /* run tx cleanup to completion */ - if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) { - gfar_clean_tx_ring(tx_queue); - has_tx_work = 1; - } - } - - if (!has_tx_work) { - u32 imask; - napi_complete(napi); - - spin_lock_irq(&gfargrp->grplock); - imask = gfar_read(&regs->imask); - imask |= imask_tx_default; - gfar_write(&regs->imask, imask); - spin_unlock_irq(&gfargrp->grplock); - } - - return 0; -} - - if (priv->poll_mode == gfar_sq_polling) { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx_sq, gfar_dev_weight); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx_sq, 2); - } else { - netif_napi_add(dev, &priv->gfargrp[i].napi_rx, - gfar_poll_rx, gfar_dev_weight); - netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, - gfar_poll_tx, 2); - } + netif_napi_add(dev, &priv->gfargrp[i].napi_rx, + gfar_poll_rx_sq, gfar_dev_weight); + netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx, + gfar_poll_tx_sq, 2); diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h -/* gfar_sq_polling: single queue napi polling mode - * the driver supports a single pair of rx/tx queues - * per interrupt group (rx/tx int line). mq_mg mode - * devices have 2 interrupt groups, so the device will - * have a total of 2 tx and 2 rx queues in this case. - * gfar_mq_polling: multi queue napi polling mode - * the driver supports all the 8 rx and tx hw queues - * each queue mapped by the device tree to one of - * the 2 interrupt groups. 
this mode implies significant - * processing overhead (cpu and controller level). - */ -enum gfar_poll_mode { - gfar_sq_polling = 0, - gfar_mq_polling -}; - - unsigned short poll_mode;
Networking
8eda54c5e6c4eb3f3a9b70fdea278f4e0f8496b2
claudiu manoil
drivers
net
ethernet, freescale
net: hns3: pf add support for pushing link status to vfs
previously, vf updates its link status every second by send query command to pf in periodic service task. if link stats of pf is changed, vf may need at most one second to update its link status.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
pf add support for pushing link status to vfs
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
4
43
8
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h + +/* pf immediately push link status to vfs when link status changed */ +#define hclge_mbx_push_link_status_en bit(0) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +static void hclge_push_link_status(struct hclge_dev *hdev) +{ + struct hclge_vport *vport; + int ret; + u16 i; + + for (i = 0; i < pci_num_vf(hdev->pdev); i++) { + vport = &hdev->vport[i + hclge_vf_vport_start_num]; + + if (!test_bit(hclge_vport_state_alive, &vport->state) || + vport->vf_info.link_state != ifla_vf_link_state_auto) + continue; + + ret = hclge_push_vf_link_status(vport); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to push link status to vf%u, ret = %d ", + i, ret); + } + } +} + + hclge_push_link_status(hdev); + int link_state_old; + int ret; + link_state_old = vport->vf_info.link_state; - return 0; + ret = hclge_push_vf_link_status(vport); + if (ret) { + vport->vf_info.link_state = link_state_old; + dev_err(&hdev->pdev->dev, + "failed to push vf%d link status, ret = %d ", vf, ret); + } + + return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +int hclge_push_vf_link_status(struct hclge_vport *vport); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c -static int 
hclge_get_link_info(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req) +int hclge_push_vf_link_status(struct hclge_vport *vport) - u8 msg_data[8]; - u8 dest_vfid; + u8 msg_data[9]; - dest_vfid = mbx_req->mbx_src_vfid; + msg_data[8] = hclge_mbx_push_link_status_en; - hclge_mbx_link_stat_change, dest_vfid); + hclge_mbx_link_stat_change, vport->vport_id); - ret = hclge_get_link_info(vport, req); + ret = hclge_push_vf_link_status(vport);
Networking
18b6e31f8bf4ac7af7b057228f38a5a530378e4e
guangbin huang
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: add support for imp-controlled phys
imp(intelligent management processor) firmware add a new feature to take control of phys for some new devices, pf driver adds support for this feature.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support imp-controlled phys
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
6
192
3
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h + int (*get_phy_link_ksettings)(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd); + int (*set_phy_link_ksettings)(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c + dev_info(&h->pdev->dev, "support imp-controlled phy: %s ", + test_bit(hnae3_dev_support_phy_imp_b, caps) ? "yes" : "no"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); - if (!netdev->phydev) + if (test_bit(hnae3_dev_support_phy_imp_b, ae_dev->caps) && + ops->get_phy_link_ksettings) + ops->get_phy_link_ksettings(h, cmd); + else if (!netdev->phydev) + } else if (test_bit(hnae3_dev_support_phy_imp_b, ae_dev->caps) && + ops->set_phy_link_ksettings) { + return ops->set_phy_link_ksettings(handle, cmd); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c + if (hnae3_get_bit(caps, hclge_cap_phy_imp_b)) + set_bit(hnae3_dev_support_phy_imp_b, ae_dev->caps); + if (hnae3_dev_phy_imp_supported(hdev)) + hnae3_set_bit(compat, hclge_phy_imp_en_b, 1); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h + + /* phy command */ + hclge_opc_phy_link_ksetting = 0x7025, +#define hclge_phy_imp_en_b 2 +#define hclge_phy_link_setting_bd_num 2 + +struct hclge_phy_link_ksetting_0_cmd { + __le32 speed; + u8 duplex; + u8 autoneg; + u8 eth_tp_mdix; + u8 eth_tp_mdix_ctrl; + u8 port; + u8 transceiver; + u8 phy_address; + u8 rsv; + __le32 supported; + __le32 advertising; + __le32 lp_advertising; +}; + +struct hclge_phy_link_ksetting_1_cmd { + u8 master_slave_cfg; + u8 master_slave_state; + u8 rsv[22]; +}; + diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle, + struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[hclge_phy_link_setting_bd_num]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + u32 supported, advertising, lp_advertising; + struct hclge_dev *hdev = vport->back; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], hclge_opc_phy_link_ksetting, + true); + desc[0].flag |= cpu_to_le16(hclge_cmd_flag_next); + hclge_cmd_setup_basic_desc(&desc[1], hclge_opc_phy_link_ksetting, + true); + + ret = hclge_cmd_send(&hdev->hw, desc, hclge_phy_link_setting_bd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to get phy link ksetting, ret = %d. 
", ret); + return ret; + } + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + cmd->base.autoneg = req0->autoneg; + cmd->base.speed = le32_to_cpu(req0->speed); + cmd->base.duplex = req0->duplex; + cmd->base.port = req0->port; + cmd->base.transceiver = req0->transceiver; + cmd->base.phy_address = req0->phy_address; + cmd->base.eth_tp_mdix = req0->eth_tp_mdix; + cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl; + supported = le32_to_cpu(req0->supported); + advertising = le32_to_cpu(req0->advertising); + lp_advertising = le32_to_cpu(req0->lp_advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + cmd->base.master_slave_cfg = req1->master_slave_cfg; + cmd->base.master_slave_state = req1->master_slave_state; + + return 0; +} + +static int +hclge_set_phy_link_ksettings(struct hnae3_handle *handle, + const struct ethtool_link_ksettings *cmd) +{ + struct hclge_desc desc[hclge_phy_link_setting_bd_num]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_phy_link_ksetting_0_cmd *req0; + struct hclge_phy_link_ksetting_1_cmd *req1; + struct hclge_dev *hdev = vport->back; + u32 advertising; + int ret; + + if (cmd->base.autoneg == autoneg_disable && + ((cmd->base.speed != speed_100 && cmd->base.speed != speed_10) || + (cmd->base.duplex != duplex_half && + cmd->base.duplex != duplex_full))) + return -einval; + + hclge_cmd_setup_basic_desc(&desc[0], hclge_opc_phy_link_ksetting, + false); + desc[0].flag |= cpu_to_le16(hclge_cmd_flag_next); + hclge_cmd_setup_basic_desc(&desc[1], hclge_opc_phy_link_ksetting, + false); + + req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data; + req0->autoneg = cmd->base.autoneg; + req0->speed = cpu_to_le32(cmd->base.speed); + 
req0->duplex = cmd->base.duplex; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + req0->advertising = cpu_to_le32(advertising); + req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl; + + req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data; + req1->master_slave_cfg = cmd->base.master_slave_cfg; + + ret = hclge_cmd_send(&hdev->hw, desc, hclge_phy_link_setting_bd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to set phy link ksettings, ret = %d. ", ret); + return ret; + } + + hdev->hw.mac.autoneg = cmd->base.autoneg; + hdev->hw.mac.speed = cmd->base.speed; + hdev->hw.mac.duplex = cmd->base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); + + return 0; +} + +static int hclge_update_tp_port_info(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + int ret; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); + if (ret) + return ret; + + hdev->hw.mac.autoneg = cmd.base.autoneg; + hdev->hw.mac.speed = cmd.base.speed; + hdev->hw.mac.duplex = cmd.base.duplex; + + return 0; +} + +static int hclge_tp_port_init(struct hclge_dev *hdev) +{ + struct ethtool_link_ksettings cmd; + + if (!hnae3_dev_phy_imp_supported(hdev)) + return 0; + + cmd.base.autoneg = hdev->hw.mac.autoneg; + cmd.base.speed = hdev->hw.mac.speed; + cmd.base.duplex = hdev->hw.mac.duplex; + linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); + + return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); +} + - return 0; + return hclge_update_tp_port_info(hdev); - if (hdev->hw.mac.media_type == hnae3_media_type_copper) { + if (hdev->hw.mac.media_type == hnae3_media_type_copper && + !hnae3_dev_phy_imp_supported(hdev)) { + ret = hclge_tp_port_init(hdev); + if (ret) { + dev_err(&pdev->dev, "failed to init tp port, ret = %d ", + ret); + return ret; + } + + .get_phy_link_ksettings = hclge_get_phy_link_ksettings, + 
.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
Networking
f5f2b3e4dcc0e944dc33b522df84576679fbd8eb
guangbin huang
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: add get/set pause parameters support for imp-controlled phys
when the imp-controlled phys feature is enabled, phydev is null. in this case, the autoneg is always off when user uses ethtool -a command to get pause parameters because hclge_get_pauseparam() uses phydev to check whether device is tp port. to fit this new feature, use media type to check whether device is tp port.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support imp-controlled phys
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['c']
1
5
4
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c - struct phy_device *phydev = hdev->hw.mac.phydev; + u8 media_type = hdev->hw.mac.media_type; - *auto_neg = phydev ? hclge_get_autoneg(handle) : 0; + *auto_neg = (media_type == hnae3_media_type_copper) ? + hclge_get_autoneg(handle) : 0; - if (phydev) { + if (phydev || hnae3_dev_phy_imp_supported(hdev)) { - if (!auto_neg) + if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
Networking
57a8f46b1bd3f5f43b06f48aab7c1f7ca0936be3
guangbin huang
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: add ioctl support for imp-controlled phys
when the imp-controlled phys feature is enabled, driver will not register mdio bus. in order to support ioctl ops for phy tool to read or write phy register in this case, the firmware implement a new command for driver and driver implement ioctl by using this new command.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support imp-controlled phys
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
4
73
1
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h + hclge_opc_phy_reg = 0x7026, +struct hclge_phy_reg_cmd { + __le16 reg_addr; + u8 rsv0[2]; + __le16 reg_val; + u8 rsv1[18]; +}; + diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + + if (!hnae3_dev_phy_imp_supported(hdev)) + return -eopnotsupp; + + switch (cmd) { + case siocgmiiphy: + data->phy_id = hdev->hw.mac.phy_addr; + /* this command reads phy id and register at the same time */ + fallthrough; + case siocgmiireg: + data->val_out = hclge_read_phy_reg(hdev, data->reg_num); + return 0; + + case siocsmiireg: + return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); + default: + return -eopnotsupp; + } +} + - return -eopnotsupp; + return hclge_mii_ioctl(hdev, ifr, cmd); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c + +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, hclge_opc_phy_reg, true); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to read phy reg, ret = %d. 
", ret); + + return le16_to_cpu(req->reg_val); +} + +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) +{ + struct hclge_phy_reg_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, hclge_opc_phy_reg, false); + + req = (struct hclge_phy_reg_cmd *)desc.data; + req->reg_addr = cpu_to_le16(reg_addr); + req->reg_val = cpu_to_le16(val); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to write phy reg, ret = %d. ", ret); + + return ret; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);
Networking
024712f51e5711d69ced729fb3398819ed6e8b53
guangbin huang
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: add phy loopback support for imp-controlled phys
if the imp-controlled phys feature is enabled, driver can not call phy driver interface to set loopback anymore and needs to send command to firmware to start phy loopback.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support imp-controlled phys
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
3
51
36
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h - hclge_opc_serdes_loopback = 0x0315, + hclge_opc_common_loopback = 0x0315, -#define hclge_cmd_serdes_done_b bit(0) -#define hclge_cmd_serdes_success_b bit(1) -struct hclge_serdes_lb_cmd { +#define hclge_cmd_ge_phy_inner_loop_b bit(3) +#define hclge_cmd_common_lb_done_b bit(0) +#define hclge_cmd_common_lb_success_b bit(1) +struct hclge_common_lb_cmd { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c - struct hclge_serdes_lb_cmd *req_serdes; + struct hclge_common_lb_cmd *req_common; - req_serdes = (struct hclge_serdes_lb_cmd *)desc.data; + req_common = (struct hclge_common_lb_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, hclge_opc_serdes_loopback, true); + hclge_cmd_setup_basic_desc(&desc, hclge_opc_common_loopback, true); - "failed to dump serdes loopback status, ret = %d ", + "failed to dump common loopback status, ret = %d ", - loopback_en = req_serdes->enable & hclge_cmd_serdes_serial_inner_loop_b; + loopback_en = req_common->enable & hclge_cmd_serdes_serial_inner_loop_b; - loopback_en = req_serdes->enable & + loopback_en = req_common->enable & - if (phydev) + if (phydev) { + } else if (hnae3_dev_phy_imp_supported(hdev)) { + loopback_en = req_common->enable & + hclge_cmd_ge_phy_inner_loop_b; + dev_info(&hdev->pdev->dev, "phy loopback: %s ", + loopback_en ? 
"on" : "off"); + } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c - if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && - hdev->hw.mac.phydev->drv->set_loopback) { + if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && + hdev->hw.mac.phydev->drv->set_loopback) || + hnae3_dev_phy_imp_supported(hdev)) { -static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, -#define hclge_serdes_retry_ms 10 -#define hclge_serdes_retry_num 100 +#define hclge_common_lb_retry_ms 10 +#define hclge_common_lb_retry_num 100 - struct hclge_serdes_lb_cmd *req; + struct hclge_common_lb_cmd *req; - req = (struct hclge_serdes_lb_cmd *)desc.data; - hclge_cmd_setup_basic_desc(&desc, hclge_opc_serdes_loopback, false); + req = (struct hclge_common_lb_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, hclge_opc_common_loopback, false); + case hnae3_loop_phy: + loop_mode_b = hclge_cmd_ge_phy_inner_loop_b; + break; - "unsupported serdes loopback mode %d ", loop_mode); + "unsupported common loopback mode %d ", loop_mode); - "serdes loopback set fail, ret = %d ", ret); + "common loopback set fail, ret = %d ", ret); - msleep(hclge_serdes_retry_ms); - hclge_cmd_setup_basic_desc(&desc, hclge_opc_serdes_loopback, + msleep(hclge_common_lb_retry_ms); + hclge_cmd_setup_basic_desc(&desc, hclge_opc_common_loopback, - "serdes loopback get, ret = %d ", ret); + "common loopback get, ret = %d ", ret); - } while (++i < hclge_serdes_retry_num && - !(req->result & hclge_cmd_serdes_done_b)); + } while (++i < hclge_common_lb_retry_num && + !(req->result & hclge_cmd_common_lb_done_b)); - if (!(req->result & hclge_cmd_serdes_done_b)) { - dev_err(&hdev->pdev->dev, "serdes loopback set timeout "); + if (!(req->result & 
hclge_cmd_common_lb_done_b)) { + dev_err(&hdev->pdev->dev, "common loopback set timeout "); - } else if (!(req->result & hclge_cmd_serdes_success_b)) { - dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw "); + } else if (!(req->result & hclge_cmd_common_lb_success_b)) { + dev_err(&hdev->pdev->dev, "common loopback set failed in fw "); -static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, +static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, - ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode); + ret = hclge_cfg_common_loopback(hdev, en, loop_mode); - if (!phydev) + if (!phydev) { + if (hnae3_dev_phy_imp_supported(hdev)) + return hclge_set_common_loopback(hdev, en, + hnae3_loop_phy); + } - ret = hclge_set_serdes_loopback(hdev, en, loop_mode); + ret = hclge_set_common_loopback(hdev, en, loop_mode); - ret = hclge_cfg_serdes_loopback(hdev, false, hnae3_loop_serial_serdes); + ret = hclge_cfg_common_loopback(hdev, false, hnae3_loop_serial_serdes); - return hclge_cfg_serdes_loopback(hdev, false, + return hclge_cfg_common_loopback(hdev, false,
Networking
b47cfe1f402dbf10279b8f12131388fdff9d2259
guangbin huang
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: refactor out hclge_add_fd_entry()
the process of function hclge_add_fd_entry() is complex and prolix. to make it more readable, extract the process of fs->ring_cookie to a single function.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['c']
1
40
27
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, + u16 *vport_id, u8 *action, u16 *queue_id) +{ + struct hclge_vport *vport = hdev->vport; + + if (ring_cookie == rx_cls_flow_disc) { + *action = hclge_fd_action_drop_packet; + } else { + u32 ring = ethtool_get_flow_spec_ring(ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); + u16 tqps; + + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "error: vf id (%u) > max vf num (%u) ", + vf, hdev->num_req_vfs); + return -einval; + } + + *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; + tqps = hdev->vport[vf].nic.kinfo.num_tqps; + + if (ring >= tqps) { + dev_err(&hdev->pdev->dev, + "error: queue id (%u) > max tqp num (%u) ", + ring, tqps - 1); + return -einval; + } + + *action = hclge_fd_action_select_queue; + *queue_id = ring; + } + + return 0; +} + - if (fs->ring_cookie == rx_cls_flow_disc) { - action = hclge_fd_action_drop_packet; - } else { - u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); - u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); - u16 tqps; - - if (vf > hdev->num_req_vfs) { - dev_err(&hdev->pdev->dev, - "error: vf id (%u) > max vf num (%u) ", - vf, hdev->num_req_vfs); - return -einval; - } - - dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; - tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; - - if (ring >= tqps) { - dev_err(&hdev->pdev->dev, - "error: queue id (%u) > max tqp num (%u) ", - ring, tqps - 1); - return -einval; - } - - action = hclge_fd_action_select_queue; - q_index = ring; - } + ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, + &action, &q_index); + if (ret) + return ret;
Networking
5f2b1238b33c38478ddc55536b65277b30f5d456
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: refactor out hclge_fd_get_tuple()
the process of function hclge_fd_get_tuple() is complex and prolix. to make it more readable, extract the process of each flow-type tuple to a single function.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['c']
1
117
103
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c -static int hclge_fd_get_tuple(struct hclge_dev *hdev, - struct ethtool_rx_flow_spec *fs, - struct hclge_fd_rule *rule) +static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) - u32 flow_type = fs->flow_type & ~(flow_ext | flow_mac_ext); + rule->tuples.src_ip[ipv4_index] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); + rule->tuples_mask.src_ip[ipv4_index] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); - switch (flow_type) { - case sctp_v4_flow: - case tcp_v4_flow: - case udp_v4_flow: - rule->tuples.src_ip[ipv4_index] = - be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src); - rule->tuples_mask.src_ip[ipv4_index] = - be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src); + rule->tuples.dst_ip[ipv4_index] = + be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[ipv4_index] = + be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); - rule->tuples.dst_ip[ipv4_index] = - be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[ipv4_index] = - be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); - rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc); - rule->tuples_mask.src_port = - be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc); + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); - rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst); - rule->tuples_mask.dst_port = - be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst); + rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; - 
rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos; - rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos; + rule->tuples.ether_proto = eth_p_ip; + rule->tuples_mask.ether_proto = 0xffff; - rule->tuples.ether_proto = eth_p_ip; - rule->tuples_mask.ether_proto = 0xffff; + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xff; +} - break; - case ip_user_flow: - rule->tuples.src_ip[ipv4_index] = - be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); - rule->tuples_mask.src_ip[ipv4_index] = - be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); +static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + rule->tuples.src_ip[ipv4_index] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src); + rule->tuples_mask.src_ip[ipv4_index] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src); - rule->tuples.dst_ip[ipv4_index] = - be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); - rule->tuples_mask.dst_ip[ipv4_index] = - be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); + rule->tuples.dst_ip[ipv4_index] = + be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst); + rule->tuples_mask.dst_ip[ipv4_index] = + be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst); - rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; - rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; + rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos; + rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos; - rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; - rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; + rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto; - rule->tuples.ether_proto = eth_p_ip; - rule->tuples_mask.ether_proto = 0xffff; + rule->tuples.ether_proto = eth_p_ip; + rule->tuples_mask.ether_proto = 0xffff; +} - break; - case sctp_v6_flow: - case tcp_v6_flow: - case udp_v6_flow: - be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.tcp_ip6_spec.ip6src, ipv6_size); - be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.tcp_ip6_spec.ip6src, 
ipv6_size); +static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule, u8 ip_proto) +{ + be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src, + ipv6_size); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src, + ipv6_size); - be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.tcp_ip6_spec.ip6dst, ipv6_size); - be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.tcp_ip6_spec.ip6dst, ipv6_size); + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst, + ipv6_size); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst, + ipv6_size); - rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); - rule->tuples_mask.src_port = - be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); + rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc); + rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc); - rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); - rule->tuples_mask.dst_port = - be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); + rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst); + rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst); - rule->tuples.ether_proto = eth_p_ipv6; - rule->tuples_mask.ether_proto = 0xffff; + rule->tuples.ether_proto = eth_p_ipv6; + rule->tuples_mask.ether_proto = 0xffff; - break; - case ipv6_user_flow: - be32_to_cpu_array(rule->tuples.src_ip, - fs->h_u.usr_ip6_spec.ip6src, ipv6_size); - be32_to_cpu_array(rule->tuples_mask.src_ip, - fs->m_u.usr_ip6_spec.ip6src, ipv6_size); + rule->tuples.ip_proto = ip_proto; + rule->tuples_mask.ip_proto = 0xff; +} - be32_to_cpu_array(rule->tuples.dst_ip, - fs->h_u.usr_ip6_spec.ip6dst, ipv6_size); - be32_to_cpu_array(rule->tuples_mask.dst_ip, - fs->m_u.usr_ip6_spec.ip6dst, ipv6_size); +static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + 
be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src, + ipv6_size); + be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src, + ipv6_size); - rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; - rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; + be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst, + ipv6_size); + be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst, + ipv6_size); - rule->tuples.ether_proto = eth_p_ipv6; - rule->tuples_mask.ether_proto = 0xffff; + rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto; + rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto; - break; - case ether_flow: - ether_addr_copy(rule->tuples.src_mac, - fs->h_u.ether_spec.h_source); - ether_addr_copy(rule->tuples_mask.src_mac, - fs->m_u.ether_spec.h_source); + rule->tuples.ether_proto = eth_p_ipv6; + rule->tuples_mask.ether_proto = 0xffff; +} - ether_addr_copy(rule->tuples.dst_mac, - fs->h_u.ether_spec.h_dest); - ether_addr_copy(rule->tuples_mask.dst_mac, - fs->m_u.ether_spec.h_dest); +static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source); + ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source); - rule->tuples.ether_proto = - be16_to_cpu(fs->h_u.ether_spec.h_proto); - rule->tuples_mask.ether_proto = - be16_to_cpu(fs->m_u.ether_spec.h_proto); + ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest); + ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest); - break; - default: - return -eopnotsupp; - } + rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto); + rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto); +} + +static int hclge_fd_get_tuple(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + u32 flow_type = fs->flow_type & 
~(flow_ext | flow_mac_ext); - case sctp_v6_flow: - rule->tuples.ip_proto = ipproto_sctp; - rule->tuples_mask.ip_proto = 0xff; + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, ipproto_sctp); - case tcp_v6_flow: - rule->tuples.ip_proto = ipproto_tcp; - rule->tuples_mask.ip_proto = 0xff; + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, ipproto_tcp); + hclge_fd_get_tcpip4_tuple(hdev, fs, rule, ipproto_udp); + break; + case ip_user_flow: + hclge_fd_get_ip4_tuple(hdev, fs, rule); + break; + case sctp_v6_flow: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, ipproto_sctp); + break; + case tcp_v6_flow: + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, ipproto_tcp); + break; - rule->tuples.ip_proto = ipproto_udp; - rule->tuples_mask.ip_proto = 0xff; + hclge_fd_get_tcpip6_tuple(hdev, fs, rule, ipproto_udp); - default: + case ipv6_user_flow: + hclge_fd_get_ip6_tuple(hdev, fs, rule); + case ether_flow: + hclge_fd_get_ether_tuple(hdev, fs, rule); + break; + default: + return -eopnotsupp;
Networking
74b755d1dbf1c4ff6f0cc4513e573eb15c0e7dfc
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: refactor for function hclge_fd_convert_tuple
currently, there are too many branches for hclge_fd_convert_tuple(), and there may be more when adding new tuples. refactor it by sorting the tuples according to their length, so it only needs several key_opt now and is flexible for adding new tuples.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
2
97
104
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c - { outer_dst_mac, 48}, - { outer_src_mac, 48}, - { outer_vlan_tag_fst, 16}, - { outer_vlan_tag_sec, 16}, - { outer_eth_type, 16}, - { outer_l2_rsv, 16}, - { outer_ip_tos, 8}, - { outer_ip_proto, 8}, - { outer_src_ip, 32}, - { outer_dst_ip, 32}, - { outer_l3_rsv, 16}, - { outer_src_port, 16}, - { outer_dst_port, 16}, - { outer_l4_rsv, 32}, - { outer_tun_vni, 24}, - { outer_tun_flow_id, 8}, - { inner_dst_mac, 48}, - { inner_src_mac, 48}, - { inner_vlan_tag_fst, 16}, - { inner_vlan_tag_sec, 16}, - { inner_eth_type, 16}, - { inner_l2_rsv, 16}, - { inner_ip_tos, 8}, - { inner_ip_proto, 8}, - { inner_src_ip, 32}, - { inner_dst_ip, 32}, - { inner_l3_rsv, 16}, - { inner_src_port, 16}, - { inner_dst_port, 16}, - { inner_l4_rsv, 32}, + { outer_dst_mac, 48, key_opt_mac, -1, -1 }, + { outer_src_mac, 48, key_opt_mac, -1, -1 }, + { outer_vlan_tag_fst, 16, key_opt_le16, -1, -1 }, + { outer_vlan_tag_sec, 16, key_opt_le16, -1, -1 }, + { outer_eth_type, 16, key_opt_le16, -1, -1 }, + { outer_l2_rsv, 16, key_opt_le16, -1, -1 }, + { outer_ip_tos, 8, key_opt_u8, -1, -1 }, + { outer_ip_proto, 8, key_opt_u8, -1, -1 }, + { outer_src_ip, 32, key_opt_ip, -1, -1 }, + { outer_dst_ip, 32, key_opt_ip, -1, -1 }, + { outer_l3_rsv, 16, key_opt_le16, -1, -1 }, + { outer_src_port, 16, key_opt_le16, -1, -1 }, + { outer_dst_port, 16, key_opt_le16, -1, -1 }, + { outer_l4_rsv, 32, key_opt_le32, -1, -1 }, + { outer_tun_vni, 24, key_opt_vni, -1, -1 }, + { outer_tun_flow_id, 8, key_opt_u8, -1, -1 }, + { inner_dst_mac, 48, key_opt_mac, + offsetof(struct hclge_fd_rule, tuples.dst_mac), + offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) }, + { inner_src_mac, 48, key_opt_mac, + offsetof(struct hclge_fd_rule, tuples.src_mac), + offsetof(struct hclge_fd_rule, 
tuples_mask.src_mac) }, + { inner_vlan_tag_fst, 16, key_opt_le16, + offsetof(struct hclge_fd_rule, tuples.vlan_tag1), + offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) }, + { inner_vlan_tag_sec, 16, key_opt_le16, -1, -1 }, + { inner_eth_type, 16, key_opt_le16, + offsetof(struct hclge_fd_rule, tuples.ether_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) }, + { inner_l2_rsv, 16, key_opt_le16, -1, -1 }, + { inner_ip_tos, 8, key_opt_u8, + offsetof(struct hclge_fd_rule, tuples.ip_tos), + offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) }, + { inner_ip_proto, 8, key_opt_u8, + offsetof(struct hclge_fd_rule, tuples.ip_proto), + offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) }, + { inner_src_ip, 32, key_opt_ip, + offsetof(struct hclge_fd_rule, tuples.src_ip), + offsetof(struct hclge_fd_rule, tuples_mask.src_ip) }, + { inner_dst_ip, 32, key_opt_ip, + offsetof(struct hclge_fd_rule, tuples.dst_ip), + offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) }, + { inner_l3_rsv, 16, key_opt_le16, -1, -1 }, + { inner_src_port, 16, key_opt_le16, + offsetof(struct hclge_fd_rule, tuples.src_port), + offsetof(struct hclge_fd_rule, tuples_mask.src_port) }, + { inner_dst_port, 16, key_opt_le16, + offsetof(struct hclge_fd_rule, tuples.dst_port), + offsetof(struct hclge_fd_rule, tuples_mask.dst_port) }, + { inner_l4_rsv, 32, key_opt_le32, -1, -1 }, + int offset, moffset, ip_offset; + enum hclge_fd_key_opt key_opt; + u8 *p = (u8 *)rule; - if (rule->unused_tuple & tuple_bit) + if (rule->unused_tuple & bit(tuple_bit)) - switch (tuple_bit) { - case bit(inner_dst_mac): - for (i = 0; i < eth_alen; i++) { - calc_x(key_x[eth_alen - 1 - i], rule->tuples.dst_mac[i], - rule->tuples_mask.dst_mac[i]); - calc_y(key_y[eth_alen - 1 - i], rule->tuples.dst_mac[i], - rule->tuples_mask.dst_mac[i]); - } + key_opt = tuple_key_info[tuple_bit].key_opt; + offset = tuple_key_info[tuple_bit].offset; + moffset = tuple_key_info[tuple_bit].moffset; - return true; - case bit(inner_src_mac): - 
for (i = 0; i < eth_alen; i++) { - calc_x(key_x[eth_alen - 1 - i], rule->tuples.src_mac[i], - rule->tuples_mask.src_mac[i]); - calc_y(key_y[eth_alen - 1 - i], rule->tuples.src_mac[i], - rule->tuples_mask.src_mac[i]); - } + switch (key_opt) { + case key_opt_u8: + calc_x(*key_x, p[offset], p[moffset]); + calc_y(*key_y, p[offset], p[moffset]); - case bit(inner_vlan_tag_fst): - calc_x(tmp_x_s, rule->tuples.vlan_tag1, - rule->tuples_mask.vlan_tag1); - calc_y(tmp_y_s, rule->tuples.vlan_tag1, - rule->tuples_mask.vlan_tag1); + case key_opt_le16: + calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); + calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset])); - case bit(inner_eth_type): - calc_x(tmp_x_s, rule->tuples.ether_proto, - rule->tuples_mask.ether_proto); - calc_y(tmp_y_s, rule->tuples.ether_proto, - rule->tuples_mask.ether_proto); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); - - return true; - case bit(inner_ip_tos): - calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); - calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos); - - return true; - case bit(inner_ip_proto): - calc_x(*key_x, rule->tuples.ip_proto, - rule->tuples_mask.ip_proto); - calc_y(*key_y, rule->tuples.ip_proto, - rule->tuples_mask.ip_proto); - - return true; - case bit(inner_src_ip): - calc_x(tmp_x_l, rule->tuples.src_ip[ipv4_index], - rule->tuples_mask.src_ip[ipv4_index]); - calc_y(tmp_y_l, rule->tuples.src_ip[ipv4_index], - rule->tuples_mask.src_ip[ipv4_index]); + case key_opt_le32: + calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset])); - case bit(inner_dst_ip): - calc_x(tmp_x_l, rule->tuples.dst_ip[ipv4_index], - rule->tuples_mask.dst_ip[ipv4_index]); - calc_y(tmp_y_l, rule->tuples.dst_ip[ipv4_index], - rule->tuples_mask.dst_ip[ipv4_index]); - *(__le32 *)key_x = cpu_to_le32(tmp_x_l); - *(__le32 *)key_y = cpu_to_le32(tmp_y_l); - - return true; - case 
bit(inner_src_port): - calc_x(tmp_x_s, rule->tuples.src_port, - rule->tuples_mask.src_port); - calc_y(tmp_y_s, rule->tuples.src_port, - rule->tuples_mask.src_port); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + case key_opt_mac: + for (i = 0; i < eth_alen; i++) { + calc_x(key_x[eth_alen - 1 - i], p[offset + i], + p[moffset + i]); + calc_y(key_y[eth_alen - 1 - i], p[offset + i], + p[moffset + i]); + } - case bit(inner_dst_port): - calc_x(tmp_x_s, rule->tuples.dst_port, - rule->tuples_mask.dst_port); - calc_y(tmp_y_s, rule->tuples.dst_port, - rule->tuples_mask.dst_port); - *(__le16 *)key_x = cpu_to_le16(tmp_x_s); - *(__le16 *)key_y = cpu_to_le16(tmp_y_s); + case key_opt_ip: + ip_offset = ipv4_index * sizeof(u32); + calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]), + *(u32 *)(&p[moffset + ip_offset])); + *(__le32 *)key_x = cpu_to_le32(tmp_x_l); + *(__le32 *)key_y = cpu_to_le32(tmp_y_l); - u32 check_tuple; - check_tuple = key_cfg->tuple_active & bit(i); + if (!(key_cfg->tuple_active & bit(i))) + continue; - tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x, + tuple_valid = hclge_fd_convert_tuple(i, cur_key_x, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +enum hclge_fd_key_opt { + key_opt_u8, + key_opt_le16, + key_opt_le32, + key_opt_mac, + key_opt_ip, + key_opt_vni, +}; + + enum hclge_fd_key_opt key_opt; + int offset; + int moffset;
Networking
fb72699dfef8706abe203ec8c8fc69a023c161ce
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: add support for traffic class tuple support for flow director by ethtool
the hardware supports parsing and matching the traffic class field of ipv6 packets for flow director, using the same tuple as ip tos. so the driver removes the limitation on configuring 'tclass'.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['c']
1
20
7
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c - *unused_tuple |= bit(inner_src_mac) | bit(inner_dst_mac) | - bit(inner_ip_tos); + *unused_tuple |= bit(inner_src_mac) | bit(inner_dst_mac); - if (spec->tclass) - return -eopnotsupp; + if (!spec->tclass) + *unused_tuple |= bit(inner_ip_tos); - bit(inner_ip_tos) | bit(inner_src_port) | bit(inner_dst_port); + bit(inner_src_port) | bit(inner_dst_port); - if (spec->tclass) - return -eopnotsupp; + if (!spec->tclass) + *unused_tuple |= bit(inner_ip_tos); + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; + + rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass; + rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass; + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & bit(inner_ip_tos) ? + 0 : rule->tuples_mask.ip_tos; + + spec->tclass = rule->tuples.ip_tos; + spec_mask->tclass = rule->unused_tuple & bit(inner_ip_tos) ? + 0 : rule->tuples_mask.ip_tos; +
Networking
ae4811913f576d3a891e2ca8a3ad11746f644c69
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: refactor flow director configuration
currently, the flow director rule of arfs is configured in the io path, which is time-consuming. so move the configuration out and perform it asynchronously, while keeping ethtool and tc flower rules synchronous; otherwise the application may be unable to know whether the rule is installed or pending.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
2
319
209
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c -static void hclge_clear_arfs_rules(struct hnae3_handle *handle); +static int hclge_clear_arfs_rules(struct hclge_dev *hdev); +static void hclge_sync_fd_table(struct hclge_dev *hdev); + hclge_sync_fd_table(hdev); +static void hclge_sync_fd_state(struct hclge_dev *hdev) +{ + if (hlist_empty(&hdev->fd_rule_list)) + hdev->fd_active_type = hclge_fd_rule_none; +} + +static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (!test_bit(location, hdev->fd_bmap)) { + set_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num++; + } +} + +static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) +{ + if (test_bit(location, hdev->fd_bmap)) { + clear_bit(location, hdev->fd_bmap); + hdev->hclge_fd_rule_num--; + } +} + +static void hclge_fd_free_node(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + hlist_del(&rule->rule_node); + kfree(rule); + hclge_sync_fd_state(hdev); +} + +static void hclge_update_fd_rule_node(struct hclge_dev *hdev, + struct hclge_fd_rule *old_rule, + struct hclge_fd_rule *new_rule, + enum hclge_fd_node_state state) +{ + switch (state) { + case hclge_fd_to_add: + case hclge_fd_active: + /* 1) if the new state is to_add, just replace the old rule + * with the same location, no matter its state, because the + * new rule will be configured to the hardware. + * 2) if the new state is active, it means the new rule + * has been configured to the hardware, so just replace + * the old rule node with the same location. + * 3) for it doesn't add a new node to the list, so it's + * unnecessary to update the rule number and fd_bmap. 
+ */ + new_rule->rule_node.next = old_rule->rule_node.next; + new_rule->rule_node.pprev = old_rule->rule_node.pprev; + memcpy(old_rule, new_rule, sizeof(*old_rule)); + kfree(new_rule); + break; + case hclge_fd_deleted: + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + break; + case hclge_fd_to_del: + /* if new request is to_del, and old rule is existent + * 1) the state of old rule is to_del, we need do nothing, + * because we delete rule by location, other rule content + * is unncessary. + * 2) the state of old rule is active, we need to change its + * state to to_del, so the rule will be deleted when periodic + * task being scheduled. + * 3) the state of old rule is to_add, it means the rule hasn't + * been added to hardware, so we just delete the rule node from + * fd_rule_list directly. + */ + if (old_rule->state == hclge_fd_to_add) { + hclge_fd_dec_rule_cnt(hdev, old_rule->location); + hclge_fd_free_node(hdev, old_rule); + return; + } + old_rule->state = hclge_fd_to_del; + break; + } +} + +static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist, + u16 location, + struct hclge_fd_rule **parent) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return null; + /* record the parent node, use to keep the nodes in fd_rule_list + * in ascend order. 
+ */ + *parent = rule; + } + + return null; +} + +/* insert fd rule node in ascend order according to rule->location */ +static void hclge_fd_insert_rule_node(struct hlist_head *hlist, + struct hclge_fd_rule *rule, + struct hclge_fd_rule *parent) +{ + init_hlist_node(&rule->rule_node); + + if (parent) + hlist_add_behind(&rule->rule_node, &parent->rule_node); + else + hlist_add_head(&rule->rule_node, hlist); +} + +static void hclge_update_fd_list(struct hclge_dev *hdev, + enum hclge_fd_node_state state, u16 location, + struct hclge_fd_rule *new_rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = null; + + fd_rule = hclge_find_fd_rule(hlist, location, &parent); + if (fd_rule) { + hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); + return; + } + + /* it's unlikely to fail here, because we have checked the rule + * exist before. + */ + if (unlikely(state == hclge_fd_to_del || state == hclge_fd_deleted)) { + dev_warn(&hdev->pdev->dev, + "failed to delete fd rule %u, it's inexistent ", + location); + return; + } + + hclge_fd_insert_rule_node(hlist, new_rule, parent); + hclge_fd_inc_rule_cnt(hdev, new_rule->location); + + if (state == hclge_fd_to_add) { + set_bit(hclge_state_fd_tbl_changed, &hdev->state); + hclge_task_schedule(hdev, 0); + } +} + -static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location) -{ - struct hclge_fd_rule *rule = null; - struct hlist_node *node2; - - spin_lock_bh(&hdev->fd_rule_lock); - hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { - if (rule->location >= location) - break; - } - - spin_unlock_bh(&hdev->fd_rule_lock); - - return rule && rule->location == location; -} - -/* make sure being called after lock up with fd_rule_lock */ -static int hclge_fd_update_rule_list(struct hclge_dev *hdev, - struct hclge_fd_rule *new_rule, - u16 location, - bool is_add) -{ - struct hclge_fd_rule *rule = null, *parent = null; - struct hlist_node *node2; - - if (is_add 
&& !new_rule) - return -einval; - - hlist_for_each_entry_safe(rule, node2, - &hdev->fd_rule_list, rule_node) { - if (rule->location >= location) - break; - parent = rule; - } - - if (rule && rule->location == location) { - hlist_del(&rule->rule_node); - kfree(rule); - hdev->hclge_fd_rule_num--; - - if (!is_add) { - if (!hdev->hclge_fd_rule_num) - hdev->fd_active_type = hclge_fd_rule_none; - clear_bit(location, hdev->fd_bmap); - - return 0; - } - } else if (!is_add) { - dev_err(&hdev->pdev->dev, - "delete fail, rule %u is inexistent ", - location); - return -einval; - } - - init_hlist_node(&new_rule->rule_node); - - if (parent) - hlist_add_behind(&new_rule->rule_node, &parent->rule_node); - else - hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list); - - set_bit(location, hdev->fd_bmap); - hdev->hclge_fd_rule_num++; - hdev->fd_active_type = new_rule->rule_type; - - return 0; -} - -/* make sure being called after lock up with fd_rule_lock */ - if (!rule) { + ret = hclge_config_action(hdev, hclge_fd_stage_1, rule); + if (ret) + return ret; + + return hclge_config_key(hdev, hclge_fd_stage_1, rule); +} + +static int hclge_add_fd_entry_common(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + int ret; + + spin_lock_bh(&hdev->fd_rule_lock); + + if (hdev->fd_active_type != rule->rule_type && + (hdev->fd_active_type == hclge_fd_tc_flower_active || + hdev->fd_active_type == hclge_fd_ep_active)) { - "the flow director rule is null "); + "mode conflict(new type %d, active type %d), please delete existent rules first ", + rule->rule_type, hdev->fd_active_type); + spin_unlock_bh(&hdev->fd_rule_lock); - /* it will never fail here, so needn't to check return value */ - hclge_fd_update_rule_list(hdev, rule, rule->location, true); - - ret = hclge_config_action(hdev, hclge_fd_stage_1, rule); + ret = hclge_clear_arfs_rules(hdev); - goto clear_rule; + goto out; - ret = hclge_config_key(hdev, hclge_fd_stage_1, rule); + ret = hclge_fd_config_rule(hdev, rule); - goto 
clear_rule; + goto out; - return 0; + hclge_update_fd_list(hdev, hclge_fd_active, rule->location, rule); + hdev->fd_active_type = rule->rule_type; -clear_rule: - hclge_fd_update_rule_list(hdev, rule, rule->location, false); +out: + spin_unlock_bh(&hdev->fd_rule_lock); - if (hclge_is_cls_flower_active(handle)) { - dev_err(&hdev->pdev->dev, - "please delete all exist cls flower rules first "); - return -einval; - } - - /* to avoid rule conflict, when user configure rule by ethtool, - * we need to clear all arfs rules - */ - spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); - - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); - if (hclge_is_cls_flower_active(handle) || !hdev->hclge_fd_rule_num || - !hclge_fd_rule_exist(hdev, fs->location)) { + spin_lock_bh(&hdev->fd_rule_lock); + if (hdev->fd_active_type == hclge_fd_tc_flower_active || + !test_bit(fs->location, hdev->fd_bmap)) { + spin_unlock_bh(&hdev->fd_rule_lock); - return ret; + goto out; - spin_lock_bh(&hdev->fd_rule_lock); - ret = hclge_fd_update_rule_list(hdev, null, fs->location, false); + hclge_update_fd_list(hdev, hclge_fd_deleted, fs->location, null); +out: - -/* make sure being called after lock up with fd_rule_lock */ -static void hclge_del_all_fd_entries(struct hnae3_handle *handle, - bool clear_list) +static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, + bool clear_list) - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + spin_lock_bh(&hdev->fd_rule_lock); + + + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static void hclge_del_all_fd_entries(struct hnae3_handle *handle, + bool clear_list) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_clear_fd_rules_in_list(hdev, clear_list); - int ret; - ret = hclge_config_action(hdev, hclge_fd_stage_1, rule); - if (!ret) - ret = 
hclge_config_key(hdev, hclge_fd_stage_1, rule); - - if (ret) { - dev_warn(&hdev->pdev->dev, - "restore rule %u failed, remove it ", - rule->location); - clear_bit(rule->location, hdev->fd_bmap); - hlist_del(&rule->rule_node); - kfree(rule); - hdev->hclge_fd_rule_num--; - } + if (rule->state == hclge_fd_active) + rule->state = hclge_fd_to_add; - - if (hdev->hclge_fd_rule_num) - hdev->fd_active_type = hclge_fd_ep_active; - + set_bit(hclge_state_fd_tbl_changed, &hdev->state); + if (rule->state == hclge_fd_to_del) + continue; + - u16 tmp_queue_id; - int ret; - set_bit(bit_id, hdev->fd_bmap); - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); - - if (ret) - return ret; - - return rule->location; + hclge_update_fd_list(hdev, hclge_fd_to_add, rule->location, + rule); + hdev->fd_active_type = hclge_fd_arfs_active; + } else if (rule->queue_id != queue_id) { + rule->queue_id = queue_id; + rule->state = hclge_fd_to_add; + set_bit(hclge_state_fd_tbl_changed, &hdev->state); + hclge_task_schedule(hdev, 0); - - - if (rule->queue_id == queue_id) - return rule->location; - - tmp_queue_id = rule->queue_id; - rule->queue_id = queue_id; - ret = hclge_config_action(hdev, hclge_fd_stage_1, rule); - if (ret) { - rule->queue_id = tmp_queue_id; - return ret; - } - - hlist_head(del_list); + if (rule->state != hclge_fd_active) + continue; - hlist_del_init(&rule->rule_node); - hlist_add_head(&rule->rule_node, &del_list); - hdev->hclge_fd_rule_num--; - clear_bit(rule->location, hdev->fd_bmap); + rule->state = hclge_fd_to_del; + set_bit(hclge_state_fd_tbl_changed, &hdev->state); - - hlist_for_each_entry_safe(rule, node, &del_list, rule_node) { - hclge_fd_tcam_config(hdev, hclge_fd_stage_1, true, - rule->location, null, false); - kfree(rule); - } -static void hclge_clear_arfs_rules(struct hnae3_handle *handle) +static int hclge_clear_arfs_rules(struct hclge_dev *hdev) - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + 
struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret; + + if (hdev->fd_active_type != hclge_fd_arfs_active) + return 0; + + hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { + switch (rule->state) { + case hclge_fd_to_del: + case hclge_fd_active: + ret = hclge_fd_tcam_config(hdev, hclge_fd_stage_1, true, + rule->location, null, false); + if (ret) + return ret; + fallthrough; + case hclge_fd_to_add: + hclge_fd_dec_rule_cnt(hdev, rule->location); + hlist_del(&rule->rule_node); + kfree(rule); + break; + default: + break; + } + } + hclge_sync_fd_state(hdev); - if (hdev->fd_active_type == hclge_fd_arfs_active) - hclge_del_all_fd_entries(handle, true); + return 0; - if (hdev->fd_active_type == hclge_fd_ep_active) { - dev_err(&hdev->pdev->dev, - "please remove all exist fd rules via ethtool first "); - return -einval; - } - - if (ret) - goto err; + if (ret) { + kfree(rule); + return ret; + } - spin_lock_bh(&hdev->fd_rule_lock); - hclge_clear_arfs_rules(handle); - - ret = hclge_fd_config_rule(hdev, rule); - - spin_unlock_bh(&hdev->fd_rule_lock); - - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to add cls flower rule, ret = %d ", ret); - goto err; - } + ret = hclge_add_fd_entry_common(hdev, rule); + if (ret) + kfree(rule); - return 0; -err: - kfree(rule); - dev_err(&hdev->pdev->dev, - "failed to delete cls flower rule %u, ret = %d ", - rule->location, ret); - ret = hclge_fd_update_rule_list(hdev, null, rule->location, false); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to delete cls flower rule %u in list, ret = %d ", - rule->location, ret); - spin_unlock_bh(&hdev->fd_rule_lock); - return ret; + hclge_update_fd_list(hdev, hclge_fd_deleted, rule->location, null); + spin_unlock_bh(&hdev->fd_rule_lock); + + return 0; +} + +static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) +{ + struct hclge_fd_rule *rule; + struct hlist_node *node; + int ret = 0; + + if (!test_and_clear_bit(hclge_state_fd_tbl_changed, 
&hdev->state)) + return; + + spin_lock_bh(&hdev->fd_rule_lock); + + hlist_for_each_entry_safe(rule, node, hlist, rule_node) { + switch (rule->state) { + case hclge_fd_to_add: + ret = hclge_fd_config_rule(hdev, rule); + if (ret) + goto out; + rule->state = hclge_fd_active; + break; + case hclge_fd_to_del: + ret = hclge_fd_tcam_config(hdev, hclge_fd_stage_1, true, + rule->location, null, false); + if (ret) + goto out; + hclge_fd_dec_rule_cnt(hdev, rule->location); + hclge_fd_free_node(hdev, rule); + break; + default: + break; + } +out: + if (ret) + set_bit(hclge_state_fd_tbl_changed, &hdev->state); + +} - return 0; +static void hclge_sync_fd_table(struct hclge_dev *hdev) +{ + if (test_and_clear_bit(hclge_state_fd_clear_all, &hdev->state)) { + bool clear_list = hdev->fd_active_type == hclge_fd_arfs_active; + + hclge_clear_fd_rules_in_list(hdev, clear_list); + } + + hclge_sync_fd_list(hdev, &hdev->fd_rule_list); - bool clear; - clear = hdev->fd_active_type == hclge_fd_arfs_active; - if (!enable) { - spin_lock_bh(&hdev->fd_rule_lock); - hclge_del_all_fd_entries(handle, clear); - spin_unlock_bh(&hdev->fd_rule_lock); - } else { + if (!enable) + set_bit(hclge_state_fd_clear_all, &hdev->state); + else - } + + hclge_task_schedule(hdev, 0); - hclge_clear_arfs_rules(handle); + hclge_clear_arfs_rules(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h + hclge_state_fd_tbl_changed, + hclge_state_fd_clear_all, +enum hclge_fd_node_state { + hclge_fd_to_add, + hclge_fd_to_del, + hclge_fd_active, + hclge_fd_deleted, +}; + + enum hclge_fd_node_state state;
Networking
fc4243b8de8b4e7170f07f2660dcab3f8ecda0e9
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: refine for hns3_del_all_fd_entries()
for only pf driver can configure flow director rule, it's better to call hclge_del_all_fd_entries() directly in hclge layer, rather than call hns3_del_all_fd_entries() in hns3 layer. then the ae_algo->ops.del_all_fd_entries can be removed.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
3
3
19
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h - void (*del_all_fd_entries)(struct hnae3_handle *handle, - bool clear_list); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c -static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->del_all_fd_entries) - h->ae_algo->ops->del_all_fd_entries(h, clear_list); -} - - hns3_del_all_fd_rules(netdev, true); - diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c -static void hclge_del_all_fd_entries(struct hnae3_handle *handle, - bool clear_list) +static void hclge_del_all_fd_entries(struct hclge_dev *hdev) - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - hclge_clear_fd_rules_in_list(hdev, clear_list); + hclge_clear_fd_rules_in_list(hdev, true); + hclge_del_all_fd_entries(hdev); - .del_all_fd_entries = hclge_del_all_fd_entries,
Networking
f07203b0180f62791371cb50fb1afacd826250fc
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
net: hns3: add support for user-def data of flow director
for device_version_v3, the hardware supports to match specified data in the specified offset of packet payload. each layer can have one offset, and can't be masked when configure flow director rule by ethtool command. the layer is selected based on the flow-type, ether for l2, ip4/ipv6 for l3, and tcp4/tcp6/udp4/udp6 for l4. for example, tcp4/tcp6/udp4/udp6 rules share the same user-def offset, but each rule can have its own user-def value.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
refactor and new features for flow director
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['hns3 ']
['h', 'c']
3
359
14
--- diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h + hclge_opc_fd_user_def_op = 0x1207, +#define hclge_fd_user_def_oft_s 0 +#define hclge_fd_user_def_oft_m genmask(14, 0) +#define hclge_fd_user_def_en_b 15 +struct hclge_fd_user_def_cfg_cmd { + __le16 ol2_cfg; + __le16 l2_cfg; + __le16 ol3_cfg; + __le16 l3_cfg; + __le16 ol4_cfg; + __le16 l4_cfg; + u8 rsv[12]; +}; + diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c - { inner_l2_rsv, 16, key_opt_le16, -1, -1 }, + { inner_l2_rsv, 16, key_opt_le16, + offsetof(struct hclge_fd_rule, tuples.l2_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) }, - { inner_l3_rsv, 16, key_opt_le16, -1, -1 }, + { inner_l3_rsv, 16, key_opt_le16, + offsetof(struct hclge_fd_rule, tuples.l3_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) }, - { inner_l4_rsv, 32, key_opt_le32, -1, -1 }, + { inner_l4_rsv, 32, key_opt_le32, + offsetof(struct hclge_fd_rule, tuples.l4_user_def), + offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) }, +static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, + struct hclge_fd_user_def_cfg *cfg) +{ + struct hclge_fd_user_def_cfg_cmd *req; + struct hclge_desc desc; + u16 data = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, hclge_opc_fd_user_def_op, false); + + req = (struct hclge_fd_user_def_cfg_cmd *)desc.data; + + hnae3_set_bit(data, hclge_fd_user_def_en_b, cfg[0].ref_cnt > 0); + hnae3_set_field(data, hclge_fd_user_def_oft_m, + hclge_fd_user_def_oft_s, cfg[0].offset); + req->ol2_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, hclge_fd_user_def_en_b, 
cfg[1].ref_cnt > 0); + hnae3_set_field(data, hclge_fd_user_def_oft_m, + hclge_fd_user_def_oft_s, cfg[1].offset); + req->ol3_cfg = cpu_to_le16(data); + + data = 0; + hnae3_set_bit(data, hclge_fd_user_def_en_b, cfg[2].ref_cnt > 0); + hnae3_set_field(data, hclge_fd_user_def_oft_m, + hclge_fd_user_def_oft_s, cfg[2].offset); + req->ol4_cfg = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to set fd user def data, ret= %d ", ret); + return ret; +} + +static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) +{ + int ret; + + if (!test_and_clear_bit(hclge_state_fd_user_def_changed, &hdev->state)) + return; + + if (!locked) + spin_lock_bh(&hdev->fd_rule_lock); + + ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); + if (ret) + set_bit(hclge_state_fd_user_def_changed, &hdev->state); + + if (!locked) + spin_unlock_bh(&hdev->fd_rule_lock); +} + +static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hlist_head *hlist = &hdev->fd_rule_list; + struct hclge_fd_rule *fd_rule, *parent = null; + struct hclge_fd_user_def_info *info, *old_info; + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != hclge_fd_ep_active || + rule->ep.user_def.layer == hclge_fd_user_def_none) + return 0; + + /* for valid layer is start from 1, so need minus 1 to get the cfg */ + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + info = &rule->ep.user_def; + + if (!cfg->ref_cnt || cfg->offset == info->offset) + return 0; + + if (cfg->ref_cnt > 1) + goto error; + + fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent); + if (fd_rule) { + old_info = &fd_rule->ep.user_def; + if (info->layer == old_info->layer) + return 0; + } + +error: + dev_err(&hdev->pdev->dev, + "no available offset for layer%d fd rule, each layer only support one user def offset. 
", + info->layer + 1); + return -enospc; +} + +static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != hclge_fd_ep_active || + rule->ep.user_def.layer == hclge_fd_user_def_none) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) { + cfg->offset = rule->ep.user_def.offset; + set_bit(hclge_state_fd_user_def_changed, &hdev->state); + } + cfg->ref_cnt++; +} + +static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, + struct hclge_fd_rule *rule) +{ + struct hclge_fd_user_def_cfg *cfg; + + if (!rule || rule->rule_type != hclge_fd_ep_active || + rule->ep.user_def.layer == hclge_fd_user_def_none) + return; + + cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; + if (!cfg->ref_cnt) + return; + + cfg->ref_cnt--; + if (!cfg->ref_cnt) { + cfg->offset = 0; + set_bit(hclge_state_fd_user_def_changed, &hdev->state); + } +} + + hclge_fd_dec_user_def_refcnt(hdev, fd_rule); + if (state == hclge_fd_active) + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + + hclge_fd_inc_user_def_refcnt(hdev, new_rule); + hclge_sync_fd_user_def_cfg(hdev, true); + +static void hclge_fd_disable_user_def(struct hclge_dev *hdev) +{ + struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; + + spin_lock_bh(&hdev->fd_rule_lock); + memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); + spin_unlock_bh(&hdev->fd_rule_lock); + + hclge_fd_set_user_def_cmd(hdev, cfg); +} + - if (hdev->fd_cfg.fd_mode == hclge_fd_mode_depth_2k_width_400b_stage_1) + if (hdev->fd_cfg.fd_mode == hclge_fd_mode_depth_2k_width_400b_stage_1) { + if (hdev->ae_dev->dev_version >= hnae3_device_version_v3) + key_cfg->tuple_active |= hclge_fd_tuple_user_def_tuples; + } +static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + switch (flow_type) { + case 
ether_flow: + info->layer = hclge_fd_user_def_l2; + *unused_tuple &= ~bit(inner_l2_rsv); + break; + case ip_user_flow: + case ipv6_user_flow: + info->layer = hclge_fd_user_def_l3; + *unused_tuple &= ~bit(inner_l3_rsv); + break; + case tcp_v4_flow: + case udp_v4_flow: + case tcp_v6_flow: + case udp_v6_flow: + info->layer = hclge_fd_user_def_l4; + *unused_tuple &= ~bit(inner_l4_rsv); + break; + default: + return -eopnotsupp; + } + + return 0; +} + +static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs) +{ + return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0; +} + +static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, + struct ethtool_rx_flow_spec *fs, + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) +{ + u32 tuple_active = hdev->fd_cfg.key_cfg[hclge_fd_stage_1].tuple_active; + u32 flow_type = fs->flow_type & ~(flow_ext | flow_mac_ext); + u16 data, offset, data_mask, offset_mask; + int ret; + + info->layer = hclge_fd_user_def_none; + *unused_tuple |= hclge_fd_tuple_user_def_tuples; + + if (!(fs->flow_type & flow_ext) || hclge_fd_is_user_def_all_masked(fs)) + return 0; + + /* user-def data from ethtool is 64 bit value, the bit0~15 is used + * for data, and bit32~47 is used for offset. 
+ */ + data = be32_to_cpu(fs->h_ext.data[1]) & hclge_fd_user_def_data; + data_mask = be32_to_cpu(fs->m_ext.data[1]) & hclge_fd_user_def_data; + offset = be32_to_cpu(fs->h_ext.data[0]) & hclge_fd_user_def_offset; + offset_mask = be32_to_cpu(fs->m_ext.data[0]) & hclge_fd_user_def_offset; + + if (!(tuple_active & hclge_fd_tuple_user_def_tuples)) { + dev_err(&hdev->pdev->dev, "user-def bytes are not supported "); + return -eopnotsupp; + } + + if (offset > hclge_fd_max_user_def_offset) { + dev_err(&hdev->pdev->dev, + "user-def offset[%u] should be no more than %u ", + offset, hclge_fd_max_user_def_offset); + return -einval; + } + + if (offset_mask != hclge_fd_user_def_offset_unmask) { + dev_err(&hdev->pdev->dev, "user-def offset can't be masked "); + return -einval; + } + + ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info); + if (ret) { + dev_err(&hdev->pdev->dev, + "unsupported flow type for user-def bytes, ret = %d ", + ret); + return ret; + } + + info->data = data; + info->data_mask = data_mask; + info->offset = offset; + + return 0; +} + - u32 *unused_tuple) + u32 *unused_tuple, + struct hclge_fd_user_def_info *info) - if ((fs->flow_type & flow_ext) && - (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) { - dev_err(&hdev->pdev->dev, "user-def bytes are not supported "); - return -eopnotsupp; - } + ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); + if (ret) + return ret; +static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info, + struct hclge_fd_rule *rule) +{ + switch (info->layer) { + case hclge_fd_user_def_l2: + rule->tuples.l2_user_def = info->data; + rule->tuples_mask.l2_user_def = info->data_mask; + break; + case hclge_fd_user_def_l3: + rule->tuples.l3_user_def = info->data; + rule->tuples_mask.l3_user_def = info->data_mask; + break; + case hclge_fd_user_def_l4: + rule->tuples.l4_user_def = (u32)info->data << 16; + rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16; + break; + default: + break; + } 
+ + rule->ep.user_def = *info; +} + - struct hclge_fd_rule *rule) + struct hclge_fd_rule *rule, + struct hclge_fd_user_def_info *info) + hclge_fd_get_user_def_tuple(info, rule); + ret = hclge_fd_check_user_def_refcnt(hdev, rule); + if (ret) + goto out; + + struct hclge_fd_user_def_info info; - ret = hclge_fd_check_spec(hdev, fs, &unused); + ret = hclge_fd_check_spec(hdev, fs, &unused, &info); - ret = hclge_fd_get_tuple(hdev, fs, rule); + ret = hclge_fd_get_tuple(hdev, fs, rule, &info); + hclge_fd_disable_user_def(hdev); +static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if ((rule->unused_tuple & hclge_fd_tuple_user_def_tuples) == + hclge_fd_tuple_user_def_tuples) { + fs->h_ext.data[0] = 0; + fs->h_ext.data[1] = 0; + fs->m_ext.data[0] = 0; + fs->m_ext.data[1] = 0; + } else { + fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset); + fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data); + fs->m_ext.data[0] = + cpu_to_be32(hclge_fd_user_def_offset_unmask); + fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask); + } +} + + + hclge_fd_get_user_def_info(fs, rule); + hclge_sync_fd_user_def_cfg(hdev, false); + - + set_bit(hclge_state_fd_user_def_changed, &hdev->state); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h + hclge_state_fd_user_def_changed, +#define hclge_fd_tuple_user_def_tuples \ + (bit(inner_l2_rsv) | bit(inner_l3_rsv) | bit(inner_l4_rsv)) + +#define hclge_fd_max_user_def_offset 9000 +#define hclge_fd_user_def_data genmask(15, 0) +#define hclge_fd_user_def_offset genmask(15, 0) +#define hclge_fd_user_def_offset_unmask genmask(15, 0) + +enum hclge_fd_user_def_layer { + hclge_fd_user_def_none, + hclge_fd_user_def_l2, + hclge_fd_user_def_l3, + hclge_fd_user_def_l4, +}; + +#define 
hclge_fd_user_def_layer_num 3 +struct hclge_fd_user_def_cfg { + u16 ref_cnt; + u16 offset; +}; + +struct hclge_fd_user_def_info { + enum hclge_fd_user_def_layer layer; + u16 data; + u16 data_mask; + u16 offset; +}; + + struct hclge_fd_user_def_cfg user_def_cfg[hclge_fd_user_def_layer_num]; + u16 l2_user_def; + u16 l3_user_def; + u32 l4_user_def; + struct { + struct hclge_fd_user_def_info user_def; + } ep;
Networking
67b0e1428e2f592c0fc2c7f682a5a049158782b8
jian shen
drivers
net
ethernet, hisilicon, hns3, hns3pf
iavf: add framework to enable ethtool rss config
add the virtchnl message interface to vf, so that vf can request rss input set(s) based on pf's capability.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add framework to enable ethtool rss config
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
4
214
0
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h +#include "iavf_adv_rss.h" +#define iavf_flag_aq_add_adv_rss_cfg bit(27) +#define iavf_flag_aq_del_adv_rss_cfg bit(28) +#define adv_rss_support(_a) ((_a)->vf_res->vf_cap_flags & \ + virtchnl_vf_offload_adv_rss_pf) + + struct list_head adv_rss_list_head; + spinlock_t adv_rss_lock; /* protect the rss management list */ +void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter); +void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +/* spdx-license-identifier: gpl-2.0 */ +/* copyright (c) 2021, intel corporation. */ + +#ifndef _iavf_adv_rss_h_ +#define _iavf_adv_rss_h_ + +struct iavf_adapter; + +/* state of advanced rss configuration */ +enum iavf_adv_rss_state_t { + iavf_adv_rss_add_request, /* user requests to add rss */ + iavf_adv_rss_add_pending, /* rss pending add by the pf */ + iavf_adv_rss_del_request, /* driver requests to delete rss */ + iavf_adv_rss_del_pending, /* rss pending delete by the pf */ + iavf_adv_rss_active, /* rss configuration is active */ +}; + +/* bookkeeping of advanced rss configuration */ +struct iavf_adv_rss { + enum iavf_adv_rss_state_t state; + struct list_head list; + + struct virtchnl_rss_cfg cfg_msg; +}; +#endif /* _iavf_adv_rss_h_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c + struct iavf_adv_rss *rss; + /* remove all advance rss configuration */ + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) + rss->state = iavf_adv_rss_del_request; + 
spin_unlock_bh(&adapter->adv_rss_lock); + + adapter->aq_required |= iavf_flag_aq_del_adv_rss_cfg; + if (adapter->aq_required & iavf_flag_aq_add_adv_rss_cfg) { + iavf_add_adv_rss_cfg(adapter); + return 0; + } + if (adapter->aq_required & iavf_flag_aq_del_adv_rss_cfg) { + iavf_del_adv_rss_cfg(adapter); + return 0; + } + spin_lock_init(&adapter->adv_rss_lock); + init_list_head(&adapter->adv_rss_list_head); + struct iavf_adv_rss *rss, *rsstmp; + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + list_del(&rss->list); + kfree(rss); + } + spin_unlock_bh(&adapter->adv_rss_lock); + diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c + virtchnl_vf_offload_adv_rss_pf | +/** + * iavf_add_adv_rss_cfg + * @adapter: the vf adapter structure + * + * request that the pf add rss configuration as specified + * by the user via ethtool. 
+ **/ +void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_cfg *rss_cfg; + struct iavf_adv_rss *rss; + bool process_rss = false; + int len; + + if (adapter->current_op != virtchnl_op_unknown) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "cannot add rss configuration, command %d pending ", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_rss_cfg); + rss_cfg = kzalloc(len, gfp_kernel); + if (!rss_cfg) + return; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == iavf_adv_rss_add_request) { + process_rss = true; + rss->state = iavf_adv_rss_add_pending; + memcpy(rss_cfg, &rss->cfg_msg, len); + break; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (process_rss) { + adapter->current_op = virtchnl_op_add_rss_cfg; + iavf_send_pf_msg(adapter, virtchnl_op_add_rss_cfg, + (u8 *)rss_cfg, len); + } else { + adapter->aq_required &= ~iavf_flag_aq_add_adv_rss_cfg; + } + + kfree(rss_cfg); +} + +/** + * iavf_del_adv_rss_cfg + * @adapter: the vf adapter structure + * + * request that the pf delete rss configuration as specified + * by the user via ethtool. 
+ **/ +void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter) +{ + struct virtchnl_rss_cfg *rss_cfg; + struct iavf_adv_rss *rss; + bool process_rss = false; + int len; + + if (adapter->current_op != virtchnl_op_unknown) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "cannot remove rss configuration, command %d pending ", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_rss_cfg); + rss_cfg = kzalloc(len, gfp_kernel); + if (!rss_cfg) + return; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == iavf_adv_rss_del_request) { + process_rss = true; + rss->state = iavf_adv_rss_del_pending; + memcpy(rss_cfg, &rss->cfg_msg, len); + break; + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (process_rss) { + adapter->current_op = virtchnl_op_del_rss_cfg; + iavf_send_pf_msg(adapter, virtchnl_op_del_rss_cfg, + (u8 *)rss_cfg, len); + } else { + adapter->aq_required &= ~iavf_flag_aq_del_adv_rss_cfg; + } + + kfree(rss_cfg); +} + + case virtchnl_op_add_rss_cfg: { + struct iavf_adv_rss *rss, *rss_tmp; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rss_tmp, + &adapter->adv_rss_list_head, + list) { + if (rss->state == iavf_adv_rss_add_pending) { + list_del(&rss->list); + kfree(rss); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case virtchnl_op_del_rss_cfg: { + struct iavf_adv_rss *rss; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, &adapter->adv_rss_list_head, + list) { + if (rss->state == iavf_adv_rss_del_pending) { + rss->state = iavf_adv_rss_active; + dev_err(&adapter->pdev->dev, "failed to delete rss configuration, error %s ", + iavf_stat_str(&adapter->hw, + v_retval)); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case virtchnl_op_add_rss_cfg: { + struct iavf_adv_rss *rss; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry(rss, 
&adapter->adv_rss_list_head, list) + if (rss->state == iavf_adv_rss_add_pending) + rss->state = iavf_adv_rss_active; + spin_unlock_bh(&adapter->adv_rss_lock); + } + break; + case virtchnl_op_del_rss_cfg: { + struct iavf_adv_rss *rss, *rss_tmp; + + spin_lock_bh(&adapter->adv_rss_lock); + list_for_each_entry_safe(rss, rss_tmp, + &adapter->adv_rss_list_head, list) { + if (rss->state == iavf_adv_rss_del_pending) { + list_del(&rss->list); + kfree(rss); + } + } + spin_unlock_bh(&adapter->adv_rss_lock); + } + break;
Networking
0aaeb4fbc842b9e6ef11ee1415e6e88171056afb
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: add framework to enable ethtool ntuple filters
enable ethtool ntuple filter support on the vf driver using the virtchnl interface to the pf driver and the flow director functionality in the hardware.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add framework to enable ethtool ntuple filters
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
4
257
1
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h +#include "iavf_fdir.h" +#define iavf_flag_aq_add_fdir_filter bit(25) +#define iavf_flag_aq_del_fdir_filter bit(26) +#define fdir_fltr_support(_a) ((_a)->vf_res->vf_cap_flags & \ + virtchnl_vf_offload_fdir_pf) + +#define iavf_max_fdir_filters 128 /* max allowed flow director filters */ + u16 fdir_active_fltr; + struct list_head fdir_list_head; + spinlock_t fdir_fltr_lock; /* protect the flow director filter list */ +void iavf_add_fdir_filter(struct iavf_adapter *adapter); +void iavf_del_fdir_filter(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h +/* spdx-license-identifier: gpl-2.0 */ +/* copyright (c) 2021, intel corporation. */ + +#ifndef _iavf_fdir_h_ +#define _iavf_fdir_h_ + +struct iavf_adapter; + +/* state of flow director filter */ +enum iavf_fdir_fltr_state_t { + iavf_fdir_fltr_add_request, /* user requests to add filter */ + iavf_fdir_fltr_add_pending, /* filter pending add by the pf */ + iavf_fdir_fltr_del_request, /* user requests to delete filter */ + iavf_fdir_fltr_del_pending, /* filter pending delete by the pf */ + iavf_fdir_fltr_active, /* filter is active */ +}; + +/* bookkeeping of flow director filters */ +struct iavf_fdir_fltr { + enum iavf_fdir_fltr_state_t state; + struct list_head list; + + u32 flow_id; + + struct virtchnl_fdir_add vc_add_msg; +}; + +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); +struct 
iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc); +#endif /* _iavf_fdir_h_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c - struct iavf_mac_filter *f; + struct iavf_fdir_fltr *fdir; + struct iavf_mac_filter *f; + /* remove all flow director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + fdir->state = iavf_fdir_fltr_del_request; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + adapter->aq_required |= iavf_flag_aq_del_fdir_filter; + if (adapter->aq_required & iavf_flag_aq_add_fdir_filter) { + iavf_add_fdir_filter(adapter); + return iavf_success; + } + if (adapter->aq_required & iavf_flag_aq_del_fdir_filter) { + iavf_del_fdir_filter(adapter); + return iavf_success; + } + spin_lock_init(&adapter->fdir_fltr_lock); + init_list_head(&adapter->fdir_list_head); + struct iavf_fdir_fltr *fdir, *fdirtmp; + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) { + list_del(&fdir->list); + kfree(fdir); + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c + virtchnl_vf_offload_fdir_pf | +/** + * iavf_add_fdir_filter + * @adapter: the vf adapter structure + * + * request that the pf add flow director filters as specified + * by the user via ethtool. 
+ **/ +void iavf_add_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_add *f; + bool process_fltr = false; + int len; + + if (adapter->current_op != virtchnl_op_unknown) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "cannot add flow director filter, command %d pending ", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_add); + f = kzalloc(len, gfp_kernel); + if (!f) + return; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == iavf_fdir_fltr_add_request) { + process_fltr = true; + fdir->state = iavf_fdir_fltr_add_pending; + memcpy(f, &fdir->vc_add_msg, len); + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + /* prevent iavf_add_fdir_filter() from being called when there + * are no filters to add + */ + adapter->aq_required &= ~iavf_flag_aq_add_fdir_filter; + kfree(f); + return; + } + adapter->current_op = virtchnl_op_add_fdir_filter; + iavf_send_pf_msg(adapter, virtchnl_op_add_fdir_filter, (u8 *)f, len); + kfree(f); +} + +/** + * iavf_del_fdir_filter + * @adapter: the vf adapter structure + * + * request that the pf delete flow director filters as specified + * by the user via ethtool. 
+ **/ +void iavf_del_fdir_filter(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir; + struct virtchnl_fdir_del f; + bool process_fltr = false; + int len; + + if (adapter->current_op != virtchnl_op_unknown) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "cannot remove flow director filter, command %d pending ", + adapter->current_op); + return; + } + + len = sizeof(struct virtchnl_fdir_del); + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { + if (fdir->state == iavf_fdir_fltr_del_request) { + process_fltr = true; + memset(&f, 0, len); + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = iavf_fdir_fltr_del_pending; + break; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (!process_fltr) { + adapter->aq_required &= ~iavf_flag_aq_del_fdir_filter; + return; + } + + adapter->current_op = virtchnl_op_del_fdir_filter; + iavf_send_pf_msg(adapter, virtchnl_op_del_fdir_filter, (u8 *)&f, len); +} + + case virtchnl_op_add_fdir_filter: { + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == iavf_fdir_fltr_add_pending) { + dev_info(&adapter->pdev->dev, "failed to add flow director filter, error %s ", + iavf_stat_str(&adapter->hw, + v_retval)); + if (msglen) + dev_err(&adapter->pdev->dev, + "%s ", msg); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case virtchnl_op_del_fdir_filter: { + struct iavf_fdir_fltr *fdir; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(fdir, &adapter->fdir_list_head, + list) { + if (fdir->state == iavf_fdir_fltr_del_pending) { + fdir->state = iavf_fdir_fltr_active; + dev_info(&adapter->pdev->dev, "failed to del flow director filter, error %s ", + 
iavf_stat_str(&adapter->hw, + v_retval)); + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case virtchnl_op_enable_vlan_stripping: + case virtchnl_op_disable_vlan_stripping: + dev_warn(&adapter->pdev->dev, "changing vlan stripping is not allowed when port vlan is configured "); + break; + case virtchnl_op_add_fdir_filter: { + struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, + &adapter->fdir_list_head, + list) { + if (fdir->state == iavf_fdir_fltr_add_pending) { + if (add_fltr->status == virtchnl_fdir_success) { + fdir->state = iavf_fdir_fltr_active; + fdir->flow_id = add_fltr->flow_id; + } else { + dev_info(&adapter->pdev->dev, "failed to add flow director filter with status: %d ", + add_fltr->status); + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break; + case virtchnl_op_del_fdir_filter: { + struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg; + struct iavf_fdir_fltr *fdir, *fdir_tmp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, + list) { + if (fdir->state == iavf_fdir_fltr_del_pending) { + if (del_fltr->status == virtchnl_fdir_success) { + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = iavf_fdir_fltr_active; + dev_info(&adapter->pdev->dev, "failed to delete flow director filter with status: %d ", + del_fltr->status); + } + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + } + break;
Networking
0dbfbabb840d711d7ea1627d88afd0520f374a90
haiyue wang chen bo box c chen intel com
drivers
net
ethernet, iavf, intel
iavf: support for modifying sctp rss flow hashing
provides the ability to enable sctp rss hashing by ethtool.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support for modifying sctp rss flow hashing
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
3
56
5
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +/** + * iavf_fill_adv_rss_sctp_hdr - fill the sctp rss protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the rss configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + virtchnl_set_proto_hdr_type(hdr, sctp); + + if (hash_flds & iavf_adv_rss_hash_fld_sctp_src_port) + virtchnl_add_proto_hdr_field_bit(hdr, sctp, src_port); + + if (hash_flds & iavf_adv_rss_hash_fld_sctp_dst_port) + virtchnl_add_proto_hdr_field_bit(hdr, sctp, dst_port); +} + + case iavf_adv_rss_flow_seg_hdr_sctp: + iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds); + break; + else if (packet_hdrs & iavf_adv_rss_flow_seg_hdr_sctp) + proto = "sctp"; - iavf_adv_rss_hash_fld_udp_src_port)) + iavf_adv_rss_hash_fld_udp_src_port | + iavf_adv_rss_hash_fld_sctp_src_port)) - iavf_adv_rss_hash_fld_udp_dst_port)) + iavf_adv_rss_hash_fld_udp_dst_port | + iavf_adv_rss_hash_fld_sctp_dst_port)) diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h + iavf_adv_rss_flow_seg_hdr_sctp = 0x00000010, - iavf_adv_rss_flow_seg_hdr_udp) + iavf_adv_rss_flow_seg_hdr_udp | \ + iavf_adv_rss_flow_seg_hdr_sctp) + iavf_adv_rss_flow_field_idx_sctp_src_port, + iavf_adv_rss_flow_field_idx_sctp_dst_port, +#define iavf_adv_rss_hash_fld_sctp_src_port \ + bit_ull(iavf_adv_rss_flow_field_idx_sctp_src_port) +#define iavf_adv_rss_hash_fld_sctp_dst_port \ + bit_ull(iavf_adv_rss_flow_field_idx_sctp_dst_port) diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- 
a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c + case sctp_v4_flow: + hdrs |= iavf_adv_rss_flow_seg_hdr_sctp | + iavf_adv_rss_flow_seg_hdr_ipv4; + break; + case sctp_v6_flow: + hdrs |= iavf_adv_rss_flow_seg_hdr_sctp | + iavf_adv_rss_flow_seg_hdr_ipv6; + break; + case sctp_v4_flow: + case sctp_v6_flow: + case sctp_v4_flow: + case sctp_v6_flow: + if (cmd->data & rxh_l4_b_0_1) + hfld |= iavf_adv_rss_hash_fld_sctp_src_port; + if (cmd->data & rxh_l4_b_2_3) + hfld |= iavf_adv_rss_hash_fld_sctp_dst_port; + break; - iavf_adv_rss_hash_fld_udp_src_port)) + iavf_adv_rss_hash_fld_udp_src_port | + iavf_adv_rss_hash_fld_sctp_src_port)) - iavf_adv_rss_hash_fld_udp_dst_port)) + iavf_adv_rss_hash_fld_udp_dst_port | + iavf_adv_rss_hash_fld_sctp_dst_port))
Networking
e41985f0fe8b68d1ac321bd4eda460fb553e65ad
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: support for modifying tcp rss flow hashing
provides the ability to enable tcp rss hashing by ethtool.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support for modifying tcp rss flow hashing
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c', 'makefile']
5
450
4
--- diff --git a/drivers/net/ethernet/intel/iavf/makefile b/drivers/net/ethernet/intel/iavf/makefile --- a/drivers/net/ethernet/intel/iavf/makefile +++ b/drivers/net/ethernet/intel/iavf/makefile + iavf_adv_rss.o \ diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +// spdx-license-identifier: gpl-2.0 +/* copyright (c) 2021, intel corporation. */ + +/* advanced rss configuration ethtool support for iavf */ + +#include "iavf.h" + +/** + * iavf_fill_adv_rss_ip4_hdr - fill the ipv4 rss protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the rss configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + virtchnl_set_proto_hdr_type(hdr, ipv4); + + if (hash_flds & iavf_adv_rss_hash_fld_ipv4_sa) + virtchnl_add_proto_hdr_field_bit(hdr, ipv4, src); + + if (hash_flds & iavf_adv_rss_hash_fld_ipv4_da) + virtchnl_add_proto_hdr_field_bit(hdr, ipv4, dst); +} + +/** + * iavf_fill_adv_rss_ip6_hdr - fill the ipv6 rss protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the rss configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + virtchnl_set_proto_hdr_type(hdr, ipv6); + + if (hash_flds & iavf_adv_rss_hash_fld_ipv6_sa) + virtchnl_add_proto_hdr_field_bit(hdr, ipv6, src); + + if (hash_flds & iavf_adv_rss_hash_fld_ipv6_da) + virtchnl_add_proto_hdr_field_bit(hdr, ipv6, dst); +} + +/** + * iavf_fill_adv_rss_tcp_hdr - fill the tcp rss protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the rss configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + virtchnl_set_proto_hdr_type(hdr, tcp); + + if (hash_flds & 
iavf_adv_rss_hash_fld_tcp_src_port) + virtchnl_add_proto_hdr_field_bit(hdr, tcp, src_port); + + if (hash_flds & iavf_adv_rss_hash_fld_tcp_dst_port) + virtchnl_add_proto_hdr_field_bit(hdr, tcp, dst_port); +} + +/** + * iavf_fill_adv_rss_cfg_msg - fill the rss configuration into virtchnl message + * @rss_cfg: the virtchnl message to be filled with rss configuration setting + * @packet_hdrs: the rss configuration protocol header types + * @hash_flds: the rss configuration protocol hash fields + * + * returns 0 if the rss configuration virtchnl message is filled successfully + */ +int +iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, + u32 packet_hdrs, u64 hash_flds) +{ + struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs; + struct virtchnl_proto_hdr *hdr; + + rss_cfg->rss_algorithm = virtchnl_rss_alg_toeplitz_asymmetric; + + proto_hdrs->tunnel_level = 0; /* always outer layer */ + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_hdrs & iavf_adv_rss_flow_seg_hdr_l3) { + case iavf_adv_rss_flow_seg_hdr_ipv4: + iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds); + break; + case iavf_adv_rss_flow_seg_hdr_ipv6: + iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds); + break; + default: + return -einval; + } + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + switch (packet_hdrs & iavf_adv_rss_flow_seg_hdr_l4) { + case iavf_adv_rss_flow_seg_hdr_tcp: + iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds); + break; + default: + return -einval; + } + + return 0; +} + +/** + * iavf_find_adv_rss_cfg_by_hdrs - find rss configuration with header type + * @adapter: pointer to the vf adapter structure + * @packet_hdrs: protocol header type to find. 
+ * + * returns pointer to advance rss configuration if found or null + */ +struct iavf_adv_rss * +iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs) +{ + struct iavf_adv_rss *rss; + + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) + if (rss->packet_hdrs == packet_hdrs) + return rss; + + return null; +} + +/** + * iavf_print_adv_rss_cfg + * @adapter: pointer to the vf adapter structure + * @rss: pointer to the advance rss configuration to print + * @action: the string description about how to handle the rss + * @result: the string description about the virtchnl result + * + * print the advance rss configuration + **/ +void +iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, + const char *action, const char *result) +{ + u32 packet_hdrs = rss->packet_hdrs; + u64 hash_flds = rss->hash_flds; + static char hash_opt[300]; + const char *proto; + + if (packet_hdrs & iavf_adv_rss_flow_seg_hdr_tcp) + proto = "tcp"; + else + return; + + memset(hash_opt, 0, sizeof(hash_opt)); + + strcat(hash_opt, proto); + if (packet_hdrs & iavf_adv_rss_flow_seg_hdr_ipv4) + strcat(hash_opt, "v4 "); + else + strcat(hash_opt, "v6 "); + + if (hash_flds & (iavf_adv_rss_hash_fld_ipv4_sa | + iavf_adv_rss_hash_fld_ipv6_sa)) + strcat(hash_opt, "ip sa,"); + if (hash_flds & (iavf_adv_rss_hash_fld_ipv4_da | + iavf_adv_rss_hash_fld_ipv6_da)) + strcat(hash_opt, "ip da,"); + if (hash_flds & iavf_adv_rss_hash_fld_tcp_src_port) + strcat(hash_opt, "src port,"); + if (hash_flds & iavf_adv_rss_hash_fld_tcp_dst_port) + strcat(hash_opt, "dst port,"); + + if (!action) + action = ""; + + if (!result) + result = ""; + + dev_info(&adapter->pdev->dev, "%s %s %s ", action, hash_opt, result); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +enum iavf_adv_rss_flow_seg_hdr { + 
iavf_adv_rss_flow_seg_hdr_none = 0x00000000, + iavf_adv_rss_flow_seg_hdr_ipv4 = 0x00000001, + iavf_adv_rss_flow_seg_hdr_ipv6 = 0x00000002, + iavf_adv_rss_flow_seg_hdr_tcp = 0x00000004, +}; + +#define iavf_adv_rss_flow_seg_hdr_l3 \ + (iavf_adv_rss_flow_seg_hdr_ipv4 | \ + iavf_adv_rss_flow_seg_hdr_ipv6) + +#define iavf_adv_rss_flow_seg_hdr_l4 \ + (iavf_adv_rss_flow_seg_hdr_tcp) + +enum iavf_adv_rss_flow_field { + /* l3 */ + iavf_adv_rss_flow_field_idx_ipv4_sa, + iavf_adv_rss_flow_field_idx_ipv4_da, + iavf_adv_rss_flow_field_idx_ipv6_sa, + iavf_adv_rss_flow_field_idx_ipv6_da, + /* l4 */ + iavf_adv_rss_flow_field_idx_tcp_src_port, + iavf_adv_rss_flow_field_idx_tcp_dst_port, + + /* the total number of enums must not exceed 64 */ + iavf_adv_rss_flow_field_idx_max +}; + +#define iavf_adv_rss_hash_invalid 0 +#define iavf_adv_rss_hash_fld_ipv4_sa \ + bit_ull(iavf_adv_rss_flow_field_idx_ipv4_sa) +#define iavf_adv_rss_hash_fld_ipv6_sa \ + bit_ull(iavf_adv_rss_flow_field_idx_ipv6_sa) +#define iavf_adv_rss_hash_fld_ipv4_da \ + bit_ull(iavf_adv_rss_flow_field_idx_ipv4_da) +#define iavf_adv_rss_hash_fld_ipv6_da \ + bit_ull(iavf_adv_rss_flow_field_idx_ipv6_da) +#define iavf_adv_rss_hash_fld_tcp_src_port \ + bit_ull(iavf_adv_rss_flow_field_idx_tcp_src_port) +#define iavf_adv_rss_hash_fld_tcp_dst_port \ + bit_ull(iavf_adv_rss_flow_field_idx_tcp_dst_port) + + u32 packet_hdrs; + u64 hash_flds; + + +int +iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg, + u32 packet_hdrs, u64 hash_flds); +struct iavf_adv_rss * +iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs); +void +iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss, + const char *action, const char *result); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +/** + * iavf_adv_rss_parse_hdrs - parses headers 
from rss hash input + * @cmd: ethtool rxnfc command + * + * this function parses the rxnfc command and returns intended + * header types for rss configuration + */ +static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd) +{ + u32 hdrs = iavf_adv_rss_flow_seg_hdr_none; + + switch (cmd->flow_type) { + case tcp_v4_flow: + hdrs |= iavf_adv_rss_flow_seg_hdr_tcp | + iavf_adv_rss_flow_seg_hdr_ipv4; + break; + case tcp_v6_flow: + hdrs |= iavf_adv_rss_flow_seg_hdr_tcp | + iavf_adv_rss_flow_seg_hdr_ipv6; + break; + default: + break; + } + + return hdrs; +} + +/** + * iavf_adv_rss_parse_hash_flds - parses hash fields from rss hash input + * @cmd: ethtool rxnfc command + * + * this function parses the rxnfc command and returns intended hash fields for + * rss configuration + */ +static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd) +{ + u64 hfld = iavf_adv_rss_hash_invalid; + + if (cmd->data & rxh_ip_src || cmd->data & rxh_ip_dst) { + switch (cmd->flow_type) { + case tcp_v4_flow: + if (cmd->data & rxh_ip_src) + hfld |= iavf_adv_rss_hash_fld_ipv4_sa; + if (cmd->data & rxh_ip_dst) + hfld |= iavf_adv_rss_hash_fld_ipv4_da; + break; + case tcp_v6_flow: + if (cmd->data & rxh_ip_src) + hfld |= iavf_adv_rss_hash_fld_ipv6_sa; + if (cmd->data & rxh_ip_dst) + hfld |= iavf_adv_rss_hash_fld_ipv6_da; + break; + default: + break; + } + } + + if (cmd->data & rxh_l4_b_0_1 || cmd->data & rxh_l4_b_2_3) { + switch (cmd->flow_type) { + case tcp_v4_flow: + case tcp_v6_flow: + if (cmd->data & rxh_l4_b_0_1) + hfld |= iavf_adv_rss_hash_fld_tcp_src_port; + if (cmd->data & rxh_l4_b_2_3) + hfld |= iavf_adv_rss_hash_fld_tcp_dst_port; + break; + default: + break; + } + } + + return hfld; +} + +/** + * iavf_set_adv_rss_hash_opt - enable/disable flow types for rss hash + * @adapter: pointer to the vf adapter structure + * @cmd: ethtool rxnfc command + * + * returns success if the flow input set is supported. 
+ */ +static int +iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct iavf_adv_rss *rss_old, *rss_new; + bool rss_new_add = false; + int count = 50, err = 0; + u64 hash_flds; + u32 hdrs; + + if (!adv_rss_support(adapter)) + return -eopnotsupp; + + hdrs = iavf_adv_rss_parse_hdrs(cmd); + if (hdrs == iavf_adv_rss_flow_seg_hdr_none) + return -einval; + + hash_flds = iavf_adv_rss_parse_hash_flds(cmd); + if (hash_flds == iavf_adv_rss_hash_invalid) + return -einval; + + rss_new = kzalloc(sizeof(*rss_new), gfp_kernel); + if (!rss_new) + return -enomem; + + if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) { + kfree(rss_new); + return -einval; + } + + while (test_and_set_bit(__iavf_in_critical_task, + &adapter->crit_section)) { + if (--count == 0) { + kfree(rss_new); + return -einval; + } + + udelay(1); + } + + spin_lock_bh(&adapter->adv_rss_lock); + rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); + if (rss_old) { + if (rss_old->state != iavf_adv_rss_active) { + err = -ebusy; + } else if (rss_old->hash_flds != hash_flds) { + rss_old->state = iavf_adv_rss_add_request; + rss_old->hash_flds = hash_flds; + memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg, + sizeof(rss_new->cfg_msg)); + adapter->aq_required |= iavf_flag_aq_add_adv_rss_cfg; + } else { + err = -eexist; + } + } else { + rss_new_add = true; + rss_new->state = iavf_adv_rss_add_request; + rss_new->packet_hdrs = hdrs; + rss_new->hash_flds = hash_flds; + list_add_tail(&rss_new->list, &adapter->adv_rss_list_head); + adapter->aq_required |= iavf_flag_aq_add_adv_rss_cfg; + } + spin_unlock_bh(&adapter->adv_rss_lock); + + if (!err) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + clear_bit(__iavf_in_critical_task, &adapter->crit_section); + + if (!rss_new_add) + kfree(rss_new); + + return err; +} + +/** + * iavf_get_adv_rss_hash_opt - retrieve hash fields for a given flow-type + * @adapter: pointer to the vf adapter structure + * @cmd: ethtool rxnfc 
command + * + * returns success if the flow input set is supported. + */ +static int +iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct iavf_adv_rss *rss; + u64 hash_flds; + u32 hdrs; + + if (!adv_rss_support(adapter)) + return -eopnotsupp; + + cmd->data = 0; + + hdrs = iavf_adv_rss_parse_hdrs(cmd); + if (hdrs == iavf_adv_rss_flow_seg_hdr_none) + return -einval; + + spin_lock_bh(&adapter->adv_rss_lock); + rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs); + if (rss) + hash_flds = rss->hash_flds; + else + hash_flds = iavf_adv_rss_hash_invalid; + spin_unlock_bh(&adapter->adv_rss_lock); + + if (hash_flds == iavf_adv_rss_hash_invalid) + return -einval; + + if (hash_flds & (iavf_adv_rss_hash_fld_ipv4_sa | + iavf_adv_rss_hash_fld_ipv6_sa)) + cmd->data |= (u64)rxh_ip_src; + + if (hash_flds & (iavf_adv_rss_hash_fld_ipv4_da | + iavf_adv_rss_hash_fld_ipv6_da)) + cmd->data |= (u64)rxh_ip_dst; + + if (hash_flds & iavf_adv_rss_hash_fld_tcp_src_port) + cmd->data |= (u64)rxh_l4_b_0_1; + + if (hash_flds & iavf_adv_rss_hash_fld_tcp_dst_port) + cmd->data |= (u64)rxh_l4_b_2_3; + + return 0; +} + + case ethtool_srxfh: + ret = iavf_set_adv_rss_hash_opt(adapter, cmd); + break; - netdev_info(netdev, - "rss hash info is not available to vf, use pf. 
"); + ret = iavf_get_adv_rss_hash_opt(adapter, cmd); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c + iavf_print_adv_rss_cfg(adapter, rss, + "input set change for", + "is pending"); + iavf_print_adv_rss_cfg(adapter, rss, + "failed to change the input set for", + null); - list_for_each_entry(rss, &adapter->adv_rss_list_head, list) - if (rss->state == iavf_adv_rss_add_pending) + list_for_each_entry(rss, &adapter->adv_rss_list_head, list) { + if (rss->state == iavf_adv_rss_add_pending) { + iavf_print_adv_rss_cfg(adapter, rss, + "input set change for", + "successful"); + } + }
Networking
5ab91e0593a15652d31d3eb0bd6d28bf0bc9b36c
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: support for modifying udp rss flow hashing
provides the ability to enable udp rss hashing by ethtool.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support for modifying udp rss flow hashing
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
3
56
5
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c +/** + * iavf_fill_adv_rss_udp_hdr - fill the udp rss protocol header + * @hdr: the virtchnl message protocol header data structure + * @hash_flds: the rss configuration protocol hash fields + */ +static void +iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds) +{ + virtchnl_set_proto_hdr_type(hdr, udp); + + if (hash_flds & iavf_adv_rss_hash_fld_udp_src_port) + virtchnl_add_proto_hdr_field_bit(hdr, udp, src_port); + + if (hash_flds & iavf_adv_rss_hash_fld_udp_dst_port) + virtchnl_add_proto_hdr_field_bit(hdr, udp, dst_port); +} + + case iavf_adv_rss_flow_seg_hdr_udp: + iavf_fill_adv_rss_udp_hdr(hdr, hash_flds); + break; + else if (packet_hdrs & iavf_adv_rss_flow_seg_hdr_udp) + proto = "udp"; - if (hash_flds & iavf_adv_rss_hash_fld_tcp_src_port) + if (hash_flds & (iavf_adv_rss_hash_fld_tcp_src_port | + iavf_adv_rss_hash_fld_udp_src_port)) - if (hash_flds & iavf_adv_rss_hash_fld_tcp_dst_port) + if (hash_flds & (iavf_adv_rss_hash_fld_tcp_dst_port | + iavf_adv_rss_hash_fld_udp_dst_port)) diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h --- a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h +++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h + iavf_adv_rss_flow_seg_hdr_udp = 0x00000008, - (iavf_adv_rss_flow_seg_hdr_tcp) + (iavf_adv_rss_flow_seg_hdr_tcp | \ + iavf_adv_rss_flow_seg_hdr_udp) + iavf_adv_rss_flow_field_idx_udp_src_port, + iavf_adv_rss_flow_field_idx_udp_dst_port, +#define iavf_adv_rss_hash_fld_udp_src_port \ + bit_ull(iavf_adv_rss_flow_field_idx_udp_src_port) +#define iavf_adv_rss_hash_fld_udp_dst_port \ + bit_ull(iavf_adv_rss_flow_field_idx_udp_dst_port) diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- 
a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c + case udp_v4_flow: + hdrs |= iavf_adv_rss_flow_seg_hdr_udp | + iavf_adv_rss_flow_seg_hdr_ipv4; + break; + case udp_v6_flow: + hdrs |= iavf_adv_rss_flow_seg_hdr_udp | + iavf_adv_rss_flow_seg_hdr_ipv6; + break; + case udp_v4_flow: + case udp_v6_flow: + case udp_v4_flow: + case udp_v6_flow: + if (cmd->data & rxh_l4_b_0_1) + hfld |= iavf_adv_rss_hash_fld_udp_src_port; + if (cmd->data & rxh_l4_b_2_3) + hfld |= iavf_adv_rss_hash_fld_udp_dst_port; + break; - if (hash_flds & iavf_adv_rss_hash_fld_tcp_src_port) + if (hash_flds & (iavf_adv_rss_hash_fld_tcp_src_port | + iavf_adv_rss_hash_fld_udp_src_port)) - if (hash_flds & iavf_adv_rss_hash_fld_tcp_dst_port) + if (hash_flds & (iavf_adv_rss_hash_fld_tcp_dst_port | + iavf_adv_rss_hash_fld_udp_dst_port))
Networking
7b8f3f957b22746bc9a410d7cd2e9edd0efcc9f5
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: enable flex-bytes support
flex-bytes allows for packet matching based on an offset and value. this is supported via the ethtool user-def option.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
enable flex-bytes support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
3
327
2
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +/** + * iavf_is_mask_valid - check mask field set + * @mask: full mask to check + * @field: field for which mask should be valid + * + * if the mask is fully set return true. if it is not valid for field return + * false. + */ +static bool iavf_is_mask_valid(u64 mask, u64 field) +{ + return (mask & field) == field; +} + +/** + * iavf_parse_rx_flow_user_data - deconstruct user-defined data + * @fsp: pointer to ethtool rx flow specification + * @fltr: pointer to flow director filter for userdef data storage + * + * returns 0 on success, negative error value on failure + */ +static int +iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + struct iavf_flex_word *flex; + int i, cnt = 0; + + if (!(fsp->flow_type & flow_ext)) + return 0; + + for (i = 0; i < 2; i++) { +#define iavf_userdef_flex_word_m genmask(15, 0) +#define iavf_userdef_flex_offs_s 16 +#define iavf_userdef_flex_offs_m genmask(31, iavf_userdef_flex_offs_s) +#define iavf_userdef_flex_fltr_m genmask(31, 0) + u32 value = be32_to_cpu(fsp->h_ext.data[i]); + u32 mask = be32_to_cpu(fsp->m_ext.data[i]); + + if (!value || !mask) + continue; + + if (!iavf_is_mask_valid(mask, iavf_userdef_flex_fltr_m)) + return -einval; + + /* 504 is the maximum value for offsets, and offset is measured + * from the start of the mac address. 
+ */ +#define iavf_userdef_flex_max_offs_val 504 + flex = &fltr->flex_words[cnt++]; + flex->word = value & iavf_userdef_flex_word_m; + flex->offset = (value & iavf_userdef_flex_offs_m) >> + iavf_userdef_flex_offs_s; + if (flex->offset > iavf_userdef_flex_max_offs_val) + return -einval; + } + + fltr->flex_cnt = cnt; + + return 0; +} + +/** + * iavf_fill_rx_flow_ext_data - fill the additional data + * @fsp: pointer to ethtool rx flow specification + * @fltr: pointer to flow director filter to get additional data + */ +static void +iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1]) + return; + + fsp->flow_type |= flow_ext; + + memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data)); +} + + iavf_fill_rx_flow_ext_data(fsp, rule); + + int err; + if (fsp->flow_type & flow_ext) { + memcpy(fltr->ext_data.usr_def, fsp->h_ext.data, + sizeof(fltr->ext_data.usr_def)); + memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data, + sizeof(fltr->ext_mask.usr_def)); + } + + err = iavf_parse_rx_flow_user_data(fsp, fltr); + if (err) + return err; + diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c +#define gtpu_port 2152 +#define nat_t_esp_port 4500 +#define pfcp_port 8805 + +/** + * iavf_pkt_udp_no_pay_len - the length of udp packet without payload + * @fltr: flow director filter data structure + */ +static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr) +{ + return sizeof(struct ethhdr) + + (fltr->ip_ver == 4 ? 
sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct udphdr); +} + +/** + * iavf_fill_fdir_gtpu_hdr - fill the gtp-u protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the gtp-u protocol header is set successfully + */ +static int +iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct virtchnl_proto_hdr *ehdr = null; /* extension header if it exists */ + u16 adj_offs, hdr_offs; + int i; + + virtchnl_set_proto_hdr_type(ghdr, gtpu_ip); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define iavf_gtpu_hdr_teid_offs0 4 +#define iavf_gtpu_hdr_teid_offs1 6 +#define iavf_gtpu_hdr_n_pdu_and_next_exthdr_offs 10 +#define iavf_gtpu_hdr_psc_pdu_type_and_qfi_offs 13 +#define iavf_gtpu_psc_exthdr_type 0x85 /* pdu session container extension header */ + if (fltr->flex_words[i].offset < adj_offs) + return -einval; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case iavf_gtpu_hdr_teid_offs0: + case iavf_gtpu_hdr_teid_offs1: { + __be16 *pay_word = (__be16 *)ghdr->buffer; + + pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word); + virtchnl_add_proto_hdr_field_bit(ghdr, gtpu_ip, teid); + } + break; + case iavf_gtpu_hdr_n_pdu_and_next_exthdr_offs: + if ((fltr->flex_words[i].word & 0xff) != iavf_gtpu_psc_exthdr_type) + return -eopnotsupp; + if (!ehdr) + ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + virtchnl_set_proto_hdr_type(ehdr, gtpu_eh); + break; + case iavf_gtpu_hdr_psc_pdu_type_and_qfi_offs: + if (!ehdr) + return -einval; + ehdr->buffer[1] = fltr->flex_words[i].word & 0x3f; + virtchnl_add_proto_hdr_field_bit(ehdr, gtpu_eh, qfi); + break; + default: + return -einval; + } + } + 
+ uhdr->field_selector = 0; /* the pf ignores the udp header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_pfcp_hdr - fill the pfcp protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the pfcp protocol header is set successfully + */ +static int +iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + int i; + + virtchnl_set_proto_hdr_type(hdr, pfcp); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define iavf_pfcp_hdr_sfield_and_msg_type_offs 0 + if (fltr->flex_words[i].offset < adj_offs) + return -einval; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case iavf_pfcp_hdr_sfield_and_msg_type_offs: + hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff; + virtchnl_add_proto_hdr_field_bit(hdr, pfcp, s_field); + break; + default: + return -einval; + } + } + + uhdr->field_selector = 0; /* the pf ignores the udp header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_nat_t_esp_hdr - fill the nat-t-esp protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the nat-t-esp protocol header is set successfully + */ +static int +iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1]; + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + u16 adj_offs, hdr_offs; + u32 spi = 0; + int i; + + virtchnl_set_proto_hdr_type(hdr, esp); + + adj_offs = iavf_pkt_udp_no_pay_len(fltr); + + for (i = 0; i < fltr->flex_cnt; i++) { +#define 
iavf_nat_t_esp_spi_offs0 0 +#define iavf_nat_t_esp_spi_offs1 2 + if (fltr->flex_words[i].offset < adj_offs) + return -einval; + + hdr_offs = fltr->flex_words[i].offset - adj_offs; + + switch (hdr_offs) { + case iavf_nat_t_esp_spi_offs0: + spi |= fltr->flex_words[i].word << 16; + break; + case iavf_nat_t_esp_spi_offs1: + spi |= fltr->flex_words[i].word; + break; + default: + return -einval; + } + } + + if (!spi) + return -eopnotsupp; /* not support ike header format with spi 0 */ + + *(__be32 *)hdr->buffer = htonl(spi); + virtchnl_add_proto_hdr_field_bit(hdr, esp, spi); + + uhdr->field_selector = 0; /* the pf ignores the udp header fields */ + + return 0; +} + +/** + * iavf_fill_fdir_udp_flex_pay_hdr - fill the udp payload header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the udp payload defined protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + int err; + + switch (ntohs(fltr->ip_data.dst_port)) { + case gtpu_port: + err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs); + break; + case nat_t_esp_port: + err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs); + break; + case pfcp_port: + err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs); + break; + default: + err = -eopnotsupp; + break; + } + + return err; +} + + fltr->ip_ver = 4; + + fltr->ip_ver = 6; + - return 0; + if (!fltr->flex_cnt) + return 0; + + return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs); - sizeof(fltr->ip_data))) { + sizeof(fltr->ip_data)) && + !memcmp(&tmp->ext_data, &fltr->ext_data, + sizeof(fltr->ext_data))) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h +struct iavf_flex_word { + u16 offset; + u16 word; +}; + + +struct iavf_fdir_extra { + u32 
usr_def[2]; +}; + + struct iavf_fdir_extra ext_data; + struct iavf_fdir_extra ext_mask; + + + /* flex byte filter data */ + u8 ip_ver; /* used to adjust the flex offset, 4 : ipv4, 6 : ipv6 */ + u8 flex_cnt; + struct iavf_flex_word flex_words[2]; +
Networking
a6379db818a850d1c1012cffe160cfc14d64cb40
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: support ethernet type flow director filters
support the addition and deletion of ethernet filters.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support ethernet type flow director filters
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
3
42
1
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c + case iavf_fdir_flow_non_ip_l2: + return ether_flow; + case ether_flow: + return iavf_fdir_flow_non_ip_l2; + case ether_flow: + fsp->h_u.ether_spec.h_proto = rule->eth_data.etype; + fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype; + break; + case ether_flow: + fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto; + fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto; + break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c + struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer; + if (fltr->eth_mask.etype == htons(u16_max)) { + if (fltr->eth_data.etype == htons(eth_p_ip) || + fltr->eth_data.etype == htons(eth_p_ipv6)) + return -eopnotsupp; + + ehdr->h_proto = fltr->eth_data.etype; + virtchnl_add_proto_hdr_field_bit(hdr, eth, ethertype); + } + + case iavf_fdir_flow_non_ip_l2: + break; + case iavf_fdir_flow_non_ip_l2: + return "ethernet"; + case iavf_fdir_flow_non_ip_l2: + dev_info(&adapter->pdev->dev, "rule id: %u eth_type: 0x%x ", + fltr->loc, + ntohs(fltr->eth_data.etype)); + break; - if (!memcmp(&tmp->ip_data, &fltr->ip_data, + if (!memcmp(&tmp->eth_data, &fltr->eth_data, + sizeof(fltr->eth_data)) && + !memcmp(&tmp->ip_data, &fltr->ip_data, diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h + iavf_fdir_flow_non_ip_l2, +struct iavf_fdir_eth { + __be16 etype; +}; + + struct iavf_fdir_eth eth_data; + struct iavf_fdir_eth eth_mask; +
Networking
a6ccffaa8da32b6077e37e7d254d519bc071433a
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: support ipv4 flow director filters
support the addition and deletion of ipv4 filters.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support ipv4 flow director filters
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c', 'makefile']
5
881
1
--- diff --git a/drivers/net/ethernet/intel/iavf/makefile b/drivers/net/ethernet/intel/iavf/makefile --- a/drivers/net/ethernet/intel/iavf/makefile +++ b/drivers/net/ethernet/intel/iavf/makefile -iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \ +iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +/** + * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool + * flow type values + * @flow: filter type to be converted + * + * returns the corresponding ethtool flow type. + */ +static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow) +{ + switch (flow) { + case iavf_fdir_flow_ipv4_tcp: + return tcp_v4_flow; + case iavf_fdir_flow_ipv4_udp: + return udp_v4_flow; + case iavf_fdir_flow_ipv4_sctp: + return sctp_v4_flow; + case iavf_fdir_flow_ipv4_ah: + return ah_v4_flow; + case iavf_fdir_flow_ipv4_esp: + return esp_v4_flow; + case iavf_fdir_flow_ipv4_other: + return ipv4_user_flow; + default: + /* 0 is undefined ethtool flow */ + return 0; + } +} + +/** + * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum + * @eth: ethtool flow type to be converted + * + * returns flow enum + */ +static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth) +{ + switch (eth) { + case tcp_v4_flow: + return iavf_fdir_flow_ipv4_tcp; + case udp_v4_flow: + return iavf_fdir_flow_ipv4_udp; + case sctp_v4_flow: + return iavf_fdir_flow_ipv4_sctp; + case ah_v4_flow: + return iavf_fdir_flow_ipv4_ah; + case esp_v4_flow: + return iavf_fdir_flow_ipv4_esp; + case ipv4_user_flow: + return iavf_fdir_flow_ipv4_other; + default: + return iavf_fdir_flow_none; + } +} + +/** + * iavf_get_ethtool_fdir_entry - fill ethtool structure with flow director filter data + * @adapter: the vf adapter structure that contains filter list + 
* @cmd: ethtool command data structure to receive the filter data + * + * returns 0 as expected for success by ethtool + */ +static int +iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *rule = null; + int ret = 0; + + if (!fdir_fltr_support(adapter)) + return -eopnotsupp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + + rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (!rule) { + ret = -einval; + goto release_lock; + } + + fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type); + + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case tcp_v4_flow: + case udp_v4_flow: + case sctp_v4_flow: + fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port; + fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos; + fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos; + break; + case ah_v4_flow: + case esp_v4_flow: + fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi; + fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos; + fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi; + fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos; + break; + case ipv4_user_flow: + fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip; + 
fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip; + fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header; + fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos; + fsp->h_u.usr_ip4_spec.ip_ver = eth_rx_nfc_ip4; + fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto; + fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip; + fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip; + fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header; + fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos; + fsp->m_u.usr_ip4_spec.ip_ver = 0xff; + fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto; + break; + default: + ret = -einval; + break; + } + + if (rule->action == virtchnl_action_drop) + fsp->ring_cookie = rx_cls_flow_disc; + else + fsp->ring_cookie = rule->q_index; + +release_lock: + spin_unlock_bh(&adapter->fdir_fltr_lock); + return ret; +} + +/** + * iavf_get_fdir_fltr_ids - fill buffer with filter ids of active filters + * @adapter: the vf adapter structure containing the filter list + * @cmd: ethtool command data structure + * @rule_locs: ethtool array passed in from os to receive filter ids + * + * returns 0 as expected for success by ethtool + */ +static int +iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct iavf_fdir_fltr *fltr; + unsigned int cnt = 0; + int val = 0; + + if (!fdir_fltr_support(adapter)) + return -eopnotsupp; + + cmd->data = iavf_max_fdir_filters; + + spin_lock_bh(&adapter->fdir_fltr_lock); + + list_for_each_entry(fltr, &adapter->fdir_list_head, list) { + if (cnt == cmd->rule_cnt) { + val = -emsgsize; + goto release_lock; + } + rule_locs[cnt] = fltr->loc; + cnt++; + } + +release_lock: + spin_unlock_bh(&adapter->fdir_fltr_lock); + if (!val) + cmd->rule_cnt = cnt; + + return val; +} + +/** + * iavf_add_fdir_fltr_info - set the input set for flow director filter + * @adapter: pointer to the vf adapter structure + * @fsp: pointer to ethtool rx flow specification + * @fltr: filter 
structure + */ +static int +iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp, + struct iavf_fdir_fltr *fltr) +{ + u32 flow_type, q_index = 0; + enum virtchnl_action act; + + if (fsp->ring_cookie == rx_cls_flow_disc) { + act = virtchnl_action_drop; + } else { + q_index = fsp->ring_cookie; + if (q_index >= adapter->num_active_queues) + return -einval; + + act = virtchnl_action_queue; + } + + fltr->action = act; + fltr->loc = fsp->location; + fltr->q_index = q_index; + + flow_type = fsp->flow_type & ~(flow_ext | flow_mac_ext | flow_rss); + fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type); + + switch (flow_type) { + case tcp_v4_flow: + case udp_v4_flow: + case sctp_v4_flow: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst; + fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc; + fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst; + fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + break; + case ah_v4_flow: + case esp_v4_flow: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst; + fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi; + fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; + fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; + fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + break; + case ipv4_user_flow: + fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; + fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst; + fltr->ip_data.l4_header = 
fsp->h_u.usr_ip4_spec.l4_4_bytes; + fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos; + fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto; + fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src; + fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst; + fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; + fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; + fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + break; + default: + /* not doing un-parsed flow types */ + return -einval; + } + + if (iavf_fdir_is_dup_fltr(adapter, fltr)) + return -eexist; + + return iavf_fill_fdir_add_msg(adapter, fltr); +} + +/** + * iavf_add_fdir_ethtool - add flow director filter + * @adapter: pointer to the vf adapter structure + * @cmd: command to add flow director filter + * + * returns 0 on success and negative values for failure + */ +static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct iavf_fdir_fltr *fltr; + int count = 50; + int err; + + if (!fdir_fltr_support(adapter)) + return -eopnotsupp; + + if (fsp->flow_type & flow_mac_ext) + return -einval; + + if (adapter->fdir_active_fltr >= iavf_max_fdir_filters) { + dev_err(&adapter->pdev->dev, + "unable to add flow director filter because vf reached the limit of max allowed filters (%u) ", + iavf_max_fdir_filters); + return -enospc; + } + + spin_lock_bh(&adapter->fdir_fltr_lock); + if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { + dev_err(&adapter->pdev->dev, "failed to add flow director filter, it already exists "); + spin_unlock_bh(&adapter->fdir_fltr_lock); + return -eexist; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + fltr = kzalloc(sizeof(*fltr), gfp_kernel); + if (!fltr) + return -enomem; + + while (test_and_set_bit(__iavf_in_critical_task, + &adapter->crit_section)) { + if (--count == 0) { + kfree(fltr); + return -einval; + } + udelay(1); + } + + err = iavf_add_fdir_fltr_info(adapter, fsp, fltr); 
+ if (err) + goto ret; + + spin_lock_bh(&adapter->fdir_fltr_lock); + iavf_fdir_list_add_fltr(adapter, fltr); + adapter->fdir_active_fltr++; + fltr->state = iavf_fdir_fltr_add_request; + adapter->aq_required |= iavf_flag_aq_add_fdir_filter; + spin_unlock_bh(&adapter->fdir_fltr_lock); + + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + +ret: + if (err && fltr) + kfree(fltr); + + clear_bit(__iavf_in_critical_task, &adapter->crit_section); + return err; +} + +/** + * iavf_del_fdir_ethtool - delete flow director filter + * @adapter: pointer to the vf adapter structure + * @cmd: command to delete flow director filter + * + * returns 0 on success and negative values for failure + */ +static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + struct iavf_fdir_fltr *fltr = null; + int err = 0; + + if (!fdir_fltr_support(adapter)) + return -eopnotsupp; + + spin_lock_bh(&adapter->fdir_fltr_lock); + fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location); + if (fltr) { + if (fltr->state == iavf_fdir_fltr_active) { + fltr->state = iavf_fdir_fltr_del_request; + adapter->aq_required |= iavf_flag_aq_del_fdir_filter; + } else { + err = -ebusy; + } + } else if (adapter->fdir_active_fltr) { + err = -einval; + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (fltr && fltr->state == iavf_fdir_fltr_del_request) + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + + return err; +} + +/** + * iavf_set_rxnfc - command to set rx flow rules. 
+ * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * returns 0 for success and negative values for errors + */ +static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + int ret = -eopnotsupp; + + switch (cmd->cmd) { + case ethtool_srxclsrlins: + ret = iavf_add_fdir_ethtool(adapter, cmd); + break; + case ethtool_srxclsrldel: + ret = iavf_del_fdir_ethtool(adapter, cmd); + break; + default: + break; + } + + return ret; +} + + case ethtool_grxclsrlcnt: + if (!fdir_fltr_support(adapter)) + break; + cmd->rule_cnt = adapter->fdir_active_fltr; + cmd->data = iavf_max_fdir_filters; + ret = 0; + break; + case ethtool_grxclsrule: + ret = iavf_get_ethtool_fdir_entry(adapter, cmd); + break; + case ethtool_grxclsrlall: + ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs); + break; + .set_rxnfc = iavf_set_rxnfc, diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c +// spdx-license-identifier: gpl-2.0 +/* copyright (c) 2020, intel corporation. 
*/ + +/* flow director ethtool support for iavf */ + +#include "iavf.h" + +/** + * iavf_fill_fdir_ip4_hdr - fill the ipv4 protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the ipv4 protocol header is set successfully + */ +static int +iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct iphdr *iph = (struct iphdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, ipv4); + + if (fltr->ip_mask.tos == u8_max) { + iph->tos = fltr->ip_data.tos; + virtchnl_add_proto_hdr_field_bit(hdr, ipv4, dscp); + } + + if (fltr->ip_mask.proto == u8_max) { + iph->protocol = fltr->ip_data.proto; + virtchnl_add_proto_hdr_field_bit(hdr, ipv4, prot); + } + + if (fltr->ip_mask.v4_addrs.src_ip == htonl(u32_max)) { + iph->saddr = fltr->ip_data.v4_addrs.src_ip; + virtchnl_add_proto_hdr_field_bit(hdr, ipv4, src); + } + + if (fltr->ip_mask.v4_addrs.dst_ip == htonl(u32_max)) { + iph->daddr = fltr->ip_data.v4_addrs.dst_ip; + virtchnl_add_proto_hdr_field_bit(hdr, ipv4, dst); + } + + return 0; +} + +/** + * iavf_fill_fdir_tcp_hdr - fill the tcp protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the tcp protocol header is set successfully + */ +static int +iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct tcphdr *tcph = (struct tcphdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, tcp); + + if (fltr->ip_mask.src_port == htons(u16_max)) { + tcph->source = fltr->ip_data.src_port; + virtchnl_add_proto_hdr_field_bit(hdr, tcp, src_port); + } + + if (fltr->ip_mask.dst_port == htons(u16_max)) { + tcph->dest = fltr->ip_data.dst_port; + 
virtchnl_add_proto_hdr_field_bit(hdr, tcp, dst_port); + } + + return 0; +} + +/** + * iavf_fill_fdir_udp_hdr - fill the udp protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the udp protocol header is set successfully + */ +static int +iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct udphdr *udph = (struct udphdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, udp); + + if (fltr->ip_mask.src_port == htons(u16_max)) { + udph->source = fltr->ip_data.src_port; + virtchnl_add_proto_hdr_field_bit(hdr, udp, src_port); + } + + if (fltr->ip_mask.dst_port == htons(u16_max)) { + udph->dest = fltr->ip_data.dst_port; + virtchnl_add_proto_hdr_field_bit(hdr, udp, dst_port); + } + + return 0; +} + +/** + * iavf_fill_fdir_sctp_hdr - fill the sctp protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the sctp protocol header is set successfully + */ +static int +iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct sctphdr *sctph = (struct sctphdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, sctp); + + if (fltr->ip_mask.src_port == htons(u16_max)) { + sctph->source = fltr->ip_data.src_port; + virtchnl_add_proto_hdr_field_bit(hdr, sctp, src_port); + } + + if (fltr->ip_mask.dst_port == htons(u16_max)) { + sctph->dest = fltr->ip_data.dst_port; + virtchnl_add_proto_hdr_field_bit(hdr, sctp, dst_port); + } + + return 0; +} + +/** + * iavf_fill_fdir_ah_hdr - fill the ah protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the ah 
protocol header is set successfully + */ +static int +iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, ah); + + if (fltr->ip_mask.spi == htonl(u32_max)) { + ah->spi = fltr->ip_data.spi; + virtchnl_add_proto_hdr_field_bit(hdr, ah, spi); + } + + return 0; +} + +/** + * iavf_fill_fdir_esp_hdr - fill the esp protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the esp protocol header is set successfully + */ +static int +iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, esp); + + if (fltr->ip_mask.spi == htonl(u32_max)) { + esph->spi = fltr->ip_data.spi; + virtchnl_add_proto_hdr_field_bit(hdr, esp, spi); + } + + return 0; +} + +/** + * iavf_fill_fdir_l4_hdr - fill the l4 protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the l4 protocol header is set successfully + */ +static int +iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr; + __be32 *l4_4_data; + + if (!fltr->ip_mask.proto) /* ipv4/ipv6 header only */ + return 0; + + hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + l4_4_data = (__be32 *)hdr->buffer; + + /* l2tpv3 over ip with 'session id' */ + if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(u32_max)) { + virtchnl_set_proto_hdr_type(hdr, l2tpv3); + virtchnl_add_proto_hdr_field_bit(hdr, l2tpv3, sess_id); + + *l4_4_data = 
fltr->ip_data.l4_header; + } else { + return -eopnotsupp; + } + + return 0; +} + +/** + * iavf_fill_fdir_eth_hdr - fill the ethernet protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the ethernet protocol header is set successfully + */ +static int +iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + + virtchnl_set_proto_hdr_type(hdr, eth); + + return 0; +} + +/** + * iavf_fill_fdir_add_msg - fill the flow director filter into virtchnl message + * @adapter: pointer to the vf adapter structure + * @fltr: flow director filter data structure + * + * returns 0 if the add flow director virtchnl message is filled successfully + */ +int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg; + struct virtchnl_proto_hdrs *proto_hdrs; + int err; + + proto_hdrs = &vc_msg->rule_cfg.proto_hdrs; + + err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* l2 always exists */ + if (err) + return err; + + switch (fltr->flow_type) { + case iavf_fdir_flow_ipv4_tcp: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv4_udp: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv4_sctp: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv4_ah: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv4_esp: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv4_other: + err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) | 
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + default: + err = -einval; + break; + } + + if (err) + return err; + + vc_msg->vsi_id = adapter->vsi.id; + vc_msg->rule_cfg.action_set.count = 1; + vc_msg->rule_cfg.action_set.actions[0].type = fltr->action; + vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index; + + return 0; +} + +/** + * iavf_fdir_flow_proto_name - get the flow protocol name + * @flow_type: flow director filter flow type + **/ +static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type) +{ + switch (flow_type) { + case iavf_fdir_flow_ipv4_tcp: + return "tcp"; + case iavf_fdir_flow_ipv4_udp: + return "udp"; + case iavf_fdir_flow_ipv4_sctp: + return "sctp"; + case iavf_fdir_flow_ipv4_ah: + return "ah"; + case iavf_fdir_flow_ipv4_esp: + return "esp"; + case iavf_fdir_flow_ipv4_other: + return "other"; + default: + return null; + } +} + +/** + * iavf_print_fdir_fltr + * @adapter: adapter structure + * @fltr: flow director filter to print + * + * print the flow director filter + **/ +void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type); + + if (!proto) + return; + + switch (fltr->flow_type) { + case iavf_fdir_flow_ipv4_tcp: + case iavf_fdir_flow_ipv4_udp: + case iavf_fdir_flow_ipv4_sctp: + dev_info(&adapter->pdev->dev, "rule id: %u dst_ip: %pi4 src_ip %pi4 %s: dst_port %hu src_port %hu ", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case iavf_fdir_flow_ipv4_ah: + case iavf_fdir_flow_ipv4_esp: + dev_info(&adapter->pdev->dev, "rule id: %u dst_ip: %pi4 src_ip %pi4 %s: spi %u ", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case iavf_fdir_flow_ipv4_other: + dev_info(&adapter->pdev->dev, "rule id: %u dst_ip: 
%pi4 src_ip %pi4 proto: %u l4_bytes: 0x%x ", + fltr->loc, + &fltr->ip_data.v4_addrs.dst_ip, + &fltr->ip_data.v4_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; + default: + break; + } +} + +/** + * iavf_fdir_is_dup_fltr - test if filter is already in list + * @adapter: pointer to the vf adapter structure + * @fltr: flow director filter data structure + * + * returns true if the filter is found in the list + */ +bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *tmp; + bool ret = false; + + list_for_each_entry(tmp, &adapter->fdir_list_head, list) { + if (tmp->flow_type != fltr->flow_type) + continue; + + if (!memcmp(&tmp->ip_data, &fltr->ip_data, + sizeof(fltr->ip_data))) { + ret = true; + break; + } + } + + return ret; +} + +/** + * iavf_find_fdir_fltr_by_loc - find filter with location + * @adapter: pointer to the vf adapter structure + * @loc: location to find. + * + * returns pointer to flow director filter if found or null + */ +struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc) +{ + struct iavf_fdir_fltr *rule; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) + if (rule->loc == loc) + return rule; + + return null; +} + +/** + * iavf_fdir_list_add_fltr - add a new node to the flow director filter list + * @adapter: pointer to the vf adapter structure + * @fltr: filter node to add to structure + */ +void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) +{ + struct iavf_fdir_fltr *rule, *parent = null; + + list_for_each_entry(rule, &adapter->fdir_list_head, list) { + if (rule->loc >= fltr->loc) + break; + parent = rule; + } + + if (parent) + list_add(&fltr->list, &parent->list); + else + list_add(&fltr->list, &adapter->fdir_list_head); +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h --- 
a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h +enum iavf_fdir_flow_type { + /* none - used for undef/error */ + iavf_fdir_flow_none = 0, + iavf_fdir_flow_ipv4_tcp, + iavf_fdir_flow_ipv4_udp, + iavf_fdir_flow_ipv4_sctp, + iavf_fdir_flow_ipv4_ah, + iavf_fdir_flow_ipv4_esp, + iavf_fdir_flow_ipv4_other, + /* max - this must be last and add anything new just above it */ + iavf_fdir_flow_ptype_max, +}; + +struct iavf_ipv4_addrs { + __be32 src_ip; + __be32 dst_ip; +}; + +struct iavf_fdir_ip { + union { + struct iavf_ipv4_addrs v4_addrs; + }; + __be16 src_port; + __be16 dst_port; + __be32 l4_header; /* first 4 bytes of the layer 4 header */ + __be32 spi; /* security parameter index for ah/esp */ + union { + u8 tos; + }; + u8 proto; +}; + enum iavf_fdir_flow_type flow_type; + + struct iavf_fdir_ip ip_data; + struct iavf_fdir_ip ip_mask; + + enum virtchnl_action action; + u32 loc; /* rule location inside the flow table */ + u32 q_index; + diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c + iavf_print_fdir_fltr(adapter, fdir); + iavf_print_fdir_fltr(adapter, fdir); + dev_info(&adapter->pdev->dev, "flow director filter with location %u is added ", + fdir->loc); + iavf_print_fdir_fltr(adapter, fdir); + dev_info(&adapter->pdev->dev, "flow director filter with location %u is deleted ", + fdir->loc); + iavf_print_fdir_fltr(adapter, fdir);
Networking
527691bf0682d7ddcca77fc17dabd2fa090572ff
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: support ipv6 flow director filters
support the addition and deletion of ipv6 filters.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
support ipv6 flow director filters
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['h', 'c']
3
246
0
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c + case iavf_fdir_flow_ipv6_tcp: + return tcp_v6_flow; + case iavf_fdir_flow_ipv6_udp: + return udp_v6_flow; + case iavf_fdir_flow_ipv6_sctp: + return sctp_v6_flow; + case iavf_fdir_flow_ipv6_ah: + return ah_v6_flow; + case iavf_fdir_flow_ipv6_esp: + return esp_v6_flow; + case iavf_fdir_flow_ipv6_other: + return ipv6_user_flow; + case tcp_v6_flow: + return iavf_fdir_flow_ipv6_tcp; + case udp_v6_flow: + return iavf_fdir_flow_ipv6_udp; + case sctp_v6_flow: + return iavf_fdir_flow_ipv6_sctp; + case ah_v6_flow: + return iavf_fdir_flow_ipv6_ah; + case esp_v6_flow: + return iavf_fdir_flow_ipv6_esp; + case ipv6_user_flow: + return iavf_fdir_flow_ipv6_other; + case tcp_v6_flow: + case udp_v6_flow: + case sctp_v6_flow: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port; + fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass; + break; + case ah_v6_flow: + case esp_v6_flow: + memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi; + fsp->h_u.ah_ip6_spec.tclass = 
rule->ip_data.tclass; + memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi; + fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass; + break; + case ipv6_user_flow: + memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header; + fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass; + fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto; + memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header; + fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass; + fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto; + break; + case tcp_v6_flow: + case udp_v6_flow: + case sctp_v6_flow: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc; + fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + break; + case ah_v6_flow: + case esp_v6_flow: + memcpy(&fltr->ip_data.v6_addrs.src_ip, 
fsp->h_u.ah_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi; + fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; + fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + break; + case ipv6_user_flow: + memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; + fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass; + fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto; + memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; + fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; + fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c +static const struct in6_addr ipv6_addr_full_mask = { + .in6_u = { + .u6_addr8 = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } + } +}; + +/** + * iavf_fill_fdir_ip6_hdr - fill the ipv6 protocol header + * @fltr: flow director filter data structure + * @proto_hdrs: flow director protocol headers data structure + * + * returns 0 if the ipv6 protocol header is set successfully + */ +static int 
+iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, + struct virtchnl_proto_hdrs *proto_hdrs) +{ + struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++]; + struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer; + + virtchnl_set_proto_hdr_type(hdr, ipv6); + + if (fltr->ip_mask.tclass == u8_max) { + iph->priority = (fltr->ip_data.tclass >> 4) & 0xf; + iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xf0; + virtchnl_add_proto_hdr_field_bit(hdr, ipv6, tc); + } + + if (fltr->ip_mask.proto == u8_max) { + iph->nexthdr = fltr->ip_data.proto; + virtchnl_add_proto_hdr_field_bit(hdr, ipv6, prot); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip, + sizeof(struct in6_addr)); + virtchnl_add_proto_hdr_field_bit(hdr, ipv6, src); + } + + if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) { + memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip, + sizeof(struct in6_addr)); + virtchnl_add_proto_hdr_field_bit(hdr, ipv6, dst); + } + + return 0; +} + + case iavf_fdir_flow_ipv6_tcp: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv6_udp: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_udp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv6_sctp: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv6_ah: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_ah_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv6_esp: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_esp_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv6_other: + err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) | + iavf_fill_fdir_l4_hdr(fltr, proto_hdrs); + break; + case iavf_fdir_flow_ipv6_tcp: + case iavf_fdir_flow_ipv6_udp: + 
case iavf_fdir_flow_ipv6_sctp: + case iavf_fdir_flow_ipv6_ah: + case iavf_fdir_flow_ipv6_esp: + case iavf_fdir_flow_ipv6_other: + case iavf_fdir_flow_ipv6_tcp: + case iavf_fdir_flow_ipv6_udp: + case iavf_fdir_flow_ipv6_sctp: + dev_info(&adapter->pdev->dev, "rule id: %u dst_ip: %pi6 src_ip %pi6 %s: dst_port %hu src_port %hu ", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohs(fltr->ip_data.dst_port), + ntohs(fltr->ip_data.src_port)); + break; + case iavf_fdir_flow_ipv6_ah: + case iavf_fdir_flow_ipv6_esp: + dev_info(&adapter->pdev->dev, "rule id: %u dst_ip: %pi6 src_ip %pi6 %s: spi %u ", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + proto, + ntohl(fltr->ip_data.spi)); + break; + case iavf_fdir_flow_ipv6_other: + dev_info(&adapter->pdev->dev, "rule id: %u dst_ip: %pi6 src_ip %pi6 proto: %u l4_bytes: 0x%x ", + fltr->loc, + &fltr->ip_data.v6_addrs.dst_ip, + &fltr->ip_data.v6_addrs.src_ip, + fltr->ip_data.proto, + ntohl(fltr->ip_data.l4_header)); + break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h + iavf_fdir_flow_ipv6_tcp, + iavf_fdir_flow_ipv6_udp, + iavf_fdir_flow_ipv6_sctp, + iavf_fdir_flow_ipv6_ah, + iavf_fdir_flow_ipv6_esp, + iavf_fdir_flow_ipv6_other, +struct iavf_ipv6_addrs { + struct in6_addr src_ip; + struct in6_addr dst_ip; +}; + + struct iavf_ipv6_addrs v6_addrs; + u8 tclass;
Networking
e90cbc257a6f3f9cc2b257acab561b197c708bab
haiyue wang
drivers
net
ethernet, iavf, intel
iavf: add support for udp segmentation offload
add code to support udp segmentation offload (uso) for hardware that supports it.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for udp segmentation offload
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['iavf ']
['c']
3
14
4
--- diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c + if (vfres->vf_cap_flags & virtchnl_vf_offload_uso) + hw_features |= netif_f_gso_udp_l4; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c - - csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); - /* compute length of segmentation header */ - *hdr_len = (l4.tcp->doff * 4) + l4_offset; + if (skb_shinfo(skb)->gso_type & skb_gso_udp_l4) { + csum_replace_by_diff(&l4.udp->check, + (__force __wsum)htonl(paylen)); + /* compute length of udp segmentation header */ + *hdr_len = (u8)sizeof(l4.udp) + l4_offset; + } else { + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(paylen)); + /* compute length of tcp segmentation header */ + *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset); + } diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c + virtchnl_vf_offload_uso |
Networking
c91a4f9feb67a199c27c2fe4df98ef9a49ab8ba0
brett creeley jesse brandeburg jesse brandeburg intel com konrad jankowski konrad jankowski intel com
drivers
net
ethernet, iavf, intel
ice: add support for xps
enable and configure xps. the driver code implemented sets up the transmit packet steering map, which in turn will be used by the kernel in queue selection during tx.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for xps
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ice ']
['h', 'c']
2
29
0
--- diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c +/** + * ice_cfg_xps_tx_ring - configure xps for a tx ring + * @ring: the tx ring to configure + * + * this enables/disables xps for a given tx descriptor ring + * based on the tcs enabled for the vsi that ring belongs to. + */ +static void ice_cfg_xps_tx_ring(struct ice_ring *ring) +{ + if (!ring->q_vector || !ring->netdev) + return; + + /* we only initialize xps once, so as not to overwrite user settings */ + if (test_and_set_bit(ice_tx_xps_init_done, ring->xps_state)) + return; + + netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, + ring->q_index); +} + + /* configure xps */ + ice_cfg_xps_tx_ring(ring); + diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h +enum ice_ring_state_t { + ice_tx_xps_init_done, + ice_tx_nbits, +}; + + declare_bitmap(xps_state, ice_tx_nbits); /* xps config state */
Networking
634da4c118434cf8a0c5eabce9eb58502ef1521c
benita bose tony brelinski tonyx brelinski intel com
drivers
net
ethernet, ice, intel
ice: add new actions support for vf fdir
add two new actions support for vf fdir:
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add new actions support for vf fdir
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ice ']
['h', 'c']
3
25
1
--- diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c + } else if (input->dest_ctl == + ice_fltr_prgm_desc_dest_direct_pkt_other) { + fdir_fltr_ctx.drop = ice_fxd_fltr_qw0_drop_no; + fdir_fltr_ctx.qindex = 0; + if (input->dest_ctl == + ice_fltr_prgm_desc_dest_direct_pkt_qgroup) + fdir_fltr_ctx.toq = input->q_region; - fdir_fltr_ctx.toq_prio = 3; + if (input->dest_ctl == ice_fltr_prgm_desc_dest_direct_pkt_other) + fdir_fltr_ctx.toq_prio = 0; + else + fdir_fltr_ctx.toq_prio = 3; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h + ice_fltr_prgm_desc_dest_direct_pkt_qgroup, + ice_fltr_prgm_desc_dest_direct_pkt_other, + /* queue region size (=2^q_region) */ + u8 q_region; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c + case virtchnl_action_passthru: + dest_num++; + input->dest_ctl = ice_fltr_prgm_desc_dest_direct_pkt_other; + break; + case virtchnl_action_q_region: + dest_num++; + input->dest_ctl = ice_fltr_prgm_desc_dest_direct_pkt_qgroup; + input->q_index = action->act_conf.queue.index; + input->q_region = action->act_conf.queue.region; + break;
Networking
346bf25043976fe106cd4f739fc67765ac292a3a
qi zhang
drivers
net
ethernet, ice, intel
ice: add non-ip layer2 protocol fdir filter for avf
add new filter type that allow forward non-ip ethernet packets base on its ethertype. the filter is only enabled when comms ddp package is loaded.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add non-ip layer2 protocol fdir filter for avf
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ice ']
['h', 'c']
6
73
4
--- diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c +static const u8 ice_fdir_non_ip_l2_pkt[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + + { + ice_fltr_ptype_non_ip_l2, + sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt, + sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt, + }, + case ice_fltr_ptype_non_ip_l2: + ice_pkt_insert_u16(loc, ice_mac_ethtype_offset, + input->ext_data.ether_type); + break; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h +#define ice_mac_ethtype_offset 12 + __be16 ether_type; /* for non_ip_l2 */ diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c +static const u32 ice_ptypes_mac_non_ip_ofos[] = { + 0x00000846, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00400000, 0x03fff000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + - if (hdrs & ice_flow_seg_hdr_pppoe) { + if (hdrs & ice_flow_seg_hdr_eth_non_ip) { + src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos; + bitmap_and(params->ptypes, params->ptypes, src, + ice_flow_ptype_max); + } else if (hdrs & ice_flow_seg_hdr_pppoe) { diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ 
b/drivers/net/ethernet/intel/ice/ice_flow.h + ice_flow_seg_hdr_eth_non_ip = 0x00800000, diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h + ice_fltr_ptype_non_ip_l2, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +static enum virtchnl_proto_hdr_type vc_pattern_ether[] = { + virtchnl_proto_hdr_eth, + virtchnl_proto_hdr_none, +}; + -static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern[] = { +static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = { + {vc_pattern_ipv4, 0, null}, + {vc_pattern_ipv4_tcp, 0, null}, + {vc_pattern_ipv4_udp, 0, null}, + {vc_pattern_ipv4_sctp, 0, null}, + {vc_pattern_ipv6, 0, null}, + {vc_pattern_ipv6_tcp, 0, null}, + {vc_pattern_ipv6_udp, 0, null}, + {vc_pattern_ipv6_sctp, 0, null}, +}; + +static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = { + {vc_pattern_ether, 0, null}, + case ice_fltr_ptype_non_ip_l2: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_eth_non_ip); + break; + struct ice_pf *pf = vf->pf; + struct ice_hw *hw; - item = vc_fdir_pattern; - *len = array_size(vc_fdir_pattern); + hw = &pf->hw; + if (!strncmp(hw->active_pkg_name, "ice comms package", + sizeof(hw->active_pkg_name))) { + item = vc_fdir_pattern_comms; + *len = array_size(vc_fdir_pattern_comms); + } else { + item = vc_fdir_pattern_os; + *len = array_size(vc_fdir_pattern_os); + } + struct ethhdr *eth; + eth = (struct ethhdr *)hdr->buffer; + input->flow_type = ice_fltr_ptype_non_ip_l2; + + if (hdr->field_selector) + input->ext_data.ether_type = eth->h_proto;
Networking
21606584f1bb4c76aeb5a113e0e8a72681a270e4
qi zhang chen bo box c chen intel com
drivers
net
ethernet, ice, intel
ice: add support for per vf ctrl vsi enabling
we are going to enable fdir configure for avf through virtual channel. the first step is to add helper functions to support control vsi setup. a control vsi will be allocated for a vf when avf creates its first fdir rule through ice_vf_ctrl_vsi_setup(). the patch will also allocate fdir rule space for vf's control vsi. if a vf asks for flow director rules, then those should come entirely from the best effort pool and not from the guaranteed pool. the patch allow a vf vsi to have only space in the best effort rules.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for per vf ctrl vsi enabling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ice ']
['h', 'c']
5
129
11
--- diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h -#define ice_fdir_msix 1 +#define ice_fdir_msix 2 +/* all vf control vsis share the same irq, so assign a unique id for them */ +#define ice_res_vf_ctrl_vec_id (ice_res_misc_vec_id - 1) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c + if (vsi->idx < pf->next_vsi && vsi->type == ice_vsi_ctrl && + vsi->vf_id != ice_inval_vfid) + pf->next_vsi = vsi->idx; - if (vsi->type == ice_vsi_ctrl) { - /* use the last vsi slot as the index for the control vsi */ + if (vsi->type == ice_vsi_ctrl && vf_id == ice_inval_vfid) { + /* use the last vsi slot as the index for pf control vsi */ + + if (vsi->type == ice_vsi_ctrl && vf_id != ice_inval_vfid) + pf->vf[vf_id].ctrl_vsi_idx = vsi->idx; - if (vsi->type != ice_vsi_pf) + if (!(vsi->type == ice_vsi_pf || vsi->type == ice_vsi_vf)) + if (vsi->type == ice_vsi_vf) { + vsi->num_gfltr = 0; + + /* each vsi gets same "best_effort" quota */ + vsi->num_bfltr = b_val; + } + - if (vsi->type != ice_vsi_pf && vsi->type != ice_vsi_ctrl) + if (vsi->type != ice_vsi_pf && vsi->type != ice_vsi_ctrl && + vsi->type != ice_vsi_vf) - base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx); + if (vsi->type == ice_vsi_ctrl && vsi->vf_id != ice_inval_vfid) { + struct ice_vf *vf; + int i; + + ice_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (i != vsi->vf_id && vf->ctrl_vsi_idx != ice_no_vsi) { + base = pf->vsi[vf->ctrl_vsi_idx]->base_vector; + break; + } + } + if (i == pf->num_alloc_vfs) + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + ice_res_vf_ctrl_vec_id); + } else { + base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, + vsi->idx); + } - if (vsi_type == ice_vsi_vf) + if (vsi_type == ice_vsi_vf || vsi_type == 
ice_vsi_ctrl) - if (vsi->type == ice_vsi_vf) + if (vsi->type == ice_vsi_vf || vsi->type == ice_vsi_ctrl) - if (vsi->type != ice_vsi_vf) { + if (vsi->type == ice_vsi_ctrl && vsi->vf_id != ice_inval_vfid) { + struct ice_vf *vf; + int i; + + ice_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (i != vsi->vf_id && vf->ctrl_vsi_idx != ice_no_vsi) + break; + } + if (i == pf->num_alloc_vfs) { + /* no other vfs left that have control vsi, reclaim sw + * interrupts back to the common pool + */ + ice_free_res(pf->irq_tracker, vsi->base_vector, + ice_res_vf_ctrl_vec_id); + pf->num_avail_sw_msix += vsi->num_q_vectors; + } + } else if (vsi->type != ice_vsi_vf) { diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c - err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0, - q_vector->name, q_vector); + if (vsi->type == ice_vsi_ctrl && vsi->vf_id != ice_inval_vfid) + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + irqf_shared, q_vector->name, + q_vector); + else + err = devm_request_irq(dev, irq_num, vsi->irq_handler, + 0, q_vector->name, q_vector); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +/** + * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove vsi access + * @vf: vf that control vsi is being invalidated on + */ +static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf) +{ + vf->ctrl_vsi_idx = ice_no_vsi; +} + +/** + * ice_vf_ctrl_vsi_release - invalidate the vf's control vsi after freeing it + * @vf: vf that control vsi is being released on + */ +static void ice_vf_ctrl_vsi_release(struct ice_vf *vf) +{ + ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]); + ice_vf_ctrl_invalidate_vsi(vf); +} + + /* free vf control vsi */ + if 
(vf->ctrl_vsi_idx != ice_no_vsi) + ice_vf_ctrl_vsi_release(vf); +/** + * ice_vf_ctrl_vsi_setup - set up a vf control vsi + * @vf: vf to setup control vsi for + * + * returns pointer to the successfully allocated vsi struct on success, + * otherwise returns null on failure. + */ +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) +{ + struct ice_port_info *pi = ice_vf_get_port_info(vf); + struct ice_pf *pf = vf->pf; + struct ice_vsi *vsi; + + vsi = ice_vsi_setup(pf, pi, ice_vsi_ctrl, vf->vf_id); + if (!vsi) { + dev_err(ice_pf_to_dev(pf), "failed to create vf control vsi "); + ice_vf_ctrl_invalidate_vsi(vf); + } + + return vsi; +} + + /* clean vf control vsi when resetting vfs since it should be + * setup only when vf creates its first fdir rule. + */ + if (vf->ctrl_vsi_idx != ice_no_vsi) + ice_vf_ctrl_invalidate_vsi(vf); + + /* clean vf control vsi when resetting vf since it should be setup + * only when vf creates its first fdir rule. + */ + if (vf->ctrl_vsi_idx != ice_no_vsi) + ice_vf_ctrl_vsi_release(vf); + + + /* ctrl_vsi_idx will be set to a valid value only when vf + * creates its first fdir rule. + */ + ice_vf_ctrl_invalidate_vsi(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h + u16 ctrl_vsi_idx; +struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
Networking
da62c5ff9dcdac67204d6647f3cd43ad931a59f4
qi zhang chen bo box c chen intel com
drivers
net
ethernet, ice, intel
ice: allow ignoring opcodes on specific vf
declare bitmap of allowed commands on vf. initialize default opcodes list that should be always supported. declare array of supported opcodes for each caps used in virtchnl code.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
add support for an allowlist/denylist of vf commands
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ice ']
['h', 'c', 'makefile']
6
199
0
--- diff --git a/drivers/net/ethernet/intel/ice/makefile b/drivers/net/ethernet/intel/ice/makefile --- a/drivers/net/ethernet/intel/ice/makefile +++ b/drivers/net/ethernet/intel/ice/makefile +ice-$(config_pci_iov) += ice_virtchnl_allowlist.o diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +// spdx-license-identifier: gpl-2.0 +/* copyright (c) 2021, intel corporation. */ + +#include "ice_virtchnl_allowlist.h" + +/* purpose of this file is to share functionality to allowlist or denylist + * opcodes used in pf <-> vf communication. group of opcodes: + * - default -> should be always allowed after creating vf, + * default_allowlist_opcodes + * - opcodes needed by vf to work correctly, but not associated with caps -> + * should be allowed after successful vf resources allocation, + * working_allowlist_opcodes + * - opcodes needed by vf when caps are activated + * + * caps that don't use new opcodes (no opcodes should be allowed): + * - virtchnl_vf_offload_rss_aq + * - virtchnl_vf_offload_rss_reg + * - virtchnl_vf_offload_wb_on_itr + * - virtchnl_vf_offload_crc + * - virtchnl_vf_offload_rx_polling + * - virtchnl_vf_offload_rss_pctype_v2 + * - virtchnl_vf_offload_encap + * - virtchnl_vf_offload_encap_csum + * - virtchnl_vf_offload_rx_encap_csum + * - virtchnl_vf_offload_uso + */ + +/* default opcodes to communicate with vf */ +static const u32 default_allowlist_opcodes[] = { + virtchnl_op_get_vf_resources, virtchnl_op_version, virtchnl_op_reset_vf, +}; + +/* opcodes supported after successful virtchnl_op_get_vf_resources */ +static const u32 working_allowlist_opcodes[] = { + virtchnl_op_config_tx_queue, virtchnl_op_config_rx_queue, + virtchnl_op_config_vsi_queues, virtchnl_op_config_irq_map, + virtchnl_op_enable_queues, virtchnl_op_disable_queues, + virtchnl_op_get_stats, virtchnl_op_event, +}; + +/* 
virtchnl_vf_offload_l2 */ +static const u32 l2_allowlist_opcodes[] = { + virtchnl_op_add_eth_addr, virtchnl_op_del_eth_addr, + virtchnl_op_config_promiscuous_mode, +}; + +/* virtchnl_vf_offload_req_queues */ +static const u32 req_queues_allowlist_opcodes[] = { + virtchnl_op_request_queues, +}; + +/* virtchnl_vf_offload_vlan */ +static const u32 vlan_allowlist_opcodes[] = { + virtchnl_op_add_vlan, virtchnl_op_del_vlan, + virtchnl_op_enable_vlan_stripping, virtchnl_op_disable_vlan_stripping, +}; + +/* virtchnl_vf_offload_rss_pf */ +static const u32 rss_pf_allowlist_opcodes[] = { + virtchnl_op_config_rss_key, virtchnl_op_config_rss_lut, + virtchnl_op_get_rss_hena_caps, virtchnl_op_set_rss_hena, +}; + +/* virtchnl_vf_offload_fdir_pf */ +static const u32 fdir_pf_allowlist_opcodes[] = { + virtchnl_op_add_fdir_filter, virtchnl_op_del_fdir_filter, +}; + +struct allowlist_opcode_info { + const u32 *opcodes; + size_t size; +}; + +#define bit_index(caps) (hweight((caps) - 1)) +#define allow_item(caps, list) \ + [bit_index(caps)] = { \ + .opcodes = list, \ + .size = array_size(list) \ + } +static const struct allowlist_opcode_info allowlist_opcodes[] = { + allow_item(virtchnl_vf_offload_l2, l2_allowlist_opcodes), + allow_item(virtchnl_vf_offload_req_queues, req_queues_allowlist_opcodes), + allow_item(virtchnl_vf_offload_vlan, vlan_allowlist_opcodes), + allow_item(virtchnl_vf_offload_rss_pf, rss_pf_allowlist_opcodes), + allow_item(virtchnl_vf_offload_fdir_pf, fdir_pf_allowlist_opcodes), +}; + +/** + * ice_vc_is_opcode_allowed - check if this opcode is allowed on this vf + * @vf: pointer to vf structure + * @opcode: virtchnl opcode + * + * return true if message is allowed on this vf + */ +bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode) +{ + if (opcode >= virtchnl_op_max) + return false; + + return test_bit(opcode, vf->opcodes_allowlist); +} + +/** + * ice_vc_allowlist_opcodes - allowlist selected opcodes + * @vf: pointer to vf structure + * @opcodes: array of 
opocodes to allowlist + * @size: size of opcodes array + * + * function should be called to allowlist opcodes on vf. + */ +static void +ice_vc_allowlist_opcodes(struct ice_vf *vf, const u32 *opcodes, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + set_bit(opcodes[i], vf->opcodes_allowlist); +} + +/** + * ice_vc_clear_allowlist - clear all allowlist opcodes + * @vf: pointer to vf structure + */ +static void ice_vc_clear_allowlist(struct ice_vf *vf) +{ + bitmap_zero(vf->opcodes_allowlist, virtchnl_op_max); +} + +/** + * ice_vc_set_default_allowlist - allowlist default opcodes for vf + * @vf: pointer to vf structure + */ +void ice_vc_set_default_allowlist(struct ice_vf *vf) +{ + ice_vc_clear_allowlist(vf); + ice_vc_allowlist_opcodes(vf, default_allowlist_opcodes, + array_size(default_allowlist_opcodes)); +} + +/** + * ice_vc_set_working_allowlist - allowlist opcodes needed to by vf to work + * @vf: pointer to vf structure + * + * allowlist opcodes that aren't associated with specific caps, but + * are needed by vf to work. + */ +void ice_vc_set_working_allowlist(struct ice_vf *vf) +{ + ice_vc_allowlist_opcodes(vf, working_allowlist_opcodes, + array_size(working_allowlist_opcodes)); +} + +/** + * ice_vc_set_caps_allowlist - allowlist vf opcodes according caps + * @vf: pointer to vf structure + */ +void ice_vc_set_caps_allowlist(struct ice_vf *vf) +{ + unsigned long caps = vf->driver_caps; + unsigned int i; + + for_each_set_bit(i, &caps, array_size(allowlist_opcodes)) + ice_vc_allowlist_opcodes(vf, allowlist_opcodes[i].opcodes, + allowlist_opcodes[i].size); +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h +/* spdx-license-identifier: gpl-2.0 */ +/* copyright (c) 2021, intel corporation. 
*/ + +#ifndef _ice_virtchnl_allowlist_h_ +#define _ice_virtchnl_allowlist_h_ +#include "ice.h" + +bool ice_vc_is_opcode_allowed(struct ice_vf *vf, u32 opcode); + +void ice_vc_set_default_allowlist(struct ice_vf *vf); +void ice_vc_set_working_allowlist(struct ice_vf *vf); +void ice_vc_set_caps_allowlist(struct ice_vf *vf); +#endif /* _ice_virtchnl_allowlist_h_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +#include "ice_virtchnl_allowlist.h" + vf->driver_caps = 0; + ice_vc_set_default_allowlist(vf); + + vf->driver_caps = 0; + ice_vc_set_default_allowlist(vf); + + ice_vc_set_default_allowlist(vf); + ice_vc_set_caps_allowlist(vf); + ice_vc_set_working_allowlist(vf); + + if (!ice_vc_is_opcode_allowed(vf, v_opcode)) { + ice_vc_send_msg_to_vf(vf, v_opcode, + virtchnl_status_err_not_supported, null, + 0); + return; + } + diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h + declare_bitmap(opcodes_allowlist, virtchnl_op_max); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h + virtchnl_op_max,
Networking
c0dcaa55f91d925c9ac2c950ff84138534337a6c
michal swiatkowski
drivers
net
avf, ethernet, ice, intel
ice: enable fdir configure for avf
the virtual channel is going to be extended to support fdir and rss configure from avf. new data structures and op codes will be added, the patch enable the fdir part.
this release includes the landlock security module, which aims to make easier to sandbox applications; support for the clang control flow integrity, which aims to abort the program upon detecting certain forms of undefined behavior; support for randomising the stack address offset in each syscall; support for concurrent tbl flushing; preparatory apple m1 support; support for incoming amd and intel graphics chips; bpf support for calling kernel functions directly; a virtio sound driver for improved sound experience on virtualized guests; io_uring support for multi shot mode and a misc cgroup for miscellaneous resources. as always, there are many other features, new drivers, improvements and fixes.
enable fdir configure for avf
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'security', 'networking', 'architectures arm x86 mips powerpc riscv s390 ia64 xtensa']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'remote processors', 'clock', 'phy ("physical layer" framework)', 'various']
['ice ']
['h', 'c', 'makefile']
10
1,372
6
+------------+-----------+------------------------------+ +-------------------------------------------------------+ --- diff --git a/drivers/net/ethernet/intel/ice/makefile b/drivers/net/ethernet/intel/ice/makefile --- a/drivers/net/ethernet/intel/ice/makefile +++ b/drivers/net/ethernet/intel/ice/makefile -ice-$(config_pci_iov) += ice_virtchnl_pf.o ice_sriov.o +ice-$(config_pci_iov) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c + input->cnt_ena = ice_fxd_fltr_qw0_stat_ena_pkts; + input->fdid_prio = ice_fxd_fltr_qw1_fdid_pri_three; + input->comp_report = ice_fxd_fltr_qw0_comp_report_sw_fail; + diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c - fdir_fltr_ctx.cnt_ena = ice_fxd_fltr_qw0_stat_ena_pkts; + fdir_fltr_ctx.cnt_ena = input->cnt_ena; - fdir_fltr_ctx.comp_report = ice_fxd_fltr_qw0_comp_report_sw_fail; - fdir_fltr_ctx.fdid_prio = 3; + fdir_fltr_ctx.comp_report = input->comp_report; + fdir_fltr_ctx.fdid_prio = input->fdid_prio; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h +#define ice_fdir_max_fltrs 16384 + + u8 cnt_ena; + u8 fdid_prio; + u8 comp_report; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +#define ice_fxd_fltr_qw0_comp_report_sw 0x2ull +#define ice_fxd_fltr_qw1_fdid_pri_three 0x3ull diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c 
b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +// spdx-license-identifier: gpl-2.0 +/* copyright (c) 2021, intel corporation. */ + +#include "ice.h" +#include "ice_base.h" +#include "ice_lib.h" +#include "ice_flow.h" + +#define to_fltr_conf_from_desc(p) \ + container_of(p, struct virtchnl_fdir_fltr_conf, input) + +#define ice_flow_prof_type_s 0 +#define ice_flow_prof_type_m (0xffffffffull << ice_flow_prof_type_s) +#define ice_flow_prof_vsi_s 32 +#define ice_flow_prof_vsi_m (0xffffffffull << ice_flow_prof_vsi_s) + +/* flow profile id format: + * [0:31] - flow type, flow + tun_offs + * [32:63] - vsi index + */ +#define ice_flow_prof_fd(vsi, flow, tun_offs) \ + ((u64)(((((flow) + (tun_offs)) & ice_flow_prof_type_m)) | \ + (((u64)(vsi) << ice_flow_prof_vsi_s) & ice_flow_prof_vsi_m))) + +struct virtchnl_fdir_fltr_conf { + struct ice_fdir_fltr input; +}; + +struct virtchnl_fdir_inset_map { + enum virtchnl_proto_hdr_field field; + enum ice_flow_field fld; +}; + +static const struct virtchnl_fdir_inset_map fdir_inset_map[] = { + {virtchnl_proto_hdr_ipv4_src, ice_flow_field_idx_ipv4_sa}, + {virtchnl_proto_hdr_ipv4_dst, ice_flow_field_idx_ipv4_da}, + {virtchnl_proto_hdr_ipv4_dscp, ice_flow_field_idx_ipv4_dscp}, + {virtchnl_proto_hdr_ipv4_ttl, ice_flow_field_idx_ipv4_ttl}, + {virtchnl_proto_hdr_ipv4_prot, ice_flow_field_idx_ipv4_prot}, + {virtchnl_proto_hdr_ipv6_src, ice_flow_field_idx_ipv6_sa}, + {virtchnl_proto_hdr_ipv6_dst, ice_flow_field_idx_ipv6_da}, + {virtchnl_proto_hdr_ipv6_tc, ice_flow_field_idx_ipv6_dscp}, + {virtchnl_proto_hdr_ipv6_hop_limit, ice_flow_field_idx_ipv6_ttl}, + {virtchnl_proto_hdr_ipv6_prot, ice_flow_field_idx_ipv6_prot}, + {virtchnl_proto_hdr_udp_src_port, ice_flow_field_idx_udp_src_port}, + {virtchnl_proto_hdr_udp_dst_port, ice_flow_field_idx_udp_dst_port}, + {virtchnl_proto_hdr_tcp_src_port, ice_flow_field_idx_tcp_src_port}, + {virtchnl_proto_hdr_tcp_dst_port, 
ice_flow_field_idx_tcp_dst_port}, + {virtchnl_proto_hdr_sctp_src_port, ice_flow_field_idx_sctp_src_port}, + {virtchnl_proto_hdr_sctp_dst_port, ice_flow_field_idx_sctp_dst_port}, +}; + +/** + * ice_vc_fdir_param_check + * @vf: pointer to the vf structure + * @vsi_id: vf relative vsi id + * + * check for the valid vsi id, pf's state and vf's state + * + * return: 0 on success, and -einval on error. + */ +static int +ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) +{ + struct ice_pf *pf = vf->pf; + + if (!test_bit(ice_flag_fd_ena, pf->flags)) + return -einval; + + if (!test_bit(ice_vf_state_active, vf->vf_states)) + return -einval; + + if (!(vf->driver_caps & virtchnl_vf_offload_fdir_pf)) + return -einval; + + if (vsi_id != vf->lan_vsi_num) + return -einval; + + if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) + return -einval; + + if (!pf->vsi[vf->lan_vsi_idx]) + return -einval; + + return 0; +} + +/** + * ice_vf_start_ctrl_vsi + * @vf: pointer to the vf structure + * + * allocate ctrl_vsi for the first time and open the ctrl_vsi port for vf + * + * return: 0 on success, and other on error. 
+ */ +static int ice_vf_start_ctrl_vsi(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct ice_vsi *ctrl_vsi; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + if (vf->ctrl_vsi_idx != ice_no_vsi) + return -eexist; + + ctrl_vsi = ice_vf_ctrl_vsi_setup(vf); + if (!ctrl_vsi) { + dev_dbg(dev, "could not setup control vsi for vf %d ", + vf->vf_id); + return -enomem; + } + + err = ice_vsi_open_ctrl(ctrl_vsi); + if (err) { + dev_dbg(dev, "could not open control vsi for vf %d ", + vf->vf_id); + goto err_vsi_open; + } + + return 0; + +err_vsi_open: + ice_vsi_release(ctrl_vsi); + if (vf->ctrl_vsi_idx != ice_no_vsi) { + pf->vsi[vf->ctrl_vsi_idx] = null; + vf->ctrl_vsi_idx = ice_no_vsi; + } + return err; +} + +/** + * ice_vc_fdir_alloc_prof - allocate profile for this filter flow type + * @vf: pointer to the vf structure + * @flow: filter flow type + * + * return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + if (!fdir->fdir_prof) { + fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf), + ice_fltr_ptype_max, + sizeof(*fdir->fdir_prof), + gfp_kernel); + if (!fdir->fdir_prof) + return -enomem; + } + + if (!fdir->fdir_prof[flow]) { + fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf), + sizeof(**fdir->fdir_prof), + gfp_kernel); + if (!fdir->fdir_prof[flow]) + return -enomem; + } + + return 0; +} + +/** + * ice_vc_fdir_free_prof - free profile for this filter flow type + * @vf: pointer to the vf structure + * @flow: filter flow type + */ +static void +ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + if (!fdir->fdir_prof) + return; + + if (!fdir->fdir_prof[flow]) + return; + + devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]); + fdir->fdir_prof[flow] = null; +} + +/** + * ice_vc_fdir_free_prof_all - free all the profile for this vf + * @vf: pointer to the vf 
structure + */ +static void ice_vc_fdir_free_prof_all(struct ice_vf *vf) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + enum ice_fltr_ptype flow; + + if (!fdir->fdir_prof) + return; + + for (flow = ice_fltr_ptype_nonf_none; flow < ice_fltr_ptype_max; flow++) + ice_vc_fdir_free_prof(vf, flow); + + devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof); + fdir->fdir_prof = null; +} + +/** + * ice_vc_fdir_parse_flow_fld + * @proto_hdr: virtual channel protocol filter header + * @conf: fdir configuration for each filter + * @fld: field type array + * @fld_cnt: field counter + * + * parse the virtual channel filter header and store them into field type array + * + * return: 0 on success, and other on error. + */ +static int +ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr, + struct virtchnl_fdir_fltr_conf *conf, + enum ice_flow_field *fld, int *fld_cnt) +{ + struct virtchnl_proto_hdr hdr; + u32 i; + + memcpy(&hdr, proto_hdr, sizeof(hdr)); + + for (i = 0; (i < array_size(fdir_inset_map)) && + virtchnl_get_proto_hdr_field(&hdr); i++) + if (virtchnl_test_proto_hdr(&hdr, fdir_inset_map[i].field)) { + fld[*fld_cnt] = fdir_inset_map[i].fld; + *fld_cnt += 1; + if (*fld_cnt >= ice_flow_field_idx_max) + return -einval; + virtchnl_del_proto_hdr_field(&hdr, + fdir_inset_map[i].field); + } + + return 0; +} + +/** + * ice_vc_fdir_set_flow_fld + * @vf: pointer to the vf structure + * @fltr: virtual channel add cmd buffer + * @conf: fdir configuration for each filter + * @seg: array of one or more packet segments that describe the flow + * + * parse the virtual channel add msg buffer's field vector and store them into + * flow's packet segment field + * + * return: 0 on success, and other on error. 
+ */ +static int +ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf, + struct ice_flow_seg_info *seg) +{ + struct virtchnl_fdir_rule *rule = &fltr->rule_cfg; + enum ice_flow_field fld[ice_flow_field_idx_max]; + struct device *dev = ice_pf_to_dev(vf->pf); + struct virtchnl_proto_hdrs *proto; + int fld_cnt = 0; + int i; + + proto = &rule->proto_hdrs; + for (i = 0; i < proto->count; i++) { + struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; + int ret; + + ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt); + if (ret) + return ret; + } + + if (fld_cnt == 0) { + dev_dbg(dev, "empty input set for vf %d ", vf->vf_id); + return -einval; + } + + for (i = 0; i < fld_cnt; i++) + ice_flow_set_fld(seg, fld[i], + ice_flow_fld_off_inval, + ice_flow_fld_off_inval, + ice_flow_fld_off_inval, false); + + return 0; +} + +/** + * ice_vc_fdir_set_flow_hdr - config the flow's packet segment header + * @vf: pointer to the vf structure + * @conf: fdir configuration for each filter + * @seg: array of one or more packet segments that describe the flow + * + * return: 0 on success, and other on error. 
+ */ +static int +ice_vc_fdir_set_flow_hdr(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + struct ice_flow_seg_info *seg) +{ + enum ice_fltr_ptype flow = conf->input.flow_type; + struct device *dev = ice_pf_to_dev(vf->pf); + + switch (flow) { + case ice_fltr_ptype_nonf_ipv4_other: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_ipv4 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv4_tcp: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_tcp | + ice_flow_seg_hdr_ipv4 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv4_udp: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_udp | + ice_flow_seg_hdr_ipv4 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv4_sctp: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_sctp | + ice_flow_seg_hdr_ipv4 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv6_other: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_ipv6 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv6_tcp: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_tcp | + ice_flow_seg_hdr_ipv6 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv6_udp: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_udp | + ice_flow_seg_hdr_ipv6 | + ice_flow_seg_hdr_ipv_other); + break; + case ice_fltr_ptype_nonf_ipv6_sctp: + ice_flow_set_hdrs(seg, ice_flow_seg_hdr_sctp | + ice_flow_seg_hdr_ipv6 | + ice_flow_seg_hdr_ipv_other); + break; + default: + dev_dbg(dev, "invalid flow type 0x%x for vf %d failed ", + flow, vf->vf_id); + return -einval; + } + + return 0; +} + +/** + * ice_vc_fdir_rem_prof - remove profile for this filter flow type + * @vf: pointer to the vf structure + * @flow: filter flow type + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + */ +static void +ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + struct ice_fd_hw_prof *vf_prof; + struct ice_pf *pf = vf->pf; + struct ice_vsi *vf_vsi; + struct 
device *dev; + struct ice_hw *hw; + u64 prof_id; + int i; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + if (!fdir->fdir_prof || !fdir->fdir_prof[flow]) + return; + + vf_prof = fdir->fdir_prof[flow]; + + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) { + dev_dbg(dev, "null vf %d vsi pointer ", vf->vf_id); + return; + } + + if (!fdir->prof_entry_cnt[flow][tun]) + return; + + prof_id = ice_flow_prof_fd(vf_vsi->vsi_num, + flow, tun ? ice_fltr_ptype_max : 0); + + for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++) + if (vf_prof->entry_h[i][tun]) { + u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]); + + ice_rem_prof_id_flow(hw, ice_blk_fd, vsi_num, prof_id); + ice_flow_rem_entry(hw, ice_blk_fd, + vf_prof->entry_h[i][tun]); + vf_prof->entry_h[i][tun] = 0; + } + + ice_flow_rem_prof(hw, ice_blk_fd, prof_id); + devm_kfree(dev, vf_prof->fdir_seg[tun]); + vf_prof->fdir_seg[tun] = null; + + for (i = 0; i < vf_prof->cnt; i++) + vf_prof->vsi_h[i] = 0; + + fdir->prof_entry_cnt[flow][tun] = 0; +} + +/** + * ice_vc_fdir_rem_prof_all - remove profile for this vf + * @vf: pointer to the vf structure + */ +static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf) +{ + enum ice_fltr_ptype flow; + + for (flow = ice_fltr_ptype_nonf_none; + flow < ice_fltr_ptype_max; flow++) { + ice_vc_fdir_rem_prof(vf, flow, 0); + ice_vc_fdir_rem_prof(vf, flow, 1); + } +} + +/** + * ice_vc_fdir_write_flow_prof + * @vf: pointer to the vf structure + * @flow: filter flow type + * @seg: array of one or more packet segments that describe the flow + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + * + * write the flow's profile config and packet segment into the hardware + * + * return: 0 on success, and other on error. 
+ */ +static int +ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, + struct ice_flow_seg_info *seg, int tun) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + struct ice_vsi *vf_vsi, *ctrl_vsi; + struct ice_flow_seg_info *old_seg; + struct ice_flow_prof *prof = null; + struct ice_fd_hw_prof *vf_prof; + enum ice_status status; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + u64 entry1_h = 0; + u64 entry2_h = 0; + u64 prof_id; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vf_vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vf_vsi) + return -einval; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) + return -einval; + + vf_prof = fdir->fdir_prof[flow]; + old_seg = vf_prof->fdir_seg[tun]; + if (old_seg) { + if (!memcmp(old_seg, seg, sizeof(*seg))) { + dev_dbg(dev, "duplicated profile for vf %d! ", + vf->vf_id); + return -eexist; + } + + if (fdir->fdir_fltr_cnt[flow][tun]) { + ret = -einval; + dev_dbg(dev, "input set conflicts for vf %d ", + vf->vf_id); + goto err_exit; + } + + /* remove previously allocated profile */ + ice_vc_fdir_rem_prof(vf, flow, tun); + } + + prof_id = ice_flow_prof_fd(vf_vsi->vsi_num, flow, + tun ? 
ice_fltr_ptype_max : 0); + + status = ice_flow_add_prof(hw, ice_blk_fd, ice_flow_rx, prof_id, seg, + tun + 1, &prof); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "could not add vsi flow 0x%x for vf %d ", + flow, vf->vf_id); + goto err_exit; + } + + status = ice_flow_add_entry(hw, ice_blk_fd, prof_id, vf_vsi->idx, + vf_vsi->idx, ice_flow_prio_normal, + seg, &entry1_h); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "could not add flow 0x%x vsi entry for vf %d ", + flow, vf->vf_id); + goto err_prof; + } + + status = ice_flow_add_entry(hw, ice_blk_fd, prof_id, vf_vsi->idx, + ctrl_vsi->idx, ice_flow_prio_normal, + seg, &entry2_h); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, + "could not add flow 0x%x ctrl vsi entry for vf %d ", + flow, vf->vf_id); + goto err_entry_1; + } + + vf_prof->fdir_seg[tun] = seg; + vf_prof->cnt = 0; + fdir->prof_entry_cnt[flow][tun] = 0; + + vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h; + vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx; + vf_prof->cnt++; + fdir->prof_entry_cnt[flow][tun]++; + + vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h; + vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx; + vf_prof->cnt++; + fdir->prof_entry_cnt[flow][tun]++; + + return 0; + +err_entry_1: + ice_rem_prof_id_flow(hw, ice_blk_fd, + ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id); + ice_flow_rem_entry(hw, ice_blk_fd, entry1_h); +err_prof: + ice_flow_rem_prof(hw, ice_blk_fd, prof_id); +err_exit: + return ret; +} + +/** + * ice_vc_fdir_config_input_set + * @vf: pointer to the vf structure + * @fltr: virtual channel add cmd buffer + * @conf: fdir configuration for each filter + * @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter + * + * config the input set type and value for virtual channel add msg buffer + * + * return: 0 on success, and other on error. 
+ */ +static int +ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf, int tun) +{ + struct ice_fdir_fltr *input = &conf->input; + struct device *dev = ice_pf_to_dev(vf->pf); + struct ice_flow_seg_info *seg; + enum ice_fltr_ptype flow; + int ret; + + flow = input->flow_type; + ret = ice_vc_fdir_alloc_prof(vf, flow); + if (ret) { + dev_dbg(dev, "alloc flow prof for vf %d failed ", vf->vf_id); + return ret; + } + + seg = devm_kzalloc(dev, sizeof(*seg), gfp_kernel); + if (!seg) + return -enomem; + + ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg); + if (ret) { + dev_dbg(dev, "set flow field for vf %d failed ", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg); + if (ret) { + dev_dbg(dev, "set flow hdr for vf %d failed ", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun); + if (ret == -eexist) { + devm_kfree(dev, seg); + } else if (ret) { + dev_dbg(dev, "write flow profile for vf %d failed ", + vf->vf_id); + goto err_exit; + } + + return 0; + +err_exit: + devm_kfree(dev, seg); + return ret; +} + +/** + * ice_vc_validate_fdir_fltr - validate the virtual channel filter + * @vf: pointer to the vf info + * @fltr: virtual channel add cmd buffer + * @conf: fdir configuration for each filter + * + * return: 0 on success, and other on error. + */ +static int +ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, + struct virtchnl_fdir_fltr_conf *conf) +{ + /* todo: rule validation */ + return -einval; +} + +/** + * ice_vc_fdir_comp_rules - compare if two filter rules have the same value + * @conf_a: fdir configuration for filter a + * @conf_b: fdir configuration for filter b + * + * return: 0 on success, and other on error. 
+ */ +static bool +ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a, + struct virtchnl_fdir_fltr_conf *conf_b) +{ + struct ice_fdir_fltr *a = &conf_a->input; + struct ice_fdir_fltr *b = &conf_b->input; + + if (a->flow_type != b->flow_type) + return false; + if (memcmp(&a->ip, &b->ip, sizeof(a->ip))) + return false; + if (memcmp(&a->mask, &b->mask, sizeof(a->mask))) + return false; + if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data))) + return false; + if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask))) + return false; + + return true; +} + +/** + * ice_vc_fdir_is_dup_fltr + * @vf: pointer to the vf info + * @conf: fdir configuration for each filter + * + * check if there is duplicated rule with same conf value + * + * return: 0 true success, and false on error. + */ +static bool +ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf) +{ + struct ice_fdir_fltr *desc; + bool ret; + + list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) { + struct virtchnl_fdir_fltr_conf *node = + to_fltr_conf_from_desc(desc); + + ret = ice_vc_fdir_comp_rules(node, conf); + if (ret) + return true; + } + + return false; +} + +/** + * ice_vc_fdir_insert_entry + * @vf: pointer to the vf info + * @conf: fdir configuration for each filter + * @id: pointer to id value allocated by driver + * + * insert fdir conf entry into list and allocate id for this filter + * + * return: 0 true success, and other on error. 
+ */ +static int +ice_vc_fdir_insert_entry(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, u32 *id) +{ + struct ice_fdir_fltr *input = &conf->input; + int i; + + /* alloc id corresponding with conf */ + i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0, + ice_fdir_max_fltrs, gfp_kernel); + if (i < 0) + return -einval; + *id = i; + + list_add(&input->fltr_node, &vf->fdir.fdir_rule_list); + return 0; +} + +/** + * ice_vc_fdir_remove_entry - remove fdir conf entry by id value + * @vf: pointer to the vf info + * @conf: fdir configuration for each filter + * @id: filter rule's id + */ +static void +ice_vc_fdir_remove_entry(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, u32 id) +{ + struct ice_fdir_fltr *input = &conf->input; + + idr_remove(&vf->fdir.fdir_rule_idr, id); + list_del(&input->fltr_node); +} + +/** + * ice_vc_fdir_lookup_entry - lookup fdir conf entry by id value + * @vf: pointer to the vf info + * @id: filter rule's id + * + * return: null on error, and other on success. + */ +static struct virtchnl_fdir_fltr_conf * +ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id) +{ + return idr_find(&vf->fdir.fdir_rule_idr, id); +} + +/** + * ice_vc_fdir_flush_entry - remove all fdir conf entry + * @vf: pointer to the vf info + */ +static void ice_vc_fdir_flush_entry(struct ice_vf *vf) +{ + struct virtchnl_fdir_fltr_conf *conf; + struct ice_fdir_fltr *desc, *temp; + + list_for_each_entry_safe(desc, temp, + &vf->fdir.fdir_rule_list, fltr_node) { + conf = to_fltr_conf_from_desc(desc); + list_del(&desc->fltr_node); + devm_kfree(ice_pf_to_dev(vf->pf), conf); + } +} + +/** + * ice_vc_fdir_write_fltr - write filter rule into hardware + * @vf: pointer to the vf info + * @conf: fdir configuration for each filter + * @add: true implies add rule, false implies del rules + * @is_tun: false implies non-tunnel type filter, true implies tunnel filter + * + * return: 0 on success, and other on error. 
+ */ +static int ice_vc_fdir_write_fltr(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + bool add, bool is_tun) +{ + struct ice_fdir_fltr *input = &conf->input; + struct ice_vsi *vsi, *ctrl_vsi; + struct ice_fltr_desc desc; + enum ice_status status; + struct device *dev; + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + u8 *pkt; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + vsi = pf->vsi[vf->lan_vsi_idx]; + if (!vsi) { + dev_dbg(dev, "invalid vsi for vf %d ", vf->vf_id); + return -einval; + } + + input->dest_vsi = vsi->idx; + input->comp_report = ice_fxd_fltr_qw0_comp_report_sw_fail; + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_dbg(dev, "invalid ctrl_vsi for vf %d ", vf->vf_id); + return -einval; + } + + pkt = devm_kzalloc(dev, ice_fdir_max_raw_pkt_size, gfp_kernel); + if (!pkt) + return -enomem; + + ice_fdir_get_prgm_desc(hw, input, &desc, add); + status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + ret = ice_status_to_errno(status); + if (ret) { + dev_dbg(dev, "gen training pkt for vf %d ptype %d failed ", + vf->vf_id, input->flow_type); + goto err_free_pkt; + } + + ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); + if (ret) + goto err_free_pkt; + + return 0; + +err_free_pkt: + devm_kfree(dev, pkt); + return ret; +} + +/** + * ice_vc_add_fdir_fltr - add a fdir filter for vf by the msg buffer + * @vf: pointer to the vf info + * @msg: pointer to the msg buffer + * + * return: 0 on success, and other on error. 
+ */ +int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg; + struct virtchnl_fdir_add *stat = null; + struct virtchnl_fdir_fltr_conf *conf; + enum virtchnl_status_code v_ret; + struct device *dev; + struct ice_pf *pf; + int is_tun = 0; + int len = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = virtchnl_status_err_param; + dev_dbg(dev, "parameter check for vf %d failed ", vf->vf_id); + goto err_exit; + } + + ret = ice_vf_start_ctrl_vsi(vf); + if (ret && (ret != -eexist)) { + v_ret = virtchnl_status_err_param; + dev_err(dev, "init fdir for vf %d failed, ret:%d ", + vf->vf_id, ret); + goto err_exit; + } + + stat = kzalloc(sizeof(*stat), gfp_kernel); + if (!stat) { + v_ret = virtchnl_status_err_no_memory; + dev_dbg(dev, "alloc stat for vf %d failed ", vf->vf_id); + goto err_exit; + } + + conf = devm_kzalloc(dev, sizeof(*conf), gfp_kernel); + if (!conf) { + v_ret = virtchnl_status_err_no_memory; + dev_dbg(dev, "alloc conf for vf %d failed ", vf->vf_id); + goto err_exit; + } + + len = sizeof(*stat); + ret = ice_vc_validate_fdir_fltr(vf, fltr, conf); + if (ret) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_invalid; + dev_dbg(dev, "invalid fdir filter from vf %d ", vf->vf_id); + goto err_free_conf; + } + + if (fltr->validate_only) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_success; + devm_kfree(dev, conf); + ret = ice_vc_send_msg_to_vf(vf, virtchnl_op_add_fdir_filter, + v_ret, (u8 *)stat, len); + goto exit; + } + + ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun); + if (ret) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_conflict; + dev_err(dev, "vf %d: fdir input set configure failed, ret:%d ", + vf->vf_id, ret); + goto err_free_conf; + } + + ret = ice_vc_fdir_is_dup_fltr(vf, conf); + if (ret) { + v_ret = 
virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_exist; + dev_dbg(dev, "vf %d: duplicated fdir rule detected ", + vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_insert_entry(vf, conf, &stat->flow_id); + if (ret) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_noresource; + dev_dbg(dev, "vf %d: insert fdir list failed ", vf->vf_id); + goto err_free_conf; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun); + if (ret) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_noresource; + dev_err(dev, "vf %d: writing fdir rule failed, ret:%d ", + vf->vf_id, ret); + goto err_rem_entry; + } + + vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++; + + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_success; +exit: + ret = ice_vc_send_msg_to_vf(vf, virtchnl_op_add_fdir_filter, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; + +err_rem_entry: + ice_vc_fdir_remove_entry(vf, conf, stat->flow_id); +err_free_conf: + devm_kfree(dev, conf); +err_exit: + ret = ice_vc_send_msg_to_vf(vf, virtchnl_op_add_fdir_filter, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + +/** + * ice_vc_del_fdir_fltr - delete a fdir filter for vf by the msg buffer + * @vf: pointer to the vf info + * @msg: pointer to the msg buffer + * + * return: 0 on success, and other on error. 
+ */ +int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) +{ + struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg; + struct virtchnl_fdir_del *stat = null; + struct virtchnl_fdir_fltr_conf *conf; + enum virtchnl_status_code v_ret; + struct device *dev; + struct ice_pf *pf; + int is_tun = 0; + int len = 0; + int ret; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); + if (ret) { + v_ret = virtchnl_status_err_param; + dev_dbg(dev, "parameter check for vf %d failed ", vf->vf_id); + goto err_exit; + } + + stat = kzalloc(sizeof(*stat), gfp_kernel); + if (!stat) { + v_ret = virtchnl_status_err_no_memory; + dev_dbg(dev, "alloc stat for vf %d failed ", vf->vf_id); + goto err_exit; + } + + len = sizeof(*stat); + + conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id); + if (!conf) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_nonexist; + dev_dbg(dev, "vf %d: fdir invalid flow_id:0x%x ", + vf->vf_id, fltr->flow_id); + goto err_exit; + } + + /* just return failure when ctrl_vsi idx is invalid */ + if (vf->ctrl_vsi_idx == ice_no_vsi) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_noresource; + dev_err(dev, "invalid fdir ctrl_vsi for vf %d ", vf->vf_id); + goto err_exit; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); + if (ret) { + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_failure_rule_noresource; + dev_err(dev, "vf %d: writing fdir rule failed, ret:%d ", + vf->vf_id, ret); + goto err_exit; + } + + ice_vc_fdir_remove_entry(vf, conf, fltr->flow_id); + devm_kfree(dev, conf); + vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--; + + v_ret = virtchnl_status_success; + stat->status = virtchnl_fdir_success; + +err_exit: + ret = ice_vc_send_msg_to_vf(vf, virtchnl_op_del_fdir_filter, v_ret, + (u8 *)stat, len); + kfree(stat); + return ret; +} + +/** + * ice_vf_fdir_init - init fdir resource for vf + * @vf: pointer to the 
vf info + */ +void ice_vf_fdir_init(struct ice_vf *vf) +{ + struct ice_vf_fdir *fdir = &vf->fdir; + + idr_init(&fdir->fdir_rule_idr); + init_list_head(&fdir->fdir_rule_list); +} + +/** + * ice_vf_fdir_exit - destroy fdir resource for vf + * @vf: pointer to the vf info + */ +void ice_vf_fdir_exit(struct ice_vf *vf) +{ + ice_vc_fdir_flush_entry(vf); + idr_destroy(&vf->fdir.fdir_rule_idr); + ice_vc_fdir_rem_prof_all(vf); + ice_vc_fdir_free_prof_all(vf); +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h +/* spdx-license-identifier: gpl-2.0 */ +/* copyright (c) 2021, intel corporation. */ + +#ifndef _ice_virtchnl_fdir_h_ +#define _ice_virtchnl_fdir_h_ + +struct ice_vf; + +/* vf fdir information structure */ +struct ice_vf_fdir { + u16 fdir_fltr_cnt[ice_fltr_ptype_max][ice_fd_hw_seg_max]; + int prof_entry_cnt[ice_fltr_ptype_max][ice_fd_hw_seg_max]; + struct ice_fd_hw_prof **fdir_prof; + + struct idr fdir_rule_idr; + struct list_head fdir_rule_list; +}; + +int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg); +int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg); +void ice_vf_fdir_init(struct ice_vf *vf); +void ice_vf_fdir_exit(struct ice_vf *vf); + +#endif /* _ice_virtchnl_fdir_h_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c + ice_vf_fdir_exit(vf); + ice_vf_fdir_exit(vf); + ice_vf_fdir_exit(vf); + ice_vf_fdir_init(vf); -static int +int + if (vf->driver_caps & virtchnl_vf_offload_fdir_pf) + vfres->vf_cap_flags |= virtchnl_vf_offload_fdir_pf; + -static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) + case virtchnl_op_add_fdir_filter: + err = ice_vc_add_fdir_fltr(vf, msg); + break; + case 
virtchnl_op_del_fdir_filter: + err = ice_vc_del_fdir_fltr(vf, msg); + break; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +#include "ice_virtchnl_fdir.h" + struct ice_vf_fdir fdir; +int +ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen); +bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id); diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h + /* opcode 34 - 46 are reserved */ + virtchnl_op_add_fdir_filter = 47, + virtchnl_op_del_fdir_filter = 48, +#define virtchnl_vf_offload_fdir_pf 0x10000000 + virtchnl_action_passthru, + virtchnl_action_queue, + virtchnl_action_q_region, + virtchnl_action_mark, + virtchnl_action_count, +#define virtchnl_max_num_proto_hdrs 32 +#define proto_hdr_shift 5 +#define proto_hdr_field_start(proto_hdr_type) ((proto_hdr_type) << proto_hdr_shift) +#define proto_hdr_field_mask ((1ul << proto_hdr_shift) - 1) + +/* vf use these macros to configure each protocol header. + * specify which protocol headers and protocol header fields base on + * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field. 
+ * @param hdr: a struct of virtchnl_proto_hdr + * @param hdr_type: eth/ipv4/tcp, etc + * @param field: src/dst/teid/spi, etc + */ +#define virtchnl_add_proto_hdr_field(hdr, field) \ + ((hdr)->field_selector |= bit((field) & proto_hdr_field_mask)) +#define virtchnl_del_proto_hdr_field(hdr, field) \ + ((hdr)->field_selector &= ~bit((field) & proto_hdr_field_mask)) +#define virtchnl_test_proto_hdr_field(hdr, val) \ + ((hdr)->field_selector & bit((val) & proto_hdr_field_mask)) +#define virtchnl_get_proto_hdr_field(hdr) ((hdr)->field_selector) + +#define virtchnl_add_proto_hdr_field_bit(hdr, hdr_type, field) \ + (virtchnl_add_proto_hdr_field(hdr, \ + virtchnl_proto_hdr_ ## hdr_type ## _ ## field)) +#define virtchnl_del_proto_hdr_field_bit(hdr, hdr_type, field) \ + (virtchnl_del_proto_hdr_field(hdr, \ + virtchnl_proto_hdr_ ## hdr_type ## _ ## field)) + +#define virtchnl_set_proto_hdr_type(hdr, hdr_type) \ + ((hdr)->type = virtchnl_proto_hdr_ ## hdr_type) +#define virtchnl_get_proto_hdr_type(hdr) \ + (((hdr)->type) >> proto_hdr_shift) +#define virtchnl_test_proto_hdr_type(hdr, val) \ + ((hdr)->type == ((val) >> proto_hdr_shift)) +#define virtchnl_test_proto_hdr(hdr, val) \ + (virtchnl_test_proto_hdr_type((hdr), (val)) && \ + virtchnl_test_proto_hdr_field((hdr), (val))) + +/* protocol header type within a packet segment. a segment consists of one or + * more protocol headers that make up a logical group of protocol headers. each + * logical group of protocol headers encapsulates or is encapsulated using/by + * tunneling or encapsulation protocols for network virtualization. 
+ */ +enum virtchnl_proto_hdr_type { + virtchnl_proto_hdr_none, + virtchnl_proto_hdr_eth, + virtchnl_proto_hdr_s_vlan, + virtchnl_proto_hdr_c_vlan, + virtchnl_proto_hdr_ipv4, + virtchnl_proto_hdr_ipv6, + virtchnl_proto_hdr_tcp, + virtchnl_proto_hdr_udp, + virtchnl_proto_hdr_sctp, + virtchnl_proto_hdr_gtpu_ip, + virtchnl_proto_hdr_gtpu_eh, + virtchnl_proto_hdr_gtpu_eh_pdu_dwn, + virtchnl_proto_hdr_gtpu_eh_pdu_up, + virtchnl_proto_hdr_pppoe, + virtchnl_proto_hdr_l2tpv3, + virtchnl_proto_hdr_esp, + virtchnl_proto_hdr_ah, + virtchnl_proto_hdr_pfcp, +}; + +/* protocol header field within a protocol header. */ +enum virtchnl_proto_hdr_field { + /* ether */ + virtchnl_proto_hdr_eth_src = + proto_hdr_field_start(virtchnl_proto_hdr_eth), + virtchnl_proto_hdr_eth_dst, + virtchnl_proto_hdr_eth_ethertype, + /* s-vlan */ + virtchnl_proto_hdr_s_vlan_id = + proto_hdr_field_start(virtchnl_proto_hdr_s_vlan), + /* c-vlan */ + virtchnl_proto_hdr_c_vlan_id = + proto_hdr_field_start(virtchnl_proto_hdr_c_vlan), + /* ipv4 */ + virtchnl_proto_hdr_ipv4_src = + proto_hdr_field_start(virtchnl_proto_hdr_ipv4), + virtchnl_proto_hdr_ipv4_dst, + virtchnl_proto_hdr_ipv4_dscp, + virtchnl_proto_hdr_ipv4_ttl, + virtchnl_proto_hdr_ipv4_prot, + /* ipv6 */ + virtchnl_proto_hdr_ipv6_src = + proto_hdr_field_start(virtchnl_proto_hdr_ipv6), + virtchnl_proto_hdr_ipv6_dst, + virtchnl_proto_hdr_ipv6_tc, + virtchnl_proto_hdr_ipv6_hop_limit, + virtchnl_proto_hdr_ipv6_prot, + /* tcp */ + virtchnl_proto_hdr_tcp_src_port = + proto_hdr_field_start(virtchnl_proto_hdr_tcp), + virtchnl_proto_hdr_tcp_dst_port, + /* udp */ + virtchnl_proto_hdr_udp_src_port = + proto_hdr_field_start(virtchnl_proto_hdr_udp), + virtchnl_proto_hdr_udp_dst_port, + /* sctp */ + virtchnl_proto_hdr_sctp_src_port = + proto_hdr_field_start(virtchnl_proto_hdr_sctp), + virtchnl_proto_hdr_sctp_dst_port, + /* gtpu_ip */ + virtchnl_proto_hdr_gtpu_ip_teid = + proto_hdr_field_start(virtchnl_proto_hdr_gtpu_ip), + /* gtpu_eh */ + 
virtchnl_proto_hdr_gtpu_eh_pdu = + proto_hdr_field_start(virtchnl_proto_hdr_gtpu_eh), + virtchnl_proto_hdr_gtpu_eh_qfi, + /* pppoe */ + virtchnl_proto_hdr_pppoe_sess_id = + proto_hdr_field_start(virtchnl_proto_hdr_pppoe), + /* l2tpv3 */ + virtchnl_proto_hdr_l2tpv3_sess_id = + proto_hdr_field_start(virtchnl_proto_hdr_l2tpv3), + /* esp */ + virtchnl_proto_hdr_esp_spi = + proto_hdr_field_start(virtchnl_proto_hdr_esp), + /* ah */ + virtchnl_proto_hdr_ah_spi = + proto_hdr_field_start(virtchnl_proto_hdr_ah), + /* pfcp */ + virtchnl_proto_hdr_pfcp_s_field = + proto_hdr_field_start(virtchnl_proto_hdr_pfcp), + virtchnl_proto_hdr_pfcp_seid, +}; + +struct virtchnl_proto_hdr { + enum virtchnl_proto_hdr_type type; + u32 field_selector; /* a bit mask to select field for header type */ + u8 buffer[64]; + /** + * binary buffer in network order for specific header type. + * for example, if type = virtchnl_proto_hdr_ipv4, a ipv4 + * header is expected to be copied into the buffer. + */ +}; + +virtchnl_check_struct_len(72, virtchnl_proto_hdr); + +struct virtchnl_proto_hdrs { + u8 tunnel_level; + /** + * specify where protocol header start from. + * 0 - from the outer layer + * 1 - from the first inner layer + * 2 - from the second inner layer + * .... 
+ **/ + int count; /* the proto layers must < virtchnl_max_num_proto_hdrs */ + struct virtchnl_proto_hdr proto_hdr[virtchnl_max_num_proto_hdrs]; +}; + +virtchnl_check_struct_len(2312, virtchnl_proto_hdrs); + +/* action configuration for fdir */ +struct virtchnl_filter_action { + enum virtchnl_action type; + union { + /* used for queue and qgroup action */ + struct { + u16 index; + u8 region; + } queue; + /* used for count action */ + struct { + /* share counter id with other flow rules */ + u8 shared; + u32 id; /* counter id */ + } count; + /* used for mark action */ + u32 mark_id; + u8 reserve[32]; + } act_conf; +}; + +virtchnl_check_struct_len(36, virtchnl_filter_action); + +#define virtchnl_max_num_actions 8 + +struct virtchnl_filter_action_set { + /* action number must be less then virtchnl_max_num_actions */ + int count; + struct virtchnl_filter_action actions[virtchnl_max_num_actions]; +}; + +virtchnl_check_struct_len(292, virtchnl_filter_action_set); + +/* pattern and action for fdir rule */ +struct virtchnl_fdir_rule { + struct virtchnl_proto_hdrs proto_hdrs; + struct virtchnl_filter_action_set action_set; +}; + +virtchnl_check_struct_len(2604, virtchnl_fdir_rule); + +/* status returned to vf after vf requests fdir commands + * virtchnl_fdir_success + * vf fdir related request is successfully done by pf + * the request can be op_add/del. + * + * virtchnl_fdir_failure_rule_noresource + * op_add_fdir_filter request is failed due to no hardware resource. + * + * virtchnl_fdir_failure_rule_exist + * op_add_fdir_filter request is failed due to the rule is already existed. + * + * virtchnl_fdir_failure_rule_conflict + * op_add_fdir_filter request is failed due to conflict with existing rule. + * + * virtchnl_fdir_failure_rule_nonexist + * op_del_fdir_filter request is failed due to this rule doesn't exist. + * + * virtchnl_fdir_failure_rule_invalid + * op_add_fdir_filter request is failed due to parameters validation + * or hw doesn't support. 
+ * + * virtchnl_fdir_failure_rule_timeout + * op_add/del_fdir_filter request is failed due to timing out + * for programming. + */ +enum virtchnl_fdir_prgm_status { + virtchnl_fdir_success = 0, + virtchnl_fdir_failure_rule_noresource, + virtchnl_fdir_failure_rule_exist, + virtchnl_fdir_failure_rule_conflict, + virtchnl_fdir_failure_rule_nonexist, + virtchnl_fdir_failure_rule_invalid, + virtchnl_fdir_failure_rule_timeout, +}; + +/* virtchnl_op_add_fdir_filter + * vf sends this request to pf by filling out vsi_id, + * validate_only and rule_cfg. pf will return flow_id + * if the request is successfully done and return add_status to vf. + */ +struct virtchnl_fdir_add { + u16 vsi_id; /* input */ + /* + * 1 for validating a fdir rule, 0 for creating a fdir rule. + * validate and create share one ops: virtchnl_op_add_fdir_filter. + */ + u16 validate_only; /* input */ + u32 flow_id; /* output */ + struct virtchnl_fdir_rule rule_cfg; /* input */ + enum virtchnl_fdir_prgm_status status; /* output */ +}; + +virtchnl_check_struct_len(2616, virtchnl_fdir_add); + +/* virtchnl_op_del_fdir_filter + * vf sends this request to pf by filling out vsi_id + * and flow_id. pf will return del_status to vf. + */ +struct virtchnl_fdir_del { + u16 vsi_id; /* input */ + u16 pad; + u32 flow_id; /* input */ + enum virtchnl_fdir_prgm_status status; /* output */ +}; + +virtchnl_check_struct_len(12, virtchnl_fdir_del); + + case virtchnl_op_add_fdir_filter: + valid_len = sizeof(struct virtchnl_fdir_add); + break; + case virtchnl_op_del_fdir_filter: + valid_len = sizeof(struct virtchnl_fdir_del); + break;
Networking
1f7ea1cd6a3748427512ccc9582e18cd9efea966
qi zhang
drivers
net
avf, ethernet, ice, intel