commit_title
stringlengths
13
124
commit_body
stringlengths
0
1.9k
release_summary
stringclasses
52 values
changes_summary
stringlengths
1
758
release_affected_domains
stringclasses
33 values
release_affected_drivers
stringclasses
51 values
domain_of_changes
stringlengths
2
571
language_set
stringclasses
983 values
diffstat_files
int64
1
300
diffstat_insertions
int64
0
309k
diffstat_deletions
int64
0
168k
commit_diff
stringlengths
92
23.4M
category
stringclasses
108 values
commit_hash
stringlengths
34
40
related_people
stringlengths
0
370
domain
stringclasses
21 values
subdomain
stringclasses
241 values
leaf_module
stringlengths
0
912
net/mlx5: e-switch, prepare eswitch to handle sf vport
prepare eswitch to handle sf vport during (a) querying eswitch functions (b) egress acl creation (c) account for sf vports in total vports calculation
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
73
4
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c - return mlx5_eswitch_is_vf_vport(esw, vport_num); + return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c + u16 max_sf_vports; + max_sf_vports = mlx5_sf_max_functions(dev); + /* device interface is array of 64-bits */ + if (max_sf_vports) + outlen += div_round_up(max_sf_vports, bits_per_type(__be64)) * sizeof(__be64); + - err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); - mlx5_eswitch_is_vf_vport(esw, vport_num); + mlx5_eswitch_is_vf_vport(esw, vport_num) || + mlx5_esw_is_sf_vport(esw, vport_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +#include "sf/sf.h" +static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw) +{ + /* pf and vf vports indices start from 0 to max_vfs */ + return mlx5_vport_pf_placeholder + mlx5_core_max_vfs(esw->dev); +} + +static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw) +{ + return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev); +} + +static inline int +mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num) +{ + return vport_num - mlx5_sf_start_function_id(esw->dev) + + mlx5_vport_pf_placeholder + mlx5_core_max_vfs(esw->dev); +} + +static inline u16 +mlx5_esw_sf_vport_index_to_num(const 
struct mlx5_eswitch *esw, int idx) +{ + return mlx5_sf_start_function_id(esw->dev) + idx - + (mlx5_vport_pf_placeholder + mlx5_core_max_vfs(esw->dev)); +} + +static inline bool +mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num) +{ + return mlx5_sf_supported(esw->dev) && + vport_num >= mlx5_sf_start_function_id(esw->dev) && + (vport_num < (mlx5_sf_start_function_id(esw->dev) + + mlx5_sf_max_functions(esw->dev))); +} + + if (mlx5_esw_is_sf_vport(esw, vport_num)) + return mlx5_esw_sf_vport_num_to_index(esw, vport_num); + + /* pf and vf vports start from 0 to max_vfs */ + /* sf vports indices are after vfs and before ecpf */ + if (mlx5_sf_supported(esw->dev) && + index > mlx5_core_max_vfs(esw->dev)) + return mlx5_esw_sf_vport_index_to_num(esw, index); + + /* pf and vf vports start from 0 to max_vfs */ +#define mlx5_esw_for_each_sf_rep(esw, i, rep) \ + for ((i) = mlx5_esw_sf_start_idx(esw); \ + (rep) = &(esw)->offloads.vport_reps[(i)], \ + (i) < mlx5_esw_sf_end_idx(esw); (i++)) + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int i; + + mlx5_esw_for_each_sf_rep(esw, i, rep) + __esw_offloads_unload_rep(esw, rep, rep_type); +} + + __unload_reps_sf_vport(esw, rep_type); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c +#include "sf/sf.h" - return mlx5_special_vports(dev) + mlx5_core_max_vfs(dev); + return mlx5_special_vports(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
Networking
d7f33a457beef8d522f346d18ab0a1e3366dc20f
vu pham
drivers
net
acl, core, esw, ethernet, mellanox, mlx5
net/mlx5: e-switch, add eswitch helpers for sf vport
add helpers to enable/disable eswitch port, register its devlink port and load its representor.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
4
97
8
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c + +int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, + u16 vport_num, u32 sfnum) +{ + struct mlx5_core_dev *dev = esw->dev; + struct netdev_phys_item_id ppid = {}; + unsigned int dl_port_index; + struct mlx5_vport *vport; + struct devlink *devlink; + u16 pfnum; + int err; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (is_err(vport)) + return ptr_err(vport); + + pfnum = pci_func(dev->pdev->devfn); + mlx5_esw_get_port_parent_id(dev, &ppid); + memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); + dl_port->attrs.switch_id.id_len = ppid.id_len; + devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum); + devlink = priv_to_devlink(dev); + dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); + err = devlink_port_register(devlink, dl_port, dl_port_index); + if (err) + return err; + + vport->dl_port = dl_port; + return 0; +} + +void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (is_err(vport)) + return; + devlink_port_unregister(vport->dl_port); + vport->dl_port = null; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c -static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num, - enum mlx5_eswitch_vport_event enabled_events) +int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, + enum mlx5_eswitch_vport_event enabled_events) -static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num) +void 
mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) - err = esw_enable_vport(esw, vport_num, enabled_events); + err = mlx5_esw_vport_enable(esw, vport_num, enabled_events); - esw_disable_vport(esw, vport_num); + mlx5_esw_vport_disable(esw, vport_num); - esw_disable_vport(esw, vport_num); + mlx5_esw_vport_disable(esw, vport_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num, + enum mlx5_eswitch_vport_event enabled_events); +void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num); + +int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num); +void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num); + + +int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, + u16 vport_num, u32 sfnum); +void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num); + +int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port, + u16 vport_num, u32 sfnum); +void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c -static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) +int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num) -static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) +void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num) + +int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port 
*dl_port, + u16 vport_num, u32 sfnum) +{ + int err; + + err = mlx5_esw_vport_enable(esw, vport_num, mlx5_vport_uc_addr_change); + if (err) + return err; + + err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum); + if (err) + goto devlink_err; + + err = mlx5_esw_offloads_rep_load(esw, vport_num); + if (err) + goto rep_err; + return 0; + +rep_err: + mlx5_esw_devlink_sf_port_unregister(esw, vport_num); +devlink_err: + mlx5_esw_vport_disable(esw, vport_num); + return err; +} + +void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num) +{ + mlx5_esw_offloads_rep_unload(esw, vport_num); + mlx5_esw_devlink_sf_port_unregister(esw, vport_num); + mlx5_esw_vport_disable(esw, vport_num); +}
Networking
d970812b91d0fa685cde35e9b3f46a48d049f4e3
parav pandit roi dayan roid nvidia com
drivers
net
core, esw, ethernet, mellanox, mlx5
net/mlx5: sf, add port add delete functionality
to handle sf port management outside of the eswitch as independent software layer, introduce eswitch notifier apis so that mlx5 upper layer who wish to support sf port management in switchdev mode can perform its task whenever eswitch mode is set to switchdev or before eswitch is disabled.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'kconfig', 'c', 'makefile']
13
607
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/kconfig b/drivers/net/ethernet/mellanox/mlx5/core/kconfig --- a/drivers/net/ethernet/mellanox/mlx5/core/kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/kconfig + +config mlx5_sf_manager + bool + depends on mlx5_sf && mlx5_eswitch + default y + help + build support for subfuction port in the nic. a mellanox subfunction + port is managed through devlink. a subfunction supports rdma, netdevice + and vdpa device. it is similar to a sriov vf but it doesn't require + sriov support. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/makefile b/drivers/net/ethernet/mellanox/mlx5/core/makefile --- a/drivers/net/ethernet/mellanox/mlx5/core/makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/makefile + +# +# sf manager +# +mlx5_core-$(config_mlx5_sf_manager) += sf/cmd.o sf/hw_table.o sf/devlink.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c + case mlx5_cmd_op_dealloc_sf: + case mlx5_cmd_op_alloc_sf: + mlx5_command_str_case(alloc_sf); + mlx5_command_str_case(dealloc_sf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +#include "sf/sf.h" +#endif +#ifdef config_mlx5_sf_manager + .port_new = mlx5_devlink_sf_port_new, + .port_del = mlx5_devlink_sf_port_del, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode) +{ + struct mlx5_esw_event_info info = {}; + + info.new_mode = mode; + + blocking_notifier_call_chain(&esw->n_head, 0, 
&info); +} + + mlx5_esw_mode_change_notify(esw, mode); + + /* notify eswitch users that it is exiting from current mode. + * so that it can do necessary cleanup before the eswitch is disabled. + */ + mlx5_esw_mode_change_notify(esw, mlx5_eswitch_none); + + blocking_init_notifier_head(&esw->n_head); +int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&esw->n_head, nb); +} +void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&esw->n_head, nb); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h + struct blocking_notifier_head n_head; +/** + * mlx5_esw_event_info - indicates eswitch mode changed/changing. + * + * @new_mode: new mode of eswitch. + */ +struct mlx5_esw_event_info { + u16 new_mode; +}; + +int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n); +void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c + err = mlx5_sf_hw_table_init(dev); + if (err) { + mlx5_core_err(dev, "failed to init sf hw table %d ", err); + goto err_sf_hw_table_cleanup; + } + + err = mlx5_sf_table_init(dev); + if (err) { + mlx5_core_err(dev, "failed to init sf table %d ", err); + goto err_sf_table_cleanup; + } + +err_sf_table_cleanup: + mlx5_sf_hw_table_cleanup(dev); +err_sf_hw_table_cleanup: + mlx5_vhca_event_cleanup(dev); + mlx5_sf_table_cleanup(dev); + mlx5_sf_hw_table_cleanup(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c 
b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2020 mellanox technologies ltd */ + +#include <linux/mlx5/driver.h> +#include "priv.h" + +int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id) +{ + u32 out[mlx5_st_sz_dw(alloc_sf_out)] = {}; + u32 in[mlx5_st_sz_dw(alloc_sf_in)] = {}; + + mlx5_set(alloc_sf_in, in, opcode, mlx5_cmd_op_alloc_sf); + mlx5_set(alloc_sf_in, in, function_id, function_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id) +{ + u32 out[mlx5_st_sz_dw(dealloc_sf_out)] = {}; + u32 in[mlx5_st_sz_dw(dealloc_sf_in)] = {}; + + mlx5_set(dealloc_sf_in, in, opcode, mlx5_cmd_op_dealloc_sf); + mlx5_set(dealloc_sf_in, in, function_id, function_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2020 mellanox technologies ltd */ + +#include <linux/mlx5/driver.h> +#include "eswitch.h" +#include "priv.h" + +struct mlx5_sf { + struct devlink_port dl_port; + unsigned int port_index; + u16 id; +}; + +struct mlx5_sf_table { + struct mlx5_core_dev *dev; /* to refer from notifier context. */ + struct xarray port_indices; /* port index based lookup. 
*/ + refcount_t refcount; + struct completion disable_complete; + struct notifier_block esw_nb; +}; + +static struct mlx5_sf * +mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index) +{ + return xa_load(&table->port_indices, port_index); +} + +static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + return xa_insert(&table->port_indices, sf->port_index, sf, gfp_kernel); +} + +static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + xa_erase(&table->port_indices, sf->port_index); +} + +static struct mlx5_sf * +mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack) +{ + unsigned int dl_port_index; + struct mlx5_sf *sf; + u16 hw_fn_id; + int id_err; + int err; + + id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum); + if (id_err < 0) { + err = id_err; + goto id_err; + } + + sf = kzalloc(sizeof(*sf), gfp_kernel); + if (!sf) { + err = -enomem; + goto alloc_err; + } + sf->id = id_err; + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id); + dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id); + sf->port_index = dl_port_index; + + err = mlx5_sf_id_insert(table, sf); + if (err) + goto insert_err; + + return sf; + +insert_err: + kfree(sf); +alloc_err: + mlx5_sf_hw_table_sf_free(table->dev, id_err); +id_err: + if (err == -eexist) + nl_set_err_msg_mod(extack, "sf already exist. choose different sfnum"); + return err_ptr(err); +} + +static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + mlx5_sf_id_erase(table, sf); + mlx5_sf_hw_table_sf_free(table->dev, sf->id); + kfree(sf); +} + +static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table = dev->priv.sf_table; + + if (!table) + return null; + + return refcount_inc_not_zero(&table->refcount) ? 
table : null; +} + +static void mlx5_sf_table_put(struct mlx5_sf_table *table) +{ + if (refcount_dec_and_test(&table->refcount)) + complete(&table->disable_complete); +} + +static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + unsigned int *new_port_index) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_sf *sf; + u16 hw_fn_id; + int err; + + sf = mlx5_sf_alloc(table, new_attr->sfnum, extack); + if (is_err(sf)) + return ptr_err(sf); + + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id); + err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum); + if (err) + goto esw_err; + *new_port_index = sf->port_index; + return 0; + +esw_err: + mlx5_sf_free(table, sf); + return err; +} + +static void mlx5_sf_del(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + u16 hw_fn_id; + + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id); + mlx5_esw_offloads_sf_vport_disable(esw, hw_fn_id); + mlx5_sf_free(table, sf); +} + +static int +mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack) +{ + if (new_attr->flavour != devlink_port_flavour_pci_sf) { + nl_set_err_msg_mod(extack, "driver supports only sf port addition"); + return -eopnotsupp; + } + if (new_attr->port_index_valid) { + nl_set_err_msg_mod(extack, + "driver does not support user defined port index assignment"); + return -eopnotsupp; + } + if (!new_attr->sfnum_valid) { + nl_set_err_msg_mod(extack, + "user must provide unique sfnum. 
driver does not support auto assignment"); + return -eopnotsupp; + } + if (new_attr->controller_valid && new_attr->controller) { + nl_set_err_msg_mod(extack, "external controller is unsupported"); + return -eopnotsupp; + } + if (new_attr->pfnum != pci_func(dev->pdev->devfn)) { + nl_set_err_msg_mod(extack, "invalid pfnum supplied"); + return -eopnotsupp; + } + return 0; +} + +int mlx5_devlink_sf_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + unsigned int *new_port_index) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + int err; + + err = mlx5_sf_new_check_attr(dev, new_attr, extack); + if (err) + return err; + + table = mlx5_sf_table_try_get(dev); + if (!table) { + nl_set_err_msg_mod(extack, + "port add is only supported in eswitch switchdev mode or sf ports are disabled."); + return -eopnotsupp; + } + err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index); + mlx5_sf_table_put(table); + return err; +} + +int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + struct mlx5_sf *sf; + int err = 0; + + table = mlx5_sf_table_try_get(dev); + if (!table) { + nl_set_err_msg_mod(extack, + "port del is only supported in eswitch switchdev mode or sf ports are disabled."); + return -eopnotsupp; + } + sf = mlx5_sf_lookup_by_index(table, port_index); + if (!sf) { + err = -enodev; + goto sf_err; + } + + mlx5_sf_del(dev, table, sf); +sf_err: + mlx5_sf_table_put(table); + return err; +} + +static void mlx5_sf_destroy_all(struct mlx5_sf_table *table) +{ + struct mlx5_core_dev *dev = table->dev; + unsigned long index; + struct mlx5_sf *sf; + + xa_for_each(&table->port_indices, index, sf) + mlx5_sf_del(dev, table, sf); +} + +static void mlx5_sf_table_enable(struct mlx5_sf_table *table) +{ + if 
(!mlx5_sf_max_functions(table->dev)) + return; + + init_completion(&table->disable_complete); + refcount_set(&table->refcount, 1); +} + +static void mlx5_sf_table_disable(struct mlx5_sf_table *table) +{ + if (!mlx5_sf_max_functions(table->dev)) + return; + + if (!refcount_read(&table->refcount)) + return; + + /* balances with refcount_set; drop the reference so that new user cmd cannot start. */ + mlx5_sf_table_put(table); + wait_for_completion(&table->disable_complete); + + /* at this point, no new user commands can start. + * it is safe to destroy all user created sfs. + */ + mlx5_sf_destroy_all(table); +} + +static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb); + const struct mlx5_esw_event_info *mode = data; + + switch (mode->new_mode) { + case mlx5_eswitch_offloads: + mlx5_sf_table_enable(table); + break; + case mlx5_eswitch_none: + mlx5_sf_table_disable(table); + break; + default: + break; + }; + + return 0; +} + +static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev) +{ + return dev->priv.eswitch && mlx5_eswitch_manager(dev) && mlx5_sf_supported(dev); +} + +int mlx5_sf_table_init(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table; + int err; + + if (!mlx5_sf_table_supported(dev)) + return 0; + + table = kzalloc(sizeof(*table), gfp_kernel); + if (!table) + return -enomem; + + table->dev = dev; + xa_init(&table->port_indices); + dev->priv.sf_table = table; + table->esw_nb.notifier_call = mlx5_sf_esw_event; + err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb); + if (err) + goto reg_err; + return 0; + +reg_err: + kfree(table); + dev->priv.sf_table = null; + return err; +} + +void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_table *table = dev->priv.sf_table; + + if (!table) + return; + + mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); + 
warn_on(refcount_read(&table->refcount)); + warn_on(!xa_empty(&table->port_indices)); + kfree(table); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2020 mellanox technologies ltd */ +#include <linux/mlx5/driver.h> +#include "vhca_event.h" +#include "priv.h" +#include "sf.h" +#include "ecpf.h" + +struct mlx5_sf_hw { + u32 usr_sfnum; + u8 allocated: 1; +}; + +struct mlx5_sf_hw_table { + struct mlx5_core_dev *dev; + struct mlx5_sf_hw *sfs; + int max_local_functions; + u8 ecpu: 1; +}; + +u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id) +{ + return sw_id + mlx5_sf_start_function_id(dev); +} + +int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + int sw_id = -enospc; + u16 hw_fn_id; + int err; + int i; + + if (!table->max_local_functions) + return -eopnotsupp; + + /* check if sf with same sfnum already exists or not. 
*/ + for (i = 0; i < table->max_local_functions; i++) { + if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) + return -eexist; + } + + /* find the free entry and allocate the entry from the array */ + for (i = 0; i < table->max_local_functions; i++) { + if (!table->sfs[i].allocated) { + table->sfs[i].usr_sfnum = usr_sfnum; + table->sfs[i].allocated = true; + sw_id = i; + break; + } + } + if (sw_id == -enospc) { + err = -enospc; + goto err; + } + + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id); + err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id); + if (err) + goto err; + + err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum); + if (err) + goto vhca_err; + + return sw_id; + +vhca_err: + mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); +err: + table->sfs[i].allocated = false; + return err; +} + +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + u16 hw_fn_id; + + hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id); + mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + table->sfs[id].allocated = false; +} + +int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table; + struct mlx5_sf_hw *sfs; + int max_functions; + + if (!mlx5_sf_supported(dev)) + return 0; + + max_functions = mlx5_sf_max_functions(dev); + table = kzalloc(sizeof(*table), gfp_kernel); + if (!table) + return -enomem; + + sfs = kcalloc(max_functions, sizeof(*sfs), gfp_kernel); + if (!sfs) + goto table_err; + + table->dev = dev; + table->sfs = sfs; + table->max_local_functions = max_functions; + table->ecpu = mlx5_read_embedded_cpu(dev); + dev->priv.sf_hw_table = table; + mlx5_core_dbg(dev, "sf hw table: max sfs = %d ", max_functions); + return 0; + +table_err: + kfree(table); + return -enomem; +} + +void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + if (!table) + return; + + kfree(table->sfs); + kfree(table); +} 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2020 mellanox technologies ltd */ + +#ifndef __mlx5_sf_priv_h__ +#define __mlx5_sf_priv_h__ + +#include <linux/mlx5/driver.h> + +int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id); +int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id); + +u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id); + +int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum); +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +#ifdef config_mlx5_sf_manager + +int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev); +void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev); + +int mlx5_sf_table_init(struct mlx5_core_dev *dev); +void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); + +int mlx5_devlink_sf_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *add_attr, + struct netlink_ext_ack *extack, + unsigned int *new_port_index); +int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack); + +#else + +static inline int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev) +{ +} + +static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev) +{ +} + +#endif + diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h --- a/include/linux/mlx5/driver.h +++ 
b/include/linux/mlx5/driver.h +struct mlx5_sf_hw_table; +struct mlx5_sf_table; +#ifdef config_mlx5_sf_manager + struct mlx5_sf_hw_table *sf_hw_table; + struct mlx5_sf_table *sf_table; +#endif
Networking
8f01054186683fe0986d54d584aa13723d51edce
parav pandit
include
linux
core, ethernet, mellanox, mlx5, sf
net/mlx5: sf, port function state change support
support changing the state of the sf port's function through devlink. when activating the sf port's function, enable the hca in the device followed by adding its auxiliary device. when deactivating the sf port's function, delete its auxiliary device followed by disabling the vhca.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
7
431
27
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c + .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get, + .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c +#include "sf/sf.h" + err = mlx5_sf_hw_table_create(dev); + if (err) { + mlx5_core_err(dev, "sf table create failed %d ", err); + goto err_vhca; + } + + mlx5_sf_hw_table_destroy(dev); +err_vhca: + mlx5_sf_hw_table_destroy(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c + +int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id) +{ + u32 out[mlx5_st_sz_dw(enable_hca_out)] = {}; + u32 in[mlx5_st_sz_dw(enable_hca_in)] = {}; + + mlx5_set(enable_hca_in, in, opcode, mlx5_cmd_op_enable_hca); + mlx5_set(enable_hca_in, in, function_id, func_id); + mlx5_set(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id) +{ + u32 out[mlx5_st_sz_dw(disable_hca_out)] = {}; + u32 in[mlx5_st_sz_dw(disable_hca_in)] = {}; + + mlx5_set(disable_hca_in, in, opcode, mlx5_cmd_op_disable_hca); + mlx5_set(disable_hca_in, in, function_id, func_id); + mlx5_set(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c --- 
a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +#include "sf/dev/dev.h" +#include "mlx5_ifc_vhca_event.h" +#include "vhca_event.h" +#include "ecpf.h" + u16 hw_fn_id; + u16 hw_state; + struct mutex sf_state_lock; /* serializes sf state among user cmds & vhca event handler. */ + struct notifier_block vhca_nb; + u8 ecpu: 1; +static struct mlx5_sf * +mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id) +{ + unsigned long index; + struct mlx5_sf *sf; + + xa_for_each(&table->port_indices, index, sf) { + if (sf->hw_fn_id == fn_id) + return sf; + } + return null; +} + + sf->hw_fn_id = hw_fn_id; + sf->hw_state = mlx5_vhca_state_allocated; +static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state) +{ + switch (hw_state) { + case mlx5_vhca_state_active: + case mlx5_vhca_state_in_use: + case mlx5_vhca_state_teardown_request: + return devlink_port_fn_state_active; + case mlx5_vhca_state_invalid: + case mlx5_vhca_state_allocated: + default: + return devlink_port_fn_state_inactive; + } +} + +static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state) +{ + switch (hw_state) { + case mlx5_vhca_state_in_use: + case mlx5_vhca_state_teardown_request: + return devlink_port_fn_opstate_attached; + case mlx5_vhca_state_invalid: + case mlx5_vhca_state_allocated: + case mlx5_vhca_state_active: + default: + return devlink_port_fn_opstate_detached; + } +} + +static bool mlx5_sf_is_active(const struct mlx5_sf *sf) +{ + return sf->hw_state == mlx5_vhca_state_active || sf->hw_state == mlx5_vhca_state_in_use; +} + +int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + struct mlx5_sf *sf; + int err = 0; + + table = 
mlx5_sf_table_try_get(dev); + if (!table) + return -eopnotsupp; + + sf = mlx5_sf_lookup_by_index(table, dl_port->index); + if (!sf) { + err = -eopnotsupp; + goto sf_err; + } + mutex_lock(&table->sf_state_lock); + *state = mlx5_sf_to_devlink_state(sf->hw_state); + *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state); + mutex_unlock(&table->sf_state_lock); +sf_err: + mlx5_sf_table_put(table); + return err; +} + +static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf) +{ + int err; + + if (mlx5_sf_is_active(sf)) + return 0; + if (sf->hw_state != mlx5_vhca_state_allocated) + return -einval; + + err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id); + if (err) + return err; + + sf->hw_state = mlx5_vhca_state_active; + return 0; +} + +static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf) +{ + int err; + + if (!mlx5_sf_is_active(sf)) + return 0; + + err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id); + if (err) + return err; + + sf->hw_state = mlx5_vhca_state_teardown_request; + return 0; +} + +static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, + struct mlx5_sf *sf, + enum devlink_port_fn_state state) +{ + int err = 0; + + mutex_lock(&table->sf_state_lock); + if (state == mlx5_sf_to_devlink_state(sf->hw_state)) + goto out; + if (state == devlink_port_fn_state_active) + err = mlx5_sf_activate(dev, sf); + else if (state == devlink_port_fn_state_inactive) + err = mlx5_sf_deactivate(dev, sf); + else + err = -einval; +out: + mutex_unlock(&table->sf_state_lock); + return err; +} + +int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_sf_table *table; + struct mlx5_sf *sf; + int err; + + table = mlx5_sf_table_try_get(dev); + if (!table) { + nl_set_err_msg_mod(extack, + "port state set is only supported in eswitch switchdev mode or sf 
ports are disabled."); + return -eopnotsupp; + } + sf = mlx5_sf_lookup_by_index(table, dl_port->index); + if (!sf) { + err = -enodev; + goto out; + } + + err = mlx5_sf_state_set(dev, table, sf, state); +out: + mlx5_sf_table_put(table); + return err; +} + -static void mlx5_sf_del(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, struct mlx5_sf *sf) -{ - struct mlx5_eswitch *esw = dev->priv.eswitch; - u16 hw_fn_id; - - hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id); - mlx5_esw_offloads_sf_vport_disable(esw, hw_fn_id); - mlx5_sf_free(table, sf); -} - +static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) +{ + if (sf->hw_state == mlx5_vhca_state_allocated) { + mlx5_sf_free(table, sf); + } else if (mlx5_sf_is_active(sf)) { + /* even if its active, it is treated as in_use because by the time, + * it is disabled here, it may getting used. so it is safe to + * always look for the event to ensure that it is recycled only after + * firmware gives confirmation that it is detached by the driver. 
+ */ + mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id); + mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); + kfree(sf); + } else { + mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id); + kfree(sf); + } +} + + struct mlx5_eswitch *esw = dev->priv.eswitch; - mlx5_sf_del(dev, table, sf); + mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id); + mlx5_sf_id_erase(table, sf); + + mutex_lock(&table->sf_state_lock); + mlx5_sf_dealloc(table, sf); + mutex_unlock(&table->sf_state_lock); -static void mlx5_sf_destroy_all(struct mlx5_sf_table *table) +static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state) - struct mlx5_core_dev *dev = table->dev; - unsigned long index; + if (sf->hw_state == mlx5_vhca_state_active && new_state == mlx5_vhca_state_in_use) + return true; + + if (sf->hw_state == mlx5_vhca_state_in_use && new_state == mlx5_vhca_state_active) + return true; + + if (sf->hw_state == mlx5_vhca_state_teardown_request && + new_state == mlx5_vhca_state_allocated) + return true; + + return false; +} + +static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) +{ + struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb); + const struct mlx5_vhca_state_event *event = data; + bool update = false; - xa_for_each(&table->port_indices, index, sf) - mlx5_sf_del(dev, table, sf); + table = mlx5_sf_table_try_get(table->dev); + if (!table) + return 0; + + mutex_lock(&table->sf_state_lock); + sf = mlx5_sf_lookup_by_function_id(table, event->function_id); + if (!sf) + goto sf_err; + + /* when driver is attached or detached to a function, an event + * notifies such state change. 
+ */ + update = mlx5_sf_state_update_check(sf, event->new_vhca_state); + if (update) + sf->hw_state = event->new_vhca_state; +sf_err: + mutex_unlock(&table->sf_state_lock); + mlx5_sf_table_put(table); + return 0; +static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table) +{ + struct mlx5_eswitch *esw = table->dev->priv.eswitch; + unsigned long index; + struct mlx5_sf *sf; + + /* at this point, no new user commands can start and no vhca event can + * arrive. it is safe to destroy all user created sfs. + */ + xa_for_each(&table->port_indices, index, sf) { + mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id); + mlx5_sf_id_erase(table, sf); + mlx5_sf_dealloc(table, sf); + } +} + - /* balances with refcount_set; drop the reference so that new user cmd cannot start. */ + /* balances with refcount_set; drop the reference so that new user cmd cannot start + * and new vhca event handler cannnot run. + */ - /* at this point, no new user commands can start. - * it is safe to destroy all user created sfs. 
- */ - mlx5_sf_destroy_all(table); + mlx5_sf_deactivate_all(table); - if (!mlx5_sf_table_supported(dev)) + if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev)) + mutex_init(&table->sf_state_lock); + refcount_set(&table->refcount, 0); + + table->vhca_nb.notifier_call = mlx5_sf_vhca_event; + err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); + if (err) + goto vhca_err; + +vhca_err: + mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb); + mutex_destroy(&table->sf_state_lock); + mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); + mutex_destroy(&table->sf_state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c +#include "mlx5_ifc_vhca_event.h" +#include "vhca_event.h" + u8 pending_delete: 1; + struct mutex table_lock; /* serializes sf deletion and vhca state change handler. 
*/ + struct notifier_block vhca_nb; +static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id) +{ + return hw_id - mlx5_sf_start_function_id(dev); +} + + mutex_lock(&table->table_lock); - if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) - return -eexist; + if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) { + err = -eexist; + goto exist_err; + } + mutex_unlock(&table->table_lock); +exist_err: + mutex_unlock(&table->table_lock); -void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id) +static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id) + table->sfs[id].pending_delete = false; +} + +void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + mutex_lock(&table->table_lock); + _mlx5_sf_hw_id_free(dev, id); + mutex_unlock(&table->table_lock); +} + +void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + u32 out[mlx5_st_sz_dw(query_vhca_state_out)] = {}; + u16 hw_fn_id; + u8 state; + int err; + + hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id); + mutex_lock(&table->table_lock); + err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out)); + if (err) + goto err; + state = mlx5_get(query_vhca_state_out, out, vhca_state_context.vhca_state); + if (state == mlx5_vhca_state_allocated) { + mlx5_cmd_dealloc_sf(table->dev, hw_fn_id); + table->sfs[id].allocated = false; + } else { + table->sfs[id].pending_delete = true; + } +err: + mutex_unlock(&table->table_lock); +} + +static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table) +{ + int i; + + for (i = 0; i < table->max_local_functions; i++) { + if (table->sfs[i].allocated) + _mlx5_sf_hw_id_free(table->dev, i); + } - if (!mlx5_sf_supported(dev)) + if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev)) + mutex_init(&table->table_lock); + mutex_destroy(&table->table_lock); + 
+static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data) +{ + struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb); + const struct mlx5_vhca_state_event *event = data; + struct mlx5_sf_hw *sf_hw; + u16 sw_id; + + if (event->new_vhca_state != mlx5_vhca_state_allocated) + return 0; + + sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id); + sf_hw = &table->sfs[sw_id]; + + mutex_lock(&table->table_lock); + /* sf driver notified through firmware that sf is finally detached. + * hence recycle the sf hardware id for reuse. + */ + if (sf_hw->allocated && sf_hw->pending_delete) + _mlx5_sf_hw_id_free(table->dev, sw_id); + mutex_unlock(&table->table_lock); + return 0; +} + +int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + if (!table) + return 0; + + table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event; + return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb); +} + +void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) +{ + struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table; + + if (!table) + return; + + mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb); + /* dealloc sfs whose firmware event has been missed. 
*/ + mlx5_sf_hw_dealloc_all(table); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h +int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id); +int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id); + +void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev); +void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev); + - +int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack); +int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack); +static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev) +{ + return 0; +} + +static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev) +{ +} +
Networking
6a3273217469790e6d0abc73893d0ebe6b69180d
parav pandit
drivers
net
core, ethernet, mellanox, mlx5, sf
devlink: add devlink port documentation
added documentation for devlink port and port function related commands.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['rst']
2
119
0
--- diff --git a/documentation/networking/devlink/devlink-port.rst b/documentation/networking/devlink/devlink-port.rst --- /dev/null +++ b/documentation/networking/devlink/devlink-port.rst +.. spdx-license-identifier: gpl-2.0 + +.. _devlink_port: + +============ +devlink port +============ + +''devlink-port'' is a port that exists on the device. it has a logically +separate ingress/egress point of the device. a devlink port can be any one +of many flavours. a devlink port flavour along with port attributes +describe what a port represents. + +a device driver that intends to publish a devlink port sets the +devlink port attributes and registers the devlink port. + +devlink port flavours are described below. + +.. list-table:: list of devlink port flavours + :widths: 33 90 + + * - flavour + - description + * - ''devlink_port_flavour_physical'' + - any kind of physical port. this can be an eswitch physical port or any + other physical port on the device. + * - ''devlink_port_flavour_dsa'' + - this indicates a dsa interconnect port. + * - ''devlink_port_flavour_cpu'' + - this indicates a cpu port applicable only to dsa. + * - ''devlink_port_flavour_pci_pf'' + - this indicates an eswitch port representing a port of pci + physical function (pf). + * - ''devlink_port_flavour_pci_vf'' + - this indicates an eswitch port representing a port of pci + virtual function (vf). + * - ''devlink_port_flavour_virtual'' + - this indicates a virtual port for the pci virtual function. + +devlink port can have a different type based on the link layer described below. + +.. list-table:: list of devlink port types + :widths: 23 90 + + * - type + - description + * - ''devlink_port_type_eth'' + - driver should set this port type when a link layer of the port is + ethernet. + * - ''devlink_port_type_ib'' + - driver should set this port type when a link layer of the port is + infiniband. 
+ * - ''devlink_port_type_auto'' + - this type is indicated by the user when driver should detect the port + type automatically. + +pci controllers +--------------- +in most cases a pci device has only one controller. a controller consists of +potentially multiple physical and virtual functions. a function consists +of one or more ports. this port is represented by the devlink eswitch port. + +a pci device connected to multiple cpus or multiple pci root complexes or a +smartnic, however, may have multiple controllers. for a device with multiple +controllers, each controller is distinguished by a unique controller number. +an eswitch is on the pci device which supports ports of multiple controllers. + +an example view of a system with two controllers:: + + --------------------------------------------------------- + | | + | --------- --------- ------- ------- | + ----------- | | vf(s) | | sf(s) | |vf(s)| |sf(s)| | + | server | | ------- ----/---- ---/----- ------- ---/--- ---/--- | + | pci rc |=== | pf0 |______/________/ | pf1 |___/_______/ | + | connect | | ------- ------- | + ----------- | | controller_num=1 (no eswitch) | + ------|-------------------------------------------------- + (internal wire) + | + --------------------------------------------------------- + | devlink eswitch ports and reps | + | ----------------------------------------------------- | + | |ctrl-0 | ctrl-0 | ctrl-0 | ctrl-0 | ctrl-0 |ctrl-0 | | + | |pf0 | pf0vfn | pf0sfn | pf1 | pf1vfn |pf1sfn | | + | ----------------------------------------------------- | + | |ctrl-1 | ctrl-1 | ctrl-1 | ctrl-1 | ctrl-1 |ctrl-1 | | + | |pf0 | pf0vfn | pf0sfn | pf1 | pf1vfn |pf1sfn | | + | ----------------------------------------------------- | + | | + | | + ----------- | --------- --------- ------- ------- | + | smartnic| | | vf(s) | | sf(s) | |vf(s)| |sf(s)| | + | pci rc |==| ------- ----/---- ---/----- ------- ---/--- ---/--- | + | connect | | | pf0 |______/________/ | pf1 |___/_______/ | + ----------- | 
------- ------- | + | | + | local controller_num=0 (eswitch) | + --------------------------------------------------------- + +in the above example, the external controller (identified by controller number = 1) +doesn't have the eswitch. local controller (identified by controller number = 0) +has the eswitch. the devlink instance on the local controller has eswitch +devlink ports for both the controllers. + +function configuration +====================== + +a user can configure the function attribute before enumerating the pci +function. usually it means, user should configure function attribute +before a bus specific device for the function is created. however, when +sriov is enabled, virtual function devices are created on the pci bus. +hence, function attribute should be configured before binding virtual +function device to the driver. + +a user may set the hardware address of the function using +'devlink port function set hw_addr' command. for ethernet port function +this means a mac address. diff --git a/documentation/networking/devlink/index.rst b/documentation/networking/devlink/index.rst --- a/documentation/networking/devlink/index.rst +++ b/documentation/networking/devlink/index.rst + devlink-port
Networking
c736111cf8d519d46ac62af36378aed472faaa12
parav pandit jiri pirko jiri nvidia com jacob keller jacob e keller intel com
documentation
networking
devlink
devlink: extend devlink port documentation for subfunctions
add devlink port documentation for subfunction management.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['rst']
2
86
3
--- diff --git a/documentation/driver-api/auxiliary_bus.rst b/documentation/driver-api/auxiliary_bus.rst --- a/documentation/driver-api/auxiliary_bus.rst +++ b/documentation/driver-api/auxiliary_bus.rst +.. _auxiliary_bus: + diff --git a/documentation/networking/devlink/devlink-port.rst b/documentation/networking/devlink/devlink-port.rst --- a/documentation/networking/devlink/devlink-port.rst +++ b/documentation/networking/devlink/devlink-port.rst - this indicates an eswitch port representing a port of pci + * - ''devlink_port_flavour_pci_sf'' + - this indicates an eswitch port representing a port of pci + subfunction (sf). - this indicates a virtual port for the pci virtual function. --------------- -potentially multiple physical and virtual functions. a function consists -of one or more ports. this port is represented by the devlink eswitch port. +potentially multiple physical, virtual functions and subfunctions. a function +consists of one or more ports. this port is represented by the devlink eswitch +port. -function device to the driver. +function device to the driver. for subfunctions, this means user should +configure port function attribute before activating the port function. + +subfunction +============ + +subfunction is a lightweight function that has a parent pci function on which +it is deployed. subfunction is created and deployed in unit of 1. unlike +sriov vfs, a subfunction doesn't require its own pci virtual function. +a subfunction communicates with the hardware through the parent pci function. + +to use a subfunction, 3 steps setup sequence is followed. +(1) create - create a subfunction; +(2) configure - configure subfunction attributes; +(3) deploy - deploy the subfunction; + +subfunction management is done using devlink port user interface. +user performs setup on the subfunction management device. + +(1) create +---------- +a subfunction is created using a devlink port interface. 
a user adds the +subfunction by adding a devlink port of subfunction flavour. the devlink +kernel code calls down to subfunction management driver (devlink ops) and asks +it to create a subfunction devlink port. driver then instantiates the +subfunction port and any associated objects such as health reporters and +representor netdevice. + +(2) configure +------------- +a subfunction devlink port is created but it is not active yet. that means the +entities are created on devlink side, the e-switch port representor is created, +but the subfunction device itself it not created. a user might use e-switch port +representor to do settings, putting it into bridge, adding tc rules, etc. a user +might as well configure the hardware address (such as mac address) of the +subfunction while subfunction is inactive. + +(3) deploy +---------- +once a subfunction is configured, user must activate it to use it. upon +activation, subfunction management driver asks the subfunction management +device to instantiate the subfunction device on particular pci function. +a subfunction device is created on the :ref:'documentation/driver-api/auxiliary_bus.rst <auxiliary_bus>'. +at this point a matching subfunction driver binds to the subfunction's auxiliary device. + +terms and definitions +===================== + +.. list-table:: terms and definitions + :widths: 22 90 + + * - term + - definitions + * - ''pci device'' + - a physical pci device having one or more pci bus consists of one or + more pci controllers. + * - ''pci controller'' + - a controller consists of potentially multiple physical functions, + virtual functions and subfunctions. + * - ''port function'' + - an object to manage the function of a port. + * - ''subfunction'' + - a lightweight function that has parent pci function on which it is + deployed. + * - ''subfunction device'' + - a bus device of the subfunction, usually on a auxiliary bus. 
+ * - ''subfunction driver'' + - a device driver for the subfunction auxiliary device. + * - ''subfunction management device'' + - a pci physical function that supports subfunction management. + * - ''subfunction management driver'' + - a device driver for pci physical function that supports + subfunction management using devlink port interface. + * - ''subfunction host driver'' + - a device driver for pci physical function that hosts subfunction + devices. in most cases it is same as subfunction management driver. when + subfunction is used on external controller, subfunction management and + host drivers are different.
Networking
6474ce7ecd80c5071861ba96c864f03d84319e73
parav pandit
documentation
driver-api
devlink
net/mlx5: add devlink subfunction port documentation
add documentation for subfunction management using devlink port.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add mlx5 subfunction support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['rst']
1
210
0
--- diff --git a/documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst b/documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst --- a/documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst +++ b/documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst - 'enabling the driver and kconfig options'_ - 'devlink info'_ - 'devlink parameters'_ +- 'mlx5 subfunction'_ +- 'mlx5 port function'_ - 'devlink health reporters'_ - 'mlx5 tracepoints'_ +mlx5 subfunction +================ +mlx5 supports subfunction management using devlink port (see :ref:'documentation/networking/devlink/devlink-port.rst <devlink_port>') interface. + +a subfunction has its own function capabilities and its own resources. this +means a subfunction has its own dedicated queues (txq, rxq, cq, eq). these +queues are neither shared nor stolen from the parent pci function. + +when a subfunction is rdma capable, it has its own qp1, gid table and rdma +resources neither shared nor stolen from the parent pci function. + +a subfunction has a dedicated window in pci bar space that is not shared +with ther other subfunctions or the parent pci function. this ensures that all +devices (netdev, rdma, vdpa etc.) of the subfunction accesses only assigned +pci bar space. + +a subfunction supports eswitch representation through which it supports tc +offloads. the user configures eswitch to send/receive packets from/to +the subfunction port. + +subfunctions share pci level resources such as pci msi-x irqs with +other subfunctions and/or with its parent pci function. 
+ +example mlx5 software, system and device view:: + + _______ + | admin | + | user |---------- + |_______| | + | | + ____|____ __|______ _________________ + | | | | | | + | devlink | | tc tool | | user | + | tool | |_________| | applications | + |_________| | |_________________| + | | | | + | | | | userspace + +---------|-------------|-------------------|----------|--------------------+ + | | +----------+ +----------+ kernel + | | | netdev | | rdma dev | + | | +----------+ +----------+ + (devlink port add/del | ^ ^ + port function set) | | | + | | +---------------| + _____|___ | | _______|_______ + | | | | | mlx5 class | + | devlink | +------------+ | | drivers | + | kernel | | rep netdev | | |(mlx5_core,ib) | + |_________| +------------+ | |_______________| + | | | ^ + (devlink ops) | | (probe/remove) + _________|________ | | ____|________ + | subfunction | | +---------------+ | subfunction | + | management driver|----- | subfunction |---| driver | + | (mlx5_core) | | auxiliary dev | | (mlx5_core) | + |__________________| +---------------+ |_____________| + | ^ + (sf add/del, vhca events) | + | (device add/del) + _____|____ ____|________ + | | | subfunction | + | pci nic |---- activate/deactive events---->| host driver | + |__________| | (mlx5_core) | + |_____________| + +subfunction is created using devlink port interface. 
+ +- change device to switchdev mode:: + + $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev + +- add a devlink port of subfunction flaovur:: + + $ devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88 + pci/0000:06:00.0/32768: type eth netdev eth6 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false + function: + hw_addr 00:00:00:00:00:00 state inactive opstate detached + +- show a devlink port of the subfunction:: + + $ devlink port show pci/0000:06:00.0/32768 + pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88 + function: + hw_addr 00:00:00:00:00:00 state inactive opstate detached + +- delete a devlink port of subfunction after use:: + + $ devlink port del pci/0000:06:00.0/32768 + +mlx5 function attributes +======================== +the mlx5 driver provides a mechanism to setup pci vf/sf function attributes in +a unified way for smartnic and non-smartnic. + +this is supported only when the eswitch mode is set to switchdev. port function +configuration of the pci vf/sf is supported through devlink eswitch port. + +port function attributes should be set before pci vf/sf is enumerated by the +driver. + +mac address setup +----------------- +mlx5 driver provides mechanism to setup the mac address of the pci vf/sf. + +the configured mac address of the pci vf/sf will be used by netdevice and rdma +device created for the pci vf/sf. 
+ +- get the mac address of the vf identified by its unique devlink port index:: + + $ devlink port show pci/0000:06:00.0/2 + pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 + function: + hw_addr 00:00:00:00:00:00 + +- set the mac address of the vf identified by its unique devlink port index:: + + $ devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55 + + $ devlink port show pci/0000:06:00.0/2 + pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 + function: + hw_addr 00:11:22:33:44:55 + +- get the mac address of the sf identified by its unique devlink port index:: + + $ devlink port show pci/0000:06:00.0/32768 + pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcisf pfnum 0 sfnum 88 + function: + hw_addr 00:00:00:00:00:00 + +- set the mac address of the vf identified by its unique devlink port index:: + + $ devlink port function set pci/0000:06:00.0/32768 hw_addr 00:00:00:00:88:88 + + $ devlink port show pci/0000:06:00.0/32768 + pci/0000:06:00.0/32768: type eth netdev enp6s0pf0sf88 flavour pcivf pfnum 0 sfnum 88 + function: + hw_addr 00:00:00:00:88:88 + +sf state setup +-------------- +to use the sf, the user must active the sf using the sf function state +attribute. 
+ +- get the state of the sf identified by its unique devlink port index:: + + $ devlink port show ens2f0npf0sf88 + pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false + function: + hw_addr 00:00:00:00:88:88 state inactive opstate detached + +- activate the function and verify its state is active:: + + $ devlink port function set ens2f0npf0sf88 state active + + $ devlink port show ens2f0npf0sf88 + pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false + function: + hw_addr 00:00:00:00:88:88 state active opstate detached + +upon function activation, the pf driver instance gets the event from the device +that a particular sf was activated. it's the cue to put the device on bus, probe +it and instantiate the devlink instance and class specific auxiliary devices +for it. + +- show the auxiliary device and port of the subfunction:: + + $ devlink dev show + devlink dev show auxiliary/mlx5_core.sf.4 + + $ devlink port show auxiliary/mlx5_core.sf.4/1 + auxiliary/mlx5_core.sf.4/1: type eth netdev p0sf88 flavour virtual port 0 splittable false + + $ rdma link show mlx5_0/1 + link mlx5_0/1 state active physical_state link_up netdev p0sf88 + + $ rdma dev show + 8: rocep6s0f1: node_type ca fw 16.29.0550 node_guid 248a:0703:00b3:d113 sys_image_guid 248a:0703:00b3:d112 + 13: mlx5_0: node_type ca fw 16.29.0550 node_guid 0000:00ff:fe00:8888 sys_image_guid 248a:0703:00b3:d112 + +- subfunction auxiliary device and class device hierarchy:: + + mlx5_core.sf.4 + (subfunction auxiliary device) + /\ + / \ + / \ + / \ + / \ + mlx5_core.eth.4 mlx5_core.rdma.4 + (sf eth aux dev) (sf rdma aux dev) + | | + | | + p0sf88 mlx5_0 + (sf netdev) (sf rdma device) + +additionally, the sf port also gets the event when the driver attaches to the +auxiliary device of the subfunction. this results in changing the operational +state of the function. 
this provides visiblity to the user to decide when is it +safe to delete the sf port for graceful termination of the subfunction. + +- show the sf port operational state:: + + $ devlink port show ens2f0npf0sf88 + pci/0000:06:00.0/32768: type eth netdev ens2f0npf0sf88 flavour pcisf controller 0 pfnum 0 sfnum 88 external false splittable false + function: + hw_addr 00:00:00:00:88:88 state active opstate attached +
Networking
142d93d12dc187f6a32aae2048da0c8230636b86
parav pandit
documentation
networking
device_drivers, ethernet, mellanox
net/mlx5e: enable xdp for connect-x ipsec capable devices
this limitation was inherited by previous innova (fpga) ipsec implementation, it uses its private set of rq handlers which does not support xdp, for connect-x this is no longer true.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
enable xdp for connect-x ipsec capable devices
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
3
2
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c - if (mlx5_ipsec_dev(priv->mdev)) { - netdev_warn(netdev, "can't set xdp with ipsec offload "); + if (mlx5_fpga_is_ipsec_device(priv->mdev)) { + netdev_warn(netdev, + "xdp is not available on innova cards with ipsec support ");
Networking
e33f9f5f2d3a5fa97728a43708f41da2d4faae65
raed salem
drivers
net
core, ethernet, mellanox, mlx5
net: sched: add multi-queue support to sch_tree_lock
the existing qdiscs that set tcq_f_mqroot don't use sch_tree_lock. however, hardware-offloaded htb will start setting this flag while also using sch_tree_lock.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
htb offload
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h']
1
10
4
--- diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h -static inline void sch_tree_lock(const struct qdisc *q) +static inline void sch_tree_lock(struct qdisc *q) - spin_lock_bh(qdisc_root_sleeping_lock(q)); + if (q->flags & tcq_f_mqroot) + spin_lock_bh(qdisc_lock(q)); + else + spin_lock_bh(qdisc_root_sleeping_lock(q)); -static inline void sch_tree_unlock(const struct qdisc *q) +static inline void sch_tree_unlock(struct qdisc *q) - spin_unlock_bh(qdisc_root_sleeping_lock(q)); + if (q->flags & tcq_f_mqroot) + spin_unlock_bh(qdisc_lock(q)); + else + spin_unlock_bh(qdisc_root_sleeping_lock(q));
Networking
ca1e4ab199933e1af3f9a86d31060b7f9181c3fc
maxim mikityanskiy
include
net
net: sched: add extack to qdisc_class_ops.delete
in a following commit, sch_htb will start using extack in the delete class operation to pass hardware errors in offload mode. this commit prepares for that by adding the extack parameter to this callback and converting usage of the existing qdiscs.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
htb offload
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
10
22
12
--- diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h - int (*delete)(struct qdisc *, unsigned long); + int (*delete)(struct qdisc *, unsigned long, + struct netlink_ext_ack *); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c - struct qdisc *q, unsigned long cl) + struct qdisc *q, unsigned long cl, + struct netlink_ext_ack *extack) - err = cops->delete(q, cl); + err = cops->delete(q, cl, extack); - err = tclass_del_notify(net, cops, skb, n, q, cl); + err = tclass_del_notify(net, cops, skb, n, q, cl, extack); diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c -static int atm_tc_delete(struct qdisc *sch, unsigned long arg) +static int atm_tc_delete(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack *extack) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c -static int cbq_delete(struct qdisc *sch, unsigned long arg) +static int cbq_delete(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack *extack) diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c -static int drr_delete_class(struct qdisc *sch, unsigned long arg) +static int drr_delete_class(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack *extack) diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c -static int dsmark_delete(struct qdisc *sch, unsigned long arg) +static int dsmark_delete(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack *extack) diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c -hfsc_delete_class(struct qdisc *sch, unsigned long arg) +hfsc_delete_class(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack 
*extack) diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c -static int htb_delete(struct qdisc *sch, unsigned long arg) +static int htb_delete(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack *extack) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c -static int qfq_delete_class(struct qdisc *sch, unsigned long arg) +static int qfq_delete_class(struct qdisc *sch, unsigned long arg, + struct netlink_ext_ack *extack) diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c -static int sfb_delete(struct qdisc *sch, unsigned long cl) +static int sfb_delete(struct qdisc *sch, unsigned long cl, + struct netlink_ext_ack *extack)
Networking
4dd78a73738afa92d33a226ec477b42938b31c83
maxim mikityanskiy tariq toukan tariqt nvidia com
include
net
sch_htb: hierarchical qos hardware offload
htb doesn't scale well because of contention on a single lock, and it also consumes cpu. this patch adds support for offloading htb to hardware that supports hierarchical rate limiting.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
htb offload
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
512
28
--- diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h + tc_setup_qdisc_htb, diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h +enum tc_htb_command { + /* root */ + tc_htb_create, /* initialize htb offload. */ + tc_htb_destroy, /* destroy htb offload. */ + + /* classes */ + /* allocate qid and create leaf. */ + tc_htb_leaf_alloc_queue, + /* convert leaf to inner, preserve and return qid, create new leaf. */ + tc_htb_leaf_to_inner, + /* delete leaf, while siblings remain. */ + tc_htb_leaf_del, + /* delete leaf, convert parent to leaf, preserving qid. */ + tc_htb_leaf_del_last, + /* tc_htb_leaf_del_last, but delete driver data on hardware errors. */ + tc_htb_leaf_del_last_force, + /* modify parameters of a node. */ + tc_htb_node_modify, + + /* class qdisc */ + tc_htb_leaf_query_queue, /* query qid by classid. */ +}; + +struct tc_htb_qopt_offload { + struct netlink_ext_ack *extack; + enum tc_htb_command command; + u16 classid; + u32 parent_classid; + u16 qid; + u16 moved_qid; + u64 rate; + u64 ceil; +}; + +#define tc_htb_classid_root u32_max + diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h + tca_htb_offload, diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c + + struct qdisc **direct_qdiscs; + unsigned int num_direct_qdiscs; + + bool offload; - if (cl->leaf.q) + if (cl->leaf.q && !q->offload) + [tca_htb_offload] = { .type = nla_flag }, +static void htb_set_lockdep_class_child(struct qdisc *q) +{ + static struct lock_class_key child_key; + + lockdep_set_class(qdisc_lock(q), &child_key); +} + +static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt) +{ + return dev->netdev_ops->ndo_setup_tc(dev, tc_setup_qdisc_htb, opt); +} + + struct net_device 
*dev = qdisc_dev(sch); + struct tc_htb_qopt_offload offload_opt; + unsigned int ntx; + q->offload = nla_get_flag(tb[tca_htb_offload]); + + if (q->offload) { + if (sch->parent != tc_h_root) + return -eopnotsupp; + + if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) + return -eopnotsupp; + + q->num_direct_qdiscs = dev->real_num_tx_queues; + q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, + sizeof(*q->direct_qdiscs), + gfp_kernel); + if (!q->direct_qdiscs) + return -enomem; + } + - return err; + goto err_free_direct_qdiscs; + if (!q->offload) + return 0; + + for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx); + struct qdisc *qdisc; + + qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, + tc_h_make(sch->handle, 0), extack); + if (!qdisc) { + err = -enomem; + goto err_free_qdiscs; + } + + htb_set_lockdep_class_child(qdisc); + q->direct_qdiscs[ntx] = qdisc; + qdisc->flags |= tcq_f_onetxqueue | tcq_f_noparent; + } + + sch->flags |= tcq_f_mqroot; + + offload_opt = (struct tc_htb_qopt_offload) { + .command = tc_htb_create, + .parent_classid = tc_h_maj(sch->handle) >> 16, + .classid = tc_h_min(q->defcls), + .extack = extack, + }; + err = htb_offload(dev, &offload_opt); + if (err) + goto err_free_qdiscs; + + +err_free_qdiscs: + /* tc_htb_create call failed, avoid any further calls to the driver. */ + q->offload = false; + + for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx]; + ntx++) + qdisc_put(q->direct_qdiscs[ntx]); + + qdisc_class_hash_destroy(&q->clhash); + /* prevent use-after-free and double-free when htb_destroy gets called. 
+ */ + q->clhash.hash = null; + q->clhash.hashsize = 0; + +err_free_direct_qdiscs: + kfree(q->direct_qdiscs); + q->direct_qdiscs = null; + return err; +} + +static void htb_attach_offload(struct qdisc *sch) +{ + struct net_device *dev = qdisc_dev(sch); + struct htb_sched *q = qdisc_priv(sch); + unsigned int ntx; + + for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { + struct qdisc *old, *qdisc = q->direct_qdiscs[ntx]; + + old = dev_graft_qdisc(qdisc->dev_queue, qdisc); + qdisc_put(old); + qdisc_hash_add(qdisc, false); + } + for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) { + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx); + struct qdisc *old = dev_graft_qdisc(dev_queue, null); + + qdisc_put(old); + } + + kfree(q->direct_qdiscs); + q->direct_qdiscs = null; +} + +static void htb_attach_software(struct qdisc *sch) +{ + struct net_device *dev = qdisc_dev(sch); + unsigned int ntx; + + /* resemble qdisc_graft behavior. */ + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx); + struct qdisc *old = dev_graft_qdisc(dev_queue, sch); + + qdisc_refcount_inc(sch); + + qdisc_put(old); + } +} + +static void htb_attach(struct qdisc *sch) +{ + struct htb_sched *q = qdisc_priv(sch); + + if (q->offload) + htb_attach_offload(sch); + else + htb_attach_software(sch); + if (q->offload) + sch->flags |= tcq_f_offloaded; + else + sch->flags &= ~tcq_f_offloaded; + + if (q->offload && nla_put_flag(skb, tca_htb_offload)) + goto nla_put_failure; +static struct netdev_queue * +htb_select_queue(struct qdisc *sch, struct tcmsg *tcm) +{ + struct net_device *dev = qdisc_dev(sch); + struct tc_htb_qopt_offload offload_opt; + int err; + + offload_opt = (struct tc_htb_qopt_offload) { + .command = tc_htb_leaf_query_queue, + .classid = tc_h_min(tcm->tcm_parent), + }; + err = htb_offload(dev, &offload_opt); + if (err || offload_opt.qid >= dev->num_tx_queues) + return null; + return netdev_get_tx_queue(dev, 
offload_opt.qid); +} + +static struct qdisc * +htb_graft_helper(struct netdev_queue *dev_queue, struct qdisc *new_q) +{ + struct net_device *dev = dev_queue->dev; + struct qdisc *old_q; + + if (dev->flags & iff_up) + dev_deactivate(dev); + old_q = dev_graft_qdisc(dev_queue, new_q); + if (new_q) + new_q->flags |= tcq_f_onetxqueue | tcq_f_noparent; + if (dev->flags & iff_up) + dev_activate(dev); + + return old_q; +} + +static void htb_offload_move_qdisc(struct qdisc *sch, u16 qid_old, u16 qid_new) +{ + struct netdev_queue *queue_old, *queue_new; + struct net_device *dev = qdisc_dev(sch); + struct qdisc *qdisc; + + queue_old = netdev_get_tx_queue(dev, qid_old); + queue_new = netdev_get_tx_queue(dev, qid_new); + + if (dev->flags & iff_up) + dev_deactivate(dev); + qdisc = dev_graft_qdisc(queue_old, null); + qdisc->dev_queue = queue_new; + qdisc = dev_graft_qdisc(queue_new, qdisc); + if (dev->flags & iff_up) + dev_activate(dev); + + warn_on(!(qdisc->flags & tcq_f_builtin)); +} + + struct netdev_queue *dev_queue = sch->dev_queue; + struct htb_sched *q = qdisc_priv(sch); + struct qdisc *old_q; - if (new == null && - (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, - cl->common.classid, extack)) == null) - return -enobufs; + + if (q->offload) { + dev_queue = new->dev_queue; + warn_on(dev_queue != cl->leaf.q->dev_queue); + } + + if (!new) { + new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, + cl->common.classid, extack); + if (!new) + return -enobufs; + } + + if (q->offload) { + htb_set_lockdep_class_child(new); + /* one ref for cl->leaf.q, the other for dev_queue->qdisc. 
*/ + qdisc_refcount_inc(new); + old_q = htb_graft_helper(dev_queue, new); + } + + if (q->offload) { + warn_on(old_q != *old); + qdisc_put(old_q); + } + -static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, +static void htb_parent_to_leaf(struct qdisc *sch, struct htb_class *cl, + struct htb_sched *q = qdisc_priv(sch); +static void htb_parent_to_leaf_offload(struct qdisc *sch, + struct netdev_queue *dev_queue, + struct qdisc *new_q) +{ + struct qdisc *old_q; + + /* one ref for cl->leaf.q, the other for dev_queue->qdisc. */ + qdisc_refcount_inc(new_q); + old_q = htb_graft_helper(dev_queue, new_q); + warn_on(!(old_q->flags & tcq_f_builtin)); +} + +static int htb_destroy_class_offload(struct qdisc *sch, struct htb_class *cl, + bool last_child, bool destroying, + struct netlink_ext_ack *extack) +{ + struct tc_htb_qopt_offload offload_opt; + struct qdisc *q = cl->leaf.q; + struct qdisc *old = null; + int err; + + if (cl->level) + return -einval; + + warn_on(!q); + if (!destroying) { + /* on destroy of htb, two cases are possible: + * 1. q is a normal qdisc, but q->dev_queue has noop qdisc. + * 2. q is a noop qdisc (for nodes that were inner), + * q->dev_queue is noop_netdev_queue. + */ + old = htb_graft_helper(q->dev_queue, null); + warn_on(!old); + warn_on(old != q); + } + + offload_opt = (struct tc_htb_qopt_offload) { + .command = !last_child ? tc_htb_leaf_del : + destroying ? 
tc_htb_leaf_del_last_force : + tc_htb_leaf_del_last, + .classid = cl->common.classid, + .extack = extack, + }; + err = htb_offload(qdisc_dev(sch), &offload_opt); + + if (!err || destroying) + qdisc_put(old); + else + htb_graft_helper(q->dev_queue, old); + + if (last_child) + return err; + + if (!err && offload_opt.moved_qid != 0) { + if (destroying) + q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch), + offload_opt.qid); + else + htb_offload_move_qdisc(sch, offload_opt.moved_qid, + offload_opt.qid); + } + + return err; +} + + struct net_device *dev = qdisc_dev(sch); + struct tc_htb_qopt_offload offload_opt; + bool nonempty, changed; - for (i = 0; i < q->clhash.hashsize; i++) { - hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], - common.hnode) - htb_destroy_class(sch, cl); - } + + do { + nonempty = false; + changed = false; + for (i = 0; i < q->clhash.hashsize; i++) { + hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], + common.hnode) { + bool last_child; + + if (!q->offload) { + htb_destroy_class(sch, cl); + continue; + } + + nonempty = true; + + if (cl->level) + continue; + + changed = true; + + last_child = htb_parent_last_child(cl); + htb_destroy_class_offload(sch, cl, last_child, + true, null); + qdisc_class_hash_remove(&q->clhash, + &cl->common); + if (cl->parent) + cl->parent->children--; + if (last_child) + htb_parent_to_leaf(sch, cl, null); + htb_destroy_class(sch, cl); + } + } + } while (changed); + warn_on(nonempty); + + + if (!q->offload) + return; + + offload_opt = (struct tc_htb_qopt_offload) { + .command = tc_htb_destroy, + }; + htb_offload(dev, &offload_opt); + + if (!q->direct_qdiscs) + return; + for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) + qdisc_put(q->direct_qdiscs[i]); + kfree(q->direct_qdiscs); + int err; - if (!cl->level && htb_parent_last_child(cl)) { - new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + if (!cl->level && htb_parent_last_child(cl)) + last_child = 1; + + if (q->offload) { + err = 
htb_destroy_class_offload(sch, cl, last_child, false, + extack); + if (err) + return err; + } + + if (last_child) { + struct netdev_queue *dev_queue; + + dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue; + new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, - last_child = 1; + if (q->offload) { + if (new_q) + htb_set_lockdep_class_child(new_q); + htb_parent_to_leaf_offload(sch, dev_queue, new_q); + } - htb_parent_to_leaf(q, cl, new_q); + htb_parent_to_leaf(sch, cl, new_q); + struct tc_htb_qopt_offload offload_opt; + struct netdev_queue *dev_queue; + rate64 = tb[tca_htb_rate64] ? nla_get_u64(tb[tca_htb_rate64]) : 0; + ceil64 = tb[tca_htb_ceil64] ? nla_get_u64(tb[tca_htb_ceil64]) : 0; + - struct qdisc *new_q; + struct net_device *dev = qdisc_dev(sch); + struct qdisc *new_q, *old_q; - if (err) { - tcf_block_put(cl->block); - kfree(cl); - goto failure; - } + if (err) + goto err_block_put; + cl->common.classid = classid; + + /* make sure nothing interrupts us in between of two + * ndo_setup_tc calls. + */ + assert_rtnl(); + - new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + if (!q->offload) { + dev_queue = sch->dev_queue; + } else if (!(parent && !parent->level)) { + /* assign a dev_queue to this classid. */ + offload_opt = (struct tc_htb_qopt_offload) { + .command = tc_htb_leaf_alloc_queue, + .classid = cl->common.classid, + .parent_classid = parent ? + tc_h_min(parent->common.classid) : + tc_htb_classid_root, + .rate = max_t(u64, hopt->rate.rate, rate64), + .ceil = max_t(u64, hopt->ceil.rate, ceil64), + .extack = extack, + }; + err = htb_offload(dev, &offload_opt); + if (err) { + pr_err("htb: tc_htb_leaf_alloc_queue failed with err = %d ", + err); + goto err_kill_estimator; + } + dev_queue = netdev_get_tx_queue(dev, offload_opt.qid); + } else { /* first child. 
*/ + dev_queue = parent->leaf.q->dev_queue; + old_q = htb_graft_helper(dev_queue, null); + warn_on(old_q != parent->leaf.q); + offload_opt = (struct tc_htb_qopt_offload) { + .command = tc_htb_leaf_to_inner, + .classid = cl->common.classid, + .parent_classid = + tc_h_min(parent->common.classid), + .rate = max_t(u64, hopt->rate.rate, rate64), + .ceil = max_t(u64, hopt->ceil.rate, ceil64), + .extack = extack, + }; + err = htb_offload(dev, &offload_opt); + if (err) { + pr_err("htb: tc_htb_leaf_to_inner failed with err = %d ", + err); + htb_graft_helper(dev_queue, old_q); + goto err_kill_estimator; + } + qdisc_put(old_q); + } + new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops, + if (q->offload) { + if (new_q) { + htb_set_lockdep_class_child(new_q); + /* one ref for cl->leaf.q, the other for + * dev_queue->qdisc. + */ + qdisc_refcount_inc(new_q); + } + old_q = htb_graft_helper(dev_queue, new_q); + /* no qdisc_put needed. */ + warn_on(!(old_q->flags & tcq_f_builtin)); + } + - cl->common.classid = classid; - sch_tree_lock(sch); - } - rate64 = tb[tca_htb_rate64] ? nla_get_u64(tb[tca_htb_rate64]) : 0; + if (q->offload) { + struct net_device *dev = qdisc_dev(sch); + + offload_opt = (struct tc_htb_qopt_offload) { + .command = tc_htb_node_modify, + .classid = cl->common.classid, + .rate = max_t(u64, hopt->rate.rate, rate64), + .ceil = max_t(u64, hopt->ceil.rate, ceil64), + .extack = extack, + }; + err = htb_offload(dev, &offload_opt); + if (err) + /* estimator was replaced, and rollback may fail + * as well, so we don't try to recover it, and + * the estimator won't work property with the + * offload anyway, because bstats are updated + * only when the stats are queried. + */ + return err; + } - ceil64 = tb[tca_htb_ceil64] ? 
nla_get_u64(tb[tca_htb_ceil64]) : 0; + sch_tree_lock(sch); + } +err_kill_estimator: + gen_kill_estimator(&cl->rate_est); +err_block_put: + tcf_block_put(cl->block); + kfree(cl); + .select_queue = htb_select_queue, + .attach = htb_attach, diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h --- a/tools/include/uapi/linux/pkt_sched.h +++ b/tools/include/uapi/linux/pkt_sched.h + tca_htb_offload,
Networking
d03b195b5aa015f6c11988b86a3625f8d5dbac52
maxim mikityanskiy
include
uapi
linux, uapi
sch_htb: stats for offloaded htb
this commit adds support for statistics of offloaded htb. bytes and packets counters for leaf and inner nodes are supported, the values are taken from per-queue qdiscs, and the numbers that the user sees should have the same behavior as the software (non-offloaded) htb.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
htb offload
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
53
0
--- diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c + struct gnet_stats_basic_packed bstats_bias; + struct htb_sched *q = qdisc_priv(sch); + if (q->offload && nla_put_flag(skb, tca_htb_offload)) + goto nla_put_failure; +static void htb_offload_aggregate_stats(struct htb_sched *q, + struct htb_class *cl) +{ + struct htb_class *c; + unsigned int i; + + memset(&cl->bstats, 0, sizeof(cl->bstats)); + + for (i = 0; i < q->clhash.hashsize; i++) { + hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { + struct htb_class *p = c; + + while (p && p->level < cl->level) + p = p->parent; + + if (p != cl) + continue; + + cl->bstats.bytes += c->bstats_bias.bytes; + cl->bstats.packets += c->bstats_bias.packets; + if (c->level == 0) { + cl->bstats.bytes += c->leaf.q->bstats.bytes; + cl->bstats.packets += c->leaf.q->bstats.packets; + } + } + } +} + + struct htb_sched *q = qdisc_priv(sch); + if (q->offload) { + if (!cl->level) { + if (cl->leaf.q) + cl->bstats = cl->leaf.q->bstats; + else + memset(&cl->bstats, 0, sizeof(cl->bstats)); + cl->bstats.bytes += cl->bstats_bias.bytes; + cl->bstats.packets += cl->bstats_bias.packets; + } else { + htb_offload_aggregate_stats(q, cl); + } + } + + if (cl->parent) { + cl->parent->bstats_bias.bytes += q->bstats.bytes; + cl->parent->bstats_bias.packets += q->bstats.packets; + } + + parent->bstats_bias.bytes += old_q->bstats.bytes; + parent->bstats_bias.packets += old_q->bstats.packets;
Networking
83271586249c8ecf8458834864c827f67ad57773
maxim mikityanskiy tariq toukan tariqt nvidia com
net
sched
net/mlx5e: support htb offload
this commit adds support for htb offload in the mlx5e driver.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
htb offload
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c', 'makefile']
15
1,516
49
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/makefile b/drivers/net/ethernet/mellanox/mlx5/core/makefile --- a/drivers/net/ethernet/mellanox/mlx5/core/makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/makefile - diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o + diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \ + fw_reset.o qos.o - en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o + en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \ + en/qos.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +#include "en/qos.h" +#define mlx5e_state_dereference(priv, p) \ + rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) + + struct mlx5e_txqsq __rcu * __rcu *qos_sqs; + u16 qos_sqs_size; + bool qos_update; + u16 qos_queue_group_id; +struct mlx5e_htb { + declare_hashtable(qos_tc2node, order_base_2(mlx5e_qos_max_leaf_nodes)); + declare_bitmap(qos_used_qids, mlx5e_qos_max_leaf_nodes); + struct mlx5e_sq_stats **qos_sq_stats; + u16 max_qos_sqs; + u16 maj_id; + u16 defcls; +}; + - struct mlx5e_txqsq *txq2sq[(mlx5e_max_num_channels + 1) * mlx5e_max_num_tc]; + struct mlx5e_txqsq *txq2sq[(mlx5e_max_num_channels + 1) * mlx5e_max_num_tc + + mlx5e_qos_max_leaf_nodes]; + struct mlx5e_htb htb; +int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv); +int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, + struct mlx5e_params *params, struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid); + u16 qos_queue_group_id, +void mlx5e_close_txqsq(struct mlx5e_txqsq *sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, + struct mlx5e_sq_param *param); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c - err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2020, mellanox technologies inc. all rights reserved. */ + +#include "en.h" +#include "params.h" +#include "../qos.h" + +#define bytes_in_mbit 125000 + +int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev) +{ + return min(mlx5e_qos_max_leaf_nodes, mlx5_qos_max_leaf_nodes(mdev)); +} + +int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv) +{ + int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev)); + + return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1; +} + +/* software representation of the qos tree (internal to this file) */ + +static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv) +{ + int size = mlx5e_qos_max_leaf_nodes(priv->mdev); + int res; + + warn_once(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held ", __func__); + res = find_first_zero_bit(priv->htb.qos_used_qids, size); + + return res == size ? -enospc : res; +} + +struct mlx5e_qos_node { + struct hlist_node hnode; + struct rcu_head rcu; + struct mlx5e_qos_node *parent; + u64 rate; + u32 bw_share; + u32 max_average_bw; + u32 hw_id; + u32 classid; /* 16-bit, except root. 
*/ + u16 qid; +}; + +#define mlx5e_qos_qid_inner 0xffff +#define mlx5e_htb_classid_root 0xffffffff + +static struct mlx5e_qos_node * +mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid, + struct mlx5e_qos_node *parent) +{ + struct mlx5e_qos_node *node; + + node = kzalloc(sizeof(*node), gfp_kernel); + if (!node) + return err_ptr(-enomem); + + node->parent = parent; + + node->qid = qid; + __set_bit(qid, priv->htb.qos_used_qids); + + node->classid = classid; + hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid); + + mlx5e_update_tx_netdev_queues(priv); + + return node; +} + +static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv) +{ + struct mlx5e_qos_node *node; + + node = kzalloc(sizeof(*node), gfp_kernel); + if (!node) + return err_ptr(-enomem); + + node->qid = mlx5e_qos_qid_inner; + node->classid = mlx5e_htb_classid_root; + hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid); + + return node; +} + +static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid) +{ + struct mlx5e_qos_node *node = null; + + hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) { + if (node->classid == classid) + break; + } + + return node; +} + +static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid) +{ + struct mlx5e_qos_node *node = null; + + hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) { + if (node->classid == classid) + break; + } + + return node; +} + +static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node) +{ + hash_del_rcu(&node->hnode); + if (node->qid != mlx5e_qos_qid_inner) { + __clear_bit(node->qid, priv->htb.qos_used_qids); + mlx5e_update_tx_netdev_queues(priv); + } + kfree_rcu(node, rcu); +} + +/* tx datapath api */ + +static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid) +{ + /* these channel params are safe to access from the datapath, because: + * 1. 
this function is called only after checking priv->htb.maj_id != 0, + * and the number of queues can't change while htb offload is active. + * 2. when priv->htb.maj_id becomes 0, synchronize_rcu waits for + * mlx5e_select_queue to finish while holding priv->state_lock, + * preventing other code from changing the number of queues. + */ + bool is_ptp = mlx5e_get_pflag(&chs->params, mlx5e_pflag_tx_port_ts); + + return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid; +} + +int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid) +{ + struct mlx5e_qos_node *node; + u16 qid; + int res; + + rcu_read_lock(); + + node = mlx5e_sw_node_find_rcu(priv, classid); + if (!node) { + res = -enoent; + goto out; + } + qid = read_once(node->qid); + if (qid == mlx5e_qos_qid_inner) { + res = -einval; + goto out; + } + res = mlx5e_qid_from_qos(&priv->channels, qid); + +out: + rcu_read_unlock(); + return res; +} + +static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid) +{ + struct mlx5e_params *params = &priv->channels.params; + struct mlx5e_txqsq __rcu **qos_sqs; + struct mlx5e_channel *c; + int ix; + + ix = qid % params->num_channels; + qid /= params->num_channels; + c = priv->channels.c[ix]; + + qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs); + return mlx5e_state_dereference(priv, qos_sqs[qid]); +} + +/* sq lifecycle */ + +static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs, + struct mlx5e_qos_node *node) +{ + struct mlx5e_create_cq_param ccp = {}; + struct mlx5e_txqsq __rcu **qos_sqs; + struct mlx5e_sq_param param_sq; + struct mlx5e_cq_param param_cq; + int txq_ix, ix, qid, err = 0; + struct mlx5e_params *params; + struct mlx5e_channel *c; + struct mlx5e_txqsq *sq; + + params = &chs->params; + + txq_ix = mlx5e_qid_from_qos(chs, node->qid); + + warn_on(node->qid > priv->htb.max_qos_sqs); + if (node->qid == priv->htb.max_qos_sqs) { + struct mlx5e_sq_stats *stats, **stats_list = null; + + if 
(priv->htb.max_qos_sqs == 0) { + stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev), + sizeof(*stats_list), + gfp_kernel); + if (!stats_list) + return -enomem; + } + stats = kzalloc(sizeof(*stats), gfp_kernel); + if (!stats) { + kvfree(stats_list); + return -enomem; + } + if (stats_list) + write_once(priv->htb.qos_sq_stats, stats_list); + write_once(priv->htb.qos_sq_stats[node->qid], stats); + /* order max_qos_sqs increment after writing the array pointer. + * pairs with smp_load_acquire in en_stats.c. + */ + smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1); + } + + ix = node->qid % params->num_channels; + qid = node->qid / params->num_channels; + c = chs->c[ix]; + + qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs); + sq = kzalloc(sizeof(*sq), gfp_kernel); + + if (!sq) + return -enomem; + + mlx5e_build_create_cq_param(&ccp, c); + + memset(&param_sq, 0, sizeof(param_sq)); + memset(&param_cq, 0, sizeof(param_cq)); + mlx5e_build_sq_param(priv, params, &param_sq); + mlx5e_build_tx_cq_param(priv, params, &param_cq); + err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq); + if (err) + goto err_free_sq; + err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params, + &param_sq, sq, 0, node->hw_id, node->qid); + if (err) + goto err_close_cq; + + rcu_assign_pointer(qos_sqs[qid], sq); + + return 0; + +err_close_cq: + mlx5e_close_cq(&sq->cq); +err_free_sq: + kfree(sq); + return err; +} + +static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node) +{ + struct mlx5e_txqsq *sq; + + sq = mlx5e_get_qos_sq(priv, node->qid); + + write_once(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq); + + /* make the change to txq2sq visible before the queue is started. + * as mlx5e_xmit runs under a spinlock, there is an implicit acquire, + * which pairs with this barrier. 
+ */ + smp_wmb(); + + qos_dbg(priv->mdev, "activate qos sq qid %u ", node->qid); + mlx5e_activate_txqsq(sq); +} + +static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) +{ + struct mlx5e_txqsq *sq; + + sq = mlx5e_get_qos_sq(priv, qid); + if (!sq) /* handle the case when the sq failed to open. */ + return; + + qos_dbg(priv->mdev, "deactivate qos sq qid %u ", qid); + mlx5e_deactivate_txqsq(sq); + + /* the queue is disabled, no synchronization with datapath is needed. */ + priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = null; +} + +static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid) +{ + struct mlx5e_txqsq __rcu **qos_sqs; + struct mlx5e_params *params; + struct mlx5e_channel *c; + struct mlx5e_txqsq *sq; + int ix; + + params = &priv->channels.params; + + ix = qid % params->num_channels; + qid /= params->num_channels; + c = priv->channels.c[ix]; + qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs); + sq = rcu_replace_pointer(qos_sqs[qid], null, lockdep_is_held(&priv->state_lock)); + if (!sq) /* handle the case when the sq failed to open. */ + return; + + synchronize_rcu(); /* sync with napi. */ + + mlx5e_close_txqsq(sq); + mlx5e_close_cq(&sq->cq); + kfree(sq); +} + +void mlx5e_qos_close_queues(struct mlx5e_channel *c) +{ + struct mlx5e_txqsq __rcu **qos_sqs; + int i; + + qos_sqs = rcu_replace_pointer(c->qos_sqs, null, lockdep_is_held(&c->priv->state_lock)); + if (!qos_sqs) + return; + synchronize_rcu(); /* sync with napi. */ + + for (i = 0; i < c->qos_sqs_size; i++) { + struct mlx5e_txqsq *sq; + + sq = mlx5e_state_dereference(c->priv, qos_sqs[i]); + if (!sq) /* handle the case when the sq failed to open. 
*/ + continue; + + mlx5e_close_txqsq(sq); + mlx5e_close_cq(&sq->cq); + kfree(sq); + } + + kvfree(qos_sqs); +} + +static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs) +{ + int i; + + for (i = 0; i < chs->num; i++) + mlx5e_qos_close_queues(chs->c[i]); +} + +static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs) +{ + u16 qos_sqs_size; + int i; + + qos_sqs_size = div_round_up(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num); + + for (i = 0; i < chs->num; i++) { + struct mlx5e_txqsq **sqs; + + sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), gfp_kernel); + if (!sqs) + goto err_free; + + write_once(chs->c[i]->qos_sqs_size, qos_sqs_size); + smp_wmb(); /* pairs with mlx5e_napi_poll. */ + rcu_assign_pointer(chs->c[i]->qos_sqs, sqs); + } + + return 0; + +err_free: + while (--i >= 0) { + struct mlx5e_txqsq **sqs; + + sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, null, + lockdep_is_held(&priv->state_lock)); + + synchronize_rcu(); /* sync with napi. 
*/ + kvfree(sqs); + } + return -enomem; +} + +int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs) +{ + struct mlx5e_qos_node *node = null; + int bkt, err; + + if (!priv->htb.maj_id) + return 0; + + err = mlx5e_qos_alloc_queues(priv, chs); + if (err) + return err; + + hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) { + if (node->qid == mlx5e_qos_qid_inner) + continue; + err = mlx5e_open_qos_sq(priv, chs, node); + if (err) { + mlx5e_qos_close_all_queues(chs); + return err; + } + } + + return 0; +} + +void mlx5e_qos_activate_queues(struct mlx5e_priv *priv) +{ + struct mlx5e_qos_node *node = null; + int bkt; + + hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) { + if (node->qid == mlx5e_qos_qid_inner) + continue; + mlx5e_activate_qos_sq(priv, node); + } +} + +void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c) +{ + struct mlx5e_params *params = &c->priv->channels.params; + struct mlx5e_txqsq __rcu **qos_sqs; + int i; + + qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs); + if (!qos_sqs) + return; + + for (i = 0; i < c->qos_sqs_size; i++) { + u16 qid = params->num_channels * i + c->ix; + struct mlx5e_txqsq *sq; + + sq = mlx5e_state_dereference(c->priv, qos_sqs[i]); + if (!sq) /* handle the case when the sq failed to open. */ + continue; + + qos_dbg(c->mdev, "deactivate qos sq qid %u ", qid); + mlx5e_deactivate_txqsq(sq); + + /* the queue is disabled, no synchronization with datapath is needed. 
*/ + c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = null; + } +} + +static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs) +{ + int i; + + for (i = 0; i < chs->num; i++) + mlx5e_qos_deactivate_queues(chs->c[i]); +} + +/* htb api */ + +int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, + struct netlink_ext_ack *extack) +{ + struct mlx5e_qos_node *root; + bool opened; + int err; + + qos_dbg(priv->mdev, "tc_htb_create handle %04x:, default :%04x ", htb_maj_id, htb_defcls); + + if (!mlx5_qos_is_supported(priv->mdev)) { + nl_set_err_msg_mod(extack, + "missing qos capabilities. try disabling sriov or use a supported device."); + return -eopnotsupp; + } + + opened = test_bit(mlx5e_state_opened, &priv->state); + if (opened) { + err = mlx5e_qos_alloc_queues(priv, &priv->channels); + if (err) + return err; + } + + root = mlx5e_sw_node_create_root(priv); + if (is_err(root)) { + err = ptr_err(root); + goto err_free_queues; + } + + err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id); + if (err) { + nl_set_err_msg_mod(extack, "firmware error. try upgrading firmware."); + goto err_sw_node_delete; + } + + write_once(priv->htb.defcls, htb_defcls); + /* order maj_id after defcls - pairs with + * mlx5e_select_queue/mlx5e_select_htb_queues. + */ + smp_store_release(&priv->htb.maj_id, htb_maj_id); + + return 0; + +err_sw_node_delete: + mlx5e_sw_node_delete(priv, root); + +err_free_queues: + if (opened) + mlx5e_qos_close_all_queues(&priv->channels); + return err; +} + +int mlx5e_htb_root_del(struct mlx5e_priv *priv) +{ + struct mlx5e_qos_node *root; + int err; + + qos_dbg(priv->mdev, "tc_htb_destroy "); + + write_once(priv->htb.maj_id, 0); + synchronize_rcu(); /* sync with mlx5e_select_htb_queue and tx data path. 
*/ + + root = mlx5e_sw_node_find(priv, mlx5e_htb_classid_root); + if (!root) { + qos_err(priv->mdev, "failed to find the root node in the qos tree "); + return -enoent; + } + err = mlx5_qos_destroy_node(priv->mdev, root->hw_id); + if (err) + qos_err(priv->mdev, "failed to destroy root node %u, err = %d ", + root->hw_id, err); + mlx5e_sw_node_delete(priv, root); + + mlx5e_qos_deactivate_all_queues(&priv->channels); + mlx5e_qos_close_all_queues(&priv->channels); + + return err; +} + +static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate, + struct mlx5e_qos_node *parent, u32 *bw_share) +{ + u64 share = 0; + + while (parent->classid != mlx5e_htb_classid_root && !parent->max_average_bw) + parent = parent->parent; + + if (parent->max_average_bw) + share = div64_u64(div_u64(rate * 100, bytes_in_mbit), + parent->max_average_bw); + else + share = 101; + + *bw_share = share == 0 ? 1 : share > 100 ? 0 : share; + + qos_dbg(priv->mdev, "convert: rate %llu, parent ceil %llu -> bw_share %u ", + rate, (u64)parent->max_average_bw * bytes_in_mbit, *bw_share); + + return 0; +} + +static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw) +{ + *max_average_bw = div_u64(ceil, bytes_in_mbit); + + qos_dbg(priv->mdev, "convert: ceil %llu -> max_average_bw %u ", + ceil, *max_average_bw); +} + +int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid, + u32 parent_classid, u64 rate, u64 ceil, + struct netlink_ext_ack *extack) +{ + struct mlx5e_qos_node *node, *parent; + int qid; + int err; + + qos_dbg(priv->mdev, "tc_htb_leaf_alloc_queue classid %04x, parent %04x, rate %llu, ceil %llu ", + classid, parent_classid, rate, ceil); + + qid = mlx5e_find_unused_qos_qid(priv); + if (qid < 0) { + nl_set_err_msg_mod(extack, "maximum amount of leaf classes is reached."); + return qid; + } + + parent = mlx5e_sw_node_find(priv, parent_classid); + if (!parent) + return -einval; + + node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent); + if 
(is_err(node)) + return ptr_err(node); + + node->rate = rate; + mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share); + mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw); + + err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id, + node->bw_share, node->max_average_bw, + &node->hw_id); + if (err) { + nl_set_err_msg_mod(extack, "firmware error when creating a leaf node."); + qos_err(priv->mdev, "failed to create a leaf node (class %04x), err = %d ", + classid, err); + mlx5e_sw_node_delete(priv, node); + return err; + } + + if (test_bit(mlx5e_state_opened, &priv->state)) { + err = mlx5e_open_qos_sq(priv, &priv->channels, node); + if (err) { + nl_set_err_msg_mod(extack, "error creating an sq."); + qos_warn(priv->mdev, "failed to create a qos sq (class %04x), err = %d ", + classid, err); + } else { + mlx5e_activate_qos_sq(priv, node); + } + } + + return mlx5e_qid_from_qos(&priv->channels, node->qid); +} + +int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid, + u64 rate, u64 ceil, struct netlink_ext_ack *extack) +{ + struct mlx5e_qos_node *node, *child; + int err, tmp_err; + u32 new_hw_id; + u16 qid; + + qos_dbg(priv->mdev, "tc_htb_leaf_to_inner classid %04x, upcoming child %04x, rate %llu, ceil %llu ", + classid, child_classid, rate, ceil); + + node = mlx5e_sw_node_find(priv, classid); + if (!node) + return -enoent; + + err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id, + node->bw_share, node->max_average_bw, + &new_hw_id); + if (err) { + nl_set_err_msg_mod(extack, "firmware error when creating an inner node."); + qos_err(priv->mdev, "failed to create an inner node (class %04x), err = %d ", + classid, err); + return err; + } + + /* intentionally reuse the qid for the upcoming first child. 
*/ + child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node); + if (is_err(child)) { + err = ptr_err(child); + goto err_destroy_hw_node; + } + + child->rate = rate; + mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share); + mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw); + + err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share, + child->max_average_bw, &child->hw_id); + if (err) { + nl_set_err_msg_mod(extack, "firmware error when creating a leaf node."); + qos_err(priv->mdev, "failed to create a leaf node (class %04x), err = %d ", + classid, err); + goto err_delete_sw_node; + } + + /* no fail point. */ + + qid = node->qid; + /* pairs with mlx5e_get_txq_by_classid. */ + write_once(node->qid, mlx5e_qos_qid_inner); + + if (test_bit(mlx5e_state_opened, &priv->state)) { + mlx5e_deactivate_qos_sq(priv, qid); + mlx5e_close_qos_sq(priv, qid); + } + + err = mlx5_qos_destroy_node(priv->mdev, node->hw_id); + if (err) /* not fatal. */ + qos_warn(priv->mdev, "failed to destroy leaf node %u (class %04x), err = %d ", + node->hw_id, classid, err); + + node->hw_id = new_hw_id; + + if (test_bit(mlx5e_state_opened, &priv->state)) { + err = mlx5e_open_qos_sq(priv, &priv->channels, child); + if (err) { + nl_set_err_msg_mod(extack, "error creating an sq."); + qos_warn(priv->mdev, "failed to create a qos sq (class %04x), err = %d ", + classid, err); + } else { + mlx5e_activate_qos_sq(priv, child); + } + } + + return 0; + +err_delete_sw_node: + child->qid = mlx5e_qos_qid_inner; + mlx5e_sw_node_delete(priv, child); + +err_destroy_hw_node: + tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id); + if (tmp_err) /* not fatal. 
*/ + qos_warn(priv->mdev, "failed to roll back creation of an inner node %u (class %04x), err = %d ", + new_hw_id, classid, tmp_err); + return err; +} + +static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid) +{ + struct mlx5e_qos_node *node = null; + int bkt; + + hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) + if (node->qid == qid) + break; + + return node; +} + +static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq) +{ + qos_dbg(priv->mdev, "reactivate qos sq qid %u ", qid); + netdev_tx_reset_queue(txq); + netif_tx_start_queue(txq); +} + +static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid) +{ + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid); + struct qdisc *qdisc = dev_queue->qdisc_sleeping; + + if (!qdisc) + return; + + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); +} + +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, + u16 *new_qid, struct netlink_ext_ack *extack) +{ + struct mlx5e_qos_node *node; + struct netdev_queue *txq; + u16 qid, moved_qid; + bool opened; + int err; + + qos_dbg(priv->mdev, "tc_htb_leaf_del classid %04x ", classid); + + *old_qid = *new_qid = 0; + + node = mlx5e_sw_node_find(priv, classid); + if (!node) + return -enoent; + + /* store qid for reuse. */ + qid = node->qid; + + opened = test_bit(mlx5e_state_opened, &priv->state); + if (opened) { + txq = netdev_get_tx_queue(priv->netdev, + mlx5e_qid_from_qos(&priv->channels, qid)); + mlx5e_deactivate_qos_sq(priv, qid); + mlx5e_close_qos_sq(priv, qid); + } + + err = mlx5_qos_destroy_node(priv->mdev, node->hw_id); + if (err) /* not fatal. */ + qos_warn(priv->mdev, "failed to destroy leaf node %u (class %04x), err = %d ", + node->hw_id, classid, err); + + mlx5e_sw_node_delete(priv, node); + + moved_qid = mlx5e_qos_cur_leaf_nodes(priv); + + if (moved_qid == 0) { + /* the last qos sq was just destroyed. 
*/ + if (opened) + mlx5e_reactivate_qos_sq(priv, qid, txq); + return 0; + } + moved_qid--; + + if (moved_qid < qid) { + /* the highest qos sq was just destroyed. */ + warn(moved_qid != qid - 1, "gaps in queue numeration: destroyed queue %u, the highest queue is %u", + qid, moved_qid); + if (opened) + mlx5e_reactivate_qos_sq(priv, qid, txq); + return 0; + } + + warn(moved_qid == qid, "can't move node with qid %u to itself", qid); + qos_dbg(priv->mdev, "moving qos sq %u to %u ", moved_qid, qid); + + node = mlx5e_sw_node_find_by_qid(priv, moved_qid); + warn(!node, "could not find a node with qid %u to move to queue %u", + moved_qid, qid); + + /* stop traffic to the old queue. */ + write_once(node->qid, mlx5e_qos_qid_inner); + __clear_bit(moved_qid, priv->htb.qos_used_qids); + + if (opened) { + txq = netdev_get_tx_queue(priv->netdev, + mlx5e_qid_from_qos(&priv->channels, moved_qid)); + mlx5e_deactivate_qos_sq(priv, moved_qid); + mlx5e_close_qos_sq(priv, moved_qid); + } + + /* prevent packets from the old class from getting into the new one. 
*/ + mlx5e_reset_qdisc(priv->netdev, moved_qid); + + __set_bit(qid, priv->htb.qos_used_qids); + write_once(node->qid, qid); + + if (test_bit(mlx5e_state_opened, &priv->state)) { + err = mlx5e_open_qos_sq(priv, &priv->channels, node); + if (err) { + nl_set_err_msg_mod(extack, "error creating an sq."); + qos_warn(priv->mdev, "failed to create a qos sq (class %04x) while moving qid %u to %u, err = %d ", + node->classid, moved_qid, qid, err); + } else { + mlx5e_activate_qos_sq(priv, node); + } + } + + mlx5e_update_tx_netdev_queues(priv); + if (opened) + mlx5e_reactivate_qos_sq(priv, moved_qid, txq); + + *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid); + *new_qid = mlx5e_qid_from_qos(&priv->channels, qid); + return 0; +} + +int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, + struct netlink_ext_ack *extack) +{ + struct mlx5e_qos_node *node, *parent; + u32 old_hw_id, new_hw_id; + int err, saved_err = 0; + u16 qid; + + qos_dbg(priv->mdev, "tc_htb_leaf_del_last%s classid %04x ", + force ? "_force" : "", classid); + + node = mlx5e_sw_node_find(priv, classid); + if (!node) + return -enoent; + + err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id, + node->parent->bw_share, + node->parent->max_average_bw, + &new_hw_id); + if (err) { + nl_set_err_msg_mod(extack, "firmware error when creating a leaf node."); + qos_err(priv->mdev, "failed to create a leaf node (class %04x), err = %d ", + classid, err); + if (!force) + return err; + saved_err = err; + } + + /* store qid for reuse and prevent clearing the bit. */ + qid = node->qid; + /* pairs with mlx5e_get_txq_by_classid. */ + write_once(node->qid, mlx5e_qos_qid_inner); + + if (test_bit(mlx5e_state_opened, &priv->state)) { + mlx5e_deactivate_qos_sq(priv, qid); + mlx5e_close_qos_sq(priv, qid); + } + + /* prevent packets from the old class from getting into the new one. 
*/ + mlx5e_reset_qdisc(priv->netdev, qid); + + err = mlx5_qos_destroy_node(priv->mdev, node->hw_id); + if (err) /* not fatal. */ + qos_warn(priv->mdev, "failed to destroy leaf node %u (class %04x), err = %d ", + node->hw_id, classid, err); + + parent = node->parent; + mlx5e_sw_node_delete(priv, node); + + node = parent; + write_once(node->qid, qid); + + /* early return on error in force mode. parent will still be an inner + * node to be deleted by a following delete operation. + */ + if (saved_err) + return saved_err; + + old_hw_id = node->hw_id; + node->hw_id = new_hw_id; + + if (test_bit(mlx5e_state_opened, &priv->state)) { + err = mlx5e_open_qos_sq(priv, &priv->channels, node); + if (err) { + nl_set_err_msg_mod(extack, "error creating an sq."); + qos_warn(priv->mdev, "failed to create a qos sq (class %04x), err = %d ", + classid, err); + } else { + mlx5e_activate_qos_sq(priv, node); + } + } + + err = mlx5_qos_destroy_node(priv->mdev, old_hw_id); + if (err) /* not fatal. */ + qos_warn(priv->mdev, "failed to destroy leaf node %u (class %04x), err = %d ", + node->hw_id, classid, err); + + return 0; +} + +static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node, + struct netlink_ext_ack *extack) +{ + struct mlx5e_qos_node *child; + int err = 0; + int bkt; + + hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) { + u32 old_bw_share = child->bw_share; + int err_one; + + if (child->parent != node) + continue; + + mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share); + if (child->bw_share == old_bw_share) + continue; + + err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share, + child->max_average_bw, child->hw_id); + if (!err && err_one) { + err = err_one; + + nl_set_err_msg_mod(extack, "firmware error when modifying a child node."); + qos_err(priv->mdev, "failed to modify a child node (class %04x), err = %d ", + node->classid, err); + } + } + + return err; +} + +int mlx5e_htb_node_modify(struct 
mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, + struct netlink_ext_ack *extack) +{ + u32 bw_share, max_average_bw; + struct mlx5e_qos_node *node; + bool ceil_changed = false; + int err; + + qos_dbg(priv->mdev, "tc_htb_leaf_modify classid %04x, rate %llu, ceil %llu ", + classid, rate, ceil); + + node = mlx5e_sw_node_find(priv, classid); + if (!node) + return -enoent; + + node->rate = rate; + mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share); + mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw); + + err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share, + max_average_bw, node->hw_id); + if (err) { + nl_set_err_msg_mod(extack, "firmware error when modifying a node."); + qos_err(priv->mdev, "failed to modify a node (class %04x), err = %d ", + classid, err); + return err; + } + + if (max_average_bw != node->max_average_bw) + ceil_changed = true; + + node->bw_share = bw_share; + node->max_average_bw = max_average_bw; + + if (ceil_changed) + err = mlx5e_qos_update_children(priv, node, extack); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2020, mellanox technologies inc. all rights reserved. 
*/ + +#ifndef __mlx5e_en_qos_h +#define __mlx5e_en_qos_h + +#include <linux/mlx5/driver.h> + +#define mlx5e_qos_max_leaf_nodes 256 + +struct mlx5e_priv; +struct mlx5e_channels; +struct mlx5e_channel; + +int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); +int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv); + +/* tx datapath api */ +int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid); +struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid); + +/* sq lifecycle */ +int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs); +void mlx5e_qos_activate_queues(struct mlx5e_priv *priv); +void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c); +void mlx5e_qos_close_queues(struct mlx5e_channel *c); + +/* htb api */ +int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls, + struct netlink_ext_ack *extack); +int mlx5e_htb_root_del(struct mlx5e_priv *priv); +int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid, + u32 parent_classid, u64 rate, u64 ceil, + struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid, + u64 rate, u64 ceil, struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, + u16 *new_qid, struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, + struct netlink_ext_ack *extack); +int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, + struct netlink_ext_ack *extack); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c + /* don't allow changing the number of channels if htb offload is active, + * because the numeration of the qos sqs will change, while per-queue + * qdiscs are 
attached. + */ + if (priv->htb.maj_id) { + err = -einval; + netdev_err(priv->netdev, "%s: htb offload is active, cannot change the number of channels ", + __func__); + goto out; + } + + /* don't allow changing the ptp state if htb offload is active, because + * the numeration of the qos sqs will change, while per-queue qdiscs are + * attached. + */ + if (priv->htb.maj_id) { + netdev_err(priv->netdev, "%s: htb offload is active, cannot change the ptp state ", + __func__); + return -einval; + } + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +#include "qos.h" - sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + u64 bitmask = 0; - mlx5_set64(modify_sq_in, in, modify_bitmask, 1); - mlx5_set(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); + bitmask |= 1; + mlx5_set(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); + if (p->qos_update && p->next_state == mlx5_sqc_state_rdy) { + bitmask |= 1 << 2; + mlx5_set(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id); + } + mlx5_set64(modify_sq_in, in, modify_bitmask, bitmask); + u16 qos_queue_group_id, + if (qos_queue_group_id) { + msp.qos_update = true; + msp.qos_queue_group_id = qos_queue_group_id; + } -static int mlx5e_open_txqsq(struct mlx5e_channel *c, - u32 tisn, - int txq_ix, - struct mlx5e_params *params, - struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, - int tc) +int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, + struct mlx5e_params *params, struct mlx5e_sq_param *param, + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) + if (qos_queue_group_id) + sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; + else + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 
qos_queue_group_id, &sq->sqn); -static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); + err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); - params, &cparam->txq_sq, &c->sq[tc], tc); + params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); + + mlx5e_qos_deactivate_queues(c); + mlx5e_qos_close_queues(c); -static void mlx5e_build_sq_param(struct mlx5e_priv *priv, - struct mlx5e_params *params, - struct mlx5e_sq_param *param) +void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, + struct mlx5e_sq_param *param) + err = mlx5e_qos_open_queues(priv, chs); + if (err) + goto err_close_ptp; + +err_close_ptp: + if (chs->port_ptp) + mlx5e_port_ptp_close(chs->port_ptp); + +int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) +{ + int qos_queues, nch, ntc, num_txqs, err; + + qos_queues = mlx5e_qos_cur_leaf_nodes(priv); + + nch = priv->channels.params.num_channels; + ntc = priv->channels.params.num_tc; + num_txqs = nch * ntc + qos_queues; + if (mlx5e_get_pflag(&priv->channels.params, mlx5e_pflag_tx_port_ts)) + num_txqs += ntc; + + mlx5e_dbg(drv, priv, "setting num_txqs %d ", num_txqs); + err = netif_set_real_num_tx_queues(priv->netdev, num_txqs); + if (err) + netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d ", err); + + return err; +} + - int num_txqs, num_rxqs, nch, ntc; + int num_rxqs, nch, ntc; - num_txqs = nch * ntc; - if (mlx5e_get_pflag(&priv->channels.params, mlx5e_pflag_tx_port_ts)) - num_txqs += ntc; - err = netif_set_real_num_tx_queues(netdev, num_txqs); - if (err) { - netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d ", err); + err = mlx5e_update_tx_netdev_queues(priv); + if (err) - } + mlx5e_qos_activate_queues(priv); + /* mqprio is another toplevel qdisc that can't be 
attached + * simultaneously with the offloaded htb. + */ + if (warn_on(priv->htb.maj_id)) { + err = -einval; + goto out; + } + +static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb) +{ + int res; + + switch (htb->command) { + case tc_htb_create: + return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid, + htb->extack); + case tc_htb_destroy: + return mlx5e_htb_root_del(priv); + case tc_htb_leaf_alloc_queue: + res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid, + htb->rate, htb->ceil, htb->extack); + if (res < 0) + return res; + htb->qid = res; + return 0; + case tc_htb_leaf_to_inner: + return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid, + htb->rate, htb->ceil, htb->extack); + case tc_htb_leaf_del: + return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid, + htb->extack); + case tc_htb_leaf_del_last: + case tc_htb_leaf_del_last_force: + return mlx5e_htb_leaf_del_last(priv, htb->classid, + htb->command == tc_htb_leaf_del_last_force, + htb->extack); + case tc_htb_node_modify: + return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil, + htb->extack); + case tc_htb_leaf_query_queue: + res = mlx5e_get_txq_by_classid(priv, htb->classid); + if (res < 0) + return res; + htb->qid = res; + return 0; + default: + return -eopnotsupp; + } +} + + int err; + case tc_setup_qdisc_htb: + mutex_lock(&priv->state_lock); + err = mlx5e_setup_tc_htb(priv, type_data); + mutex_unlock(&priv->state_lock); + return err; -#if is_enabled(config_mlx5_cls_act) -static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) +static int set_feature_hw_tc(struct net_device *netdev, bool enable) +#if is_enabled(config_mlx5_cls_act) +#endif + + if (!enable && priv->htb.maj_id) { + netdev_err(netdev, "active htb offload, can't turn hw_tc_offload off "); + return -einval; + } -#endif -#if is_enabled(config_mlx5_cls_act) - err |= mlx5e_handle_feature(netif_f_hw_tc, 
set_feature_tc_num_filters); -#endif + err |= mlx5e_handle_feature(netif_f_hw_tc, set_feature_hw_tc); + if (mlx5_qos_is_supported(mdev)) + netdev->features |= netif_f_hw_tc; + hash_init(priv->htb.qos_tc2node); + int i; + + + for (i = 0; i < priv->htb.max_qos_sqs; i++) + kfree(priv->htb.qos_sq_stats[i]); + kvfree(priv->htb.qos_sq_stats); + int qos_sqs = 0; + if (mlx5_qos_is_supported(mdev)) + qos_sqs = mlx5e_qos_max_leaf_nodes(mdev); + - nch * profile->max_tc + ptp_txqs, + nch * profile->max_tc + ptp_txqs + qos_sqs, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv, + struct mlx5e_sw_stats *s) +{ + struct mlx5e_sq_stats **stats; + u16 max_qos_sqs; + int i; + + /* pairs with smp_store_release in mlx5e_open_qos_sq. */ + max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs); + stats = read_once(priv->htb.qos_sq_stats); + + for (i = 0; i < max_qos_sqs; i++) { + mlx5e_stats_grp_sw_update_stats_sq(s, read_once(stats[i])); + + /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */ + barrier(); + } +} + + mlx5e_stats_grp_sw_update_stats_qos(priv, s); +static const struct counter_desc qos_sq_stats_desc[] = { + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, packets) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, bytes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tso_packets) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tso_bytes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tso_inner_packets) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tso_inner_bytes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, csum_partial) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, csum_partial_inner) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, 
added_vlan_packets) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, nop) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, mpwqe_blks) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, mpwqe_pkts) }, +#ifdef config_mlx5_en_tls + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_encrypted_packets) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_encrypted_bytes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_ctx) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_ooo) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_dump_packets) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_dump_bytes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_resync_bytes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_skip_no_sync_data) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_drop_no_sync_data) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, tls_drop_bypass_req) }, +#endif + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, csum_none) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, stopped) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, dropped) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, xmit_more) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, recover) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, cqes) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, wake) }, + { mlx5e_declare_qos_tx_stat(struct mlx5e_sq_stats, cqe_err) }, +}; + +#define num_qos_sq_stats array_size(qos_sq_stats_desc) + +static mlx5e_declare_stats_grp_op_num_stats(qos) +{ + /* pairs with smp_store_release in mlx5e_open_qos_sq. */ + return num_qos_sq_stats * smp_load_acquire(&priv->htb.max_qos_sqs); +} + +static mlx5e_declare_stats_grp_op_fill_strs(qos) +{ + /* pairs with smp_store_release in mlx5e_open_qos_sq. 
*/ + u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs); + int i, qid; + + for (qid = 0; qid < max_qos_sqs; qid++) + for (i = 0; i < num_qos_sq_stats; i++) + sprintf(data + (idx++) * eth_gstring_len, + qos_sq_stats_desc[i].format, qid); + + return idx; +} + +static mlx5e_declare_stats_grp_op_fill_stats(qos) +{ + struct mlx5e_sq_stats **stats; + u16 max_qos_sqs; + int i, qid; + + /* pairs with smp_store_release in mlx5e_open_qos_sq. */ + max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs); + stats = read_once(priv->htb.qos_sq_stats); + + for (qid = 0; qid < max_qos_sqs; qid++) { + struct mlx5e_sq_stats *s = read_once(stats[qid]); + + for (i = 0; i < num_qos_sq_stats; i++) + data[idx++] = mlx5e_read_ctr64_cpu(s, qos_sq_stats_desc, i); + } + + return idx; +} + +static mlx5e_declare_stats_grp_op_update_stats(qos) { return; } +static mlx5e_define_stats_grp(qos, 0); + &mlx5e_stats_grp(qos), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +#define mlx5e_declare_qos_tx_stat(type, fld) "qos_tx%d_"#fld, offsetof(type, fld) + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb, + u16 htb_maj_id) +{ + u16 classid; + + if ((tc_h_maj(skb->priority) >> 16) == htb_maj_id) + classid = tc_h_min(skb->priority); + else + classid = read_once(priv->htb.defcls); + + if (!classid) + return 0; + + return mlx5e_get_txq_by_classid(priv, classid); +} + + int num_tc_x_num_ch; - if (unlikely(priv->channels.port_ptp)) { - int num_tc_x_num_ch; + /* sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. 
*/ + num_tc_x_num_ch = read_once(priv->num_tc_x_num_ch); + if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) { + /* order maj_id before defcls - pairs with mlx5e_htb_root_add. */ + u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id); - if (unlikely(skb_shinfo(skb)->tx_flags & skbtx_hw_tstamp) && - mlx5e_use_ptpsq(skb)) - return mlx5e_select_ptpsq(dev, skb); + if (unlikely(htb_maj_id)) { + txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id); + if (txq_ix > 0) + return txq_ix; + } - /* sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */ - num_tc_x_num_ch = read_once(priv->num_tc_x_num_ch); + if (unlikely(priv->channels.port_ptp)) + if (unlikely(skb_shinfo(skb)->tx_flags & skbtx_hw_tstamp) && + mlx5e_use_ptpsq(skb)) + return mlx5e_select_ptpsq(dev, skb); - /* fix netdev_pick_tx() not to choose ptp_channel txqs. + /* fix netdev_pick_tx() not to choose ptp_channel and htb txqs. - * driver to select these queues only at mlx5e_select_ptpsq(). + * driver to select these queues only at mlx5e_select_ptpsq() + * and mlx5e_select_htb_queue(). + if (unlikely(!sq)) { + dev_kfree_skb_any(skb); + return netdev_tx_ok; + } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c + struct mlx5e_txqsq __rcu **qos_sqs; + u16 qos_sqs_size; + qos_sqs = rcu_dereference(c->qos_sqs); + + if (unlikely(qos_sqs)) { + smp_rmb(); /* pairs with mlx5e_qos_alloc_queues. 
*/ + qos_sqs_size = read_once(c->qos_sqs_size); + + for (i = 0; i < qos_sqs_size; i++) { + struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]); + + if (sq) + busy |= mlx5e_poll_tx_cq(&sq->cq, budget); + } + } + + if (unlikely(qos_sqs)) { + for (i = 0; i < qos_sqs_size; i++) { + struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]); + + if (sq) { + mlx5e_handle_tx_dim(sq); + mlx5e_cq_arm(&sq->cq); + } + } + } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2020, mellanox technologies inc. all rights reserved. */ + +#include "qos.h" + +#define mlx5_qos_default_dwrr_uid 0 + +bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev) +{ + if (!mlx5_cap_gen(mdev, qos)) + return false; + if (!mlx5_cap_qos(mdev, nic_sq_scheduling)) + return false; + if (!mlx5_cap_qos(mdev, nic_bw_share)) + return false; + if (!mlx5_cap_qos(mdev, nic_rate_limit)) + return false; + return true; +} + +int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev) +{ + return 1 << mlx5_cap_qos(mdev, log_max_qos_nic_queue_group); +} + +int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id) +{ + u32 sched_ctx[mlx5_st_sz_dw(scheduling_context)] = {0}; + + mlx5_set(scheduling_context, sched_ctx, parent_element_id, parent_id); + mlx5_set(scheduling_context, sched_ctx, element_type, + scheduling_context_element_type_queue_group); + mlx5_set(scheduling_context, sched_ctx, bw_share, bw_share); + mlx5_set(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); + + return mlx5_create_scheduling_element_cmd(mdev, scheduling_hierarchy_nic, + sched_ctx, id); +} + +int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id) +{ + u32 sched_ctx[mlx5_st_sz_dw(scheduling_context)] = {0}; + void *attr; + + 
mlx5_set(scheduling_context, sched_ctx, parent_element_id, parent_id); + mlx5_set(scheduling_context, sched_ctx, element_type, + scheduling_context_element_type_tsar); + mlx5_set(scheduling_context, sched_ctx, bw_share, bw_share); + mlx5_set(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); + + attr = mlx5_addr_of(scheduling_context, sched_ctx, element_attributes); + mlx5_set(tsar_element, attr, tsar_type, tsar_element_tsar_type_dwrr); + + return mlx5_create_scheduling_element_cmd(mdev, scheduling_hierarchy_nic, + sched_ctx, id); +} + +int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id) +{ + return mlx5_qos_create_inner_node(mdev, mlx5_qos_default_dwrr_uid, 0, 0, id); +} + +int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 id) +{ + u32 sched_ctx[mlx5_st_sz_dw(scheduling_context)] = {0}; + u32 bitmask = 0; + + mlx5_set(scheduling_context, sched_ctx, parent_element_id, parent_id); + mlx5_set(scheduling_context, sched_ctx, bw_share, bw_share); + mlx5_set(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); + + bitmask |= modify_scheduling_element_in_modify_bitmask_bw_share; + bitmask |= modify_scheduling_element_in_modify_bitmask_max_average_bw; + + return mlx5_modify_scheduling_element_cmd(mdev, scheduling_hierarchy_nic, + sched_ctx, id, bitmask); +} + +int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id) +{ + return mlx5_destroy_scheduling_element_cmd(mdev, scheduling_hierarchy_nic, id); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2020, mellanox technologies inc. all rights reserved. */ + +#ifndef __mlx5_qos_h +#define __mlx5_qos_h + +#include "mlx5_core.h" + +#define mlx5_debug_qos_mask bit(4) + +#define qos_err(mdev, fmt, ...) 
\ + mlx5_core_err(mdev, "qos: " fmt, ##__va_args__) +#define qos_warn(mdev, fmt, ...) \ + mlx5_core_warn(mdev, "qos: " fmt, ##__va_args__) +#define qos_dbg(mdev, fmt, ...) \ + mlx5_core_dbg_mask(mdev, mlx5_debug_qos_mask, "qos: " fmt, ##__va_args__) + +bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev); +int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev); + +int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id); +int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, + u32 bw_share, u32 max_avg_bw, u32 *id); +int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id); +int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share, + u32 max_avg_bw, u32 id); +int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id); + +#endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h - u8 reserved_at_7[0x4]; + u8 reserved_at_7[0x1]; + u8 nic_sq_scheduling[0x1]; + u8 nic_bw_share[0x1]; + u8 nic_rate_limit[0x1]; - u8 reserved_at_20[0x20]; + u8 reserved_at_20[0xb]; + u8 log_max_qos_nic_queue_group[0x5]; + u8 reserved_at_30[0x10]; - u8 reserved_at_110[0x10]; + u8 qos_queue_group_id[0x10]; + scheduling_context_element_type_queue_group = 0x4, + scheduling_hierarchy_nic = 0x3,
Networking
214baf22870cfa437522f3bd4fbae56338674b04
maxim mikityanskiy
include
linux
core, en, ethernet, mellanox, mlx5
devlink: add dmac filter generic packet trap
add packet trap that can report packets that were dropped due to destination mac filtering.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c', 'rst']
3
9
0
--- diff --git a/documentation/networking/devlink/devlink-trap.rst b/documentation/networking/devlink/devlink-trap.rst --- a/documentation/networking/devlink/devlink-trap.rst +++ b/documentation/networking/devlink/devlink-trap.rst - ''drop'' - traps packets that the device decided to drop in case they hit a + * - ''dmac_filter'' + - ''drop'' + - traps incoming packets that the device decided to drop because + the destination mac is not configured in the mac table and + the interface is not in promiscuous mode diff --git a/include/net/devlink.h b/include/net/devlink.h --- a/include/net/devlink.h +++ b/include/net/devlink.h + devlink_trap_generic_id_dmac_filter, +#define devlink_trap_generic_name_dmac_filter \ + "dest_mac_filter" diff --git a/net/core/devlink.c b/net/core/devlink.c --- a/net/core/devlink.c +++ b/net/core/devlink.c + devlink_trap(dmac_filter, drop),
Networking
e78ab164591ffd55d2771401ed0d9b083dad55fa
aya levin ido schimmel idosch nvidia com moshe shemesh moshe nvidia com tariq toukan tariqt nvidia com
net
core
devlink
net/mlx5: add support for devlink traps in mlx5 core driver
add devlink traps infra-structure to mlx5 core driver. add traps list to mlx5_priv and corresponding api: - mlx5_devlink_trap_report() to wrap trap reports to devlink - mlx5_devlink_trap_get_num_active() to decide whether to open/close trap resources.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
4
103
0
- mlx5_devlink_trap_report() to wrap trap reports to devlink - mlx5_devlink_trap_get_num_active() to decide whether to open/close trap --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id) +{ + struct mlx5_devlink_trap *dl_trap; + + list_for_each_entry(dl_trap, &dev->priv.traps, list) + if (dl_trap->trap.id == trap_id) + return dl_trap; + + return null; +} + +static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap, + void *trap_ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_devlink_trap *dl_trap; + + dl_trap = kzalloc(sizeof(*dl_trap), gfp_kernel); + if (!dl_trap) + return -enomem; + + dl_trap->trap.id = trap->id; + dl_trap->trap.action = devlink_trap_action_drop; + dl_trap->item = trap_ctx; + + if (mlx5_find_trap_by_id(dev, trap->id)) { + kfree(dl_trap); + mlx5_core_err(dev, "devlink trap: trap 0x%x already found", trap->id); + return -eexist; + } + + list_add_tail(&dl_trap->list, &dev->priv.traps); + return 0; +} + +static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap, + void *trap_ctx) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_devlink_trap *dl_trap; + + dl_trap = mlx5_find_trap_by_id(dev, trap->id); + if (!dl_trap) { + mlx5_core_err(dev, "devlink trap: missing trap id 0x%x", trap->id); + return; + } + list_del(&dl_trap->list); + kfree(dl_trap); +} + + .trap_init = mlx5_devlink_trap_init, + .trap_fini = mlx5_devlink_trap_fini, +void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb, + struct devlink_port *dl_port) +{ + struct devlink *devlink = priv_to_devlink(dev); + struct mlx5_devlink_trap *dl_trap; + + dl_trap = 
mlx5_find_trap_by_id(dev, trap_id); + if (!dl_trap) { + mlx5_core_err(dev, "devlink trap: report on invalid trap id 0x%x", trap_id); + return; + } + + if (dl_trap->trap.action != devlink_trap_action_trap) { + mlx5_core_dbg(dev, "devlink trap: trap id %d has action %d", trap_id, + dl_trap->trap.action); + return; + } + devlink_trap_report(devlink, skb, dl_trap->item, dl_port, null); +} + +int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev) +{ + struct mlx5_devlink_trap *dl_trap; + int count = 0; + + list_for_each_entry(dl_trap, &dev->priv.traps, list) + if (dl_trap->trap.action == devlink_trap_action_trap) + count++; + + return count; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +struct mlx5_trap_ctx { + int id; + int action; +}; + +struct mlx5_devlink_trap { + struct mlx5_trap_ctx trap; + void *item; + struct list_head list; +}; + +struct mlx5_core_dev; +void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb, + struct devlink_port *dl_port); +int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c + init_list_head(&priv->traps); + diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h + struct list_head traps;
Networking
3d347b1b19da20f973d1d3c6bb60c11185606afd
aya levin tariq toukan tariqt nvidia com
include
linux
core, ethernet, mellanox, mlx5
net/mlx5: register to devlink ingress vlan filter trap
add traps registration to mlx5_core devlink register/unregister flow. this patch registers ingress_vlan_filter trap.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
51
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +#define mlx5_trap_drop(_id, _group_id) \ + devlink_trap_generic(drop, drop, _id, \ + devlink_trap_group_generic_id_##_group_id, \ + devlink_trap_metadata_type_f_in_port) + +static const struct devlink_trap mlx5_traps_arr[] = { + mlx5_trap_drop(ingress_vlan_filter, l2_drops), +}; + +static const struct devlink_trap_group mlx5_trap_groups_arr[] = { + devlink_trap_group_generic(l2_drops, 0), +}; + +static int mlx5_devlink_traps_register(struct devlink *devlink) +{ + struct mlx5_core_dev *core_dev = devlink_priv(devlink); + int err; + + err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr, + array_size(mlx5_trap_groups_arr)); + if (err) + return err; + + err = devlink_traps_register(devlink, mlx5_traps_arr, array_size(mlx5_traps_arr), + &core_dev->priv); + if (err) + goto err_trap_group; + return 0; + +err_trap_group: + devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr, + array_size(mlx5_trap_groups_arr)); + return err; +} + +static void mlx5_devlink_traps_unregister(struct devlink *devlink) +{ + devlink_traps_unregister(devlink, mlx5_traps_arr, array_size(mlx5_traps_arr)); + devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr, + array_size(mlx5_trap_groups_arr)); +} + + + err = mlx5_devlink_traps_register(devlink); + if (err) + goto traps_reg_err; + +traps_reg_err: + devlink_params_unregister(devlink, mlx5_devlink_params, + array_size(mlx5_devlink_params)); + mlx5_devlink_traps_unregister(devlink);
Networking
82e6c96f04e13c72d91777455836ffd012853caa
aya levin tariq toukan tariqt nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5: register to devlink dmac filter trap
core driver is registered to the devlink traps service, which enables the admin to redeem packets that were destined to be dropped due to a particular reason. register to dmac filter, allow visibility of packets that were filtered out by the mac table.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
1
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c + mlx5_trap_drop(dmac_filter, l2_drops),
Networking
f679247f25b65cf71298e25d6850bc4bac2c9802
aya levin moshe shemesh moshe nvidia com tariq toukan tariqt nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5: rename events notifier header
change the naming of events notifier head to clarify that it handles only firmware events. coming patches in the set, add event notifier for software events.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
11
8
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c -/* handler which forwards the event to events->nh, driver notifiers */ +/* handler which forwards the event to events->fw_nh, driver notifiers */ - /* driver notifier chain */ - struct atomic_notifier_head nh; + /* driver notifier chain for fw events */ + struct atomic_notifier_head fw_nh; - atomic_notifier_call_chain(&events->nh, event, data); + atomic_notifier_call_chain(&events->fw_nh, event, data); - atomic_init_notifier_head(&events->nh); + atomic_init_notifier_head(&events->fw_nh); +/* this api is used only for processing and forwarding firmware + * events to mlx5 consumer. + */ - return atomic_notifier_chain_register(&events->nh, nb); + return atomic_notifier_chain_register(&events->fw_nh, nb); - return atomic_notifier_chain_unregister(&events->nh, nb); + return atomic_notifier_chain_unregister(&events->fw_nh, nb); - return atomic_notifier_call_chain(&events->nh, event, data); + return atomic_notifier_call_chain(&events->fw_nh, event, data);
Networking
3eac5d949afeca60982165e6fc4cece6f5882843
aya levin tariq toukan tariqt nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5: notify on trap action by blocking event
in order to allow mlx5 core driver to trigger synchronous operations to its consumers, add a blocking events handler. add wrappers to blocking_notifier_[call_chain/chain_register/chain_unregister]. add trap callback for action set and notify about this change. following patches in the set add a listener for this event.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
4
83
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +static int mlx5_devlink_trap_action_set(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + enum devlink_trap_action action_orig; + struct mlx5_devlink_trap *dl_trap; + int err = 0; + + dl_trap = mlx5_find_trap_by_id(dev, trap->id); + if (!dl_trap) { + mlx5_core_err(dev, "devlink trap: set action on invalid trap id 0x%x", trap->id); + err = -einval; + goto out; + } + + if (action != devlink_trap_action_drop && action != devlink_trap_action_trap) { + err = -eopnotsupp; + goto out; + } + + if (action == dl_trap->trap.action) + goto out; + + action_orig = dl_trap->trap.action; + dl_trap->trap.action = action; + err = mlx5_blocking_notifier_call_chain(dev, mlx5_driver_event_type_trap, + &dl_trap->trap); + if (err) + dl_trap->trap.action = action_orig; +out: + return err; +} + + .trap_action_set = mlx5_devlink_trap_action_set, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c + /* driver notifier chain for sw events */ + struct blocking_notifier_head sw_nh; + blocking_init_notifier_head(&events->sw_nh); + +/* this api is used only for processing and forwarding driver-specific + * events to mlx5 consumers. 
+ */ +int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + struct mlx5_events *events = dev->priv.events; + + return blocking_notifier_chain_register(&events->sw_nh, nb); +} + +int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + struct mlx5_events *events = dev->priv.events; + + return blocking_notifier_chain_unregister(&events->sw_nh, nb); +} + +int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, + void *data) +{ + struct mlx5_events *events = dev->priv.events; + + return blocking_notifier_call_chain(&events->sw_nh, event, data); +} diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h +enum mlx5_driver_event { + mlx5_driver_event_type_trap = 0, +}; + diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h +/* async-atomic event notifier used by mlx5 core to forward fw + * evetns recived from event queue to mlx5 consumers. + * optimise event queue dipatching. + */ + +/* async-atomic event notifier used for forwarding + * evetns from the event queue into the to mlx5 events dispatcher, + * eswitch, clock and others. + */ +/* blocking event notifier used to forward sw events, used for slow path */ +int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, + void *data); +
Networking
241dc159391fb9d351362d911a39dff84074cc92
aya levin tariq toukan tariqt nvidia com
include
linux
core, ethernet, mellanox, mlx5
net/mlx5e: optimize promiscuous mode
change steering flow to optimize traffic in promiscuous mode. on demand, add a high priority table containing a catch-all rule. all incoming packets are caught by this rule and steered directly to the ttc table. prior to this change, packets in promiscuous mode may suffer from up to 4 steering hops before reaching ttc table. in addition, this patch will allow us adding a catch-all rule at the end of mac table to serve mac trap, with no impact on promiscuous mode performance.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
3
100
34
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +struct mlx5e_promisc_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_handle *rule; +}; + - struct mlx5e_l2_rule promisc; - mlx5e_vlan_ft_level = 0, + mlx5e_promisc_ft_level, + mlx5e_vlan_ft_level, + struct mlx5e_promisc_table promisc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c - mlx5e_promisc = 2, +#define mlx5e_promisc_group0_size bit(0) +#define mlx5e_promisc_table_size mlx5e_promisc_group0_size + +static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) +{ + struct mlx5_flow_table *ft = priv->fs.promisc.ft.t; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_handle **rule_p; + mlx5_declare_flow_act(flow_act); + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), gfp_kernel); + if (!spec) + return -enomem; + dest.type = mlx5_flow_destination_type_flow_table; + dest.ft = priv->fs.ttc.ft.t; + + rule_p = &priv->fs.promisc.rule; + *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + if (is_err(*rule_p)) { + err = ptr_err(*rule_p); + *rule_p = null; + netdev_err(priv->netdev, "%s: add promiscuous rule failed ", __func__); + } + kvfree(spec); + return err; +} + +static int mlx5e_create_promisc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_flow_table *ft = &priv->fs.promisc.ft; + struct mlx5_flow_table_attr ft_attr = {}; + int err; + + ft_attr.max_fte = mlx5e_promisc_table_size; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.level = mlx5e_promisc_ft_level; + ft_attr.prio = mlx5e_nic_prio; + + ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + if (is_err(ft->t)) { + err = ptr_err(ft->t); + 
netdev_err(priv->netdev, "fail to create promisc table err=%d ", err); + return err; + } + + err = mlx5e_add_promisc_rule(priv); + if (err) + goto err_destroy_promisc_table; + + return 0; + +err_destroy_promisc_table: + mlx5_destroy_flow_table(ft->t); + ft->t = null; + + return err; +} + +static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv) +{ + if (warn(!priv->fs.promisc.rule, "trying to remove non-existing promiscuous rule")) + return; + mlx5_del_flow_rules(priv->fs.promisc.rule); + priv->fs.promisc.rule = null; +} + +static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv) +{ + if (warn(!priv->fs.promisc.ft.t, "trying to remove non-existing promiscuous table")) + return; + mlx5e_del_promisc_rule(priv); + mlx5_destroy_flow_table(priv->fs.promisc.ft.t); + priv->fs.promisc.ft.t = null; +} + + int err; - if (!priv->channels.params.vlan_strip_disable) + err = mlx5e_create_promisc_table(priv); + if (err) + enable_promisc = false; + if (!priv->channels.params.vlan_strip_disable && !err) - mlx5e_add_l2_flow_rule(priv, &ea->promisc, mlx5e_promisc); - if (!priv->fs.vlan.cvlan_filter_disabled) - mlx5e_add_any_vid_rules(priv); - if (disable_promisc) { - if (!priv->fs.vlan.cvlan_filter_disabled) - mlx5e_del_any_vid_rules(priv); - mlx5e_del_l2_flow_rule(priv, &ea->promisc); - } + if (disable_promisc) + mlx5e_destroy_promisc_table(priv); - - case mlx5e_promisc: - break; -#define mlx5e_num_l2_groups 3 -#define mlx5e_l2_group1_size bit(0) -#define mlx5e_l2_group2_size bit(15) -#define mlx5e_l2_group3_size bit(0) +#define mlx5e_num_l2_groups 2 +#define mlx5e_l2_group1_size bit(15) +#define mlx5e_l2_group2_size bit(0) - mlx5e_l2_group2_size +\ - mlx5e_l2_group3_size) + mlx5e_l2_group2_size) - /* flow group for promiscuous */ - mlx5_set_cfg(in, start_flow_index, ix); - ix += mlx5e_l2_group1_size; - mlx5_set_cfg(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (is_err(ft->g[ft->num_groups])) - goto err_destroy_groups; - 
ft->num_groups++; - - ix += mlx5e_l2_group2_size; + ix += mlx5e_l2_group1_size; - ix += mlx5e_l2_group3_size; + ix += mlx5e_l2_group2_size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c -/* vlan, mac, ttc, inner ttc, {arfs/accel and esp/esp_err} */ -#define kernel_nic_prio_num_levels 6 +/* promiscuous, vlan, mac, ttc, inner ttc, {arfs/accel and esp/esp_err} */ +#define kernel_nic_prio_num_levels 7
Networking
1c46d7409f301592731f941a7ec6c51cb6b54b0b
aya levin moshe shemesh moshe nvidia com maor gottlieb maorg nvidia com tariq toukan tariqt nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: add flow steering vlan trap rule
add flow group to the vlan table to hold the catch-all vlan rule. add api which adds/removes vlan trap rule. this rule catches packets that were destined to be dropped due to no-match with previous vlan rules. the trap rule steer these packets to the trap tir related to the trap-rq.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
2
65
2
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h + struct mlx5_flow_handle *trap_rule; +int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); +void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +static struct mlx5_flow_handle * +mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num) +{ + struct mlx5_flow_destination dest = {}; + mlx5_declare_flow_act(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + + spec = kvzalloc(sizeof(*spec), gfp_kernel); + if (!spec) + return err_ptr(-enomem); + spec->flow_context.flags |= flow_context_has_tag; + spec->flow_context.flow_tag = trap_id; + dest.type = mlx5_flow_destination_type_tir; + dest.tir_num = tir_num; + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); + kvfree(spec); + return rule; +} + +int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) +{ + struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; + struct mlx5_flow_handle *rule; + int err; + + rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); + if (is_err(rule)) { + err = ptr_err(rule); + priv->fs.vlan.trap_rule = null; + netdev_err(priv->netdev, "%s: add vlan trap rule failed, err %d ", + __func__, err); + return err; + } + priv->fs.vlan.trap_rule = rule; + return 0; +} + +void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv) +{ + if (priv->fs.vlan.trap_rule) { + mlx5_del_flow_rules(priv->fs.vlan.trap_rule); + priv->fs.vlan.trap_rule = null; + } +} + + mlx5e_remove_vlan_trap(priv); + -#define mlx5e_num_vlan_groups 4 +#define mlx5e_num_vlan_groups 5 +#define mlx5e_vlan_group_trap_size 
bit(0) /* must be last */ - mlx5e_vlan_group3_size) + mlx5e_vlan_group3_size +\ + mlx5e_vlan_group_trap_size) + memset(in, 0, inlen); + mlx5_set_cfg(in, start_flow_index, ix); + ix += mlx5e_vlan_group_trap_size; + mlx5_set_cfg(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (is_err(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; +
Networking
e2a1a00498aea4e3bf31b65b8691d2e7fc7e3693
aya levin moshe shemesh moshe nvidia com tariq toukan tariqt nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: add flow steering dmac trap rule
add flow group to the l2 table to hold the catch-all dmac rule. add api which adds/removes dmac trap rule. this rule catches packets that were destined to be dropped due to no-match with previous dmac rules. the trap rule steer these packets to the trap tir related to the trap-rq.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
2
43
2
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h + struct mlx5_flow_handle *trap_rule; +int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); +void mlx5e_remove_mac_trap(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) +{ + struct mlx5_flow_table *ft = priv->fs.l2.ft.t; + struct mlx5_flow_handle *rule; + int err; + + rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); + if (is_err(rule)) { + err = ptr_err(rule); + priv->fs.l2.trap_rule = null; + netdev_err(priv->netdev, "%s: add mac trap rule failed, err %d ", + __func__, err); + return err; + } + priv->fs.l2.trap_rule = rule; + return 0; +} + +void mlx5e_remove_mac_trap(struct mlx5e_priv *priv) +{ + if (priv->fs.l2.trap_rule) { + mlx5_del_flow_rules(priv->fs.l2.trap_rule); + priv->fs.l2.trap_rule = null; + } +} + -#define mlx5e_num_l2_groups 2 +#define mlx5e_num_l2_groups 3 +#define mlx5e_l2_group_trap_size bit(0) /* must be last */ - mlx5e_l2_group2_size) + mlx5e_l2_group2_size +\ + mlx5e_l2_group_trap_size) + /* flow group for l2 traps */ + memset(in, 0, inlen); + mlx5_set_cfg(in, start_flow_index, ix); + ix += mlx5e_l2_group_trap_size; + mlx5_set_cfg(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (is_err(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; +
Networking
ceef1b66bddaaee3124f66cd0279189e29bd3f56
aya levin moshe shemesh moshe nvidia com tariq toukan tariqt nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: expose rx dma info helpers
in order to support rqs outside of channel context, change mlx5e_init_di_list() signature to accept numa node instead of cpu. in addition, expose dma info helpers as api. this api will be used for rq's creation in other files in downstream patches.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
2
6
6
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node); +void mlx5e_free_di_list(struct mlx5e_rq *rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c -static int mlx5e_init_di_list(struct mlx5e_rq *rq, - int wq_sz, int cpu) +int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node) - rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), - gfp_kernel, cpu_to_node(cpu)); + rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), gfp_kernel, node); -static void mlx5e_free_di_list(struct mlx5e_rq *rq) +void mlx5e_free_di_list(struct mlx5e_rq *rq) - err = mlx5e_init_di_list(rq, wq_sz, c->cpu); + err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
Networking
cf74760932602fb25d16c57e49dbc445c81d0ff1
aya levin moshe shemesh moshe nvidia com tariq toukan tariqt nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5e: add trap entity to eth driver
introduce mlx5e_trap which includes a dedicated rq and napi for trapped packets. trap-rq processes packets that were destined to be dropped, but for debug and visibility sake these packets are trapped and reported to devlink. trap-rq connects between the hw and the driver and is not a part of a channel. open mlx5e_create_rq() and mlx5_core_destroy_rq() as api and add dedicate rq handlers which report to devlink of trapped packets.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c', 'makefile']
7
505
4
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/makefile b/drivers/net/ethernet/mellanox/mlx5/core/makefile --- a/drivers/net/ethernet/mellanox/mlx5/core/makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/makefile - en/qos.o + en/qos.o en/trap.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h +void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params); +struct mlx5e_trap; + + struct mlx5e_trap *en_trap; + struct mlx5e_channel_stats trap_stats; +int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param); +void mlx5e_destroy_rq(struct mlx5e_rq *rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2020 mellanox technologies */ + +#include <net/page_pool.h> +#include "en/txrx.h" +#include "en/params.h" +#include "en/trap.h" + +static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget) +{ + struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi); + struct mlx5e_ch_stats *ch_stats = trap_ctx->stats; + struct mlx5e_rq *rq = &trap_ctx->rq; + bool busy = false; + int work_done = 0; + + ch_stats->poll++; + + work_done = mlx5e_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + busy |= rq->post_wqes(rq); + + if (busy) + return budget; + + if (unlikely(!napi_complete_done(napi, work_done))) + return work_done; + + mlx5e_cq_arm(&rq->cq); + return work_done; +} + +static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp, + struct mlx5e_rq_stats *stats, struct mlx5e_params *params, + struct mlx5e_ch_stats *ch_stats, + struct mlx5e_rq *rq) +{ + void *rqc_wq = mlx5_addr_of(rqc, rqp->rqc, wq); + struct 
mlx5_core_dev *mdev = priv->mdev; + struct page_pool_params pp_params = {}; + int node = dev_to_node(mdev->device); + u32 pool_size; + int wq_sz; + int err; + int i; + + rqp->wq.db_numa_node = node; + + rq->wq_type = params->rq_wq_type; + rq->pdev = mdev->device; + rq->netdev = priv->netdev; + rq->mdev = mdev; + rq->priv = priv; + rq->stats = stats; + rq->clock = &mdev->clock; + rq->tstamp = &priv->tstamp; + rq->hw_mtu = mlx5e_sw2hw_mtu(params, params->sw_mtu); + + xdp_rxq_info_unused(&rq->xdp_rxq); + + rq->buff.map_dir = dma_from_device; + rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, null); + pool_size = 1 << params->log_rq_mtu_frames; + + err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl); + if (err) + return err; + + rq->wqe.wq.db = &rq->wqe.wq.db[mlx5_rcv_dbr]; + + wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); + + rq->wqe.info = rqp->frags_info; + rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride; + rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags), + (wq_sz << rq->wqe.info.log_num_frags)), + gfp_kernel, node); + if (!rq->wqe.frags) { + err = -enomem; + goto err_wq_cyc_destroy; + } + + err = mlx5e_init_di_list(rq, wq_sz, node); + if (err) + goto err_free_frags; + + rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + + mlx5e_rq_set_trap_handlers(rq, params); + + /* create a page_pool and register it with rxq */ + pp_params.order = 0; + pp_params.flags = 0; /* no-internal dma mapping in page_pool */ + pp_params.pool_size = pool_size; + pp_params.nid = node; + pp_params.dev = mdev->device; + pp_params.dma_dir = rq->buff.map_dir; + + /* page_pool can be used even when there is no rq->xdp_prog, + * given page_pool does not handle dma mapping there is no + * required state to clear. and page_pool gracefully handle + * elevated refcnt. 
+ */ + rq->page_pool = page_pool_create(&pp_params); + if (is_err(rq->page_pool)) { + err = ptr_err(rq->page_pool); + rq->page_pool = null; + goto err_free_di_list; + } + for (i = 0; i < wq_sz; i++) { + struct mlx5e_rx_wqe_cyc *wqe = + mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++) { + u32 frag_size = rq->wqe.info.arr[f].frag_size | + mlx5_hw_start_padding; + + wqe->data[f].byte_count = cpu_to_be32(frag_size); + wqe->data[f].lkey = rq->mkey_be; + } + /* check if num_frags is not a pow of two */ + if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { + wqe->data[f].byte_count = 0; + wqe->data[f].lkey = cpu_to_be32(mlx5_invalid_lkey); + wqe->data[f].addr = 0; + } + } + return 0; + +err_free_di_list: + mlx5e_free_di_list(rq); +err_free_frags: + kvfree(rq->wqe.frags); +err_wq_cyc_destroy: + mlx5_wq_destroy(&rq->wq_ctrl); + + return err; +} + +static void mlx5e_free_trap_rq(struct mlx5e_rq *rq) +{ + page_pool_destroy(rq->page_pool); + mlx5e_free_di_list(rq); + kvfree(rq->wqe.frags); + mlx5_wq_destroy(&rq->wq_ctrl); +} + +static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi, + struct mlx5e_rq_stats *stats, struct mlx5e_params *params, + struct mlx5e_rq_param *rq_param, + struct mlx5e_ch_stats *ch_stats, + struct mlx5e_rq *rq) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_create_cq_param ccp = {}; + struct dim_cq_moder trap_moder = {}; + struct mlx5e_cq *cq = &rq->cq; + int err; + + ccp.node = dev_to_node(mdev->device); + ccp.ch_stats = ch_stats; + ccp.napi = napi; + ccp.ix = 0; + err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq); + if (err) + return err; + + err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq); + if (err) + goto err_destroy_cq; + + err = mlx5e_create_rq(rq, rq_param); + if (err) + goto err_free_rq; + + err = mlx5e_modify_rq_state(rq, mlx5_rqc_state_rst, mlx5_rqc_state_rdy); + if (err) + goto err_destroy_rq; + + return 0; + 
+err_destroy_rq: + mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); +err_free_rq: + mlx5e_free_trap_rq(rq); +err_destroy_cq: + mlx5e_close_cq(cq); + + return err; +} + +static void mlx5e_close_trap_rq(struct mlx5e_rq *rq) +{ + mlx5e_destroy_rq(rq); + mlx5e_free_rx_descs(rq); + mlx5e_free_trap_rq(rq); + mlx5e_close_cq(&rq->cq); +} + +static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, + u32 rqn) +{ + void *tirc; + int inlen; + u32 *in; + int err; + + inlen = mlx5_st_sz_bytes(create_tir_in); + in = kvzalloc(inlen, gfp_kernel); + if (!in) + return -enomem; + + tirc = mlx5_addr_of(create_tir_in, in, ctx); + mlx5_set(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + mlx5_set(tirc, tirc, rx_hash_fn, mlx5_rx_hash_fn_none); + mlx5_set(tirc, tirc, disp_type, mlx5_tirc_disp_type_direct); + mlx5_set(tirc, tirc, inline_rqn, rqn); + err = mlx5e_create_tir(mdev, tir, in); + kvfree(in); + + return err; +} + +static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) +{ + mlx5e_destroy_tir(mdev, tir); +} + +static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq) +{ + set_bit(mlx5e_rq_state_enabled, &rq->state); +} + +static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq) +{ + clear_bit(mlx5e_rq_state_enabled, &rq->state); +} + +static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t) +{ + struct mlx5e_params *params = &t->params; + + params->rq_wq_type = mlx5_wq_type_cyclic; + mlx5e_init_rq_type_params(priv->mdev, params); + params->sw_mtu = priv->netdev->max_mtu; + mlx5e_build_rq_param(priv, params, null, &t->rq_param); +} + +static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv) +{ + int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0)); + struct net_device *netdev = priv->netdev; + struct mlx5e_trap *t; + int err; + + t = kvzalloc_node(sizeof(*t), gfp_kernel, cpu_to_node(cpu)); + if (!t) + return err_ptr(-enomem); + + 
mlx5e_build_trap_params(priv, t); + + t->priv = priv; + t->mdev = priv->mdev; + t->tstamp = &priv->tstamp; + t->pdev = mlx5_core_dma_dev(priv->mdev); + t->netdev = priv->netdev; + t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); + t->stats = &priv->trap_stats.ch; + + netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64); + + err = mlx5e_open_trap_rq(priv, &t->napi, + &priv->trap_stats.rq, + &t->params, &t->rq_param, + &priv->trap_stats.ch, + &t->rq); + if (unlikely(err)) + goto err_napi_del; + + err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn); + if (err) + goto err_close_trap_rq; + + return t; + +err_close_trap_rq: + mlx5e_close_trap_rq(&t->rq); +err_napi_del: + netif_napi_del(&t->napi); + kvfree(t); + return err_ptr(err); +} + +void mlx5e_close_trap(struct mlx5e_trap *trap) +{ + mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir); + mlx5e_close_trap_rq(&trap->rq); + netif_napi_del(&trap->napi); + kvfree(trap); +} + +static void mlx5e_activate_trap(struct mlx5e_trap *trap) +{ + napi_enable(&trap->napi); + mlx5e_activate_trap_rq(&trap->rq); + napi_schedule(&trap->napi); +} + +void mlx5e_deactivate_trap(struct mlx5e_priv *priv) +{ + struct mlx5e_trap *trap = priv->en_trap; + + mlx5e_deactivate_trap_rq(&trap->rq); + napi_disable(&trap->napi); +} + +static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv) +{ + struct mlx5e_trap *trap; + + trap = mlx5e_open_trap(priv); + if (is_err(trap)) + goto out; + + mlx5e_activate_trap(trap); +out: + return trap; +} + +static void mlx5e_del_trap_queue(struct mlx5e_priv *priv) +{ + mlx5e_deactivate_trap(priv); + mlx5e_close_trap(priv->en_trap); + priv->en_trap = null; +} + +static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap) +{ + return en_trap->tir.tirn; +} + +static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id) +{ + bool open_queue = !priv->en_trap; + struct mlx5e_trap *trap; + int err; + + if (open_queue) { + trap = mlx5e_add_trap_queue(priv); + if 
(is_err(trap)) + return ptr_err(trap); + priv->en_trap = trap; + } + + switch (trap_id) { + case devlink_trap_generic_id_ingress_vlan_filter: + err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap)); + if (err) + goto err_out; + break; + default: + netdev_warn(priv->netdev, "%s: unknown trap id %d ", __func__, trap_id); + err = -einval; + goto err_out; + } + return 0; + +err_out: + if (open_queue) + mlx5e_del_trap_queue(priv); + return err; +} + +static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id) +{ + switch (trap_id) { + case devlink_trap_generic_id_ingress_vlan_filter: + mlx5e_remove_vlan_trap(priv); + break; + default: + netdev_warn(priv->netdev, "%s: unknown trap id %d ", __func__, trap_id); + return -einval; + } + if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev)) + mlx5e_del_trap_queue(priv); + + return 0; +} + +int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx) +{ + int err = 0; + + switch (trap_ctx->action) { + case devlink_trap_action_trap: + err = mlx5e_handle_action_trap(priv, trap_ctx->id); + break; + case devlink_trap_action_drop: + err = mlx5e_handle_action_drop(priv, trap_ctx->id); + break; + default: + netdev_warn(priv->netdev, "%s: unsupported action %d ", __func__, + trap_ctx->action); + err = -einval; + } + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2020, mellanox technologies */ + +#ifndef __mlx5e_trap_h__ +#define __mlx5e_trap_h__ + +#include "../en.h" +#include "../devlink.h" + +struct mlx5e_trap { + /* data path */ + struct mlx5e_rq rq; + struct mlx5e_tir tir; + struct napi_struct napi; + struct device *pdev; + struct net_device *netdev; + __be32 mkey_be; + + /* data path - accessed per napi poll */ + struct 
mlx5e_ch_stats *stats; + + /* control */ + struct mlx5e_priv *priv; + struct mlx5_core_dev *mdev; + struct hwtstamp_config *tstamp; + declare_bitmap(state, mlx5e_channel_num_states); + + struct mlx5e_params params; + struct mlx5e_rq_param rq_param; +}; + +void mlx5e_close_trap(struct mlx5e_trap *trap); +void mlx5e_deactivate_trap(struct mlx5e_priv *priv); +int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c -static int mlx5e_create_rq(struct mlx5e_rq *rq, - struct mlx5e_rq_param *param) +int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) -static void mlx5e_destroy_rq(struct mlx5e_rq *rq) +void mlx5e_destroy_rq(struct mlx5e_rq *rq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +#include "devlink.h" + +static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + struct mlx5e_priv *priv = netdev_priv(rq->netdev); + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + struct mlx5e_wqe_frag_info *wi; + struct sk_buff *skb; + u32 cqe_bcnt; + u16 trap_id; + u16 ci; + + trap_id = get_cqe_flow_tag(cqe); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + + if (unlikely(mlx5e_rx_err_cqe(cqe))) { + rq->stats->wqe_err++; + goto free_wqe; + } + + skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt); + if (!skb) + goto free_wqe; + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + skb_push(skb, eth_hlen); + + mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port); + dev_kfree_skb_any(skb); + +free_wqe: + 
mlx5e_free_rx_wqe(rq, wi, false); + mlx5_wq_cyc_pop(wq); +} + +void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params) +{ + rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, null) ? + mlx5e_skb_from_cqe_linear : + mlx5e_skb_from_cqe_nonlinear; + rq->post_wqes = mlx5e_post_rx_wqes; + rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; + rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe; +} diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h +static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe) +{ + return be32_to_cpu(cqe->sop_drop_qpn) & 0xfff; +} +
Networking
5543e989fe5e2fe6a5829ee42c00152cac2bb8a0
aya levin tariq toukan tariqt nvidia com
include
linux
core, en, ethernet, mellanox, mlx5
net/mlx5e: add listener to trap event
add support for listening to blocking events in the eth driver. listen on trap event. if received, call mlx5e_handle_trap_event() which: 1) verifies if driver needs open/close trap-rq with respect to the active traps count. 2) inspects trap id and its action (trap/drop) and add/remove the flow steering rule accordingly. otherwise, return an error.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
2
36
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h + struct notifier_block blocking_events_nb; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +#include "en/trap.h" +static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb); + int err; + + switch (event) { + case mlx5_driver_event_type_trap: + err = mlx5e_handle_trap_event(priv, data); + break; + default: + netdev_warn(priv->netdev, "sync event: unknouwn event %ld ", event); + err = -einval; + } + return err; +} + +static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv) +{ + priv->blocking_events_nb.notifier_call = blocking_event; + mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb); +} + +static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv) +{ + mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb); +} + + mlx5e_enable_blocking_events(priv); + mlx5e_disable_blocking_events(priv); + if (priv->en_trap) { + mlx5e_deactivate_trap(priv); + mlx5e_close_trap(priv->en_trap); + priv->en_trap = null; + }
Networking
70038b73e40e2ce6bc4c8f25bbf0747b7a07a61f
aya levin tariq toukan tariqt nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5e: add listener to dmac filter trap event
add support for trapping packets which didn't match any dmac in the mac table. add a listener which adds/removes mac trap rule in the flow steering according to the trap's action trap/drop.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
8
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c + case devlink_trap_generic_id_dmac_filter: + err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap)); + if (err) + goto err_out; + break; + case devlink_trap_generic_id_dmac_filter: + mlx5e_remove_mac_trap(priv); + break;
Networking
49fdbd23418f5b18536d02f257096bd71fc83086
aya levin moshe shemesh moshe nvidia com tariq toukan tariqt nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: enable traps according to link state
avoid trapping packets when the interface is down, and revive them when interface is back up. add api to mlx5 core retrieving the action by trap id. use it to apply traps when interface is up, and disable then when interface is down.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
devlink trap support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
62
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id, + enum devlink_trap_action *action) +{ + struct mlx5_devlink_trap *dl_trap; + + dl_trap = mlx5_find_trap_by_id(dev, trap_id); + if (!dl_trap) { + mlx5_core_err(dev, "devlink trap: get action on invalid trap id 0x%x", + trap_id); + return -einval; + } + + *action = dl_trap->trap.action; + return 0; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id, + enum devlink_trap_action *action); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c + /* traps are unarmed when interface is down, no need to update + * them. the configuration is saved in the core driver, + * queried and applied upon interface up operation in + * mlx5e_open_locked(). + */ + if (!test_bit(mlx5e_state_opened, &priv->state)) + return 0; + + +static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable) +{ + enum devlink_trap_action action; + int err; + + err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action); + if (err) + return err; + if (action == devlink_trap_action_trap) + err = enable ? 
mlx5e_handle_action_trap(priv, trap_id) : + mlx5e_handle_action_drop(priv, trap_id); + return err; +} + +static const int mlx5e_traps_arr[] = { + devlink_trap_generic_id_ingress_vlan_filter, + devlink_trap_generic_id_dmac_filter, +}; + +int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable) +{ + int err; + int i; + + for (i = 0; i < array_size(mlx5e_traps_arr); i++) { + err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable); + if (err) + return err; + } + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h +int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c + mlx5e_apply_traps(priv, true); + mlx5e_apply_traps(priv, false);
Networking
eb3862a0525d26f0975ed4f750bc151920f2f25c
aya levin moshe shemesh moshe nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5: e-switch, refactor setting source port
setting the source port requires only the e-switch and vport number. refactor the function to get those parameters instead of passing the full attribute.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
12
7
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c - struct mlx5_esw_flow_attr *attr) + struct mlx5_eswitch *src_esw, + u16 vport) - mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch, - attr->in_rep->vport)); + mlx5_eswitch_get_vport_metadata_for_match(src_esw, + vport)); - mlx5_set(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + mlx5_set(fte_match_set_misc, misc, source_port, vport); - mlx5_cap_gen(attr->in_mdev, vhca_id)); + mlx5_cap_gen(src_esw->dev, vhca_id)); - mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); + mlx5_eswitch_set_rule_source_port(esw, spec, + esw_attr->in_mdev->priv.eswitch, + esw_attr->in_rep->vport); - mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr); + mlx5_eswitch_set_rule_source_port(esw, spec, + esw_attr->in_mdev->priv.eswitch, + esw_attr->in_rep->vport);
Networking
b055ecf5827d81a60144560266a78fea652bdf1a
mark bloch saeed mahameed saeedm nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5e: e-switch, maintain vhca_id to vport_num mapping
following patches in the series need to be able to map vf netdev to vport. since it is trivial to obtain vhca_id from netdev, maintain mapping from vhca_id to vport_num inside eswitch offloads using xarray. provide function mlx5_eswitch_vhca_id_to_vport() to be used by tc code in following patches to obtain the mapping.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
119
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + mlx5_cap_gen(esw->dev, vhca_resource_manager)) { + ret = mlx5_esw_vport_vhca_id_set(esw, vport_num); + if (ret) + goto err_vhca_mapping; + } + + +err_vhca_mapping: + esw_vport_cleanup(esw, vport); + mutex_unlock(&esw->state_lock); + return ret; + + if (!mlx5_esw_is_manager_vport(esw, vport->vport) && + mlx5_cap_gen(esw->dev, vhca_resource_manager)) + mlx5_esw_vport_vhca_id_clear(esw, vport_num); + + xa_init_flags(&esw->offloads.vhca_map, xa_flags_alloc); + warn_on(!xa_empty(&esw->offloads.vhca_map)); + xa_destroy(&esw->offloads.vhca_map); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +#include <linux/xarray.h> + struct xarray vhca_map; +int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num); +void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num); +int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c + +static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id) +{ + int query_out_sz = mlx5_st_sz_bytes(query_hca_cap_out); + void *query_ctx; + void *hca_caps; + int err; + + *vhca_id = 0; + if (mlx5_esw_is_manager_vport(esw, vport_num) || + !mlx5_cap_gen(esw->dev, vhca_resource_manager)) + return -eperm; + + query_ctx = 
kzalloc(query_out_sz, gfp_kernel); + if (!query_ctx) + return -enomem; + + err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx); + if (err) + goto out_free; + + hca_caps = mlx5_addr_of(query_hca_cap_out, query_ctx, capability); + *vhca_id = mlx5_get(cmd_hca_cap, hca_caps, vhca_id); + +out_free: + kfree(query_ctx); + return err; +} + +int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num) +{ + u16 *old_entry, *vhca_map_entry, vhca_id; + int err; + + err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); + if (err) { + esw_warn(esw->dev, "getting vhca_id for vport failed (vport=%u,err=%d) ", + vport_num, err); + return err; + } + + vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), gfp_kernel); + if (!vhca_map_entry) + return -enomem; + + *vhca_map_entry = vport_num; + old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, gfp_kernel); + if (xa_is_err(old_entry)) { + kfree(vhca_map_entry); + return xa_err(old_entry); + } + kfree(old_entry); + return 0; +} + +void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num) +{ + u16 *vhca_map_entry, vhca_id; + int err; + + err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); + if (err) + esw_warn(esw->dev, "getting vhca_id for vport failed (vport=%hu,err=%d) ", + vport_num, err); + + vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id); + kfree(vhca_map_entry); +} + +int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num) +{ + u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id); + + if (!res) + return -enoent; + + *vport_num = *res; + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out); + diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c + +int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out) +{ + u16 opmod = (mlx5_cap_general << 1) | (hca_cap_opmod_get_max & 0x01); + u8 in[mlx5_st_sz_bytes(query_hca_cap_in)] = {}; + + mlx5_set(query_hca_cap_in, in, opcode, mlx5_cmd_op_query_hca_cap); + mlx5_set(query_hca_cap_in, in, op_mod, opmod); + mlx5_set(query_hca_cap_in, in, function_id, function_id); + mlx5_set(query_hca_cap_in, in, other_function, true); + return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out); +}
Networking
84ae9c1f29c06cb4aaf9b1ad290e0abee44ceebc
vlad buslov roi dayan roid nvidia com
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5e: always set attr mdev pointer
eswitch offloads extensions in following patches in the series require attr->esw_attr->in_mdev pointer to always be set. this is already the case for all code paths except mlx5_tc_ct_entry_add_rule() function. fix the function to assign mdev pointer with priv->mdev value.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
2
0
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c + if (ct_priv->ns_type == mlx5_flow_namespace_fdb) + attr->esw_attr->in_mdev = priv->mdev;
Networking
275c21d6cbe2ffb49aa1f054bff7ddfc9126564c
vlad buslov roi dayan roid nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5: e-switch, refactor rule offload forward action processing
following patches in the series extend forwarding functionality with vf tunnel tx and rx handling. extract action forwarding processing code into dedicated functions to simplify further extensions:
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
129
60
- handle every forwarding case with dedicated function instead of inline - extract forwarding dest dispatch conditional into helper function - unify forwaring cleanup code in error path of --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +static void +esw_setup_ft_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_attr *attr, + int i) +{ + flow_act->flags |= flow_act_ignore_flow_level; + dest[i].type = mlx5_flow_destination_type_flow_table; + dest[i].ft = attr->dest_ft; +} + +static void +esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_fs_chains *chains, + int i) +{ + flow_act->flags |= flow_act_ignore_flow_level; + dest[i].type = mlx5_flow_destination_type_flow_table; + dest[i].ft = mlx5_chains_get_tc_end_ft(chains); +} + +static int +esw_setup_chain_dest(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_fs_chains *chains, + u32 chain, u32 prio, u32 level, + int i) +{ + struct mlx5_flow_table *ft; + + flow_act->flags |= flow_act_ignore_flow_level; + ft = mlx5_chains_get_table(chains, chain, prio, level); + if (is_err(ft)) + return ptr_err(ft); + + dest[i].type = mlx5_flow_destination_type_flow_table; + dest[i].ft = ft; + return 0; +} + +static void +esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level) +{ + mlx5_chains_put_table(chains, chain, prio, level); +} + +static void +esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, + int attr_idx, int dest_idx, bool pkt_reformat) +{ + dest[dest_idx].type = mlx5_flow_destination_type_vport; + dest[dest_idx].vport.num = 
esw_attr->dests[attr_idx].rep->vport; + dest[dest_idx].vport.vhca_id = + mlx5_cap_gen(esw_attr->dests[attr_idx].mdev, vhca_id); + if (mlx5_cap_esw(esw->dev, merged_eswitch)) + dest[dest_idx].vport.flags |= mlx5_flow_dest_vport_vhca_id; + if (esw_attr->dests[attr_idx].flags & mlx5_esw_dest_encap) { + if (pkt_reformat) { + flow_act->action |= mlx5_flow_context_action_packet_reformat; + flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; + } + dest[dest_idx].vport.flags |= mlx5_flow_dest_vport_reformat_id; + dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat; + } +} + +static int +esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr, + int i) +{ + int j; + + for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++) + esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true); + return i; +} + +static int +esw_setup_dests(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + int *i) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_fs_chains *chains = esw_chains(esw); + int err = 0; + + if (attr->dest_ft) { + esw_setup_ft_dest(dest, flow_act, attr, *i); + (*i)++; + } else if (attr->flags & mlx5_esw_attr_flag_slow_path) { + esw_setup_slow_path_dest(dest, flow_act, chains, *i); + (*i)++; + } else if (attr->dest_chain) { + err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, + 1, 0, *i); + (*i)++; + } else { + *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i); + } + + return err; +} + +static void +esw_cleanup_dests(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr) +{ + struct mlx5_fs_chains *chains = esw_chains(esw); + + if (!(attr->flags & mlx5_esw_attr_flag_slow_path) && attr->dest_chain) + esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0); +} + - int j, i = 0; + int i = 0; - struct 
mlx5_flow_table *ft; - - if (attr->dest_ft) { - flow_act.flags |= flow_act_ignore_flow_level; - dest[i].type = mlx5_flow_destination_type_flow_table; - dest[i].ft = attr->dest_ft; - i++; - } else if (attr->flags & mlx5_esw_attr_flag_slow_path) { - flow_act.flags |= flow_act_ignore_flow_level; - dest[i].type = mlx5_flow_destination_type_flow_table; - dest[i].ft = mlx5_chains_get_tc_end_ft(chains); - i++; - } else if (attr->dest_chain) { - flow_act.flags |= flow_act_ignore_flow_level; - ft = mlx5_chains_get_table(chains, attr->dest_chain, - 1, 0); - if (is_err(ft)) { - rule = err_cast(ft); - goto err_create_goto_table; - } - - dest[i].type = mlx5_flow_destination_type_flow_table; - dest[i].ft = ft; - i++; - } else { - for (j = esw_attr->split_count; j < esw_attr->out_count; j++) { - dest[i].type = mlx5_flow_destination_type_vport; - dest[i].vport.num = esw_attr->dests[j].rep->vport; - dest[i].vport.vhca_id = - mlx5_cap_gen(esw_attr->dests[j].mdev, vhca_id); - if (mlx5_cap_esw(esw->dev, merged_eswitch)) - dest[i].vport.flags |= - mlx5_flow_dest_vport_vhca_id; - if (esw_attr->dests[j].flags & mlx5_esw_dest_encap) { - flow_act.action |= mlx5_flow_context_action_packet_reformat; - flow_act.pkt_reformat = - esw_attr->dests[j].pkt_reformat; - dest[i].vport.flags |= mlx5_flow_dest_vport_reformat_id; - dest[i].vport.pkt_reformat = - esw_attr->dests[j].pkt_reformat; - } - i++; - } + int err; + + err = esw_setup_dests(dest, &flow_act, esw, attr, &i); + if (err) { + rule = err_ptr(err); + goto err_create_goto_table; - if (!(attr->flags & mlx5_esw_attr_flag_slow_path) && attr->dest_chain) - mlx5_chains_put_table(chains, attr->dest_chain, 1, 0); + esw_cleanup_dests(esw, attr); - for (i = 0; i < esw_attr->split_count; i++) { - dest[i].type = mlx5_flow_destination_type_vport; - dest[i].vport.num = esw_attr->dests[i].rep->vport; - dest[i].vport.vhca_id = - mlx5_cap_gen(esw_attr->dests[i].mdev, vhca_id); - if (mlx5_cap_esw(esw->dev, merged_eswitch)) - dest[i].vport.flags |= 
mlx5_flow_dest_vport_vhca_id; - if (esw_attr->dests[i].flags & mlx5_esw_dest_encap) { - dest[i].vport.flags |= mlx5_flow_dest_vport_reformat_id; - dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat; - } - } + for (i = 0; i < esw_attr->split_count; i++) + esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); - if (attr->dest_chain) - mlx5_chains_put_table(chains, attr->dest_chain, 1, 0); + esw_cleanup_dests(esw, attr);
Networking
9e51c0a624925076fe07a09674172495c8c09e59
vlad buslov
drivers
net
core, ethernet, mellanox, mlx5
net/mlx5e: vf tunnel tx traffic offloading
when tunnel endpoint is on vf, driver still assumes that endpoint is on uplink and incorrectly configures encap rule offload according to that assumption. as a result, traffic is sent directly to the uplink and rules installed on representor of tunnel endpoint vf are ignored.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
201
11
- for tunneling flows perform route lookup on route and out devices pair. - recirculate encapsulated packets to vf vport in order to apply any flow - hardware advertises capability to preserve reg_c_0 value on packet - vport metadata matching is enabled. - termination tables are to be used by the flow. --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c + [vport_to_reg] = { + .mfield = mlx5_action_in_field_metadata_reg_c_0, + .moffset = 2, + .mlen = 2, + }, +static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv); + +static bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev) +{ + struct mlx5_core_dev *out_mdev, *route_mdev; + struct mlx5e_priv *out_priv, *route_priv; + + out_priv = netdev_priv(out_dev); + out_mdev = out_priv->mdev; + route_priv = netdev_priv(route_dev); + route_mdev = route_priv->mdev; + + if (out_mdev->coredev_type != mlx5_coredev_pf || + route_mdev->coredev_type != mlx5_coredev_vf) + return false; + + return same_hw_devs(out_priv, route_priv); +} + +static int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, + u16 *vport) +{ + struct mlx5e_priv *out_priv, *route_priv; + struct mlx5_core_dev *route_mdev; + struct mlx5_eswitch *esw; + u16 vhca_id; + int err; + + out_priv = netdev_priv(out_dev); + esw = out_priv->mdev->priv.eswitch; + route_priv = netdev_priv(route_dev); + route_mdev = route_priv->mdev; + + vhca_id = mlx5_cap_gen(route_mdev, vhca_id); + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); + return err; +} + +static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + struct net_device *out_dev, + int route_dev_ifindex, + int out_index) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; 
+ struct net_device *route_dev; + u16 vport_num; + int err = 0; + u32 data; + + route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex); + + if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) + goto out; + + err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num); + if (err) + goto out; + + attr->dest_chain = 0; + attr->action |= mlx5_flow_context_action_mod_hdr; + esw_attr->dests[out_index].flags |= mlx5_esw_dest_chain_with_src_port_change; + data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch, + vport_num); + err = mlx5e_tc_match_to_reg_set(esw->dev, mod_hdr_acts, + mlx5_flow_namespace_fdb, vport_to_reg, data); + if (err) + goto out; + +out: + if (route_dev) + dev_put(route_dev); + return err; +} + + err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev, + e->route_dev_ifindex, out_index); + if (err) + goto out_err; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h + vport_to_reg, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h + mlx5_esw_dest_chain_with_src_port_change = bit(2), + mlx5_esw_attr_flag_src_rewrite = bit(3), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, + int from, int to) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_fs_chains *chains = 
esw_chains(esw); + int i; + + for (i = from; i < to; i++) + if (esw_attr->dests[i].flags & mlx5_esw_dest_chain_with_src_port_change) + mlx5_chains_put_table(chains, 0, 1, 0); +} + +static bool +esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr) +{ + int i; + + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + if (esw_attr->dests[i].flags & mlx5_esw_dest_chain_with_src_port_change) + return true; + return false; +} + +static int +esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, + struct mlx5_fs_chains *chains, + struct mlx5_flow_attr *attr, + int *i) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int j, err; + + if (!(attr->flags & mlx5_esw_attr_flag_src_rewrite)) + return -eopnotsupp; + + for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { + err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i); + if (err) + goto err_setup_chain; + flow_act->action |= mlx5_flow_context_action_packet_reformat; + flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat; + } + return 0; + +err_setup_chain: + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j); + return err; +} + +static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count); +} + + struct mlx5_flow_spec *spec, + if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) && + mlx5_cap_gen(esw_attr->in_mdev, reg_c_preserve) && + mlx5_eswitch_vport_match_metadata_enabled(esw)) + attr->flags |= mlx5_esw_attr_flag_src_rewrite; + + } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) { + err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i); + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - if 
(!(attr->flags & mlx5_esw_attr_flag_slow_path) && attr->dest_chain) - esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0); + if (!(attr->flags & mlx5_esw_attr_flag_slow_path)) { + if (attr->dest_chain) + esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0); + else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) + esw_cleanup_chain_src_port_rewrite(esw, attr); + } + mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr); + - err = esw_setup_dests(dest, &flow_act, esw, attr, &i); + err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i); - mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr); - - int i; + int i, err = 0; - for (i = 0; i < esw_attr->split_count; i++) - esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); + for (i = 0; i < esw_attr->split_count; i++) { + if (esw_is_chain_src_port_rewrite(esw, esw_attr)) + err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr, + &i); + else + esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); + + if (err) { + rule = err_ptr(err); + goto err_chain_src_rewrite; + } + } - if (is_err(rule)) - goto add_err; + if (is_err(rule)) { + i = esw_attr->split_count; + goto err_chain_src_rewrite; + } -add_err: +err_chain_src_rewrite: + esw_put_dest_tables_loop(esw, attr, 0, i); + esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count); + +u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, + u16 vport_num) +{ + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); + + if (warn_on_once(is_err(vport))) + return 0; + + return vport->metadata; +} +export_symbol(mlx5_eswitch_get_vport_metadata_for_set); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h +u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, + u16 vport_num);
Networking
10742efc20a429b2040658af685d6bb2aa674a73
vlad buslov
include
linux
core, ethernet, mellanox, mlx5
net/mlx5e: refactor tun routing helpers
refactor tun routing helpers to use dedicated struct mlx5e_tc_tun_route_attr instead of multiple output arguments. this simplifies the callers (no need to keep track of bunch of output param pointers) and allows to unify struct release code in new mlx5e_tc_tun_route_attr_cleanup() helper instead of requiring callers to manually release some of the output parameters that require it.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
126
109
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +struct mlx5e_tc_tun_route_attr { + struct net_device *out_dev; + struct net_device *route_dev; + union { + struct flowi4 fl4; + struct flowi6 fl6; + } fl; + struct neighbour *n; + u8 ttl; +}; + +#define tc_tun_route_attr_init(name) struct mlx5e_tc_tun_route_attr name = {} + +static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr) +{ + if (attr->n) + neigh_release(attr->n); + if (attr->route_dev) + dev_put(attr->route_dev); +} + - struct net_device **out_dev, - struct net_device **route_dev, - struct flowi4 *fl4, - struct neighbour **out_n, - u8 *out_ttl) + struct mlx5e_tc_tun_route_attr *attr) + struct net_device *route_dev; + struct net_device *out_dev; - fl4->flowi4_oif = uplink_dev->ifindex; + attr->fl.fl4.flowi4_oif = uplink_dev->ifindex; - rt = ip_route_output_key(dev_net(mirred_dev), fl4); + rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4); - ip_rt_put(rt); - return -enetunreach; + ret = -enetunreach; + goto err_rt_release; - ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev); - if (ret < 0) { - ip_rt_put(rt); - return ret; - } - dev_hold(*route_dev); + ret = get_route_and_out_devs(priv, rt->dst.dev, &route_dev, &out_dev); + if (ret < 0) + goto err_rt_release; + dev_hold(route_dev); - if (!(*out_ttl)) - *out_ttl = ip4_dst_hoplimit(&rt->dst); - n = dst_neigh_lookup(&rt->dst, &fl4->daddr); - ip_rt_put(rt); + if (!attr->ttl) + attr->ttl = ip4_dst_hoplimit(&rt->dst); + n = dst_neigh_lookup(&rt->dst, &attr->fl.fl4.daddr); - dev_put(*route_dev); - return -enomem; + ret = -enomem; + goto err_dev_release; - *out_n = n; + ip_rt_put(rt); + attr->route_dev = route_dev; + attr->out_dev = out_dev; + attr->n = n; + +err_dev_release: + dev_put(route_dev); +err_rt_release: + 
ip_rt_put(rt); + return ret; -static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev, - struct neighbour *n) +static void mlx5e_route_lookup_ipv4_put(struct mlx5e_tc_tun_route_attr *attr) - neigh_release(n); - dev_put(route_dev); + mlx5e_tc_tun_route_attr_cleanup(attr); - struct net_device *out_dev, *route_dev; - struct flowi4 fl4 = {}; - struct neighbour *n; + tc_tun_route_attr_init(attr); - u8 nud_state, ttl; + u8 nud_state; - fl4.flowi4_tos = tun_key->tos; - fl4.daddr = tun_key->u.ipv4.dst; - fl4.saddr = tun_key->u.ipv4.src; - ttl = tun_key->ttl; + attr.fl.fl4.flowi4_tos = tun_key->tos; + attr.fl.fl4.daddr = tun_key->u.ipv4.dst; + attr.fl.fl4.saddr = tun_key->u.ipv4.src; + attr.ttl = tun_key->ttl; - err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev, - &fl4, &n, &ttl); + err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr); - (is_vlan_dev(route_dev) ? vlan_eth_hlen : eth_hlen) + + (is_vlan_dev(attr.route_dev) ? vlan_eth_hlen : eth_hlen) + - e->m_neigh.dev = n->dev; - e->m_neigh.family = n->ops->family; - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - e->out_dev = out_dev; - e->route_dev_ifindex = route_dev->ifindex; + e->m_neigh.dev = attr.n->dev; + e->m_neigh.family = attr.n->ops->family; + memcpy(&e->m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); + e->out_dev = attr.out_dev; + e->route_dev_ifindex = attr.route_dev->ifindex; - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e); - read_lock_bh(&n->lock); - nud_state = n->nud_state; - ether_addr_copy(e->h_dest, n->ha); - read_unlock_bh(&n->lock); + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + read_unlock_bh(&attr.n->lock); - ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, + ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, - ip->ttl = ttl; - ip->daddr = fl4.daddr; - 
ip->saddr = fl4.saddr; + ip->ttl = attr.ttl; + ip->daddr = attr.fl.fl4.daddr; + ip->saddr = attr.fl.fl4.saddr; - neigh_event_send(n, null); + neigh_event_send(attr.n, null); - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); - mlx5e_route_lookup_ipv4_put(route_dev, n); + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv4_put(&attr); - mlx5e_route_lookup_ipv4_put(route_dev, n); + mlx5e_route_lookup_ipv4_put(&attr); - struct net_device **out_dev, - struct net_device **route_dev, - struct flowi6 *fl6, - struct neighbour **out_n, - u8 *out_ttl) + struct mlx5e_tc_tun_route_attr *attr) + struct net_device *route_dev; + struct net_device *out_dev; - - dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), null, fl6, + dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), null, &attr->fl.fl6, - if (!(*out_ttl)) - *out_ttl = ip6_dst_hoplimit(dst); + if (!attr->ttl) + attr->ttl = ip6_dst_hoplimit(dst); - ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); - if (ret < 0) { - dst_release(dst); - return ret; - } + ret = get_route_and_out_devs(priv, dst->dev, &route_dev, &out_dev); + if (ret < 0) + goto err_dst_release; - dev_hold(*route_dev); - n = dst_neigh_lookup(dst, &fl6->daddr); - dst_release(dst); + dev_hold(route_dev); + n = dst_neigh_lookup(dst, &attr->fl.fl6.daddr); - dev_put(*route_dev); - return -enomem; + ret = -enomem; + goto err_dev_release; - *out_n = n; + dst_release(dst); + attr->out_dev = out_dev; + attr->route_dev = route_dev; + attr->n = n; + +err_dev_release: + dev_put(route_dev); +err_dst_release: + dst_release(dst); + return ret; -static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev, - struct neighbour *n) +static void mlx5e_route_lookup_ipv6_put(struct mlx5e_tc_tun_route_attr *attr) - neigh_release(n); - dev_put(route_dev); + mlx5e_tc_tun_route_attr_cleanup(attr); - struct net_device *out_dev, *route_dev; - struct flowi6 fl6 = {}; + tc_tun_route_attr_init(attr); - struct 
neighbour *n = null; - u8 nud_state, ttl; + u8 nud_state; - ttl = tun_key->ttl; - - fl6.flowlabel = ip6_make_flowinfo(rt_tos(tun_key->tos), tun_key->label); - fl6.daddr = tun_key->u.ipv6.dst; - fl6.saddr = tun_key->u.ipv6.src; + attr.ttl = tun_key->ttl; + attr.fl.fl6.flowlabel = ip6_make_flowinfo(rt_tos(tun_key->tos), tun_key->label); + attr.fl.fl6.daddr = tun_key->u.ipv6.dst; + attr.fl.fl6.saddr = tun_key->u.ipv6.src; - err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev, - &fl6, &n, &ttl); + err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr); - (is_vlan_dev(route_dev) ? vlan_eth_hlen : eth_hlen) + + (is_vlan_dev(attr.route_dev) ? vlan_eth_hlen : eth_hlen) + - e->m_neigh.dev = n->dev; - e->m_neigh.family = n->ops->family; - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - e->out_dev = out_dev; - e->route_dev_ifindex = route_dev->ifindex; + e->m_neigh.dev = attr.n->dev; + e->m_neigh.family = attr.n->ops->family; + memcpy(&e->m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); + e->out_dev = attr.out_dev; + e->route_dev_ifindex = attr.route_dev->ifindex; - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e); - read_lock_bh(&n->lock); - nud_state = n->nud_state; - ether_addr_copy(e->h_dest, n->ha); - read_unlock_bh(&n->lock); + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + read_unlock_bh(&attr.n->lock); - ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, + ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, - ip6h->hop_limit = ttl; - ip6h->daddr = fl6.daddr; - ip6h->saddr = fl6.saddr; + ip6h->hop_limit = attr.ttl; + ip6h->daddr = attr.fl.fl6.daddr; + ip6h->saddr = attr.fl.fl6.saddr; - neigh_event_send(n, null); + neigh_event_send(attr.n, null); - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); - mlx5e_route_lookup_ipv6_put(route_dev, 
n); + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv6_put(&attr); - mlx5e_route_lookup_ipv6_put(route_dev, n); + mlx5e_route_lookup_ipv6_put(&attr);
Networking
6717986e15a067ac49370e3f563063c8154e6854
vlad buslov
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5: e-switch, indirect table infrastructure
indirect table infrastructure is used to allow fully processing vf tunnel traffic in hardware. kernel software model uses two tc rules for such traffic: ul rep to tunnel device, then tunnel vf rep to destination vf rep. to implement such pipeline driver needs to program the hardware after matching on ul rule to overwrite source vport from ul to tunnel vf and recirculate the packet to the root table to allow matching on the rule installed on tunnel vf. for this indirect table matches all encapsulated traffic by tunnel parameters and all other ip traffic is sent to tunnel vf by the miss rule.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c', 'makefile']
6
616
0
- mlx5_esw_indir_table_{init|destroy}() - init and destroy opaque indirect - mlx5_esw_indir_table_get() - get or create new table according to vport - mlx5_esw_indir_table_put() - decrease reference to the indirect table and - mlx5_esw_indir_table_needed() - check that in_port is an uplink port and - mlx5_esw_indir_table_decap_vport() - function returns decap vport of --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/makefile b/drivers/net/ethernet/mellanox/mlx5/core/makefile --- a/drivers/net/ethernet/mellanox/mlx5/core/makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/makefile + esw/indir_table.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h + u8 ip_version; +struct mlx5_rx_tun_attr { + u16 decap_vport; + union { + __be32 v4; + struct in6_addr v6; + } src_ip; /* valid if decap_vport is not zero */ + union { + __be32 v4; + struct in6_addr v6; + } dst_ip; /* valid if decap_vport is not zero */ + u32 vni; +}; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2021 mellanox technologies. 
*/ + +#include <linux/etherdevice.h> +#include <linux/idr.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "mlx5_core.h" +#include "eswitch.h" +#include "en.h" +#include "en_tc.h" +#include "fs_core.h" +#include "esw/indir_table.h" +#include "lib/fs_chains.h" + +#define mlx5_esw_indir_table_size 128 +#define mlx5_esw_indir_table_recirc_idx_max (mlx5_esw_indir_table_size - 2) +#define mlx5_esw_indir_table_fwd_idx (mlx5_esw_indir_table_size - 1) + +struct mlx5_esw_indir_table_rule { + struct list_head list; + struct mlx5_flow_handle *handle; + union { + __be32 v4; + struct in6_addr v6; + } dst_ip; + u32 vni; + struct mlx5_modify_hdr *mh; + refcount_t refcnt; +}; + +struct mlx5_esw_indir_table_entry { + struct hlist_node hlist; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *recirc_grp; + struct mlx5_flow_group *fwd_grp; + struct mlx5_flow_handle *fwd_rule; + struct list_head recirc_rules; + int recirc_cnt; + int fwd_ref; + + u16 vport; + u8 ip_version; +}; + +struct mlx5_esw_indir_table { + struct mutex lock; /* protects table */ + declare_hashtable(table, 8); +}; + +struct mlx5_esw_indir_table * +mlx5_esw_indir_table_init(void) +{ + struct mlx5_esw_indir_table *indir = kvzalloc(sizeof(*indir), gfp_kernel); + + if (!indir) + return err_ptr(-enomem); + + mutex_init(&indir->lock); + hash_init(indir->table); + return indir; +} + +void +mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir) +{ + mutex_destroy(&indir->lock); + kvfree(indir); +} + +bool +mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport_num, + struct mlx5_core_dev *dest_mdev) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + /* use indirect table for all ip traffic from ul to vf with vport + * destination when source rewrite flag is set. 
+ */ + return esw_attr->in_rep->vport == mlx5_vport_uplink && + mlx5_eswitch_is_vf_vport(esw, vport_num) && + esw->dev == dest_mdev && + attr->ip_version && + attr->flags & mlx5_esw_attr_flag_src_rewrite; +} + +u16 +mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0; +} + +static struct mlx5_esw_indir_table_rule * +mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_esw_indir_table_rule *rule; + + list_for_each_entry(rule, &e->recirc_rules, list) + if (rule->vni == attr->rx_tun_attr->vni && + !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip, + sizeof(attr->rx_tun_attr->dst_ip))) + goto found; + return null; + +found: + refcount_inc(&rule->refcnt); + return rule; +} + +static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + struct mlx5_esw_indir_table_entry *e) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_fs_chains *chains = esw_chains(esw); + struct mlx5e_tc_mod_hdr_acts mod_acts = {}; + struct mlx5_flow_destination dest = {}; + struct mlx5_esw_indir_table_rule *rule; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *rule_spec; + struct mlx5_flow_handle *handle; + int err = 0; + u32 data; + + rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr); + if (rule) + return 0; + + if (e->recirc_cnt == mlx5_esw_indir_table_recirc_idx_max) + return -einval; + + rule_spec = kvzalloc(sizeof(*rule_spec), gfp_kernel); + if (!rule_spec) + return -enomem; + + rule = kzalloc(sizeof(*rule), gfp_kernel); + if (!rule) { + err = -enomem; + goto out; + } + + rule_spec->match_criteria_enable = mlx5_match_outer_headers | + mlx5_match_misc_parameters | + mlx5_match_misc_parameters_2; + if (mlx5_cap_flowtable_nic_rx(esw->dev, ft_field_support.outer_ip_version)) { + 
mlx5_set(fte_match_param, rule_spec->match_criteria, + outer_headers.ip_version, 0xf); + mlx5_set(fte_match_param, rule_spec->match_value, outer_headers.ip_version, + attr->ip_version); + } else if (attr->ip_version) { + mlx5_set_to_ones(fte_match_param, rule_spec->match_criteria, + outer_headers.ethertype); + mlx5_set(fte_match_param, rule_spec->match_value, outer_headers.ethertype, + (attr->ip_version == 4 ? eth_p_ip : eth_p_ipv6)); + } else { + err = -eopnotsupp; + goto err_mod_hdr; + } + + if (attr->ip_version == 4) { + mlx5_set_to_ones(fte_match_param, rule_spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + mlx5_set(fte_match_param, rule_spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4, + ntohl(esw_attr->rx_tun_attr->dst_ip.v4)); + } else if (attr->ip_version == 6) { + int len = sizeof(struct in6_addr); + + memset(mlx5_addr_of(fte_match_param, rule_spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, len); + memcpy(mlx5_addr_of(fte_match_param, rule_spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &esw_attr->rx_tun_attr->dst_ip.v6, len); + } + + mlx5_set_to_ones(fte_match_param, rule_spec->match_criteria, + misc_parameters.vxlan_vni); + mlx5_set(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni, + mlx5_get(fte_match_param, spec->match_value, misc_parameters.vxlan_vni)); + + mlx5_set(fte_match_param, rule_spec->match_criteria, + misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); + mlx5_set(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch, + mlx5_vport_uplink)); + + /* modify flow source to recirculate packet */ + data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport); + err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, mlx5_flow_namespace_fdb, + vport_to_reg, data); + if (err) + 
goto err_mod_hdr; + + flow_act.modify_hdr = mlx5_modify_header_alloc(esw->dev, mlx5_flow_namespace_fdb, + mod_acts.num_actions, mod_acts.actions); + if (is_err(flow_act.modify_hdr)) { + err = ptr_err(flow_act.modify_hdr); + goto err_mod_hdr; + } + + flow_act.action = mlx5_flow_context_action_fwd_dest | mlx5_flow_context_action_mod_hdr; + flow_act.flags = flow_act_ignore_flow_level | flow_act_no_append; + dest.type = mlx5_flow_destination_type_flow_table; + dest.ft = mlx5_chains_get_table(chains, 0, 1, 0); + if (!dest.ft) { + err = ptr_err(dest.ft); + goto err_table; + } + handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1); + if (is_err(handle)) { + err = ptr_err(handle); + goto err_handle; + } + + dealloc_mod_hdr_actions(&mod_acts); + rule->handle = handle; + rule->vni = esw_attr->rx_tun_attr->vni; + rule->mh = flow_act.modify_hdr; + memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip, + sizeof(esw_attr->rx_tun_attr->dst_ip)); + refcount_set(&rule->refcnt, 1); + list_add(&rule->list, &e->recirc_rules); + e->recirc_cnt++; + goto out; + +err_handle: + mlx5_chains_put_table(chains, 0, 1, 0); +err_table: + mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr); +err_mod_hdr: + kfree(rule); +out: + kfree(rule_spec); + return err; +} + +static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_esw_indir_table_entry *e) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_fs_chains *chains = esw_chains(esw); + struct mlx5_esw_indir_table_rule *rule; + + list_for_each_entry(rule, &e->recirc_rules, list) + if (rule->vni == esw_attr->rx_tun_attr->vni && + !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip, + sizeof(esw_attr->rx_tun_attr->dst_ip))) + goto found; + + return; + +found: + if (!refcount_dec_and_test(&rule->refcnt)) + return; + + mlx5_del_flow_rules(rule->handle); + mlx5_chains_put_table(chains, 0, 1, 0); + mlx5_modify_header_dealloc(esw->dev, rule->mh); + 
list_del(&rule->list); + kfree(rule); + e->recirc_cnt--; +} + +static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + struct mlx5_esw_indir_table_entry *e) +{ + int err = 0, inlen = mlx5_st_sz_bytes(create_flow_group_in); + u32 *in, *match; + + in = kvzalloc(inlen, gfp_kernel); + if (!in) + return -enomem; + + mlx5_set(create_flow_group_in, in, match_criteria_enable, mlx5_match_outer_headers | + mlx5_match_misc_parameters | mlx5_match_misc_parameters_2); + match = mlx5_addr_of(create_flow_group_in, in, match_criteria); + + if (mlx5_cap_flowtable_nic_rx(esw->dev, ft_field_support.outer_ip_version)) + mlx5_set(fte_match_param, match, outer_headers.ip_version, 0xf); + else + mlx5_set_to_ones(fte_match_param, match, outer_headers.ethertype); + + if (attr->ip_version == 4) { + mlx5_set_to_ones(fte_match_param, match, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + } else if (attr->ip_version == 6) { + memset(mlx5_addr_of(fte_match_param, match, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, sizeof(struct in6_addr)); + } else { + err = -eopnotsupp; + goto out; + } + + mlx5_set_to_ones(fte_match_param, match, misc_parameters.vxlan_vni); + mlx5_set(fte_match_param, match, misc_parameters_2.metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_mask()); + mlx5_set(create_flow_group_in, in, start_flow_index, 0); + mlx5_set(create_flow_group_in, in, end_flow_index, mlx5_esw_indir_table_recirc_idx_max); + e->recirc_grp = mlx5_create_flow_group(e->ft, in); + if (is_err(e->recirc_grp)) { + err = ptr_err(e->recirc_grp); + goto out; + } + + init_list_head(&e->recirc_rules); + e->recirc_cnt = 0; + +out: + kfree(in); + return err; +} + +static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, + struct mlx5_esw_indir_table_entry *e) +{ + int err = 0, inlen = mlx5_st_sz_bytes(create_flow_group_in); + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + 
struct mlx5_flow_spec *spec; + u32 *in; + + in = kvzalloc(inlen, gfp_kernel); + if (!in) + return -enomem; + + spec = kvzalloc(sizeof(*spec), gfp_kernel); + if (!spec) { + kfree(in); + return -enomem; + } + + /* hold one entry */ + mlx5_set(create_flow_group_in, in, start_flow_index, mlx5_esw_indir_table_fwd_idx); + mlx5_set(create_flow_group_in, in, end_flow_index, mlx5_esw_indir_table_fwd_idx); + e->fwd_grp = mlx5_create_flow_group(e->ft, in); + if (is_err(e->fwd_grp)) { + err = ptr_err(e->fwd_grp); + goto err_out; + } + + flow_act.action = mlx5_flow_context_action_fwd_dest; + dest.type = mlx5_flow_destination_type_vport; + dest.vport.num = e->vport; + dest.vport.vhca_id = mlx5_cap_gen(esw->dev, vhca_id); + e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1); + if (is_err(e->fwd_rule)) { + mlx5_destroy_flow_group(e->fwd_grp); + err = ptr_err(e->fwd_rule); + } + +err_out: + kfree(spec); + kfree(in); + return err; +} + +static struct mlx5_esw_indir_table_entry * +mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, u16 vport, bool decap) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_namespace *root_ns; + struct mlx5_esw_indir_table_entry *e; + struct mlx5_flow_table *ft; + int err = 0; + + root_ns = mlx5_get_flow_namespace(esw->dev, mlx5_flow_namespace_fdb); + if (!root_ns) + return err_ptr(-enoent); + + e = kzalloc(sizeof(*e), gfp_kernel); + if (!e) + return err_ptr(-enomem); + + ft_attr.prio = fdb_tc_offload; + ft_attr.max_fte = mlx5_esw_indir_table_size; + ft_attr.flags = mlx5_flow_table_unmanaged; + ft_attr.level = 1; + + ft = mlx5_create_flow_table(root_ns, &ft_attr); + if (is_err(ft)) { + err = ptr_err(ft); + goto tbl_err; + } + e->ft = ft; + e->vport = vport; + e->ip_version = attr->ip_version; + e->fwd_ref = !decap; + + err = mlx5_create_indir_recirc_group(esw, attr, spec, e); + if (err) + goto recirc_grp_err; + + if (decap) { + err = 
mlx5_esw_indir_table_rule_get(esw, attr, spec, e); + if (err) + goto recirc_rule_err; + } + + err = mlx5_create_indir_fwd_group(esw, e); + if (err) + goto fwd_grp_err; + + hash_add(esw->fdb_table.offloads.indir->table, &e->hlist, + vport << 16 | attr->ip_version); + + return e; + +fwd_grp_err: + if (decap) + mlx5_esw_indir_table_rule_put(esw, attr, e); +recirc_rule_err: + mlx5_destroy_flow_group(e->recirc_grp); +recirc_grp_err: + mlx5_destroy_flow_table(e->ft); +tbl_err: + kfree(e); + return err_ptr(err); +} + +static struct mlx5_esw_indir_table_entry * +mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version) +{ + struct mlx5_esw_indir_table_entry *e; + u32 key = vport << 16 | ip_version; + + hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key) + if (e->vport == vport && e->ip_version == ip_version) + return e; + + return null; +} + +struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + u16 vport, bool decap) +{ + struct mlx5_esw_indir_table_entry *e; + int err; + + mutex_lock(&esw->fdb_table.offloads.indir->lock); + e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version); + if (e) { + if (!decap) { + e->fwd_ref++; + } else { + err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e); + if (err) + goto out_err; + } + } else { + e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap); + if (is_err(e)) { + err = ptr_err(e); + esw_warn(esw->dev, "failed to create indirection table, err %d. 
", err); + goto out_err; + } + } + mutex_unlock(&esw->fdb_table.offloads.indir->lock); + return e->ft; + +out_err: + mutex_unlock(&esw->fdb_table.offloads.indir->lock); + return err_ptr(err); +} + +void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport, bool decap) +{ + struct mlx5_esw_indir_table_entry *e; + + mutex_lock(&esw->fdb_table.offloads.indir->lock); + e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version); + if (!e) + goto out; + + if (!decap) + e->fwd_ref--; + else + mlx5_esw_indir_table_rule_put(esw, attr, e); + + if (e->fwd_ref || e->recirc_cnt) + goto out; + + hash_del(&e->hlist); + mlx5_destroy_flow_group(e->recirc_grp); + mlx5_del_flow_rules(e->fwd_rule); + mlx5_destroy_flow_group(e->fwd_grp); + mlx5_destroy_flow_table(e->ft); + kfree(e); +out: + mutex_unlock(&esw->fdb_table.offloads.indir->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2021 mellanox technologies. 
*/ + +#ifndef __mlx5_esw_ft_h__ +#define __mlx5_esw_ft_h__ + +#ifdef config_mlx5_cls_act + +struct mlx5_esw_indir_table * +mlx5_esw_indir_table_init(void); +void +mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir); + +struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + u16 vport, bool decap); +void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport, bool decap); + +bool +mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport_num, + struct mlx5_core_dev *dest_mdev); + +u16 +mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr); + +#else +/* indir api stubs */ +struct mlx5_esw_indir_table * +mlx5_esw_indir_table_init(void) +{ + return null; +} + +void +mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir) +{ +} + +static inline struct mlx5_flow_table * +mlx5_esw_indir_table_get(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + u16 vport, bool decap) +{ + return err_ptr(-eopnotsupp); +} + +static inline void +mlx5_esw_indir_table_put(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport, bool decap) +{ +} + +bool +mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + u16 vport_num, + struct mlx5_core_dev *dest_mdev) +{ + return false; +} + +static inline u16 +mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr) +{ + return 0; +} +#endif + +#endif /* __mlx5_esw_ft_h__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +struct mlx5_esw_indir_table; + + struct mlx5_esw_indir_table *indir; + + struct mlx5_rx_tun_attr *rx_tun_attr; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +#include "esw/indir_table.h" + struct mlx5_esw_indir_table *indir; + indir = mlx5_esw_indir_table_init(); + if (is_err(indir)) { + err = ptr_err(indir); + goto create_indir_err; + } + esw->fdb_table.offloads.indir = indir; + + mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir); +create_indir_err: + mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
Networking
34ca65352ddf2aaa094f4016369103c4c7b98958
vlad buslov
drivers
net
core, esw, ethernet, mellanox, mlx5
net/mlx5e: remove redundant match on tunnel destination mac
remove hardcoded match on tunnel destination mac address. such match is no longer required and would be wrong for stacked devices topology where encapsulation destination mac address will be the address of tunnel vf that can change dynamically on route change (implemented in following patches in the series).
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['c']
1
0
8
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c - /* enforce dmac when offloading incoming tunneled flows. - * flow counters require a match on the dmac. - */ - mlx5_set_to_ones(fte_match_set_lyr_2_4, headers_c, dmac_47_16); - mlx5_set_to_ones(fte_match_set_lyr_2_4, headers_c, dmac_15_0); - ether_addr_copy(mlx5_addr_of(fte_match_set_lyr_2_4, headers_v, - dmac_47_16), priv->netdev->dev_addr); -
Networking
4ad9116c84ed3243f7b706f07646a995f3bca502
vlad buslov roi dayan roid nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: vf tunnel rx traffic offloading
when tunnel endpoint is on vf the encapsulated rx traffic is exposed on the representor of the vf without any further processing of rules installed on the vf. detect such case by checking if the device returned by route lookup in decap rule handling code is a mlx5 vf and handle it with new redirection tables api.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
271
8
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *flow_attr) +{ + struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; + tc_tun_route_attr_init(attr); + u16 vport_num; + int err = 0; + + if (flow_attr->ip_version == 4) { + /* addresses are swapped for decap */ + attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4; + attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4; + err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr); + } +#if is_enabled(config_inet) && is_enabled(config_ipv6) + else if (flow_attr->ip_version == 6) { + /* addresses are swapped for decap */ + attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6; + attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6; + err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr); + } +#endif + else + return 0; + + if (err) + return err; + + if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev)) + goto out; + + err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num); + if (err) + goto out; + + esw_attr->rx_tun_attr->vni = mlx5_get(fte_match_param, spec->match_value, + misc_parameters.vxlan_vni); + esw_attr->rx_tun_attr->decap_vport = vport_num; + +out: + if (flow_attr->ip_version == 4) + mlx5e_route_lookup_ipv4_put(&attr); +#if is_enabled(config_inet) && is_enabled(config_ipv6) + else if (flow_attr->ip_version == 6) + mlx5e_route_lookup_ipv6_put(&attr); +#endif + return err; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +int 
mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c -static bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev) +bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev) -static int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, - u16 *vport) +int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport) + kvfree(attr->esw_attr->rx_tun_attr); +static u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer) +{ + void *headers_v; + u16 ethertype; + u8 ip_version; + + if (outer) + headers_v = mlx5_addr_of(fte_match_param, spec->match_value, outer_headers); + else + headers_v = mlx5_addr_of(fte_match_param, spec->match_value, inner_headers); + + ip_version = mlx5_get(fte_match_set_lyr_2_4, headers_v, ip_version); + /* return ip_version converted from ethertype anyway */ + if (!ip_version) { + ethertype = mlx5_get(fte_match_set_lyr_2_4, headers_v, ethertype); + if (ethertype == eth_p_ip || ethertype == eth_p_arp) + ip_version = 4; + else if (ethertype == eth_p_ipv6) + ip_version = 6; + } + return ip_version; +} + +static int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec) +{ + struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; + struct mlx5_rx_tun_attr *tun_attr; + void *daddr, *saddr; + u8 ip_version; + + tun_attr = kvzalloc(sizeof(*tun_attr), gfp_kernel); + if (!tun_attr) + return -enomem; + + esw_attr->rx_tun_attr = tun_attr; + ip_version = mlx5e_tc_get_ip_version(spec, true); + + if (ip_version == 4) { + daddr = mlx5_addr_of(fte_match_param, spec->match_value, + 
outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + saddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + tun_attr->dst_ip.v4 = *(__be32 *)daddr; + tun_attr->src_ip.v4 = *(__be32 *)saddr; + } +#if is_enabled(config_inet) && is_enabled(config_ipv6) + else if (ip_version == 6) { + int ipv6_size = mlx5_fld_sz_bytes(ipv6_layout, ipv6); + + daddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + saddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6); + memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size); + memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size); + } +#endif + return 0; +} + + struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev); + err = mlx5e_tc_set_attr_rx_tun(flow, spec); + if (err) + return err; + } else if (tunnel && tunnel->tunnel_type == mlx5e_tc_tunnel_type_vxlan) { + struct mlx5_flow_spec *tmp_spec; + + tmp_spec = kvzalloc(sizeof(*tmp_spec), gfp_kernel); + if (!tmp_spec) { + nl_set_err_msg_mod(extack, "failed to allocate memory for vxlan tmp spec"); + netdev_warn(priv->netdev, "failed to allocate memory for vxlan tmp spec"); + return -enomem; + } + memcpy(tmp_spec, spec, sizeof(*tmp_spec)); + + err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level); + if (err) { + kvfree(tmp_spec); + nl_set_err_msg_mod(extack, "failed to parse tunnel attributes"); + netdev_warn(priv->netdev, "failed to parse tunnel attributes"); + return err; + } + err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec); + kvfree(tmp_spec); + if (err) + return err; + if (decap && esw_attr->rx_tun_attr) { + err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr); + if (err) + return err; + } + + /* always set ip version for indirect table handling */ + attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev); +int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, + u16 *vport); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +#include "esw/indir_table.h" + struct mlx5_flow_attr *attr, + if (mlx5_esw_indir_table_decap_vport(attr)) + vport = mlx5_esw_indir_table_decap_vport(attr); +static int +esw_setup_decap_indir(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec) +{ + struct mlx5_flow_table *ft; + + if (!(attr->flags & mlx5_esw_attr_flag_src_rewrite)) + return -eopnotsupp; + + ft = mlx5_esw_indir_table_get(esw, attr, spec, + mlx5_esw_indir_table_decap_vport(attr), true); + return ptr_err_or_zero(ft); +} + +esw_cleanup_decap_indir(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr) +{ + if (mlx5_esw_indir_table_decap_vport(attr)) + mlx5_esw_indir_table_put(esw, attr, + mlx5_esw_indir_table_decap_vport(attr), + true); +} + +static int + struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + + if (mlx5_esw_indir_table_decap_vport(attr)) + return esw_setup_decap_indir(esw, attr, spec); + return 0; + else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport, + esw_attr->dests[i].mdev)) + mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport, + false); +static bool +esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int i; + + for (i = esw_attr->split_count; i < esw_attr->out_count; i++) + if (mlx5_esw_indir_table_needed(esw, 
attr, esw_attr->dests[i].rep->vport, + esw_attr->dests[i].mdev)) + return true; + return false; +} + +static int +esw_setup_indir_table(struct mlx5_flow_destination *dest, + struct mlx5_flow_act *flow_act, + struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5_flow_spec *spec, + bool ignore_flow_lvl, + int *i) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + int j, err; + + if (!(attr->flags & mlx5_esw_attr_flag_src_rewrite)) + return -eopnotsupp; + + for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) { + if (ignore_flow_lvl) + flow_act->flags |= flow_act_ignore_flow_level; + dest[*i].type = mlx5_flow_destination_type_flow_table; + + dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec, + esw_attr->dests[j].rep->vport, false); + if (is_err(dest[*i].ft)) { + err = ptr_err(dest[*i].ft); + goto err_indir_tbl_get; + } + } + + if (mlx5_esw_indir_table_decap_vport(attr)) { + err = esw_setup_decap_indir(esw, attr, spec); + if (err) + goto err_indir_tbl_get; + } + + return 0; + +err_indir_tbl_get: + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j); + return err; +} + +static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + + esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count); + esw_cleanup_decap_indir(esw, attr); +} + - esw_setup_ft_dest(dest, flow_act, attr, *i); + esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); + } else if (esw_is_indir_table(esw, attr)) { + err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i); - if (!(attr->flags & mlx5_esw_attr_flag_slow_path)) { + if (attr->dest_ft) { + esw_cleanup_decap_indir(esw, attr); + } else if (!(attr->flags & mlx5_esw_attr_flag_slow_path)) { + else if (esw_is_indir_table(esw, attr)) + esw_cleanup_indir_table(esw, attr); - mlx5_eswitch_set_rule_source_port(esw, spec, + mlx5_eswitch_set_rule_source_port(esw, spec, 
attr, - if (esw_is_chain_src_port_rewrite(esw, esw_attr)) + if (esw_is_indir_table(esw, attr)) + err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i); + else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) - mlx5_eswitch_set_rule_source_port(esw, spec, + mlx5_eswitch_set_rule_source_port(esw, spec, attr,
Networking
a508728a4c8bfaf15839d5b23c19bf6b9908d43d
vlad buslov
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: refactor reg_c1 usage
following patch in series uses reg_c1 in eswitch code. to use reg_c1 helpers in both tc and eswitch code, refactor existing helpers according to similar use case of reg_c0 and move the functionality into eswitch.h. calculate reg mappings length from new defines to ensure that they are always in sync and only need to be changed in single place.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
26
9
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c - zone_restore_id = reg_c1 & zone_restore_max; + zone_restore_id = reg_c1 & esw_zone_id_mask; - tunnel_id = reg_c1 >> reg_mapping_shift(tunnel_to_reg); + tunnel_id = reg_c1 >> esw_tun_offset; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h - .mlen = 1,\ + .mlen = (esw_zone_id_bits / 8),\ - .mlen = 1,\ + .mlen = (esw_zone_id_bits / 8),\ -#define zone_restore_bits (reg_mapping_mlen(zone_restore_to_reg) * 8) -#define zone_restore_max genmask(zone_restore_bits - 1, 0) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c - .mlen = 3, + .mlen = ((esw_tun_opts_bits + esw_tun_id_bits) / 8), - zone_restore_max; + esw_zone_id_mask; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h - if (reg_b >> (mlx5e_tc_table_chain_tag_bits + zone_restore_bits)) + if (reg_b >> (mlx5e_tc_table_chain_tag_bits + esw_zone_id_bits)) diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h + +/* reg c1 usage: + * reg c1 = < esw_tun_id(12) | esw_tun_opts(12) | esw_zone_id(8) > + * + * highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is + * encapsulation tunnel options, and the lowest 8 bits are used for zone id. 
+ * + * zone id is used to restore ct flow when packet misses on chain. + * + * tunnel id and options are used together to restore the tunnel info metadata + * on miss and to support inner header rewrite by means of implicit chain 0 + * flows. + */ +#define esw_zone_id_bits 8 +#define esw_tun_opts_bits 12 +#define esw_tun_id_bits 12 +#define esw_tun_offset esw_zone_id_bits +#define esw_zone_id_mask genmask(esw_zone_id_bits - 1, 0) +
Networking
48d216e5596a58e3cfa6d4548343f982c5921b79
vlad buslov roi dayan roid nvidia com
include
linux
core, en, ethernet, mellanox, mlx5, rep
net/mlx5e: match recirculated packet miss in slow table using reg_c1
previous patch in series that implements stack devices rx path implements indirect table rules that match on tunnel vni. after such rule is created all tunnel traffic is recirculated to root table. however, recirculated packet might not match on any rules installed in the table (for example, when ip traffic follows arp traffic). in that case packets appear on representor of tunnel endpoint vf instead being redirected to the vf itself.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
143
8
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c - mapping = mapping_create(sz_enc_opts, enc_opts_bits_mask, true); + /* 0xfff is reserved for stack devices slow path table mark */ + mapping = mapping_create(sz_enc_opts, enc_opts_bits_mask - 1, true); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c - goto err_mod_hdr; + goto err_ethertype; - goto err_mod_hdr; + goto err_mod_hdr_regc0; + + err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, mlx5_flow_namespace_fdb, + tunnel_to_reg, esw_tun_slow_table_goto_vport); + if (err) + goto err_mod_hdr_regc1; - goto err_mod_hdr; + goto err_mod_hdr_alloc; -err_mod_hdr: +err_mod_hdr_alloc: +err_mod_hdr_regc1: + dealloc_mod_hdr_actions(&mod_acts); +err_mod_hdr_regc0: +err_ethertype: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h + struct mlx5_flow_group *send_to_vport_meta_grp; + struct mlx5_flow_handle **send_to_vport_meta_rules; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules; + int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num; + + if (!num_vfs || !flows) + return; + + 
mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) + mlx5_del_flow_rules(flows[i++]); + + kvfree(flows); +} + +static int +mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw) +{ + int num_vfs, vport_num, rule_idx = 0, err = 0; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_handle **flows; + struct mlx5_flow_spec *spec; + + num_vfs = esw->esw_funcs.num_vfs; + flows = kvzalloc(num_vfs * sizeof(*flows), gfp_kernel); + if (!flows) + return -enomem; + + spec = kvzalloc(sizeof(*spec), gfp_kernel); + if (!spec) { + err = -enomem; + goto alloc_err; + } + + mlx5_set(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); + mlx5_set(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_1, esw_tun_mask); + mlx5_set(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1, + esw_tun_slow_table_goto_vport_mark); + + spec->match_criteria_enable = mlx5_match_misc_parameters_2; + dest.type = mlx5_flow_destination_type_vport; + flow_act.action = mlx5_flow_context_action_fwd_dest; + + mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) { + mlx5_set(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); + dest.vport.num = vport_num; + + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (is_err(flow_rule)) { + err = ptr_err(flow_rule); + esw_warn(esw->dev, "fdb: failed to add send to vport meta rule idx %d, err %ld ", + rule_idx, ptr_err(flow_rule)); + goto rule_err; + } + flows[rule_idx++] = flow_rule; + } + + esw->fdb_table.offloads.send_to_vport_meta_rules = flows; + kvfree(spec); + return 0; + +rule_err: + while (--rule_idx >= 0) + mlx5_del_flow_rules(flows[rule_idx]); + kvfree(spec); +alloc_err: + kvfree(flows); + return err; +} + + int 
num_vfs, table_size, ix, err = 0; - int table_size, ix, err = 0; - mlx5_esw_miss_flows + esw->total_vports; + mlx5_esw_miss_flows + esw->total_vports + esw->esw_funcs.num_vfs; + /* meta send to vport */ + memset(flow_group_in, 0, inlen); + mlx5_set(create_flow_group_in, flow_group_in, match_criteria_enable, + mlx5_match_misc_parameters_2); + + match_criteria = mlx5_addr_of(create_flow_group_in, flow_group_in, match_criteria); + + mlx5_set(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); + mlx5_set(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_1, esw_tun_mask); + + num_vfs = esw->esw_funcs.num_vfs; + if (num_vfs) { + mlx5_set(create_flow_group_in, flow_group_in, start_flow_index, ix); + mlx5_set(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1); + ix += num_vfs; + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (is_err(g)) { + err = ptr_err(g); + esw_warn(dev, "failed to create send-to-vport meta flow group err(%d) ", + err); + goto send_vport_meta_err; + } + esw->fdb_table.offloads.send_to_vport_meta_grp = g; + + err = mlx5_eswitch_add_send_to_vport_meta_rules(esw); + if (err) + goto meta_rule_err; + } + + mlx5_eswitch_del_send_to_vport_meta_rules(esw); +meta_rule_err: + if (esw->fdb_table.offloads.send_to_vport_meta_grp) + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); +send_vport_meta_err: + mlx5_eswitch_del_send_to_vport_meta_rules(esw); + if (esw->fdb_table.offloads.send_to_vport_meta_grp) + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h -#define esw_tun_offset esw_zone_id_bits +#define esw_tun_opts_offset esw_zone_id_bits +#define esw_tun_offset esw_tun_opts_offset +#define esw_tun_opts_mask genmask(32 - esw_tun_id_bits - 1, esw_tun_opts_offset) +#define 
esw_tun_mask genmask(31, esw_tun_offset) +#define esw_tun_id_slow_table_goto_vport 0 /* 0 is not a valid tunnel id */ +#define esw_tun_opts_slow_table_goto_vport 0xfff /* 0xfff is a reserved mapping */ +#define esw_tun_slow_table_goto_vport ((esw_tun_id_slow_table_goto_vport << esw_tun_opts_bits) | \ + esw_tun_opts_slow_table_goto_vport) +#define esw_tun_slow_table_goto_vport_mark esw_tun_opts_mask
Networking
8e404fefa58b6138531e3d4b5647ee79f75ae9a8
vlad buslov
drivers
net
core, esw, ethernet, mellanox, mlx5
net/mlx5e: extract tc tunnel encap/decap code to dedicated file
following patches in series extend the extracted code with routing infrastructure. to improve code modularity created a dedicated tc_tun_encap.c source file and move encap/decap related code to the new file. export code that is used by both regular tc code and encap/decap code into tc_priv.h (new header intended to be used only by tc module). rename some exported functions by adding "mlx5e_" prefix to their names.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c', 'makefile']
7
947
885
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/makefile b/drivers/net/ethernet/mellanox/mlx5/core/makefile --- a/drivers/net/ethernet/mellanox/mlx5/core/makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/makefile - esw/indir_table.o \ + esw/indir_table.o en/tc_tun_encap.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2021 mellanox technologies. */ + +#ifndef __mlx5_en_tc_priv_h__ +#define __mlx5_en_tc_priv_h__ + +#include "en_tc.h" + +#define mlx5e_tc_flow_base (mlx5e_tc_flag_last_exported_bit + 1) + +#define mlx5e_tc_max_splits 1 + +enum { + mlx5e_tc_flow_flag_ingress = mlx5e_tc_flag_ingress_bit, + mlx5e_tc_flow_flag_egress = mlx5e_tc_flag_egress_bit, + mlx5e_tc_flow_flag_eswitch = mlx5e_tc_flag_esw_offload_bit, + mlx5e_tc_flow_flag_ft = mlx5e_tc_flag_ft_offload_bit, + mlx5e_tc_flow_flag_nic = mlx5e_tc_flag_nic_offload_bit, + mlx5e_tc_flow_flag_offloaded = mlx5e_tc_flow_base, + mlx5e_tc_flow_flag_hairpin = mlx5e_tc_flow_base + 1, + mlx5e_tc_flow_flag_hairpin_rss = mlx5e_tc_flow_base + 2, + mlx5e_tc_flow_flag_slow = mlx5e_tc_flow_base + 3, + mlx5e_tc_flow_flag_dup = mlx5e_tc_flow_base + 4, + mlx5e_tc_flow_flag_not_ready = mlx5e_tc_flow_base + 5, + mlx5e_tc_flow_flag_deleted = mlx5e_tc_flow_base + 6, + mlx5e_tc_flow_flag_ct = mlx5e_tc_flow_base + 7, + mlx5e_tc_flow_flag_l3_to_l2_decap = mlx5e_tc_flow_base + 8, +}; + +struct mlx5e_tc_flow_parse_attr { + const struct ip_tunnel_info *tun_info[mlx5_max_flow_fwd_vports]; + struct net_device *filter_dev; + struct mlx5_flow_spec spec; + struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; + int mirred_ifindex[mlx5_max_flow_fwd_vports]; + struct ethhdr eth; +}; + +/* helper struct for accessing a struct containing list_head array. 
+ * containing struct + * |- helper array + * [0] helper item 0 + * |- list_head item 0 + * |- index (0) + * [1] helper item 1 + * |- list_head item 1 + * |- index (1) + * to access the containing struct from one of the list_head items: + * 1. get the helper item from the list_head item using + * helper item = + * container_of(list_head item, helper struct type, list_head field) + * 2. get the contining struct from the helper item and its index in the array: + * containing struct = + * container_of(helper item, containing struct type, helper field[index]) + */ +struct encap_flow_item { + struct mlx5e_encap_entry *e; /* attached encap instance */ + struct list_head list; + int index; +}; + +struct mlx5e_tc_flow { + struct rhash_head node; + struct mlx5e_priv *priv; + u64 cookie; + unsigned long flags; + struct mlx5_flow_handle *rule[mlx5e_tc_max_splits + 1]; + + /* flows sharing the same reformat object - currently mpls decap */ + struct list_head l3_to_l2_reformat; + struct mlx5e_decap_entry *decap_reformat; + + /* flow can be associated with multiple encap ids. + * the number of encaps is bounded by the number of supported + * destinations. 
+ */ + struct encap_flow_item encaps[mlx5_max_flow_fwd_vports]; + struct mlx5e_tc_flow *peer_flow; + struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ + struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ + struct list_head hairpin; /* flows sharing the same hairpin */ + struct list_head peer; /* flows with peer flow */ + struct list_head unready; /* flows not ready to be offloaded (e.g + * due to missing route) + */ + struct net_device *orig_dev; /* netdev adding flow first */ + int tmp_efi_index; + struct list_head tmp_list; /* temporary flow list used by neigh update */ + refcount_t refcnt; + struct rcu_head rcu_head; + struct completion init_done; + int tunnel_id; /* the mapped tunnel id of this flow */ + struct mlx5_flow_attr *attr; +}; + +u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer); + +struct mlx5_flow_handle * +mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr); + +bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow); + +static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* complete all memory stores before setting bit. */ + smp_mb__before_atomic(); + set_bit(flag, &flow->flags); +} + +#define flow_flag_set(flow, flag) __flow_flag_set(flow, mlx5e_tc_flow_flag_##flag) + +static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, + unsigned long flag) +{ + /* test_and_set_bit() provides all necessary barriers */ + return test_and_set_bit(flag, &flow->flags); +} + +#define flow_flag_test_and_set(flow, flag) \ + __flow_flag_test_and_set(flow, \ + mlx5e_tc_flow_flag_##flag) + +static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + /* complete all memory stores before clearing bit. 
*/ + smp_mb__before_atomic(); + clear_bit(flag, &flow->flags); +} + +#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ + mlx5e_tc_flow_flag_##flag) + +static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) +{ + bool ret = test_bit(flag, &flow->flags); + + /* read fields of flow structure only after checking flags. */ + smp_mb__after_atomic(); + return ret; +} + +#define flow_flag_test(flow, flag) __flow_flag_test(flow, \ + mlx5e_tc_flow_flag_##flag) + +void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow); +struct mlx5_flow_handle * +mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec); +void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr); + +struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow); +void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); + +struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow); + +#endif /* __mlx5_en_tc_priv_h__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +#include "en/tc_priv.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +// spdx-license-identifier: gpl-2.0 or linux-openib +/* copyright (c) 2021 mellanox technologies. 
*/ + +#include "tc_tun_encap.h" +#include "en_tc.h" +#include "tc_tun.h" +#include "rep/tc.h" +#include "diag/en_tc_tracepoint.h" + +int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec) +{ + struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; + struct mlx5_rx_tun_attr *tun_attr; + void *daddr, *saddr; + u8 ip_version; + + tun_attr = kvzalloc(sizeof(*tun_attr), gfp_kernel); + if (!tun_attr) + return -enomem; + + esw_attr->rx_tun_attr = tun_attr; + ip_version = mlx5e_tc_get_ip_version(spec, true); + + if (ip_version == 4) { + daddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + saddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + tun_attr->dst_ip.v4 = *(__be32 *)daddr; + tun_attr->src_ip.v4 = *(__be32 *)saddr; + } +#if is_enabled(config_inet) && is_enabled(config_ipv6) + else if (ip_version == 6) { + int ipv6_size = mlx5_fld_sz_bytes(ipv6_layout, ipv6); + + daddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6); + saddr = mlx5_addr_of(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6); + memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size); + memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size); + } +#endif + return 0; +} + +void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct list_head *flow_list) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *attr; + struct mlx5_flow_spec *spec; + struct mlx5e_tc_flow *flow; + int err; + + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + e->encap_size, e->encap_header, + mlx5_flow_namespace_fdb); + if (is_err(e->pkt_reformat)) { + mlx5_core_warn(priv->mdev, "failed to offload cached encapsulation header, %lu ", + 
ptr_err(e->pkt_reformat)); + return; + } + e->flags |= mlx5_encap_entry_valid; + mlx5e_rep_queue_neigh_stats_work(priv); + + list_for_each_entry(flow, flow_list, tmp_list) { + bool all_flow_encaps_valid = true; + int i; + + if (!mlx5e_is_offloaded_flow(flow)) + continue; + attr = flow->attr; + esw_attr = attr->esw_attr; + spec = &attr->parse_attr->spec; + + esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat; + esw_attr->dests[flow->tmp_efi_index].flags |= mlx5_esw_dest_encap_valid; + /* flow can be associated with multiple encap entries. + * before offloading the flow verify that all of them have + * a valid neighbour. + */ + for (i = 0; i < mlx5_max_flow_fwd_vports; i++) { + if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap)) + continue; + if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap_valid)) { + all_flow_encaps_valid = false; + break; + } + } + /* do not offload flows with unresolved neighbors */ + if (!all_flow_encaps_valid) + continue; + /* update from slow path rule to encap rule */ + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + if (is_err(rule)) { + err = ptr_err(rule); + mlx5_core_warn(priv->mdev, "failed to update cached encapsulation flow, %d ", + err); + continue; + } + + mlx5e_tc_unoffload_from_slow_path(esw, flow); + flow->rule[0] = rule; + /* was unset when slow path rule removed */ + flow_flag_set(flow, offloaded); + } +} + +void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct list_head *flow_list) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *attr; + struct mlx5_flow_spec *spec; + struct mlx5e_tc_flow *flow; + int err; + + list_for_each_entry(flow, flow_list, tmp_list) { + if (!mlx5e_is_offloaded_flow(flow)) + continue; + attr = flow->attr; + esw_attr = attr->esw_attr; + spec = &attr->parse_attr->spec; + + /* update from encap rule to slow path rule */ + rule = 
mlx5e_tc_offload_to_slow_path(esw, flow, spec); + /* mark the flow's encap dest as non-valid */ + esw_attr->dests[flow->tmp_efi_index].flags &= ~mlx5_esw_dest_encap_valid; + + if (is_err(rule)) { + err = ptr_err(rule); + mlx5_core_warn(priv->mdev, "failed to update slow path (encap) flow, %d ", + err); + continue; + } + + mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); + flow->rule[0] = rule; + /* was unset when fast path rule removed */ + flow_flag_set(flow, offloaded); + } + + /* we know that the encap is valid */ + e->flags &= ~mlx5_encap_entry_valid; + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); +} + +/* takes reference to all flows attached to encap and adds the flows to + * flow_list using 'tmp_list' list_head in mlx5e_tc_flow. + */ +void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list) +{ + struct encap_flow_item *efi; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(efi, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + if (is_err(mlx5e_flow_get(flow))) + continue; + wait_for_completion(&flow->init_done); + + flow->tmp_efi_index = efi->index; + list_add(&flow->tmp_list, flow_list); + } +} + +static struct mlx5e_encap_entry * +mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e) +{ + struct mlx5e_encap_entry *next = null; + +retry: + rcu_read_lock(); + + /* find encap with non-zero reference counter value */ + for (next = e ? 
+ list_next_or_null_rcu(&nhe->encap_list, + &e->encap_list, + struct mlx5e_encap_entry, + encap_list) : + list_first_or_null_rcu(&nhe->encap_list, + struct mlx5e_encap_entry, + encap_list); + next; + next = list_next_or_null_rcu(&nhe->encap_list, + &next->encap_list, + struct mlx5e_encap_entry, + encap_list)) + if (mlx5e_encap_take(next)) + break; + + rcu_read_unlock(); + + /* release starting encap */ + if (e) + mlx5e_encap_put(netdev_priv(e->out_dev), e); + if (!next) + return next; + + /* wait for encap to be fully initialized */ + wait_for_completion(&next->res_ready); + /* continue searching if encap entry is not in valid state after completion */ + if (!(next->flags & mlx5_encap_entry_valid)) { + e = next; + goto retry; + } + + return next; +} + +void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) +{ + struct mlx5e_neigh *m_neigh = &nhe->m_neigh; + struct mlx5e_encap_entry *e = null; + struct mlx5e_tc_flow *flow; + struct mlx5_fc *counter; + struct neigh_table *tbl; + bool neigh_used = false; + struct neighbour *n; + u64 lastuse; + + if (m_neigh->family == af_inet) + tbl = &arp_tbl; +#if is_enabled(config_ipv6) + else if (m_neigh->family == af_inet6) + tbl = ipv6_stub->nd_tbl; +#endif + else + return; + + /* mlx5e_get_next_valid_encap() releases previous encap before returning + * next one. 
+ */ + while ((e = mlx5e_get_next_valid_encap(nhe, e)) != null) { + struct mlx5e_priv *priv = netdev_priv(e->out_dev); + struct encap_flow_item *efi, *tmp; + struct mlx5_eswitch *esw; + list_head(flow_list); + + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); + list_for_each_entry_safe(efi, tmp, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, + encaps[efi->index]); + if (is_err(mlx5e_flow_get(flow))) + continue; + list_add(&flow->tmp_list, &flow_list); + + if (mlx5e_is_offloaded_flow(flow)) { + counter = mlx5e_tc_get_counter(flow); + lastuse = mlx5_fc_query_lastuse(counter); + if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { + neigh_used = true; + break; + } + } + } + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_put_encap_flow_list(priv, &flow_list); + if (neigh_used) { + /* release current encap before breaking the loop */ + mlx5e_encap_put(priv, e); + break; + } + } + + trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used); + + if (neigh_used) { + nhe->reported_lastuse = jiffies; + + /* find the relevant neigh according to the cached device and + * dst ip pair + */ + n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); + if (!n) + return; + + neigh_event_send(n, null); + neigh_release(n); + } +} + +static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) +{ + warn_on(!list_empty(&e->flows)); + + if (e->compl_result > 0) { + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); + + if (e->flags & mlx5_encap_entry_valid) + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); + } + + kfree(e->tun_info); + kfree(e->encap_header); + kfree_rcu(e, rcu); +} + +static void mlx5e_decap_dealloc(struct mlx5e_priv *priv, + struct mlx5e_decap_entry *d) +{ + warn_on(!list_empty(&d->flows)); + + if (!d->compl_result) + mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat); + + kfree_rcu(d, rcu); +} + +void mlx5e_encap_put(struct mlx5e_priv *priv, struct 
mlx5e_encap_entry *e) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock)) + return; + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); +} + +static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock)) + return; + hash_del_rcu(&d->hlist); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + mlx5e_decap_dealloc(priv, d); +} + +void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, int out_index) +{ + struct mlx5e_encap_entry *e = flow->encaps[out_index].e; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + /* flow wasn't fully initialized */ + if (!e) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + list_del(&flow->encaps[out_index].list); + flow->encaps[out_index].e = null; + if (!refcount_dec_and_test(&e->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; + } + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); +} + +void mlx5e_detach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_decap_entry *d = flow->decap_reformat; + + if (!d) + return; + + mutex_lock(&esw->offloads.decap_tbl_lock); + list_del(&flow->l3_to_l2_reformat); + flow->decap_reformat = null; + + if (!refcount_dec_and_test(&d->refcnt)) { + mutex_unlock(&esw->offloads.decap_tbl_lock); + return; + } + hash_del_rcu(&d->hlist); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + mlx5e_decap_dealloc(priv, d); +} + +struct encap_key { + const struct ip_tunnel_key *ip_tun_key; + struct mlx5e_tc_tunnel *tc_tunnel; +}; + +static int cmp_encap_info(struct encap_key *a, + struct encap_key *b) +{ + return 
memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || + a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type; +} + +static int cmp_decap_info(struct mlx5e_decap_key *a, + struct mlx5e_decap_key *b) +{ + return memcmp(&a->key, &b->key, sizeof(b->key)); +} + +static int hash_encap_info(struct encap_key *key) +{ + return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), + key->tc_tunnel->tunnel_type); +} + +static int hash_decap_info(struct mlx5e_decap_key *key) +{ + return jhash(&key->key, sizeof(key->key), 0); +} + +bool mlx5e_encap_take(struct mlx5e_encap_entry *e) +{ + return refcount_inc_not_zero(&e->refcnt); +} + +static bool mlx5e_decap_take(struct mlx5e_decap_entry *e) +{ + return refcount_inc_not_zero(&e->refcnt); +} + +static struct mlx5e_encap_entry * +mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, + uintptr_t hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_entry *e; + struct encap_key e_key; + + hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, + encap_hlist, hash_key) { + e_key.ip_tun_key = &e->tun_info->key; + e_key.tc_tunnel = e->tunnel; + if (!cmp_encap_info(&e_key, key) && + mlx5e_encap_take(e)) + return e; + } + + return null; +} + +static struct mlx5e_decap_entry * +mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key, + uintptr_t hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_decap_key r_key; + struct mlx5e_decap_entry *e; + + hash_for_each_possible_rcu(esw->offloads.decap_tbl, e, + hlist, hash_key) { + r_key = e->key; + if (!cmp_decap_info(&r_key, key) && + mlx5e_decap_take(e)) + return e; + } + return null; +} + +struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info) +{ + size_t tun_size = sizeof(*tun_info) + tun_info->options_len; + + return kmemdup(tun_info, tun_size, gfp_kernel); +} + +static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index, 
+ struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + int i; + + for (i = 0; i < out_index; i++) { + if (flow->encaps[i].e != e) + continue; + nl_set_err_msg_mod(extack, "can't duplicate encap action"); + netdev_err(priv->netdev, "can't duplicate encap action "); + return true; + } + + return false; +} + +static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw, + struct mlx5_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + struct net_device *out_dev, + int route_dev_ifindex, + int out_index) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct net_device *route_dev; + u16 vport_num; + int err = 0; + u32 data; + + route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex); + + if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) + goto out; + + err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num); + if (err) + goto out; + + attr->dest_chain = 0; + attr->action |= mlx5_flow_context_action_mod_hdr; + esw_attr->dests[out_index].flags |= mlx5_esw_dest_chain_with_src_port_change; + data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch, + vport_num); + err = mlx5e_tc_match_to_reg_set(esw->dev, mod_hdr_acts, + mlx5_flow_namespace_fdb, vport_to_reg, data); + if (err) + goto out; + +out: + if (route_dev) + dev_put(route_dev); + return err; +} + +int mlx5e_attach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, + int out_index, + struct netlink_ext_ack *extack, + struct net_device **encap_dev, + bool *encap_valid) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + const struct ip_tunnel_info *tun_info; + struct encap_key key; + struct mlx5e_encap_entry *e; + unsigned short family; + uintptr_t hash_key; + int err = 0; + + parse_attr = attr->parse_attr; + tun_info = 
parse_attr->tun_info[out_index]; + family = ip_tunnel_info_af(tun_info); + key.ip_tun_key = &tun_info->key; + key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); + if (!key.tc_tunnel) { + nl_set_err_msg_mod(extack, "unsupported tunnel"); + return -eopnotsupp; + } + + hash_key = hash_encap_info(&key); + + mutex_lock(&esw->offloads.encap_tbl_lock); + e = mlx5e_encap_get(priv, &key, hash_key); + + /* must verify if encap is valid or not */ + if (e) { + /* check that entry was not already attached to this flow */ + if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { + err = -eopnotsupp; + goto out_err; + } + + mutex_unlock(&esw->offloads.encap_tbl_lock); + wait_for_completion(&e->res_ready); + + /* protect against concurrent neigh update. */ + mutex_lock(&esw->offloads.encap_tbl_lock); + if (e->compl_result < 0) { + err = -eremoteio; + goto out_err; + } + goto attach_flow; + } + + e = kzalloc(sizeof(*e), gfp_kernel); + if (!e) { + err = -enomem; + goto out_err; + } + + refcount_set(&e->refcnt, 1); + init_completion(&e->res_ready); + + tun_info = mlx5e_dup_tun_info(tun_info); + if (!tun_info) { + err = -enomem; + goto out_err_init; + } + e->tun_info = tun_info; + err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); + if (err) + goto out_err_init; + + init_list_head(&e->flows); + hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + if (family == af_inet) + err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); + else if (family == af_inet6) + err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); + + /* protect against concurrent neigh update. 
*/ + mutex_lock(&esw->offloads.encap_tbl_lock); + complete_all(&e->res_ready); + if (err) { + e->compl_result = err; + goto out_err; + } + e->compl_result = 1; + +attach_flow: + err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev, + e->route_dev_ifindex, out_index); + if (err) + goto out_err; + + flow->encaps[out_index].e = e; + list_add(&flow->encaps[out_index].list, &e->flows); + flow->encaps[out_index].index = out_index; + *encap_dev = e->out_dev; + if (e->flags & mlx5_encap_entry_valid) { + attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat; + attr->esw_attr->dests[out_index].flags |= mlx5_esw_dest_encap_valid; + *encap_valid = true; + } else { + *encap_valid = false; + } + mutex_unlock(&esw->offloads.encap_tbl_lock); + + return err; + +out_err: + mutex_unlock(&esw->offloads.encap_tbl_lock); + if (e) + mlx5e_encap_put(priv, e); + return err; + +out_err_init: + mutex_unlock(&esw->offloads.encap_tbl_lock); + kfree(tun_info); + kfree(e); + return err; +} + +int mlx5e_attach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_decap_entry *d; + struct mlx5e_decap_key key; + uintptr_t hash_key; + int err = 0; + + parse_attr = flow->attr->parse_attr; + if (sizeof(parse_attr->eth) > mlx5_cap_esw(priv->mdev, max_encap_header_size)) { + nl_set_err_msg_mod(extack, + "encap header larger than max supported"); + return -eopnotsupp; + } + + key.key = parse_attr->eth; + hash_key = hash_decap_info(&key); + mutex_lock(&esw->offloads.decap_tbl_lock); + d = mlx5e_decap_get(priv, &key, hash_key); + if (d) { + mutex_unlock(&esw->offloads.decap_tbl_lock); + wait_for_completion(&d->res_ready); + mutex_lock(&esw->offloads.decap_tbl_lock); + if (d->compl_result) { + err = -eremoteio; + goto out_free; + } + goto found; + } + + d = 
kzalloc(sizeof(*d), gfp_kernel); + if (!d) { + err = -enomem; + goto out_err; + } + + d->key = key; + refcount_set(&d->refcnt, 1); + init_completion(&d->res_ready); + init_list_head(&d->flows); + hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key); + mutex_unlock(&esw->offloads.decap_tbl_lock); + + d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + mlx5_reformat_type_l3_tunnel_to_l2, + sizeof(parse_attr->eth), + &parse_attr->eth, + mlx5_flow_namespace_fdb); + if (is_err(d->pkt_reformat)) { + err = ptr_err(d->pkt_reformat); + d->compl_result = err; + } + mutex_lock(&esw->offloads.decap_tbl_lock); + complete_all(&d->res_ready); + if (err) + goto out_free; + +found: + flow->decap_reformat = d; + attr->decap_pkt_reformat = d->pkt_reformat; + list_add(&flow->l3_to_l2_reformat, &d->flows); + mutex_unlock(&esw->offloads.decap_tbl_lock); + return 0; + +out_free: + mutex_unlock(&esw->offloads.decap_tbl_lock); + mlx5e_decap_put(priv, d); + return err; + +out_err: + mutex_unlock(&esw->offloads.decap_tbl_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +/* spdx-license-identifier: gpl-2.0 or linux-openib */ +/* copyright (c) 2021 mellanox technologies. 
*/ + +#ifndef __mlx5_en_tc_tun_encap_h__ +#define __mlx5_en_tc_tun_encap_h__ + +#include "tc_priv.h" + +void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, int out_index); + +int mlx5e_attach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, + int out_index, + struct netlink_ext_ack *extack, + struct net_device **encap_dev, + bool *encap_valid); +int mlx5e_attach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct netlink_ext_ack *extack); +void mlx5e_detach_decap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + +struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info); + +int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, + struct mlx5_flow_spec *spec); + +#endif /* __mlx5_en_tc_tun_encap_h__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +#include "en/tc_priv.h" +#include "en/tc_tun_encap.h" -#define mlx5e_tc_flow_base (mlx5e_tc_flag_last_exported_bit + 1) - -enum { - mlx5e_tc_flow_flag_ingress = mlx5e_tc_flag_ingress_bit, - mlx5e_tc_flow_flag_egress = mlx5e_tc_flag_egress_bit, - mlx5e_tc_flow_flag_eswitch = mlx5e_tc_flag_esw_offload_bit, - mlx5e_tc_flow_flag_ft = mlx5e_tc_flag_ft_offload_bit, - mlx5e_tc_flow_flag_nic = mlx5e_tc_flag_nic_offload_bit, - mlx5e_tc_flow_flag_offloaded = mlx5e_tc_flow_base, - mlx5e_tc_flow_flag_hairpin = mlx5e_tc_flow_base + 1, - mlx5e_tc_flow_flag_hairpin_rss = mlx5e_tc_flow_base + 2, - mlx5e_tc_flow_flag_slow = mlx5e_tc_flow_base + 3, - mlx5e_tc_flow_flag_dup = mlx5e_tc_flow_base + 4, - mlx5e_tc_flow_flag_not_ready = mlx5e_tc_flow_base + 5, - mlx5e_tc_flow_flag_deleted = mlx5e_tc_flow_base + 6, - mlx5e_tc_flow_flag_ct = mlx5e_tc_flow_base + 7, - mlx5e_tc_flow_flag_l3_to_l2_decap = mlx5e_tc_flow_base + 8, -}; - -#define 
mlx5e_tc_max_splits 1 - -/* helper struct for accessing a struct containing list_head array. - * containing struct - * |- helper array - * [0] helper item 0 - * |- list_head item 0 - * |- index (0) - * [1] helper item 1 - * |- list_head item 1 - * |- index (1) - * to access the containing struct from one of the list_head items: - * 1. get the helper item from the list_head item using - * helper item = - * container_of(list_head item, helper struct type, list_head field) - * 2. get the contining struct from the helper item and its index in the array: - * containing struct = - * container_of(helper item, containing struct type, helper field[index]) - */ -struct encap_flow_item { - struct mlx5e_encap_entry *e; /* attached encap instance */ - struct list_head list; - int index; -}; - -struct mlx5e_tc_flow { - struct rhash_head node; - struct mlx5e_priv *priv; - u64 cookie; - unsigned long flags; - struct mlx5_flow_handle *rule[mlx5e_tc_max_splits + 1]; - - /* flows sharing the same reformat object - currently mpls decap */ - struct list_head l3_to_l2_reformat; - struct mlx5e_decap_entry *decap_reformat; - - /* flow can be associated with multiple encap ids. - * the number of encaps is bounded by the number of supported - * destinations. 
- */ - struct encap_flow_item encaps[mlx5_max_flow_fwd_vports]; - struct mlx5e_tc_flow *peer_flow; - struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ - struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ - struct list_head hairpin; /* flows sharing the same hairpin */ - struct list_head peer; /* flows with peer flow */ - struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */ - struct net_device *orig_dev; /* netdev adding flow first */ - int tmp_efi_index; - struct list_head tmp_list; /* temporary flow list used by neigh update */ - refcount_t refcnt; - struct rcu_head rcu_head; - struct completion init_done; - int tunnel_id; /* the mapped tunnel id of this flow */ - struct mlx5_flow_attr *attr; -}; - -struct mlx5e_tc_flow_parse_attr { - const struct ip_tunnel_info *tun_info[mlx5_max_flow_fwd_vports]; - struct net_device *filter_dev; - struct mlx5_flow_spec spec; - struct mlx5e_tc_mod_hdr_acts mod_hdr_acts; - int mirred_ifindex[mlx5_max_flow_fwd_vports]; - struct ethhdr eth; -}; -static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) +struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) -static void mlx5e_flow_put(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) +void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) -static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) -{ - /* complete all memory stores before setting bit. 
*/ - smp_mb__before_atomic(); - set_bit(flag, &flow->flags); -} - -#define flow_flag_set(flow, flag) __flow_flag_set(flow, mlx5e_tc_flow_flag_##flag) - -static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow, - unsigned long flag) -{ - /* test_and_set_bit() provides all necessary barriers */ - return test_and_set_bit(flag, &flow->flags); -} - -#define flow_flag_test_and_set(flow, flag) \ - __flow_flag_test_and_set(flow, \ - mlx5e_tc_flow_flag_##flag) - -static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag) -{ - /* complete all memory stores before clearing bit. */ - smp_mb__before_atomic(); - clear_bit(flag, &flow->flags); -} - -#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \ - mlx5e_tc_flow_flag_##flag) - -static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag) -{ - bool ret = test_bit(flag, &flow->flags); - - /* read fields of flow structure only after checking flags. */ - smp_mb__after_atomic(); - return ret; -} - -#define flow_flag_test(flow, flag) __flow_flag_test(flow, \ - mlx5e_tc_flow_flag_##flag) - -static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) +bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) -static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, int out_index); - -static int mlx5e_attach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct net_device *mirred_dev, - int out_index, - struct netlink_ext_ack *extack, - struct net_device **encap_dev, - bool *encap_valid); -static int mlx5e_attach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack); -static void mlx5e_detach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow); - -static struct mlx5_flow_handle * +struct mlx5_flow_handle * -static void -mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, - struct mlx5e_tc_flow *flow, - struct mlx5_flow_attr *attr) +void mlx5e_tc_unoffload_fdb_rules(struct 
mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow, + struct mlx5_flow_attr *attr) -static struct mlx5_flow_handle * +struct mlx5_flow_handle * -static void -mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, - struct mlx5e_tc_flow *flow) +void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, + struct mlx5e_tc_flow *flow) -void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, - struct list_head *flow_list) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_flow_handle *rule; - struct mlx5_flow_attr *attr; - struct mlx5_flow_spec *spec; - struct mlx5e_tc_flow *flow; - int err; - - e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, - e->reformat_type, - e->encap_size, e->encap_header, - mlx5_flow_namespace_fdb); - if (is_err(e->pkt_reformat)) { - mlx5_core_warn(priv->mdev, "failed to offload cached encapsulation header, %lu ", - ptr_err(e->pkt_reformat)); - return; - } - e->flags |= mlx5_encap_entry_valid; - mlx5e_rep_queue_neigh_stats_work(priv); - - list_for_each_entry(flow, flow_list, tmp_list) { - bool all_flow_encaps_valid = true; - int i; - - if (!mlx5e_is_offloaded_flow(flow)) - continue; - attr = flow->attr; - esw_attr = attr->esw_attr; - spec = &attr->parse_attr->spec; - - esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat; - esw_attr->dests[flow->tmp_efi_index].flags |= mlx5_esw_dest_encap_valid; - /* flow can be associated with multiple encap entries. - * before offloading the flow verify that all of them have - * a valid neighbour. 
- */ - for (i = 0; i < mlx5_max_flow_fwd_vports; i++) { - if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap)) - continue; - if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap_valid)) { - all_flow_encaps_valid = false; - break; - } - } - /* do not offload flows with unresolved neighbors */ - if (!all_flow_encaps_valid) - continue; - /* update from slow path rule to encap rule */ - rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); - if (is_err(rule)) { - err = ptr_err(rule); - mlx5_core_warn(priv->mdev, "failed to update cached encapsulation flow, %d ", - err); - continue; - } - - mlx5e_tc_unoffload_from_slow_path(esw, flow); - flow->rule[0] = rule; - /* was unset when slow path rule removed */ - flow_flag_set(flow, offloaded); - } -} - -void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, - struct mlx5e_encap_entry *e, - struct list_head *flow_list) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_flow_handle *rule; - struct mlx5_flow_attr *attr; - struct mlx5_flow_spec *spec; - struct mlx5e_tc_flow *flow; - int err; - - list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow)) - continue; - attr = flow->attr; - esw_attr = attr->esw_attr; - spec = &attr->parse_attr->spec; - - /* update from encap rule to slow path rule */ - rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); - /* mark the flow's encap dest as non-valid */ - esw_attr->dests[flow->tmp_efi_index].flags &= ~mlx5_esw_dest_encap_valid; - - if (is_err(rule)) { - err = ptr_err(rule); - mlx5_core_warn(priv->mdev, "failed to update slow path (encap) flow, %d ", - err); - continue; - } - - mlx5e_tc_unoffload_fdb_rules(esw, flow, attr); - flow->rule[0] = rule; - /* was unset when fast path rule removed */ - flow_flag_set(flow, offloaded); - } - - /* we know that the encap is valid */ - e->flags &= ~mlx5_encap_entry_valid; - mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); -} - -static struct 
mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) +struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) -/* takes reference to all flows attached to encap and adds the flows to - * flow_list using 'tmp_list' list_head in mlx5e_tc_flow. - */ -void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list) -{ - struct encap_flow_item *efi; - struct mlx5e_tc_flow *flow; - - list_for_each_entry(efi, &e->flows, list) { - flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); - if (is_err(mlx5e_flow_get(flow))) - continue; - wait_for_completion(&flow->init_done); - - flow->tmp_efi_index = efi->index; - list_add(&flow->tmp_list, flow_list); - } -} - -static struct mlx5e_encap_entry * -mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, - struct mlx5e_encap_entry *e) -{ - struct mlx5e_encap_entry *next = null; - -retry: - rcu_read_lock(); - - /* find encap with non-zero reference counter value */ - for (next = e ? - list_next_or_null_rcu(&nhe->encap_list, - &e->encap_list, - struct mlx5e_encap_entry, - encap_list) : - list_first_or_null_rcu(&nhe->encap_list, - struct mlx5e_encap_entry, - encap_list); - next; - next = list_next_or_null_rcu(&nhe->encap_list, - &next->encap_list, - struct mlx5e_encap_entry, - encap_list)) - if (mlx5e_encap_take(next)) - break; - - rcu_read_unlock(); - - /* release starting encap */ - if (e) - mlx5e_encap_put(netdev_priv(e->out_dev), e); - if (!next) - return next; - - /* wait for encap to be fully initialized */ - wait_for_completion(&next->res_ready); - /* continue searching if encap entry is not in valid state after completion */ - if (!(next->flags & mlx5_encap_entry_valid)) { - e = next; - goto retry; - } - - return next; -} - -void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) -{ - struct mlx5e_neigh *m_neigh = &nhe->m_neigh; - struct mlx5e_encap_entry *e = null; - struct mlx5e_tc_flow *flow; - struct mlx5_fc *counter; - struct neigh_table *tbl; 
- bool neigh_used = false; - struct neighbour *n; - u64 lastuse; - - if (m_neigh->family == af_inet) - tbl = &arp_tbl; -#if is_enabled(config_ipv6) - else if (m_neigh->family == af_inet6) - tbl = ipv6_stub->nd_tbl; -#endif - else - return; - - /* mlx5e_get_next_valid_encap() releases previous encap before returning - * next one. - */ - while ((e = mlx5e_get_next_valid_encap(nhe, e)) != null) { - struct mlx5e_priv *priv = netdev_priv(e->out_dev); - struct encap_flow_item *efi, *tmp; - struct mlx5_eswitch *esw; - list_head(flow_list); - - esw = priv->mdev->priv.eswitch; - mutex_lock(&esw->offloads.encap_tbl_lock); - list_for_each_entry_safe(efi, tmp, &e->flows, list) { - flow = container_of(efi, struct mlx5e_tc_flow, - encaps[efi->index]); - if (is_err(mlx5e_flow_get(flow))) - continue; - list_add(&flow->tmp_list, &flow_list); - - if (mlx5e_is_offloaded_flow(flow)) { - counter = mlx5e_tc_get_counter(flow); - lastuse = mlx5_fc_query_lastuse(counter); - if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { - neigh_used = true; - break; - } - } - } - mutex_unlock(&esw->offloads.encap_tbl_lock); - - mlx5e_put_encap_flow_list(priv, &flow_list); - if (neigh_used) { - /* release current encap before breaking the loop */ - mlx5e_encap_put(priv, e); - break; - } - } - - trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used); - - if (neigh_used) { - nhe->reported_lastuse = jiffies; - - /* find the relevant neigh according to the cached device and - * dst ip pair - */ - n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); - if (!n) - return; - - neigh_event_send(n, null); - neigh_release(n); - } -} - -static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) -{ - warn_on(!list_empty(&e->flows)); - - if (e->compl_result > 0) { - mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); - - if (e->flags & mlx5_encap_entry_valid) - mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); - } - - kfree(e->tun_info); - 
kfree(e->encap_header); - kfree_rcu(e, rcu); -} - -static void mlx5e_decap_dealloc(struct mlx5e_priv *priv, - struct mlx5e_decap_entry *d) -{ - warn_on(!list_empty(&d->flows)); - - if (!d->compl_result) - mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat); - - kfree_rcu(d, rcu); -} - -void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock)) - return; - hash_del_rcu(&e->encap_hlist); - mutex_unlock(&esw->offloads.encap_tbl_lock); - - mlx5e_encap_dealloc(priv, e); -} - -static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock)) - return; - hash_del_rcu(&d->hlist); - mutex_unlock(&esw->offloads.decap_tbl_lock); - - mlx5e_decap_dealloc(priv, d); -} - -static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, int out_index) -{ - struct mlx5e_encap_entry *e = flow->encaps[out_index].e; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - /* flow wasn't fully initialized */ - if (!e) - return; - - mutex_lock(&esw->offloads.encap_tbl_lock); - list_del(&flow->encaps[out_index].list); - flow->encaps[out_index].e = null; - if (!refcount_dec_and_test(&e->refcnt)) { - mutex_unlock(&esw->offloads.encap_tbl_lock); - return; - } - hash_del_rcu(&e->encap_hlist); - mutex_unlock(&esw->offloads.encap_tbl_lock); - - mlx5e_encap_dealloc(priv, e); -} - -static void mlx5e_detach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_decap_entry *d = flow->decap_reformat; - - if (!d) - return; - - mutex_lock(&esw->offloads.decap_tbl_lock); - list_del(&flow->l3_to_l2_reformat); - flow->decap_reformat = null; - - if (!refcount_dec_and_test(&d->refcnt)) { - 
mutex_unlock(&esw->offloads.decap_tbl_lock); - return; - } - hash_del_rcu(&d->hlist); - mutex_unlock(&esw->offloads.decap_tbl_lock); - - mlx5e_decap_dealloc(priv, d); -} - -static u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer) +u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer) -static int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, - struct mlx5_flow_spec *spec) -{ - struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; - struct mlx5_rx_tun_attr *tun_attr; - void *daddr, *saddr; - u8 ip_version; - - tun_attr = kvzalloc(sizeof(*tun_attr), gfp_kernel); - if (!tun_attr) - return -enomem; - - esw_attr->rx_tun_attr = tun_attr; - ip_version = mlx5e_tc_get_ip_version(spec, true); - - if (ip_version == 4) { - daddr = mlx5_addr_of(fte_match_param, spec->match_value, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); - saddr = mlx5_addr_of(fte_match_param, spec->match_value, - outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); - tun_attr->dst_ip.v4 = *(__be32 *)daddr; - tun_attr->src_ip.v4 = *(__be32 *)saddr; - } -#if is_enabled(config_inet) && is_enabled(config_ipv6) - else if (ip_version == 6) { - int ipv6_size = mlx5_fld_sz_bytes(ipv6_layout, ipv6); - - daddr = mlx5_addr_of(fte_match_param, spec->match_value, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6); - saddr = mlx5_addr_of(fte_match_param, spec->match_value, - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6); - memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size); - memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size); - } -#endif - return 0; -} - -struct encap_key { - const struct ip_tunnel_key *ip_tun_key; - struct mlx5e_tc_tunnel *tc_tunnel; -}; - -static inline int cmp_encap_info(struct encap_key *a, - struct encap_key *b) -{ - return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) || - a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type; -} - -static inline int cmp_decap_info(struct mlx5e_decap_key *a, - struct mlx5e_decap_key *b) -{ - return 
memcmp(&a->key, &b->key, sizeof(b->key)); -} - -static inline int hash_encap_info(struct encap_key *key) -{ - return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key), - key->tc_tunnel->tunnel_type); -} - -static inline int hash_decap_info(struct mlx5e_decap_key *key) -{ - return jhash(&key->key, sizeof(key->key), 0); -} - -bool mlx5e_encap_take(struct mlx5e_encap_entry *e) -{ - return refcount_inc_not_zero(&e->refcnt); -} - -static bool mlx5e_decap_take(struct mlx5e_decap_entry *e) -{ - return refcount_inc_not_zero(&e->refcnt); -} - -static struct mlx5e_encap_entry * -mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, - uintptr_t hash_key) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_encap_entry *e; - struct encap_key e_key; - - hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, - encap_hlist, hash_key) { - e_key.ip_tun_key = &e->tun_info->key; - e_key.tc_tunnel = e->tunnel; - if (!cmp_encap_info(&e_key, key) && - mlx5e_encap_take(e)) - return e; - } - - return null; -} - -static struct mlx5e_decap_entry * -mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key, - uintptr_t hash_key) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_decap_key r_key; - struct mlx5e_decap_entry *e; - - hash_for_each_possible_rcu(esw->offloads.decap_tbl, e, - hlist, hash_key) { - r_key = e->key; - if (!cmp_decap_info(&r_key, key) && - mlx5e_decap_take(e)) - return e; - } - return null; -} - -static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info) -{ - size_t tun_size = sizeof(*tun_info) + tun_info->options_len; - - return kmemdup(tun_info, tun_size, gfp_kernel); -} - -static bool is_duplicated_encap_entry(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - int out_index, - struct mlx5e_encap_entry *e, - struct netlink_ext_ack *extack) -{ - int i; - - for (i = 0; i < out_index; i++) { - if (flow->encaps[i].e != e) - continue; - nl_set_err_msg_mod(extack, "can't duplicate 
encap action"); - netdev_err(priv->netdev, "can't duplicate encap action "); - return true; - } - - return false; -} - -static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw, - struct mlx5_flow_attr *attr, - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, - struct net_device *out_dev, - int route_dev_ifindex, - int out_index) -{ - struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - struct net_device *route_dev; - u16 vport_num; - int err = 0; - u32 data; - - route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex); - - if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops || - !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) - goto out; - - err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num); - if (err) - goto out; - - attr->dest_chain = 0; - attr->action |= mlx5_flow_context_action_mod_hdr; - esw_attr->dests[out_index].flags |= mlx5_esw_dest_chain_with_src_port_change; - data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch, - vport_num); - err = mlx5e_tc_match_to_reg_set(esw->dev, mod_hdr_acts, - mlx5_flow_namespace_fdb, vport_to_reg, data); - if (err) - goto out; - -out: - if (route_dev) - dev_put(route_dev); - return err; -} - -static int mlx5e_attach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct net_device *mirred_dev, - int out_index, - struct netlink_ext_ack *extack, - struct net_device **encap_dev, - bool *encap_valid) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5_flow_attr *attr = flow->attr; - const struct ip_tunnel_info *tun_info; - struct encap_key key; - struct mlx5e_encap_entry *e; - unsigned short family; - uintptr_t hash_key; - int err = 0; - - parse_attr = attr->parse_attr; - tun_info = parse_attr->tun_info[out_index]; - family = ip_tunnel_info_af(tun_info); - key.ip_tun_key = &tun_info->key; - key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev); - if (!key.tc_tunnel) { - nl_set_err_msg_mod(extack, 
"unsupported tunnel"); - return -eopnotsupp; - } - - hash_key = hash_encap_info(&key); - - mutex_lock(&esw->offloads.encap_tbl_lock); - e = mlx5e_encap_get(priv, &key, hash_key); - - /* must verify if encap is valid or not */ - if (e) { - /* check that entry was not already attached to this flow */ - if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) { - err = -eopnotsupp; - goto out_err; - } - - mutex_unlock(&esw->offloads.encap_tbl_lock); - wait_for_completion(&e->res_ready); - - /* protect against concurrent neigh update. */ - mutex_lock(&esw->offloads.encap_tbl_lock); - if (e->compl_result < 0) { - err = -eremoteio; - goto out_err; - } - goto attach_flow; - } - - e = kzalloc(sizeof(*e), gfp_kernel); - if (!e) { - err = -enomem; - goto out_err; - } - - refcount_set(&e->refcnt, 1); - init_completion(&e->res_ready); - - tun_info = dup_tun_info(tun_info); - if (!tun_info) { - err = -enomem; - goto out_err_init; - } - e->tun_info = tun_info; - err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); - if (err) - goto out_err_init; - - init_list_head(&e->flows); - hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); - mutex_unlock(&esw->offloads.encap_tbl_lock); - - if (family == af_inet) - err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); - else if (family == af_inet6) - err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); - - /* protect against concurrent neigh update. 
*/ - mutex_lock(&esw->offloads.encap_tbl_lock); - complete_all(&e->res_ready); - if (err) { - e->compl_result = err; - goto out_err; - } - e->compl_result = 1; - -attach_flow: - err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev, - e->route_dev_ifindex, out_index); - if (err) - goto out_err; - - flow->encaps[out_index].e = e; - list_add(&flow->encaps[out_index].list, &e->flows); - flow->encaps[out_index].index = out_index; - *encap_dev = e->out_dev; - if (e->flags & mlx5_encap_entry_valid) { - attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat; - attr->esw_attr->dests[out_index].flags |= mlx5_esw_dest_encap_valid; - *encap_valid = true; - } else { - *encap_valid = false; - } - mutex_unlock(&esw->offloads.encap_tbl_lock); - - return err; - -out_err: - mutex_unlock(&esw->offloads.encap_tbl_lock); - if (e) - mlx5e_encap_put(priv, e); - return err; - -out_err_init: - mutex_unlock(&esw->offloads.encap_tbl_lock); - kfree(tun_info); - kfree(e); - return err; -} - -static int mlx5e_attach_decap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; - struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5e_decap_entry *d; - struct mlx5e_decap_key key; - uintptr_t hash_key; - int err = 0; - - parse_attr = flow->attr->parse_attr; - if (sizeof(parse_attr->eth) > mlx5_cap_esw(priv->mdev, max_encap_header_size)) { - nl_set_err_msg_mod(extack, - "encap header larger than max supported"); - return -eopnotsupp; - } - - key.key = parse_attr->eth; - hash_key = hash_decap_info(&key); - mutex_lock(&esw->offloads.decap_tbl_lock); - d = mlx5e_decap_get(priv, &key, hash_key); - if (d) { - mutex_unlock(&esw->offloads.decap_tbl_lock); - wait_for_completion(&d->res_ready); - mutex_lock(&esw->offloads.decap_tbl_lock); - if (d->compl_result) { - err = -eremoteio; - goto out_free; - } - goto found; - } - - 
d = kzalloc(sizeof(*d), gfp_kernel); - if (!d) { - err = -enomem; - goto out_err; - } - - d->key = key; - refcount_set(&d->refcnt, 1); - init_completion(&d->res_ready); - init_list_head(&d->flows); - hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key); - mutex_unlock(&esw->offloads.decap_tbl_lock); - - d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, - mlx5_reformat_type_l3_tunnel_to_l2, - sizeof(parse_attr->eth), - &parse_attr->eth, - mlx5_flow_namespace_fdb); - if (is_err(d->pkt_reformat)) { - err = ptr_err(d->pkt_reformat); - d->compl_result = err; - } - mutex_lock(&esw->offloads.decap_tbl_lock); - complete_all(&d->res_ready); - if (err) - goto out_free; - -found: - flow->decap_reformat = d; - attr->decap_pkt_reformat = d->pkt_reformat; - list_add(&flow->l3_to_l2_reformat, &d->flows); - mutex_unlock(&esw->offloads.decap_tbl_lock); - return 0; - -out_free: - mutex_unlock(&esw->offloads.decap_tbl_lock); - mlx5e_decap_put(priv, d); - return err; - -out_err: - mutex_unlock(&esw->offloads.decap_tbl_lock); - return err; -} - - parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info); + parse_attr->tun_info[esw_attr->out_count] = + mlx5e_dup_tun_info(info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +#include "en/tc_tun.h" +#include "en_rep.h"
Networking
0d9f96471493d5483d116c137693f03604332a04
vlad buslov roi dayan roid nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: create route entry infrastructure
implement dedicated route entry infrastructure to be used in following patch by route update event. both encap (indirectly through their corresponding encap entries) and decap (directly) flows are attached to routing entry. since route update also requires updating encap (route device mac address is a source mac address of tunnel encapsulation), same encap_tbl_lock mutex is used for synchronization.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
7
290
11
- per-eswitch hash table is used for quick entry lookup. - flows are attached to per-entry linked list and hold reference to entry - atomic reference counting and rcu mechanisms are used as synchronization --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h + mlx5e_tc_flow_flag_tun_rx = mlx5e_tc_flow_base + 9, +struct encap_route_flow_item { + struct mlx5e_route_entry *r; /* attached route instance */ + int index; +}; + + /* flows sharing same route entry */ + struct list_head decap_routes; + struct mlx5e_route_entry *decap_route; + struct encap_route_flow_item encap_routes[mlx5_max_flow_fwd_vports]; + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +struct mlx5e_route_key { + int ip_version; + union { + __be32 v4; + struct in6_addr v6; + } endpoint_ip; +}; + +struct mlx5e_route_entry { + struct mlx5e_route_key key; + struct list_head encap_entries; + struct list_head decap_flows; + struct hlist_node hlist; + refcount_t refcnt; + struct rcu_head rcu; +}; + + if (!tun_attr->dst_ip.v4 || !tun_attr->src_ip.v4) + return 0; + struct in6_addr zerov6 = {}; + if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) || + !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6))) + return 0; + /* only set the flag if both src and dst ip addresses exist. they are + * required to establish routing. 
+ */ + flow_flag_set(flow, tun_rx); + list_del(&e->route_list); +static void mlx5e_detach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index); + + if (flow->attr->esw_attr->dests[out_index].flags & + mlx5_esw_dest_chain_with_src_port_change) + mlx5e_detach_encap_route(priv, flow, out_index); + + list_del(&e->route_list); +static int mlx5e_attach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5e_encap_entry *e, + bool new_encap_entry, + int out_index); + + bool entry_created = false; + entry_created = true; + init_list_head(&e->route_list); - err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev, - e->route_dev_ifindex, out_index); + err = mlx5e_attach_encap_route(priv, flow, e, entry_created, out_index); + +static int cmp_route_info(struct mlx5e_route_key *a, + struct mlx5e_route_key *b) +{ + if (a->ip_version == 4 && b->ip_version == 4) + return memcmp(&a->endpoint_ip.v4, &b->endpoint_ip.v4, + sizeof(a->endpoint_ip.v4)); + else if (a->ip_version == 6 && b->ip_version == 6) + return memcmp(&a->endpoint_ip.v6, &b->endpoint_ip.v6, + sizeof(a->endpoint_ip.v6)); + return 1; +} + +static u32 hash_route_info(struct mlx5e_route_key *key) +{ + if (key->ip_version == 4) + return jhash(&key->endpoint_ip.v4, sizeof(key->endpoint_ip.v4), 0); + return jhash(&key->endpoint_ip.v6, sizeof(key->endpoint_ip.v6), 0); +} + +static struct mlx5e_route_entry * +mlx5e_route_get(struct mlx5e_priv *priv, struct mlx5e_route_key *key, + u32 hash_key) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_route_key r_key; + struct mlx5e_route_entry *r; + + hash_for_each_possible(esw->offloads.route_tbl, r, hlist, hash_key) { + r_key = r->key; + if (!cmp_route_info(&r_key, key) && + refcount_inc_not_zero(&r->refcnt)) + return r; + } + return null; +} + +static struct mlx5e_route_entry * +mlx5e_route_get_create(struct mlx5e_priv *priv, + struct mlx5e_route_key *key) +{ + struct mlx5_eswitch 
*esw = priv->mdev->priv.eswitch; + struct mlx5e_route_entry *r; + u32 hash_key; + + hash_key = hash_route_info(key); + r = mlx5e_route_get(priv, key, hash_key); + if (r) + return r; + + r = kzalloc(sizeof(*r), gfp_kernel); + if (!r) + return err_ptr(-enomem); + + r->key = *key; + refcount_set(&r->refcnt, 1); + init_list_head(&r->decap_flows); + init_list_head(&r->encap_entries); + hash_add(esw->offloads.route_tbl, &r->hlist, hash_key); + return r; +} + +int mlx5e_attach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + int err = 0; + + esw_attr = attr->esw_attr; + parse_attr = attr->parse_attr; + mutex_lock(&esw->offloads.encap_tbl_lock); + if (!esw_attr->rx_tun_attr) + goto out; + + err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr); + if (err || !esw_attr->rx_tun_attr->decap_vport) + goto out; + + key.ip_version = attr->ip_version; + if (key.ip_version == 4) + key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4; + else + key.endpoint_ip.v6 = esw_attr->rx_tun_attr->dst_ip.v6; + + r = mlx5e_route_get_create(priv, &key); + if (is_err(r)) { + err = ptr_err(r); + goto out; + } + + flow->decap_route = r; + list_add(&flow->decap_routes, &r->decap_flows); + mutex_unlock(&esw->offloads.encap_tbl_lock); + return 0; + +out: + mutex_unlock(&esw->offloads.encap_tbl_lock); + return err; +} + +static int mlx5e_attach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5e_encap_entry *e, + bool new_encap_entry, + int out_index) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + const struct ip_tunnel_info *tun_info; + struct mlx5_esw_flow_attr *esw_attr; + struct 
mlx5e_route_entry *r; + struct mlx5e_route_key key; + unsigned short family; + int err = 0; + + esw_attr = attr->esw_attr; + parse_attr = attr->parse_attr; + tun_info = parse_attr->tun_info[out_index]; + family = ip_tunnel_info_af(tun_info); + + if (family == af_inet) { + key.endpoint_ip.v4 = tun_info->key.u.ipv4.src; + key.ip_version = 4; + } else if (family == af_inet6) { + key.endpoint_ip.v6 = tun_info->key.u.ipv6.src; + key.ip_version = 6; + } + + err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev, + e->route_dev_ifindex, out_index); + if (err || !(esw_attr->dests[out_index].flags & + mlx5_esw_dest_chain_with_src_port_change)) + return err; + + r = mlx5e_route_get_create(priv, &key); + if (is_err(r)) + return ptr_err(r); + + flow->encap_routes[out_index].r = r; + if (new_encap_entry) + list_add(&e->route_list, &r->encap_entries); + flow->encap_routes[out_index].index = out_index; + return 0; +} + +static void mlx5e_route_dealloc(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r) +{ + warn_on(!list_empty(&r->decap_flows)); + warn_on(!list_empty(&r->encap_entries)); + + kfree_rcu(r, rcu); +} + +void mlx5e_detach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_route_entry *r = flow->decap_route; + + if (!r) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + list_del(&flow->decap_routes); + flow->decap_route = null; + + if (!refcount_dec_and_test(&r->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; + } + hash_del_rcu(&r->hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_route_dealloc(priv, r); +} + +static void mlx5e_detach_encap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + int out_index) +{ + struct mlx5e_route_entry *r = flow->encap_routes[out_index].r; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_entry *e, *tmp; + + if (!r) + return; + + 
mutex_lock(&esw->offloads.encap_tbl_lock); + flow->encap_routes[out_index].r = null; + + if (!refcount_dec_and_test(&r->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; + } + list_for_each_entry_safe(e, tmp, &r->encap_entries, route_list) + list_del_init(&e->route_list); + hash_del_rcu(&r->hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_route_dealloc(priv, r); +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +int mlx5e_attach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); +void mlx5e_detach_decap_route(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h + struct list_head route_list; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c + if (flow_flag_test(flow, tun_rx)) { + err = mlx5e_attach_decap_route(priv, flow); + if (err) + return err; + } + + struct mlx5_esw_flow_attr *esw_attr; + esw_attr = attr->esw_attr; - for (out_index = 0; out_index < mlx5_max_flow_fwd_vports; out_index++) - if (attr->esw_attr->dests[out_index].flags & mlx5_esw_dest_encap) { + if (flow->decap_route) + mlx5e_detach_decap_route(priv, flow); + + for (out_index = 0; out_index < mlx5_max_flow_fwd_vports; out_index++) { + if (esw_attr->dests[out_index].flags & mlx5_esw_dest_encap) { + } - mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter); + mlx5_fc_destroy(esw_attr->counter_dev, attr->counter); - if (decap && 
esw_attr->rx_tun_attr) { - err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr); - if (err) - return err; - } - diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c + hash_init(esw->offloads.route_tbl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h + declare_hashtable(route_tbl, 8);
Networking
777bb800c6967517772e882118b414e1c6cb7087
vlad buslov
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: refactor neigh update infrastructure
following patches in series implements route update which can cause encap entries to migrate between routing devices. consecutively, their parent nhe's need to be also transferable between devices instead of having neigh device as a part of their immutable key. move neigh device from struct mlx5_neigh to struct mlx5e_neigh_hash_entry and check that nhe and neigh devices are the same in workqueue neigh update handler.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
9
35
31
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h - tp_struct__entry(__string(devname, nhe->m_neigh.dev->name) + tp_struct__entry(__string(devname, nhe->neigh_dev->name) - __assign_str(devname, mn->dev->name); + __assign_str(devname, nhe->neigh_dev->name); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h - tp_struct__entry(__string(devname, nhe->m_neigh.dev->name) + tp_struct__entry(__string(devname, nhe->neigh_dev->name) - __assign_str(devname, mn->dev->name); + __assign_str(devname, nhe->neigh_dev->name); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c + bool neigh_connected, same_dev; - bool neigh_connected; + same_dev = read_once(nhe->neigh_dev) == n->dev; + if (!same_dev) + goto out; + +out: - m_neigh.dev = n->dev; - if (p->dev == nhe->m_neigh.dev) { + if (p->dev == read_once(nhe->neigh_dev)) { - struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev, - memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh)); + memcpy(&(*nhe)->m_neigh, m_neigh, sizeof(*m_neigh)); + write_once((*nhe)->neigh_dev, neigh_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h - struct 
mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c - struct mlx5e_encap_entry *e) + struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev) - nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); + nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh); - err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); + err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h - struct mlx5e_encap_entry *e); + struct mlx5e_encap_entry *e, + struct mlx5e_neigh *m_neigh, + struct net_device *neigh_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c + struct mlx5e_neigh m_neigh = {}; - /* used by mlx5e_detach_encap to lookup a neigh hash table - * entry in the neigh hash table when a user deletes a rule - */ - e->m_neigh.dev = attr.n->dev; - e->m_neigh.family = attr.n->ops->family; - memcpy(&e->m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); + m_neigh.family = attr.n->ops->family; + memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); - err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e); + err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev); + struct mlx5e_neigh m_neigh = {}; - /* used by mlx5e_detach_encap to lookup a neigh hash table - * entry in the neigh hash table when a user 
deletes a rule - */ - e->m_neigh.dev = attr.n->dev; - e->m_neigh.family = attr.n->ops->family; - memcpy(&e->m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); + m_neigh.family = attr.n->ops->family; + memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len); - err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e); + err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c - n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); + n = neigh_lookup(tbl, &m_neigh->dst_ip, read_once(nhe->neigh_dev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h - struct net_device *dev; + struct net_device *neigh_dev; - struct mlx5e_neigh m_neigh;
Networking
2221d954d984d07dc66a4fd0f11a8b2705816a6f
vlad buslov
drivers
net
core, diag, en, ethernet, mellanox, mlx5, rep
net/mlx5e: tc preparation refactoring for routing update event
following patch in series implement routing update event which requires ability to modify rule match_to_reg modify header actions dynamically during rule lifetime. in order to accommodate such behavior, refactor and extend tc infrastructure in following ways:
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
288
9
- modify mod_hdr infrastructure to preserve its parse attribute for whole - extend match_to_reg infrastructure with new function - extend tun api with new functions mlx5e_tc_tun_update_header_ipv{4|6}() --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c - dealloc_mod_hdr_actions(mod_acts); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ + int max_encap_size = mlx5_cap_esw(priv->mdev, max_encap_header_size); + const struct ip_tunnel_key *tun_key = &e->tun_info->key; + tc_tun_route_attr_init(attr); + int ipv4_encap_size; + char *encap_header; + struct iphdr *ip; + u8 nud_state; + int err; + + /* add the ip fields */ + attr.fl.fl4.flowi4_tos = tun_key->tos; + attr.fl.fl4.daddr = tun_key->u.ipv4.dst; + attr.fl.fl4.saddr = tun_key->u.ipv4.src; + attr.ttl = tun_key->ttl; + + err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr); + if (err) + return err; + + ipv4_encap_size = + (is_vlan_dev(attr.route_dev) ? 
vlan_eth_hlen : eth_hlen) + + sizeof(struct iphdr) + + e->tunnel->calc_hlen(e); + + if (max_encap_size < ipv4_encap_size) { + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d ", + ipv4_encap_size, max_encap_size); + err = -eopnotsupp; + goto release_neigh; + } + + encap_header = kzalloc(ipv4_encap_size, gfp_kernel); + if (!encap_header) { + err = -enomem; + goto release_neigh; + } + + e->route_dev_ifindex = attr.route_dev->ifindex; + + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + write_once(e->nhe->neigh_dev, attr.n->dev); + read_unlock_bh(&attr.n->lock); + + /* add ethernet header */ + ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, + eth_p_ip); + + /* add ip header */ + ip->tos = tun_key->tos; + ip->version = 0x4; + ip->ihl = 0x5; + ip->ttl = attr.ttl; + ip->daddr = attr.fl.fl4.daddr; + ip->saddr = attr.fl.fl4.saddr; + + /* add tunneling protocol header */ + err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), + &ip->protocol, e); + if (err) + goto free_encap; + + e->encap_size = ipv4_encap_size; + kfree(e->encap_header); + e->encap_header = encap_header; + + if (!(nud_state & nud_valid)) { + neigh_event_send(attr.n, null); + /* the encap entry will be made valid on neigh update event + * and not used before that. 
+ */ + goto release_neigh; + } + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv4_encap_size, encap_header, + mlx5_flow_namespace_fdb); + if (is_err(e->pkt_reformat)) { + err = ptr_err(e->pkt_reformat); + goto free_encap; + } + + e->flags |= mlx5_encap_entry_valid; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv4_put(&attr); + return err; + +free_encap: + kfree(encap_header); +release_neigh: + mlx5e_route_lookup_ipv4_put(&attr); + return err; +} + + +int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ + int max_encap_size = mlx5_cap_esw(priv->mdev, max_encap_header_size); + const struct ip_tunnel_key *tun_key = &e->tun_info->key; + tc_tun_route_attr_init(attr); + struct ipv6hdr *ip6h; + int ipv6_encap_size; + char *encap_header; + u8 nud_state; + int err; + + attr.ttl = tun_key->ttl; + + attr.fl.fl6.flowlabel = ip6_make_flowinfo(rt_tos(tun_key->tos), tun_key->label); + attr.fl.fl6.daddr = tun_key->u.ipv6.dst; + attr.fl.fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr); + if (err) + return err; + + ipv6_encap_size = + (is_vlan_dev(attr.route_dev) ? 
vlan_eth_hlen : eth_hlen) + + sizeof(struct ipv6hdr) + + e->tunnel->calc_hlen(e); + + if (max_encap_size < ipv6_encap_size) { + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d ", + ipv6_encap_size, max_encap_size); + err = -eopnotsupp; + goto release_neigh; + } + + encap_header = kzalloc(ipv6_encap_size, gfp_kernel); + if (!encap_header) { + err = -enomem; + goto release_neigh; + } + + e->route_dev_ifindex = attr.route_dev->ifindex; + + read_lock_bh(&attr.n->lock); + nud_state = attr.n->nud_state; + ether_addr_copy(e->h_dest, attr.n->ha); + write_once(e->nhe->neigh_dev, attr.n->dev); + read_unlock_bh(&attr.n->lock); + + /* add ethernet header */ + ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e, + eth_p_ipv6); + + /* add ip header */ + ip6_flow_hdr(ip6h, tun_key->tos, 0); + /* the hw fills up ipv6 payload len */ + ip6h->hop_limit = attr.ttl; + ip6h->daddr = attr.fl.fl6.daddr; + ip6h->saddr = attr.fl.fl6.saddr; + + /* add tunneling protocol header */ + err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), + &ip6h->nexthdr, e); + if (err) + goto free_encap; + + e->encap_size = ipv6_encap_size; + kfree(e->encap_header); + e->encap_header = encap_header; + + if (!(nud_state & nud_valid)) { + neigh_event_send(attr.n, null); + /* the encap entry will be made valid on neigh update event + * and not used before that. 
+ */ + goto release_neigh; + } + + e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv6_encap_size, encap_header, + mlx5_flow_namespace_fdb); + if (is_err(e->pkt_reformat)) { + err = ptr_err(e->pkt_reformat); + goto free_encap; + } + + e->flags |= mlx5_encap_entry_valid; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); + mlx5e_route_lookup_ipv6_put(&attr); + return err; + +free_encap: + kfree(encap_header); +release_neigh: + mlx5e_route_lookup_ipv6_put(&attr); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); +int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); +int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ return -eopnotsupp; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c -mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, - struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, - enum mlx5_flow_namespace_type ns, - enum mlx5e_tc_attr_to_reg type, - u32 data) +mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5_flow_namespace_type ns, + enum mlx5e_tc_attr_to_reg type, + u32 data) + err = mod_hdr_acts->num_actions; - return 0; + return err; +int +mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5_flow_namespace_type ns, + enum mlx5e_tc_attr_to_reg type, 
+ u32 data) +{ + int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data); + + return ret < 0 ? ret : 0; +} + +void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5e_tc_attr_to_reg type, + int act_id, u32 data) +{ + int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; + int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield; + int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen; + char *modact; + + modact = mod_hdr_acts->actions + (act_id * mlx5_mh_act_sz); + + /* firmware has 5bit length field and 0 means 32bits */ + if (mlen == 4) + mlen = 0; + + mlx5_set(set_action_in, modact, action_type, mlx5_action_type_set); + mlx5_set(set_action_in, modact, field, mfield); + mlx5_set(set_action_in, modact, offset, moffset * 8); + mlx5_set(set_action_in, modact, length, mlen * 8); + mlx5_set(set_action_in, modact, data, data); +} + +int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow) +{ + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts; + struct mlx5_modify_hdr *mod_hdr; + + mod_hdr = mlx5_modify_header_alloc(priv->mdev, + get_flow_name_space(flow), + mod_hdr_acts->num_actions, + mod_hdr_acts->actions); + if (is_err(mod_hdr)) + return ptr_err(mod_hdr); + + warn_on(flow->attr->modify_hdr); + flow->attr->modify_hdr = mod_hdr; + + return 0; +} + - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); - if (attr->action & mlx5_flow_context_action_mod_hdr) + if (attr->action & mlx5_flow_context_action_mod_hdr) { + dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts); + } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev, + struct 
mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5e_tc_attr_to_reg type, + int act_id, u32 data); + +int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + enum mlx5_flow_namespace_type ns, + enum mlx5e_tc_attr_to_reg type, + u32 data); + +int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct mlx5e_tc_flow *flow); +
Networking
c7b9038d8af68e351e09a8427fa0264be8dc811f
vlad buslov
drivers
net
core, en, ethernet, mellanox, mlx5
net/mlx5e: rename some encap-specific api to generic names
some of the encap-specific functions and fields will also be used by route update infrastructure in following patches. rename them to generic names.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
5
9
9
--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c - mlx5e_put_encap_flow_list(priv, &flow_list); + mlx5e_put_flow_list(priv, &flow_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h - int tmp_efi_index; + int tmp_entry_index; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c - esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat; - esw_attr->dests[flow->tmp_efi_index].flags |= mlx5_esw_dest_encap_valid; + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat; + esw_attr->dests[flow->tmp_entry_index].flags |= mlx5_esw_dest_encap_valid; - esw_attr->dests[flow->tmp_efi_index].flags &= ~mlx5_esw_dest_encap_valid; + esw_attr->dests[flow->tmp_entry_index].flags &= ~mlx5_esw_dest_encap_valid; - flow->tmp_efi_index = efi->index; + flow->tmp_entry_index = efi->index; - mlx5e_put_encap_flow_list(priv, &flow_list); + mlx5e_put_flow_list(priv, &flow_list); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c -void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list) +void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h -void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list); +void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
Networking
021905f8067d13d9c80db88f1b5398cdd3e35cc5
vlad buslov roi dayan roid nvidia com
drivers
net
core, en, ethernet, mellanox, mlx5, rep
net/mlx5e: handle fib events to update tunnel endpoint device
process fib route update events to dynamically update the stack device rules when tunnel routing changes. use rtnl lock to prevent fib event handler from running concurrently with neigh update and neigh stats workqueue tasks. use encap_tbl_lock mutex to synchronize with tc rule update path that doesn't use rtnl lock.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement support for vf tunneling
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlx5/mlx5e ']
['h', 'c']
7
773
67
- unoffload all flows attached to route encaps from slow or fast path - update encap ip header according to new route dev. - update flows mod_hdr action that is responsible for overwriting reg_c0 - offload all flows to either slow or fast path depending on encap - unoffload all route flows from hardware. when last route flow is deleted - update flow attr decap_vport and destination mac according to underlying - offload all route flows back to hardware creating new indirect table --- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h + mlx5e_tc_flow_flag_failed = mlx5e_tc_flow_base + 10, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +#include <net/fib_notifier.h> +enum { + mlx5e_route_entry_valid = bit(0), +}; + + u32 flags; + int tunnel_dev_index; +struct mlx5e_tc_tun_encap { + struct mlx5e_priv *priv; + struct notifier_block fib_nb; + spinlock_t route_lock; /* protects route_tbl */ + unsigned long route_tbl_last_update; + declare_hashtable(route_tbl, 8); +}; + +static bool mlx5e_route_entry_valid(struct mlx5e_route_entry *r) +{ + return r->flags & mlx5e_route_entry_valid; +} + +static bool mlx5e_tc_flow_all_encaps_valid(struct mlx5_esw_flow_attr *esw_attr) +{ + bool all_flow_encaps_valid = true; + int i; + + /* flow can be associated with multiple encap entries. + * before offloading the flow verify that all of them have + * a valid neighbour. 
+ */ + for (i = 0; i < mlx5_max_flow_fwd_vports; i++) { + if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap)) + continue; + if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap_valid)) { + all_flow_encaps_valid = false; + break; + } + } + + return all_flow_encaps_valid; +} + + if (e->flags & mlx5_encap_entry_no_route) + return; + - bool all_flow_encaps_valid = true; - int i; - - /* flow can be associated with multiple encap entries. - * before offloading the flow verify that all of them have - * a valid neighbour. - */ - for (i = 0; i < mlx5_max_flow_fwd_vports; i++) { - if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap)) - continue; - if (!(esw_attr->dests[i].flags & mlx5_esw_dest_encap_valid)) { - all_flow_encaps_valid = false; - break; - } - } + - if (!all_flow_encaps_valid) + if (!mlx5e_tc_flow_all_encaps_valid(esw_attr)) +static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow, + struct list_head *flow_list, + int index) +{ + if (is_err(mlx5e_flow_get(flow))) + return; + wait_for_completion(&flow->init_done); + + flow->tmp_entry_index = index; + list_add(&flow->tmp_list, flow_list); +} + - if (is_err(mlx5e_flow_get(flow))) - continue; - wait_for_completion(&flow->init_done); - - flow->tmp_entry_index = efi->index; - list_add(&flow->tmp_list, flow_list); + mlx5e_take_tmp_flow(flow, flow_list, efi->index); +/* takes reference to all flows attached to route and adds the flows to + * flow_list using 'tmp_list' list_head in mlx5e_tc_flow. 
+ */ +static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r, + struct list_head *flow_list) +{ + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, &r->decap_flows, decap_routes) + mlx5e_take_tmp_flow(flow, flow_list, 0); +} + - err = mlx5e_tc_match_to_reg_set(esw->dev, mod_hdr_acts, - mlx5_flow_namespace_fdb, vport_to_reg, data); + err = mlx5e_tc_match_to_reg_set_and_get_id(esw->dev, mod_hdr_acts, + mlx5_flow_namespace_fdb, + vport_to_reg, data); + if (err >= 0) { + esw_attr->dests[out_index].src_port_rewrite_act_id = err; + err = 0; + } + +out: + if (route_dev) + dev_put(route_dev); + return err; +} + +static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw, + struct mlx5_esw_flow_attr *attr, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, + struct net_device *out_dev, + int route_dev_ifindex, + int out_index) +{ + int act_id = attr->dests[out_index].src_port_rewrite_act_id; + struct net_device *route_dev; + u16 vport_num; + int err = 0; + u32 data; + + route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex); + + if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops || + !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) { + err = -enodev; + goto out; + } + + err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num); + data = mlx5_eswitch_get_vport_metadata_for_set(attr->in_mdev->priv.eswitch, + vport_num); + mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, vport_to_reg, act_id, data); + +static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_tc_tun_encap *encap; + unsigned int ret; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, rep_eth); + uplink_priv = &uplink_rpriv->uplink_priv; + encap = uplink_priv->encap; + + spin_lock_bh(&encap->route_lock); + ret = encap->route_tbl_last_update; + spin_unlock_bh(&encap->route_lock); + 
return ret; +} + + unsigned long tbl_time_before, + unsigned long tbl_time_before = 0; + tbl_time_before = mlx5e_route_tbl_get_last_update(priv); - err = mlx5e_attach_encap_route(priv, flow, e, entry_created, out_index); + err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before, + out_index); +static void mlx5e_route_dealloc(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r) +{ + warn_on(!list_empty(&r->decap_flows)); + warn_on(!list_empty(&r->encap_entries)); + + kfree_rcu(r, rcu); +} + +static void mlx5e_route_put(struct mlx5e_priv *priv, struct mlx5e_route_entry *r) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (!refcount_dec_and_mutex_lock(&r->refcnt, &esw->offloads.encap_tbl_lock)) + return; + + hash_del_rcu(&r->hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_route_dealloc(priv, r); +} + +static void mlx5e_route_put_locked(struct mlx5e_priv *priv, struct mlx5e_route_entry *r) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + lockdep_assert_held(&esw->offloads.encap_tbl_lock); + + if (!refcount_dec_and_test(&r->refcnt)) + return; + hash_del_rcu(&r->hlist); + mlx5e_route_dealloc(priv, r); +} + -mlx5e_route_get(struct mlx5e_priv *priv, struct mlx5e_route_key *key, +mlx5e_route_get(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key, - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - hash_for_each_possible(esw->offloads.route_tbl, r, hlist, hash_key) { + hash_for_each_possible(encap->route_tbl, r, hlist, hash_key) { - struct mlx5e_route_key *key) + struct mlx5e_route_key *key, + int tunnel_dev_index, + unsigned long *route_tbl_change_time) + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *uplink_rpriv; + struct mlx5e_tc_tun_encap *encap; + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, rep_eth); + uplink_priv = &uplink_rpriv->uplink_priv; + encap = uplink_priv->encap; + - r = mlx5e_route_get(priv, key, hash_key); - if (r) + 
spin_lock_bh(&encap->route_lock); + r = mlx5e_route_get(encap, key, hash_key); + spin_unlock_bh(&encap->route_lock); + if (r) { + if (!mlx5e_route_entry_valid(r)) { + mlx5e_route_put_locked(priv, r); + return err_ptr(-einval); + } + } + r->flags |= mlx5e_route_entry_valid; + r->tunnel_dev_index = tunnel_dev_index; - hash_add(esw->offloads.route_tbl, &r->hlist, hash_key); + + spin_lock_bh(&encap->route_lock); + *route_tbl_change_time = encap->route_tbl_last_update; + hash_add(encap->route_tbl, &r->hlist, hash_key); + spin_unlock_bh(&encap->route_lock); + +static struct mlx5e_route_entry * +mlx5e_route_lookup_for_update(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key) +{ + u32 hash_key = hash_route_info(key); + struct mlx5e_route_entry *r; + + spin_lock_bh(&encap->route_lock); + encap->route_tbl_last_update = jiffies; + r = mlx5e_route_get(encap, key, hash_key); + spin_unlock_bh(&encap->route_lock); + + return r; +} + +struct mlx5e_tc_fib_event_data { + struct work_struct work; + unsigned long event; + struct mlx5e_route_entry *r; + struct net_device *ul_dev; +}; + +static void mlx5e_tc_fib_event_work(struct work_struct *work); +static struct mlx5e_tc_fib_event_data * +mlx5e_tc_init_fib_work(unsigned long event, struct net_device *ul_dev, gfp_t flags) +{ + struct mlx5e_tc_fib_event_data *fib_work; + + fib_work = kzalloc(sizeof(*fib_work), flags); + if (warn_on(!fib_work)) + return null; + + init_work(&fib_work->work, mlx5e_tc_fib_event_work); + fib_work->event = event; + fib_work->ul_dev = ul_dev; + + return fib_work; +} + +static int +mlx5e_route_enqueue_update(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r, + unsigned long event) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_fib_event_data *fib_work; + struct mlx5e_rep_priv *uplink_rpriv; + struct net_device *ul_dev; + + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, rep_eth); + ul_dev = uplink_rpriv->netdev; + + fib_work = mlx5e_tc_init_fib_work(event, ul_dev, 
gfp_kernel); + if (!fib_work) + return -enomem; + + dev_hold(ul_dev); + refcount_inc(&r->refcnt); + fib_work->r = r; + queue_work(priv->wq, &fib_work->work); + + return 0; +} + + unsigned long tbl_time_before, tbl_time_after; + tbl_time_before = mlx5e_route_tbl_get_last_update(priv); + tbl_time_after = tbl_time_before; - r = mlx5e_route_get_create(priv, &key); + r = mlx5e_route_get_create(priv, &key, parse_attr->filter_dev->ifindex, + &tbl_time_after); + /* routing changed concurrently. fib event handler might have missed new + * entry, schedule update. + */ + if (tbl_time_before != tbl_time_after) { + err = mlx5e_route_enqueue_update(priv, r, fib_event_entry_replace); + if (err) { + mlx5e_route_put_locked(priv, r); + goto out; + } + } + unsigned long tbl_time_before, + unsigned long tbl_time_after = tbl_time_before; - r = mlx5e_route_get_create(priv, &key); + r = mlx5e_route_get_create(priv, &key, parse_attr->mirred_ifindex[out_index], + &tbl_time_after); + /* routing changed concurrently. fib event handler might have missed new + * entry, schedule update. 
+ */ + if (tbl_time_before != tbl_time_after) { + err = mlx5e_route_enqueue_update(priv, r, fib_event_entry_replace); + if (err) { + mlx5e_route_put_locked(priv, r); + return err; + } + } -static void mlx5e_route_dealloc(struct mlx5e_priv *priv, - struct mlx5e_route_entry *r) -{ - warn_on(!list_empty(&r->decap_flows)); - warn_on(!list_empty(&r->encap_entries)); - - kfree_rcu(r, rcu); -} - +static void mlx5e_invalidate_encap(struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct list_head *encap_flows) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, encap_flows, tmp_list) { + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + + if (!mlx5e_is_offloaded_flow(flow)) + continue; + esw_attr = attr->esw_attr; + + if (flow_flag_test(flow, slow)) + mlx5e_tc_unoffload_from_slow_path(esw, flow); + else + mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); + mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); + attr->modify_hdr = null; + + esw_attr->dests[flow->tmp_entry_index].flags &= + ~mlx5_esw_dest_encap_valid; + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = null; + } + + e->flags |= mlx5_encap_entry_no_route; + if (e->flags & mlx5_encap_entry_valid) { + e->flags &= ~mlx5_encap_entry_valid; + mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat); + e->pkt_reformat = null; + } +} + +static void mlx5e_reoffload_encap(struct mlx5e_priv *priv, + struct net_device *tunnel_dev, + struct mlx5e_encap_entry *e, + struct list_head *encap_flows) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + int err; + + err = ip_tunnel_info_af(e->tun_info) == af_inet ? 
+ mlx5e_tc_tun_update_header_ipv4(priv, tunnel_dev, e) : + mlx5e_tc_tun_update_header_ipv6(priv, tunnel_dev, e); + if (err) + mlx5_core_warn(priv->mdev, "failed to update encap header, %d", err); + e->flags &= ~mlx5_encap_entry_no_route; + + list_for_each_entry(flow, encap_flows, tmp_list) { + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_esw_flow_attr *esw_attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + + if (flow_flag_test(flow, failed)) + continue; + + esw_attr = attr->esw_attr; + parse_attr = attr->parse_attr; + spec = &parse_attr->spec; + + err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts, + e->out_dev, e->route_dev_ifindex, + flow->tmp_entry_index); + if (err) { + mlx5_core_warn(priv->mdev, "failed to update vf tunnel err=%d", err); + continue; + } + + err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow); + if (err) { + mlx5_core_warn(priv->mdev, "failed to update flow mod_hdr err=%d", + err); + continue; + } + + if (e->flags & mlx5_encap_entry_valid) { + esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat; + esw_attr->dests[flow->tmp_entry_index].flags |= mlx5_esw_dest_encap_valid; + if (!mlx5e_tc_flow_all_encaps_valid(esw_attr)) + goto offload_to_slow_path; + /* update from slow path rule to encap rule */ + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + if (is_err(rule)) { + err = ptr_err(rule); + mlx5_core_warn(priv->mdev, "failed to update cached encapsulation flow, %d ", + err); + } else { + flow->rule[0] = rule; + } + } else { +offload_to_slow_path: + rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec); + /* mark the flow's encap dest as non-valid */ + esw_attr->dests[flow->tmp_entry_index].flags &= + ~mlx5_esw_dest_encap_valid; + + if (is_err(rule)) { + err = ptr_err(rule); + mlx5_core_warn(priv->mdev, "failed to update slow path (encap) flow, %d ", + err); + } else { + flow->rule[0] = rule; + } + } + 
flow_flag_set(flow, offloaded); + } +} + +static int mlx5e_update_route_encaps(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r, + struct list_head *flow_list, + bool replace) +{ + struct net_device *tunnel_dev; + struct mlx5e_encap_entry *e; + + tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index); + if (!tunnel_dev) + return -enodev; + + list_for_each_entry(e, &r->encap_entries, route_list) { + list_head(encap_flows); + + mlx5e_take_all_encap_flows(e, &encap_flows); + if (list_empty(&encap_flows)) + continue; + + if (mlx5e_route_entry_valid(r)) + mlx5e_invalidate_encap(priv, e, &encap_flows); + + if (!replace) { + list_splice(&encap_flows, flow_list); + continue; + } + + mlx5e_reoffload_encap(priv, tunnel_dev, e, &encap_flows); + list_splice(&encap_flows, flow_list); + } + + return 0; +} + +static void mlx5e_unoffload_flow_list(struct mlx5e_priv *priv, + struct list_head *flow_list) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, flow_list, tmp_list) + if (mlx5e_is_offloaded_flow(flow)) + mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr); +} + +static void mlx5e_reoffload_decap(struct mlx5e_priv *priv, + struct list_head *decap_flows) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_tc_flow *flow; + + list_for_each_entry(flow, decap_flows, tmp_list) { + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5_flow_attr *attr = flow->attr; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err; + + if (flow_flag_test(flow, failed)) + continue; + + parse_attr = attr->parse_attr; + spec = &parse_attr->spec; + err = mlx5e_tc_tun_route_lookup(priv, spec, attr); + if (err) { + mlx5_core_warn(priv->mdev, "failed to lookup route for flow, %d ", + err); + continue; + } + + rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr); + if (is_err(rule)) { + err = ptr_err(rule); + mlx5_core_warn(priv->mdev, "failed to update cached 
decap flow, %d ", + err); + } else { + flow->rule[0] = rule; + flow_flag_set(flow, offloaded); + } + } +} + +static int mlx5e_update_route_decap_flows(struct mlx5e_priv *priv, + struct mlx5e_route_entry *r, + struct list_head *flow_list, + bool replace) +{ + struct net_device *tunnel_dev; + list_head(decap_flows); + + tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index); + if (!tunnel_dev) + return -enodev; + + mlx5e_take_all_route_decap_flows(r, &decap_flows); + if (mlx5e_route_entry_valid(r)) + mlx5e_unoffload_flow_list(priv, &decap_flows); + if (replace) + mlx5e_reoffload_decap(priv, &decap_flows); + + list_splice(&decap_flows, flow_list); + + return 0; +} + +static void mlx5e_tc_fib_event_work(struct work_struct *work) +{ + struct mlx5e_tc_fib_event_data *event_data = + container_of(work, struct mlx5e_tc_fib_event_data, work); + struct net_device *ul_dev = event_data->ul_dev; + struct mlx5e_priv *priv = netdev_priv(ul_dev); + struct mlx5e_route_entry *r = event_data->r; + struct mlx5_eswitch *esw; + list_head(flow_list); + bool replace; + int err; + + /* sync with concurrent neigh updates */ + rtnl_lock(); + esw = priv->mdev->priv.eswitch; + mutex_lock(&esw->offloads.encap_tbl_lock); + replace = event_data->event == fib_event_entry_replace; + + if (!mlx5e_route_entry_valid(r) && !replace) + goto out; + + err = mlx5e_update_route_encaps(priv, r, &flow_list, replace); + if (err) + mlx5_core_warn(priv->mdev, "failed to update route encaps, %d ", + err); + + err = mlx5e_update_route_decap_flows(priv, r, &flow_list, replace); + if (err) + mlx5_core_warn(priv->mdev, "failed to update route decap flows, %d ", + err); + + if (replace) + r->flags |= mlx5e_route_entry_valid; +out: + mutex_unlock(&esw->offloads.encap_tbl_lock); + rtnl_unlock(); + + mlx5e_put_flow_list(priv, &flow_list); + mlx5e_route_put(priv, event_data->r); + dev_put(event_data->ul_dev); + kfree(event_data); +} + +static struct mlx5e_tc_fib_event_data * 
+mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv, + struct net_device *ul_dev, + struct mlx5e_tc_tun_encap *encap, + unsigned long event, + struct fib_notifier_info *info) +{ + struct fib_entry_notifier_info *fen_info; + struct mlx5e_tc_fib_event_data *fib_work; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + struct net_device *fib_dev; + + fen_info = container_of(info, struct fib_entry_notifier_info, info); + fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev; + if (fib_dev->netdev_ops != &mlx5e_netdev_ops || + fen_info->dst_len != 32) + return null; + + fib_work = mlx5e_tc_init_fib_work(event, ul_dev, gfp_atomic); + if (!fib_work) + return err_ptr(-enomem); + + key.endpoint_ip.v4 = htonl(fen_info->dst); + key.ip_version = 4; + + /* can't fail after this point because releasing reference to r + * requires obtaining sleeping mutex which we can't do in atomic + * context. + */ + r = mlx5e_route_lookup_for_update(encap, &key); + if (!r) + goto out; + fib_work->r = r; + dev_hold(ul_dev); + + return fib_work; + +out: + kfree(fib_work); + return null; +} + +static struct mlx5e_tc_fib_event_data * +mlx5e_init_fib_work_ipv6(struct mlx5e_priv *priv, + struct net_device *ul_dev, + struct mlx5e_tc_tun_encap *encap, + unsigned long event, + struct fib_notifier_info *info) +{ + struct fib6_entry_notifier_info *fen_info; + struct mlx5e_tc_fib_event_data *fib_work; + struct mlx5e_route_entry *r; + struct mlx5e_route_key key; + struct net_device *fib_dev; + + fen_info = container_of(info, struct fib6_entry_notifier_info, info); + fib_dev = fib6_info_nh_dev(fen_info->rt); + if (fib_dev->netdev_ops != &mlx5e_netdev_ops || + fen_info->rt->fib6_dst.plen != 128) + return null; + + fib_work = mlx5e_tc_init_fib_work(event, ul_dev, gfp_atomic); + if (!fib_work) + return err_ptr(-enomem); + + memcpy(&key.endpoint_ip.v6, &fen_info->rt->fib6_dst.addr, + sizeof(fen_info->rt->fib6_dst.addr)); + key.ip_version = 6; + + /* can't fail after this point because releasing reference 
to r + * requires obtaining sleeping mutex which we can't do in atomic + * context. + */ + r = mlx5e_route_lookup_for_update(encap, &key); + if (!r) + goto out; + fib_work->r = r; + dev_hold(ul_dev); + + return fib_work; + +out: + kfree(fib_work); + return null; +} + +static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct mlx5e_tc_fib_event_data *fib_work; + struct fib_notifier_info *info = ptr; + struct mlx5e_tc_tun_encap *encap; + struct net_device *ul_dev; + struct mlx5e_priv *priv; + + encap = container_of(nb, struct mlx5e_tc_tun_encap, fib_nb); + priv = encap->priv; + ul_dev = priv->netdev; + priv = netdev_priv(ul_dev); + + switch (event) { + case fib_event_entry_replace: + case fib_event_entry_del: + if (info->family == af_inet) + fib_work = mlx5e_init_fib_work_ipv4(priv, ul_dev, encap, event, info); + else if (info->family == af_inet6) + fib_work = mlx5e_init_fib_work_ipv6(priv, ul_dev, encap, event, info); + else + return notify_done; + + if (!is_err_or_null(fib_work)) { + queue_work(priv->wq, &fib_work->work); + } else if (is_err(fib_work)) { + nl_set_err_msg_mod(info->extack, "failed to init fib work"); + mlx5_core_warn(priv->mdev, "failed to init fib work, %ld ", + ptr_err(fib_work)); + } + + break; + default: + return notify_done; + } + + return notify_done; +} + +struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_tun_encap *encap; + int err; + + encap = kvzalloc(sizeof(*encap), gfp_kernel); + if (!encap) + return err_ptr(-enomem); + + encap->priv = priv; + encap->fib_nb.notifier_call = mlx5e_tc_tun_fib_event; + spin_lock_init(&encap->route_lock); + hash_init(encap->route_tbl); + err = register_fib_notifier(dev_net(priv->netdev), &encap->fib_nb, + null, null); + if (err) { + kvfree(encap); + return err_ptr(err); + } + + return encap; +} + +void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap) +{ + if (!encap) + return; + + 
unregister_fib_notifier(dev_net(encap->priv->netdev), &encap->fib_nb); + flush_workqueue(encap->priv->wq); /* flush fib event works */ + kvfree(encap); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h +struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv); +void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +struct mlx5e_tc_tun_encap; + + + /* tc tunneling encapsulation private data */ + struct mlx5e_tc_tun_encap *encap; + mlx5_encap_entry_no_route = bit(2), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c + bool vf_tun = false, encap_valid = true; - bool encap_valid = true; - return -eopnotsupp; + err = -eopnotsupp; + goto err_out; - return -eopnotsupp; + err = -eopnotsupp; + goto err_out; - return err; + goto err_out; - return err; + goto err_out; - return err; + goto err_out; + if (esw_attr->dests[out_index].flags & + mlx5_esw_dest_chain_with_src_port_change) + vf_tun = true; - return err; + goto err_out; - err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); - if (err) - return err; + if (vf_tun) { + err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow); + if (err) + goto err_out; + } else { + err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); + if (err) + goto err_out; + } - if (is_err(counter)) - return ptr_err(counter); + if (is_err(counter)) { + err = ptr_err(counter); + goto err_out; + } - if (is_err(flow->rule[0])) - return 
ptr_err(flow->rule[0]); - else - flow_flag_set(flow, offloaded); + if (is_err(flow->rule[0])) { + err = ptr_err(flow->rule[0]); + goto err_out; + } + flow_flag_set(flow, offloaded); + +err_out: + flow_flag_set(flow, failed); + return err; + bool vf_tun = false; + if (esw_attr->dests[out_index].flags & + mlx5_esw_dest_chain_with_src_port_change) + vf_tun = true; - kvfree(attr->parse_attr); - kvfree(attr->esw_attr->rx_tun_attr); - mlx5e_detach_mod_hdr(priv, flow); + if (vf_tun && attr->modify_hdr) + mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr); + else + mlx5e_detach_mod_hdr(priv, flow); + kvfree(attr->parse_attr); + kvfree(attr->esw_attr->rx_tun_attr); - dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts); + flow_flag_set(flow, failed); + uplink_priv->encap = mlx5e_tc_tun_init(priv); + if (is_err(uplink_priv->encap)) + goto err_register_fib_notifier; + +err_register_fib_notifier: + rhashtable_destroy(tc_ht); - rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, null); - + rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, null); + mlx5e_tc_tun_cleanup(uplink_priv->encap); + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c - hash_init(esw->offloads.route_tbl); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h - declare_hashtable(route_tbl, 8); + int src_port_rewrite_act_id;
Networking
8914add2c9e5518f6a864936658bba5752510b39
vlad buslov
drivers
net
core, en, ethernet, mellanox, mlx5
ethtool: validate master slave configuration before rtnl_lock()
create a new function for input validations to be called before rtnl_lock() and move the master slave validation to that function.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['c']
1
19
8
--- diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c +static int ethnl_check_linkmodes(struct genl_info *info, struct nlattr **tb) +{ + const struct nlattr *master_slave_cfg; + + master_slave_cfg = tb[ethtool_a_linkmodes_master_slave_cfg]; + if (master_slave_cfg && + !ethnl_validate_master_slave_cfg(nla_get_u8(master_slave_cfg))) { + nl_set_err_msg_attr(info->extack, master_slave_cfg, + "master/slave value is invalid"); + return -eopnotsupp; + } + + return 0; +} + - u8 cfg = nla_get_u8(master_slave_cfg); - - - if (!ethnl_validate_master_slave_cfg(cfg)) { - nl_set_err_msg_attr(info->extack, master_slave_cfg, - "master/slave value is invalid"); - return -eopnotsupp; - } + ret = ethnl_check_linkmodes(info, tb); + if (ret < 0) + return ret; +
Networking
189e7a8d94208a26b7f7876d155cf695393f8efa
danielle ratson
net
ethtool
ethtool: extend link modes settings uapi with lanes
currently, when auto negotiation is on, the user can advertise all the linkmodes which correspond to a specific speed, but does not have a similar selector for the number of lanes. this is significant when a specific speed can be achieved using different number of lanes. for example, 2x50 or 4x25.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['h', 'c', 'rst']
5
93
21
--- diff --git a/documentation/networking/ethtool-netlink.rst b/documentation/networking/ethtool-netlink.rst --- a/documentation/networking/ethtool-netlink.rst +++ b/documentation/networking/ethtool-netlink.rst + ''ethtool_a_linkmodes_lanes'' u32 lanes -of speed and duplex is specified, kernel adjusts advertised modes to all -supported modes matching speed, duplex or both (whatever is specified). this -autoselection is done on ethtool side with ioctl interface, netlink interface -is supposed to allow requesting changes without knowing what exactly kernel -supports. +of speed, duplex and lanes is specified, kernel adjusts advertised modes to all +supported modes matching speed, duplex, lanes or all (whatever is specified). +this autoselection is done on ethtool side with ioctl interface, netlink +interface is supposed to allow requesting changes without knowing what exactly +kernel supports. diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h + u32 lanes; + * @cap_link_lanes_supported: indicates if the driver supports lanes + * parameter. 
+ u32 cap_link_lanes_supported:1; diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h + ethtool_a_linkmodes_lanes, /* u32 */ diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c + u8 lanes; -#define __define_link_mode_params(_speed, _type, _duplex) \ - [ethtool_link_mode(_speed, _type, _duplex)] = { \ - .speed = speed_ ## _speed, \ +#define __link_mode_lanes_cr 1 +#define __link_mode_lanes_cr2 2 +#define __link_mode_lanes_cr4 4 +#define __link_mode_lanes_cr8 8 +#define __link_mode_lanes_dr 1 +#define __link_mode_lanes_dr2 2 +#define __link_mode_lanes_dr4 4 +#define __link_mode_lanes_dr8 8 +#define __link_mode_lanes_kr 1 +#define __link_mode_lanes_kr2 2 +#define __link_mode_lanes_kr4 4 +#define __link_mode_lanes_kr8 8 +#define __link_mode_lanes_sr 1 +#define __link_mode_lanes_sr2 2 +#define __link_mode_lanes_sr4 4 +#define __link_mode_lanes_sr8 8 +#define __link_mode_lanes_er 1 +#define __link_mode_lanes_kx 1 +#define __link_mode_lanes_kx4 4 +#define __link_mode_lanes_lr 1 +#define __link_mode_lanes_lr4 4 +#define __link_mode_lanes_lr4_er4 4 +#define __link_mode_lanes_lr_er_fr 1 +#define __link_mode_lanes_lr2_er2_fr2 2 +#define __link_mode_lanes_lr4_er4_fr4 4 +#define __link_mode_lanes_lr8_er8_fr8 8 +#define __link_mode_lanes_lrm 1 +#define __link_mode_lanes_mld2 2 +#define __link_mode_lanes_t 1 +#define __link_mode_lanes_t1 1 +#define __link_mode_lanes_x 1 +#define __link_mode_lanes_fx 1 + +#define __define_link_mode_params(_speed, _type, _duplex) \ + [ethtool_link_mode(_speed, _type, _duplex)] = { \ + .speed = speed_ ## _speed, \ + .lanes = __link_mode_lanes_ ## _type, \ + .lanes = 0, \ + [ethtool_a_linkmodes_lanes] = nla_policy_range(nla_u32, 1, 8), -/* set advertised link modes to all supported modes matching requested speed - * and duplex values. 
called when autonegotiation is on, speed or duplex is - * requested but no link mode change. this is done in userspace with ioctl() - * interface, move it into kernel for netlink. +/* set advertised link modes to all supported modes matching requested speed, + * lanes and duplex values. called when autonegotiation is on, speed, lanes or + * duplex is requested but no link mode change. this is done in userspace with + * ioctl() interface, move it into kernel for netlink. - bool req_speed, bool req_duplex) + bool req_speed, bool req_lanes, bool req_duplex) + (!req_lanes || info->lanes == ksettings->lanes) && - const struct nlattr *master_slave_cfg; + const struct nlattr *master_slave_cfg, *lanes_cfg; + lanes_cfg = tb[ethtool_a_linkmodes_lanes]; + if (lanes_cfg && !is_power_of_2(nla_get_u32(lanes_cfg))) { + nl_set_err_msg_attr(info->extack, lanes_cfg, + "lanes value is invalid"); + return -einval; + } + - bool *mod) + bool *mod, const struct net_device *dev) - bool req_speed, req_duplex; - const struct nlattr *master_slave_cfg; + bool req_speed, req_lanes, req_duplex; + const struct nlattr *master_slave_cfg, *lanes_cfg; + req_lanes = tb[ethtool_a_linkmodes_lanes]; + + lanes_cfg = tb[ethtool_a_linkmodes_lanes]; + if (lanes_cfg) { + /* if autoneg is off and lanes parameter is not supported by the + * driver, return an error. + */ + if (!lsettings->autoneg && + !dev->ethtool_ops->cap_link_lanes_supported) { + nl_set_err_msg_attr(info->extack, lanes_cfg, + "lanes configuration not supported by device"); + return -eopnotsupp; + } + } else if (!lsettings->autoneg) { + /* if autoneg is off and lanes parameter is not passed from user, + * set the lanes parameter to 0. 
+ */ + ksettings->lanes = 0; + } + + ethnl_update_u32(&ksettings->lanes, lanes_cfg, mod); - (req_speed || req_duplex) && - ethnl_auto_linkmodes(ksettings, req_speed, req_duplex)) + (req_speed || req_lanes || req_duplex) && + ethnl_auto_linkmodes(ksettings, req_speed, req_lanes, req_duplex)) - ret = ethnl_update_linkmodes(info, tb, &ksettings, &mod); + ret = ethnl_update_linkmodes(info, tb, &ksettings, &mod, dev); diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h -extern const struct nla_policy ethnl_linkmodes_set_policy[ethtool_a_linkmodes_master_slave_cfg + 1]; +extern const struct nla_policy ethnl_linkmodes_set_policy[ethtool_a_linkmodes_lanes + 1];
Networking
012ce4dd3102a0f4d80167de343e9d44b257c1b8
danielle ratson
include
linux
linux
ethtool: get link mode in use instead of speed and duplex parameters
currently, when user space queries the link's parameters, as speed and duplex, each parameter is passed from the driver to ethtool.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['h', 'c']
5
174
156
--- diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h + enum ethtool_link_mode_bit_indices link_mode; diff --git a/net/ethtool/common.c b/net/ethtool/common.c --- a/net/ethtool/common.c +++ b/net/ethtool/common.c +#define __link_mode_lanes_cr 1 +#define __link_mode_lanes_cr2 2 +#define __link_mode_lanes_cr4 4 +#define __link_mode_lanes_cr8 8 +#define __link_mode_lanes_dr 1 +#define __link_mode_lanes_dr2 2 +#define __link_mode_lanes_dr4 4 +#define __link_mode_lanes_dr8 8 +#define __link_mode_lanes_kr 1 +#define __link_mode_lanes_kr2 2 +#define __link_mode_lanes_kr4 4 +#define __link_mode_lanes_kr8 8 +#define __link_mode_lanes_sr 1 +#define __link_mode_lanes_sr2 2 +#define __link_mode_lanes_sr4 4 +#define __link_mode_lanes_sr8 8 +#define __link_mode_lanes_er 1 +#define __link_mode_lanes_kx 1 +#define __link_mode_lanes_kx4 4 +#define __link_mode_lanes_lr 1 +#define __link_mode_lanes_lr4 4 +#define __link_mode_lanes_lr4_er4 4 +#define __link_mode_lanes_lr_er_fr 1 +#define __link_mode_lanes_lr2_er2_fr2 2 +#define __link_mode_lanes_lr4_er4_fr4 4 +#define __link_mode_lanes_lr8_er8_fr8 8 +#define __link_mode_lanes_lrm 1 +#define __link_mode_lanes_mld2 2 +#define __link_mode_lanes_t 1 +#define __link_mode_lanes_t1 1 +#define __link_mode_lanes_x 1 +#define __link_mode_lanes_fx 1 + +#define __define_link_mode_params(_speed, _type, _duplex) \ + [ethtool_link_mode(_speed, _type, _duplex)] = { \ + .speed = speed_ ## _speed, \ + .lanes = __link_mode_lanes_ ## _type, \ + .duplex = __duplex_ ## _duplex \ + } +#define __duplex_half duplex_half +#define __duplex_full duplex_full +#define __define_special_mode_params(_mode) \ + [ethtool_link_mode_ ## _mode ## _bit] = { \ + .speed = speed_unknown, \ + .lanes = 0, \ + .duplex = duplex_unknown, \ + } + +const struct link_mode_info link_mode_params[] = { + __define_link_mode_params(10, t, half), + __define_link_mode_params(10, t, full), + 
__define_link_mode_params(100, t, half), + __define_link_mode_params(100, t, full), + __define_link_mode_params(1000, t, half), + __define_link_mode_params(1000, t, full), + __define_special_mode_params(autoneg), + __define_special_mode_params(tp), + __define_special_mode_params(aui), + __define_special_mode_params(mii), + __define_special_mode_params(fibre), + __define_special_mode_params(bnc), + __define_link_mode_params(10000, t, full), + __define_special_mode_params(pause), + __define_special_mode_params(asym_pause), + __define_link_mode_params(2500, x, full), + __define_special_mode_params(backplane), + __define_link_mode_params(1000, kx, full), + __define_link_mode_params(10000, kx4, full), + __define_link_mode_params(10000, kr, full), + [ethtool_link_mode_10000baser_fec_bit] = { + .speed = speed_10000, + .duplex = duplex_full, + }, + __define_link_mode_params(20000, mld2, full), + __define_link_mode_params(20000, kr2, full), + __define_link_mode_params(40000, kr4, full), + __define_link_mode_params(40000, cr4, full), + __define_link_mode_params(40000, sr4, full), + __define_link_mode_params(40000, lr4, full), + __define_link_mode_params(56000, kr4, full), + __define_link_mode_params(56000, cr4, full), + __define_link_mode_params(56000, sr4, full), + __define_link_mode_params(56000, lr4, full), + __define_link_mode_params(25000, cr, full), + __define_link_mode_params(25000, kr, full), + __define_link_mode_params(25000, sr, full), + __define_link_mode_params(50000, cr2, full), + __define_link_mode_params(50000, kr2, full), + __define_link_mode_params(100000, kr4, full), + __define_link_mode_params(100000, sr4, full), + __define_link_mode_params(100000, cr4, full), + __define_link_mode_params(100000, lr4_er4, full), + __define_link_mode_params(50000, sr2, full), + __define_link_mode_params(1000, x, full), + __define_link_mode_params(10000, cr, full), + __define_link_mode_params(10000, sr, full), + __define_link_mode_params(10000, lr, full), + 
__define_link_mode_params(10000, lrm, full), + __define_link_mode_params(10000, er, full), + __define_link_mode_params(2500, t, full), + __define_link_mode_params(5000, t, full), + __define_special_mode_params(fec_none), + __define_special_mode_params(fec_rs), + __define_special_mode_params(fec_baser), + __define_link_mode_params(50000, kr, full), + __define_link_mode_params(50000, sr, full), + __define_link_mode_params(50000, cr, full), + __define_link_mode_params(50000, lr_er_fr, full), + __define_link_mode_params(50000, dr, full), + __define_link_mode_params(100000, kr2, full), + __define_link_mode_params(100000, sr2, full), + __define_link_mode_params(100000, cr2, full), + __define_link_mode_params(100000, lr2_er2_fr2, full), + __define_link_mode_params(100000, dr2, full), + __define_link_mode_params(200000, kr4, full), + __define_link_mode_params(200000, sr4, full), + __define_link_mode_params(200000, lr4_er4_fr4, full), + __define_link_mode_params(200000, dr4, full), + __define_link_mode_params(200000, cr4, full), + __define_link_mode_params(100, t1, full), + __define_link_mode_params(1000, t1, full), + __define_link_mode_params(400000, kr8, full), + __define_link_mode_params(400000, sr8, full), + __define_link_mode_params(400000, lr8_er8_fr8, full), + __define_link_mode_params(400000, dr8, full), + __define_link_mode_params(400000, cr8, full), + __define_special_mode_params(fec_llrs), + __define_link_mode_params(100000, kr, full), + __define_link_mode_params(100000, sr, full), + __define_link_mode_params(100000, lr_er_fr, full), + __define_link_mode_params(100000, dr, full), + __define_link_mode_params(100000, cr, full), + __define_link_mode_params(200000, kr2, full), + __define_link_mode_params(200000, sr2, full), + __define_link_mode_params(200000, lr2_er2_fr2, full), + __define_link_mode_params(200000, dr2, full), + __define_link_mode_params(200000, cr2, full), + __define_link_mode_params(400000, kr4, full), + __define_link_mode_params(400000, sr4, full), 
+ __define_link_mode_params(400000, lr4_er4_fr4, full), + __define_link_mode_params(400000, dr4, full), + __define_link_mode_params(400000, cr4, full), + __define_link_mode_params(100, fx, half), + __define_link_mode_params(100, fx, full), +}; +static_assert(array_size(link_mode_params) == __ethtool_link_mode_mask_nbits); + diff --git a/net/ethtool/common.h b/net/ethtool/common.h --- a/net/ethtool/common.h +++ b/net/ethtool/common.h +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +extern const struct link_mode_info link_mode_params[]; diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c + const struct link_mode_info *link_info; + int err; + - return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); + + link_ksettings->link_mode = -1; + err = dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); + if (err) + return err; + + if (link_ksettings->link_mode != -1) { + link_info = &link_mode_params[link_ksettings->link_mode]; + link_ksettings->base.speed = link_info->speed; + link_ksettings->lanes = link_info->lanes; + link_ksettings->base.duplex = link_info->duplex; + } + + return 0; diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c +/* linkmodes_get */ + -struct link_mode_info { - int speed; - u8 lanes; - u8 duplex; -}; - -#define __link_mode_lanes_cr 1 -#define __link_mode_lanes_cr2 2 -#define __link_mode_lanes_cr4 4 -#define __link_mode_lanes_cr8 8 -#define __link_mode_lanes_dr 1 -#define __link_mode_lanes_dr2 2 -#define __link_mode_lanes_dr4 4 -#define __link_mode_lanes_dr8 8 -#define __link_mode_lanes_kr 1 -#define __link_mode_lanes_kr2 2 -#define __link_mode_lanes_kr4 4 -#define __link_mode_lanes_kr8 8 -#define __link_mode_lanes_sr 1 -#define __link_mode_lanes_sr2 2 -#define __link_mode_lanes_sr4 4 -#define __link_mode_lanes_sr8 8 -#define __link_mode_lanes_er 1 -#define __link_mode_lanes_kx 1 -#define 
__link_mode_lanes_kx4 4 -#define __link_mode_lanes_lr 1 -#define __link_mode_lanes_lr4 4 -#define __link_mode_lanes_lr4_er4 4 -#define __link_mode_lanes_lr_er_fr 1 -#define __link_mode_lanes_lr2_er2_fr2 2 -#define __link_mode_lanes_lr4_er4_fr4 4 -#define __link_mode_lanes_lr8_er8_fr8 8 -#define __link_mode_lanes_lrm 1 -#define __link_mode_lanes_mld2 2 -#define __link_mode_lanes_t 1 -#define __link_mode_lanes_t1 1 -#define __link_mode_lanes_x 1 -#define __link_mode_lanes_fx 1 - -#define __define_link_mode_params(_speed, _type, _duplex) \ - [ethtool_link_mode(_speed, _type, _duplex)] = { \ - .speed = speed_ ## _speed, \ - .lanes = __link_mode_lanes_ ## _type, \ - .duplex = __duplex_ ## _duplex \ - } -#define __duplex_half duplex_half -#define __duplex_full duplex_full -#define __define_special_mode_params(_mode) \ - [ethtool_link_mode_ ## _mode ## _bit] = { \ - .speed = speed_unknown, \ - .lanes = 0, \ - .duplex = duplex_unknown, \ - } - -static const struct link_mode_info link_mode_params[] = { - __define_link_mode_params(10, t, half), - __define_link_mode_params(10, t, full), - __define_link_mode_params(100, t, half), - __define_link_mode_params(100, t, full), - __define_link_mode_params(1000, t, half), - __define_link_mode_params(1000, t, full), - __define_special_mode_params(autoneg), - __define_special_mode_params(tp), - __define_special_mode_params(aui), - __define_special_mode_params(mii), - __define_special_mode_params(fibre), - __define_special_mode_params(bnc), - __define_link_mode_params(10000, t, full), - __define_special_mode_params(pause), - __define_special_mode_params(asym_pause), - __define_link_mode_params(2500, x, full), - __define_special_mode_params(backplane), - __define_link_mode_params(1000, kx, full), - __define_link_mode_params(10000, kx4, full), - __define_link_mode_params(10000, kr, full), - [ethtool_link_mode_10000baser_fec_bit] = { - .speed = speed_10000, - .duplex = duplex_full, - }, - __define_link_mode_params(20000, mld2, full), - 
__define_link_mode_params(20000, kr2, full), - __define_link_mode_params(40000, kr4, full), - __define_link_mode_params(40000, cr4, full), - __define_link_mode_params(40000, sr4, full), - __define_link_mode_params(40000, lr4, full), - __define_link_mode_params(56000, kr4, full), - __define_link_mode_params(56000, cr4, full), - __define_link_mode_params(56000, sr4, full), - __define_link_mode_params(56000, lr4, full), - __define_link_mode_params(25000, cr, full), - __define_link_mode_params(25000, kr, full), - __define_link_mode_params(25000, sr, full), - __define_link_mode_params(50000, cr2, full), - __define_link_mode_params(50000, kr2, full), - __define_link_mode_params(100000, kr4, full), - __define_link_mode_params(100000, sr4, full), - __define_link_mode_params(100000, cr4, full), - __define_link_mode_params(100000, lr4_er4, full), - __define_link_mode_params(50000, sr2, full), - __define_link_mode_params(1000, x, full), - __define_link_mode_params(10000, cr, full), - __define_link_mode_params(10000, sr, full), - __define_link_mode_params(10000, lr, full), - __define_link_mode_params(10000, lrm, full), - __define_link_mode_params(10000, er, full), - __define_link_mode_params(2500, t, full), - __define_link_mode_params(5000, t, full), - __define_special_mode_params(fec_none), - __define_special_mode_params(fec_rs), - __define_special_mode_params(fec_baser), - __define_link_mode_params(50000, kr, full), - __define_link_mode_params(50000, sr, full), - __define_link_mode_params(50000, cr, full), - __define_link_mode_params(50000, lr_er_fr, full), - __define_link_mode_params(50000, dr, full), - __define_link_mode_params(100000, kr2, full), - __define_link_mode_params(100000, sr2, full), - __define_link_mode_params(100000, cr2, full), - __define_link_mode_params(100000, lr2_er2_fr2, full), - __define_link_mode_params(100000, dr2, full), - __define_link_mode_params(200000, kr4, full), - __define_link_mode_params(200000, sr4, full), - __define_link_mode_params(200000, 
lr4_er4_fr4, full), - __define_link_mode_params(200000, dr4, full), - __define_link_mode_params(200000, cr4, full), - __define_link_mode_params(100, t1, full), - __define_link_mode_params(1000, t1, full), - __define_link_mode_params(400000, kr8, full), - __define_link_mode_params(400000, sr8, full), - __define_link_mode_params(400000, lr8_er8_fr8, full), - __define_link_mode_params(400000, dr8, full), - __define_link_mode_params(400000, cr8, full), - __define_special_mode_params(fec_llrs), - __define_link_mode_params(100000, kr, full), - __define_link_mode_params(100000, sr, full), - __define_link_mode_params(100000, lr_er_fr, full), - __define_link_mode_params(100000, dr, full), - __define_link_mode_params(100000, cr, full), - __define_link_mode_params(200000, kr2, full), - __define_link_mode_params(200000, sr2, full), - __define_link_mode_params(200000, lr2_er2_fr2, full), - __define_link_mode_params(200000, dr2, full), - __define_link_mode_params(200000, cr2, full), - __define_link_mode_params(400000, kr4, full), - __define_link_mode_params(400000, sr4, full), - __define_link_mode_params(400000, lr4_er4_fr4, full), - __define_link_mode_params(400000, dr4, full), - __define_link_mode_params(400000, cr4, full), - __define_link_mode_params(100, fx, half), - __define_link_mode_params(100, fx, full), -}; - - build_bug_on(array_size(link_mode_params) != - __ethtool_link_mode_mask_nbits); -
Networking
c8907043c6ac9ed58e6c1a76f2824be714b42228
danielle ratson
include
linux
ethtool: expose the number of lanes in use
currently, ethtool does not expose how many lanes are used when the link is up.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['c']
1
8
0
--- diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c + if (!dev->ethtool_ops->cap_link_lanes_supported) + data->ksettings.lanes = 0; + + nla_total_size(sizeof(u32)) /* linkmodes_speed */ + + nla_total_size(sizeof(u32)) /* linkmodes_lanes */ + nla_total_size(sizeof(u8)) /* linkmodes_duplex */ + 0; + if (ksettings->lanes && + nla_put_u32(skb, ethtool_a_linkmodes_lanes, ksettings->lanes)) + return -emsgsize; +
Networking
7dc33f0914a9c8f992592cddfab2bab7faf162b9
danielle ratson
net
ethtool
mlxsw: ethtool: remove max lanes filtering
currently, when a speed can be supported by different number of lanes, the supported link modes bitmask contains only link modes with a single number of lanes.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['h', 'c']
2
15
22
--- diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h - u8 width, unsigned long *mode); + unsigned long *mode); - u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width, + u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c - u8 width, struct ethtool_link_ksettings *cmd) + struct ethtool_link_ksettings *cmd) - ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, + ops->from_ptys_link(mlxsw_sp, eth_proto_cap, - u32 eth_proto_admin, bool autoneg, u8 width, + u32 eth_proto_admin, bool autoneg, - ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, + ops->from_ptys_link(mlxsw_sp, eth_proto_admin, - mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, - mlxsw_sp_port->mapping.width, cmd); + mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); - mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, - mlxsw_sp_port->mapping.width, cmd); + mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, cmd); - ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, - cmd) : + ops->to_ptys_advert_link(mlxsw_sp, cmd) : - u8 width, unsigned long *mode) + unsigned long *mode) -mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, +mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, - u8 width, unsigned long *mode) + unsigned long *mode) - u8 mask_width = mlxsw_sp_port_mask_width_get(width); - if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && - (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) + if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) -mlxsw_sp2_to_ptys_advert_link(struct 
mlxsw_sp *mlxsw_sp, u8 width, +mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, - u8 mask_width = mlxsw_sp_port_mask_width_get(width); - if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && - mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], + if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
Networking
5fc4053df3d9af1bf728feff90b8494dc036aae2
danielle ratson
drivers
net
ethernet, mellanox, mlxsw
mlxsw: ethtool: add support for setting lanes when autoneg is off
currently, when auto negotiation is set to off, the user can force a specific speed or both speed and duplex. the user cannot influence the number of lanes that will be forced.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['h', 'c']
2
78
41
--- diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h - u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed); + u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width, + const struct ethtool_link_ksettings *cmd); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c - ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, - cmd->base.speed); + ops->to_ptys_speed_lanes(mlxsw_sp, mlxsw_sp_port->mapping.width, + cmd); - netdev_err(dev, "no supported speed requested "); + netdev_err(dev, "no supported speed or lanes requested "); - .get_drvinfo = mlxsw_sp_port_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_link_ext_state = mlxsw_sp_port_get_link_ext_state, - .get_pauseparam = mlxsw_sp_port_get_pauseparam, - .set_pauseparam = mlxsw_sp_port_set_pauseparam, - .get_strings = mlxsw_sp_port_get_strings, - .set_phys_id = mlxsw_sp_port_set_phys_id, - .get_ethtool_stats = mlxsw_sp_port_get_stats, - .get_sset_count = mlxsw_sp_port_get_sset_count, - .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, - .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, - .get_module_info = mlxsw_sp_get_module_info, - .get_module_eeprom = mlxsw_sp_get_module_eeprom, - .get_ts_info = mlxsw_sp_get_ts_info, + .cap_link_lanes_supported = true, + .get_drvinfo = mlxsw_sp_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ext_state = mlxsw_sp_port_get_link_ext_state, + .get_pauseparam = mlxsw_sp_port_get_pauseparam, + .set_pauseparam = mlxsw_sp_port_set_pauseparam, + .get_strings = mlxsw_sp_port_get_strings, + .set_phys_id = mlxsw_sp_port_set_phys_id, + .get_ethtool_stats = 
mlxsw_sp_port_get_stats, + .get_sset_count = mlxsw_sp_port_get_sset_count, + .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, + .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, + .get_module_info = mlxsw_sp_get_module_info, + .get_module_eeprom = mlxsw_sp_get_module_eeprom, + .get_ts_info = mlxsw_sp_get_ts_info, -static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, - u32 speed) +static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width, + const struct ethtool_link_ksettings *cmd) + if (cmd->lanes > width) + return ptys_proto; + - if (speed == mlxsw_sp1_port_link_mode[i].speed) + if (cmd->base.speed == mlxsw_sp1_port_link_mode[i].speed) - .to_ptys_speed = mlxsw_sp1_to_ptys_speed, + .to_ptys_speed_lanes = mlxsw_sp1_to_ptys_speed_lanes, - u8 mask_width; + u32 width; + u8 mask_sup_width; - .mask_width = mlxsw_sp_port_mask_width_1x | + .mask_sup_width = mlxsw_sp_port_mask_width_1x | + .width = 1, - .mask_width = mlxsw_sp_port_mask_width_1x | + .mask_sup_width = mlxsw_sp_port_mask_width_1x | + .width = 1, - .mask_width = mlxsw_sp_port_mask_width_1x | + .mask_sup_width = mlxsw_sp_port_mask_width_1x | + .width = 1, - .mask_width = mlxsw_sp_port_mask_width_1x | + .mask_sup_width = mlxsw_sp_port_mask_width_1x | + .width = 1, - .mask_width = mlxsw_sp_port_mask_width_4x | + .mask_sup_width = mlxsw_sp_port_mask_width_4x | + .width = 4, - .mask_width = mlxsw_sp_port_mask_width_1x | + .mask_sup_width = mlxsw_sp_port_mask_width_1x | + .width = 1, - .mask_width = mlxsw_sp_port_mask_width_2x | + .mask_sup_width = mlxsw_sp_port_mask_width_2x | + .width = 2, - .mask_width = mlxsw_sp_port_mask_width_1x, + .mask_sup_width = mlxsw_sp_port_mask_width_1x, + .width = 1, - .mask_width = mlxsw_sp_port_mask_width_4x | + .mask_sup_width = mlxsw_sp_port_mask_width_4x | + .width = 4, - .mask_width = mlxsw_sp_port_mask_width_2x, + .mask_sup_width = mlxsw_sp_port_mask_width_2x, + .width = 2, - .mask_width = mlxsw_sp_port_mask_width_4x | + 
.mask_sup_width = mlxsw_sp_port_mask_width_4x | + .width = 4, - .mask_width = mlxsw_sp_port_mask_width_8x, + .mask_sup_width = mlxsw_sp_port_mask_width_8x, + .width = 8, -static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, - u8 width, u32 speed) +static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width, + const struct ethtool_link_ksettings *cmd) + struct mlxsw_sp2_port_link_mode link_mode; + if (cmd->lanes > width) + return ptys_proto; + - if ((speed == mlxsw_sp2_port_link_mode[i].speed) && - (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) - ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; + if (cmd->base.speed == mlxsw_sp2_port_link_mode[i].speed) { + link_mode = mlxsw_sp2_port_link_mode[i]; + + if (!cmd->lanes) { + /* if number of lanes was not set by user space, + * choose the link mode that supports the width + * of the port. + */ + if (mask_width & link_mode.mask_sup_width) + ptys_proto |= link_mode.mask; + } else if (cmd->lanes == link_mode.width) { + /* else if the number of lanes was set, choose + * the link mode that its actual width equals to + * it. + */ + ptys_proto |= link_mode.mask; + } + } - .to_ptys_speed = mlxsw_sp2_to_ptys_speed, + .to_ptys_speed_lanes = mlxsw_sp2_to_ptys_speed_lanes,
Networking
763ece86f0c27c751d6fac6b15863f5124f79a52
danielle ratson
drivers
net
ethernet, mellanox, mlxsw
mlxsw: ethtool: pass link mode in use to ethtool
currently, when user space queries the link's parameters, as speed and duplex, each parameter is passed from the driver to ethtool.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['h', 'c']
2
30
23
--- diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h - void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp, - bool carrier_ok, u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd); + void (*from_ptys_link_mode)(struct mlxsw_sp *mlxsw_sp, + bool carrier_ok, u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c - ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), - eth_proto_oper, cmd); + ops->from_ptys_link_mode(mlxsw_sp, netif_carrier_ok(dev), + eth_proto_oper, cmd); -mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, - u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) +mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) - cmd->base.speed = speed_unknown; - cmd->base.duplex = duplex_unknown; + int i; + + cmd->link_mode = -1; - cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); - if (cmd->base.speed != speed_unknown) - cmd->base.duplex = duplex_full; + for (i = 0; i < mlxsw_sp1_port_link_mode_len; i++) { + if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) + cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool; + } - .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, + .from_ptys_link_mode = mlxsw_sp1_from_ptys_link_mode, -mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, - u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) +mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, + u32 ptys_eth_proto, + struct ethtool_link_ksettings 
*cmd) - cmd->base.speed = speed_unknown; - cmd->base.duplex = duplex_unknown; + struct mlxsw_sp2_port_link_mode link; + int i; + + cmd->link_mode = -1; - cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); - if (cmd->base.speed != speed_unknown) - cmd->base.duplex = duplex_full; + for (i = 0; i < mlxsw_sp2_port_link_mode_len; i++) { + if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) { + link = mlxsw_sp2_port_link_mode[i]; + cmd->link_mode = link.mask_ethtool[1]; + } + } - .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, + .from_ptys_link_mode = mlxsw_sp2_from_ptys_link_mode,
Networking
25a96f057a0fab318376c85bd83afda267f8ad33
danielle ratson
drivers
net
ethernet, mellanox, mlxsw
net: selftests: add lanes setting test
test that setting lanes parameter is working.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support setting lanes via ethtool
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mlxsw']
['sh']
3
249
0
--- diff --git a/tools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh b/tools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh +#!/bin/bash +# spdx-license-identifier: gpl-2.0 + +lib_dir=$(dirname $0)/../../../net/forwarding + +all_tests=" + autoneg + autoneg_force_mode +" + +num_netifs=2 +: ${timeout:=30000} # ms +source $lib_dir/lib.sh +source $lib_dir/ethtool_lib.sh + +setup_prepare() +{ + swp1=${netifs[p1]} + swp2=${netifs[p2]} + + ip link set dev $swp1 up + ip link set dev $swp2 up + + busywait "$timeout" wait_for_port_up ethtool $swp2 + check_err $? "ports did not come up" + + local lanes_exist=$(ethtool $swp1 | grep 'lanes:') + if [[ -z $lanes_exist ]]; then + log_test "skip: driver does not support lanes setting" + exit 1 + fi + + ip link set dev $swp2 down + ip link set dev $swp1 down +} + +check_lanes() +{ + local dev=$1; shift + local lanes=$1; shift + local max_speed=$1; shift + local chosen_lanes + + chosen_lanes=$(ethtool $dev | grep 'lanes:') + chosen_lanes=${chosen_lanes#*"lanes: "} + + ((chosen_lanes == lanes)) + check_err $? "swp1 advertise $max_speed and $lanes, devs sync to $chosen_lanes" +} + +check_unsupported_lanes() +{ + local dev=$1; shift + local max_speed=$1; shift + local max_lanes=$1; shift + local autoneg=$1; shift + local autoneg_str="" + + local unsupported_lanes=$((max_lanes *= 2)) + + if [[ $autoneg -eq 0 ]]; then + autoneg_str="autoneg off" + fi + + ethtool -s $swp1 speed $max_speed lanes $unsupported_lanes $autoneg_str &> /dev/null + check_fail $? 
"unsuccessful $unsupported_lanes lanes setting was expected" +} + +max_speed_and_lanes_get() +{ + local dev=$1; shift + local arr=("$@") + local max_lanes + local max_speed + local -a lanes_arr + local -a speeds_arr + local -a max_values + + for ((i=0; i<${#arr[@]}; i+=2)); do + speeds_arr+=("${arr[$i]}") + lanes_arr+=("${arr[i+1]}") + done + + max_values+=($(get_max "${speeds_arr[@]}")) + max_values+=($(get_max "${lanes_arr[@]}")) + + echo ${max_values[@]} +} + +search_linkmode() +{ + local speed=$1; shift + local lanes=$1; shift + local arr=("$@") + + for ((i=0; i<${#arr[@]}; i+=2)); do + if [[ $speed -eq ${arr[$i]} && $lanes -eq ${arr[i+1]} ]]; then + return 1 + fi + done + return 0 +} + +autoneg() +{ + ret=0 + + local lanes + local max_speed + local max_lanes + + local -a linkmodes_params=($(dev_linkmodes_params_get $swp1 1)) + local -a max_values=($(max_speed_and_lanes_get $swp1 "${linkmodes_params[@]}")) + max_speed=${max_values[0]} + max_lanes=${max_values[1]} + + lanes=$max_lanes + + while [[ $lanes -ge 1 ]]; do + search_linkmode $max_speed $lanes "${linkmodes_params[@]}" + if [[ $? -eq 1 ]]; then + ethtool_set $swp1 speed $max_speed lanes $lanes + ip link set dev $swp1 up + ip link set dev $swp2 up + busywait "$timeout" wait_for_port_up ethtool $swp2 + check_err $? 
"ports did not come up" + + check_lanes $swp1 $lanes $max_speed + log_test "$lanes lanes is autonegotiated" + fi + let $((lanes /= 2)) + done + + check_unsupported_lanes $swp1 $max_speed $max_lanes 1 + log_test "lanes number larger than max width is not set" + + ip link set dev $swp2 down + ip link set dev $swp1 down +} + +autoneg_force_mode() +{ + ret=0 + + local lanes + local max_speed + local max_lanes + + local -a linkmodes_params=($(dev_linkmodes_params_get $swp1 1)) + local -a max_values=($(max_speed_and_lanes_get $swp1 "${linkmodes_params[@]}")) + max_speed=${max_values[0]} + max_lanes=${max_values[1]} + + lanes=$max_lanes + + while [[ $lanes -ge 1 ]]; do + search_linkmode $max_speed $lanes "${linkmodes_params[@]}" + if [[ $? -eq 1 ]]; then + ethtool_set $swp1 speed $max_speed lanes $lanes autoneg off + ethtool_set $swp2 speed $max_speed lanes $lanes autoneg off + ip link set dev $swp1 up + ip link set dev $swp2 up + busywait "$timeout" wait_for_port_up ethtool $swp2 + check_err $? 
"ports did not come up" + + check_lanes $swp1 $lanes $max_speed + log_test "autoneg off, $lanes lanes detected during force mode" + fi + let $((lanes /= 2)) + done + + check_unsupported_lanes $swp1 $max_speed $max_lanes 0 + log_test "lanes number larger than max width is not set" + + ip link set dev $swp2 down + ip link set dev $swp1 down + + ethtool -s $swp2 autoneg on + ethtool -s $swp1 autoneg on +} + +check_ethtool_lanes_support +setup_prepare + +tests_run + +exit $exit_status diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh --- a/tools/testing/selftests/net/forwarding/ethtool_lib.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_lib.sh +dev_linkmodes_params_get() +{ + local dev=$1; shift + local adver=$1; shift + local -a linkmodes_params + local param_count + local arr + + if (($adver)); then + mode="advertised link modes" + else + mode="supported link modes" + fi + + local -a dev_linkmodes=($(dev_speeds_get $dev 1 $adver)) + for ((i=0; i<${#dev_linkmodes[@]}; i++)); do + linkmodes_params[$i]=$(echo -e "${dev_linkmodes[$i]}" | \ + # replaces all non numbers with spaces + sed -e 's/[^0-9]/ /g' | \ + # squeeze spaces in sequence to 1 space + tr -s ' ') + # count how many numbers were found in the linkmode + param_count=$(echo "${linkmodes_params[$i]}" | wc -w) + if [[ $param_count -eq 1 ]]; then + linkmodes_params[$i]="${linkmodes_params[$i]} 1" + elif [[ $param_count -ge 3 ]]; then + arr=(${linkmodes_params[$i]}) + # take only first two params + linkmodes_params[$i]=$(echo "${arr[@]:0:2}") + fi + done + echo ${linkmodes_params[@]} +} + diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh +check_ethtool_lanes_support() +{ + ethtool --help 2>&1| grep lanes &> /dev/null + if [[ $? 
-ne 0 ]]; then + echo "skip: ethtool too old; it is missing lanes support" + exit 1 + fi +} + +get_max() +{ + local arr=("$@") + + max=${arr[0]} + for cur in ${arr[@]}; do + if [[ $cur -gt $max ]]; then + max=$cur + fi + done + + echo $max +} + +wait_for_port_up() +{ + "$@" | grep -q "link detected: yes" +} +
Networking
f72e2f48c71051f54e6fa214dc57f586386173b5
danielle ratson
tools
testing
drivers, forwarding, mlxsw, net, selftests
net: mscc: ocelot: auto-detect packet buffer size and number of frame references
instead of reading these values from the reference manual and writing them down into the driver, it appears that the hardware gives us the option of detecting them dynamically.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
8
26
7
--- diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c - ocelot->shared_queue_sz = felix->info->shared_queue_sz; diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h - int shared_queue_sz; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c - .shared_queue_sz = 128 * 1024, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c - .shared_queue_sz = 256 * 1024, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c - atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) / + atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) / +static void ocelot_detect_features(struct ocelot *ocelot) +{ + int mmgt, eq_ctrl; + + /* for ocelot, felix, seville, serval etc, sys:mmgt:mmgt:freecnt holds + * the number of 240-byte free memory words (aka 4-cell chunks) and not + * 192 bytes as the documentation incorrectly says. 
+ */ + mmgt = ocelot_read(ocelot, sys_mmgt); + ocelot->packet_buffer_size = 240 * sys_mmgt_freecnt(mmgt); + + eq_ctrl = ocelot_read(ocelot, qsys_eq_ctrl); + ocelot->num_frame_refs = qsys_mmgt_eq_ctrl_fp_free_cnt(eq_ctrl); + + dev_info(ocelot->dev, + "detected %d bytes of packet buffer and %d frame references ", + ocelot->packet_buffer_size, ocelot->num_frame_refs); +} + + ocelot_detect_features(ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c - ocelot->shared_queue_sz = 224 * 1024; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h - int shared_queue_sz; + int packet_buffer_size; + int num_frame_refs; diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h --- a/include/soc/mscc/ocelot_qsys.h +++ b/include/soc/mscc/ocelot_qsys.h +#define qsys_mmgt_eq_ctrl_fp_free_cnt(x) ((x) & genmask(15, 0)) +#define qsys_mmgt_eq_ctrl_fp_free_cnt_m genmask(15, 0) +
Networking
f6fe01d6fa24dd3c89996ad82780872441e86bfa
vladimir oltean
include
soc
dsa, ethernet, mscc, ocelot
net: mscc: ocelot: add ops for decoding watermark threshold and occupancy
we'll need to read back the watermark thresholds and occupancy from hardware (for devlink-sb integration), not only to write them as we did so far in ocelot_port_set_maxlen. so introduce 2 new functions in struct ocelot_ops, similar to wm_enc, and implement them for the 3 supported mscc_ocelot switches.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
5
54
6
--- diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c +static u16 vsc9959_wm_dec(u16 wm) +{ + warn_on(wm & ~genmask(8, 0)); + + if (wm & bit(8)) + return (wm & genmask(7, 0)) * 16; + + return wm; +} + +static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & genmask(23, 12)) >> 12; + *maxuse = val & genmask(11, 0); +} + + .wm_dec = vsc9959_wm_dec, + .wm_stat = vsc9959_wm_stat, diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c +static u16 vsc9953_wm_dec(u16 wm) +{ + warn_on(wm & ~genmask(9, 0)); + + if (wm & bit(9)) + return (wm & genmask(8, 0)) * 16; + + return wm; +} + +static void vsc9953_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & genmask(25, 13)) >> 13; + *maxuse = val & genmask(12, 0); +} + + .wm_dec = vsc9953_wm_dec, + .wm_stat = vsc9953_wm_stat, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c +static u16 ocelot_wm_dec(u16 wm) +{ + if (wm & bit(8)) + return (wm & genmask(7, 0)) * 16; + + return wm; +} + +static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse) +{ + *inuse = (val & genmask(23, 12)) >> 12; + *maxuse = val & genmask(11, 0); +} + + .wm_dec = ocelot_wm_dec, + .wm_stat = ocelot_wm_stat, diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h + u16 (*wm_dec)(u16 value); + void (*wm_stat)(u32 val, u32 *inuse, u32 *maxuse); diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h --- a/include/soc/mscc/ocelot_qsys.h +++ b/include/soc/mscc/ocelot_qsys.h -#define qsys_res_stat_inuse(x) (((x) << 
12) & genmask(23, 12)) -#define qsys_res_stat_inuse_m genmask(23, 12) -#define qsys_res_stat_inuse_x(x) (((x) & genmask(23, 12)) >> 12) -#define qsys_res_stat_maxuse(x) ((x) & genmask(11, 0)) -#define qsys_res_stat_maxuse_m genmask(11, 0) -
Networking
703b762190e643bf46a048ebe99504b14d71449c
vladimir oltean
include
soc
dsa, ethernet, mscc, ocelot
net: dsa: add ops for devlink-sb
switches that care about qos might have hardware support for reserving buffer pools for individual ports or traffic classes, and configuring their sizes and thresholds. through devlink-sb (shared buffers), this is all configurable, as well as their occupancy being viewable.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
2
192
1
--- diff --git a/include/net/dsa.h b/include/net/dsa.h --- a/include/net/dsa.h +++ b/include/net/dsa.h + int (*devlink_sb_pool_get)(struct dsa_switch *ds, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info); + int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack); + int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); + int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, + struct netlink_ext_ack *extack); + int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); + int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack); + int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds, + unsigned int sb_index); + int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds, + unsigned int sb_index); + int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); + int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c +static int dsa_devlink_sb_pool_get(struct devlink *dl, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + + if (!ds->ops->devlink_sb_pool_get) + return -eopnotsupp; + + return ds->ops->devlink_sb_pool_get(ds, sb_index, 
pool_index, + pool_info); +} + +static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + + if (!ds->ops->devlink_sb_pool_set) + return -eopnotsupp; + + return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size, + threshold_type, extack); +} + +static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp); + int port = dsa_devlink_port_to_port(dlp); + + if (!ds->ops->devlink_sb_port_pool_get) + return -eopnotsupp; + + return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index, + pool_index, p_threshold); +} + +static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 threshold, + struct netlink_ext_ack *extack) +{ + struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp); + int port = dsa_devlink_port_to_port(dlp); + + if (!ds->ops->devlink_sb_port_pool_set) + return -eopnotsupp; + + return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index, + pool_index, threshold, extack); +} + +static int +dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp); + int port = dsa_devlink_port_to_port(dlp); + + if (!ds->ops->devlink_sb_tc_pool_bind_get) + return -eopnotsupp; + + return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index, + tc_index, pool_type, + p_pool_index, p_threshold); +} + +static int +dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + struct dsa_switch *ds = 
dsa_devlink_port_to_ds(dlp); + int port = dsa_devlink_port_to_port(dlp); + + if (!ds->ops->devlink_sb_tc_pool_bind_set) + return -eopnotsupp; + + return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index, + tc_index, pool_type, + pool_index, threshold, + extack); +} + +static int dsa_devlink_sb_occ_snapshot(struct devlink *dl, + unsigned int sb_index) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + + if (!ds->ops->devlink_sb_occ_snapshot) + return -eopnotsupp; + + return ds->ops->devlink_sb_occ_snapshot(ds, sb_index); +} + +static int dsa_devlink_sb_occ_max_clear(struct devlink *dl, + unsigned int sb_index) +{ + struct dsa_switch *ds = dsa_devlink_to_ds(dl); + + if (!ds->ops->devlink_sb_occ_max_clear) + return -eopnotsupp; + + return ds->ops->devlink_sb_occ_max_clear(ds, sb_index); +} + +static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, + u16 pool_index, u32 *p_cur, + u32 *p_max) +{ + struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp); + int port = dsa_devlink_port_to_port(dlp); + + if (!ds->ops->devlink_sb_occ_port_pool_get) + return -eopnotsupp; + + return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index, + pool_index, p_cur, p_max); +} + +static int +dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp); + int port = dsa_devlink_port_to_port(dlp); + + if (!ds->ops->devlink_sb_occ_tc_port_bind_get) + return -eopnotsupp; + + return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port, + sb_index, tc_index, + pool_type, p_cur, + p_max); +} + - .info_get = dsa_devlink_info_get, + .info_get = dsa_devlink_info_get, + .sb_pool_get = dsa_devlink_sb_pool_get, + .sb_pool_set = dsa_devlink_sb_pool_set, + .sb_port_pool_get = dsa_devlink_sb_port_pool_get, + .sb_port_pool_set = dsa_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = 
dsa_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set, + .sb_occ_snapshot = dsa_devlink_sb_occ_snapshot, + .sb_occ_max_clear = dsa_devlink_sb_occ_max_clear, + .sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
Networking
2a6ef763037238a5aa6a6505fc6693ee77c1a59b
vladimir oltean
include
net
net: dsa: felix: reindent struct dsa_switch_ops
the devlink function pointer names are super long, and they would break the alignment. so reindent the existing ops now by adding one tab.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['c']
1
37
37
--- diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c - .get_tag_protocol = felix_get_tag_protocol, - .setup = felix_setup, - .teardown = felix_teardown, - .set_ageing_time = felix_set_ageing_time, - .get_strings = felix_get_strings, - .get_ethtool_stats = felix_get_ethtool_stats, - .get_sset_count = felix_get_sset_count, - .get_ts_info = felix_get_ts_info, - .phylink_validate = felix_phylink_validate, - .phylink_mac_config = felix_phylink_mac_config, - .phylink_mac_link_down = felix_phylink_mac_link_down, - .phylink_mac_link_up = felix_phylink_mac_link_up, - .port_enable = felix_port_enable, - .port_disable = felix_port_disable, - .port_fdb_dump = felix_fdb_dump, - .port_fdb_add = felix_fdb_add, - .port_fdb_del = felix_fdb_del, - .port_mdb_add = felix_mdb_add, - .port_mdb_del = felix_mdb_del, - .port_bridge_join = felix_bridge_join, - .port_bridge_leave = felix_bridge_leave, - .port_stp_state_set = felix_bridge_stp_state_set, - .port_vlan_filtering = felix_vlan_filtering, - .port_vlan_add = felix_vlan_add, - .port_vlan_del = felix_vlan_del, - .port_hwtstamp_get = felix_hwtstamp_get, - .port_hwtstamp_set = felix_hwtstamp_set, - .port_rxtstamp = felix_rxtstamp, - .port_txtstamp = felix_txtstamp, - .port_change_mtu = felix_change_mtu, - .port_max_mtu = felix_get_max_mtu, - .port_policer_add = felix_port_policer_add, - .port_policer_del = felix_port_policer_del, - .cls_flower_add = felix_cls_flower_add, - .cls_flower_del = felix_cls_flower_del, - .cls_flower_stats = felix_cls_flower_stats, - .port_setup_tc = felix_port_setup_tc, + .get_tag_protocol = felix_get_tag_protocol, + .setup = felix_setup, + .teardown = felix_teardown, + .set_ageing_time = felix_set_ageing_time, + .get_strings = felix_get_strings, + .get_ethtool_stats = felix_get_ethtool_stats, + .get_sset_count = felix_get_sset_count, + .get_ts_info = felix_get_ts_info, + .phylink_validate = 
felix_phylink_validate, + .phylink_mac_config = felix_phylink_mac_config, + .phylink_mac_link_down = felix_phylink_mac_link_down, + .phylink_mac_link_up = felix_phylink_mac_link_up, + .port_enable = felix_port_enable, + .port_disable = felix_port_disable, + .port_fdb_dump = felix_fdb_dump, + .port_fdb_add = felix_fdb_add, + .port_fdb_del = felix_fdb_del, + .port_mdb_add = felix_mdb_add, + .port_mdb_del = felix_mdb_del, + .port_bridge_join = felix_bridge_join, + .port_bridge_leave = felix_bridge_leave, + .port_stp_state_set = felix_bridge_stp_state_set, + .port_vlan_filtering = felix_vlan_filtering, + .port_vlan_add = felix_vlan_add, + .port_vlan_del = felix_vlan_del, + .port_hwtstamp_get = felix_hwtstamp_get, + .port_hwtstamp_set = felix_hwtstamp_set, + .port_rxtstamp = felix_rxtstamp, + .port_txtstamp = felix_txtstamp, + .port_change_mtu = felix_change_mtu, + .port_max_mtu = felix_get_max_mtu, + .port_policer_add = felix_port_policer_add, + .port_policer_del = felix_port_policer_del, + .cls_flower_add = felix_cls_flower_add, + .cls_flower_del = felix_cls_flower_del, + .cls_flower_stats = felix_cls_flower_stats, + .port_setup_tc = felix_port_setup_tc,
Networking
a7096915e4276fff6905a8eff89986ef9704bbe7
vladimir oltean florian fainelli f fainelli gmail com
drivers
net
dsa, ocelot
net: dsa: felix: perform teardown in reverse order of setup
in general it is desirable that cleanup is the reverse process of setup. in this case i am not seeing any particular issue, but with the introduction of devlink-sb for felix, a non-obvious decision had to be made as to where to put its cleanup method. when there's a convention in place, that decision becomes obvious.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['c']
1
5
5
--- diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c - if (felix->info->mdio_bus_free) - felix->info->mdio_bus_free(ocelot); + ocelot_deinit_timestamp(ocelot); + ocelot_deinit(ocelot); - ocelot_deinit_timestamp(ocelot); - /* stop workqueue thread */ - ocelot_deinit(ocelot); + + if (felix->info->mdio_bus_free) + felix->info->mdio_bus_free(ocelot);
Networking
d19741b0f54487cf3a11307900f8633935cd2849
vladimir oltean florian fainelli f fainelli gmail com
drivers
net
dsa, ocelot
net: mscc: ocelot: export num_tc constant from felix to common switch lib
we should be moving anything that isn't dsa-specific or soc-specific out of the felix dsa driver, and into the common mscc_ocelot switch library.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
5
5
4
--- diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c - for (i = 0; i < felix_num_tc * 2; i++) { + for (i = 0; i < ocelot_num_tc * 2; i++) { diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h -#define felix_num_tc 8 diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c - .num_tx_queues = felix_num_tc, + .num_tx_queues = ocelot_num_tc, - ocelot->num_flooding_pgids = felix_num_tc; + ocelot->num_flooding_pgids = ocelot_num_tc; diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c + .num_tx_queues = ocelot_num_tc, diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h +#define ocelot_num_tc 8
Networking
70d39a6e62d31a4a7372a712ccc6f8063bbb1550
vladimir oltean
include
soc
dsa, mscc, ocelot
net: mscc: ocelot: delete unused ocelot_set_cpu_port prototype
this is a leftover of commit 69df578c5f4b ("net: mscc: ocelot: eliminate confusion between cpu and npi port") which renamed that function.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h']
1
0
4
--- diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h -void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, - enum ocelot_tag_prefix injection, - enum ocelot_tag_prefix extraction); -
Networking
c6c65d47ddebe82cf32f98ea56f10daf82dab16c
vladimir oltean florian fainelli f fainelli gmail com
drivers
net
ethernet, mscc
net: mscc: ocelot: register devlink ports
add devlink integration into the mscc_ocelot switchdev driver. all physical ports (i.e. the unused ones as well) except the cpu port module at ocelot->num_phys_ports are registered with devlink, and that requires keeping the devlink_port structure outside struct ocelot_port_private, since the latter has a 1:1 mapping with a struct net_device (which does not exist for unused ports).
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
4
148
44
--- diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h +int ocelot_devlink_init(struct ocelot *ocelot); +void ocelot_devlink_teardown(struct ocelot *ocelot); +int ocelot_port_devlink_init(struct ocelot *ocelot, int port, + enum devlink_port_flavour flavour); +void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port); +extern const struct devlink_ops ocelot_devlink_ops; diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c +const struct devlink_ops ocelot_devlink_ops = { +}; + +int ocelot_port_devlink_init(struct ocelot *ocelot, int port, + enum devlink_port_flavour flavour) +{ + struct devlink_port *dlp = &ocelot->devlink_ports[port]; + int id_len = sizeof(ocelot->base_mac); + struct devlink *dl = ocelot->devlink; + struct devlink_port_attrs attrs = {}; + + memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len); + attrs.switch_id.id_len = id_len; + attrs.phys.port_number = port; + attrs.flavour = flavour; + + devlink_port_attrs_set(dlp, &attrs); + + return devlink_port_register(dl, dlp, port); +} + +void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port) +{ + struct devlink_port *dlp = &ocelot->devlink_ports[port]; + + devlink_port_unregister(dlp); +} + +static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return &ocelot->devlink_ports[port]; +} + -static int ocelot_port_get_phys_port_name(struct net_device *dev, - char *buf, size_t len) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - int port = priv->chip_port; - int ret; - - ret = snprintf(buf, len, "p%d", port); - if (ret >= len) - return -einval; - - return 0; -} - -static int 
ocelot_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - - ppid->id_len = sizeof(ocelot->base_mac); - memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); - - return 0; -} - - .ndo_get_phys_port_name = ocelot_port_get_phys_port_name, - .ndo_get_port_parent_id = ocelot_get_port_parent_id, + .ndo_get_devlink_port = ocelot_get_devlink_port, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c +static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_port_devlink_teardown(ocelot, port); +} + - int err; + bool *registered_ports; + int port, err; + u32 reg; + ocelot->devlink_ports = devm_kcalloc(ocelot->dev, + ocelot->num_phys_ports, + sizeof(*ocelot->devlink_ports), + gfp_kernel); + if (!ocelot->devlink_ports) + return -enomem; + + registered_ports = kcalloc(ocelot->num_phys_ports, sizeof(bool), + gfp_kernel); + if (!registered_ports) + return -enomem; + + struct devlink_port *dlp; - u32 port; - if (of_property_read_u32(portnp, "reg", &port)) + if (of_property_read_u32(portnp, "reg", &reg)) + port = reg; + + err = ocelot_port_devlink_init(ocelot, port, + devlink_port_flavour_physical); + if (err) { + of_node_put(portnp); + goto out_teardown; + } + - return err; + goto out_teardown; + registered_ports[port] = true; + + dlp = &ocelot->devlink_ports[port]; + devlink_port_type_eth_set(dlp, priv->dev); - return -einval; + err = -einval; + goto out_teardown; - return err; + goto out_teardown; + /* initialize unused devlink ports at the end */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (registered_ports[port]) + continue; + + err = ocelot_port_devlink_init(ocelot, port, + 
devlink_port_flavour_unused); + if (err) { + while (port-- >= 0) { + if (!registered_ports[port]) + continue; + ocelot_port_devlink_teardown(ocelot, port); + } + + goto out_teardown; + } + } + + kfree(registered_ports); + + +out_teardown: + /* unregister the network interfaces */ + mscc_ocelot_release_ports(ocelot); + /* tear down devlink ports for the registered network interfaces */ + for (port = 0; port < ocelot->num_phys_ports; port++) { + if (!registered_ports[port]) + continue; + + ocelot_port_devlink_teardown(ocelot, port); + } + kfree(registered_ports); + return err; + struct devlink *devlink; - ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), gfp_kernel); - if (!ocelot) + devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot)); + if (!devlink) + ocelot = devlink_priv(devlink); + ocelot->devlink = priv_to_devlink(ocelot); - return ptr_err(target); + err = ptr_err(target); + goto out_free_devlink; - return ptr_err(hsio); + err = ptr_err(hsio); + goto out_free_devlink; - return err; + goto out_free_devlink; - return -enodev; + goto out_free_devlink; - return err; + goto out_free_devlink; - return err; + goto out_free_devlink; - return -enodev; + err = -enodev; + goto out_free_devlink; - err = mscc_ocelot_init_ports(pdev, ports); + err = devlink_register(devlink, ocelot->dev); + err = mscc_ocelot_init_ports(pdev, ports); + if (err) + goto out_ocelot_devlink_unregister; + +out_ocelot_devlink_unregister: + devlink_unregister(devlink); +out_free_devlink: + devlink_free(devlink); + mscc_ocelot_teardown_devlink_ports(ocelot); + devlink_unregister(ocelot->devlink); + devlink_free(ocelot->devlink); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h + struct devlink *devlink; + struct devlink_port *devlink_ports;
Networking
6c30384eb1dec96b678ff9c01c15134b1a0e81f4
vladimir oltean
include
soc
ethernet, mscc
net: mscc: ocelot: initialize watermarks to sane defaults
this is meant to be a gentle introduction into the world of watermarks on ocelot. the code is placed in ocelot_devlink.c because it will be integrated with devlink, even if it isn't right now.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c', 'makefile']
4
446
1
--- diff --git a/drivers/net/ethernet/mscc/makefile b/drivers/net/ethernet/mscc/makefile --- a/drivers/net/ethernet/mscc/makefile +++ b/drivers/net/ethernet/mscc/makefile - ocelot_ptp.o + ocelot_ptp.o \ + ocelot_devlink.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c + ocelot_watermark_init(ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h +void ocelot_watermark_init(struct ocelot *ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_devlink.c +// spdx-license-identifier: (gpl-2.0 or mit) +/* copyright 2020-2021 nxp semiconductors + */ +#include <net/devlink.h> +#include "ocelot.h" + +/* the queue system tracks four resource consumptions: + * resource 0: memory tracked per source port + * resource 1: frame references tracked per source port + * resource 2: memory tracked per destination port + * resource 3: frame references tracked per destination port + */ +#define ocelot_resource_sz 256 +#define ocelot_num_resources 4 + +#define buf_xxxx_i (0 * ocelot_resource_sz) +#define ref_xxxx_i (1 * ocelot_resource_sz) +#define buf_xxxx_e (2 * ocelot_resource_sz) +#define ref_xxxx_e (3 * ocelot_resource_sz) + +/* for each resource type there are 4 types of watermarks: + * q_rsrv: reservation per qos class per port + * prio_shr: sharing watermark per qos class across all ports + * p_rsrv: reservation per port + * col_shr: sharing watermark per color (drop precedence) across all ports + */ +#define xxx_q_rsrv_x 0 +#define xxx_prio_shr_x 216 +#define xxx_p_rsrv_x 224 +#define xxx_col_shr_x 254 + +/* reservation watermarks + * ---------------------- + * + * for setting up the reserved areas, egress watermarks exist per port and 
per + * qos class for both ingress and egress. + */ + +/* amount of packet buffer + * | per qos class + * | | reserved + * | | | per egress port + * | | | | + * v v v v + * buf_q_rsrv_e + */ +#define buf_q_rsrv_e(port, prio) \ + (buf_xxxx_e + xxx_q_rsrv_x + ocelot_num_tc * (port) + (prio)) + +/* amount of packet buffer + * | for all port's traffic classes + * | | reserved + * | | | per egress port + * | | | | + * v v v v + * buf_p_rsrv_e + */ +#define buf_p_rsrv_e(port) \ + (buf_xxxx_e + xxx_p_rsrv_x + (port)) + +/* amount of packet buffer + * | per qos class + * | | reserved + * | | | per ingress port + * | | | | + * v v v v + * buf_q_rsrv_i + */ +#define buf_q_rsrv_i(port, prio) \ + (buf_xxxx_i + xxx_q_rsrv_x + ocelot_num_tc * (port) + (prio)) + +/* amount of packet buffer + * | for all port's traffic classes + * | | reserved + * | | | per ingress port + * | | | | + * v v v v + * buf_p_rsrv_i + */ +#define buf_p_rsrv_i(port) \ + (buf_xxxx_i + xxx_p_rsrv_x + (port)) + +/* amount of frame references + * | per qos class + * | | reserved + * | | | per egress port + * | | | | + * v v v v + * ref_q_rsrv_e + */ +#define ref_q_rsrv_e(port, prio) \ + (ref_xxxx_e + xxx_q_rsrv_x + ocelot_num_tc * (port) + (prio)) + +/* amount of frame references + * | for all port's traffic classes + * | | reserved + * | | | per egress port + * | | | | + * v v v v + * ref_p_rsrv_e + */ +#define ref_p_rsrv_e(port) \ + (ref_xxxx_e + xxx_p_rsrv_x + (port)) + +/* amount of frame references + * | per qos class + * | | reserved + * | | | per ingress port + * | | | | + * v v v v + * ref_q_rsrv_i + */ +#define ref_q_rsrv_i(port, prio) \ + (ref_xxxx_i + xxx_q_rsrv_x + ocelot_num_tc * (port) + (prio)) + +/* amount of frame references + * | for all port's traffic classes + * | | reserved + * | | | per ingress port + * | | | | + * v v v v + * ref_p_rsrv_i + */ +#define ref_p_rsrv_i(port) \ + (ref_xxxx_i + xxx_p_rsrv_x + (port)) + +/* sharing watermarks + * ------------------ + * + * the shared memory 
area is shared between all ports. + */ + +/* amount of buffer + * | per qos class + * | | from the shared memory area + * | | | for egress traffic + * | | | | + * v v v v + * buf_prio_shr_e + */ +#define buf_prio_shr_e(prio) \ + (buf_xxxx_e + xxx_prio_shr_x + (prio)) + +/* amount of buffer + * | per color (drop precedence level) + * | | from the shared memory area + * | | | for egress traffic + * | | | | + * v v v v + * buf_col_shr_e + */ +#define buf_col_shr_e(dp) \ + (buf_xxxx_e + xxx_col_shr_x + (1 - (dp))) + +/* amount of buffer + * | per qos class + * | | from the shared memory area + * | | | for ingress traffic + * | | | | + * v v v v + * buf_prio_shr_i + */ +#define buf_prio_shr_i(prio) \ + (buf_xxxx_i + xxx_prio_shr_x + (prio)) + +/* amount of buffer + * | per color (drop precedence level) + * | | from the shared memory area + * | | | for ingress traffic + * | | | | + * v v v v + * buf_col_shr_i + */ +#define buf_col_shr_i(dp) \ + (buf_xxxx_i + xxx_col_shr_x + (1 - (dp))) + +/* amount of frame references + * | per qos class + * | | from the shared area + * | | | for egress traffic + * | | | | + * v v v v + * ref_prio_shr_e + */ +#define ref_prio_shr_e(prio) \ + (ref_xxxx_e + xxx_prio_shr_x + (prio)) + +/* amount of frame references + * | per color (drop precedence level) + * | | from the shared area + * | | | for egress traffic + * | | | | + * v v v v + * ref_col_shr_e + */ +#define ref_col_shr_e(dp) \ + (ref_xxxx_e + xxx_col_shr_x + (1 - (dp))) + +/* amount of frame references + * | per qos class + * | | from the shared area + * | | | for ingress traffic + * | | | | + * v v v v + * ref_prio_shr_i + */ +#define ref_prio_shr_i(prio) \ + (ref_xxxx_i + xxx_prio_shr_x + (prio)) + +/* amount of frame references + * | per color (drop precedence level) + * | | from the shared area + * | | | for ingress traffic + * | | | | + * v v v v + * ref_col_shr_i + */ +#define ref_col_shr_i(dp) \ + (ref_xxxx_i + xxx_col_shr_x + (1 - (dp))) + +static u32 ocelot_wm_read(struct 
ocelot *ocelot, int index) +{ + int wm = ocelot_read_gix(ocelot, qsys_res_cfg, index); + + return ocelot->ops->wm_dec(wm); +} + +static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val) +{ + u32 wm = ocelot->ops->wm_enc(val); + + ocelot_write_gix(ocelot, wm, qsys_res_cfg, index); +} + +/* the hardware comes out of reset with strange defaults: the sum of all + * reservations for frame memory is larger than the total buffer size. + * one has to wonder how can the reservation watermarks still guarantee + * anything under congestion. + * bring some sense into the hardware by changing the defaults to disable all + * reservations and rely only on the sharing watermark for frames with drop + * precedence 0. the user can still explicitly request reservations per port + * and per port-tc through devlink-sb. + */ +static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot, + int port) +{ + int prio; + + for (prio = 0; prio < ocelot_num_tc; prio++) { + ocelot_wm_write(ocelot, buf_q_rsrv_i(port, prio), 0); + ocelot_wm_write(ocelot, buf_q_rsrv_e(port, prio), 0); + ocelot_wm_write(ocelot, ref_q_rsrv_i(port, prio), 0); + ocelot_wm_write(ocelot, ref_q_rsrv_e(port, prio), 0); + } + + ocelot_wm_write(ocelot, buf_p_rsrv_i(port), 0); + ocelot_wm_write(ocelot, buf_p_rsrv_e(port), 0); + ocelot_wm_write(ocelot, ref_p_rsrv_i(port), 0); + ocelot_wm_write(ocelot, ref_p_rsrv_e(port), 0); +} + +/* we want the sharing watermarks to consume all nonreserved resources, for + * efficient resource utilization (a single traffic flow should be able to use + * up the entire buffer space and frame resources as long as there's no + * interference). + * the switch has 10 sharing watermarks per lookup: 8 per traffic class and 2 + * per color (drop precedence). 
+ * the trouble with configuring these sharing watermarks is that: + * (1) there's a risk that we overcommit the resources if we configure + * (a) all 8 per-tc sharing watermarks to the max + * (b) all 2 per-color sharing watermarks to the max + * (2) there's a risk that we undercommit the resources if we configure + * (a) all 8 per-tc sharing watermarks to "max / 8" + * (b) all 2 per-color sharing watermarks to "max / 2" + * so for linux, let's just disable the sharing watermarks per traffic class + * (setting them to 0 will make them always exceeded), and rely only on the + * sharing watermark for drop priority 0. so frames with drop priority set to 1 + * by qos classification or policing will still be allowed, but only as long as + * the port and port-tc reservations are not exceeded. + */ +static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot) +{ + int prio; + + for (prio = 0; prio < ocelot_num_tc; prio++) { + ocelot_wm_write(ocelot, buf_prio_shr_i(prio), 0); + ocelot_wm_write(ocelot, buf_prio_shr_e(prio), 0); + ocelot_wm_write(ocelot, ref_prio_shr_i(prio), 0); + ocelot_wm_write(ocelot, ref_prio_shr_e(prio), 0); + } +} + +static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i, + u32 *buf_rsrv_e) +{ + int port, prio; + + *buf_rsrv_i = 0; + *buf_rsrv_e = 0; + + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < ocelot_num_tc; prio++) { + *buf_rsrv_i += ocelot_wm_read(ocelot, + buf_q_rsrv_i(port, prio)); + *buf_rsrv_e += ocelot_wm_read(ocelot, + buf_q_rsrv_e(port, prio)); + } + + *buf_rsrv_i += ocelot_wm_read(ocelot, buf_p_rsrv_i(port)); + *buf_rsrv_e += ocelot_wm_read(ocelot, buf_p_rsrv_e(port)); + } + + *buf_rsrv_i *= ocelot_buffer_cell_sz; + *buf_rsrv_e *= ocelot_buffer_cell_sz; +} + +static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i, + u32 *ref_rsrv_e) +{ + int port, prio; + + *ref_rsrv_i = 0; + *ref_rsrv_e = 0; + + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for 
(prio = 0; prio < ocelot_num_tc; prio++) { + *ref_rsrv_i += ocelot_wm_read(ocelot, + ref_q_rsrv_i(port, prio)); + *ref_rsrv_e += ocelot_wm_read(ocelot, + ref_q_rsrv_e(port, prio)); + } + + *ref_rsrv_i += ocelot_wm_read(ocelot, ref_p_rsrv_i(port)); + *ref_rsrv_e += ocelot_wm_read(ocelot, ref_p_rsrv_e(port)); + } +} + +/* calculate all reservations, then set up the sharing watermark for dp=0 to + * consume the remaining resources up to the pool's configured size. + */ +static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot) +{ + u32 buf_rsrv_i, buf_rsrv_e; + u32 ref_rsrv_i, ref_rsrv_e; + u32 buf_shr_i, buf_shr_e; + u32 ref_shr_i, ref_shr_e; + + ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e); + ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e); + + buf_shr_i = ocelot->packet_buffer_size - buf_rsrv_i; + buf_shr_e = ocelot->packet_buffer_size - buf_rsrv_e; + ref_shr_i = ocelot->num_frame_refs - ref_rsrv_i; + ref_shr_e = ocelot->num_frame_refs - ref_rsrv_e; + + buf_shr_i /= ocelot_buffer_cell_sz; + buf_shr_e /= ocelot_buffer_cell_sz; + + ocelot_wm_write(ocelot, buf_col_shr_i(0), buf_shr_i); + ocelot_wm_write(ocelot, buf_col_shr_e(0), buf_shr_e); + ocelot_wm_write(ocelot, ref_col_shr_e(0), ref_shr_e); + ocelot_wm_write(ocelot, ref_col_shr_i(0), ref_shr_i); + ocelot_wm_write(ocelot, buf_col_shr_i(1), 0); + ocelot_wm_write(ocelot, buf_col_shr_e(1), 0); + ocelot_wm_write(ocelot, ref_col_shr_e(1), 0); + ocelot_wm_write(ocelot, ref_col_shr_i(1), 0); +} + +/* the hardware works like this: + * + * frame forwarding decision taken + * | + * v + * +--------------------+--------------------+--------------------+ + * | | | | + * v v v v + * ingress memory egress memory ingress frame egress frame + * check check reference check reference check + * | | | | + * v v v v + * buf_q_rsrv_i ok buf_q_rsrv_e ok ref_q_rsrv_i ok ref_q_rsrv_e ok + *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+ + * | | | | | | | | + * |exceeded | |exceeded | 
|exceeded | |exceeded | + * v | v | v | v | + * buf_p_rsrv_i ok| buf_p_rsrv_e ok| ref_p_rsrv_i ok| ref_p_rsrv_e ok| + * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * buf_prio_shr_i ok| buf_prio_shr_e ok| ref_prio_shr_i ok| ref_prio_shr_e ok| + * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v | v | v | v | + * buf_col_shr_i ok| buf_col_shr_e ok| ref_col_shr_i ok| ref_col_shr_e ok| + * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+ + * | | | | | | | | + * |exceeded | |exceeded | |exceeded | |exceeded | + * v v v v v v v v + * fail success fail success fail success fail success + * | | | | | | | | + * v v v v v v v v + * +-----+----+ +-----+----+ +-----+----+ +-----+-----+ + * | | | | + * +-------> or <-------+ +-------> or <-------+ + * | | + * v v + * +----------------> and <-----------------+ + * | + * v + * fifo drop / accept + * + * we are modeling each of the 4 parallel lookups as a devlink-sb pool. + * at least one (ingress or egress) memory pool and one (ingress or egress) + * frame reference pool need to have resources for frame acceptance to succeed. + * + * the following watermarks are controlled explicitly through devlink-sb: + * buf_q_rsrv_i, buf_q_rsrv_e, ref_q_rsrv_i, ref_q_rsrv_e + * buf_p_rsrv_i, buf_p_rsrv_e, ref_p_rsrv_i, ref_p_rsrv_e + * the following watermarks are controlled implicitly through devlink-sb: + * buf_col_shr_i, buf_col_shr_e, ref_col_shr_i, ref_col_shr_e + * the following watermarks are unused and disabled: + * buf_prio_shr_i, buf_prio_shr_e, ref_prio_shr_i, ref_prio_shr_e + * + * this function overrides the hardware defaults with more sane ones (no + * reservations by default, let sharing use all resources) and disables the + * unused watermarks. 
+ */ +void ocelot_watermark_init(struct ocelot *ocelot) +{ + int all_tcs = genmask(ocelot_num_tc - 1, 0); + int port; + + ocelot_write(ocelot, all_tcs, qsys_res_qos_mode); + + for (port = 0; port <= ocelot->num_phys_ports; port++) + ocelot_disable_reservation_watermarks(ocelot, port); + + ocelot_disable_tc_sharing_watermarks(ocelot); + ocelot_setup_sharing_watermarks(ocelot); +}
Networking
a4ae997adcbdf8ead133bafa5e9e2d6925c576b6
vladimir oltean
drivers
net
ethernet, mscc
net: mscc: ocelot: configure watermarks using devlink-sb
using devlink-sb, we can configure 12/16 (the important 75%) of the switch's controlling watermarks for congestion drops, and we can monitor 50% of the watermark occupancies (we can monitor the reservation watermarks, but not the sharing watermarks, which are exposed as pool sizes).
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
configuring congestion watermarks on ocelot switch using devlink-sb
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
7
761
11
--- diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c + ocelot->devlink = felix->ds->devlink; + err = ocelot_devlink_sb_register(ocelot); + if (err) + return err; + + ocelot_devlink_sb_unregister(ocelot); +static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info); +} + +static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size, + threshold_type, extack); +} + +static int felix_sb_port_pool_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index, + p_threshold); +} + +static int felix_sb_port_pool_set(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index, + threshold, extack); +} + +static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_pool_index, + p_threshold); +} + +static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) 
+{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index, + pool_type, pool_index, threshold, + extack); +} + +static int felix_sb_occ_snapshot(struct dsa_switch *ds, + unsigned int sb_index) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_snapshot(ocelot, sb_index); +} + +static int felix_sb_occ_max_clear(struct dsa_switch *ds, + unsigned int sb_index) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_max_clear(ocelot, sb_index); +} + +static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index, + p_cur, p_max); +} + +static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_cur, p_max); +} + + .devlink_sb_pool_get = felix_sb_pool_get, + .devlink_sb_pool_set = felix_sb_pool_set, + .devlink_sb_port_pool_get = felix_sb_port_pool_get, + .devlink_sb_port_pool_set = felix_sb_port_pool_set, + .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get, + .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set, + .devlink_sb_occ_snapshot = felix_sb_occ_snapshot, + .devlink_sb_occ_max_clear = felix_sb_occ_max_clear, + .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get, + .devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c - - dev_info(ocelot->dev, - "detected %d bytes of packet buffer and %d frame references ", - ocelot->packet_buffer_size, ocelot->num_frame_refs); - 
ocelot_watermark_init(ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h -void ocelot_watermark_init(struct ocelot *ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c --- a/drivers/net/ethernet/mscc/ocelot_devlink.c +++ b/drivers/net/ethernet/mscc/ocelot_devlink.c +static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse, + u32 *maxuse) +{ + int res_stat = ocelot_read_gix(ocelot, qsys_res_stat, index); + + return ocelot->ops->wm_stat(res_stat, inuse, maxuse); +} + - buf_shr_i = ocelot->packet_buffer_size - buf_rsrv_i; - buf_shr_e = ocelot->packet_buffer_size - buf_rsrv_e; - ref_shr_i = ocelot->num_frame_refs - ref_rsrv_i; - ref_shr_e = ocelot->num_frame_refs - ref_rsrv_e; + buf_shr_i = ocelot->pool_size[ocelot_sb_buf][ocelot_sb_pool_ing] - + buf_rsrv_i; + buf_shr_e = ocelot->pool_size[ocelot_sb_buf][ocelot_sb_pool_egr] - + buf_rsrv_e; + ref_shr_i = ocelot->pool_size[ocelot_sb_ref][ocelot_sb_pool_ing] - + ref_rsrv_i; + ref_shr_e = ocelot->pool_size[ocelot_sb_ref][ocelot_sb_pool_egr] - + ref_rsrv_e; +/* ensure that all reservations can be enforced */ +static int ocelot_watermark_validate(struct ocelot *ocelot, + struct netlink_ext_ack *extack) +{ + u32 buf_rsrv_i, buf_rsrv_e; + u32 ref_rsrv_i, ref_rsrv_e; + + ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e); + ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e); + + if (buf_rsrv_i > ocelot->pool_size[ocelot_sb_buf][ocelot_sb_pool_ing]) { + nl_set_err_msg_mod(extack, + "ingress frame reservations exceed pool size"); + return -erange; + } + if (buf_rsrv_e > ocelot->pool_size[ocelot_sb_buf][ocelot_sb_pool_egr]) { + nl_set_err_msg_mod(extack, + "egress frame reservations exceed pool size"); + return -erange; + } + if (ref_rsrv_i > ocelot->pool_size[ocelot_sb_ref][ocelot_sb_pool_ing]) { + nl_set_err_msg_mod(extack, + 
"ingress reference reservations exceed pool size"); + return -erange; + } + if (ref_rsrv_e > ocelot->pool_size[ocelot_sb_ref][ocelot_sb_pool_egr]) { + nl_set_err_msg_mod(extack, + "egress reference reservations exceed pool size"); + return -erange; + } + + return 0; +} + -void ocelot_watermark_init(struct ocelot *ocelot) +static void ocelot_watermark_init(struct ocelot *ocelot) + +/* pool size and type are fixed up at runtime. keeping this structure to + * look up the cell size multipliers. + */ +static const struct devlink_sb_pool_info ocelot_sb_pool[] = { + [ocelot_sb_buf] = { + .cell_size = ocelot_buffer_cell_sz, + .threshold_type = devlink_sb_threshold_type_static, + }, + [ocelot_sb_ref] = { + .cell_size = 1, + .threshold_type = devlink_sb_threshold_type_static, + }, +}; + +/* returns the pool size configured through ocelot_sb_pool_set */ +int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + if (sb_index >= ocelot_sb_num) + return -enodev; + if (pool_index >= ocelot_sb_pool_num) + return -enodev; + + *pool_info = ocelot_sb_pool[sb_index]; + pool_info->size = ocelot->pool_size[sb_index][pool_index]; + if (pool_index) + pool_info->pool_type = devlink_sb_pool_type_ingress; + else + pool_info->pool_type = devlink_sb_pool_type_egress; + + return 0; +} +export_symbol(ocelot_sb_pool_get); + +/* the pool size received here configures the total amount of resources used on + * ingress (or on egress, depending upon the pool index). the pool size, minus + * the values for the port and port-tc reservations, is written into the + * col_shr(dp=0) sharing watermark. 
+ */ +int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + u32 old_pool_size; + int err; + + if (sb_index >= ocelot_sb_num) { + nl_set_err_msg_mod(extack, + "invalid sb, use 0 for buffers and 1 for frame references"); + return -enodev; + } + if (pool_index >= ocelot_sb_pool_num) { + nl_set_err_msg_mod(extack, + "invalid pool, use 0 for ingress and 1 for egress"); + return -enodev; + } + if (threshold_type != devlink_sb_threshold_type_static) { + nl_set_err_msg_mod(extack, + "only static threshold supported"); + return -eopnotsupp; + } + + old_pool_size = ocelot->pool_size[sb_index][pool_index]; + ocelot->pool_size[sb_index][pool_index] = size; + + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot->pool_size[sb_index][pool_index] = old_pool_size; + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +export_symbol(ocelot_sb_pool_set); + +/* this retrieves the configuration made with ocelot_sb_port_pool_set */ +int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + int wm_index; + + switch (sb_index) { + case ocelot_sb_buf: + if (pool_index == ocelot_sb_pool_ing) + wm_index = buf_p_rsrv_i(port); + else + wm_index = buf_p_rsrv_e(port); + break; + case ocelot_sb_ref: + if (pool_index == ocelot_sb_pool_ing) + wm_index = ref_p_rsrv_i(port); + else + wm_index = ref_p_rsrv_e(port); + break; + default: + return -enodev; + } + + *p_threshold = ocelot_wm_read(ocelot, wm_index); + *p_threshold *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +export_symbol(ocelot_sb_port_pool_get); + +/* this configures the p_rsrv per-port reserved resource watermark */ +int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack) +{ + int wm_index, 
err; + u32 old_thr; + + switch (sb_index) { + case ocelot_sb_buf: + if (pool_index == ocelot_sb_pool_ing) + wm_index = buf_p_rsrv_i(port); + else + wm_index = buf_p_rsrv_e(port); + break; + case ocelot_sb_ref: + if (pool_index == ocelot_sb_pool_ing) + wm_index = ref_p_rsrv_i(port); + else + wm_index = ref_p_rsrv_e(port); + break; + default: + nl_set_err_msg_mod(extack, "invalid shared buffer"); + return -enodev; + } + + threshold /= ocelot_sb_pool[sb_index].cell_size; + + old_thr = ocelot_wm_read(ocelot, wm_index); + ocelot_wm_write(ocelot, wm_index, threshold); + + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot_wm_write(ocelot, wm_index, old_thr); + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +export_symbol(ocelot_sb_port_pool_set); + +/* this retrieves the configuration done by ocelot_sb_tc_pool_bind_set */ +int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + int wm_index; + + switch (sb_index) { + case ocelot_sb_buf: + if (pool_type == devlink_sb_pool_type_ingress) + wm_index = buf_q_rsrv_i(port, tc_index); + else + wm_index = buf_q_rsrv_e(port, tc_index); + break; + case ocelot_sb_ref: + if (pool_type == devlink_sb_pool_type_ingress) + wm_index = ref_q_rsrv_i(port, tc_index); + else + wm_index = ref_q_rsrv_e(port, tc_index); + break; + default: + return -enodev; + } + + *p_threshold = ocelot_wm_read(ocelot, wm_index); + *p_threshold *= ocelot_sb_pool[sb_index].cell_size; + + if (pool_type == devlink_sb_pool_type_ingress) + *p_pool_index = 0; + else + *p_pool_index = 1; + + return 0; +} +export_symbol(ocelot_sb_tc_pool_bind_get); + +/* this configures the q_rsrv per-port-tc reserved resource watermark */ +int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 
threshold, + struct netlink_ext_ack *extack) +{ + int wm_index, err; + u32 old_thr; + + /* paranoid check? */ + if (pool_index == ocelot_sb_pool_ing && + pool_type != devlink_sb_pool_type_ingress) + return -einval; + if (pool_index == ocelot_sb_pool_egr && + pool_type != devlink_sb_pool_type_egress) + return -einval; + + switch (sb_index) { + case ocelot_sb_buf: + if (pool_type == devlink_sb_pool_type_ingress) + wm_index = buf_q_rsrv_i(port, tc_index); + else + wm_index = buf_q_rsrv_e(port, tc_index); + break; + case ocelot_sb_ref: + if (pool_type == devlink_sb_pool_type_ingress) + wm_index = ref_q_rsrv_i(port, tc_index); + else + wm_index = ref_q_rsrv_e(port, tc_index); + break; + default: + nl_set_err_msg_mod(extack, "invalid shared buffer"); + return -enodev; + } + + threshold /= ocelot_sb_pool[sb_index].cell_size; + + old_thr = ocelot_wm_read(ocelot, wm_index); + ocelot_wm_write(ocelot, wm_index, threshold); + err = ocelot_watermark_validate(ocelot, extack); + if (err) { + ocelot_wm_write(ocelot, wm_index, old_thr); + return err; + } + + ocelot_setup_sharing_watermarks(ocelot); + + return 0; +} +export_symbol(ocelot_sb_tc_pool_bind_set); + +/* the hardware does not support atomic snapshots, we'll read out the + * occupancy registers individually and have this as just a stub. + */ +int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index) +{ + return 0; +} +export_symbol(ocelot_sb_occ_snapshot); + +/* the watermark occupancy registers are cleared upon read, + * so let's read them. 
+ */ +int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index) +{ + u32 inuse, maxuse; + int port, prio; + + switch (sb_index) { + case ocelot_sb_buf: + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < ocelot_num_tc; prio++) { + ocelot_wm_status(ocelot, buf_q_rsrv_i(port, prio), + &inuse, &maxuse); + ocelot_wm_status(ocelot, buf_q_rsrv_e(port, prio), + &inuse, &maxuse); + } + ocelot_wm_status(ocelot, buf_p_rsrv_i(port), + &inuse, &maxuse); + ocelot_wm_status(ocelot, buf_p_rsrv_e(port), + &inuse, &maxuse); + } + break; + case ocelot_sb_ref: + for (port = 0; port <= ocelot->num_phys_ports; port++) { + for (prio = 0; prio < ocelot_num_tc; prio++) { + ocelot_wm_status(ocelot, ref_q_rsrv_i(port, prio), + &inuse, &maxuse); + ocelot_wm_status(ocelot, ref_q_rsrv_e(port, prio), + &inuse, &maxuse); + } + ocelot_wm_status(ocelot, ref_p_rsrv_i(port), + &inuse, &maxuse); + ocelot_wm_status(ocelot, ref_p_rsrv_e(port), + &inuse, &maxuse); + } + break; + default: + return -enodev; + } + + return 0; +} +export_symbol(ocelot_sb_occ_max_clear); + +/* this retrieves the watermark occupancy for per-port p_rsrv watermarks */ +int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + int wm_index; + + switch (sb_index) { + case ocelot_sb_buf: + if (pool_index == ocelot_sb_pool_ing) + wm_index = buf_p_rsrv_i(port); + else + wm_index = buf_p_rsrv_e(port); + break; + case ocelot_sb_ref: + if (pool_index == ocelot_sb_pool_ing) + wm_index = ref_p_rsrv_i(port); + else + wm_index = ref_p_rsrv_e(port); + break; + default: + return -enodev; + } + + ocelot_wm_status(ocelot, wm_index, p_cur, p_max); + *p_cur *= ocelot_sb_pool[sb_index].cell_size; + *p_max *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +export_symbol(ocelot_sb_occ_port_pool_get); + +/* this retrieves the watermark occupancy for per-port-tc q_rsrv watermarks */ +int 
ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + int wm_index; + + switch (sb_index) { + case ocelot_sb_buf: + if (pool_type == devlink_sb_pool_type_ingress) + wm_index = buf_q_rsrv_i(port, tc_index); + else + wm_index = buf_q_rsrv_e(port, tc_index); + break; + case ocelot_sb_ref: + if (pool_type == devlink_sb_pool_type_ingress) + wm_index = ref_q_rsrv_i(port, tc_index); + else + wm_index = ref_q_rsrv_e(port, tc_index); + break; + default: + return -enodev; + } + + ocelot_wm_status(ocelot, wm_index, p_cur, p_max); + *p_cur *= ocelot_sb_pool[sb_index].cell_size; + *p_max *= ocelot_sb_pool[sb_index].cell_size; + + return 0; +} +export_symbol(ocelot_sb_occ_tc_port_bind_get); + +int ocelot_devlink_sb_register(struct ocelot *ocelot) +{ + int err; + + err = devlink_sb_register(ocelot->devlink, ocelot_sb_buf, + ocelot->packet_buffer_size, 1, 1, + ocelot_num_tc, ocelot_num_tc); + if (err) + return err; + + err = devlink_sb_register(ocelot->devlink, ocelot_sb_ref, + ocelot->num_frame_refs, 1, 1, + ocelot_num_tc, ocelot_num_tc); + if (err) { + devlink_sb_unregister(ocelot->devlink, ocelot_sb_buf); + return err; + } + + ocelot->pool_size[ocelot_sb_buf][ocelot_sb_pool_ing] = ocelot->packet_buffer_size; + ocelot->pool_size[ocelot_sb_buf][ocelot_sb_pool_egr] = ocelot->packet_buffer_size; + ocelot->pool_size[ocelot_sb_ref][ocelot_sb_pool_ing] = ocelot->num_frame_refs; + ocelot->pool_size[ocelot_sb_ref][ocelot_sb_pool_egr] = ocelot->num_frame_refs; + + ocelot_watermark_init(ocelot); + + return 0; +} +export_symbol(ocelot_devlink_sb_register); + +void ocelot_devlink_sb_unregister(struct ocelot *ocelot) +{ + devlink_sb_unregister(ocelot->devlink, ocelot_sb_buf); + devlink_sb_unregister(ocelot->devlink, ocelot_sb_ref); +} +export_symbol(ocelot_devlink_sb_unregister); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c --- 
a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c + * + * this contains glue logic between the switchdev driver operations and the + * mscc_ocelot_switch_lib. + * copyright 2020-2021 nxp semiconductors +static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp) +{ + return devlink_priv(dlp->devlink); +} + +static int devlink_port_to_port(struct devlink_port *dlp) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + + return dlp - ocelot->devlink_ports; +} + +static int ocelot_devlink_sb_pool_get(struct devlink *dl, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info); +} + +static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size, + threshold_type, extack); +} + +static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index, + p_threshold); +} + +static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp, + unsigned int sb_index, u16 pool_index, + u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index, + threshold, extack); +} + +static int +ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + 
struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index, + pool_type, p_pool_index, + p_threshold); +} + +static int +ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index, + pool_type, pool_index, threshold, + extack); +} + +static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl, + unsigned int sb_index) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_occ_snapshot(ocelot, sb_index); +} + +static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl, + unsigned int sb_index) +{ + struct ocelot *ocelot = devlink_priv(dl); + + return ocelot_sb_occ_max_clear(ocelot, sb_index); +} + +static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp, + unsigned int sb_index, + u16 pool_index, u32 *p_cur, + u32 *p_max) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index, + p_cur, p_max); +} + +static int +ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct ocelot *ocelot = devlink_port_to_ocelot(dlp); + int port = devlink_port_to_port(dlp); + + return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, + tc_index, pool_type, + p_cur, p_max); +} + + .sb_pool_get = ocelot_devlink_sb_pool_get, + .sb_pool_set = ocelot_devlink_sb_pool_set, + .sb_port_pool_get = ocelot_devlink_sb_port_pool_get, + .sb_port_pool_set = ocelot_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = 
ocelot_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set, + .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot, + .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear, + .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c + err = ocelot_devlink_sb_register(ocelot); + if (err) + goto out_ocelot_release_ports; + +out_ocelot_release_ports: + mscc_ocelot_release_ports(ocelot); + mscc_ocelot_teardown_devlink_ports(ocelot); + ocelot_devlink_sb_unregister(ocelot); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h +enum ocelot_sb { + ocelot_sb_buf, + ocelot_sb_ref, + ocelot_sb_num, +}; + +enum ocelot_sb_pool { + ocelot_sb_pool_ing, + ocelot_sb_pool_egr, + ocelot_sb_pool_num, +}; + + u32 pool_size[ocelot_sb_num][ocelot_sb_pool_num]; +int ocelot_devlink_sb_register(struct ocelot *ocelot); +void ocelot_devlink_sb_unregister(struct ocelot *ocelot); +int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, + struct devlink_sb_pool_info *pool_info); +int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type, + struct netlink_ext_ack *extack); +int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); +int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 threshold, struct netlink_ext_ack *extack); +int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, 
u32 *p_threshold); +int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold, + struct netlink_ext_ack *extack); +int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index); +int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index); +int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); +int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); +
Networking
f59fd9cab7305266f4148776c3b66329551a2a3a
vladimir oltean
include
soc
dsa, ethernet, mscc, ocelot
net: mscc: ocelot: offload bridge port flags to device
we should not be unconditionally enabling address learning, since doing that is actively detrimential when a port is standalone and not offloading a bridge. namely, if a port in the switch is standalone and others are offloading the bridge, then we could enter a situation where we learn an address towards the standalone port, but the bridged ports could not forward the packet there, because the cpu is the only path between the standalone and the bridged ports. the solution of course is to not enable address learning unless the bridge asks for it.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
offload bridge port flags to device
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mscc', 'ocelot']
['h', 'c']
4
158
5
--- diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c +static int felix_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags val, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_pre_bridge_flags(ocelot, port, val); +} + +static int felix_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags val, + struct netlink_ext_ack *extack) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_bridge_flags(ocelot, port, val); + + return 0; +} + + .port_pre_bridge_flags = felix_pre_bridge_flags, + .port_bridge_flags = felix_bridge_flags, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c + struct ocelot_port *ocelot_port = ocelot->ports[port]; - port_cfg |= ana_port_port_cfg_learn_ena; + if (ocelot_port->learn_ena) + port_cfg |= ana_port_port_cfg_learn_ena; +static void ocelot_port_set_learning(struct ocelot *ocelot, int port, + bool enabled) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u32 val = 0; + + if (enabled) + val = ana_port_port_cfg_learn_ena; + + ocelot_rmw_gix(ocelot, val, ana_port_port_cfg_learn_ena, + ana_port_port_cfg, port); + + ocelot_port->learn_ena = enabled; +} + +static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = bit(port); + + ocelot_rmw_rix(ocelot, val, bit(port), ana_pgid_pgid, pgid_uc); +} + +static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = bit(port); + + ocelot_rmw_rix(ocelot, val, bit(port), ana_pgid_pgid, pgid_mc); +} + +static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port, + bool enabled) +{ + u32 val = 0; + + if (enabled) + val = bit(port); 
+ + ocelot_rmw_rix(ocelot, val, bit(port), ana_pgid_pgid, pgid_bc); +} + +int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & ~(br_learning | br_flood | br_mcast_flood | + br_bcast_flood)) + return -einval; + + return 0; +} +export_symbol(ocelot_port_pre_bridge_flags); + +void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags flags) +{ + if (flags.mask & br_learning) + ocelot_port_set_learning(ocelot, port, + !!(flags.val & br_learning)); + + if (flags.mask & br_flood) + ocelot_port_set_ucast_flood(ocelot, port, + !!(flags.val & br_flood)); + + if (flags.mask & br_mcast_flood) + ocelot_port_set_mcast_flood(ocelot, port, + !!(flags.val & br_mcast_flood)); + + if (flags.mask & br_bcast_flood) + ocelot_port_set_bcast_flood(ocelot, port, + !!(flags.val & br_bcast_flood)); +} +export_symbol(ocelot_port_bridge_flags); + + /* disable source address learning for standalone mode */ + ocelot_port_set_learning(ocelot, port, false); + diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c + case switchdev_attr_id_port_pre_bridge_flags: + err = ocelot_port_pre_bridge_flags(ocelot, port, + attr->u.brport_flags); + break; + case switchdev_attr_id_port_bridge_flags: + ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags); + break; +static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge) +{ + struct switchdev_brport_flags flags; + int err; + + flags.mask = br_learning | br_flood | br_mcast_flood | br_bcast_flood; + flags.val = flags.mask; + + err = ocelot_port_bridge_join(ocelot, port, bridge); + if (err) + return err; + + ocelot_port_bridge_flags(ocelot, port, flags); + + return 0; +} + +static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port, + struct net_device *bridge) 
+{ + struct switchdev_brport_flags flags; + int err; + + flags.mask = br_learning | br_flood | br_mcast_flood | br_bcast_flood; + flags.val = flags.mask & ~br_learning; + + err = ocelot_port_bridge_leave(ocelot, port, bridge); + + ocelot_port_bridge_flags(ocelot, port, flags); + + return err; +} + - err = ocelot_port_bridge_join(ocelot, port, - info->upper_dev); + err = ocelot_netdevice_bridge_join(ocelot, port, + info->upper_dev); - err = ocelot_port_bridge_leave(ocelot, port, - info->upper_dev); + err = ocelot_netdevice_bridge_leave(ocelot, port, + info->upper_dev); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h + bool learn_ena; +int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags val); +void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, + struct switchdev_brport_flags val);
Networking
421741ea5672cf16fa551bcde23e327075ed419e
vladimir oltean
include
soc
dsa, ethernet, mscc, ocelot
mt76: mt7915: add implicit tx beamforming support
add ht/vht implicit tx beamforming support and enable it via debugfs.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add implicit tx beamforming support and enable it trough debugfs
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ', 'mt7915']
['h', 'c']
4
87
58
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +static int +mt7915_implicit_txbf_set(void *data, u64 val) +{ + struct mt7915_dev *dev = data; + + if (test_bit(mt76_state_running, &dev->mphy.state)) + return -ebusy; + + dev->ibf = !!val; + + return mt7915_mcu_set_txbf_type(dev); +} + +static int +mt7915_implicit_txbf_get(void *data, u64 *val) +{ + struct mt7915_dev *dev = data; + + *val = dev->ibf; + + return 0; +} + +define_debugfs_attribute(fops_implicit_txbf, mt7915_implicit_txbf_get, + mt7915_implicit_txbf_set, "%lld "); + + debugfs_create_file("implicit_txbf", 0600, dir, dev, + &fops_implicit_txbf); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c - /* - * todo: dbdc & check whether ibf phase calibration data has - * been stored in eeprom offset 0x651~0x7b8, then write down - * 0x1111 into 0x651 and 0x651 to trigger ibf. 
- */ - - /* enable ibf & ebf */ + /* enable ebf */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c + bf->bf_cap = mt_ebf; -mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct sta_rec_bf *bf) +mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy, + struct sta_rec_bf *bf) - bf->bf_cap |= mt_ibf; + bf->bf_cap = mt_ibf; + bf->nr = hweight8(phy->mt76->chainmask) - 1; - bf->ibf_ncol = bf->nc; - - if (sta->bandwidth <= ieee80211_sta_rx_bw_40 && !bf->nc) - bf->ibf_timeout = 0x48; + bf->ibf_ncol = n; - struct sta_rec_bf *bf) + struct sta_rec_bf *bf, bool explicit) - u8 bfee_nr, bfer_nr, n, tx_ant = hweight8(phy->mt76->chainmask) - 1; - u16 mcs_map; + u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map); + u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); + u8 tx_ant = hweight8(phy->mt76->chainmask) - 1; - bf->bf_cap |= mt_ebf; - mt7915_mcu_sta_sounding_rate(bf); + if (explicit) { + u8 bfee_nr, bfer_nr; - bfee_nr = field_get(ieee80211_vht_cap_beamformee_sts_mask, - pc->cap); - bfer_nr = field_get(ieee80211_vht_cap_sounding_dimensions_mask, - vc->cap); - mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map); + mt7915_mcu_sta_sounding_rate(bf); + bfee_nr = field_get(ieee80211_vht_cap_beamformee_sts_mask, + pc->cap); + bfer_nr = field_get(ieee80211_vht_cap_sounding_dimensions_mask, + vc->cap); + bf->nr = min_t(u8, min_t(u8, bfer_nr, bfee_nr), tx_ant); + bf->nc = min_t(u8, nss_mcs, bf->nr); + bf->ibf_ncol = bf->nc; - n = min_t(u8, bfer_nr, bfee_nr); - bf->nr = min_t(u8, n, tx_ant); - n = mt7915_mcu_get_sta_nss(mcs_map); - - bf->nc = min_t(u8, n, bf->nr); - bf->ibf_ncol = bf->nc; + if (sta->bandwidth == ieee80211_sta_rx_bw_160) + bf->nr = 1; + } else { + bf->bf_cap = mt_ibf; + bf->nr = tx_ant; + bf->nc = min_t(u8, nss_mcs, bf->nr); + bf->ibf_ncol = nss_mcs; - /* force nr from 4 to 2 */ - if 
(sta->bandwidth == ieee80211_sta_rx_bw_160) - bf->nr = 1; + if (sta->bandwidth == ieee80211_sta_rx_bw_160) + bf->ibf_nrow = 1; + } - const struct ieee80211_he_cap_elem *ve; - const struct ieee80211_sta_he_cap *vc; - u8 bfee_nr, bfer_nr, nss_mcs; - u16 mcs_map; - - vc = mt7915_get_he_phy_cap(phy, vif); - ve = &vc->he_cap_elem; + const struct ieee80211_sta_he_cap *vc = mt7915_get_he_phy_cap(phy, vif); + const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem; + u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80); + u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); + u8 bfee_nr, bfer_nr; - bf->bf_cap |= mt_ebf; - - - - mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.tx_mcs_80); - nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); - - bool enable) + bool enable, bool explicit) + /* he: ebf only, in accordance with spec + * vht: support ebf and ibf + * ht: ibf only, since mac80211 lacks of ebf support + */ + if (sta->he_cap.has_he && explicit) + mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); + else if (sta->vht_cap.vht_supported) + mt7915_mcu_sta_bfer_vht(sta, phy, bf, explicit); + else if (sta->ht_cap.ht_supported) + mt7915_mcu_sta_bfer_ht(sta, phy, bf); + else + return; + - bf->ibf_timeout = 0x18; - if (sta->he_cap.has_he) - mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); - else if (sta->vht_cap.vht_supported) - mt7915_mcu_sta_bfer_vht(sta, phy, bf); - else if (sta->ht_cap.ht_supported) - mt7915_mcu_sta_bfer_ht(sta, bf); + if (!explicit && sta->bandwidth <= ieee80211_sta_rx_bw_40 && !bf->nc) + bf->ibf_timeout = 0x48; + else + bf->ibf_timeout = 0x18; - if (bf->bf_cap & mt_ebf && bf->nr != tx_ant) + if (explicit && bf->nr != tx_ant) - if (ebf) { + if (ebf || dev->ibf) { - mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable); + mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable, ebf); - .ibf = false, + .ibf = dev->ibf, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h 
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h + bool ibf;
Networking
6d6dc980e07d1f891cff4fbf7beedc81af800ff5
ryder lee shayne chen shayne chen mediatek com
drivers
net
mediatek, mt76, mt7915, wireless
mt76: mt7915: add support for flash mode
add support for getting rf values from flash. this is used for some test purposes and products. if the mtd partition is configured in dts, driver will read from flash to init eeprom command; if not, still init it with efuse's values.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add support for flash mode
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ', 'mt7915']
['h', 'c']
4
56
6
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c - memset(dev->mt76.eeprom.data, -1, mt7915_eeprom_size); + if (ret) + dev->flash_mode = true; + else + memset(dev->mt76.eeprom.data, -1, mt7915_eeprom_size); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev) +{ +#define total_page_mask genmask(7, 5) +#define page_idx_mask genmask(4, 2) +#define per_page_size 0x400 + struct mt7915_mcu_eeprom req = { .buffer_mode = ee_mode_buffer }; + u8 total = mt7915_eeprom_size / per_page_size; + u8 *eep = (u8 *)dev->mt76.eeprom.data; + int eep_len; + int i; + + for (i = 0; i <= total; i++, eep += eep_len) { + struct sk_buff *skb; + int ret; + + if (i == total) + eep_len = mt7915_eeprom_size % per_page_size; + else + eep_len = per_page_size; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, + sizeof(req) + eep_len); + if (!skb) + return -enomem; + + req.format = field_prep(total_page_mask, total) | + field_prep(page_idx_mask, i) | ee_format_whole; + req.len = cpu_to_le16(eep_len); + + skb_put_data(skb, &req, sizeof(req)); + skb_put_data(skb, eep, eep_len); + + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, + mcu_ext_cmd_efuse_buffer_mode, true); + if (ret) + return ret; + } + + return 0; +} + - struct req_hdr { - u8 buffer_mode; - u8 format; - __le16 len; - } __packed req = { + struct mt7915_mcu_eeprom req = { + if (dev->flash_mode) + return mt7915_mcu_set_eeprom_flash(dev); + diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ 
b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +struct mt7915_mcu_eeprom { + u8 buffer_mode; + u8 format; + __le16 len; +} __packed; + diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h + bool flash_mode;
Networking
26f18380e6ca1276e299f3774a550629651117a8
shayne chen
drivers
net
mediatek, mt76, mt7915, wireless
mt76: mt7915: add support for using a secondary pcie link for gen1
for performance reasons, mt7915 supports using 2 pcie gen1 links on platforms that don't support gen2. add support for using this to move traffic for a second dbdc band onto a dedicated link
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add support for using a secondary pcie link for gen1
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ', 'mt7915']
['h', 'c']
5
290
39
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c -void mt7915_dma_prefetch(struct mt7915_dev *dev) +static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs) - mt76_wr(dev, mt_wfdma0_rx_ring0_ext_ctrl, prefetch(0x0, 0x4)); - mt76_wr(dev, mt_wfdma0_rx_ring1_ext_ctrl, prefetch(0x40, 0x4)); - mt76_wr(dev, mt_wfdma0_rx_ring2_ext_ctrl, prefetch(0x80, 0x0)); - - mt76_wr(dev, mt_wfdma1_tx_ring0_ext_ctrl, prefetch(0x80, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring1_ext_ctrl, prefetch(0xc0, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring2_ext_ctrl, prefetch(0x100, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring3_ext_ctrl, prefetch(0x140, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring4_ext_ctrl, prefetch(0x180, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring5_ext_ctrl, prefetch(0x1c0, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring6_ext_ctrl, prefetch(0x200, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring7_ext_ctrl, prefetch(0x240, 0x4)); - - mt76_wr(dev, mt_wfdma1_tx_ring16_ext_ctrl, prefetch(0x280, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring17_ext_ctrl, prefetch(0x2c0, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring18_ext_ctrl, prefetch(0x300, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring19_ext_ctrl, prefetch(0x340, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring20_ext_ctrl, prefetch(0x380, 0x4)); - mt76_wr(dev, mt_wfdma1_tx_ring21_ext_ctrl, prefetch(0x3c0, 0x0)); - - mt76_wr(dev, mt_wfdma1_rx_ring0_ext_ctrl, prefetch(0x3c0, 0x4)); - mt76_wr(dev, mt_wfdma1_rx_ring1_ext_ctrl, prefetch(0x400, 0x4)); - mt76_wr(dev, mt_wfdma1_rx_ring2_ext_ctrl, prefetch(0x440, 0x4)); - mt76_wr(dev, mt_wfdma1_rx_ring3_ext_ctrl, prefetch(0x480, 0x0)); + mt76_wr(dev, mt_wfdma0_rx_ring0_ext_ctrl + ofs, prefetch(0x0, 0x4)); + mt76_wr(dev, mt_wfdma0_rx_ring1_ext_ctrl + ofs, prefetch(0x40, 0x4)); + mt76_wr(dev, mt_wfdma0_rx_ring2_ext_ctrl + ofs, prefetch(0x80, 0x0)); + + mt76_wr(dev, 
mt_wfdma1_tx_ring0_ext_ctrl + ofs, prefetch(0x80, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring1_ext_ctrl + ofs, prefetch(0xc0, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring2_ext_ctrl + ofs, prefetch(0x100, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring3_ext_ctrl + ofs, prefetch(0x140, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring4_ext_ctrl + ofs, prefetch(0x180, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring5_ext_ctrl + ofs, prefetch(0x1c0, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring6_ext_ctrl + ofs, prefetch(0x200, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring7_ext_ctrl + ofs, prefetch(0x240, 0x4)); + + mt76_wr(dev, mt_wfdma1_tx_ring16_ext_ctrl + ofs, prefetch(0x280, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring17_ext_ctrl + ofs, prefetch(0x2c0, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring18_ext_ctrl + ofs, prefetch(0x300, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring19_ext_ctrl + ofs, prefetch(0x340, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring20_ext_ctrl + ofs, prefetch(0x380, 0x4)); + mt76_wr(dev, mt_wfdma1_tx_ring21_ext_ctrl + ofs, prefetch(0x3c0, 0x0)); + + mt76_wr(dev, mt_wfdma1_rx_ring0_ext_ctrl + ofs, prefetch(0x3c0, 0x4)); + mt76_wr(dev, mt_wfdma1_rx_ring1_ext_ctrl + ofs, prefetch(0x400, 0x4)); + mt76_wr(dev, mt_wfdma1_rx_ring2_ext_ctrl + ofs, prefetch(0x440, 0x4)); + mt76_wr(dev, mt_wfdma1_rx_ring3_ext_ctrl + ofs, prefetch(0x480, 0x0)); +} + +void mt7915_dma_prefetch(struct mt7915_dev *dev) +{ + __mt7915_dma_prefetch(dev, 0); + if (dev->hif2) + __mt7915_dma_prefetch(dev, mt_wfdma1_pcie1_base - mt_wfdma1_base); + u32 hif1_ofs = 0; + if (dev->hif2) + hif1_ofs = mt_wfdma1_pcie1_base - mt_wfdma1_base; + - /* configure perfetch settings */ - mt7915_dma_prefetch(dev); - + if (dev->hif2) { + mt76_set(dev, mt_wfdma1_glo_cfg + hif1_ofs, + mt_wfdma1_glo_cfg_omit_tx_info | + mt_wfdma1_glo_cfg_omit_rx_info); + + mt76_wr(dev, mt_wfdma0_rst_dtx_ptr + hif1_ofs, ~0); + mt76_wr(dev, mt_wfdma1_rst_dtx_ptr + hif1_ofs, ~0); + + mt76_wr(dev, mt_wfdma0_pri_dly_int_cfg0 + hif1_ofs, 0); + mt76_wr(dev, mt_wfdma1_pri_dly_int_cfg0 + hif1_ofs, 
0); + } + + /* configure perfetch settings */ + mt7915_dma_prefetch(dev); + - rx_buf_size, mt_rx_data_ring_base); + rx_buf_size, + mt_rx_data_ring_base + hif1_ofs); - rx_buf_size, mt_rx_event_ring_base); + rx_buf_size, + mt_rx_event_ring_base + hif1_ofs); + if (dev->hif2) { + mt76_set(dev, mt_wfdma0_glo_cfg + hif1_ofs, + (mt_wfdma0_glo_cfg_tx_dma_en | + mt_wfdma0_glo_cfg_rx_dma_en)); + mt76_set(dev, mt_wfdma1_glo_cfg + hif1_ofs, + (mt_wfdma1_glo_cfg_tx_dma_en | + mt_wfdma1_glo_cfg_rx_dma_en)); + mt76_set(dev, mt_wfdma_host_config, + mt_wfdma_host_config_pdma_band); + } + diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c + u32 hif1_ofs = mt_wfdma1_pcie1_base - mt_wfdma1_base; + if (dev->hif2) { + mt76_clear(dev, mt_wfdma0_glo_cfg + hif1_ofs, + (mt_wfdma0_glo_cfg_tx_dma_en | + mt_wfdma0_glo_cfg_rx_dma_en)); + mt76_clear(dev, mt_wfdma1_glo_cfg + hif1_ofs, + (mt_wfdma1_glo_cfg_tx_dma_en | + mt_wfdma1_glo_cfg_rx_dma_en)); + } + if (dev->hif2) { + mt76_set(dev, mt_wfdma0_glo_cfg + hif1_ofs, + (mt_wfdma0_glo_cfg_tx_dma_en | + mt_wfdma0_glo_cfg_rx_dma_en)); + mt76_set(dev, mt_wfdma1_glo_cfg + hif1_ofs, + (mt_wfdma1_glo_cfg_tx_dma_en | + mt_wfdma1_glo_cfg_rx_dma_en)); + } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +struct mt7915_hif { + struct list_head list; + + struct device *dev; + void __iomem *regs; + int irq; +}; + + struct mt7915_hif *hif2; + + u32 hif_idx; -extern struct pci_driver mt7915_pci_driver; +void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg, + u32 clear, u32 set); + - mt76_set_irq_mask(&dev->mt76, mt_int_mask_csr, 0, mask); + if (dev->hif2) + mt7915_dual_hif_set_irq_mask(dev, true, 
0, mask); + else + mt76_set_irq_mask(&dev->mt76, mt_int_mask_csr, 0, mask); - mt76_set_irq_mask(&dev->mt76, mt_int_mask_csr, mask, 0); + if (dev->hif2) + mt7915_dual_hif_set_irq_mask(dev, true, mask, 0); + else + mt76_set_irq_mask(&dev->mt76, mt_int_mask_csr, mask, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +static list_head(hif_list); +static define_spinlock(hif_lock); +static u32 hif_idx; + - { pci_device(0x14c3, 0x7915) }, + { pci_device(pci_vendor_id_mediatek, 0x7915) }, +static const struct pci_device_id mt7915_hif_device_table[] = { + { pci_device(pci_vendor_id_mediatek, 0x7916) }, + { }, +}; + +void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg, + u32 clear, u32 set) +{ + struct mt76_dev *mdev = &dev->mt76; + unsigned long flags; + + spin_lock_irqsave(&mdev->mmio.irq_lock, flags); + + mdev->mmio.irqmask &= ~clear; + mdev->mmio.irqmask |= set; + + if (write_reg) { + mt76_wr(dev, mt_int_mask_csr, mdev->mmio.irqmask); + mt76_wr(dev, mt_int1_mask_csr, mdev->mmio.irqmask); + } + + spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags); +} + +static struct mt7915_hif * +mt7915_pci_get_hif2(struct mt7915_dev *dev) +{ + struct mt7915_hif *hif; + u32 val; + + spin_lock_bh(&hif_lock); + + list_for_each_entry(hif, &hif_list, list) { + val = readl(hif->regs + mt_pcie_recog_id); + val &= mt_pcie_recog_id_mask; + if (val != dev->hif_idx) + continue; + + get_device(hif->dev); + goto out; + } + hif = null; + +out: + spin_unlock_bh(&hif_lock); + + return hif; +} + +static void mt7915_put_hif2(struct mt7915_hif *hif) +{ + if (!hif) + return; + + put_device(hif->dev); +} + - u32 intr, mask; + u32 intr, intr1, mask; + if (dev->hif2) { + intr1 = mt76_rr(dev, mt_int1_source_csr); + intr1 &= dev->mt76.mmio.irqmask; + mt76_wr(dev, mt_int1_source_csr, intr1); + + intr |= intr1; + } + 
+static void mt7915_pci_init_hif2(struct mt7915_dev *dev) +{ + struct mt7915_hif *hif; + + dev->hif_idx = ++hif_idx; + if (!pci_get_device(pci_vendor_id_mediatek, 0x7916, null)) + return; + + mt76_wr(dev, mt_pcie_recog_id, dev->hif_idx | mt_pcie_recog_id_sem); + + hif = mt7915_pci_get_hif2(dev); + if (!hif) + return; + + dev->hif2 = hif; + + mt76_wr(dev, mt_int1_mask_csr, 0); + + if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler, + irqf_shared, kbuild_modname "-hif", dev)) { + mt7915_put_hif2(hif); + hif = null; + } + + /* master switch of pcie tnterrupt enable */ + mt7915_l1_wr(dev, mt_pcie1_mac_int_enable, 0xff); +} + +static int mt7915_pci_hif2_probe(struct pci_dev *pdev) +{ + struct mt7915_hif *hif; + + hif = devm_kzalloc(&pdev->dev, sizeof(*hif), gfp_kernel); + if (!hif) + return -enomem; + + hif->dev = &pdev->dev; + hif->regs = pcim_iomap_table(pdev)[0]; + hif->irq = pdev->irq; + spin_lock_bh(&hif_lock); + list_add(&hif->list, &hif_list); + spin_unlock_bh(&hif_lock); + pci_set_drvdata(pdev, hif); + + return 0; +} + + if (id->device == 0x7916) + return mt7915_pci_hif2_probe(pdev); + + mt7915_pci_init_hif2(dev); + +static void mt7915_hif_remove(struct pci_dev *pdev) +{ + struct mt7915_hif *hif = pci_get_drvdata(pdev); + + list_del(&hif->list); +} + - struct mt76_dev *mdev = pci_get_drvdata(pdev); - struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); + struct mt76_dev *mdev; + struct mt7915_dev *dev; + mdev = pci_get_drvdata(pdev); + dev = container_of(mdev, struct mt7915_dev, mt76); + mt7915_put_hif2(dev->hif2); -struct pci_driver mt7915_pci_driver = { +static struct pci_driver mt7915_hif_driver = { + .name = kbuild_modname "_hif", + .id_table = mt7915_hif_device_table, + .probe = mt7915_pci_probe, + .remove = mt7915_hif_remove, +}; + +static struct pci_driver mt7915_pci_driver = { -module_pci_driver(mt7915_pci_driver); +static int __init mt7915_init(void) +{ + int ret; + + ret = pci_register_driver(&mt7915_hif_driver); + if 
(ret) + return ret; + + ret = pci_register_driver(&mt7915_pci_driver); + if (ret) + pci_unregister_driver(&mt7915_hif_driver); + + return ret; +} + +static void __exit mt7915_exit(void) +{ + pci_unregister_driver(&mt7915_pci_driver); + pci_unregister_driver(&mt7915_hif_driver); +} + +module_init(mt7915_init); +module_exit(mt7915_exit); +module_device_table(pci, mt7915_hif_device_table); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h + +#define mt_int_band1_mask (mt_int_rx_done_wa_ext | \ + mt_int_tx_done_band1) + +#define mt_wfdma_host_config mt_wfdma_ext_csr(0x30) +#define mt_wfdma_host_config_pdma_band bit(0) + +#define mt_int1_source_csr mt_wfdma_ext_csr(0x88) +#define mt_int1_mask_csr mt_wfdma_ext_csr(0x8c) + +#define mt_pcie_recog_id mt_wfdma_ext_csr(0x90) +#define mt_pcie_recog_id_mask genmask(30, 0) +#define mt_pcie_recog_id_sem bit(31) + +#define mt_pcie1_mac_base 0x74020000 +#define mt_pcie1_mac(ofs) (mt_pcie1_mac_base + (ofs)) +#define mt_pcie1_mac_int_enable mt_pcie1_mac(0x188) +
Networking
9093cfff72e3e55b703ed38fa1af87c204d89cf1
felix fietkau
drivers
net
mediatek, mt76, mt7915, wireless
mt76: mt7915: support txbf for dbdc
with this patch, txbf can be run on both bands simultaneously.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
support txbf for dbdc
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ', 'mt7915']
['h', 'c']
3
27
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c + + if (dev->dbdc_support) { + ret = mt7915_mcu_set_txbf_module(dev); + if (ret) + return ret; + } + diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev) +{ +#define mt_bf_module_update 25 + struct { + u8 action; + u8 bf_num; + u8 bf_bitmap; + u8 bf_sel[8]; + u8 rsv[8]; + } __packed req = { + .action = mt_bf_module_update, + .bf_num = 2, + .bf_bitmap = genmask(1, 0), + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd(txbf_action), &req, + sizeof(req), true); +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
Networking
07c0d0012f9e2afda01615fb909f4f28128a51a1
ryder lee
drivers
net
mediatek, mt76, mt7915, wireless
mt76: mt7921: add mac support
add rx packet description parsing, tx packet description compositon, handle packet recycling and provide mac information mt76 core needs to support mac80211.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
4
2,457
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. */ + +#include <linux/etherdevice.h> +#include <linux/timekeeping.h> +#include "mt7921.h" +#include "../dma.h" +#include "mac.h" + +#define to_rssi(field, rxv) ((field_get(field, rxv) - 220) / 2) + +#define he_bits(f) cpu_to_le16(ieee80211_radiotap_he_##f) +#define he_prep(f, m, v) le16_encode_bits(le32_get_bits(v, mt_crxv_he_##m),\ + ieee80211_radiotap_he_##f) + +static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev, + u16 idx, bool unicast) +{ + struct mt7921_sta *sta; + struct mt76_wcid *wcid; + + if (idx >= array_size(dev->mt76.wcid)) + return null; + + wcid = rcu_dereference(dev->mt76.wcid[idx]); + if (unicast || !wcid) + return wcid; + + if (!wcid->sta) + return null; + + sta = container_of(wcid, struct mt7921_sta, wcid); + if (!sta->vif) + return null; + + return &sta->vif->sta.wcid; +} + +void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) +{ +} + +bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask) +{ + mt76_rmw(dev, mt_wtbl_update, mt_wtbl_update_wlan_idx, + field_prep(mt_wtbl_update_wlan_idx, idx) | mask); + + return mt76_poll(dev, mt_wtbl_update, mt_wtbl_update_busy, + 0, 5000); +} + +static u32 mt7921_mac_wtbl_lmac_addr(struct mt7921_dev *dev, u16 wcid) +{ + mt76_wr(dev, mt_wtblon_top_wducr, + field_prep(mt_wtblon_top_wducr_group, (wcid >> 7))); + + return mt_wtbl_lmac_offs(wcid, 0); +} + +static void mt7921_mac_sta_poll(struct mt7921_dev *dev) +{ + static const u8 ac_to_tid[] = { + [ieee80211_ac_be] = 0, + [ieee80211_ac_bk] = 1, + [ieee80211_ac_vi] = 4, + [ieee80211_ac_vo] = 6 + }; + struct ieee80211_sta *sta; + struct mt7921_sta *msta; + u32 tx_time[ieee80211_num_acs], rx_time[ieee80211_num_acs]; + list_head(sta_poll_list); + int i; + + 
spin_lock_bh(&dev->sta_poll_lock); + list_splice_init(&dev->sta_poll_list, &sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + rcu_read_lock(); + + while (true) { + bool clear = false; + u32 addr; + u16 idx; + + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&sta_poll_list)) { + spin_unlock_bh(&dev->sta_poll_lock); + break; + } + msta = list_first_entry(&sta_poll_list, + struct mt7921_sta, poll_list); + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + + idx = msta->wcid.idx; + addr = mt7921_mac_wtbl_lmac_addr(dev, idx) + 20 * 4; + + for (i = 0; i < ieee80211_num_acs; i++) { + u32 tx_last = msta->airtime_ac[i]; + u32 rx_last = msta->airtime_ac[i + 4]; + + msta->airtime_ac[i] = mt76_rr(dev, addr); + msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + + tx_time[i] = msta->airtime_ac[i] - tx_last; + rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + + if ((tx_last | rx_last) & bit(30)) + clear = true; + + addr += 8; + } + + if (clear) { + mt7921_mac_wtbl_update(dev, idx, + mt_wtbl_update_adm_count_clear); + memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + } + + if (!msta->wcid.sta) + continue; + + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + for (i = 0; i < ieee80211_num_acs; i++) { + u8 q = mt7921_lmac_mapping(dev, i); + u32 tx_cur = tx_time[q]; + u32 rx_cur = rx_time[q]; + u8 tid = ac_to_tid[i]; + + if (!tx_cur && !rx_cur) + continue; + + ieee80211_sta_register_airtime(sta, tid, tx_cur, + rx_cur); + } + } + + rcu_read_unlock(); +} + +static void +mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, + struct ieee80211_radiotap_he *he, + __le32 *rxv) +{ + u32 ru_h, ru_l; + u8 ru, offs = 0; + + ru_l = field_get(mt_prxv_he_ru_alloc_l, le32_to_cpu(rxv[0])); + ru_h = field_get(mt_prxv_he_ru_alloc_h, le32_to_cpu(rxv[1])); + ru = (u8)(ru_l | ru_h << 4); + + status->bw = rate_info_bw_he_ru; + + switch (ru) { + case 0 ... 
36: + status->he_ru = nl80211_rate_info_he_ru_alloc_26; + offs = ru; + break; + case 37 ... 52: + status->he_ru = nl80211_rate_info_he_ru_alloc_52; + offs = ru - 37; + break; + case 53 ... 60: + status->he_ru = nl80211_rate_info_he_ru_alloc_106; + offs = ru - 53; + break; + case 61 ... 64: + status->he_ru = nl80211_rate_info_he_ru_alloc_242; + offs = ru - 61; + break; + case 65 ... 66: + status->he_ru = nl80211_rate_info_he_ru_alloc_484; + offs = ru - 65; + break; + case 67: + status->he_ru = nl80211_rate_info_he_ru_alloc_996; + break; + case 68: + status->he_ru = nl80211_rate_info_he_ru_alloc_2x996; + break; + } + + he->data1 |= he_bits(data1_bw_ru_alloc_known); + he->data2 |= he_bits(data2_ru_offset_known) | + le16_encode_bits(offs, + ieee80211_radiotap_he_data2_ru_offset); +} + +static void +mt7921_mac_decode_he_radiotap(struct sk_buff *skb, + struct mt76_rx_status *status, + __le32 *rxv, u32 phy) +{ + /* todo: struct ieee80211_radiotap_he_mu */ + static const struct ieee80211_radiotap_he known = { + .data1 = he_bits(data1_data_mcs_known) | + he_bits(data1_data_dcm_known) | + he_bits(data1_stbc_known) | + he_bits(data1_coding_known) | + he_bits(data1_ldpc_xsymseg_known) | + he_bits(data1_doppler_known) | + he_bits(data1_bss_color_known), + .data2 = he_bits(data2_gi_known) | + he_bits(data2_txbf_known) | + he_bits(data2_pe_disambig_known) | + he_bits(data2_txop_known), + }; + struct ieee80211_radiotap_he *he = null; + u32 ltf_size = le32_get_bits(rxv[2], mt_crxv_he_ltf_size) + 1; + + he = skb_push(skb, sizeof(known)); + memcpy(he, &known, sizeof(known)); + + he->data3 = he_prep(data3_bss_color, bss_color, rxv[14]) | + he_prep(data3_ldpc_xsymseg, ldpc_ext_sym, rxv[2]); + he->data5 = he_prep(data5_pe_disambig, pe_disambig, rxv[2]) | + le16_encode_bits(ltf_size, + ieee80211_radiotap_he_data5_ltf_size); + he->data6 = he_prep(data6_txop, txop_dur, rxv[14]) | + he_prep(data6_doppler, doppler, rxv[14]); + + switch (phy) { + case mt_phy_type_he_su: + he->data1 |= 
he_bits(data1_format_su) | + he_bits(data1_ul_dl_known) | + he_bits(data1_beam_change_known) | + he_bits(data1_sptl_reuse_known); + + he->data3 |= he_prep(data3_beam_change, beam_chng, rxv[14]) | + he_prep(data3_ul_dl, uplink, rxv[2]); + he->data4 |= he_prep(data4_su_mu_sptl_reuse, sr_mask, rxv[11]); + break; + case mt_phy_type_he_ext_su: + he->data1 |= he_bits(data1_format_ext_su) | + he_bits(data1_ul_dl_known); + + he->data3 |= he_prep(data3_ul_dl, uplink, rxv[2]); + break; + case mt_phy_type_he_mu: + he->data1 |= he_bits(data1_format_mu) | + he_bits(data1_ul_dl_known) | + he_bits(data1_sptl_reuse_known); + + he->data3 |= he_prep(data3_ul_dl, uplink, rxv[2]); + he->data4 |= he_prep(data4_su_mu_sptl_reuse, sr_mask, rxv[11]); + + mt7921_mac_decode_he_radiotap_ru(status, he, rxv); + break; + case mt_phy_type_he_tb: + he->data1 |= he_bits(data1_format_trig) | + he_bits(data1_sptl_reuse_known) | + he_bits(data1_sptl_reuse2_known) | + he_bits(data1_sptl_reuse3_known) | + he_bits(data1_sptl_reuse4_known); + + he->data4 |= he_prep(data4_tb_sptl_reuse1, sr_mask, rxv[11]) | + he_prep(data4_tb_sptl_reuse2, sr1_mask, rxv[11]) | + he_prep(data4_tb_sptl_reuse3, sr2_mask, rxv[11]) | + he_prep(data4_tb_sptl_reuse4, sr3_mask, rxv[11]); + + mt7921_mac_decode_he_radiotap_ru(status, he, rxv); + break; + default: + break; + } +} + +static void +mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy, + struct mt76_rx_status *status, u8 chfreq) +{ + if (!test_bit(mt76_hw_scanning, &mphy->state) && + !test_bit(mt76_hw_sched_scanning, &mphy->state) && + !test_bit(mt76_state_roc, &mphy->state)) { + status->freq = mphy->chandef.chan->center_freq; + status->band = mphy->chandef.chan->band; + return; + } + + status->band = chfreq <= 14 ? 
nl80211_band_2ghz : nl80211_band_5ghz; + status->freq = ieee80211_channel_to_frequency(chfreq, status->band); +} + +int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7921_phy *phy = &dev->phy; + struct ieee80211_supported_band *sband; + struct ieee80211_hdr *hdr; + __le32 *rxd = (__le32 *)skb->data; + __le32 *rxv = null; + u32 mode = 0; + u32 rxd1 = le32_to_cpu(rxd[1]); + u32 rxd2 = le32_to_cpu(rxd[2]); + u32 rxd3 = le32_to_cpu(rxd[3]); + bool unicast, insert_ccmp_hdr = false; + u8 remove_pad; + int i, idx; + u8 chfreq; + + memset(status, 0, sizeof(*status)); + + if (rxd1 & mt_rxd1_normal_band_idx) + return -einval; + + if (!test_bit(mt76_state_running, &mphy->state)) + return -einval; + + chfreq = field_get(mt_rxd3_normal_ch_freq, rxd3); + unicast = field_get(mt_rxd3_normal_addr_type, rxd3) == mt_rxd3_normal_u2m; + idx = field_get(mt_rxd1_normal_wlan_idx, rxd1); + status->wcid = mt7921_rx_get_wcid(dev, idx, unicast); + + if (status->wcid) { + struct mt7921_sta *msta; + + msta = container_of(status->wcid, struct mt7921_sta, wcid); + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + } + + mt7921_get_status_freq_info(dev, mphy, status, chfreq); + + if (status->band == nl80211_band_5ghz) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + if (!sband->channels) + return -einval; + + if (rxd1 & mt_rxd1_normal_fcs_err) + status->flag |= rx_flag_failed_fcs_crc; + + if (rxd1 & mt_rxd1_normal_tkip_mic_err) + status->flag |= rx_flag_mmic_error; + + if (field_get(mt_rxd1_normal_sec_mode, rxd1) != 0 && + !(rxd1 & (mt_rxd1_normal_clm | mt_rxd1_normal_cm))) { + status->flag |= rx_flag_decrypted; + status->flag |= rx_flag_iv_stripped; + status->flag |= rx_flag_mmic_stripped | rx_flag_mic_stripped; + 
} + + if (!(rxd2 & mt_rxd2_normal_non_ampdu)) { + status->flag |= rx_flag_ampdu_details; + + /* all subframes of an a-mpdu have the same timestamp */ + if (phy->rx_ampdu_ts != rxd[14]) { + if (!++phy->ampdu_ref) + phy->ampdu_ref++; + } + phy->rx_ampdu_ts = rxd[14]; + + status->ampdu_ref = phy->ampdu_ref; + } + + remove_pad = field_get(mt_rxd2_normal_hdr_offset, rxd2); + + if (rxd2 & mt_rxd2_normal_max_len_error) + return -einval; + + rxd += 6; + if (rxd1 & mt_rxd1_normal_group_4) { + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -einval; + } + + if (rxd1 & mt_rxd1_normal_group_1) { + u8 *data = (u8 *)rxd; + + if (status->flag & rx_flag_decrypted) { + status->iv[0] = data[5]; + status->iv[1] = data[4]; + status->iv[2] = data[3]; + status->iv[3] = data[2]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + + insert_ccmp_hdr = field_get(mt_rxd2_normal_frag, rxd2); + } + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -einval; + } + + if (rxd1 & mt_rxd1_normal_group_2) { + rxd += 2; + if ((u8 *)rxd - skb->data >= skb->len) + return -einval; + } + + /* rxd group 3 - p-rxv */ + if (rxd1 & mt_rxd1_normal_group_3) { + u32 v0, v1, v2; + + rxv = rxd; + rxd += 2; + if ((u8 *)rxd - skb->data >= skb->len) + return -einval; + + v0 = le32_to_cpu(rxv[0]); + v1 = le32_to_cpu(rxv[1]); + v2 = le32_to_cpu(rxv[2]); + + if (v0 & mt_prxv_ht_ad_code) + status->enc_flags |= rx_enc_flag_ldpc; + + status->chains = mphy->antenna_mask; + status->chain_signal[0] = to_rssi(mt_prxv_rcpi0, v1); + status->chain_signal[1] = to_rssi(mt_prxv_rcpi1, v1); + status->chain_signal[2] = to_rssi(mt_prxv_rcpi2, v1); + status->chain_signal[3] = to_rssi(mt_prxv_rcpi3, v1); + status->signal = status->chain_signal[0]; + + for (i = 1; i < hweight8(mphy->antenna_mask); i++) { + if (!(status->chains & bit(i))) + continue; + + status->signal = max(status->signal, + status->chain_signal[i]); + } + + /* rxd group 5 - c-rxv */ + if (rxd1 & mt_rxd1_normal_group_5) { + u8 stbc = 
field_get(mt_crxv_ht_stbc, v2); + u8 gi = field_get(mt_crxv_ht_short_gi, v2); + bool cck = false; + + rxd += 18; + if ((u8 *)rxd - skb->data >= skb->len) + return -einval; + + idx = i = field_get(mt_prxv_tx_rate, v0); + mode = field_get(mt_crxv_tx_mode, v2); + + switch (mode) { + case mt_phy_type_cck: + cck = true; + fallthrough; + case mt_phy_type_ofdm: + i = mt76_get_rate(&dev->mt76, sband, i, cck); + break; + case mt_phy_type_ht_gf: + case mt_phy_type_ht: + status->encoding = rx_enc_ht; + if (i > 31) + return -einval; + break; + case mt_phy_type_vht: + status->nss = + field_get(mt_prxv_nsts, v0) + 1; + status->encoding = rx_enc_vht; + if (i > 9) + return -einval; + break; + case mt_phy_type_he_mu: + status->flag |= rx_flag_radiotap_he_mu; + fallthrough; + case mt_phy_type_he_su: + case mt_phy_type_he_ext_su: + case mt_phy_type_he_tb: + status->nss = + field_get(mt_prxv_nsts, v0) + 1; + status->encoding = rx_enc_he; + status->flag |= rx_flag_radiotap_he; + i &= genmask(3, 0); + + if (gi <= nl80211_rate_info_he_gi_3_2) + status->he_gi = gi; + + status->he_dcm = !!(idx & mt_prxv_tx_dcm); + break; + default: + return -einval; + } + status->rate_idx = i; + + switch (field_get(mt_crxv_frame_mode, v2)) { + case ieee80211_sta_rx_bw_20: + break; + case ieee80211_sta_rx_bw_40: + if (mode & mt_phy_type_he_ext_su && + (idx & mt_prxv_tx_er_su_106t)) { + status->bw = rate_info_bw_he_ru; + status->he_ru = + nl80211_rate_info_he_ru_alloc_106; + } else { + status->bw = rate_info_bw_40; + } + break; + case ieee80211_sta_rx_bw_80: + status->bw = rate_info_bw_80; + break; + case ieee80211_sta_rx_bw_160: + status->bw = rate_info_bw_160; + break; + default: + return -einval; + } + + status->enc_flags |= rx_enc_flag_stbc_mask * stbc; + if (mode < mt_phy_type_he_su && gi) + status->enc_flags |= rx_enc_flag_short_gi; + } + } + + skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); + + if (insert_ccmp_hdr) { + u8 key_id = field_get(mt_rxd1_normal_key_id, rxd1); + + 
mt76_insert_ccmp_hdr(skb, key_id); + } + + if (rxv && status->flag & rx_flag_radiotap_he) + mt7921_mac_decode_he_radiotap(skb, status, rxv, mode); + + hdr = mt76_skb_get_hdr(skb); + if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + return 0; + + status->aggr = unicast && + !ieee80211_is_qos_nullfunc(hdr->frame_control); + status->tid = *ieee80211_get_qos_ctl(hdr) & ieee80211_qos_ctl_tid_mask; + status->seqno = ieee80211_seq_to_sn(le16_to_cpu(hdr->seq_ctrl)); + + return 0; +} + +static void +mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid) +{ + u8 tid = skb->priority & ieee80211_qos_ctl_tid_mask; + u8 fc_type, fc_stype; + bool wmm = false; + u32 val; + + if (wcid->sta) { + struct ieee80211_sta *sta; + + sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv); + wmm = sta->wme; + } + + val = field_prep(mt_txd1_hdr_format, mt_hdr_format_802_3) | + field_prep(mt_txd1_tid, tid); + + if (be16_to_cpu(skb->protocol) >= eth_p_802_3_min) + val |= mt_txd1_eth_802_3; + + txwi[1] |= cpu_to_le32(val); + + fc_type = ieee80211_ftype_data >> 2; + fc_stype = wmm ? 
ieee80211_stype_qos_data >> 4 : 0; + + val = field_prep(mt_txd2_frame_type, fc_type) | + field_prep(mt_txd2_sub_type, fc_stype); + + txwi[2] |= cpu_to_le32(val); + + val = field_prep(mt_txd7_type, fc_type) | + field_prep(mt_txd7_sub_type, fc_stype); + txwi[7] |= cpu_to_le32(val); +} + +static void +mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct ieee80211_key_conf *key) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + struct ieee80211_tx_info *info = ieee80211_skb_cb(skb); + bool multicast = is_multicast_ether_addr(hdr->addr1); + u8 tid = skb->priority & ieee80211_qos_ctl_tid_mask; + __le16 fc = hdr->frame_control; + u8 fc_type, fc_stype; + u32 val; + + if (ieee80211_is_action(fc) && + mgmt->u.action.category == wlan_category_back && + mgmt->u.action.u.addba_req.action_code == wlan_action_addba_req) { + u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab); + + txwi[5] |= cpu_to_le32(mt_txd5_add_ba); + tid = (capab >> 2) & ieee80211_qos_ctl_tid_mask; + } else if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr; + u16 control = le16_to_cpu(bar->control); + + tid = field_get(ieee80211_bar_ctrl_tid_info_mask, control); + } + + val = field_prep(mt_txd1_hdr_format, mt_hdr_format_802_11) | + field_prep(mt_txd1_hdr_info, + ieee80211_get_hdrlen_from_skb(skb) / 2) | + field_prep(mt_txd1_tid, tid); + txwi[1] |= cpu_to_le32(val); + + fc_type = (le16_to_cpu(fc) & ieee80211_fctl_ftype) >> 2; + fc_stype = (le16_to_cpu(fc) & ieee80211_fctl_stype) >> 4; + + val = field_prep(mt_txd2_frame_type, fc_type) | + field_prep(mt_txd2_sub_type, fc_stype) | + field_prep(mt_txd2_multicast, multicast); + + if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) && + key->cipher == wlan_cipher_suite_aes_cmac) { + val |= mt_txd2_bip; + txwi[3] &= ~cpu_to_le32(mt_txd3_protect_frame); + } + + if 
(!ieee80211_is_data(fc) || multicast) + val |= mt_txd2_fix_rate; + + txwi[2] |= cpu_to_le32(val); + + if (ieee80211_is_beacon(fc)) { + txwi[3] &= ~cpu_to_le32(mt_txd3_sw_power_mgmt); + txwi[3] |= cpu_to_le32(mt_txd3_rem_tx_count); + } + + if (info->flags & ieee80211_tx_ctl_injected) { + u16 seqno = le16_to_cpu(hdr->seq_ctrl); + + if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; + seqno = le16_to_cpu(bar->start_seq_num); + } + + val = mt_txd3_sn_valid | + field_prep(mt_txd3_seq, ieee80211_seq_to_sn(seqno)); + txwi[3] |= cpu_to_le32(val); + } + + val = field_prep(mt_txd7_type, fc_type) | + field_prep(mt_txd7_sub_type, fc_stype); + txwi[7] |= cpu_to_le32(val); +} + +void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, bool beacon) +{ + struct ieee80211_tx_info *info = ieee80211_skb_cb(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt76_phy *mphy = &dev->mphy; + u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; + bool is_8023 = info->flags & ieee80211_tx_ctl_hw_80211_encap; + u16 tx_count = 15; + u32 val; + + if (vif) { + struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + + omac_idx = mvif->omac_idx; + wmm_idx = mvif->wmm_idx; + } + + if (beacon) { + p_fmt = mt_tx_type_fw; + q_idx = mt_lmac_bcn0; + } else if (skb_get_queue_mapping(skb) >= mt_txq_psd) { + p_fmt = mt_tx_type_ct; + q_idx = mt_lmac_altx0; + } else { + p_fmt = mt_tx_type_ct; + q_idx = wmm_idx * mt7921_max_wmm_sets + + mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb)); + } + + val = field_prep(mt_txd0_tx_bytes, skb->len + mt_txd_size) | + field_prep(mt_txd0_pkt_fmt, p_fmt) | + field_prep(mt_txd0_q_idx, q_idx); + txwi[0] = cpu_to_le32(val); + + val = mt_txd1_long_format | + field_prep(mt_txd1_wlan_idx, wcid->idx) | + field_prep(mt_txd1_own_mac, omac_idx); + + txwi[1] = cpu_to_le32(val); + txwi[2] = 0; + + val = 
mt_txd3_sw_power_mgmt | + field_prep(mt_txd3_rem_tx_count, tx_count); + if (key) + val |= mt_txd3_protect_frame; + if (info->flags & ieee80211_tx_ctl_no_ack) + val |= mt_txd3_no_ack; + + txwi[3] = cpu_to_le32(val); + txwi[4] = 0; + txwi[5] = 0; + txwi[6] = 0; + txwi[7] = wcid->amsdu ? cpu_to_le32(mt_txd7_hw_amsdu) : 0; + + if (is_8023) + mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid); + else + mt7921_mac_write_txwi_80211(dev, txwi, skb, key); + + if (txwi[2] & cpu_to_le32(mt_txd2_fix_rate)) { + u16 rate; + + /* hardware won't add htc for mgmt/ctrl frame */ + txwi[2] |= cpu_to_le32(mt_txd2_htc_vld); + + if (mphy->chandef.chan->band == nl80211_band_5ghz) + rate = mt7921_5g_rate_default; + else + rate = mt7921_2g_rate_default; + + val = mt_txd6_fixed_bw | + field_prep(mt_txd6_tx_rate, rate); + txwi[6] |= cpu_to_le32(val); + txwi[3] |= cpu_to_le32(mt_txd3_ba_disable); + } +} + +static void +mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info, + void *txp_ptr, u32 id) +{ + struct mt7921_hw_txp *txp = txp_ptr; + struct mt7921_txp_ptr *ptr = &txp->ptr[0]; + int i, nbuf = tx_info->nbuf - 1; + + tx_info->buf[0].len = mt_txd_size + sizeof(*txp); + tx_info->nbuf = 1; + + txp->msdu_id[0] = cpu_to_le16(id | mt_msdu_id_valid); + + for (i = 0; i < nbuf; i++) { + u16 len = tx_info->buf[i + 1].len & mt_txd_len_mask; + u32 addr = tx_info->buf[i + 1].addr; + + if (i == nbuf - 1) + len |= mt_txd_len_last; + + if (i & 1) { + ptr->buf1 = cpu_to_le32(addr); + ptr->len1 = cpu_to_le16(len); + ptr++; + } else { + ptr->buf0 = cpu_to_le32(addr); + ptr->len0 = cpu_to_le16(len); + } + } +} + +static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked) +{ + struct mt76_phy *mphy = &dev->mphy; + struct mt76_queue *q; + + q = mphy->q_tx[0]; + if (blocked == q->blocked) + return; + + q->blocked = blocked; + if (!blocked) + mt76_worker_schedule(&dev->mt76.tx_worker); +} + +int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, 
struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct ieee80211_tx_info *info = ieee80211_skb_cb(tx_info->skb); + struct ieee80211_key_conf *key = info->control.hw_key; + struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb); + struct mt76_txwi_cache *t; + struct mt7921_txp_common *txp; + int id; + u8 *txwi = (u8 *)txwi_ptr; + + if (unlikely(tx_info->skb->len <= eth_hlen)) + return -einval; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + cb->wcid = wcid->idx; + + t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); + t->skb = tx_info->skb; + + spin_lock_bh(&dev->token_lock); + id = idr_alloc(&dev->token, t, 0, mt7921_token_size, gfp_atomic); + if (id >= 0) + dev->token_count++; + + if (dev->token_count >= mt7921_token_size - mt7921_token_free_thr) + mt7921_set_tx_blocked(dev, true); + spin_unlock_bh(&dev->token_lock); + + if (id < 0) + return id; + + mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key, + false); + + txp = (struct mt7921_txp_common *)(txwi + mt_txd_size); + memset(txp, 0, sizeof(struct mt7921_txp_common)); + mt7921_write_hw_txp(dev, tx_info, txp, id); + + tx_info->skb = dma_dummy_data; + + return 0; +} + +static void +mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +{ + struct mt7921_sta *msta; + u16 fc, tid; + u32 val; + + if (!sta || !sta->ht_cap.ht_supported) + return; + + tid = field_get(mt_txd1_tid, le32_to_cpu(txwi[1])); + if (tid >= 6) /* skip vo queue */ + return; + + val = le32_to_cpu(txwi[2]); + fc = field_get(mt_txd2_frame_type, val) << 2 | + field_get(mt_txd2_sub_type, val) << 4; + if (unlikely(fc != (ieee80211_ftype_data | ieee80211_stype_qos_data))) + return; + + msta = (struct mt7921_sta *)sta->drv_priv; + if (!test_and_set_bit(tid, &msta->ampdu_state)) + ieee80211_start_tx_ba_session(sta, tid, 0); +} + +static void +mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, + 
struct ieee80211_sta *sta, u8 stat, + struct list_head *free_list) +{ + struct ieee80211_tx_info *info = ieee80211_skb_cb(skb); + struct ieee80211_tx_status status = { + .sta = sta, + .info = info, + .skb = skb, + .free_list = free_list, + }; + struct ieee80211_hw *hw; + + if (sta) { + struct mt7921_sta *msta; + + msta = (struct mt7921_sta *)sta->drv_priv; + status.rate = &msta->stats.tx_rate; + } + + hw = mt76_tx_status_get_hw(mdev, skb); + + if (info->flags & ieee80211_tx_ctl_ampdu) + info->flags |= ieee80211_tx_stat_ampdu; + + if (stat) + ieee80211_tx_info_clear_status(info); + + if (!(info->flags & ieee80211_tx_ctl_no_ack)) + info->flags |= ieee80211_tx_stat_ack; + + info->status.tx_time = 0; + ieee80211_tx_status_ext(hw, &status); +} + +void mt7921_txp_skb_unmap(struct mt76_dev *dev, + struct mt76_txwi_cache *t) +{ + struct mt7921_txp_common *txp; + int i; + + txp = mt7921_txwi_to_txp(dev, t); + + for (i = 0; i < array_size(txp->hw.ptr); i++) { + struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i]; + bool last; + u16 len; + + len = le16_to_cpu(ptr->len0); + last = len & mt_txd_len_last; + len &= mt_txd_len_mask; + dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len, + dma_to_device); + if (last) + break; + + len = le16_to_cpu(ptr->len1); + last = len & mt_txd_len_last; + len &= mt_txd_len_mask; + dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len, + dma_to_device); + if (last) + break; + } +} + +void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_txwi_cache *txwi; + struct ieee80211_sta *sta = null; + list_head(free_list); + struct sk_buff *tmp; + bool wake = false; + u8 i, count; + + /* clean dma queues and unmap buffers first */ + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[mt_txq_psd], false); + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[mt_txq_be], false); + + /* todo: mt_tx_free_latency is msdu time from the txd is 
queued into ple, + * to the time ack is received or dropped by hw (air + hw queue time). + * should avoid accessing wtbl to get tx airtime, and use it instead. + */ + count = field_get(mt_tx_free_msdu_cnt, le16_to_cpu(free->ctrl)); + for (i = 0; i < count; i++) { + u32 msdu, info = le32_to_cpu(free->info[i]); + u8 stat; + + /* 1'b1: new wcid pair. + * 1'b0: msdu_id with the same 'wcid pair' as above. + */ + if (info & mt_tx_free_pair) { + struct mt7921_sta *msta; + struct mt7921_phy *phy; + struct mt76_wcid *wcid; + u16 idx; + + count++; + idx = field_get(mt_tx_free_wlan_id, info); + wcid = rcu_dereference(dev->mt76.wcid[idx]); + sta = wcid_to_sta(wcid); + if (!sta) + continue; + + msta = container_of(wcid, struct mt7921_sta, wcid); + phy = msta->vif->phy; + spin_lock_bh(&dev->sta_poll_lock); + if (list_empty(&msta->stats_list)) + list_add_tail(&msta->stats_list, &phy->stats_list); + if (list_empty(&msta->poll_list)) + list_add_tail(&msta->poll_list, &dev->sta_poll_list); + spin_unlock_bh(&dev->sta_poll_lock); + continue; + } + + msdu = field_get(mt_tx_free_msdu_id, info); + stat = field_get(mt_tx_free_status, info); + + spin_lock_bh(&dev->token_lock); + txwi = idr_remove(&dev->token, msdu); + if (txwi) + dev->token_count--; + if (dev->token_count < mt7921_token_size - mt7921_token_free_thr && + dev->mphy.q_tx[0]->blocked) + wake = true; + spin_unlock_bh(&dev->token_lock); + + if (!txwi) + continue; + + mt7921_txp_skb_unmap(mdev, txwi); + if (txwi->skb) { + struct ieee80211_tx_info *info = ieee80211_skb_cb(txwi->skb); + void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi); + + if (likely(txwi->skb->protocol != cpu_to_be16(eth_p_pae))) + mt7921_tx_check_aggr(sta, txwi_ptr); + + if (sta && !info->tx_time_est) { + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + int pending; + + pending = atomic_dec_return(&wcid->non_aql_packets); + if (pending < 0) + atomic_cmpxchg(&wcid->non_aql_packets, pending, 0); + } + + mt7921_tx_complete_status(mdev, txwi->skb, sta, 
stat, &free_list); + txwi->skb = null; + } + + mt76_put_txwi(mdev, txwi); + } + + mt7921_mac_sta_poll(dev); + + if (wake) { + spin_lock_bh(&dev->token_lock); + mt7921_set_tx_blocked(dev, false); + spin_unlock_bh(&dev->token_lock); + } + + mt76_worker_schedule(&dev->mt76.tx_worker); + + napi_consume_skb(skb, 1); + + list_for_each_entry_safe(skb, tmp, &free_list, list) { + skb_list_del_init(skb); + napi_consume_skb(skb, 1); + } +} + +void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e) +{ + struct mt7921_dev *dev; + + if (!e->txwi) { + dev_kfree_skb_any(e->skb); + return; + } + + dev = container_of(mdev, struct mt7921_dev, mt76); + + /* error path */ + if (e->skb == dma_dummy_data) { + struct mt76_txwi_cache *t; + struct mt7921_txp_common *txp; + u16 token; + + txp = mt7921_txwi_to_txp(mdev, e->txwi); + + token = le16_to_cpu(txp->hw.msdu_id[0]) & ~mt_msdu_id_valid; + spin_lock_bh(&dev->token_lock); + t = idr_remove(&dev->token, token); + spin_unlock_bh(&dev->token_lock); + e->skb = t ? 
t->skb : null; + } + + if (e->skb) { + struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb); + struct mt76_wcid *wcid; + + wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]); + + mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0, + null); + } +} + +void mt7921_mac_reset_counters(struct mt7921_phy *phy) +{ + struct mt7921_dev *dev = phy->dev; + int i; + + for (i = 0; i < 4; i++) { + mt76_rr(dev, mt_tx_agg_cnt(0, i)); + mt76_rr(dev, mt_tx_agg_cnt2(0, i)); + } + + dev->mt76.phy.survey_time = ktime_get_boottime(); + memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2); + + /* reset airtime counters */ + mt76_rr(dev, mt_mib_sdr9(0)); + mt76_rr(dev, mt_mib_sdr36(0)); + mt76_rr(dev, mt_mib_sdr37(0)); + + mt76_set(dev, mt_wf_rmac_mib_time0(0), mt_wf_rmac_mib_rxtime_clr); + mt76_set(dev, mt_wf_rmac_mib_airtime0(0), mt_wf_rmac_mib_rxtime_clr); +} + +void mt7921_mac_set_timing(struct mt7921_phy *phy) +{ + s16 coverage_class = phy->coverage_class; + struct mt7921_dev *dev = phy->dev; + u32 val, reg_offset; + u32 cck = field_prep(mt_timeout_val_plcp, 231) | + field_prep(mt_timeout_val_cca, 48); + u32 ofdm = field_prep(mt_timeout_val_plcp, 60) | + field_prep(mt_timeout_val_cca, 28); + int sifs, offset; + bool is_5ghz = phy->mt76->chandef.chan->band == nl80211_band_5ghz; + + if (!test_bit(mt76_state_running, &phy->mt76->state)) + return; + + if (is_5ghz) + sifs = 16; + else + sifs = 10; + + mt76_set(dev, mt_arb_scr(0), + mt_arb_scr_tx_disable | mt_arb_scr_rx_disable); + udelay(1); + + offset = 3 * coverage_class; + reg_offset = field_prep(mt_timeout_val_plcp, offset) | + field_prep(mt_timeout_val_cca, offset); + + mt76_wr(dev, mt_tmac_cdtr(0), cck + reg_offset); + mt76_wr(dev, mt_tmac_odtr(0), ofdm + reg_offset); + mt76_wr(dev, mt_tmac_icr0(0), + field_prep(mt_ifs_eifs, 360) | + field_prep(mt_ifs_rifs, 2) | + field_prep(mt_ifs_sifs, sifs) | + field_prep(mt_ifs_slot, phy->slottime)); + + if (phy->slottime < 20 || is_5ghz) + val = mt7921_cfend_rate_default; + 
else + val = mt7921_cfend_rate_11b; + + mt76_rmw_field(dev, mt_agg_acr0(0), mt_agg_acr_cfend_rate, val); + mt76_clear(dev, mt_arb_scr(0), + mt_arb_scr_tx_disable | mt_arb_scr_rx_disable); +} + +static u8 +mt7921_phy_get_nf(struct mt7921_phy *phy, int idx) +{ + return 0; +} + +static void +mt7921_phy_update_channel(struct mt76_phy *mphy, int idx) +{ + struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76); + struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv; + struct mt76_channel_state *state; + u64 busy_time, tx_time, rx_time, obss_time; + int nf; + + busy_time = mt76_get_field(dev, mt_mib_sdr9(idx), + mt_mib_sdr9_busy_mask); + tx_time = mt76_get_field(dev, mt_mib_sdr36(idx), + mt_mib_sdr36_txtime_mask); + rx_time = mt76_get_field(dev, mt_mib_sdr37(idx), + mt_mib_sdr37_rxtime_mask); + obss_time = mt76_get_field(dev, mt_wf_rmac_mib_airtime14(idx), + mt_mib_obsstime_mask); + + nf = mt7921_phy_get_nf(phy, idx); + if (!phy->noise) + phy->noise = nf << 4; + else if (nf) + phy->noise += nf - (phy->noise >> 4); + + state = mphy->chan_state; + state->cc_busy += busy_time; + state->cc_tx += tx_time; + state->cc_rx += rx_time + obss_time; + state->cc_bss_rx += rx_time; + state->noise = -(phy->noise >> 4); +} + +void mt7921_update_channel(struct mt76_dev *mdev) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + mt7921_phy_update_channel(&mdev->phy, 0); + /* reset obss airtime */ + mt76_set(dev, mt_wf_rmac_mib_time0(0), mt_wf_rmac_mib_rxtime_clr); +} + +static bool +mt7921_wait_reset_state(struct mt7921_dev *dev, u32 state) +{ + bool ret; + + ret = wait_event_timeout(dev->reset_wait, + (read_once(dev->reset_state) & state), + mt7921_reset_timeout); + + warn(!ret, "timeout waiting for mcu reset state %x ", state); + return ret; +} + +static void +mt7921_dma_reset(struct mt7921_phy *phy) +{ + struct mt7921_dev *dev = phy->dev; + int i; + + mt76_clear(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | 
mt_wfdma0_glo_cfg_rx_dma_en); + + usleep_range(1000, 2000); + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[mt_mcuq_wa], true); + for (i = 0; i < __mt_txq_max; i++) + mt76_queue_tx_cleanup(dev, phy->mt76->q_tx[i], true); + + mt76_for_each_q_rx(&dev->mt76, i) { + mt76_queue_rx_reset(dev, i); + } + + mt76_set(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | mt_wfdma0_glo_cfg_rx_dma_en); +} + +void mt7921_tx_token_put(struct mt7921_dev *dev) +{ + struct mt76_txwi_cache *txwi; + int id; + + spin_lock_bh(&dev->token_lock); + idr_for_each_entry(&dev->token, txwi, id) { + mt7921_txp_skb_unmap(&dev->mt76, txwi); + if (txwi->skb) { + struct ieee80211_hw *hw; + + hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb); + ieee80211_free_txskb(hw, txwi->skb); + } + mt76_put_txwi(&dev->mt76, txwi); + dev->token_count--; + } + spin_unlock_bh(&dev->token_lock); + idr_destroy(&dev->token); +} + +/* system error recovery */ +void mt7921_mac_reset_work(struct work_struct *work) +{ + struct mt7921_dev *dev; + + dev = container_of(work, struct mt7921_dev, reset_work); + + if (!(read_once(dev->reset_state) & mt_mcu_cmd_stop_dma)) + return; + + ieee80211_stop_queues(mt76_hw(dev)); + + set_bit(mt76_reset, &dev->mphy.state); + set_bit(mt76_mcu_reset, &dev->mphy.state); + wake_up(&dev->mt76.mcu.wait); + cancel_delayed_work_sync(&dev->mphy.mac_work); + + /* lock/unlock all queues to ensure that no tx is pending */ + mt76_txq_schedule_all(&dev->mphy); + + mt76_worker_disable(&dev->mt76.tx_worker); + napi_disable(&dev->mt76.napi[0]); + napi_disable(&dev->mt76.napi[1]); + napi_disable(&dev->mt76.napi[2]); + napi_disable(&dev->mt76.tx_napi); + + mutex_lock(&dev->mt76.mutex); + + mt76_wr(dev, mt_mcu_int_event, mt_mcu_int_event_dma_stopped); + + mt7921_tx_token_put(dev); + idr_init(&dev->token); + + if (mt7921_wait_reset_state(dev, mt_mcu_cmd_reset_done)) { + mt7921_dma_reset(&dev->phy); + + mt76_wr(dev, mt_mcu_int_event, mt_mcu_int_event_dma_init); + mt7921_wait_reset_state(dev, 
mt_mcu_cmd_recovery_done); + } + + clear_bit(mt76_mcu_reset, &dev->mphy.state); + clear_bit(mt76_reset, &dev->mphy.state); + + mt76_worker_enable(&dev->mt76.tx_worker); + napi_enable(&dev->mt76.tx_napi); + napi_schedule(&dev->mt76.tx_napi); + + napi_enable(&dev->mt76.napi[0]); + napi_schedule(&dev->mt76.napi[0]); + + napi_enable(&dev->mt76.napi[1]); + napi_schedule(&dev->mt76.napi[1]); + + napi_enable(&dev->mt76.napi[2]); + napi_schedule(&dev->mt76.napi[2]); + + ieee80211_wake_queues(mt76_hw(dev)); + + mt76_wr(dev, mt_mcu_int_event, mt_mcu_int_event_reset_done); + mt7921_wait_reset_state(dev, mt_mcu_cmd_normal_state); + + mutex_unlock(&dev->mt76.mutex); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work, + mt7921_watchdog_time); +} + +static void +mt7921_mac_update_mib_stats(struct mt7921_phy *phy) +{ + struct mt7921_dev *dev = phy->dev; + struct mib_stats *mib = &phy->mib; + int i, aggr0 = 0, aggr1; + + memset(mib, 0, sizeof(*mib)); + + mib->fcs_err_cnt = mt76_get_field(dev, mt_mib_sdr3(0), + mt_mib_sdr3_fcs_err_mask); + + for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) { + u32 val, val2; + + val = mt76_rr(dev, mt_mib_mb_sdr1(0, i)); + + val2 = field_get(mt_mib_ack_fail_count_mask, val); + if (val2 > mib->ack_fail_cnt) + mib->ack_fail_cnt = val2; + + val2 = field_get(mt_mib_ba_miss_count_mask, val); + if (val2 > mib->ba_miss_cnt) + mib->ba_miss_cnt = val2; + + val = mt76_rr(dev, mt_mib_mb_sdr0(0, i)); + val2 = field_get(mt_mib_rts_retries_count_mask, val); + if (val2 > mib->rts_retries_cnt) { + mib->rts_cnt = field_get(mt_mib_rts_count_mask, val); + mib->rts_retries_cnt = val2; + } + + val = mt76_rr(dev, mt_tx_agg_cnt(0, i)); + val2 = mt76_rr(dev, mt_tx_agg_cnt2(0, i)); + + dev->mt76.aggr_stats[aggr0++] += val & 0xffff; + dev->mt76.aggr_stats[aggr0++] += val >> 16; + dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff; + dev->mt76.aggr_stats[aggr1++] += val2 >> 16; + } +} + +void mt7921_mac_work(struct work_struct *work) +{ + struct mt7921_phy *phy; + struct 
mt76_phy *mphy; + + mphy = (struct mt76_phy *)container_of(work, struct mt76_phy, + mac_work.work); + phy = mphy->priv; + + mutex_lock(&mphy->dev->mutex); + + mt76_update_survey(mphy->dev); + if (++mphy->mac_work_count == 5) { + mphy->mac_work_count = 0; + + mt7921_mac_update_mib_stats(phy); + } + + mutex_unlock(&mphy->dev->mutex); + + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, + mt7921_watchdog_time); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h +/* spdx-license-identifier: isc */ +/* copyright (c) 2020 mediatek inc. */ + +#ifndef __mt7921_mac_h +#define __mt7921_mac_h + +#define mt_ct_parse_len 72 +#define mt_ct_dma_buf_num 2 + +#define mt_rxd0_length genmask(15, 0) +#define mt_rxd0_pkt_flag genmask(19, 16) +#define mt_rxd0_pkt_type genmask(31, 27) + +#define mt_rxd0_normal_eth_type_ofs genmask(22, 16) +#define mt_rxd0_normal_ip_sum bit(23) +#define mt_rxd0_normal_udp_tcp_sum bit(24) + +enum rx_pkt_type { + pkt_type_txs, + pkt_type_txrxv, + pkt_type_normal, + pkt_type_rx_dup_rfb, + pkt_type_rx_tmr, + pkt_type_retrieve, + pkt_type_txrx_notify, + pkt_type_rx_event, + pkt_type_normal_mcu, +}; + +/* rxd dw1 */ +#define mt_rxd1_normal_wlan_idx genmask(9, 0) +#define mt_rxd1_normal_group_1 bit(11) +#define mt_rxd1_normal_group_2 bit(12) +#define mt_rxd1_normal_group_3 bit(13) +#define mt_rxd1_normal_group_4 bit(14) +#define mt_rxd1_normal_group_5 bit(15) +#define mt_rxd1_normal_sec_mode genmask(20, 16) +#define mt_rxd1_normal_key_id genmask(22, 21) +#define mt_rxd1_normal_cm bit(23) +#define mt_rxd1_normal_clm bit(24) +#define mt_rxd1_normal_icv_err bit(25) +#define mt_rxd1_normal_tkip_mic_err bit(26) +#define mt_rxd1_normal_fcs_err bit(27) +#define mt_rxd1_normal_band_idx bit(28) +#define mt_rxd1_normal_spp_en bit(29) +#define mt_rxd1_normal_add_om bit(30) +#define mt_rxd1_normal_sec_done bit(31) + +/* rxd dw2 */ 
+#define mt_rxd2_normal_bssid genmask(5, 0) +#define mt_rxd2_normal_co_ant bit(6) +#define mt_rxd2_normal_bf_cqi bit(7) +#define mt_rxd2_normal_mac_hdr_len genmask(12, 8) +#define mt_rxd2_normal_hdr_trans bit(13) +#define mt_rxd2_normal_hdr_offset genmask(15, 14) +#define mt_rxd2_normal_tid genmask(19, 16) +#define mt_rxd2_normal_mu_bar bit(21) +#define mt_rxd2_normal_sw_bit bit(22) +#define mt_rxd2_normal_amsdu_err bit(23) +#define mt_rxd2_normal_max_len_error bit(24) +#define mt_rxd2_normal_hdr_trans_error bit(25) +#define mt_rxd2_normal_int_frame bit(26) +#define mt_rxd2_normal_frag bit(27) +#define mt_rxd2_normal_null_frame bit(28) +#define mt_rxd2_normal_ndata bit(29) +#define mt_rxd2_normal_non_ampdu bit(30) +#define mt_rxd2_normal_bf_report bit(31) + +/* rxd dw3 */ +#define mt_rxd3_normal_rxv_seq genmask(7, 0) +#define mt_rxd3_normal_ch_freq genmask(15, 8) +#define mt_rxd3_normal_addr_type genmask(17, 16) +#define mt_rxd3_normal_u2m bit(0) +#define mt_rxd3_normal_htc_vld bit(0) +#define mt_rxd3_normal_tsf_compare_loss bit(19) +#define mt_rxd3_normal_beacon_mc bit(20) +#define mt_rxd3_normal_beacon_uc bit(21) +#define mt_rxd3_normal_amsdu bit(22) +#define mt_rxd3_normal_mesh bit(23) +#define mt_rxd3_normal_mhcp bit(24) +#define mt_rxd3_normal_no_info_wb bit(25) +#define mt_rxd3_normal_disable_rx_hdr_trans bit(26) +#define mt_rxd3_normal_power_save_stat bit(27) +#define mt_rxd3_normal_more bit(28) +#define mt_rxd3_normal_unwant bit(29) +#define mt_rxd3_normal_rx_drop bit(30) +#define mt_rxd3_normal_vlan2eth bit(31) + +/* rxd dw4 */ +#define mt_rxd4_normal_payload_format genmask(1, 0) +#define mt_rxd4_normal_pattern_drop bit(9) +#define mt_rxd4_normal_cls bit(10) +#define mt_rxd4_normal_ofld genmask(12, 11) +#define mt_rxd4_normal_magic_pkt bit(13) +#define mt_rxd4_normal_wol genmask(18, 14) +#define mt_rxd4_normal_cls_bitmap genmask(28, 19) +#define mt_rxd3_normal_pf_mode bit(29) +#define mt_rxd3_normal_pf_sts genmask(31, 30) + +/* p-rxv */ +#define 
mt_prxv_tx_rate genmask(6, 0) +#define mt_prxv_tx_dcm bit(4) +#define mt_prxv_tx_er_su_106t bit(5) +#define mt_prxv_nsts genmask(9, 7) +#define mt_prxv_ht_ad_code bit(11) +#define mt_prxv_he_ru_alloc_l genmask(31, 28) +#define mt_prxv_he_ru_alloc_h genmask(3, 0) +#define mt_prxv_rcpi3 genmask(31, 24) +#define mt_prxv_rcpi2 genmask(23, 16) +#define mt_prxv_rcpi1 genmask(15, 8) +#define mt_prxv_rcpi0 genmask(7, 0) + +/* c-rxv */ +#define mt_crxv_ht_stbc genmask(1, 0) +#define mt_crxv_tx_mode genmask(7, 4) +#define mt_crxv_frame_mode genmask(10, 8) +#define mt_crxv_ht_short_gi genmask(14, 13) +#define mt_crxv_he_ltf_size genmask(18, 17) +#define mt_crxv_he_ldpc_ext_sym bit(20) +#define mt_crxv_he_pe_disambig bit(23) +#define mt_crxv_he_uplink bit(31) + +#define mt_crxv_he_sr_mask genmask(11, 8) +#define mt_crxv_he_sr1_mask genmask(16, 12) +#define mt_crxv_he_sr2_mask genmask(20, 17) +#define mt_crxv_he_sr3_mask genmask(24, 21) + +#define mt_crxv_he_bss_color genmask(5, 0) +#define mt_crxv_he_txop_dur genmask(12, 6) +#define mt_crxv_he_beam_chng bit(13) +#define mt_crxv_he_doppler bit(16) + +#define mt_crxv_snr genmask(18, 13) +#define mt_crxv_foe_lo genmask(31, 19) +#define mt_crxv_foe_hi genmask(6, 0) +#define mt_crxv_foe_shift 13 + +enum tx_header_format { + mt_hdr_format_802_3, + mt_hdr_format_cmd, + mt_hdr_format_802_11, + mt_hdr_format_802_11_ext, +}; + +enum tx_pkt_type { + mt_tx_type_ct, + mt_tx_type_sf, + mt_tx_type_cmd, + mt_tx_type_fw, +}; + +enum tx_port_idx { + mt_tx_port_idx_lmac, + mt_tx_port_idx_mcu +}; + +enum tx_mcu_port_q_idx { + mt_tx_mcu_port_rx_q0 = 0x20, + mt_tx_mcu_port_rx_q1, + mt_tx_mcu_port_rx_q2, + mt_tx_mcu_port_rx_q3, + mt_tx_mcu_port_rx_fwdl = 0x3e +}; + +#define mt_ct_info_apply_txd bit(0) +#define mt_ct_info_copy_host_txd_all bit(1) +#define mt_ct_info_mgmt_frame bit(2) +#define mt_ct_info_none_cipher_frame bit(3) +#define mt_ct_info_hsr2_tx bit(4) +#define mt_ct_info_from_host bit(7) + +#define mt_txd_size (8 * 4) + +#define 
mt_txd0_q_idx genmask(31, 25) +#define mt_txd0_pkt_fmt genmask(24, 23) +#define mt_txd0_eth_type_offset genmask(22, 16) +#define mt_txd0_tx_bytes genmask(15, 0) + +#define mt_txd1_long_format bit(31) +#define mt_txd1_tgid bit(30) +#define mt_txd1_own_mac genmask(29, 24) +#define mt_txd1_amsdu bit(23) +#define mt_txd1_tid genmask(22, 20) +#define mt_txd1_hdr_pad genmask(19, 18) +#define mt_txd1_hdr_format genmask(17, 16) +#define mt_txd1_hdr_info genmask(15, 11) +#define mt_txd1_eth_802_3 bit(15) +#define mt_txd1_vta bit(10) +#define mt_txd1_wlan_idx genmask(9, 0) + +#define mt_txd2_fix_rate bit(31) +#define mt_txd2_fixed_rate bit(30) +#define mt_txd2_power_offset genmask(29, 24) +#define mt_txd2_max_tx_time genmask(23, 16) +#define mt_txd2_frag genmask(15, 14) +#define mt_txd2_htc_vld bit(13) +#define mt_txd2_duration bit(12) +#define mt_txd2_bip bit(11) +#define mt_txd2_multicast bit(10) +#define mt_txd2_rts bit(9) +#define mt_txd2_sounding bit(8) +#define mt_txd2_ndpa bit(7) +#define mt_txd2_ndp bit(6) +#define mt_txd2_frame_type genmask(5, 4) +#define mt_txd2_sub_type genmask(3, 0) + +#define mt_txd3_sn_valid bit(31) +#define mt_txd3_pn_valid bit(30) +#define mt_txd3_sw_power_mgmt bit(29) +#define mt_txd3_ba_disable bit(28) +#define mt_txd3_seq genmask(27, 16) +#define mt_txd3_rem_tx_count genmask(15, 11) +#define mt_txd3_tx_count genmask(10, 6) +#define mt_txd3_timing_measure bit(5) +#define mt_txd3_das bit(4) +#define mt_txd3_eeosp bit(3) +#define mt_txd3_emrd bit(2) +#define mt_txd3_protect_frame bit(1) +#define mt_txd3_no_ack bit(0) + +#define mt_txd4_pn_low genmask(31, 0) + +#define mt_txd5_pn_high genmask(31, 16) +#define mt_txd5_md bit(15) +#define mt_txd5_add_ba bit(14) +#define mt_txd5_tx_status_host bit(10) +#define mt_txd5_tx_status_mcu bit(9) +#define mt_txd5_tx_status_fmt bit(8) +#define mt_txd5_pid genmask(7, 0) + +#define mt_txd6_tx_ibf bit(31) +#define mt_txd6_tx_ebf bit(30) +#define mt_txd6_tx_rate genmask(29, 16) +#define mt_txd6_sgi 
genmask(15, 14) +#define mt_txd6_heltf genmask(13, 12) +#define mt_txd6_ldpc bit(11) +#define mt_txd6_spe_id_idx bit(10) +#define mt_txd6_ant_id genmask(7, 4) +#define mt_txd6_dyn_bw bit(3) +#define mt_txd6_fixed_bw bit(2) +#define mt_txd6_bw genmask(1, 0) + +#define mt_txd7_txd_len genmask(31, 30) +#define mt_txd7_udp_tcp_sum bit(29) +#define mt_txd7_ip_sum bit(28) + +#define mt_txd7_type genmask(21, 20) +#define mt_txd7_sub_type genmask(19, 16) + +#define mt_txd7_pse_fid genmask(27, 16) +#define mt_txd7_spe_idx genmask(15, 11) +#define mt_txd7_hw_amsdu bit(10) +#define mt_txd7_tx_time genmask(9, 0) + +#define mt_tx_rate_stbc bit(13) +#define mt_tx_rate_nss genmask(12, 10) +#define mt_tx_rate_mode genmask(9, 6) +#define mt_tx_rate_su_ext_tone bit(5) +#define mt_tx_rate_dcm bit(4) +#define mt_tx_rate_idx genmask(3, 0) + +#define mt_txp_max_buf_num 6 + +struct mt7921_txp { + __le16 flags; + __le16 token; + u8 bss_idx; + __le16 rept_wds_wcid; + u8 nbuf; + __le32 buf[mt_txp_max_buf_num]; + __le16 len[mt_txp_max_buf_num]; +} __packed __aligned(4); + +struct mt7921_tx_free { + __le16 rx_byte_cnt; + __le16 ctrl; + u8 txd_cnt; + u8 rsv[3]; + __le32 info[]; +} __packed __aligned(4); + +#define mt_tx_free_msdu_cnt genmask(9, 0) +#define mt_tx_free_wlan_id genmask(23, 14) +#define mt_tx_free_latency genmask(12, 0) +/* 0: success, others: dropped */ +#define mt_tx_free_status genmask(14, 13) +#define mt_tx_free_msdu_id genmask(30, 16) +#define mt_tx_free_pair bit(31) +/* will support this field in further revision */ +#define mt_tx_free_rate genmask(13, 0) + +static inline struct mt7921_txp_common * +mt7921_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t) +{ + u8 *txwi; + + if (!t) + return null; + + txwi = mt76_get_txwi_ptr(dev, t); + + return (struct mt7921_txp_common *)(txwi + mt_txd_size); +} + +#define mt_hw_txp_max_msdu_num 4 +#define mt_hw_txp_max_buf_num 4 + +#define mt_msdu_id_valid bit(15) + +#define mt_txd_len_mask genmask(11, 0) +#define 
mt_txd_len_msdu_last bit(14) +#define mt_txd_len_amsdu_last bit(15) +#define mt_txd_len_last bit(15) + +struct mt7921_txp_ptr { + __le32 buf0; + __le16 len0; + __le16 len1; + __le32 buf1; +} __packed __aligned(4); + +struct mt7921_hw_txp { + __le16 msdu_id[mt_hw_txp_max_msdu_num]; + struct mt7921_txp_ptr ptr[mt_hw_txp_max_buf_num / 2]; +} __packed __aligned(4); + +struct mt7921_txp_common { + union { + struct mt7921_hw_txp hw; + }; +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +/* spdx-license-identifier: isc */ +/* copyright (c) 2020 mediatek inc. */ + +#ifndef __mt7921_h +#define __mt7921_h + +#include <linux/interrupt.h> +#include <linux/ktime.h> +#include "../mt76.h" +#include "regs.h" + +#define mt7921_max_interfaces 4 +#define mt7921_max_wmm_sets 4 +#define mt7921_wtbl_size 20 +#define mt7921_wtbl_reserved (mt7921_wtbl_size - 1) +#define mt7921_wtbl_sta (mt7921_wtbl_reserved - \ + mt7921_max_interfaces) + +#define mt7921_hw_scan_timeout (hz / 10) +#define mt7921_watchdog_time (hz / 10) +#define mt7921_reset_timeout (30 * hz) + +#define mt7921_tx_ring_size 2048 +#define mt7921_tx_mcu_ring_size 256 +#define mt7921_tx_fwdl_ring_size 128 + +#define mt7921_rx_ring_size 1536 +#define mt7921_rx_mcu_ring_size 512 + +#define mt7921_firmware_wm "mediatek/wifi_ram_code_mt7961_1.bin" +#define mt7921_rom_patch "mediatek/wifi_mt7961_patch_mcu_1_2_hdr.bin" + +#define mt7921_eeprom_size 3584 +#define mt7921_token_size 8192 +#define mt7921_token_free_thr 64 + +#define mt7921_cfend_rate_default 0x49 /* ofdm 24m */ +#define mt7921_cfend_rate_11b 0x03 /* 11b lp, 11m */ +#define mt7921_5g_rate_default 0x4b /* ofdm 6m */ +#define mt7921_2g_rate_default 0x0 /* cck 1m */ + +#define mt7921_sku_rate_num 161 +#define mt7921_sku_max_delta_idx mt7921_sku_rate_num +#define mt7921_sku_table_size (mt7921_sku_rate_num + 1) + +#define 
mt7921_scan_ie_len 600 + +struct mt7921_vif; +struct mt7921_sta; + +enum mt7921_txq_id { + mt7921_txq_band0, + mt7921_txq_band1, + mt7921_txq_fwdl = 16, + mt7921_txq_mcu_wm, +}; + +enum mt7921_rxq_id { + mt7921_rxq_band0 = 0, + mt7921_rxq_band1, + mt7921_rxq_mcu_wm = 0, +}; + +struct mt7921_sta_stats { + struct rate_info prob_rate; + struct rate_info tx_rate; + + unsigned long per; + unsigned long changed; + unsigned long jiffies; +}; + +struct mt7921_sta_key_conf { + s8 keyidx; + u8 key[16]; +}; + +struct mt7921_sta { + struct mt76_wcid wcid; /* must be first */ + + struct mt7921_vif *vif; + + struct list_head stats_list; + struct list_head poll_list; + u32 airtime_ac[8]; + + struct mt7921_sta_stats stats; + + unsigned long ampdu_state; + + struct mt7921_sta_key_conf bip; +}; + +struct mt7921_vif { + struct mt76_vif mt76; /* must be first */ + + struct mt7921_sta sta; + struct mt7921_phy *phy; + + struct ieee80211_tx_queue_params queue_params[ieee80211_num_acs]; +}; + +struct mib_stats { + u16 ack_fail_cnt; + u16 fcs_err_cnt; + u16 rts_cnt; + u16 rts_retries_cnt; + u16 ba_miss_cnt; +}; + +struct mt7921_phy { + struct mt76_phy *mt76; + struct mt7921_dev *dev; + + struct ieee80211_sband_iftype_data iftype[2][num_nl80211_iftypes]; + + struct ieee80211_vif *monitor_vif; + + u32 rxfilter; + u64 omac_mask; + + u16 noise; + + s16 coverage_class; + u8 slottime; + + __le32 rx_ampdu_ts; + u32 ampdu_ref; + + struct mib_stats mib; + struct list_head stats_list; + + struct sk_buff_head scan_event_list; + struct delayed_work scan_work; +}; + +struct mt7921_dev { + union { /* must be first */ + struct mt76_dev mt76; + struct mt76_phy mphy; + }; + + const struct mt76_bus_ops *bus_ops; + struct mt7921_phy phy; + struct tasklet_struct irq_tasklet; + + u16 chainmask; + + struct work_struct init_work; + struct work_struct reset_work; + wait_queue_head_t reset_wait; + u32 reset_state; + + struct list_head sta_poll_list; + spinlock_t sta_poll_lock; + + spinlock_t token_lock; + int 
token_count; + struct idr token; + + u8 fw_debug; +}; + +enum { + hw_bssid_0 = 0x0, + hw_bssid_1, + hw_bssid_2, + hw_bssid_3, + hw_bssid_max = hw_bssid_3, + ext_bssid_start = 0x10, + ext_bssid_1, + ext_bssid_15 = 0x1f, + ext_bssid_max = ext_bssid_15, + repeater_bssid_start = 0x20, + repeater_bssid_max = 0x3f, +}; + +enum { + mt_lmac_ac00, + mt_lmac_ac01, + mt_lmac_ac02, + mt_lmac_ac03, + mt_lmac_altx0 = 0x10, + mt_lmac_bmc0, + mt_lmac_bcn0, +}; + +static inline struct mt7921_phy * +mt7921_hw_phy(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return phy->priv; +} + +static inline struct mt7921_dev * +mt7921_hw_dev(struct ieee80211_hw *hw) +{ + struct mt76_phy *phy = hw->priv; + + return container_of(phy->dev, struct mt7921_dev, mt76); +} + +static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac) +{ + /* lmac uses the reverse order of mac80211 ac indexes */ + return 3 - ac; +} + +extern const struct ieee80211_ops mt7921_ops; +extern struct pci_driver mt7921_pci_driver; + +u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr); + +int mt7921_register_device(struct mt7921_dev *dev); +void mt7921_unregister_device(struct mt7921_dev *dev); +int mt7921_eeprom_init(struct mt7921_dev *dev); +void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy); +int mt7921_eeprom_get_target_power(struct mt7921_dev *dev, + struct ieee80211_channel *chan, + u8 chain_idx); +void mt7921_eeprom_init_sku(struct mt7921_dev *dev); +int mt7921_dma_init(struct mt7921_dev *dev); +void mt7921_dma_prefetch(struct mt7921_dev *dev); +void mt7921_dma_cleanup(struct mt7921_dev *dev); +int mt7921_mcu_init(struct mt7921_dev *dev); +int mt7921_mcu_add_bss_info(struct mt7921_phy *phy, + struct ieee80211_vif *vif, int enable); +int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif, + struct mt7921_sta *msta, struct ieee80211_key_conf *key, + enum set_key_cmd cmd); +int mt7921_set_channel(struct mt7921_phy *phy); +int mt7921_mcu_set_chan_info(struct 
mt7921_phy *phy, int cmd); +int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif); +int mt7921_mcu_set_eeprom(struct mt7921_dev *dev); +int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset); +int mt7921_mcu_set_mac(struct mt7921_dev *dev, int band, bool enable, + bool hdr_trans); +int mt7921_mcu_set_rts_thresh(struct mt7921_phy *phy, u32 val); +int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl); +void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb); +void mt7921_mcu_exit(struct mt7921_dev *dev); + +static inline bool is_mt7921(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7961; +} + +static inline void mt7921_irq_enable(struct mt7921_dev *dev, u32 mask) +{ + mt76_set_irq_mask(&dev->mt76, 0, 0, mask); + + tasklet_schedule(&dev->irq_tasklet); +} + +static inline u32 +mt7921_reg_map_l1(struct mt7921_dev *dev, u32 addr) +{ + u32 offset = field_get(mt_hif_remap_l1_offset, addr); + u32 base = field_get(mt_hif_remap_l1_base, addr); + + mt76_rmw_field(dev, mt_hif_remap_l1, mt_hif_remap_l1_mask, base); + /* use read to push write */ + mt76_rr(dev, mt_hif_remap_l1); + + return mt_hif_remap_base_l1 + offset; +} + +static inline u32 +mt7921_l1_rr(struct mt7921_dev *dev, u32 addr) +{ + return mt76_rr(dev, mt7921_reg_map_l1(dev, addr)); +} + +static inline void +mt7921_l1_wr(struct mt7921_dev *dev, u32 addr, u32 val) +{ + mt76_wr(dev, mt7921_reg_map_l1(dev, addr), val); +} + +static inline u32 +mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val) +{ + val |= mt7921_l1_rr(dev, addr) & ~mask; + mt7921_l1_wr(dev, addr, val); + + return val; +} + +#define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val) +#define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0) + +bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask); +void mt7921_mac_reset_counters(struct mt7921_phy *phy); +void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi, + struct sk_buff *skb, 
struct mt76_wcid *wcid, + struct ieee80211_key_conf *key, bool beacon); +void mt7921_mac_set_timing(struct mt7921_phy *phy); +int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb); +void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb); +void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb); +int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7921_mac_work(struct work_struct *work); +void mt7921_mac_reset_work(struct work_struct *work); +int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); +int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc); +void mt7921_tx_token_put(struct mt7921_dev *dev); +void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); +void mt7921_stats_work(struct work_struct *work); +void mt7921_txp_skb_unmap(struct mt76_dev *dev, + struct mt76_txwi_cache *txwi); +void mt7921_set_stream_he_caps(struct mt7921_phy *phy); +void mt7921_update_channel(struct mt76_dev *mdev); +int mt7921_init_debugfs(struct mt7921_dev *dev); +int +mt7921_mcu_uni_add_dev(struct mt7921_dev *dev, + struct ieee80211_vif *vif, bool enable); +int +mt7921_mcu_uni_add_bss(struct mt7921_phy *phy, struct ieee80211_vif *vif, + bool enable); + +int +mt7921_mcu_uni_add_sta(struct mt7921_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable); +int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable); +int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, + struct 
ieee80211_ampdu_params *params, + bool enable); +void mt7921_scan_work(struct work_struct *work); +int mt7921_mcu_set_channel_domain(struct mt7921_phy *phy); +int mt7921_mcu_hw_scan(struct mt7921_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req); +int mt7921_mcu_cancel_hw_scan(struct mt7921_phy *phy, + struct ieee80211_vif *vif); +u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u16 wlan_idx); +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +/* spdx-license-identifier: isc */ +/* copyright (c) 2020 mediatek inc. */ + +#ifndef __mt7921_regs_h +#define __mt7921_regs_h + +/* mcu wfdma1 */ +#define mt_mcu_wfdma1_base 0x3000 +#define mt_mcu_wfdma1(ofs) (mt_mcu_wfdma1_base + (ofs)) + +#define mt_mcu_int_event mt_mcu_wfdma1(0x108) +#define mt_mcu_int_event_dma_stopped bit(0) +#define mt_mcu_int_event_dma_init bit(1) +#define mt_mcu_int_event_ser_trigger bit(2) +#define mt_mcu_int_event_reset_done bit(3) + +#define mt_ple_base 0x8000 +#define mt_ple(ofs) (mt_ple_base + (ofs)) + +#define mt_ple_fl_q0_ctrl mt_ple(0x1b0) +#define mt_ple_fl_q1_ctrl mt_ple(0x1b4) +#define mt_ple_fl_q2_ctrl mt_ple(0x1b8) +#define mt_ple_fl_q3_ctrl mt_ple(0x1bc) + +#define mt_ple_ac_qempty(ac, n) mt_ple(0x300 + 0x10 * (ac) + \ + ((n) << 2)) +#define mt_ple_amsdu_pack_msdu_cnt(n) mt_ple(0x10e0 + ((n) << 2)) + +#define mt_mdp_base 0xf000 +#define mt_mdp(ofs) (mt_mdp_base + (ofs)) + +#define mt_mdp_dcr0 mt_mdp(0x000) +#define mt_mdp_dcr0_damsdu_en bit(15) +#define mt_mdp_dcr0_rx_hdr_trans_en bit(19) + +#define mt_mdp_dcr1 mt_mdp(0x004) +#define mt_mdp_dcr1_max_rx_len genmask(15, 3) + +#define mt_mdp_bnrcfr0(_band) mt_mdp(0x070 + ((_band) << 8)) +#define mt_mdp_rcfr0_mcu_rx_mgmt genmask(5, 4) +#define mt_mdp_rcfr0_mcu_rx_ctl_non_bar genmask(7, 6) +#define mt_mdp_rcfr0_mcu_rx_ctl_bar genmask(9, 8) + +#define mt_mdp_bnrcfr1(_band) 
mt_mdp(0x074 + ((_band) << 8)) +#define mt_mdp_rcfr1_mcu_rx_bypass genmask(23, 22) +#define mt_mdp_rcfr1_rx_dropped_ucast genmask(28, 27) +#define mt_mdp_rcfr1_rx_dropped_mcast genmask(30, 29) +#define mt_mdp_to_hif 0 +#define mt_mdp_to_wm 1 + +/* tmac: band 0(0x21000), band 1(0xa1000) */ +#define mt_wf_tmac_base(_band) ((_band) ? 0xa1000 : 0x21000) +#define mt_wf_tmac(_band, ofs) (mt_wf_tmac_base(_band) + (ofs)) + +#define mt_tmac_tcr0(_band) mt_wf_tmac(_band, 0) +#define mt_tmac_tcr0_tbtt_stop_ctrl bit(25) + +#define mt_tmac_cdtr(_band) mt_wf_tmac(_band, 0x090) +#define mt_tmac_odtr(_band) mt_wf_tmac(_band, 0x094) +#define mt_timeout_val_plcp genmask(15, 0) +#define mt_timeout_val_cca genmask(31, 16) + +#define mt_tmac_icr0(_band) mt_wf_tmac(_band, 0x0a4) +#define mt_ifs_eifs genmask(8, 0) +#define mt_ifs_rifs genmask(14, 10) +#define mt_ifs_sifs genmask(22, 16) +#define mt_ifs_slot genmask(30, 24) + +#define mt_tmac_ctcr0(_band) mt_wf_tmac(_band, 0x0f4) +#define mt_tmac_ctcr0_ins_ddlmt_reftime genmask(5, 0) +#define mt_tmac_ctcr0_ins_ddlmt_en bit(17) +#define mt_tmac_ctcr0_ins_ddlmt_vht_smpdu_en bit(18) + +#define mt_tmac_trcr0(_band) mt_wf_tmac(_band, 0x09c) +#define mt_tmac_tfcr0(_band) mt_wf_tmac(_band, 0x1e0) + +#define mt_wf_dma_base(_band) ((_band) ? 0xa1e00 : 0x21e00) +#define mt_wf_dma(_band, ofs) (mt_wf_dma_base(_band) + (ofs)) + +#define mt_dma_dcr0(_band) mt_wf_dma(_band, 0x000) +#define mt_dma_dcr0_max_rx_len genmask(15, 3) +#define mt_dma_dcr0_rxd_g5_en bit(23) + +/* lpon: band 0(0x24200), band 1(0xa4200) */ +#define mt_wf_lpon_base(_band) ((_band) ? 
0xa4200 : 0x24200) +#define mt_wf_lpon(_band, ofs) (mt_wf_lpon_base(_band) + (ofs)) + +#define mt_lpon_uttr0(_band) mt_wf_lpon(_band, 0x080) +#define mt_lpon_uttr1(_band) mt_wf_lpon(_band, 0x084) + +#define mt_lpon_tcr(_band, n) mt_wf_lpon(_band, 0x0a8 + (n) * 4) +#define mt_lpon_tcr_sw_mode genmask(1, 0) +#define mt_lpon_tcr_sw_write bit(0) + +/* mib: band 0(0x24800), band 1(0xa4800) */ +#define mt_wf_mib_base(_band) ((_band) ? 0xa4800 : 0x24800) +#define mt_wf_mib(_band, ofs) (mt_wf_mib_base(_band) + (ofs)) + +#define mt_mib_sdr3(_band) mt_wf_mib(_band, 0x014) +#define mt_mib_sdr3_fcs_err_mask genmask(15, 0) + +#define mt_mib_sdr9(_band) mt_wf_mib(_band, 0x02c) +#define mt_mib_sdr9_busy_mask genmask(23, 0) + +#define mt_mib_sdr16(_band) mt_wf_mib(_band, 0x048) +#define mt_mib_sdr16_busy_mask genmask(23, 0) + +#define mt_mib_sdr34(_band) mt_wf_mib(_band, 0x090) +#define mt_mib_mu_bf_tx_cnt genmask(15, 0) + +#define mt_mib_sdr36(_band) mt_wf_mib(_band, 0x098) +#define mt_mib_sdr36_txtime_mask genmask(23, 0) +#define mt_mib_sdr37(_band) mt_wf_mib(_band, 0x09c) +#define mt_mib_sdr37_rxtime_mask genmask(23, 0) + +#define mt_mib_dr8(_band) mt_wf_mib(_band, 0x0c0) +#define mt_mib_dr9(_band) mt_wf_mib(_band, 0x0c4) +#define mt_mib_dr11(_band) mt_wf_mib(_band, 0x0cc) + +#define mt_mib_mb_sdr0(_band, n) mt_wf_mib(_band, 0x100 + ((n) << 4)) +#define mt_mib_rts_retries_count_mask genmask(31, 16) +#define mt_mib_rts_count_mask genmask(15, 0) + +#define mt_mib_mb_sdr1(_band, n) mt_wf_mib(_band, 0x104 + ((n) << 4)) +#define mt_mib_ba_miss_count_mask genmask(15, 0) +#define mt_mib_ack_fail_count_mask genmask(31, 16) + +#define mt_mib_mb_sdr2(_band, n) mt_wf_mib(_band, 0x108 + ((n) << 4)) +#define mt_mib_frame_retries_count_mask genmask(15, 0) + +#define mt_tx_agg_cnt(_band, n) mt_wf_mib(_band, 0x0a8 + ((n) << 2)) +#define mt_tx_agg_cnt2(_band, n) mt_wf_mib(_band, 0x164 + ((n) << 2)) +#define mt_mib_arng(_band, n) mt_wf_mib(_band, 0x4b8 + ((n) << 2)) +#define 
mt_mib_arncr_range(val, n) (((val) >> ((n) << 3)) & genmask(7, 0)) + +#define mt_wtblon_top_base 0x34000 +#define mt_wtblon_top(ofs) (mt_wtblon_top_base + (ofs)) +#define mt_wtblon_top_wducr mt_wtblon_top(0x0) +#define mt_wtblon_top_wducr_group genmask(2, 0) + +#define mt_wtbl_update mt_wtblon_top(0x030) +#define mt_wtbl_update_wlan_idx genmask(9, 0) +#define mt_wtbl_update_adm_count_clear bit(12) +#define mt_wtbl_update_busy bit(31) + +#define mt_wtbl_base 0x38000 +#define mt_wtbl_lmac_id genmask(14, 8) +#define mt_wtbl_lmac_dw genmask(7, 2) +#define mt_wtbl_lmac_offs(_id, _dw) (mt_wtbl_base | \ + field_prep(mt_wtbl_lmac_id, _id) | \ + field_prep(mt_wtbl_lmac_dw, _dw)) + +/* agg: band 0(0x20800), band 1(0xa0800) */ +#define mt_wf_agg_base(_band) ((_band) ? 0xa0800 : 0x20800) +#define mt_wf_agg(_band, ofs) (mt_wf_agg_base(_band) + (ofs)) + +#define mt_agg_awscr0(_band, _n) mt_wf_agg(_band, 0x05c + (_n) * 4) +#define mt_agg_pcr0(_band, _n) mt_wf_agg(_band, 0x06c + (_n) * 4) +#define mt_agg_pcr0_mm_prot bit(0) +#define mt_agg_pcr0_gf_prot bit(1) +#define mt_agg_pcr0_bw20_prot bit(2) +#define mt_agg_pcr0_bw40_prot bit(4) +#define mt_agg_pcr0_bw80_prot bit(6) +#define mt_agg_pcr0_erp_prot genmask(12, 8) +#define mt_agg_pcr0_vht_prot bit(13) +#define mt_agg_pcr0_pta_win_dis bit(15) + +#define mt_agg_pcr1_rts0_num_thres genmask(31, 23) +#define mt_agg_pcr1_rts0_len_thres genmask(19, 0) + +#define mt_agg_acr0(_band) mt_wf_agg(_band, 0x084) +#define mt_agg_acr_cfend_rate genmask(13, 0) +#define mt_agg_acr_bar_rate genmask(29, 16) + +#define mt_agg_mrcr(_band) mt_wf_agg(_band, 0x098) +#define mt_agg_mrcr_bar_cnt_limit genmask(15, 12) +#define mt_agg_mrcr_last_rts_cts_rn bit(6) +#define mt_agg_mrcr_rts_fail_limit genmask(11, 7) +#define mt_agg_mrcr_txcmd_rts_fail_limit genmask(28, 24) + +#define mt_agg_atcr1(_band) mt_wf_agg(_band, 0x0f0) +#define mt_agg_atcr3(_band) mt_wf_agg(_band, 0x0f4) + +/* arb: band 0(0x20c00), band 1(0xa0c00) */ +#define mt_wf_arb_base(_band) 
((_band) ? 0xa0c00 : 0x20c00) +#define mt_wf_arb(_band, ofs) (mt_wf_arb_base(_band) + (ofs)) + +#define mt_arb_scr(_band) mt_wf_arb(_band, 0x080) +#define mt_arb_scr_tx_disable bit(8) +#define mt_arb_scr_rx_disable bit(9) + +#define mt_arb_drngr0(_band, _n) mt_wf_arb(_band, 0x194 + (_n) * 4) + +/* rmac: band 0(0x21400), band 1(0xa1400) */ +#define mt_wf_rmac_base(_band) ((_band) ? 0xa1400 : 0x21400) +#define mt_wf_rmac(_band, ofs) (mt_wf_rmac_base(_band) + (ofs)) + +#define mt_wf_rfcr(_band) mt_wf_rmac(_band, 0x000) +#define mt_wf_rfcr_drop_stbc_multi bit(0) +#define mt_wf_rfcr_drop_fcsfail bit(1) +#define mt_wf_rfcr_drop_version bit(3) +#define mt_wf_rfcr_drop_probereq bit(4) +#define mt_wf_rfcr_drop_mcast bit(5) +#define mt_wf_rfcr_drop_bcast bit(6) +#define mt_wf_rfcr_drop_mcast_filtered bit(7) +#define mt_wf_rfcr_drop_a3_mac bit(8) +#define mt_wf_rfcr_drop_a3_bssid bit(9) +#define mt_wf_rfcr_drop_a2_bssid bit(10) +#define mt_wf_rfcr_drop_other_beacon bit(11) +#define mt_wf_rfcr_drop_frame_report bit(12) +#define mt_wf_rfcr_drop_ctl_rsv bit(13) +#define mt_wf_rfcr_drop_cts bit(14) +#define mt_wf_rfcr_drop_rts bit(15) +#define mt_wf_rfcr_drop_duplicate bit(16) +#define mt_wf_rfcr_drop_other_bss bit(17) +#define mt_wf_rfcr_drop_other_uc bit(18) +#define mt_wf_rfcr_drop_other_tim bit(19) +#define mt_wf_rfcr_drop_ndpa bit(20) +#define mt_wf_rfcr_drop_unwanted_ctl bit(21) + +#define mt_wf_rfcr1(_band) mt_wf_rmac(_band, 0x004) +#define mt_wf_rfcr1_drop_ack bit(4) +#define mt_wf_rfcr1_drop_bf_poll bit(5) +#define mt_wf_rfcr1_drop_ba bit(6) +#define mt_wf_rfcr1_drop_cfend bit(7) +#define mt_wf_rfcr1_drop_cfack bit(8) + +#define mt_wf_rmac_mib_time0(_band) mt_wf_rmac(_band, 0x03c4) +#define mt_wf_rmac_mib_rxtime_clr bit(31) +#define mt_wf_rmac_mib_rxtime_en bit(30) + +#define mt_wf_rmac_mib_airtime14(_band) mt_wf_rmac(_band, 0x03b8) +#define mt_mib_obsstime_mask genmask(23, 0) +#define mt_wf_rmac_mib_airtime0(_band) mt_wf_rmac(_band, 0x0380) + +/* wfdma0 */ +#define 
mt_wfdma0_base 0xd4000 +#define mt_wfdma0(ofs) (mt_wfdma0_base + (ofs)) + +#define mt_wfdma0_rst mt_wfdma0(0x100) +#define mt_wfdma0_rst_logic_rst bit(4) +#define mt_wfdma0_rst_dmashdl_all_rst bit(5) + +#define mt_wfdma0_busy_ena mt_wfdma0(0x13c) +#define mt_wfdma0_busy_ena_tx_fifo0 bit(0) +#define mt_wfdma0_busy_ena_tx_fifo1 bit(1) +#define mt_wfdma0_busy_ena_rx_fifo bit(2) + +#define mt_mcu_cmd mt_wfdma0(0x1f0) +#define mt_mcu_cmd_stop_dma_fw_reload bit(1) +#define mt_mcu_cmd_stop_dma bit(2) +#define mt_mcu_cmd_reset_done bit(3) +#define mt_mcu_cmd_recovery_done bit(4) +#define mt_mcu_cmd_normal_state bit(5) +#define mt_mcu_cmd_error_mask genmask(5, 1) + +#define mt_wfdma0_host_int_sta mt_wfdma0(0x200) +#define host_rx_done_int_sts0 bit(0) /* rx mcu */ +#define host_rx_done_int_sts2 bit(2) /* rx data */ +#define host_rx_done_int_sts4 bit(22) /* rx mcu after fw downloaded */ +#define host_tx_done_int_sts16 bit(26) +#define host_tx_done_int_sts17 bit(27) /* mcu tx done*/ + +#define mt_wfdma0_host_int_ena mt_wfdma0(0x204) +#define host_rx_done_int_ena0 bit(0) +#define host_rx_done_int_ena1 bit(1) +#define host_rx_done_int_ena2 bit(2) +#define host_rx_done_int_ena3 bit(3) +#define host_tx_done_int_ena0 bit(4) +#define host_tx_done_int_ena1 bit(5) +#define host_tx_done_int_ena2 bit(6) +#define host_tx_done_int_ena3 bit(7) +#define host_tx_done_int_ena4 bit(8) +#define host_tx_done_int_ena5 bit(9) +#define host_tx_done_int_ena6 bit(10) +#define host_tx_done_int_ena7 bit(11) +#define host_tx_done_int_ena8 bit(12) +#define host_tx_done_int_ena9 bit(13) +#define host_tx_done_int_ena10 bit(14) +#define host_tx_done_int_ena11 bit(15) +#define host_tx_done_int_ena12 bit(16) +#define host_tx_done_int_ena13 bit(17) +#define host_tx_done_int_ena14 bit(18) +#define host_rx_coherent_en bit(20) +#define host_tx_coherent_en bit(21) +#define host_rx_done_int_ena4 bit(22) +#define host_rx_done_int_ena5 bit(23) +#define host_tx_done_int_ena16 bit(26) +#define host_tx_done_int_ena17 
bit(27) +#define mcu2host_sw_int_ena bit(29) +#define host_tx_done_int_ena18 bit(30) + +/* wfdma interrupt */ +#define mt_int_rx_done_data host_rx_done_int_ena2 +#define mt_int_rx_done_wm host_rx_done_int_ena0 +#define mt_int_rx_done_wm2 host_rx_done_int_ena4 +#define mt_int_rx_done_all (mt_int_rx_done_data | \ + mt_int_rx_done_wm | \ + mt_int_rx_done_wm2) +#define mt_int_tx_done_mcu_wm host_tx_done_int_ena17 +#define mt_int_tx_done_fwdl host_tx_done_int_ena16 +#define mt_int_tx_done_band0 host_tx_done_int_ena0 +#define mt_int_mcu_cmd mcu2host_sw_int_ena + +#define mt_int_tx_done_mcu (mt_int_tx_done_mcu_wm | \ + mt_int_tx_done_fwdl) +#define mt_int_tx_done_all (mt_int_tx_done_mcu_wm | \ + mt_int_tx_done_band0 | \ + genmask(18, 4)) + +#define mt_wfdma0_glo_cfg mt_wfdma0(0x208) +#define mt_wfdma0_glo_cfg_tx_dma_en bit(0) +#define mt_wfdma0_glo_cfg_tx_dma_busy bit(1) +#define mt_wfdma0_glo_cfg_rx_dma_en bit(2) +#define mt_wfdma0_glo_cfg_rx_dma_busy bit(3) +#define mt_wfdma0_glo_cfg_tx_wb_ddone bit(6) +#define mt_wfdma0_glo_cfg_fifo_little_endian bit(12) +#define mt_wfdma0_glo_cfg_csr_disp_base_ptr_chain_en bit(15) +#define mt_wfdma0_glo_cfg_omit_rx_info_pfet2 bit(21) +#define mt_wfdma0_glo_cfg_omit_rx_info bit(27) +#define mt_wfdma0_glo_cfg_omit_tx_info bit(28) +#define mt_wfdma0_glo_cfg_clk_gat_dis bit(30) + +#define mt_wfdma0_rst_dtx_ptr mt_wfdma0(0x20c) +#define mt_wfdma0_glo_cfg_ext0 mt_wfdma0(0x2b0) +#define mt_wfdma0_csr_tx_dmashdl_enable bit(6) +#define mt_wfdma0_pri_dly_int_cfg0 mt_wfdma0(0x2f0) + +#define mt_rx_data_ring_base mt_wfdma0(0x520) + +#define mt_wfdma0_tx_ring0_ext_ctrl mt_wfdma0(0x600) +#define mt_wfdma0_tx_ring1_ext_ctrl mt_wfdma0(0x604) +#define mt_wfdma0_tx_ring2_ext_ctrl mt_wfdma0(0x608) +#define mt_wfdma0_tx_ring3_ext_ctrl mt_wfdma0(0x60c) +#define mt_wfdma0_tx_ring4_ext_ctrl mt_wfdma0(0x610) +#define mt_wfdma0_tx_ring5_ext_ctrl mt_wfdma0(0x614) +#define mt_wfdma0_tx_ring6_ext_ctrl mt_wfdma0(0x618) +#define mt_wfdma0_tx_ring16_ext_ctrl 
mt_wfdma0(0x640) +#define mt_wfdma0_tx_ring17_ext_ctrl mt_wfdma0(0x644) + +#define mt_wfdma0_rx_ring0_ext_ctrl mt_wfdma0(0x680) +#define mt_wfdma0_rx_ring1_ext_ctrl mt_wfdma0(0x684) +#define mt_wfdma0_rx_ring2_ext_ctrl mt_wfdma0(0x688) +#define mt_wfdma0_rx_ring3_ext_ctrl mt_wfdma0(0x68c) +#define mt_wfdma0_rx_ring4_ext_ctrl mt_wfdma0(0x690) +#define mt_wfdma0_rx_ring5_ext_ctrl mt_wfdma0(0x694) + +#define mt_tx_ring_base mt_wfdma0(0x300) +#define mt_rx_event_ring_base mt_wfdma0(0x500) + +/* wfdma csr */ +#define mt_wfdma_ext_csr_base 0xd7000 +#define mt_wfdma_ext_csr(ofs) (mt_wfdma_ext_csr_base + (ofs)) +#define mt_wfdma_ext_csr_hif_misc mt_wfdma_ext_csr(0x44) +#define mt_wfdma_ext_csr_hif_misc_busy bit(0) + +#define mt_infra_cfg_base 0xfe000 +#define mt_infra(ofs) (mt_infra_cfg_base + (ofs)) + +#define mt_hif_remap_l1 mt_infra(0x260) +#define mt_hif_remap_l1_mask genmask(15, 0) +#define mt_hif_remap_l1_offset genmask(15, 0) +#define mt_hif_remap_l1_base genmask(31, 16) +#define mt_hif_remap_base_l1 0xe0000 + +#define mt_swdef_base 0x41f200 +#define mt_swdef(ofs) (mt_swdef_base + (ofs)) +#define mt_swdef_mode mt_swdef(0x3c) +#define mt_swdef_normal_mode 0 +#define mt_swdef_icap_mode 1 +#define mt_swdef_spectrum_mode 2 + +#define mt_top_base 0x18060000 +#define mt_top(ofs) (mt_top_base + (ofs)) + +#define mt_top_lpcr_host_band0 mt_top(0x10) +#define mt_top_lpcr_host_fw_own bit(0) +#define mt_top_lpcr_host_drv_own bit(1) + +#define mt_top_misc mt_top(0xf0) +#define mt_top_misc_fw_state genmask(2, 0) + +#define mt_hw_bound 0x70010020 +#define mt_hw_chipid 0x70010200 +#define mt_hw_rev 0x70010204 + +#define mt_pcie_mac_base 0x74030000 +#define mt_pcie_mac(ofs) (mt_pcie_mac_base + (ofs)) +#define mt_pcie_mac_int_enable mt_pcie_mac(0x188) + +#define mt_dma_shdl(ofs) (0xd6000 + (ofs)) +#define mt_dmashdl_sw_control mt_dma_shdl(0x004) +#define mt_dmashdl_dmashdl_bypass bit(28) +#define mt_dmashdl_optional mt_dma_shdl(0x008) +#define mt_dmashdl_page mt_dma_shdl(0x00c) 
+#define mt_dmashdl_refill mt_dma_shdl(0x010) +#define mt_dmashdl_pkt_max_size mt_dma_shdl(0x01c) +#define mt_dmashdl_pkt_max_size_ple genmask(11, 0) +#define mt_dmashdl_pkt_max_size_pse genmask(27, 16) + +#define mt_dmashdl_group_quota(_n) mt_dma_shdl(0x020 + ((_n) << 2)) +#define mt_dmashdl_group_quota_min genmask(11, 0) +#define mt_dmashdl_group_quota_max genmask(27, 16) + +#define mt_dmashdl_q_map(_n) mt_dma_shdl(0x060 + ((_n) << 2)) +#define mt_dmashdl_q_map_mask genmask(3, 0) +#define mt_dmashdl_q_map_shift(_n) (4 * ((_n) % 8)) + +#define mt_dmashdl_sched_set(_n) mt_dma_shdl(0x070 + ((_n) << 2)) + +#define mt_conn_on_misc 0x7c0600f0 +#define mt_top_misc2_fw_n9_rdy genmask(1, 0) + +#endif
Networking
163f4d22c118d4eb9e275bf9ee1577c0d14b3208
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: add mcu support
mt7921 contains a microprocessor with which the host can use command/event to communicate to implement offload features such as establish connection, hardware scan and so on. the host has to download the rom patch, ram firmware and finally activate the mcu to complete the mt7921 initialization.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
4
3,462
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +static void +mt7921_mac_sta_stats_work(struct mt7921_phy *phy) +{ + struct mt7921_dev *dev = phy->dev; + struct mt7921_sta *msta; + list_head(list); + + spin_lock_bh(&dev->sta_poll_lock); + list_splice_init(&phy->stats_list, &list); + + while (!list_empty(&list)) { + msta = list_first_entry(&list, struct mt7921_sta, stats_list); + list_del_init(&msta->stats_list); + spin_unlock_bh(&dev->sta_poll_lock); + + /* query wtbl info to report tx rate for further devices */ + mt7921_get_wtbl_info(dev, msta->wcid.idx); + + spin_lock_bh(&dev->sta_poll_lock); + } + + spin_unlock_bh(&dev->sta_poll_lock); +} + + if (++phy->sta_work_count == 10) { + phy->sta_work_count = 0; + mt7921_mac_sta_stats_work(phy); + }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. 
*/ + +#include <linux/firmware.h> +#include <linux/fs.h> +#include "mt7921.h" +#include "mcu.h" +#include "mac.h" + +struct mt7921_patch_hdr { + char build_date[16]; + char platform[4]; + __be32 hw_sw_ver; + __be32 patch_ver; + __be16 checksum; + u16 reserved; + struct { + __be32 patch_ver; + __be32 subsys; + __be32 feature; + __be32 n_region; + __be32 crc; + u32 reserved[11]; + } desc; +} __packed; + +struct mt7921_patch_sec { + __be32 type; + __be32 offs; + __be32 size; + union { + __be32 spec[13]; + struct { + __be32 addr; + __be32 len; + __be32 sec_key_idx; + __be32 align_len; + u32 reserved[9]; + } info; + }; +} __packed; + +struct mt7921_fw_trailer { + u8 chip_id; + u8 eco_code; + u8 n_region; + u8 format_ver; + u8 format_flag; + u8 reserved[2]; + char fw_ver[10]; + char build_date[15]; + u32 crc; +} __packed; + +struct mt7921_fw_region { + __le32 decomp_crc; + __le32 decomp_len; + __le32 decomp_blk_sz; + u8 reserved[4]; + __le32 addr; + __le32 len; + u8 feature_set; + u8 reserved1[15]; +} __packed; + +#define mcu_patch_address 0x200000 + +#define mt_sta_bfer bit(0) +#define mt_sta_bfee bit(1) + +#define fw_feature_set_encrypt bit(0) +#define fw_feature_set_key_idx genmask(2, 1) +#define fw_feature_encry_mode bit(4) +#define fw_feature_override_addr bit(5) + +#define dl_mode_encrypt bit(0) +#define dl_mode_key_idx genmask(2, 1) +#define dl_mode_reset_sec_iv bit(3) +#define dl_mode_working_pda_cr4 bit(4) +#define dl_config_encry_mode_sel bit(6) +#define dl_mode_need_rsp bit(31) + +#define fw_start_override bit(0) +#define fw_start_working_pda_cr4 bit(2) + +#define patch_sec_type_mask genmask(15, 0) +#define patch_sec_type_info 0x2 + +#define to_wcid_lo(id) field_get(genmask(7, 0), (u16)id) +#define to_wcid_hi(id) field_get(genmask(9, 8), (u16)id) + +#define he_phy(p, c) u8_get_bits(c, ieee80211_he_phy_##p) +#define he_mac(m, c) u8_get_bits(c, ieee80211_he_mac_##m) + +static enum mt7921_cipher_type +mt7921_mcu_get_cipher(int cipher) +{ + switch (cipher) { + 
case wlan_cipher_suite_wep40: + return mt_cipher_wep40; + case wlan_cipher_suite_wep104: + return mt_cipher_wep104; + case wlan_cipher_suite_tkip: + return mt_cipher_tkip; + case wlan_cipher_suite_aes_cmac: + return mt_cipher_bip_cmac_128; + case wlan_cipher_suite_ccmp: + return mt_cipher_aes_ccmp; + case wlan_cipher_suite_ccmp_256: + return mt_cipher_ccmp_256; + case wlan_cipher_suite_gcmp: + return mt_cipher_gcmp; + case wlan_cipher_suite_gcmp_256: + return mt_cipher_gcmp_256; + case wlan_cipher_suite_sms4: + return mt_cipher_wapi; + default: + return mt_cipher_none; + } +} + +static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef) +{ + static const u8 width_to_bw[] = { + [nl80211_chan_width_40] = cmd_cbw_40mhz, + [nl80211_chan_width_80] = cmd_cbw_80mhz, + [nl80211_chan_width_80p80] = cmd_cbw_8080mhz, + [nl80211_chan_width_160] = cmd_cbw_160mhz, + [nl80211_chan_width_5] = cmd_cbw_5mhz, + [nl80211_chan_width_10] = cmd_cbw_10mhz, + [nl80211_chan_width_20] = cmd_cbw_20mhz, + [nl80211_chan_width_20_noht] = cmd_cbw_20mhz, + }; + + if (chandef->width >= array_size(width_to_bw)) + return 0; + + return width_to_bw[chandef->width]; +} + +static const struct ieee80211_sta_he_cap * +mt7921_get_he_phy_cap(struct mt7921_phy *phy, struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband; + enum nl80211_band band; + + band = phy->mt76->chandef.chan->band; + sband = phy->mt76->hw->wiphy->bands[band]; + + return ieee80211_get_he_iftype_cap(sband, vif->type); +} + +static u8 +mt7921_get_phy_mode(struct mt7921_dev *dev, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_sta_vht_cap *vht_cap; + const struct ieee80211_sta_he_cap *he_cap; + u8 mode = 0; + + if (sta) { + ht_cap = &sta->ht_cap; + vht_cap = &sta->vht_cap; + he_cap = &sta->he_cap; + } else { + struct ieee80211_supported_band *sband; + struct mt7921_phy *phy = &dev->phy; + + sband = 
phy->mt76->hw->wiphy->bands[band]; + ht_cap = &sband->ht_cap; + vht_cap = &sband->vht_cap; + he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + } + + if (band == nl80211_band_2ghz) { + mode |= phy_mode_b | phy_mode_g; + + if (ht_cap->ht_supported) + mode |= phy_mode_gn; + + if (he_cap->has_he) + mode |= phy_mode_ax_24g; + } else if (band == nl80211_band_5ghz) { + mode |= phy_mode_a; + + if (ht_cap->ht_supported) + mode |= phy_mode_an; + + if (vht_cap->vht_supported) + mode |= phy_mode_ac; + + if (he_cap->has_he) + mode |= phy_mode_ax_5g; + } + + return mode; +} + +static u8 +mt7921_get_phy_mode_v2(struct mt7921_dev *dev, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_sta_vht_cap *vht_cap; + const struct ieee80211_sta_he_cap *he_cap; + u8 mode = 0; + + if (sta) { + ht_cap = &sta->ht_cap; + vht_cap = &sta->vht_cap; + he_cap = &sta->he_cap; + } else { + struct ieee80211_supported_band *sband; + struct mt7921_phy *phy = &dev->phy; + + sband = phy->mt76->hw->wiphy->bands[band]; + ht_cap = &sband->ht_cap; + vht_cap = &sband->vht_cap; + he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + } + + if (band == nl80211_band_2ghz) { + mode |= phy_type_bit_hr_dsss | phy_type_bit_erp; + + if (ht_cap->ht_supported) + mode |= phy_type_bit_ht; + + if (he_cap->has_he) + mode |= phy_type_bit_he; + } else if (band == nl80211_band_5ghz) { + mode |= phy_type_bit_ofdm; + + if (ht_cap->ht_supported) + mode |= phy_type_bit_ht; + + if (vht_cap->vht_supported) + mode |= phy_type_bit_vht; + + if (he_cap->has_he) + mode |= phy_type_bit_he; + } + + return mode; +} + +static int +mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_eeprom_info *res; + u8 *buf; + + if (!skb) + return -einval; + + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + + res = (struct mt7921_mcu_eeprom_info *)skb->data; + buf = dev->eeprom.data + le32_to_cpu(res->addr); + 
memcpy(buf, res->data, 16); + + return 0; +} + +static int +mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd, + struct sk_buff *skb, int seq) +{ + struct mt7921_mcu_rxd *rxd; + int ret = 0; + + if (!skb) { + dev_err(mdev->dev, "message %d (seq %d) timeout ", + cmd, seq); + return -etimedout; + } + + rxd = (struct mt7921_mcu_rxd *)skb->data; + if (seq != rxd->seq) + return -eagain; + + switch (cmd) { + case mcu_cmd_patch_sem_control: + skb_pull(skb, sizeof(*rxd) - 4); + ret = *skb->data; + break; + case mcu_ext_cmd_thermal_ctrl: + skb_pull(skb, sizeof(*rxd) + 4); + ret = le32_to_cpu(*(__le32 *)skb->data); + break; + case mcu_ext_cmd_efuse_access: + ret = mt7921_mcu_parse_eeprom(mdev, skb); + break; + case mcu_uni_cmd_dev_info_update: + case mcu_uni_cmd_bss_info_update: + case mcu_uni_cmd_sta_rec_update: + case mcu_uni_cmd_hif_ctrl: + case mcu_uni_cmd_offload: + case mcu_uni_cmd_suspend: { + struct mt7921_mcu_uni_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7921_mcu_uni_event *)skb->data; + ret = le32_to_cpu(event->status); + break; + } + case mcu_cmd_reg_read: { + struct mt7921_mcu_reg_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7921_mcu_reg_event *)skb->data; + ret = (int)le32_to_cpu(event->val); + break; + } + default: + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + break; + } + + return ret; +} + +static int +mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, int *wait_seq) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + int txd_len, mcu_cmd = cmd & mcu_cmd_mask; + enum mt76_mcuq_id txq = mt_mcuq_wm; + struct mt7921_uni_txd *uni_txd; + struct mt7921_mcu_txd *mcu_txd; + __le32 *txd; + u32 val; + u8 seq; + + /* todo: make dynamic based on msg type */ + mdev->mcu.timeout = 20 * hz; + + seq = ++dev->mt76.mcu.msg_seq & 0xf; + if (!seq) + seq = ++dev->mt76.mcu.msg_seq & 0xf; + + if (cmd == mcu_cmd_fw_scatter) { + txq = mt_mcuq_fwdl; + goto exit; + } + + 
txd_len = cmd & mcu_uni_prefix ? sizeof(*uni_txd) : sizeof(*mcu_txd); + txd = (__le32 *)skb_push(skb, txd_len); + + val = field_prep(mt_txd0_tx_bytes, skb->len) | + field_prep(mt_txd0_pkt_fmt, mt_tx_type_cmd) | + field_prep(mt_txd0_q_idx, mt_tx_mcu_port_rx_q0); + txd[0] = cpu_to_le32(val); + + val = mt_txd1_long_format | + field_prep(mt_txd1_hdr_format, mt_hdr_format_cmd); + txd[1] = cpu_to_le32(val); + + if (cmd & mcu_uni_prefix) { + uni_txd = (struct mt7921_uni_txd *)txd; + uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd)); + uni_txd->option = mcu_cmd_uni_ext_ack; + uni_txd->cid = cpu_to_le16(mcu_cmd); + uni_txd->s2d_index = mcu_s2d_h2n; + uni_txd->pkt_type = mcu_pkt_id; + uni_txd->seq = seq; + + goto exit; + } + + mcu_txd = (struct mt7921_mcu_txd *)txd; + mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd)); + mcu_txd->pq_id = cpu_to_le16(mcu_pq_id(mt_tx_port_idx_mcu, + mt_tx_mcu_port_rx_q0)); + mcu_txd->pkt_type = mcu_pkt_id; + mcu_txd->seq = seq; + + switch (cmd & ~mcu_cmd_mask) { + case mcu_fw_prefix: + mcu_txd->set_query = mcu_q_na; + mcu_txd->cid = mcu_cmd; + break; + case mcu_ce_prefix: + if (cmd & mcu_query_mask) + mcu_txd->set_query = mcu_q_query; + else + mcu_txd->set_query = mcu_q_set; + mcu_txd->cid = mcu_cmd; + break; + default: + mcu_txd->cid = mcu_cmd_ext_cid; + if (cmd & mcu_query_prefix || cmd == mcu_ext_cmd_efuse_access) + mcu_txd->set_query = mcu_q_query; + else + mcu_txd->set_query = mcu_q_set; + mcu_txd->ext_cid = mcu_cmd; + mcu_txd->ext_cid_ack = 1; + break; + } + + mcu_txd->s2d_index = mcu_s2d_h2n; + warn_on(cmd == mcu_ext_cmd_efuse_access && + mcu_txd->set_query != mcu_q_query); + +exit: + if (wait_seq) + *wait_seq = seq; + + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); +} + +static void +mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy, + struct mt7921_mcu_peer_cap *peer, + struct rate_info *rate, u16 r) +{ + struct ieee80211_supported_band *sband; + u16 flags = 0; + u8 txmode = 
field_get(mt_wtbl_rate_tx_mode, r); + u8 gi = 0; + u8 bw = 0; + + rate->mcs = field_get(mt_wtbl_rate_mcs, r); + rate->nss = field_get(mt_wtbl_rate_nss, r) + 1; + + switch (peer->bw) { + case ieee80211_sta_rx_bw_160: + gi = peer->g16; + break; + case ieee80211_sta_rx_bw_80: + gi = peer->g8; + break; + case ieee80211_sta_rx_bw_40: + gi = peer->g4; + break; + default: + gi = peer->g2; + break; + } + + gi = txmode >= mt_phy_type_he_su ? + field_get(mt_wtbl_rate_he_gi, gi) : + field_get(mt_wtbl_rate_gi, gi); + + switch (txmode) { + case mt_phy_type_cck: + case mt_phy_type_ofdm: + if (mphy->chandef.chan->band == nl80211_band_5ghz) + sband = &mphy->sband_5g.sband; + else + sband = &mphy->sband_2g.sband; + + rate->legacy = sband->bitrates[rate->mcs].bitrate; + break; + case mt_phy_type_ht: + case mt_phy_type_ht_gf: + flags |= rate_info_flags_mcs; + + if (gi) + flags |= rate_info_flags_short_gi; + break; + case mt_phy_type_vht: + flags |= rate_info_flags_vht_mcs; + + if (gi) + flags |= rate_info_flags_short_gi; + break; + case mt_phy_type_he_su: + case mt_phy_type_he_ext_su: + case mt_phy_type_he_tb: + case mt_phy_type_he_mu: + rate->he_gi = gi; + rate->he_dcm = field_get(mt_ra_rate_dcm_en, r); + + flags |= rate_info_flags_he_mcs; + break; + default: + break; + } + rate->flags = flags; + + bw = mt7921_mcu_chan_bw(&mphy->chandef) - field_get(mt_ra_rate_bw, r); + + switch (bw) { + case ieee80211_sta_rx_bw_160: + rate->bw = rate_info_bw_160; + break; + case ieee80211_sta_rx_bw_80: + rate->bw = rate_info_bw_80; + break; + case ieee80211_sta_rx_bw_40: + rate->bw = rate_info_bw_40; + break; + default: + rate->bw = rate_info_bw_20; + break; + } +} + +static void +mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, + u16 wlan_idx) +{ + struct mt7921_mcu_wlan_info_event *wtbl_info = + (struct mt7921_mcu_wlan_info_event *)(skb->data); + struct rate_info rate = {}; + u8 curr_idx = wtbl_info->rate_info.rate_idx; + u16 curr = 
le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]); + struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap; + struct mt76_phy *mphy = &dev->mphy; + struct mt7921_sta_stats *stats; + struct mt7921_sta *msta; + struct mt76_wcid *wcid; + + if (wlan_idx >= mt76_n_wcids) + return; + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + if (!wcid) { + stats->tx_rate = rate; + return; + } + + msta = container_of(wcid, struct mt7921_sta, wcid); + stats = &msta->stats; + + /* current rate */ + mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr); + stats->tx_rate = rate; +} + +static void +mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv; + + spin_lock_bh(&dev->mt76.lock); + __skb_queue_tail(&phy->scan_event_list, skb); + spin_unlock_bh(&dev->mt76.lock); + + ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work, + mt7921_hw_scan_timeout); +} + +static void +mt7921_mcu_bss_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + struct mt7921_mcu_bss_event *event; + + event = (struct mt7921_mcu_bss_event *)(skb->data + + sizeof(struct mt7921_mcu_rxd)); + if (event->is_absent) + ieee80211_stop_queues(mphy->hw); + else + ieee80211_wake_queues(mphy->hw); +} + +static void +mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; + struct debug_msg { + __le16 id; + u8 type; + u8 flag; + __le32 value; + __le16 len; + u8 content[512]; + } __packed * debug_msg; + u16 cur_len; + int i; + + skb_pull(skb, sizeof(*rxd)); + debug_msg = (struct debug_msg *)skb->data; + + cur_len = min_t(u16, le16_to_cpu(debug_msg->len), 512); + + if (debug_msg->type == 0x3) { + for (i = 0 ; i < cur_len; i++) + if (!debug_msg->content[i]) + debug_msg->content[i] = ' '; + + dev_dbg(dev->mt76.dev, "%s", debug_msg->content); + } +} + +static void 
+mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; + + switch (rxd->eid) { + case mcu_event_bss_beacon_loss: + break; + case mcu_event_sched_scan_done: + case mcu_event_scan_done: + mt7921_mcu_scan_event(dev, skb); + return; + case mcu_event_bss_absence: + mt7921_mcu_bss_event(dev, skb); + break; + case mcu_event_dbg_msg: + mt7921_mcu_debug_msg_event(dev, skb); + break; + default: + break; + } + dev_kfree_skb(skb); +} + +void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data; + + if (rxd->eid == 0x6) { + mt76_mcu_rx_event(&dev->mt76, skb); + return; + } + + if (rxd->ext_eid == mcu_ext_event_rate_report || + rxd->eid == mcu_event_bss_beacon_loss || + rxd->eid == mcu_event_sched_scan_done || + rxd->eid == mcu_event_bss_absence || + rxd->eid == mcu_event_scan_done || + rxd->eid == mcu_event_dbg_msg || + !rxd->seq) + mt7921_mcu_rx_unsolicited_event(dev, skb); + else + mt76_mcu_rx_event(&dev->mt76, skb); +} + +static struct sk_buff * +mt7921_mcu_alloc_sta_req(struct mt7921_dev *dev, struct mt7921_vif *mvif, + struct mt7921_sta *msta, int len) +{ + struct sta_req_hdr hdr = { + .bss_idx = mvif->mt76.idx, + .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0, + .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0, + .muar_idx = msta ? 
mvif->mt76.omac_idx : 0, + .is_tlv_append = 1, + }; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, len); + if (!skb) + return err_ptr(-enomem); + + skb_put_data(skb, &hdr, sizeof(hdr)); + + return skb; +} + +static struct wtbl_req_hdr * +mt7921_mcu_alloc_wtbl_req(struct mt7921_dev *dev, struct mt7921_sta *msta, + int cmd, void *sta_wtbl, struct sk_buff **skb) +{ + struct tlv *sta_hdr = sta_wtbl; + struct wtbl_req_hdr hdr = { + .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), + .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), + .operation = cmd, + }; + struct sk_buff *nskb = *skb; + + if (!nskb) { + nskb = mt76_mcu_msg_alloc(&dev->mt76, null, + mt7921_wtbl_update_ba_size); + if (!nskb) + return err_ptr(-enomem); + + *skb = nskb; + } + + if (sta_hdr) + sta_hdr->len = cpu_to_le16(sizeof(hdr)); + + return skb_put_data(nskb, &hdr, sizeof(hdr)); +} + +static struct tlv * +mt7921_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len, + void *sta_ntlv, void *sta_wtbl) +{ + struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv; + struct tlv *sta_hdr = sta_wtbl; + struct tlv *ptlv, tlv = { + .tag = cpu_to_le16(tag), + .len = cpu_to_le16(len), + }; + u16 ntlv; + + ptlv = skb_put(skb, len); + memcpy(ptlv, &tlv, sizeof(tlv)); + + ntlv = le16_to_cpu(ntlv_hdr->tlv_num); + ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1); + + if (sta_hdr) { + u16 size = le16_to_cpu(sta_hdr->len); + + sta_hdr->len = cpu_to_le16(size + len); + } + + return ptlv; +} + +static struct tlv * +mt7921_mcu_add_tlv(struct sk_buff *skb, int tag, int len) +{ + return mt7921_mcu_add_nested_tlv(skb, tag, len, skb->data, null); +} + +static void +mt7921_mcu_uni_bss_he_tlv(struct tlv *tlv, struct ieee80211_vif *vif, + struct mt7921_phy *phy) +{ +#define default_he_pe_duration 4 +#define default_he_duration_rts_thres 1023 + const struct ieee80211_sta_he_cap *cap; + struct bss_info_uni_he *he; + + cap = mt7921_get_he_phy_cap(phy, vif); + + he = (struct bss_info_uni_he *)tlv; + he->he_pe_duration = 
vif->bss_conf.htc_trig_based_pkt_ext; + if (!he->he_pe_duration) + he->he_pe_duration = default_he_pe_duration; + + he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); + if (!he->he_rts_thres) + he->he_rts_thres = cpu_to_le16(default_he_duration_rts_thres); + + he->max_nss_mcs[cmd_he_mcs_bw80] = cap->he_mcs_nss_supp.tx_mcs_80; + he->max_nss_mcs[cmd_he_mcs_bw160] = cap->he_mcs_nss_supp.tx_mcs_160; + he->max_nss_mcs[cmd_he_mcs_bw8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; +} + +/** starec & wtbl **/ +static int +mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb, + struct ieee80211_key_conf *key, enum set_key_cmd cmd) +{ + struct mt7921_sta_key_conf *bip = &msta->bip; + struct sta_rec_sec *sec; + struct tlv *tlv; + u32 len = sizeof(*sec); + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_key_v2, sizeof(*sec)); + + sec = (struct sta_rec_sec *)tlv; + sec->add = cmd; + + if (cmd == set_key) { + struct sec_key *sec_key; + u8 cipher; + + cipher = mt7921_mcu_get_cipher(key->cipher); + if (cipher == mt_cipher_none) + return -eopnotsupp; + + sec_key = &sec->key[0]; + sec_key->cipher_len = sizeof(*sec_key); + + if (cipher == mt_cipher_bip_cmac_128) { + sec_key->cipher_id = mt_cipher_aes_ccmp; + sec_key->key_id = bip->keyidx; + sec_key->key_len = 16; + memcpy(sec_key->key, bip->key, 16); + + sec_key = &sec->key[1]; + sec_key->cipher_id = mt_cipher_bip_cmac_128; + sec_key->cipher_len = sizeof(*sec_key); + sec_key->key_len = 16; + memcpy(sec_key->key, key->key, 16); + + sec->n_cipher = 2; + } else { + sec_key->cipher_id = cipher; + sec_key->key_id = key->keyidx; + sec_key->key_len = key->keylen; + memcpy(sec_key->key, key->key, key->keylen); + + if (cipher == mt_cipher_tkip) { + /* rx/tx mic keys are swapped */ + memcpy(sec_key->key + 16, key->key + 24, 8); + memcpy(sec_key->key + 24, key->key + 16, 8); + } + + /* store key_conf for bip batch update */ + if (cipher == mt_cipher_aes_ccmp) { + memcpy(bip->key, key->key, key->keylen); + bip->keyidx = 
key->keyidx; + } + + len -= sizeof(*sec_key); + sec->n_cipher = 1; + } + } else { + len -= sizeof(sec->key); + sec->n_cipher = 0; + } + sec->len = cpu_to_le16(len); + + return 0; +} + +int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif, + struct mt7921_sta *msta, struct ieee80211_key_conf *key, + enum set_key_cmd cmd) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct sk_buff *skb; + int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec); + int ret; + + skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, len); + if (is_err(skb)) + return ptr_err(skb); + + ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd); + if (ret) + return ret; + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + mcu_uni_cmd_sta_rec_update, true); +} + +static void +mt7921_mcu_sta_ba_tlv(struct sk_buff *skb, + struct ieee80211_ampdu_params *params, + bool enable, bool tx) +{ + struct sta_rec_ba *ba; + struct tlv *tlv; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_ba, sizeof(*ba)); + + ba = (struct sta_rec_ba *)tlv; + ba->ba_type = tx ? mt_ba_type_originator : mt_ba_type_recipient, + ba->winsize = cpu_to_le16(params->buf_size); + ba->ssn = cpu_to_le16(params->ssn); + ba->ba_en = enable << params->tid; + ba->amsdu = params->amsdu; + ba->tid = params->tid; +} + +static void +mt7921_mcu_wtbl_ba_tlv(struct sk_buff *skb, + struct ieee80211_ampdu_params *params, + bool enable, bool tx, void *sta_wtbl, + void *wtbl_tlv) +{ + struct wtbl_ba *ba; + struct tlv *tlv; + + tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_ba, sizeof(*ba), + wtbl_tlv, sta_wtbl); + + ba = (struct wtbl_ba *)tlv; + ba->tid = params->tid; + + if (tx) { + ba->ba_type = mt_ba_type_originator; + ba->sn = enable ? 
cpu_to_le16(params->ssn) : 0; + ba->ba_en = enable; + } else { + memcpy(ba->peer_addr, params->sta->addr, eth_alen); + ba->ba_type = mt_ba_type_recipient; + ba->rst_ba_tid = params->tid; + ba->rst_ba_sel = rst_ba_mac_tid_match; + ba->rst_ba_sb = 1; + } + + if (enable && tx) + ba->ba_winsize = cpu_to_le16(params->buf_size); +} + +static int +mt7921_mcu_sta_ba(struct mt7921_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable, bool tx, int cmd) +{ + struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv; + struct mt7921_vif *mvif = msta->vif; + struct wtbl_req_hdr *wtbl_hdr; + struct tlv *sta_wtbl; + struct sk_buff *skb; + int ret; + + if (enable && tx && !params->amsdu) + msta->wcid.amsdu = false; + + skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, + mt7921_sta_update_max_size); + if (is_err(skb)) + return ptr_err(skb); + + sta_wtbl = mt7921_mcu_add_tlv(skb, sta_rec_wtbl, sizeof(struct tlv)); + + wtbl_hdr = mt7921_mcu_alloc_wtbl_req(dev, msta, wtbl_set, sta_wtbl, + &skb); + mt7921_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); + + ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); + if (ret) + return ret; + + skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, + mt7921_sta_update_max_size); + if (is_err(skb)) + return ptr_err(skb); + + mt7921_mcu_sta_ba_tlv(skb, params, enable, tx); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); +} + +int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + return mt7921_mcu_sta_ba(dev, params, enable, true, mcu_uni_cmd_sta_rec_update); +} + +int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) +{ + return mt7921_mcu_sta_ba(dev, params, enable, false, mcu_uni_cmd_sta_rec_update); +} + +static void +mt7921_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, void *sta_wtbl, + void *wtbl_tlv) +{ + struct mt7921_vif *mvif = (struct 
mt7921_vif *)vif->drv_priv; + struct wtbl_generic *generic; + struct wtbl_rx *rx; + struct tlv *tlv; + + tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_generic, sizeof(*generic), + wtbl_tlv, sta_wtbl); + + generic = (struct wtbl_generic *)tlv; + + if (sta) { + if (vif->type == nl80211_iftype_station) + generic->partial_aid = cpu_to_le16(vif->bss_conf.aid); + else + generic->partial_aid = cpu_to_le16(sta->aid); + memcpy(generic->peer_addr, sta->addr, eth_alen); + generic->muar_idx = mvif->mt76.omac_idx; + generic->qos = sta->wme; + } else { + /* use bssid in station mode */ + if (vif->type == nl80211_iftype_station) + memcpy(generic->peer_addr, vif->bss_conf.bssid, + eth_alen); + else + eth_broadcast_addr(generic->peer_addr); + + generic->muar_idx = 0xe; + } + + tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_rx, sizeof(*rx), + wtbl_tlv, sta_wtbl); + + rx = (struct wtbl_rx *)tlv; + rx->rca1 = sta ? vif->type != nl80211_iftype_ap : 1; + rx->rca2 = 1; + rx->rv = 1; +} + +static void +mt7921_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ +#define extra_info_ver bit(0) +#define extra_info_new bit(1) + struct sta_rec_basic *basic; + struct tlv *tlv; + int conn_type; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_basic, sizeof(*basic)); + + basic = (struct sta_rec_basic *)tlv; + basic->extra_info = cpu_to_le16(extra_info_ver); + + if (enable) { + basic->extra_info |= cpu_to_le16(extra_info_new); + basic->conn_state = conn_state_port_secure; + } else { + basic->conn_state = conn_state_disconnect; + } + + if (!sta) { + basic->conn_type = cpu_to_le32(connection_infra_bc); + eth_broadcast_addr(basic->peer_addr); + return; + } + + switch (vif->type) { + case nl80211_iftype_mesh_point: + case nl80211_iftype_ap: + if (vif->p2p) + conn_type = connection_p2p_gc; + else + conn_type = connection_infra_sta; + basic->conn_type = cpu_to_le32(conn_type); + basic->aid = cpu_to_le16(sta->aid); + break; + case nl80211_iftype_station: + if 
(vif->p2p) + conn_type = connection_p2p_go; + else + conn_type = connection_infra_ap; + basic->conn_type = cpu_to_le32(conn_type); + basic->aid = cpu_to_le16(vif->bss_conf.aid); + break; + case nl80211_iftype_adhoc: + basic->conn_type = cpu_to_le32(connection_ibss_adhoc); + basic->aid = cpu_to_le16(sta->aid); + break; + default: + warn_on(1); + break; + } + + memcpy(basic->peer_addr, sta->addr, eth_alen); + basic->qos = sta->wme; +} + +static void +mt7921_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; + struct sta_rec_he *he; + struct tlv *tlv; + u32 cap = 0; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_he, sizeof(*he)); + + he = (struct sta_rec_he *)tlv; + + if (elem->mac_cap_info[0] & ieee80211_he_mac_cap0_htc_he) + cap |= sta_rec_he_cap_htc; + + if (elem->mac_cap_info[2] & ieee80211_he_mac_cap2_bsr) + cap |= sta_rec_he_cap_bsr; + + if (elem->mac_cap_info[3] & ieee80211_he_mac_cap3_omi_control) + cap |= sta_rec_he_cap_om; + + if (elem->mac_cap_info[4] & ieee80211_he_mac_cap4_amdsu_in_ampdu) + cap |= sta_rec_he_cap_amsdu_in_ampdu; + + if (elem->mac_cap_info[4] & ieee80211_he_mac_cap4_bqr) + cap |= sta_rec_he_cap_bqr; + + if (elem->phy_cap_info[0] & + (ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_2g | + ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_5g)) + cap |= sta_rec_he_cap_bw20_ru242_support; + + if (elem->phy_cap_info[1] & + ieee80211_he_phy_cap1_ldpc_coding_in_payload) + cap |= sta_rec_he_cap_ldpc; + + if (elem->phy_cap_info[1] & + ieee80211_he_phy_cap1_he_ltf_and_gi_for_he_ppdus_0_8us) + cap |= sta_rec_he_cap_su_ppdu_1ltf_8us_gi; + + if (elem->phy_cap_info[2] & + ieee80211_he_phy_cap2_ndp_4x_ltf_and_3_2us) + cap |= sta_rec_he_cap_ndp_4ltf_3dot2ms_gi; + + if (elem->phy_cap_info[2] & + ieee80211_he_phy_cap2_stbc_tx_under_80mhz) + cap |= sta_rec_he_cap_le_eq_80m_tx_stbc; + + if (elem->phy_cap_info[2] & + 
ieee80211_he_phy_cap2_stbc_rx_under_80mhz) + cap |= sta_rec_he_cap_le_eq_80m_rx_stbc; + + if (elem->phy_cap_info[6] & + ieee80211_he_phy_cap6_partial_bw_ext_range) + cap |= sta_rec_he_cap_partial_bw_ext_range; + + if (elem->phy_cap_info[7] & + ieee80211_he_phy_cap7_he_su_mu_ppdu_4xltf_and_08_us_gi) + cap |= sta_rec_he_cap_su_mu_ppdu_4ltf_8us_gi; + + if (elem->phy_cap_info[7] & + ieee80211_he_phy_cap7_stbc_tx_above_80mhz) + cap |= sta_rec_he_cap_gt_80m_tx_stbc; + + if (elem->phy_cap_info[7] & + ieee80211_he_phy_cap7_stbc_rx_above_80mhz) + cap |= sta_rec_he_cap_gt_80m_rx_stbc; + + if (elem->phy_cap_info[8] & + ieee80211_he_phy_cap8_he_er_su_ppdu_4xltf_and_08_us_gi) + cap |= sta_rec_he_cap_er_su_ppdu_4ltf_8us_gi; + + if (elem->phy_cap_info[8] & + ieee80211_he_phy_cap8_he_er_su_1xltf_and_08_us_gi) + cap |= sta_rec_he_cap_er_su_ppdu_1ltf_8us_gi; + + if (elem->phy_cap_info[9] & + ieee80211_he_phy_cap9_non_triggered_cqi_feedback) + cap |= sta_rec_he_cap_trig_cqi_fk; + + if (elem->phy_cap_info[9] & + ieee80211_he_phy_cap9_tx_1024_qam_less_than_242_tone_ru) + cap |= sta_rec_he_cap_tx_1024qam_under_ru242; + + if (elem->phy_cap_info[9] & + ieee80211_he_phy_cap9_rx_1024_qam_less_than_242_tone_ru) + cap |= sta_rec_he_cap_rx_1024qam_under_ru242; + + he->he_cap = cpu_to_le32(cap); + + switch (sta->bandwidth) { + case ieee80211_sta_rx_bw_160: + if (elem->phy_cap_info[0] & + ieee80211_he_phy_cap0_channel_width_set_80plus80_mhz_in_5g) + he->max_nss_mcs[cmd_he_mcs_bw8080] = + he_cap->he_mcs_nss_supp.rx_mcs_80p80; + + he->max_nss_mcs[cmd_he_mcs_bw160] = + he_cap->he_mcs_nss_supp.rx_mcs_160; + fallthrough; + default: + he->max_nss_mcs[cmd_he_mcs_bw80] = + he_cap->he_mcs_nss_supp.rx_mcs_80; + break; + } + + he->t_frame_dur = + he_mac(cap1_tf_mac_pad_dur_mask, elem->mac_cap_info[1]); + he->max_ampdu_exp = + he_mac(cap3_max_ampdu_len_exp_mask, elem->mac_cap_info[3]); + + he->bw_set = + he_phy(cap0_channel_width_set_mask, elem->phy_cap_info[0]); + he->device_class = + 
he_phy(cap1_device_class_a, elem->phy_cap_info[1]); + he->punc_pream_rx = + he_phy(cap1_preamble_punc_rx_mask, elem->phy_cap_info[1]); + + he->dcm_tx_mode = + he_phy(cap3_dcm_max_const_tx_mask, elem->phy_cap_info[3]); + he->dcm_tx_max_nss = + he_phy(cap3_dcm_max_tx_nss_2, elem->phy_cap_info[3]); + he->dcm_rx_mode = + he_phy(cap3_dcm_max_const_rx_mask, elem->phy_cap_info[3]); + he->dcm_rx_max_nss = + he_phy(cap3_dcm_max_rx_nss_2, elem->phy_cap_info[3]); + he->dcm_rx_max_nss = + he_phy(cap8_dcm_max_ru_mask, elem->phy_cap_info[8]); + + he->pkt_ext = 2; +} + +static void +mt7921_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) +{ + struct sta_rec_uapsd *uapsd; + struct tlv *tlv; + + if (vif->type != nl80211_iftype_ap || !sta->wme) + return; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_apps, sizeof(*uapsd)); + uapsd = (struct sta_rec_uapsd *)tlv; + + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_vo) { + uapsd->dac_map |= bit(3); + uapsd->tac_map |= bit(3); + } + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_vi) { + uapsd->dac_map |= bit(2); + uapsd->tac_map |= bit(2); + } + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_be) { + uapsd->dac_map |= bit(1); + uapsd->tac_map |= bit(1); + } + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_bk) { + uapsd->dac_map |= bit(0); + uapsd->tac_map |= bit(0); + } + uapsd->max_sp = sta->max_sp; +} + +static void +mt7921_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct sta_rec_amsdu *amsdu; + struct tlv *tlv; + + if (!sta->max_amsdu_len) + return; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_hw_amsdu, sizeof(*amsdu)); + amsdu = (struct sta_rec_amsdu *)tlv; + amsdu->max_amsdu_num = 8; + amsdu->amsdu_en = true; + amsdu->max_mpdu_size = sta->max_amsdu_len >= + ieee80211_max_mpdu_len_vht_7991; + msta->wcid.amsdu = true; +} + +static bool +mt7921_hw_amsdu_supported(struct 
ieee80211_vif *vif) +{ + switch (vif->type) { + case nl80211_iftype_ap: + case nl80211_iftype_station: + return true; + default: + return false; + } +} + +static void +mt7921_mcu_sta_tlv(struct mt7921_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta, struct ieee80211_vif *vif) +{ + struct tlv *tlv; + struct sta_rec_state *state; + struct sta_rec_phy *phy; + struct sta_rec_ra_info *ra_info; + struct cfg80211_chan_def *chandef = &dev->mphy.chandef; + enum nl80211_band band = chandef->chan->band; + + /* starec ht */ + if (sta->ht_cap.ht_supported) { + struct sta_rec_ht *ht; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_ht, sizeof(*ht)); + ht = (struct sta_rec_ht *)tlv; + ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); + + if (mt7921_hw_amsdu_supported(vif)) + mt7921_mcu_sta_amsdu_tlv(skb, sta); + } + + /* starec vht */ + if (sta->vht_cap.vht_supported) { + struct sta_rec_vht *vht; + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_vht, sizeof(*vht)); + vht = (struct sta_rec_vht *)tlv; + vht->vht_cap = cpu_to_le32(sta->vht_cap.cap); + vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map; + } + + /* starec he */ + if (sta->he_cap.has_he) + mt7921_mcu_sta_he_tlv(skb, sta); + + /* starec uapsd */ + mt7921_mcu_sta_uapsd_tlv(skb, sta, vif); + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_phy, sizeof(*phy)); + phy = (struct sta_rec_phy *)tlv; + phy->phy_type = mt7921_get_phy_mode_v2(dev, vif, band, sta); + phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_ra, sizeof(*ra_info)); + ra_info = (struct sta_rec_ra_info *)tlv; + ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]); + + if (sta->ht_cap.ht_supported) { + memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask, + ht_mcs_mask_num); + } + + tlv = mt7921_mcu_add_tlv(skb, sta_rec_state, sizeof(*state)); + state = (struct sta_rec_state *)tlv; + state->state = 2; + + if (sta->vht_cap.vht_supported) { + state->vht_opmode = 
sta->bandwidth; + state->vht_opmode |= (sta->rx_nss - 1) << + ieee80211_opmode_notif_rx_nss_shift; + } +} + +static void +mt7921_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + void *sta_wtbl, void *wtbl_tlv) +{ + struct wtbl_smps *smps; + struct tlv *tlv; + + tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_smps, sizeof(*smps), + wtbl_tlv, sta_wtbl); + smps = (struct wtbl_smps *)tlv; + + if (sta->smps_mode == ieee80211_smps_dynamic) + smps->smps = true; +} + +static void +mt7921_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + void *sta_wtbl, void *wtbl_tlv) +{ + struct wtbl_ht *ht = null; + struct tlv *tlv; + + /* wtbl ht */ + if (sta->ht_cap.ht_supported) { + tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_ht, sizeof(*ht), + wtbl_tlv, sta_wtbl); + ht = (struct wtbl_ht *)tlv; + ht->ldpc = !!(sta->ht_cap.cap & ieee80211_ht_cap_ldpc_coding); + ht->af = sta->ht_cap.ampdu_factor; + ht->mm = sta->ht_cap.ampdu_density; + ht->ht = true; + } + + /* wtbl vht */ + if (sta->vht_cap.vht_supported) { + struct wtbl_vht *vht; + u8 af; + + tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_vht, sizeof(*vht), + wtbl_tlv, sta_wtbl); + vht = (struct wtbl_vht *)tlv; + vht->ldpc = !!(sta->vht_cap.cap & ieee80211_vht_cap_rxldpc); + vht->vht = true; + + af = field_get(ieee80211_vht_cap_max_a_mpdu_length_exponent_mask, + sta->vht_cap.cap); + if (ht) + ht->af = max_t(u8, ht->af, af); + } + + mt7921_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv); +} + +static int mt7921_mcu_start_firmware(struct mt7921_dev *dev, u32 addr, + u32 option) +{ + struct { + __le32 option; + __le32 addr; + } req = { + .option = cpu_to_le32(option), + .addr = cpu_to_le32(addr), + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_fw_start_req, &req, + sizeof(req), true); +} + +static int mt7921_mcu_restart(struct mt76_dev *dev) +{ + struct { + u8 power_mode; + u8 rsv[3]; + } req = { + .power_mode = 1, + }; + + return mt76_mcu_send_msg(dev, mcu_cmd_nic_power_ctrl, &req, + sizeof(req), false); +} + 
+static int mt7921_mcu_patch_sem_ctrl(struct mt7921_dev *dev, bool get) +{ + struct { + __le32 op; + } req = { + .op = cpu_to_le32(get ? patch_sem_get : patch_sem_release), + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_patch_sem_control, &req, + sizeof(req), true); +} + +static int mt7921_mcu_start_patch(struct mt7921_dev *dev) +{ + struct { + u8 check_crc; + u8 reserved[3]; + } req = { + .check_crc = 0, + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_patch_finish_req, &req, + sizeof(req), true); +} + +static int mt7921_driver_own(struct mt7921_dev *dev) +{ + u32 reg = mt7921_reg_map_l1(dev, mt_top_lpcr_host_band0); + + mt76_wr(dev, reg, mt_top_lpcr_host_drv_own); + if (!mt76_poll_msec(dev, reg, mt_top_lpcr_host_fw_own, + 0, 500)) { + dev_err(dev->mt76.dev, "timeout for driver own "); + return -eio; + } + + return 0; +} + +static int mt7921_mcu_init_download(struct mt7921_dev *dev, u32 addr, + u32 len, u32 mode) +{ + struct { + __le32 addr; + __le32 len; + __le32 mode; + } req = { + .addr = cpu_to_le32(addr), + .len = cpu_to_le32(len), + .mode = cpu_to_le32(mode), + }; + int attr; + + if (req.addr == cpu_to_le32(mcu_patch_address) || addr == 0x900000) + attr = mcu_cmd_patch_start_req; + else + attr = mcu_cmd_target_address_len_req; + + return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true); +} + +static int mt7921_load_patch(struct mt7921_dev *dev) +{ + const struct mt7921_patch_hdr *hdr; + const struct firmware *fw = null; + int i, ret, sem; + + sem = mt7921_mcu_patch_sem_ctrl(dev, 1); + switch (sem) { + case patch_is_dl: + return 0; + case patch_not_dl_sem_success: + break; + default: + dev_err(dev->mt76.dev, "failed to get patch semaphore "); + return -eagain; + } + + ret = request_firmware(&fw, mt7921_rom_patch, dev->mt76.dev); + if (ret) + goto out; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "invalid firmware "); + ret = -einval; + goto out; + } + + hdr = (const struct mt7921_patch_hdr 
*)(fw->data); + + dev_info(dev->mt76.dev, "hw/sw version: 0x%x, build time: %.16s ", + be32_to_cpu(hdr->hw_sw_ver), hdr->build_date); + + for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) { + struct mt7921_patch_sec *sec; + const u8 *dl; + u32 len, addr; + + sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) + + i * sizeof(*sec)); + if ((be32_to_cpu(sec->type) & patch_sec_type_mask) != + patch_sec_type_info) { + ret = -einval; + goto out; + } + + addr = be32_to_cpu(sec->info.addr); + len = be32_to_cpu(sec->info.len); + dl = fw->data + be32_to_cpu(sec->offs); + + ret = mt7921_mcu_init_download(dev, addr, len, + dl_mode_need_rsp); + if (ret) { + dev_err(dev->mt76.dev, "download request failed "); + goto out; + } + + ret = mt76_mcu_send_firmware(&dev->mt76, mcu_cmd_fw_scatter, + dl, len); + if (ret) { + dev_err(dev->mt76.dev, "failed to send patch "); + goto out; + } + } + + ret = mt7921_mcu_start_patch(dev); + if (ret) + dev_err(dev->mt76.dev, "failed to start patch "); + +out: + sem = mt7921_mcu_patch_sem_ctrl(dev, 0); + switch (sem) { + case patch_rel_sem_success: + break; + default: + ret = -eagain; + dev_err(dev->mt76.dev, "failed to release patch semaphore "); + goto out; + } + release_firmware(fw); + + return ret; +} + +static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa) +{ + u32 ret = 0; + + ret |= (feature_set & fw_feature_set_encrypt) ? + (dl_mode_encrypt | dl_mode_reset_sec_iv) : 0; + ret |= (feature_set & fw_feature_encry_mode) ? + dl_config_encry_mode_sel : 0; + ret |= field_prep(dl_mode_key_idx, + field_get(fw_feature_set_key_idx, feature_set)); + ret |= dl_mode_need_rsp; + ret |= is_wa ? 
dl_mode_working_pda_cr4 : 0; + + return ret; +} + +static int +mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev, + const struct mt7921_fw_trailer *hdr, + const u8 *data, bool is_wa) +{ + int i, offset = 0; + u32 override = 0, option = 0; + + for (i = 0; i < hdr->n_region; i++) { + const struct mt7921_fw_region *region; + int err; + u32 len, addr, mode; + + region = (const struct mt7921_fw_region *)((const u8 *)hdr - + (hdr->n_region - i) * sizeof(*region)); + mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa); + len = le32_to_cpu(region->len); + addr = le32_to_cpu(region->addr); + + if (region->feature_set & fw_feature_override_addr) + override = addr; + + err = mt7921_mcu_init_download(dev, addr, len, mode); + if (err) { + dev_err(dev->mt76.dev, "download request failed "); + return err; + } + + err = mt76_mcu_send_firmware(&dev->mt76, mcu_cmd_fw_scatter, + data + offset, len); + if (err) { + dev_err(dev->mt76.dev, "failed to send firmware. "); + return err; + } + + offset += len; + } + + if (override) + option |= fw_start_override; + + if (is_wa) + option |= fw_start_working_pda_cr4; + + return mt7921_mcu_start_firmware(dev, override, option); +} + +static int mt7921_load_ram(struct mt7921_dev *dev) +{ + const struct mt7921_fw_trailer *hdr; + const struct firmware *fw; + int ret; + + ret = request_firmware(&fw, mt7921_firmware_wm, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "invalid firmware "); + ret = -einval; + goto out; + } + + hdr = (const struct mt7921_fw_trailer *)(fw->data + fw->size - + sizeof(*hdr)); + + dev_info(dev->mt76.dev, "wm firmware version: %.10s, build time: %.15s ", + hdr->fw_ver, hdr->build_date); + + ret = mt7921_mcu_send_ram_firmware(dev, hdr, fw->data, false); + if (ret) { + dev_err(dev->mt76.dev, "failed to start wm firmware "); + goto out; + } + + snprintf(dev->mt76.hw->wiphy->fw_version, + sizeof(dev->mt76.hw->wiphy->fw_version), + "%.10s-%.15s", 
hdr->fw_ver, hdr->build_date); + +out: + release_firmware(fw); + + return ret; +} + +static int mt7921_load_firmware(struct mt7921_dev *dev) +{ + int ret; + + ret = mt76_get_field(dev, mt_conn_on_misc, mt_top_misc2_fw_n9_rdy); + if (ret) { + dev_dbg(dev->mt76.dev, "firmware is already download "); + return -eio; + } + + ret = mt7921_load_patch(dev); + if (ret) + return ret; + + ret = mt7921_load_ram(dev); + if (ret) + return ret; + + if (!mt76_poll_msec(dev, mt_conn_on_misc, mt_top_misc2_fw_n9_rdy, + mt_top_misc2_fw_n9_rdy, 1500)) { + dev_err(dev->mt76.dev, "timeout for initializing firmware "); + + return -eio; + } + + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[mt_mcuq_fwdl], false); + + dev_err(dev->mt76.dev, "firmware init done "); + + return 0; +} + +int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl) +{ + struct { + u8 ctrl_val; + u8 pad[3]; + } data = { + .ctrl_val = ctrl + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_fwlog_2_host, &data, + sizeof(data), false); +} + +int mt7921_mcu_init(struct mt7921_dev *dev) +{ + static const struct mt76_mcu_ops mt7921_mcu_ops = { + .headroom = sizeof(struct mt7921_mcu_txd), + .mcu_skb_send_msg = mt7921_mcu_send_message, + .mcu_parse_response = mt7921_mcu_parse_response, + .mcu_restart = mt7921_mcu_restart, + }; + int ret; + + dev->mt76.mcu_ops = &mt7921_mcu_ops; + + ret = mt7921_driver_own(dev); + if (ret) + return ret; + + ret = mt7921_load_firmware(dev); + if (ret) + return ret; + + set_bit(mt76_state_mcu_running, &dev->mphy.state); + mt7921_mcu_fw_log_2_host(dev, 1); + + return 0; +} + +void mt7921_mcu_exit(struct mt7921_dev *dev) +{ + u32 reg = mt7921_reg_map_l1(dev, mt_top_misc); + + __mt76_mcu_restart(&dev->mt76); + if (!mt76_poll_msec(dev, reg, mt_top_misc_fw_state, + field_prep(mt_top_misc_fw_state, + fw_state_fw_download), 1000)) { + dev_err(dev->mt76.dev, "failed to exit mcu "); + return; + } + + reg = mt7921_reg_map_l1(dev, mt_top_lpcr_host_band0); + mt76_wr(dev, reg, mt_top_lpcr_host_fw_own); 
+ skb_queue_purge(&dev->mt76.mcu.res_q); +} + +int mt7921_mcu_set_mac(struct mt7921_dev *dev, int band, + bool enable, bool hdr_trans) +{ + struct { + u8 enable; + u8 band; + u8 rsv[2]; + } __packed req_mac = { + .enable = enable, + .band = band, + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd_mac_init_ctrl, + &req_mac, sizeof(req_mac), true); +} + +int mt7921_mcu_set_rts_thresh(struct mt7921_phy *phy, u32 val) +{ + struct mt7921_dev *dev = phy->dev; + struct { + u8 prot_idx; + u8 band; + u8 rsv[2]; + __le32 len_thresh; + __le32 pkt_thresh; + } __packed req = { + .prot_idx = 1, + .band = phy != &dev->phy, + .len_thresh = cpu_to_le32(val), + .pkt_thresh = cpu_to_le32(0x2), + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd_protect_ctrl, &req, + sizeof(req), true); +} + +int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif) +{ +#define wmm_aifs_set bit(0) +#define wmm_cw_min_set bit(1) +#define wmm_cw_max_set bit(2) +#define wmm_txop_set bit(3) +#define wmm_param_set genmask(3, 0) +#define tx_cmd_mode 1 + struct edca { + u8 queue; + u8 set; + u8 aifs; + u8 cw_min; + __le16 cw_max; + __le16 txop; + }; + struct mt7921_mcu_tx { + u8 total; + u8 action; + u8 valid; + u8 mode; + + struct edca edca[ieee80211_num_acs]; + } __packed req = { + .valid = true, + .mode = tx_cmd_mode, + .total = ieee80211_num_acs, + }; + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + int ac; + + for (ac = 0; ac < ieee80211_num_acs; ac++) { + struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct edca *e = &req.edca[ac]; + + e->set = wmm_param_set; + e->queue = ac + mvif->mt76.wmm_idx * mt7921_max_wmm_sets; + e->aifs = q->aifs; + e->txop = cpu_to_le16(q->txop); + + if (q->cw_min) + e->cw_min = fls(q->cw_min); + else + e->cw_min = 5; + + if (q->cw_max) + e->cw_max = cpu_to_le16(fls(q->cw_max)); + else + e->cw_max = cpu_to_le16(10); + } + return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd_edca_update, &req, + sizeof(req), true); +} 
+ +int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd) +{ + struct mt7921_dev *dev = phy->dev; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq1 = chandef->center_freq1; + struct { + u8 control_ch; + u8 center_ch; + u8 bw; + u8 tx_streams_num; + u8 rx_streams; /* mask or num */ + u8 switch_reason; + u8 band_idx; + u8 center_ch2; /* for 80+80 only */ + __le16 cac_case; + u8 channel_band; + u8 rsv0; + __le32 outband_freq; + u8 txpower_drop; + u8 ap_bw; + u8 ap_center_ch; + u8 rsv1[57]; + } __packed req = { + .control_ch = chandef->chan->hw_value, + .center_ch = ieee80211_frequency_to_channel(freq1), + .bw = mt7921_mcu_chan_bw(chandef), + .tx_streams_num = hweight8(phy->mt76->antenna_mask), + .rx_streams = phy->mt76->antenna_mask, + .band_idx = phy != &dev->phy, + .channel_band = chandef->chan->band, + }; + + if (dev->mt76.hw->conf.flags & ieee80211_conf_offchannel) + req.switch_reason = ch_switch_scan_bypass_dpd; + else if ((chandef->chan->flags & ieee80211_chan_radar) && + chandef->chan->dfs_state != nl80211_dfs_available) + req.switch_reason = ch_switch_dfs; + else + req.switch_reason = ch_switch_normal; + + if (cmd == mcu_ext_cmd_channel_switch) + req.rx_streams = hweight8(req.rx_streams); + + if (chandef->width == nl80211_chan_width_80p80) { + int freq2 = chandef->center_freq2; + + req.center_ch2 = ieee80211_frequency_to_channel(freq2); + } + + return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); +} + +int mt7921_mcu_set_eeprom(struct mt7921_dev *dev) +{ + struct req_hdr { + u8 buffer_mode; + u8 format; + __le16 len; + } __packed req = { + .buffer_mode = ee_mode_efuse, + .format = ee_format_whole, + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd_efuse_buffer_mode, + &req, sizeof(req), true); +} + +int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset) +{ + struct mt7921_mcu_eeprom_info req = { + .addr = cpu_to_le32(round_down(offset, 16)), + }; + struct mt7921_mcu_eeprom_info *res; + struct sk_buff 
*skb; + int ret; + u8 *buf; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, mcu_ext_cmd_efuse_access, &req, + sizeof(req), true, &skb); + if (ret) + return ret; + + res = (struct mt7921_mcu_eeprom_info *)skb->data; + buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); + memcpy(buf, res->data, 16); + dev_kfree_skb(skb); + + return 0; +} + +int +mt7921_mcu_uni_add_dev(struct mt7921_dev *dev, + struct ieee80211_vif *vif, bool enable) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + u8 omac_idx = mvif->mt76.omac_idx; + struct { + struct { + u8 omac_idx; + u8 band_idx; + __le16 pad; + } __packed hdr; + struct req_tlv { + __le16 tag; + __le16 len; + u8 active; + u8 pad; + u8 omac_addr[eth_alen]; + } __packed tlv; + } dev_req = { + .hdr = { + .omac_idx = omac_idx, + .band_idx = mvif->mt76.band_idx, + }, + .tlv = { + .tag = cpu_to_le16(dev_info_active), + .len = cpu_to_le16(sizeof(struct req_tlv)), + .active = enable, + }, + }; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7921_bss_basic_tlv basic; + } basic_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .basic = { + .tag = cpu_to_le16(uni_bss_info_basic), + .len = cpu_to_le16(sizeof(struct mt7921_bss_basic_tlv)), + .omac_idx = omac_idx, + .band_idx = mvif->mt76.band_idx, + .wmm_idx = mvif->mt76.wmm_idx, + .active = enable, + .bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx), + .sta_idx = cpu_to_le16(mvif->sta.wcid.idx), + .conn_state = 1, + }, + }; + int err, idx, cmd, len; + void *data; + + switch (vif->type) { + case nl80211_iftype_mesh_point: + case nl80211_iftype_ap: + basic_req.basic.conn_type = cpu_to_le32(connection_infra_ap); + break; + case nl80211_iftype_station: + basic_req.basic.conn_type = cpu_to_le32(connection_infra_sta); + break; + case nl80211_iftype_adhoc: + basic_req.basic.conn_type = cpu_to_le32(connection_ibss_adhoc); + break; + default: + warn_on(1); + break; + } + + idx = omac_idx > ext_bssid_start ? 
hw_bssid_0 : omac_idx; + basic_req.basic.hw_bss_idx = idx; + + memcpy(dev_req.tlv.omac_addr, vif->addr, eth_alen); + + cmd = enable ? mcu_uni_cmd_dev_info_update : mcu_uni_cmd_bss_info_update; + data = enable ? (void *)&dev_req : (void *)&basic_req; + len = enable ? sizeof(dev_req) : sizeof(basic_req); + + err = mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true); + if (err < 0) + return err; + + cmd = enable ? mcu_uni_cmd_bss_info_update : mcu_uni_cmd_dev_info_update; + data = enable ? (void *)&basic_req : (void *)&dev_req; + len = enable ? sizeof(basic_req) : sizeof(dev_req); + + return mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true); +} + +int +mt7921_mcu_uni_add_bss(struct mt7921_phy *phy, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; + int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; + struct mt7921_dev *dev = phy->dev; + enum nl80211_band band = chandef->chan->band; + u8 omac_idx = mvif->mt76.omac_idx; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7921_bss_basic_tlv basic; + struct mt7921_bss_qos_tlv qos; + } basic_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .basic = { + .tag = cpu_to_le16(uni_bss_info_basic), + .len = cpu_to_le16(sizeof(struct mt7921_bss_basic_tlv)), + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + .dtim_period = vif->bss_conf.dtim_period, + .omac_idx = omac_idx, + .band_idx = mvif->mt76.band_idx, + .wmm_idx = mvif->mt76.wmm_idx, + .active = true, /* keep bss deactivated */ + .phymode = mt7921_get_phy_mode(phy->dev, vif, band, null), + }, + .qos = { + .tag = cpu_to_le16(uni_bss_info_qbss), + .len = cpu_to_le16(sizeof(struct mt7921_bss_qos_tlv)), + .qos = vif->bss_conf.qos, + }, + }; + + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bss_info_uni_he he; + } he_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .he = { + 
.tag = cpu_to_le16(uni_bss_info_he_basic), + .len = cpu_to_le16(sizeof(struct bss_info_uni_he)), + }, + }; + + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct rlm_tlv { + __le16 tag; + __le16 len; + u8 control_channel; + u8 center_chan; + u8 center_chan2; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 short_st; + u8 ht_op_info; + u8 sco; + u8 pad[3]; + } __packed rlm; + } __packed rlm_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .rlm = { + .tag = cpu_to_le16(uni_bss_info_rlm), + .len = cpu_to_le16(sizeof(struct rlm_tlv)), + .control_channel = chandef->chan->hw_value, + .center_chan = ieee80211_frequency_to_channel(freq1), + .center_chan2 = ieee80211_frequency_to_channel(freq2), + .tx_streams = hweight8(phy->mt76->antenna_mask), + .rx_streams = phy->mt76->chainmask, + .short_st = true, + }, + }; + int err, conn_type; + u8 idx; + + idx = omac_idx > ext_bssid_start ? hw_bssid_0 : omac_idx; + basic_req.basic.hw_bss_idx = idx; + + switch (vif->type) { + case nl80211_iftype_mesh_point: + case nl80211_iftype_ap: + if (vif->p2p) + conn_type = connection_p2p_go; + else + conn_type = connection_infra_ap; + basic_req.basic.conn_type = cpu_to_le32(conn_type); + break; + case nl80211_iftype_station: + if (vif->p2p) + conn_type = connection_p2p_gc; + else + conn_type = connection_infra_sta; + basic_req.basic.conn_type = cpu_to_le32(conn_type); + break; + case nl80211_iftype_adhoc: + basic_req.basic.conn_type = cpu_to_le32(connection_ibss_adhoc); + break; + default: + warn_on(1); + break; + } + + memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, eth_alen); + basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx); + basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx); + basic_req.basic.conn_state = !enable; + + err = mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, + &basic_req, sizeof(basic_req), true); + if (err < 0) + return err; + + if (vif->bss_conf.he_support) { + mt7921_mcu_uni_bss_he_tlv((struct tlv 
*)&he_req.he, vif, phy); + + err = mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, + &he_req, sizeof(he_req), true); + if (err < 0) + return err; + } + + switch (chandef->width) { + case nl80211_chan_width_40: + rlm_req.rlm.bw = cmd_cbw_40mhz; + break; + case nl80211_chan_width_80: + rlm_req.rlm.bw = cmd_cbw_80mhz; + break; + case nl80211_chan_width_80p80: + rlm_req.rlm.bw = cmd_cbw_8080mhz; + break; + case nl80211_chan_width_160: + rlm_req.rlm.bw = cmd_cbw_160mhz; + break; + case nl80211_chan_width_5: + rlm_req.rlm.bw = cmd_cbw_5mhz; + break; + case nl80211_chan_width_10: + rlm_req.rlm.bw = cmd_cbw_10mhz; + break; + case nl80211_chan_width_20_noht: + case nl80211_chan_width_20: + default: + rlm_req.rlm.bw = cmd_cbw_20mhz; + break; + } + + if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 1; /* sca */ + else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) + rlm_req.rlm.sco = 3; /* scb */ + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, + &rlm_req, sizeof(rlm_req), true); +} + +static int +mt7921_mcu_add_sta_cmd(struct mt7921_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable, int cmd) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct wtbl_req_hdr *wtbl_hdr; + struct mt7921_sta *msta; + struct tlv *sta_wtbl; + struct sk_buff *skb; + + msta = sta ? 
(struct mt7921_sta *)sta->drv_priv : &mvif->sta; + + skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, + mt7921_sta_update_max_size); + if (is_err(skb)) + return ptr_err(skb); + + mt7921_mcu_sta_basic_tlv(skb, vif, sta, enable); + if (enable && sta) + mt7921_mcu_sta_tlv(dev, skb, sta, vif); + + sta_wtbl = mt7921_mcu_add_tlv(skb, sta_rec_wtbl, sizeof(struct tlv)); + + wtbl_hdr = mt7921_mcu_alloc_wtbl_req(dev, msta, wtbl_reset_and_set, + sta_wtbl, &skb); + if (enable) { + mt7921_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); + if (sta) + mt7921_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr); + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); +} + +int +mt7921_mcu_uni_add_sta(struct mt7921_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, bool enable) +{ + return mt7921_mcu_add_sta_cmd(dev, vif, sta, enable, + mcu_uni_cmd_sta_rec_update); +} + +int mt7921_mcu_set_channel_domain(struct mt7921_phy *phy) +{ + struct mt76_phy *mphy = phy->mt76; + struct mt7921_dev *dev = phy->dev; + struct mt7921_mcu_channel_domain { + __le32 country_code; /* regulatory_request.alpha2 */ + u8 bw_2g; /* bw_20_40m 0 + * bw_20m 1 + * bw_20_40_80m 2 + * bw_20_40_80_160m 3 + * bw_20_40_80_8080m 4 + */ + u8 bw_5g; + __le16 pad; + u8 n_2ch; + u8 n_5ch; + __le16 pad2; + } __packed hdr = { + .bw_2g = 0, + .bw_5g = 3, + .n_2ch = mphy->sband_2g.sband.n_channels, + .n_5ch = mphy->sband_5g.sband.n_channels, + }; + struct mt7921_mcu_chan { + __le16 hw_value; + __le16 pad; + __le32 flags; + } __packed; + int i, n_channels = hdr.n_2ch + hdr.n_5ch; + int len = sizeof(hdr) + n_channels * sizeof(struct mt7921_mcu_chan); + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, len); + if (!skb) + return -enomem; + + skb_put_data(skb, &hdr, sizeof(hdr)); + + for (i = 0; i < n_channels; i++) { + struct ieee80211_channel *chan; + struct mt7921_mcu_chan channel; + + if (i < hdr.n_2ch) + chan = &mphy->sband_2g.sband.channels[i]; + else + chan = 
&mphy->sband_5g.sband.channels[i - hdr.n_2ch]; + + channel.hw_value = cpu_to_le16(chan->hw_value); + channel.flags = cpu_to_le32(chan->flags); + channel.pad = 0; + + skb_put_data(skb, &channel, sizeof(channel)); + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_cmd_set_chan_domain, + false); +} + +#define mt7921_scan_channel_time 60 +int mt7921_mcu_hw_scan(struct mt7921_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_scan_request *scan_req) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct cfg80211_scan_request *sreq = &scan_req->req; + int n_ssids = 0, err, i, duration = mt7921_scan_channel_time; + int ext_channels_num = max_t(int, sreq->n_channels - 32, 0); + struct ieee80211_channel **scan_list = sreq->channels; + struct mt7921_dev *dev = phy->dev; + struct mt7921_mcu_scan_channel *chan; + struct mt7921_hw_scan_req *req; + struct sk_buff *skb; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, sizeof(*req)); + if (!skb) + return -enomem; + + set_bit(mt76_hw_scanning, &phy->mt76->state); + mvif->mt76.scan_seq_num = (mvif->mt76.scan_seq_num + 1) & 0x7f; + + req = (struct mt7921_hw_scan_req *)skb_put(skb, sizeof(*req)); + + req->seq_num = mvif->mt76.scan_seq_num; + req->bss_idx = mvif->mt76.idx; + req->scan_type = sreq->n_ssids ? 1 : 0; + req->probe_req_num = sreq->n_ssids ? 2 : 0; + req->version = 1; + + for (i = 0; i < sreq->n_ssids; i++) { + if (!sreq->ssids[i].ssid_len) + continue; + + req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid, + sreq->ssids[i].ssid_len); + n_ssids++; + } + req->ssid_type = n_ssids ? bit(2) : bit(0); + req->ssid_type_ext = n_ssids ? 
bit(0) : 0; + req->ssids_num = n_ssids; + + /* increase channel time for passive scan */ + if (!sreq->n_ssids) + duration *= 2; + req->timeout_value = cpu_to_le16(sreq->n_channels * duration); + req->channel_min_dwell_time = cpu_to_le16(duration); + req->channel_dwell_time = cpu_to_le16(duration); + + req->channels_num = min_t(u8, sreq->n_channels, 32); + req->ext_channels_num = min_t(u8, ext_channels_num, 32); + for (i = 0; i < req->channels_num + req->ext_channels_num; i++) { + if (i >= 32) + chan = &req->ext_channels[i - 32]; + else + chan = &req->channels[i]; + + chan->band = scan_list[i]->band == nl80211_band_2ghz ? 1 : 2; + chan->channel_num = scan_list[i]->hw_value; + } + req->channel_type = sreq->n_channels ? 4 : 0; + + if (sreq->ie_len > 0) { + memcpy(req->ies, sreq->ie, sreq->ie_len); + req->ies_len = cpu_to_le16(sreq->ie_len); + } + + memcpy(req->bssid, sreq->bssid, eth_alen); + if (sreq->flags & nl80211_scan_flag_random_addr) { + get_random_mask_addr(req->random_mac, sreq->mac_addr, + sreq->mac_addr_mask); + req->scan_func = 1; + } + + err = mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_cmd_start_hw_scan, + false); + if (err < 0) + clear_bit(mt76_hw_scanning, &phy->mt76->state); + + return err; +} + +int mt7921_mcu_cancel_hw_scan(struct mt7921_phy *phy, + struct ieee80211_vif *vif) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = phy->dev; + struct { + u8 seq_num; + u8 is_ext_channel; + u8 rsv[2]; + } __packed req = { + .seq_num = mvif->mt76.scan_seq_num, + }; + + if (test_and_clear_bit(mt76_hw_scanning, &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(phy->mt76->hw, &info); + } + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_cancel_hw_scan, &req, + sizeof(req), false); +} + +u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u16 wlan_idx) +{ + struct mt7921_mcu_wlan_info wtbl_info = { + .wlan_idx = cpu_to_le32(wlan_idx), + }; + struct sk_buff *skb; + 
int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, mcu_cmd_get_wtbl, + &wtbl_info, sizeof(wtbl_info), true, + &skb); + if (ret) + return ret; + + mt7921_mcu_tx_rate_report(dev, skb, wlan_idx); + dev_kfree_skb(skb); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +/* spdx-license-identifier: isc */ +/* copyright (c) 2020 mediatek inc. */ + +#ifndef __mt7921_mcu_h +#define __mt7921_mcu_h + +struct mt7921_mcu_txd { + __le32 txd[8]; + + __le16 len; + __le16 pq_id; + + u8 cid; + u8 pkt_type; + u8 set_query; /* fw don't care */ + u8 seq; + + u8 uc_d2b0_rev; + u8 ext_cid; + u8 s2d_index; + u8 ext_cid_ack; + + u32 reserved[5]; +} __packed __aligned(4); + +/** + * struct mt7921_uni_txd - mcu command descriptor for firmware v3 + * @txd: hardware descriptor + * @len: total length not including txd + * @cid: command identifier + * @pkt_type: must be 0xa0 (cmd packet by long format) + * @frag_n: fragment number + * @seq: sequence number + * @checksum: 0 mean there is no checksum + * @s2d_index: index for command source and destination + * definition | value | note + * cmd_s2d_idx_h2n | 0x00 | command from host to wm + * cmd_s2d_idx_c2n | 0x01 | command from wa to wm + * cmd_s2d_idx_h2c | 0x02 | command from host to wa + * cmd_s2d_idx_h2n_and_h2c | 0x03 | command from host to wa and wm + * + * @option: command option + * bit[0]: uni_cmd_opt_bit_ack + * set to 1 to request a fw reply + * if uni_cmd_opt_bit_0_ack is set and uni_cmd_opt_bit_2_set_query + * is set, mcu firmware will send response event eid = 0x01 + * (uni_event_id_cmd_result) to the host. 
+ * bit[1]: uni_cmd_opt_bit_uni_cmd + * 0: original command + * 1: unified command + * bit[2]: uni_cmd_opt_bit_set_query + * 0: query command + * 1: set command + */ +struct mt7921_uni_txd { + __le32 txd[8]; + + /* dw1 */ + __le16 len; + __le16 cid; + + /* dw2 */ + u8 reserved; + u8 pkt_type; + u8 frag_n; + u8 seq; + + /* dw3 */ + __le16 checksum; + u8 s2d_index; + u8 option; + + /* dw4 */ + u8 reserved2[4]; +} __packed __aligned(4); + +/* event table */ +enum { + mcu_event_reg_access = 0x05, + mcu_event_scan_done = 0x0d, + mcu_event_bss_absence = 0x11, + mcu_event_bss_beacon_loss = 0x13, + mcu_event_ch_privilege = 0x18, + mcu_event_sched_scan_done = 0x23, + mcu_event_dbg_msg = 0x27, +}; + +/* ext event table */ +enum { + mcu_ext_event_rate_report = 0x87, +}; + +struct mt7921_mcu_rxd { + __le32 rxd[6]; + + __le16 len; + __le16 pkt_type_id; + + u8 eid; + u8 seq; + __le16 __rsv; + + u8 ext_eid; + u8 __rsv1[2]; + u8 s2d_index; +}; + +struct mt7921_mcu_eeprom_info { + __le32 addr; + __le32 valid; + u8 data[16]; +} __packed; + +#define mt_ra_rate_nss genmask(8, 6) +#define mt_ra_rate_mcs genmask(3, 0) +#define mt_ra_rate_tx_mode genmask(12, 9) +#define mt_ra_rate_dcm_en bit(4) +#define mt_ra_rate_bw genmask(14, 13) + +#define mcu_pq_id(p, q) (((p) << 15) | ((q) << 10)) +#define mcu_pkt_id 0xa0 + +enum { + mcu_q_query, + mcu_q_set, + mcu_q_reserved, + mcu_q_na +}; + +enum { + mcu_s2d_h2n, + mcu_s2d_c2n, + mcu_s2d_h2c, + mcu_s2d_h2cn +}; + +#define mcu_fw_prefix bit(31) +#define mcu_uni_prefix bit(30) +#define mcu_ce_prefix bit(29) +#define mcu_query_prefix bit(28) +#define mcu_cmd_mask ~(mcu_fw_prefix | mcu_uni_prefix | \ + mcu_ce_prefix | mcu_query_prefix) + +#define mcu_query_mask bit(16) + +enum { + mcu_cmd_target_address_len_req = mcu_fw_prefix | 0x01, + mcu_cmd_fw_start_req = mcu_fw_prefix | 0x02, + mcu_cmd_nic_power_ctrl = mcu_fw_prefix | 0x4, + mcu_cmd_patch_start_req = mcu_fw_prefix | 0x05, + mcu_cmd_patch_finish_req = mcu_fw_prefix | 0x07, + 
mcu_cmd_patch_sem_control = mcu_fw_prefix | 0x10, + mcu_cmd_ext_cid = 0xed, + mcu_cmd_fw_scatter = mcu_fw_prefix | 0xee, +}; + +enum { + mcu_ext_cmd_efuse_access = 0x01, + mcu_ext_cmd_channel_switch = 0x08, + mcu_ext_cmd_efuse_buffer_mode = 0x21, + mcu_ext_cmd_edca_update = 0x27, + mcu_ext_cmd_thermal_ctrl = 0x2c, + mcu_ext_cmd_wtbl_update = 0x32, + mcu_ext_cmd_protect_ctrl = 0x3e, + mcu_ext_cmd_mac_init_ctrl = 0x46, + mcu_ext_cmd_rx_hdr_trans = 0x47, + mcu_ext_cmd_set_rx_path = 0x4e, +}; + +enum { + mcu_uni_cmd_dev_info_update = mcu_uni_prefix | 0x01, + mcu_uni_cmd_bss_info_update = mcu_uni_prefix | 0x02, + mcu_uni_cmd_sta_rec_update = mcu_uni_prefix | 0x03, + mcu_uni_cmd_suspend = mcu_uni_prefix | 0x05, + mcu_uni_cmd_offload = mcu_uni_prefix | 0x06, + mcu_uni_cmd_hif_ctrl = mcu_uni_prefix | 0x07, +}; + +struct mt7921_mcu_uni_event { + u8 cid; + u8 pad[3]; + __le32 status; /* 0: success, others: fail */ +} __packed; + +/* offload mcu commands */ +enum { + mcu_cmd_start_hw_scan = mcu_ce_prefix | 0x03, + mcu_cmd_set_ps_profile = mcu_ce_prefix | 0x05, + mcu_cmd_set_chan_domain = mcu_ce_prefix | 0x0f, + mcu_cmd_set_bss_connected = mcu_ce_prefix | 0x16, + mcu_cmd_set_bss_abort = mcu_ce_prefix | 0x17, + mcu_cmd_cancel_hw_scan = mcu_ce_prefix | 0x1b, + mcu_cmd_sched_scan_enable = mcu_ce_prefix | 0x61, + mcu_cmd_sched_scan_req = mcu_ce_prefix | 0x62, + mcu_cmd_reg_write = mcu_ce_prefix | 0xc0, + mcu_cmd_reg_read = mcu_ce_prefix | mcu_query_mask | 0xc0, + mcu_cmd_fwlog_2_host = mcu_ce_prefix | 0xc5, + mcu_cmd_get_wtbl = mcu_ce_prefix | 0xcd, +}; + +#define mcu_cmd_ack bit(0) +#define mcu_cmd_uni bit(1) +#define mcu_cmd_query bit(2) + +#define mcu_cmd_uni_ext_ack (mcu_cmd_ack | mcu_cmd_uni | mcu_cmd_query) + +enum { + uni_bss_info_basic = 0, + uni_bss_info_rlm = 2, + uni_bss_info_he_basic = 5, + uni_bss_info_bcn_content = 7, + uni_bss_info_qbss = 15, + uni_bss_info_uapsd = 19, +}; + +enum { + uni_suspend_mode_setting, + uni_suspend_wow_ctrl, + uni_suspend_wow_gpio_param, + 
uni_suspend_wow_wakeup_port, + uni_suspend_wow_pattern, +}; + +enum { + uni_offload_offload_arp, + uni_offload_offload_nd, + uni_offload_offload_gtk_rekey, + uni_offload_offload_bmc_rpy_detect, +}; + +enum { + patch_sem_release, + patch_sem_get +}; + +enum { + patch_not_dl_sem_fail, + patch_is_dl, + patch_not_dl_sem_success, + patch_rel_sem_success +}; + +enum { + fw_state_initial, + fw_state_fw_download, + fw_state_normal_operation, + fw_state_normal_trx, + fw_state_wacpu_rdy = 7 +}; + +enum { + ee_mode_efuse, + ee_mode_buffer, +}; + +enum { + ee_format_bin, + ee_format_whole, + ee_format_multiple, +}; + +enum { + mcu_phy_state_tx_rate, + mcu_phy_state_rx_rate, + mcu_phy_state_rssi, + mcu_phy_state_contention_rx_rate, + mcu_phy_state_ofdmlq_cninfo, +}; + +#define sta_type_sta bit(0) +#define sta_type_ap bit(1) +#define sta_type_adhoc bit(2) +#define sta_type_wds bit(4) +#define sta_type_bc bit(5) + +#define network_infra bit(16) +#define network_p2p bit(17) +#define network_ibss bit(18) +#define network_wds bit(21) + +#define connection_infra_sta (sta_type_sta | network_infra) +#define connection_infra_ap (sta_type_ap | network_infra) +#define connection_p2p_gc (sta_type_sta | network_p2p) +#define connection_p2p_go (sta_type_ap | network_p2p) +#define connection_ibss_adhoc (sta_type_adhoc | network_ibss) +#define connection_wds (sta_type_wds | network_wds) +#define connection_infra_bc (sta_type_bc | network_infra) + +#define conn_state_disconnect 0 +#define conn_state_connect 1 +#define conn_state_port_secure 2 + +enum { + dev_info_active, + dev_info_max_num +}; + +enum { + cmd_cbw_20mhz = ieee80211_sta_rx_bw_20, + cmd_cbw_40mhz = ieee80211_sta_rx_bw_40, + cmd_cbw_80mhz = ieee80211_sta_rx_bw_80, + cmd_cbw_160mhz = ieee80211_sta_rx_bw_160, + cmd_cbw_10mhz, + cmd_cbw_5mhz, + cmd_cbw_8080mhz, + + cmd_he_mcs_bw80 = 0, + cmd_he_mcs_bw160, + cmd_he_mcs_bw8080, + cmd_he_mcs_bw_num +}; + +struct tlv { + __le16 tag; + __le16 len; +} __packed; + +struct bss_info_uni_he { + 
__le16 tag; + __le16 len; + __le16 he_rts_thres; + u8 he_pe_duration; + u8 su_disable; + __le16 max_nss_mcs[cmd_he_mcs_bw_num]; + u8 rsv[2]; +} __packed; + +enum { + wtbl_reset_and_set = 1, + wtbl_set, + wtbl_query, + wtbl_reset_all +}; + +struct wtbl_req_hdr { + u8 wlan_idx_lo; + u8 operation; + __le16 tlv_num; + u8 wlan_idx_hi; + u8 rsv[3]; +} __packed; + +struct wtbl_generic { + __le16 tag; + __le16 len; + u8 peer_addr[eth_alen]; + u8 muar_idx; + u8 skip_tx; + u8 cf_ack; + u8 qos; + u8 mesh; + u8 adm; + __le16 partial_aid; + u8 baf_en; + u8 aad_om; +} __packed; + +struct wtbl_rx { + __le16 tag; + __le16 len; + u8 rcid; + u8 rca1; + u8 rca2; + u8 rv; + u8 rsv[4]; +} __packed; + +struct wtbl_ht { + __le16 tag; + __le16 len; + u8 ht; + u8 ldpc; + u8 af; + u8 mm; + u8 rsv[4]; +} __packed; + +struct wtbl_vht { + __le16 tag; + __le16 len; + u8 ldpc; + u8 dyn_bw; + u8 vht; + u8 txop_ps; + u8 rsv[4]; +} __packed; + +struct wtbl_hdr_trans { + __le16 tag; + __le16 len; + u8 to_ds; + u8 from_ds; + u8 no_rx_trans; + u8 _rsv; +}; + +enum { + mt_ba_type_invalid, + mt_ba_type_originator, + mt_ba_type_recipient +}; + +enum { + rst_ba_mac_tid_match, + rst_ba_mac_match, + rst_ba_no_match +}; + +struct wtbl_ba { + __le16 tag; + __le16 len; + /* common */ + u8 tid; + u8 ba_type; + u8 rsv0[2]; + /* originator only */ + __le16 sn; + u8 ba_en; + u8 ba_winsize_idx; + __le16 ba_winsize; + /* recipient only */ + u8 peer_addr[eth_alen]; + u8 rst_ba_tid; + u8 rst_ba_sel; + u8 rst_ba_sb; + u8 band_idx; + u8 rsv1[4]; +} __packed; + +struct wtbl_smps { + __le16 tag; + __le16 len; + u8 smps; + u8 rsv[3]; +} __packed; + +enum { + wtbl_generic, + wtbl_rx, + wtbl_ht, + wtbl_vht, + wtbl_peer_ps, /* not used */ + wtbl_tx_ps, + wtbl_hdr_trans, + wtbl_sec_key, + wtbl_ba, + wtbl_rdg, /* obsoleted */ + wtbl_protect, /* not used */ + wtbl_clear, /* not used */ + wtbl_bf, + wtbl_smps, + wtbl_raw_data, /* debug only */ + wtbl_pn, + wtbl_spe, + wtbl_max_num +}; + +struct sta_ntlv_hdr { + u8 rsv[2]; + 
__le16 tlv_num; +} __packed; + +struct sta_req_hdr { + u8 bss_idx; + u8 wlan_idx_lo; + __le16 tlv_num; + u8 is_tlv_append; + u8 muar_idx; + u8 wlan_idx_hi; + u8 rsv; +} __packed; + +struct sta_rec_basic { + __le16 tag; + __le16 len; + __le32 conn_type; + u8 conn_state; + u8 qos; + __le16 aid; + u8 peer_addr[eth_alen]; + __le16 extra_info; +} __packed; + +struct sta_rec_ht { + __le16 tag; + __le16 len; + __le16 ht_cap; + u16 rsv; +} __packed; + +struct sta_rec_vht { + __le16 tag; + __le16 len; + __le32 vht_cap; + __le16 vht_rx_mcs_map; + __le16 vht_tx_mcs_map; + u8 rts_bw_sig; + u8 rsv[3]; +} __packed; + +struct sta_rec_uapsd { + __le16 tag; + __le16 len; + u8 dac_map; + u8 tac_map; + u8 max_sp; + u8 rsv0; + __le16 listen_interval; + u8 rsv1[2]; +} __packed; + +struct sta_rec_he { + __le16 tag; + __le16 len; + + __le32 he_cap; + + u8 t_frame_dur; + u8 max_ampdu_exp; + u8 bw_set; + u8 device_class; + u8 dcm_tx_mode; + u8 dcm_tx_max_nss; + u8 dcm_rx_mode; + u8 dcm_rx_max_nss; + u8 dcm_max_ru; + u8 punc_pream_rx; + u8 pkt_ext; + u8 rsv1; + + __le16 max_nss_mcs[cmd_he_mcs_bw_num]; + + u8 rsv2[2]; +} __packed; + +struct sta_rec_ba { + __le16 tag; + __le16 len; + u8 tid; + u8 ba_type; + u8 amsdu; + u8 ba_en; + __le16 ssn; + __le16 winsize; +} __packed; + +struct sta_rec_amsdu { + __le16 tag; + __le16 len; + u8 max_amsdu_num; + u8 max_mpdu_size; + u8 amsdu_en; + u8 rsv; +} __packed; + +struct sec_key { + u8 cipher_id; + u8 cipher_len; + u8 key_id; + u8 key_len; + u8 key[32]; +} __packed; + +struct sta_rec_sec { + __le16 tag; + __le16 len; + u8 add; + u8 n_cipher; + u8 rsv[2]; + + struct sec_key key[2]; +} __packed; + +struct sta_rec_state { + __le16 tag; + __le16 len; + __le32 flags; + u8 state; + u8 vht_opmode; + u8 action; + u8 rsv[1]; +} __packed; + +#define ht_mcs_mask_num 10 + +struct sta_rec_ra_info { + __le16 tag; + __le16 len; + __le16 legacy; + u8 rx_mcs_bitmask[ht_mcs_mask_num]; +} __packed; + +struct sta_rec_phy { + __le16 tag; + __le16 len; + __le16 basic_rate; 
+ u8 phy_type; + u8 ampdu; + u8 rts_policy; + u8 rcpi; + u8 rsv[2]; +} __packed; + +enum { + sta_rec_basic, + sta_rec_ra, + sta_rec_ra_cmm_info, + sta_rec_ra_update, + sta_rec_bf, + sta_rec_amsdu, + sta_rec_ba, + sta_rec_state, + sta_rec_tx_proc, /* for hdr trans and cso in cr4 */ + sta_rec_ht, + sta_rec_vht, + sta_rec_apps, + sta_rec_key, + sta_rec_wtbl, + sta_rec_he, + sta_rec_hw_amsdu, + sta_rec_wtbl_aadom, + sta_rec_key_v2, + sta_rec_muru, + sta_rec_muedca, + sta_rec_bfee, + sta_rec_phy = 0x15, + sta_rec_max_num +}; + +enum mt7921_cipher_type { + mt_cipher_none, + mt_cipher_wep40, + mt_cipher_wep104, + mt_cipher_wep128, + mt_cipher_tkip, + mt_cipher_aes_ccmp, + mt_cipher_ccmp_256, + mt_cipher_gcmp, + mt_cipher_gcmp_256, + mt_cipher_wapi, + mt_cipher_bip_cmac_128, +}; + +enum { + ch_switch_normal = 0, + ch_switch_scan = 3, + ch_switch_mcc = 4, + ch_switch_dfs = 5, + ch_switch_background_scan_start = 6, + ch_switch_background_scan_running = 7, + ch_switch_background_scan_stop = 8, + ch_switch_scan_bypass_dpd = 9 +}; + +enum { + thermal_sensor_temp_query, + thermal_sensor_manual_ctrl, + thermal_sensor_info_query, + thermal_sensor_task_ctrl, +}; + +enum { + mt_ebf = bit(0), /* explicit beamforming */ + mt_ibf = bit(1) /* implicit beamforming */ +}; + +#define mt7921_wtbl_update_max_size (sizeof(struct wtbl_req_hdr) + \ + sizeof(struct wtbl_generic) + \ + sizeof(struct wtbl_rx) + \ + sizeof(struct wtbl_ht) + \ + sizeof(struct wtbl_vht) + \ + sizeof(struct wtbl_hdr_trans) +\ + sizeof(struct wtbl_ba) + \ + sizeof(struct wtbl_smps)) + +#define mt7921_sta_update_max_size (sizeof(struct sta_req_hdr) + \ + sizeof(struct sta_rec_basic) + \ + sizeof(struct sta_rec_ht) + \ + sizeof(struct sta_rec_he) + \ + sizeof(struct sta_rec_ba) + \ + sizeof(struct sta_rec_vht) + \ + sizeof(struct sta_rec_uapsd) + \ + sizeof(struct sta_rec_amsdu) + \ + sizeof(struct tlv) + \ + mt7921_wtbl_update_max_size) + +#define mt7921_wtbl_update_ba_size (sizeof(struct wtbl_req_hdr) + \ + 
sizeof(struct wtbl_ba)) + +#define phy_mode_a bit(0) +#define phy_mode_b bit(1) +#define phy_mode_g bit(2) +#define phy_mode_gn bit(3) +#define phy_mode_an bit(4) +#define phy_mode_ac bit(5) +#define phy_mode_ax_24g bit(6) +#define phy_mode_ax_5g bit(7) +#define phy_mode_ax_6g bit(8) + +#define mode_cck bit(0) +#define mode_ofdm bit(1) +#define mode_ht bit(2) +#define mode_vht bit(3) +#define mode_he bit(4) + +#define sta_cap_wmm bit(0) +#define sta_cap_sgi_20 bit(4) +#define sta_cap_sgi_40 bit(5) +#define sta_cap_tx_stbc bit(6) +#define sta_cap_rx_stbc bit(7) +#define sta_cap_vht_sgi_80 bit(16) +#define sta_cap_vht_sgi_160 bit(17) +#define sta_cap_vht_tx_stbc bit(18) +#define sta_cap_vht_rx_stbc bit(19) +#define sta_cap_vht_ldpc bit(23) +#define sta_cap_ldpc bit(24) +#define sta_cap_ht bit(26) +#define sta_cap_vht bit(27) +#define sta_cap_he bit(28) + +/* he mac */ +#define sta_rec_he_cap_htc bit(0) +#define sta_rec_he_cap_bqr bit(1) +#define sta_rec_he_cap_bsr bit(2) +#define sta_rec_he_cap_om bit(3) +#define sta_rec_he_cap_amsdu_in_ampdu bit(4) +/* he phy */ +#define sta_rec_he_cap_dual_band bit(5) +#define sta_rec_he_cap_ldpc bit(6) +#define sta_rec_he_cap_trig_cqi_fk bit(7) +#define sta_rec_he_cap_partial_bw_ext_range bit(8) +/* stbc */ +#define sta_rec_he_cap_le_eq_80m_tx_stbc bit(9) +#define sta_rec_he_cap_le_eq_80m_rx_stbc bit(10) +#define sta_rec_he_cap_gt_80m_tx_stbc bit(11) +#define sta_rec_he_cap_gt_80m_rx_stbc bit(12) +/* gi */ +#define sta_rec_he_cap_su_ppdu_1ltf_8us_gi bit(13) +#define sta_rec_he_cap_su_mu_ppdu_4ltf_8us_gi bit(14) +#define sta_rec_he_cap_er_su_ppdu_1ltf_8us_gi bit(15) +#define sta_rec_he_cap_er_su_ppdu_4ltf_8us_gi bit(16) +#define sta_rec_he_cap_ndp_4ltf_3dot2ms_gi bit(17) +/* 242 tone */ +#define sta_rec_he_cap_bw20_ru242_support bit(18) +#define sta_rec_he_cap_tx_1024qam_under_ru242 bit(19) +#define sta_rec_he_cap_rx_1024qam_under_ru242 bit(20) + +struct mt7921_mcu_reg_event { + __le32 reg; + __le32 val; +} __packed; + +struct 
mt7921_bss_basic_tlv { + __le16 tag; + __le16 len; + u8 active; + u8 omac_idx; + u8 hw_bss_idx; + u8 band_idx; + __le32 conn_type; + u8 conn_state; + u8 wmm_idx; + u8 bssid[eth_alen]; + __le16 bmc_tx_wlan_idx; + __le16 bcn_interval; + u8 dtim_period; + u8 phymode; /* bit(0): a + * bit(1): b + * bit(2): g + * bit(3): gn + * bit(4): an + * bit(5): ac + */ + __le16 sta_idx; + u8 nonht_basic_phy; + u8 pad[3]; +} __packed; + +struct mt7921_bss_qos_tlv { + __le16 tag; + __le16 len; + u8 qos; + u8 pad[3]; +} __packed; + +struct mt7921_mcu_scan_ssid { + __le32 ssid_len; + u8 ssid[ieee80211_max_ssid_len]; +} __packed; + +struct mt7921_mcu_scan_channel { + u8 band; /* 1: 2.4ghz + * 2: 5.0ghz + * others: reserved + */ + u8 channel_num; +} __packed; + +struct mt7921_mcu_scan_match { + __le32 rssi_th; + u8 ssid[ieee80211_max_ssid_len]; + u8 ssid_len; + u8 rsv[3]; +} __packed; + +struct mt7921_hw_scan_req { + u8 seq_num; + u8 bss_idx; + u8 scan_type; /* 0: passive scan + * 1: active scan + */ + u8 ssid_type; /* bit(0) wildcard ssid + * bit(1) p2p wildcard ssid + * bit(2) specified ssid + wildcard ssid + * bit(2) + ssid_type_ext bit(0) specified ssid only + */ + u8 ssids_num; + u8 probe_req_num; /* number of probe request for each ssid */ + u8 scan_func; /* bit(0) enable random mac scan + * bit(1) disable dbdc scan type 1~3. + * bit(2) use dbdc scan type 3 (dedicated one rf to scan). + */ + u8 version; /* 0: not support fields after ies. + * 1: support fields after ies. 
+ */ + struct mt7921_mcu_scan_ssid ssids[4]; + __le16 probe_delay_time; + __le16 channel_dwell_time; /* channel dwell interval */ + __le16 timeout_value; + u8 channel_type; /* 0: full channels + * 1: only 2.4ghz channels + * 2: only 5ghz channels + * 3: p2p social channel only (channel #1, #6 and #11) + * 4: specified channels + * others: reserved + */ + u8 channels_num; /* valid when channel_type is 4 */ + /* valid when channels_num is set */ + struct mt7921_mcu_scan_channel channels[32]; + __le16 ies_len; + u8 ies[mt7921_scan_ie_len]; + /* following fields are valid if version > 0 */ + u8 ext_channels_num; + u8 ext_ssids_num; + __le16 channel_min_dwell_time; + struct mt7921_mcu_scan_channel ext_channels[32]; + struct mt7921_mcu_scan_ssid ext_ssids[6]; + u8 bssid[eth_alen]; + u8 random_mac[eth_alen]; /* valid when bit(1) in scan_func is set. */ + u8 pad[63]; + u8 ssid_type_ext; +} __packed; + +#define scan_done_event_max_channel_num 64 +struct mt7921_hw_scan_done { + u8 seq_num; + u8 sparse_channel_num; + struct mt7921_mcu_scan_channel sparse_channel; + u8 complete_channel_num; + u8 current_state; + u8 version; + u8 pad; + __le32 beacon_scan_num; + u8 pno_enabled; + u8 pad2[3]; + u8 sparse_channel_valid_num; + u8 pad3[3]; + u8 channel_num[scan_done_event_max_channel_num]; + /* idle format for channel_idle_time + * 0: first bytes: idle time(ms) 2nd byte: dwell time(ms) + * 1: first bytes: idle time(8ms) 2nd byte: dwell time(8ms) + * 2: dwell time (16us) + */ + __le16 channel_idle_time[scan_done_event_max_channel_num]; + /* beacon and probe response count */ + u8 beacon_probe_num[scan_done_event_max_channel_num]; + u8 mdrdy_count[scan_done_event_max_channel_num]; + __le32 beacon_2g_num; + __le32 beacon_5g_num; +} __packed; + +struct mt7921_mcu_bss_event { + u8 bss_idx; + u8 is_absent; + u8 free_quota; + u8 pad; +} __packed; + +enum { + phy_type_hr_dsss_index = 0, + phy_type_erp_index, + phy_type_erp_p2p_index, + phy_type_ofdm_index, + phy_type_ht_index, + 
phy_type_vht_index, + phy_type_he_index, + phy_type_index_num +}; + +#define phy_type_bit_hr_dsss bit(phy_type_hr_dsss_index) +#define phy_type_bit_erp bit(phy_type_erp_index) +#define phy_type_bit_ofdm bit(phy_type_ofdm_index) +#define phy_type_bit_ht bit(phy_type_ht_index) +#define phy_type_bit_vht bit(phy_type_vht_index) +#define phy_type_bit_he bit(phy_type_he_index) + +#define mt_wtbl_rate_tx_mode genmask(9, 6) +#define mt_wtbl_rate_mcs genmask(5, 0) +#define mt_wtbl_rate_nss genmask(12, 10) +#define mt_wtbl_rate_he_gi genmask(7, 4) +#define mt_wtbl_rate_gi genmask(3, 0) + +struct mt7921_mcu_tx_config { + u8 peer_addr[eth_alen]; + u8 sw; + u8 dis_rx_hdr_tran; + + u8 aad_om; + u8 pfmu_idx; + __le16 partial_aid; + + u8 ibf; + u8 ebf; + u8 is_ht; + u8 is_vht; + + u8 mesh; + u8 baf_en; + u8 cf_ack; + u8 rdg_ba; + + u8 rdg; + u8 pm; + u8 rts; + u8 smps; + + u8 txop_ps; + u8 not_update_ipsm; + u8 skip_tx; + u8 ldpc; + + u8 qos; + u8 from_ds; + u8 to_ds; + u8 dyn_bw; + + u8 amdsu_cross_lg; + u8 check_per; + u8 gid_63; + u8 he; + + u8 vht_ibf; + u8 vht_ebf; + u8 vht_ldpc; + u8 he_ldpc; +} __packed; + +struct mt7921_mcu_sec_config { + u8 wpi_flag; + u8 rv; + u8 ikv; + u8 rkv; + + u8 rcid; + u8 rca1; + u8 rca2; + u8 even_pn; + + u8 key_id; + u8 muar_idx; + u8 cipher_suit; + u8 rsv[1]; +} __packed; + +struct mt7921_mcu_key_config { + u8 key[32]; +} __packed; + +struct mt7921_mcu_rate_info { + u8 mpdu_fail; + u8 mpdu_tx; + u8 rate_idx; + u8 rsv[1]; + __le16 rate[8]; +} __packed; + +struct mt7921_mcu_ba_config { + u8 ba_en; + u8 rsv[3]; + __le32 ba_winsize; +} __packed; + +struct mt7921_mcu_ant_id_config { + u8 ant_id[4]; +} __packed; + +struct mt7921_mcu_peer_cap { + struct mt7921_mcu_ant_id_config ant_id_config; + + u8 power_offset; + u8 bw_selector; + u8 change_bw_rate_n; + u8 bw; + u8 spe_idx; + + u8 g2; + u8 g4; + u8 g8; + u8 g16; + + u8 mmss; + u8 ampdu_factor; + u8 rsv[1]; +} __packed; + +struct mt7921_mcu_rx_cnt { + u8 rx_rcpi[4]; + u8 rx_cc[4]; + u8 rx_cc_sel; + 
u8 ce_rmsd; + u8 rsv[2]; +} __packed; + +struct mt7921_mcu_tx_cnt { + __le16 rate1_cnt; + __le16 rate1_fail_cnt; + __le16 rate2_cnt; + __le16 rate3_cnt; + __le16 cur_bw_tx_cnt; + __le16 cur_bw_tx_fail_cnt; + __le16 other_bw_tx_cnt; + __le16 other_bw_tx_fail_cnt; +} __packed; + +struct mt7921_mcu_wlan_info_event { + struct mt7921_mcu_tx_config tx_config; + struct mt7921_mcu_sec_config sec_config; + struct mt7921_mcu_key_config key_config; + struct mt7921_mcu_rate_info rate_info; + struct mt7921_mcu_ba_config ba_config; + struct mt7921_mcu_peer_cap peer_cap; + struct mt7921_mcu_rx_cnt rx_cnt; + struct mt7921_mcu_tx_cnt tx_cnt; +} __packed; + +struct mt7921_mcu_wlan_info { + __le32 wlan_idx; + struct mt7921_mcu_wlan_info_event event; +} __packed; +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h + u8 sta_work_count; +
Networking
1c099ab44727c8e42fe4de4d91b53cec3ef02860
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: add dma support
add dma and register access support to mt7921e driver to set up the link for the data movement between the host and mt7921 mac, or the host and mt7921 mcu.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['c']
2
359
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. */ + +#include "mt7921.h" +#include "../dma.h" +#include "mac.h" + +int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc) +{ + int i, err; + + err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, mt_tx_ring_base); + if (err < 0) + return err; + + for (i = 0; i <= mt_txq_psd; i++) + phy->mt76->q_tx[i] = phy->mt76->q_tx[0]; + + return 0; +} + +void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + enum rx_pkt_type type; + u16 flag; + + type = field_get(mt_rxd0_pkt_type, le32_to_cpu(rxd[0])); + flag = field_get(mt_rxd0_pkt_flag, le32_to_cpu(rxd[0])); + + if (type == pkt_type_rx_event && flag == 0x1) + type = pkt_type_normal_mcu; + + switch (type) { + case pkt_type_txrx_notify: + mt7921_mac_tx_free(dev, skb); + break; + case pkt_type_rx_event: + mt7921_mcu_rx_event(dev, skb); + break; + case pkt_type_normal_mcu: + case pkt_type_normal: + if (!mt7921_mac_fill_rx(dev, skb)) { + mt76_rx(&dev->mt76, q, skb); + return; + } + fallthrough; + default: + dev_kfree_skb(skb); + break; + } +} + +static void +mt7921_tx_cleanup(struct mt7921_dev *dev) +{ + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[mt_mcuq_wm], false); + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[mt_mcuq_wa], false); +} + +static int mt7921_poll_tx(struct napi_struct *napi, int budget) +{ + struct mt7921_dev *dev; + + dev = container_of(napi, struct mt7921_dev, mt76.tx_napi); + + mt7921_tx_cleanup(dev); + + if (napi_complete_done(napi, 0)) + mt7921_irq_enable(dev, mt_int_tx_done_all); + + return 0; +} + +void mt7921_dma_prefetch(struct mt7921_dev *dev) +{ +#define prefetch(base, depth) ((base) << 16 | 
(depth)) + + mt76_wr(dev, mt_wfdma0_rx_ring0_ext_ctrl, prefetch(0x0, 0x4)); + mt76_wr(dev, mt_wfdma0_rx_ring2_ext_ctrl, prefetch(0x40, 0x4)); + mt76_wr(dev, mt_wfdma0_rx_ring3_ext_ctrl, prefetch(0x80, 0x4)); + mt76_wr(dev, mt_wfdma0_rx_ring4_ext_ctrl, prefetch(0xc0, 0x4)); + mt76_wr(dev, mt_wfdma0_rx_ring5_ext_ctrl, prefetch(0x100, 0x4)); + + mt76_wr(dev, mt_wfdma0_tx_ring0_ext_ctrl, prefetch(0x140, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring1_ext_ctrl, prefetch(0x180, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring2_ext_ctrl, prefetch(0x1c0, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring3_ext_ctrl, prefetch(0x200, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring4_ext_ctrl, prefetch(0x240, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring5_ext_ctrl, prefetch(0x280, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring6_ext_ctrl, prefetch(0x2c0, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring16_ext_ctrl, prefetch(0x340, 0x4)); + mt76_wr(dev, mt_wfdma0_tx_ring17_ext_ctrl, prefetch(0x380, 0x4)); +} + +static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr) +{ + static const struct { + u32 phys; + u32 mapped; + u32 size; + } fixed_map[] = { + { 0x00400000, 0x80000, 0x10000}, /* wf_mcu_sysram */ + { 0x00410000, 0x90000, 0x10000}, /* wf_mcu_sysram (configure register) */ + { 0x40000000, 0x70000, 0x10000}, /* wf_umac_sysram */ + { 0x54000000, 0x02000, 0x1000 }, /* wfdma pcie0 mcu dma0 */ + { 0x55000000, 0x03000, 0x1000 }, /* wfdma pcie0 mcu dma1 */ + { 0x58000000, 0x06000, 0x1000 }, /* wfdma pcie1 mcu dma0 (mem_dma) */ + { 0x59000000, 0x07000, 0x1000 }, /* wfdma pcie1 mcu dma1 */ + { 0x7c000000, 0xf0000, 0x10000 }, /* conn_infra */ + { 0x7c020000, 0xd0000, 0x10000 }, /* conn_infra, wfdma */ + { 0x7c060000, 0xe0000, 0x10000}, /* conn_infra, conn_host_csr_top */ + { 0x80020000, 0xb0000, 0x10000 }, /* wf_top_misc_off */ + { 0x81020000, 0xc0000, 0x10000 }, /* wf_top_misc_on */ + { 0x820c0000, 0x08000, 0x4000 }, /* wf_umac_top (ple) */ + { 0x820c8000, 0x0c000, 0x2000 }, /* wf_umac_top (pse) */ + { 0x820cc000, 0x0e000, 0x2000 }, 
/* wf_umac_top (pp) */ + { 0x820ce000, 0x21c00, 0x0200 }, /* wf_lmac_top (wf_sec) */ + { 0x820cf000, 0x22000, 0x1000 }, /* wf_lmac_top (wf_pf) */ + { 0x820d0000, 0x30000, 0x10000 }, /* wf_lmac_top (wf_wtblon) */ + { 0x820e0000, 0x20000, 0x0400 }, /* wf_lmac_top bn0 (wf_cfg) */ + { 0x820e1000, 0x20400, 0x0200 }, /* wf_lmac_top bn0 (wf_trb) */ + { 0x820e2000, 0x20800, 0x0400 }, /* wf_lmac_top bn0 (wf_agg) */ + { 0x820e3000, 0x20c00, 0x0400 }, /* wf_lmac_top bn0 (wf_arb) */ + { 0x820e4000, 0x21000, 0x0400 }, /* wf_lmac_top bn0 (wf_tmac) */ + { 0x820e5000, 0x21400, 0x0800 }, /* wf_lmac_top bn0 (wf_rmac) */ + { 0x820e7000, 0x21e00, 0x0200 }, /* wf_lmac_top bn0 (wf_dma) */ + { 0x820e9000, 0x23400, 0x0200 }, /* wf_lmac_top bn0 (wf_wtbloff) */ + { 0x820ea000, 0x24000, 0x0200 }, /* wf_lmac_top bn0 (wf_etbf) */ + { 0x820eb000, 0x24200, 0x0400 }, /* wf_lmac_top bn0 (wf_lpon) */ + { 0x820ec000, 0x24600, 0x0200 }, /* wf_lmac_top bn0 (wf_int) */ + { 0x820ed000, 0x24800, 0x0800 }, /* wf_lmac_top bn0 (wf_mib) */ + { 0x820f0000, 0xa0000, 0x0400 }, /* wf_lmac_top bn1 (wf_cfg) */ + { 0x820f1000, 0xa0600, 0x0200 }, /* wf_lmac_top bn1 (wf_trb) */ + { 0x820f2000, 0xa0800, 0x0400 }, /* wf_lmac_top bn1 (wf_agg) */ + { 0x820f3000, 0xa0c00, 0x0400 }, /* wf_lmac_top bn1 (wf_arb) */ + { 0x820f4000, 0xa1000, 0x0400 }, /* wf_lmac_top bn1 (wf_tmac) */ + { 0x820f5000, 0xa1400, 0x0800 }, /* wf_lmac_top bn1 (wf_rmac) */ + { 0x820f7000, 0xa1e00, 0x0200 }, /* wf_lmac_top bn1 (wf_dma) */ + { 0x820f9000, 0xa3400, 0x0200 }, /* wf_lmac_top bn1 (wf_wtbloff) */ + { 0x820fa000, 0xa4000, 0x0200 }, /* wf_lmac_top bn1 (wf_etbf) */ + { 0x820fb000, 0xa4200, 0x0400 }, /* wf_lmac_top bn1 (wf_lpon) */ + { 0x820fc000, 0xa4600, 0x0200 }, /* wf_lmac_top bn1 (wf_int) */ + { 0x820fd000, 0xa4800, 0x0800 }, /* wf_lmac_top bn1 (wf_mib) */ + }; + int i; + + if (addr < 0x100000) + return addr; + + for (i = 0; i < array_size(fixed_map); i++) { + u32 ofs; + + if (addr < fixed_map[i].phys) + continue; + + ofs = addr - 
fixed_map[i].phys; + if (ofs > fixed_map[i].size) + continue; + + return fixed_map[i].mapped + ofs; + } + + if ((addr >= 0x18000000 && addr < 0x18c00000) || + (addr >= 0x70000000 && addr < 0x78000000) || + (addr >= 0x7c000000 && addr < 0x7c400000)) + return mt7921_reg_map_l1(dev, addr); + + dev_err(dev->mt76.dev, "access currently unsupported address %08x ", + addr); + + return 0; +} + +static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + u32 addr = __mt7921_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + u32 addr = __mt7921_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + u32 addr = __mt7921_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +static int mt7921_dmashdl_disabled(struct mt7921_dev *dev) +{ + mt76_clear(dev, mt_wfdma0_glo_cfg_ext0, mt_wfdma0_csr_tx_dmashdl_enable); + mt76_set(dev, mt_dmashdl_sw_control, mt_dmashdl_dmashdl_bypass); + + return 0; +} + +int mt7921_dma_init(struct mt7921_dev *dev) +{ + /* increase buffer size to receive large vht/he mpdus */ + struct mt76_bus_ops *bus_ops; + int rx_buf_size = mt_rx_buf_size * 2; + int ret; + + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + gfp_kernel); + if (!bus_ops) + return -enomem; + + bus_ops->rr = mt7921_rr; + bus_ops->wr = mt7921_wr; + bus_ops->rmw = mt7921_rmw; + dev->mt76.bus = bus_ops; + + mt76_dma_attach(&dev->mt76); + + /* reset */ + mt76_clear(dev, mt_wfdma0_rst, + mt_wfdma0_rst_dmashdl_all_rst | + mt_wfdma0_rst_logic_rst); + + mt76_set(dev, mt_wfdma0_rst, + mt_wfdma0_rst_dmashdl_all_rst | + 
mt_wfdma0_rst_logic_rst); + + ret = mt7921_dmashdl_disabled(dev); + if (ret) + return ret; + + /* disable wfdma0 */ + mt76_clear(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | + mt_wfdma0_glo_cfg_rx_dma_en | + mt_wfdma0_glo_cfg_csr_disp_base_ptr_chain_en | + mt_wfdma0_glo_cfg_omit_tx_info | + mt_wfdma0_glo_cfg_omit_rx_info | + mt_wfdma0_glo_cfg_omit_rx_info_pfet2); + + mt76_poll(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_busy | + mt_wfdma0_glo_cfg_rx_dma_busy, 0, 1000); + + /* init tx queue */ + ret = mt7921_init_tx_queues(&dev->phy, mt7921_txq_band0, + mt7921_tx_ring_size); + if (ret) + return ret; + + mt76_wr(dev, mt_wfdma0_tx_ring0_ext_ctrl, 0x4); + + /* command to wm */ + ret = mt76_init_mcu_queue(&dev->mt76, mt_mcuq_wm, mt7921_txq_mcu_wm, + mt7921_tx_mcu_ring_size, mt_tx_ring_base); + if (ret) + return ret; + + /* firmware download */ + ret = mt76_init_mcu_queue(&dev->mt76, mt_mcuq_fwdl, mt7921_txq_fwdl, + mt7921_tx_fwdl_ring_size, mt_tx_ring_base); + if (ret) + return ret; + + /* event from wm before firmware download */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[mt_rxq_mcu], + mt7921_rxq_mcu_wm, + mt7921_rx_mcu_ring_size, + rx_buf_size, mt_rx_event_ring_base); + if (ret) + return ret; + + /* change mcu queue after firmware download */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[mt_rxq_mcu_wa], + mt7921_rxq_mcu_wm, + mt7921_rx_mcu_ring_size, + rx_buf_size, mt_wfdma0(0x540)); + if (ret) + return ret; + + /* rx data */ + ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[mt_rxq_main], + mt7921_rxq_band0, mt7921_rx_ring_size, + rx_buf_size, mt_rx_data_ring_base); + if (ret) + return ret; + + ret = mt76_init_queues(dev); + if (ret < 0) + return ret; + + netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi, + mt7921_poll_tx, napi_poll_weight); + napi_enable(&dev->mt76.tx_napi); + + /* configure perfetch settings */ + mt7921_dma_prefetch(dev); + + /* reset dma idx */ + mt76_wr(dev, mt_wfdma0_rst_dtx_ptr, ~0); + + /* configure delay interrupt */ 
+ mt76_wr(dev, mt_wfdma0_pri_dly_int_cfg0, 0); + + mt76_set(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_wb_ddone | + mt_wfdma0_glo_cfg_fifo_little_endian | + mt_wfdma0_glo_cfg_clk_gat_dis | + mt_wfdma0_glo_cfg_omit_tx_info | + mt_wfdma0_glo_cfg_csr_disp_base_ptr_chain_en | + mt_wfdma0_glo_cfg_omit_rx_info_pfet2); + + mt76_set(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | mt_wfdma0_glo_cfg_rx_dma_en); + + mt76_set(dev, 0x54000120, bit(1)); + + /* enable interrupts for tx/rx rings */ + mt7921_irq_enable(dev, mt_int_rx_done_all | mt_int_tx_done_all | + mt_int_mcu_cmd); + + return 0; +} + +void mt7921_dma_cleanup(struct mt7921_dev *dev) +{ + /* disable */ + mt76_clear(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | + mt_wfdma0_glo_cfg_rx_dma_en | + mt_wfdma0_glo_cfg_csr_disp_base_ptr_chain_en | + mt_wfdma0_glo_cfg_omit_tx_info | + mt_wfdma0_glo_cfg_omit_rx_info | + mt_wfdma0_glo_cfg_omit_rx_info_pfet2); + + /* reset */ + mt76_clear(dev, mt_wfdma0_rst, + mt_wfdma0_rst_dmashdl_all_rst | + mt_wfdma0_rst_logic_rst); + + mt76_set(dev, mt_wfdma0_rst, + mt_wfdma0_rst_dmashdl_all_rst | + mt_wfdma0_rst_logic_rst); + + mt76_dma_cleanup(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c + /* re-init prefetch settings after reset */ + mt7921_dma_prefetch(dev); +
Networking
12d1c31788ad703d0f61d399db14f45f0ad0e888
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: add eeprom support
add eeprom support to mt7921 to determine the capability the card has such as indentificaiton, mac address, the band and antenna number the card able to support.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
2
127
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. */ + +#include "mt7921.h" +#include "eeprom.h" + +static u32 mt7921_eeprom_read(struct mt7921_dev *dev, u32 offset) +{ + u8 *data = dev->mt76.eeprom.data; + + if (data[offset] == 0xff) + mt7921_mcu_get_eeprom(dev, offset); + + return data[offset]; +} + +static int mt7921_eeprom_load(struct mt7921_dev *dev) +{ + int ret; + + ret = mt76_eeprom_init(&dev->mt76, mt7921_eeprom_size); + if (ret < 0) + return ret; + + memset(dev->mt76.eeprom.data, -1, mt7921_eeprom_size); + + return 0; +} + +static int mt7921_check_eeprom(struct mt7921_dev *dev) +{ + u8 *eeprom = dev->mt76.eeprom.data; + u16 val; + + mt7921_eeprom_read(dev, mt_ee_chip_id); + val = get_unaligned_le16(eeprom); + + switch (val) { + case 0x7961: + return 0; + default: + return -einval; + } +} + +void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy) +{ + struct mt7921_dev *dev = phy->dev; + u32 val; + + val = mt7921_eeprom_read(dev, mt_ee_wifi_conf); + val = field_get(mt_ee_wifi_conf_band_sel, val); + + switch (val) { + case mt_ee_5ghz: + phy->mt76->cap.has_5ghz = true; + break; + case mt_ee_2ghz: + phy->mt76->cap.has_2ghz = true; + break; + default: + phy->mt76->cap.has_2ghz = true; + phy->mt76->cap.has_5ghz = true; + break; + } +} + +static void mt7921_eeprom_parse_hw_cap(struct mt7921_dev *dev) +{ + u8 tx_mask; + + mt7921_eeprom_parse_band_config(&dev->phy); + + /* todo: read nss with mcu_cmd_nic_capv2 */ + tx_mask = 2; + dev->chainmask = bit(tx_mask) - 1; + dev->mphy.antenna_mask = dev->chainmask; + dev->mphy.chainmask = dev->mphy.antenna_mask; +} + +int mt7921_eeprom_init(struct mt7921_dev *dev) +{ + int ret; + + ret = mt7921_eeprom_load(dev); + if (ret < 0) + return ret; + + ret = mt7921_check_eeprom(dev); + if (ret) + return ret; + + 
mt7921_eeprom_parse_hw_cap(dev); + memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + mt_ee_mac_addr, + eth_alen); + + mt76_eeprom_override(&dev->mphy); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h +/* spdx-license-identifier: isc */ +/* copyright (c) 2020 mediatek inc. */ + +#ifndef __mt7921_eeprom_h +#define __mt7921_eeprom_h + +#include "mt7921.h" + +enum mt7921_eeprom_field { + mt_ee_chip_id = 0x000, + mt_ee_version = 0x002, + mt_ee_mac_addr = 0x004, + mt_ee_wifi_conf = 0x07c, + __mt_ee_max = 0x3bf +}; + +#define mt_ee_wifi_conf_tx_mask bit(0) +#define mt_ee_wifi_conf_band_sel genmask(3, 2) + +enum mt7921_eeprom_band { + mt_ee_na, + mt_ee_5ghz, + mt_ee_2ghz, + mt_ee_dual_band, +}; + +#endif
Networking
bb1f6aaf71d658ed2f41e109d502d427ea0577fb
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: add ieee80211_ops
filling ieee80211_ops with the mt7921 operations.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['c']
1
960
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. */ + +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/pci.h> +#include <linux/module.h> +#include "mt7921.h" +#include "mcu.h" + +static void +mt7921_gen_ppe_thresh(u8 *he_ppet, int nss) +{ + u8 i, ppet_bits, ppet_size, ru_bit_mask = 0x7; /* he80 */ + u8 ppet16_ppet8_ru3_ru0[] = {0x1c, 0xc7, 0x71}; + + he_ppet[0] = field_prep(ieee80211_ppe_thres_nss_mask, nss - 1) | + field_prep(ieee80211_ppe_thres_ru_index_bitmask_mask, + ru_bit_mask); + + ppet_bits = ieee80211_ppe_thres_info_ppet_size * + nss * hweight8(ru_bit_mask) * 2; + ppet_size = div_round_up(ppet_bits, 8); + + for (i = 0; i < ppet_size - 1; i++) + he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3]; + + he_ppet[i + 1] = ppet16_ppet8_ru3_ru0[i % 3] & + (0xff >> (8 - (ppet_bits - 1) % 8)); +} + +static int +mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band, + struct ieee80211_sband_iftype_data *data) +{ + int i, idx = 0; + int nss = hweight8(phy->mt76->chainmask); + u16 mcs_map = 0; + + for (i = 0; i < 8; i++) { + if (i < nss) + mcs_map |= (ieee80211_he_mcs_support_0_11 << (i * 2)); + else + mcs_map |= (ieee80211_he_mcs_not_supported << (i * 2)); + } + + for (i = 0; i < num_nl80211_iftypes; i++) { + struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; + struct ieee80211_he_cap_elem *he_cap_elem = + &he_cap->he_cap_elem; + struct ieee80211_he_mcs_nss_supp *he_mcs = + &he_cap->he_mcs_nss_supp; + + switch (i) { + case nl80211_iftype_station: + break; + default: + continue; + } + + data[idx].types_mask = bit(i); + he_cap->has_he = true; + + he_cap_elem->mac_cap_info[0] = + ieee80211_he_mac_cap0_htc_he; + he_cap_elem->mac_cap_info[3] = + ieee80211_he_mac_cap3_omi_control | + 
ieee80211_he_mac_cap3_max_ampdu_len_exp_reserved; + he_cap_elem->mac_cap_info[4] = + ieee80211_he_mac_cap4_amdsu_in_ampdu; + + if (band == nl80211_band_2ghz) + he_cap_elem->phy_cap_info[0] = + ieee80211_he_phy_cap0_channel_width_set_40mhz_in_2g; + else if (band == nl80211_band_5ghz) + he_cap_elem->phy_cap_info[0] = + ieee80211_he_phy_cap0_channel_width_set_40mhz_80mhz_in_5g | + ieee80211_he_phy_cap0_channel_width_set_80plus80_mhz_in_5g; + + he_cap_elem->phy_cap_info[1] = + ieee80211_he_phy_cap1_ldpc_coding_in_payload; + he_cap_elem->phy_cap_info[2] = + ieee80211_he_phy_cap2_stbc_tx_under_80mhz | + ieee80211_he_phy_cap2_stbc_rx_under_80mhz; + + switch (i) { + case nl80211_iftype_station: + he_cap_elem->mac_cap_info[0] |= + ieee80211_he_mac_cap0_twt_req; + he_cap_elem->mac_cap_info[1] |= + ieee80211_he_mac_cap1_tf_mac_pad_dur_16us; + + if (band == nl80211_band_2ghz) + he_cap_elem->phy_cap_info[0] |= + ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_2g; + else if (band == nl80211_band_5ghz) + he_cap_elem->phy_cap_info[0] |= + ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_5g; + + he_cap_elem->phy_cap_info[1] |= + ieee80211_he_phy_cap1_device_class_a | + ieee80211_he_phy_cap1_he_ltf_and_gi_for_he_ppdus_0_8us; + he_cap_elem->phy_cap_info[3] |= + ieee80211_he_phy_cap3_dcm_max_const_tx_qpsk | + ieee80211_he_phy_cap3_dcm_max_const_rx_qpsk; + he_cap_elem->phy_cap_info[6] |= + ieee80211_he_phy_cap6_trig_cqi_fb | + ieee80211_he_phy_cap6_partial_bw_ext_range | + ieee80211_he_phy_cap6_ppe_threshold_present; + he_cap_elem->phy_cap_info[7] |= + ieee80211_he_phy_cap7_power_boost_factor_ar | + ieee80211_he_phy_cap7_he_su_mu_ppdu_4xltf_and_08_us_gi; + he_cap_elem->phy_cap_info[8] |= + ieee80211_he_phy_cap8_20mhz_in_40mhz_he_ppdu_in_2g | + ieee80211_he_phy_cap8_dcm_max_ru_484; + he_cap_elem->phy_cap_info[9] |= + ieee80211_he_phy_cap9_longer_than_16_sigb_ofdm_sym | + ieee80211_he_phy_cap9_non_triggered_cqi_feedback | + 
ieee80211_he_phy_cap9_tx_1024_qam_less_than_242_tone_ru | + ieee80211_he_phy_cap9_rx_1024_qam_less_than_242_tone_ru | + ieee80211_he_phy_cap9_rx_full_bw_su_using_mu_with_comp_sigb | + ieee80211_he_phy_cap9_rx_full_bw_su_using_mu_with_non_comp_sigb; + break; + } + + he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map); + he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map); + he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + if (he_cap_elem->phy_cap_info[6] & + ieee80211_he_phy_cap6_ppe_threshold_present) { + mt7921_gen_ppe_thresh(he_cap->ppe_thres, nss); + } else { + he_cap_elem->phy_cap_info[9] |= + ieee80211_he_phy_cap9_nomimal_pkt_padding_16us; + } + idx++; + } + + return idx; +} + +void mt7921_set_stream_he_caps(struct mt7921_phy *phy) +{ + struct ieee80211_sband_iftype_data *data; + struct ieee80211_supported_band *band; + int n; + + if (phy->mt76->cap.has_2ghz) { + data = phy->iftype[nl80211_band_2ghz]; + n = mt7921_init_he_caps(phy, nl80211_band_2ghz, data); + + band = &phy->mt76->sband_2g.sband; + band->iftype_data = data; + band->n_iftype_data = n; + } + + if (phy->mt76->cap.has_5ghz) { + data = phy->iftype[nl80211_band_5ghz]; + n = mt7921_init_he_caps(phy, nl80211_band_5ghz, data); + + band = &phy->mt76->sband_5g.sband; + band->iftype_data = data; + band->n_iftype_data = n; + } +} + +static int mt7921_start(struct ieee80211_hw *hw) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + + mutex_lock(&dev->mt76.mutex); + + mt7921_mcu_set_mac(dev, 0, true, false); + mt7921_mcu_set_channel_domain(phy); + mt7921_mcu_set_chan_info(phy, mcu_ext_cmd_set_rx_path); + mt7921_mac_reset_counters(phy); + set_bit(mt76_state_running, &phy->mt76->state); + + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + mt7921_watchdog_time); + + 
mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt7921_stop(struct ieee80211_hw *hw) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + + cancel_delayed_work_sync(&phy->mt76->mac_work); + + mutex_lock(&dev->mt76.mutex); + clear_bit(mt76_state_running, &phy->mt76->state); + mt7921_mcu_set_mac(dev, 0, false, false); + mutex_unlock(&dev->mt76.mutex); +} + +static inline int get_free_idx(u32 mask, u8 start, u8 end) +{ + return ffs(~mask & genmask(end, start)); +} + +static int get_omac_idx(enum nl80211_iftype type, u64 mask) +{ + int i; + + switch (type) { + case nl80211_iftype_station: + /* prefer hw bssid slot 1-3 */ + i = get_free_idx(mask, hw_bssid_1, hw_bssid_3); + if (i) + return i - 1; + + if (type != nl80211_iftype_station) + break; + + /* next, try to find a free repeater entry for the sta */ + i = get_free_idx(mask >> repeater_bssid_start, 0, + repeater_bssid_max - repeater_bssid_start); + if (i) + return i + 32 - 1; + + i = get_free_idx(mask, ext_bssid_1, ext_bssid_max); + if (i) + return i - 1; + + if (~mask & bit(hw_bssid_0)) + return hw_bssid_0; + + break; + case nl80211_iftype_monitor: + /* ap uses hw bssid 0 and ext bssid */ + if (~mask & bit(hw_bssid_0)) + return hw_bssid_0; + + i = get_free_idx(mask, ext_bssid_1, ext_bssid_max); + if (i) + return i - 1; + + break; + default: + warn_on(1); + break; + } + + return -1; +} + +static int mt7921_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt76_txq *mtxq; + int idx, ret = 0; + + mutex_lock(&dev->mt76.mutex); + + if (vif->type == nl80211_iftype_monitor && + is_zero_ether_addr(vif->addr)) + phy->monitor_vif = vif; + + mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1; + if (mvif->mt76.idx >= mt7921_max_interfaces) { + ret = -enospc; + goto out; + } + + idx = 
get_omac_idx(vif->type, phy->omac_mask); + if (idx < 0) { + ret = -enospc; + goto out; + } + mvif->mt76.omac_idx = idx; + mvif->phy = phy; + mvif->mt76.band_idx = 0; + mvif->mt76.wmm_idx = mvif->mt76.idx % mt7921_max_wmm_sets; + + ret = mt7921_mcu_uni_add_dev(dev, vif, true); + if (ret) + goto out; + + dev->mt76.vif_mask |= bit(mvif->mt76.idx); + phy->omac_mask |= bit_ull(mvif->mt76.omac_idx); + + idx = mt7921_wtbl_reserved - mvif->mt76.idx; + + init_list_head(&mvif->sta.stats_list); + init_list_head(&mvif->sta.poll_list); + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.ext_phy = mvif->mt76.band_idx; + mvif->sta.wcid.hw_key_idx = -1; + mvif->sta.wcid.tx_info |= mt_wcid_tx_info_set; + mt7921_mac_wtbl_update(dev, idx, + mt_wtbl_update_adm_count_clear); + + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + if (vif->txq) { + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = &mvif->sta.wcid; + } + + if (vif->type != nl80211_iftype_ap && + (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3)) + vif->offload_flags = 0; + + vif->offload_flags |= ieee80211_offload_encap_4addr; + +out: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static void mt7921_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_sta *msta = &mvif->sta; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + int idx = msta->wcid.idx; + + if (vif == phy->monitor_vif) + phy->monitor_vif = null; + + mt7921_mcu_uni_add_dev(dev, vif, false); + + rcu_assign_pointer(dev->mt76.wcid[idx], null); + + mutex_lock(&dev->mt76.mutex); + dev->mt76.vif_mask &= ~bit(mvif->mt76.idx); + phy->omac_mask &= ~bit_ull(mvif->mt76.omac_idx); + mutex_unlock(&dev->mt76.mutex); + + spin_lock_bh(&dev->sta_poll_lock); + if (!list_empty(&msta->poll_list)) + list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); +} + +int mt7921_set_channel(struct mt7921_phy 
*phy) +{ + struct mt7921_dev *dev = phy->dev; + int ret; + + cancel_delayed_work_sync(&phy->mt76->mac_work); + + mutex_lock(&dev->mt76.mutex); + set_bit(mt76_reset, &phy->mt76->state); + + mt76_set_channel(phy->mt76); + + ret = mt7921_mcu_set_chan_info(phy, mcu_ext_cmd_channel_switch); + if (ret) + goto out; + + mt7921_mac_set_timing(phy); + + mt7921_mac_reset_counters(phy); + phy->noise = 0; + +out: + clear_bit(mt76_reset, &phy->mt76->state); + mutex_unlock(&dev->mt76.mutex); + + mt76_txq_schedule_all(phy->mt76); + + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, + mt7921_watchdog_time); + + return ret; +} + +static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_sta *msta = sta ? (struct mt7921_sta *)sta->drv_priv : + &mvif->sta; + struct mt76_wcid *wcid = &msta->wcid; + int idx = key->keyidx; + + /* the hardware does not support per-sta rx gtk, fallback + * to software mode for these. 
+ */ + if ((vif->type == nl80211_iftype_adhoc || + vif->type == nl80211_iftype_mesh_point) && + (key->cipher == wlan_cipher_suite_tkip || + key->cipher == wlan_cipher_suite_ccmp) && + !(key->flags & ieee80211_key_flag_pairwise)) + return -eopnotsupp; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case wlan_cipher_suite_aes_cmac: + key->flags |= ieee80211_key_flag_generate_mmie; + break; + case wlan_cipher_suite_tkip: + case wlan_cipher_suite_ccmp: + case wlan_cipher_suite_ccmp_256: + case wlan_cipher_suite_gcmp: + case wlan_cipher_suite_gcmp_256: + case wlan_cipher_suite_sms4: + break; + case wlan_cipher_suite_wep40: + case wlan_cipher_suite_wep104: + default: + return -eopnotsupp; + } + + if (cmd == set_key) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + } else if (idx == wcid->hw_key_idx) { + wcid->hw_key_idx = -1; + } + mt76_wcid_key_setup(&dev->mt76, wcid, + cmd == set_key ? key : null); + + return mt7921_mcu_add_key(dev, vif, msta, key, cmd); +} + +static int mt7921_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + bool band = phy != &dev->phy; + int ret; + + if (changed & ieee80211_conf_change_channel) { + ieee80211_stop_queues(hw); + ret = mt7921_set_channel(phy); + if (ret) + return ret; + ieee80211_wake_queues(hw); + } + + mutex_lock(&dev->mt76.mutex); + + if (changed & ieee80211_conf_change_monitor) { + bool enabled = !!(hw->conf.flags & ieee80211_conf_monitor); + + if (!enabled) + phy->rxfilter |= mt_wf_rfcr_drop_other_uc; + else + phy->rxfilter &= ~mt_wf_rfcr_drop_other_uc; + + mt76_rmw_field(dev, mt_dma_dcr0(band), mt_dma_dcr0_rxd_g5_en, + enabled); + mt76_wr(dev, mt_wf_rfcr(band), phy->rxfilter); + } + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static int +mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct 
mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + + /* no need to update right away, we'll get bss_changed_qos */ + queue = mt7921_lmac_mapping(dev, queue); + mvif->queue_params[queue] = *params; + + return 0; +} + +static void mt7921_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + bool band = phy != &dev->phy; + u32 ctl_flags = mt_wf_rfcr1_drop_ack | + mt_wf_rfcr1_drop_bf_poll | + mt_wf_rfcr1_drop_ba | + mt_wf_rfcr1_drop_cfend | + mt_wf_rfcr1_drop_cfack; + u32 flags = 0; + +#define mt76_filter(_flag, _hw) do { \ + flags |= *total_flags & fif_##_flag; \ + phy->rxfilter &= ~(_hw); \ + phy->rxfilter |= !(flags & fif_##_flag) * (_hw); \ + } while (0) + + mutex_lock(&dev->mt76.mutex); + + phy->rxfilter &= ~(mt_wf_rfcr_drop_other_bss | + mt_wf_rfcr_drop_other_beacon | + mt_wf_rfcr_drop_frame_report | + mt_wf_rfcr_drop_probereq | + mt_wf_rfcr_drop_mcast_filtered | + mt_wf_rfcr_drop_mcast | + mt_wf_rfcr_drop_bcast | + mt_wf_rfcr_drop_duplicate | + mt_wf_rfcr_drop_a2_bssid | + mt_wf_rfcr_drop_unwanted_ctl | + mt_wf_rfcr_drop_stbc_multi); + + mt76_filter(other_bss, mt_wf_rfcr_drop_other_tim | + mt_wf_rfcr_drop_a3_mac | + mt_wf_rfcr_drop_a3_bssid); + + mt76_filter(fcsfail, mt_wf_rfcr_drop_fcsfail); + + mt76_filter(control, mt_wf_rfcr_drop_cts | + mt_wf_rfcr_drop_rts | + mt_wf_rfcr_drop_ctl_rsv | + mt_wf_rfcr_drop_ndpa); + + *total_flags = flags; + mt76_wr(dev, mt_wf_rfcr(band), phy->rxfilter); + + if (*total_flags & fif_control) + mt76_clear(dev, mt_wf_rfcr1(band), ctl_flags); + else + mt76_set(dev, mt_wf_rfcr1(band), ctl_flags); + + mutex_unlock(&dev->mt76.mutex); +} + +static void mt7921_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct mt7921_phy *phy = mt7921_hw_phy(hw); + 
struct mt7921_dev *dev = mt7921_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + + if (changed & bss_changed_erp_slot) { + int slottime = info->use_short_slot ? 9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt7921_mac_set_timing(phy); + } + } + + /* ensure that enable txcmd_mode after bss_info */ + if (changed & (bss_changed_qos | bss_changed_beacon_enabled)) + mt7921_mcu_set_tx(dev, vif); + + mutex_unlock(&dev->mt76.mutex); +} + +int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + int ret, idx; + + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7921_wtbl_sta - 1); + if (idx < 0) + return -enospc; + + init_list_head(&msta->stats_list); + init_list_head(&msta->poll_list); + msta->vif = mvif; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + msta->wcid.ext_phy = mvif->mt76.band_idx; + msta->wcid.tx_info |= mt_wcid_tx_info_set; + msta->stats.jiffies = jiffies; + + if (vif->type == nl80211_iftype_station && !sta->tdls) + mt7921_mcu_uni_add_bss(&dev->phy, vif, true); + mt7921_mac_wtbl_update(dev, idx, + mt_wtbl_update_adm_count_clear); + + ret = mt7921_mcu_uni_add_sta(dev, vif, sta, true); + if (ret) + return ret; + + return 0; +} + +void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + + mt7921_mcu_uni_add_sta(dev, vif, sta, false); + mt7921_mac_wtbl_update(dev, msta->wcid.idx, + mt_wtbl_update_adm_count_clear); + if (vif->type == nl80211_iftype_station && !sta->tdls) + mt7921_mcu_uni_add_bss(&dev->phy, vif, false); + + spin_lock_bh(&dev->sta_poll_lock); + if (!list_empty(&msta->poll_list)) + 
list_del_init(&msta->poll_list); + if (!list_empty(&msta->stats_list)) + list_del_init(&msta->stats_list); + spin_unlock_bh(&dev->sta_poll_lock); +} + +static void mt7921_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + struct ieee80211_tx_info *info = ieee80211_skb_cb(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt76_wcid *wcid = &dev->mt76.global_wcid; + + if (control->sta) { + struct mt7921_sta *sta; + + sta = (struct mt7921_sta *)control->sta->drv_priv; + wcid = &sta->wcid; + } + + if (vif && !control->sta) { + struct mt7921_vif *mvif; + + mvif = (struct mt7921_vif *)vif->drv_priv; + wcid = &mvif->sta.wcid; + } + + mt76_tx(mphy, control->sta, wcid, skb); +} + +static int mt7921_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + + mutex_lock(&dev->mt76.mutex); + mt7921_mcu_set_rts_thresh(phy, val); + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static int +mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct ieee80211_sta *sta = params->sta; + struct ieee80211_txq *txq = sta->txq[params->tid]; + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + u16 tid = params->tid; + u16 ssn = params->ssn; + struct mt76_txq *mtxq; + int ret = 0; + + if (!txq) + return -einval; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + mutex_lock(&dev->mt76.mutex); + switch (action) { + case ieee80211_ampdu_rx_start: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + params->buf_size); + mt7921_mcu_uni_rx_ba(dev, params, true); + break; + case ieee80211_ampdu_rx_stop: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + 
mt7921_mcu_uni_rx_ba(dev, params, false); + break; + case ieee80211_ampdu_tx_operational: + mtxq->aggr = true; + mtxq->send_bar = false; + mt7921_mcu_uni_tx_ba(dev, params, true); + break; + case ieee80211_ampdu_tx_stop_flush: + case ieee80211_ampdu_tx_stop_flush_cont: + mtxq->aggr = false; + clear_bit(tid, &msta->ampdu_state); + mt7921_mcu_uni_tx_ba(dev, params, false); + break; + case ieee80211_ampdu_tx_start: + set_bit(tid, &msta->ampdu_state); + ret = ieee80211_ampdu_tx_start_immediate; + break; + case ieee80211_ampdu_tx_stop_cont: + mtxq->aggr = false; + clear_bit(tid, &msta->ampdu_state); + mt7921_mcu_uni_tx_ba(dev, params, false); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static int +mt7921_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_sta_state(hw, vif, sta, ieee80211_sta_notexist, + ieee80211_sta_none); +} + +static int +mt7921_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + return mt76_sta_state(hw, vif, sta, ieee80211_sta_none, + ieee80211_sta_notexist); +} + +static int +mt7921_get_stats(struct ieee80211_hw *hw, + struct ieee80211_low_level_stats *stats) +{ + struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mib_stats *mib = &phy->mib; + + stats->dot11rtssuccesscount = mib->rts_cnt; + stats->dot11rtsfailurecount = mib->rts_retries_cnt; + stats->dot11fcserrorcount = mib->fcs_err_cnt; + stats->dot11ackfailurecount = mib->ack_fail_cnt; + + return 0; +} + +static u64 +mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + u8 omac_idx = mvif->mt76.omac_idx; + bool band = phy != &dev->phy; + union { + u64 t64; + u32 t32[2]; + } tsf; + u16 n; + + mutex_lock(&dev->mt76.mutex); + + n = omac_idx > 
hw_bssid_max ? hw_bssid_0 : omac_idx; + /* tsf software read */ + mt76_set(dev, mt_lpon_tcr(band, n), mt_lpon_tcr_sw_mode); + tsf.t32[0] = mt76_rr(dev, mt_lpon_uttr0(band)); + tsf.t32[1] = mt76_rr(dev, mt_lpon_uttr1(band)); + + mutex_unlock(&dev->mt76.mutex); + + return tsf.t64; +} + +static void +mt7921_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 timestamp) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + u8 omac_idx = mvif->mt76.omac_idx; + bool band = phy != &dev->phy; + union { + u64 t64; + u32 t32[2]; + } tsf = { .t64 = timestamp, }; + u16 n; + + mutex_lock(&dev->mt76.mutex); + + n = omac_idx > hw_bssid_max ? hw_bssid_0 : omac_idx; + mt76_wr(dev, mt_lpon_uttr0(band), tsf.t32[0]); + mt76_wr(dev, mt_lpon_uttr1(band), tsf.t32[1]); + /* tsf software overwrite */ + mt76_set(dev, mt_lpon_tcr(band, n), mt_lpon_tcr_sw_write); + + mutex_unlock(&dev->mt76.mutex); +} + +static void +mt7921_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) +{ + struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt7921_dev *dev = phy->dev; + + mutex_lock(&dev->mt76.mutex); + phy->coverage_class = max_t(s16, coverage_class, 0); + mt7921_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); +} + +void mt7921_scan_work(struct work_struct *work) +{ + struct mt7921_phy *phy; + + phy = (struct mt7921_phy *)container_of(work, struct mt7921_phy, + scan_work.work); + + while (true) { + struct mt7921_mcu_rxd *rxd; + struct sk_buff *skb; + + spin_lock_bh(&phy->dev->mt76.lock); + skb = __skb_dequeue(&phy->scan_event_list); + spin_unlock_bh(&phy->dev->mt76.lock); + + if (!skb) + break; + + rxd = (struct mt7921_mcu_rxd *)skb->data; + if (rxd->eid == mcu_event_sched_scan_done) { + ieee80211_sched_scan_results(phy->mt76->hw); + } else if (test_and_clear_bit(mt76_hw_scanning, + &phy->mt76->state)) { + struct cfg80211_scan_info info = { + .aborted = false, + 
}; + + ieee80211_scan_completed(phy->mt76->hw, &info); + } + dev_kfree_skb(skb); + } +} + +static int +mt7921_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_scan_request *req) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mutex_lock(&dev->mt76.mutex); + err = mt7921_mcu_hw_scan(mphy->priv, vif, req); + mutex_unlock(&dev->mt76.mutex); + + return err; +} + +static void +mt7921_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + + mutex_lock(&dev->mt76.mutex); + mt7921_mcu_cancel_hw_scan(mphy->priv, vif); + mutex_unlock(&dev->mt76.mutex); +} + +static int +mt7921_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + int max_nss = hweight8(hw->wiphy->available_antennas_tx); + + if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss) + return -einval; + + if ((bit(hweight8(tx_ant)) - 1) != tx_ant) + tx_ant = bit(ffs(tx_ant) - 1) - 1; + + mutex_lock(&dev->mt76.mutex); + + phy->mt76->antenna_mask = tx_ant; + phy->mt76->chainmask = tx_ant; + + mt76_set_stream_caps(phy->mt76, true); + mt7921_set_stream_he_caps(phy); + + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void +mt7921_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed) +{ +} + +static void mt7921_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; + struct mt7921_sta_stats *stats = &msta->stats; + + if (!stats->tx_rate.legacy && !stats->tx_rate.flags) + return; + + if (stats->tx_rate.legacy) { + sinfo->txrate.legacy = stats->tx_rate.legacy; + } else { + sinfo->txrate.mcs = stats->tx_rate.mcs; + sinfo->txrate.nss = 
stats->tx_rate.nss; + sinfo->txrate.bw = stats->tx_rate.bw; + sinfo->txrate.he_gi = stats->tx_rate.he_gi; + sinfo->txrate.he_dcm = stats->tx_rate.he_dcm; + sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc; + } + sinfo->txrate.flags = stats->tx_rate.flags; + sinfo->filled |= bit_ull(nl80211_sta_info_tx_bitrate); +} + +const struct ieee80211_ops mt7921_ops = { + .tx = mt7921_tx, + .start = mt7921_start, + .stop = mt7921_stop, + .add_interface = mt7921_add_interface, + .remove_interface = mt7921_remove_interface, + .config = mt7921_config, + .conf_tx = mt7921_conf_tx, + .configure_filter = mt7921_configure_filter, + .bss_info_changed = mt7921_bss_info_changed, + .sta_add = mt7921_sta_add, + .sta_remove = mt7921_sta_remove, + .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, + .sta_rc_update = mt7921_sta_rc_update, + .set_key = mt7921_set_key, + .ampdu_action = mt7921_ampdu_action, + .set_rts_threshold = mt7921_set_rts_threshold, + .wake_tx_queue = mt76_wake_tx_queue, + .release_buffered_frames = mt76_release_buffered_frames, + .get_txpower = mt76_get_txpower, + .get_stats = mt7921_get_stats, + .get_tsf = mt7921_get_tsf, + .set_tsf = mt7921_set_tsf, + .get_survey = mt76_get_survey, + .get_antenna = mt76_get_antenna, + .set_antenna = mt7921_set_antenna, + .set_coverage_class = mt7921_set_coverage_class, + .hw_scan = mt7921_hw_scan, + .cancel_hw_scan = mt7921_cancel_hw_scan, + .sta_statistics = mt7921_sta_statistics, +};
Networking
e0f9fdda81bd32371ddac9222487e612027d8de2
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce mt7921e support
introduce support for mt7921e 802.11ax (wi-fi 6) 2x2:2ss chipset.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['kconfig', 'c', 'makefile']
6
449
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/kconfig b/drivers/net/wireless/mediatek/mt76/kconfig --- a/drivers/net/wireless/mediatek/mt76/kconfig +++ b/drivers/net/wireless/mediatek/mt76/kconfig +source "drivers/net/wireless/mediatek/mt76/mt7921/kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/makefile b/drivers/net/wireless/mediatek/mt76/makefile --- a/drivers/net/wireless/mediatek/mt76/makefile +++ b/drivers/net/wireless/mediatek/mt76/makefile +obj-$(config_mt7921e) += mt7921/ diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/kconfig --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/kconfig +# spdx-license-identifier: isc +config mt7921e + tristate "mediatek mt7921e (pcie) support" + select mt76_core + depends on mac80211 + depends on pci + help + this adds support for mt7921e 802.11ax 2x2:2ss wireless devices. + + to compile this driver as a module, choose m here. diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/makefile b/drivers/net/wireless/mediatek/mt76/mt7921/makefile --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/makefile +#spdx-license-identifier: isc + +obj-$(config_mt7921e) += mt7921e.o + +mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. 
*/ + +#include <linux/etherdevice.h> +#include "mt7921.h" +#include "mac.h" +#include "eeprom.h" + +#define cck_rate(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = ieee80211_rate_short_preamble, \ + .hw_value = (mt_phy_type_cck << 8) | (_idx), \ + .hw_value_short = (mt_phy_type_cck << 8) | (4 + (_idx)), \ +} + +#define ofdm_rate(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (mt_phy_type_ofdm << 8) | (_idx), \ + .hw_value_short = (mt_phy_type_ofdm << 8) | (_idx), \ +} + +static struct ieee80211_rate mt7921_rates[] = { + cck_rate(0, 10), + cck_rate(1, 20), + cck_rate(2, 55), + cck_rate(3, 110), + ofdm_rate(11, 60), + ofdm_rate(15, 90), + ofdm_rate(10, 120), + ofdm_rate(14, 180), + ofdm_rate(9, 240), + ofdm_rate(13, 360), + ofdm_rate(8, 480), + ofdm_rate(12, 540), +}; + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = mt7921_max_interfaces, + .types = bit(nl80211_iftype_station) + } +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = array_size(if_limits), + .max_interfaces = mt7921_max_interfaces, + .num_different_channels = 1, + .beacon_int_infra_match = true, + } +}; + +static void +mt7921_init_wiphy(struct ieee80211_hw *hw) +{ + struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct wiphy *wiphy = hw->wiphy; + + hw->queues = 4; + hw->max_rx_aggregation_subframes = ieee80211_max_ampdu_buf; + hw->max_tx_aggregation_subframes = ieee80211_max_ampdu_buf; + + phy->slottime = 9; + + hw->sta_data_size = sizeof(struct mt7921_sta); + hw->vif_data_size = sizeof(struct mt7921_vif); + + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = array_size(if_comb); + wiphy->max_scan_ie_len = mt7921_scan_ie_len; + wiphy->max_scan_ssids = 4; + wiphy->flags |= wiphy_flag_has_channel_switch; + + wiphy_ext_feature_set(wiphy, nl80211_ext_feature_set_scan_dwell); + + ieee80211_hw_set(hw, single_scan_on_all_bands); + ieee80211_hw_set(hw, has_rate_control); + ieee80211_hw_set(hw, 
supports_tx_encap_offload); + ieee80211_hw_set(hw, want_monitor_vif); + + hw->max_tx_fragments = 4; +} + +static void +mt7921_mac_init_band(struct mt7921_dev *dev, u8 band) +{ + u32 mask, set; + + mt76_rmw_field(dev, mt_tmac_ctcr0(band), + mt_tmac_ctcr0_ins_ddlmt_reftime, 0x3f); + mt76_set(dev, mt_tmac_ctcr0(band), + mt_tmac_ctcr0_ins_ddlmt_vht_smpdu_en | + mt_tmac_ctcr0_ins_ddlmt_en); + + mask = mt_mdp_rcfr0_mcu_rx_mgmt | + mt_mdp_rcfr0_mcu_rx_ctl_non_bar | + mt_mdp_rcfr0_mcu_rx_ctl_bar; + set = field_prep(mt_mdp_rcfr0_mcu_rx_mgmt, mt_mdp_to_hif) | + field_prep(mt_mdp_rcfr0_mcu_rx_ctl_non_bar, mt_mdp_to_hif) | + field_prep(mt_mdp_rcfr0_mcu_rx_ctl_bar, mt_mdp_to_hif); + mt76_rmw(dev, mt_mdp_bnrcfr0(band), mask, set); + + mask = mt_mdp_rcfr1_mcu_rx_bypass | + mt_mdp_rcfr1_rx_dropped_ucast | + mt_mdp_rcfr1_rx_dropped_mcast; + set = field_prep(mt_mdp_rcfr1_mcu_rx_bypass, mt_mdp_to_hif) | + field_prep(mt_mdp_rcfr1_rx_dropped_ucast, mt_mdp_to_hif) | + field_prep(mt_mdp_rcfr1_rx_dropped_mcast, mt_mdp_to_hif); + mt76_rmw(dev, mt_mdp_bnrcfr1(band), mask, set); + + mt76_set(dev, mt_wf_rmac_mib_time0(band), mt_wf_rmac_mib_rxtime_en); + mt76_set(dev, mt_wf_rmac_mib_airtime0(band), mt_wf_rmac_mib_rxtime_en); + + mt76_rmw_field(dev, mt_dma_dcr0(band), mt_dma_dcr0_max_rx_len, 1536); + /* disable rx rate report by default due to hw issues */ + mt76_clear(dev, mt_dma_dcr0(band), mt_dma_dcr0_rxd_g5_en); +} + +static void mt7921_mac_init(struct mt7921_dev *dev) +{ + int i; + + mt76_rmw_field(dev, mt_mdp_dcr1, mt_mdp_dcr1_max_rx_len, 1536); + /* disable hardware de-agg */ + mt76_clear(dev, mt_mdp_dcr0, mt_mdp_dcr0_damsdu_en); + mt76_clear(dev, mt_mdp_dcr0, mt_mdp_dcr0_rx_hdr_trans_en); + + for (i = 0; i < mt7921_wtbl_size; i++) + mt7921_mac_wtbl_update(dev, i, + mt_wtbl_update_adm_count_clear); + for (i = 0; i < 2; i++) + mt7921_mac_init_band(dev, i); + + mt7921_mcu_set_rts_thresh(&dev->phy, 0x92b); +} + +static void mt7921_init_work(struct work_struct *work) +{ + struct mt7921_dev 
*dev = container_of(work, struct mt7921_dev, + init_work); + + mt7921_mcu_set_eeprom(dev); + mt7921_mac_init(dev); +} + +static int mt7921_init_hardware(struct mt7921_dev *dev) +{ + int ret, idx; + + init_work(&dev->init_work, mt7921_init_work); + spin_lock_init(&dev->token_lock); + idr_init(&dev->token); + + ret = mt7921_dma_init(dev); + if (ret) + return ret; + + set_bit(mt76_state_initialized, &dev->mphy.state); + + /* force firmware operation mode into normal state, + * which should be set before firmware download stage. + */ + mt76_wr(dev, mt_swdef_mode, mt_swdef_normal_mode); + + ret = mt7921_mcu_init(dev); + if (ret) + return ret; + + ret = mt7921_eeprom_init(dev); + if (ret < 0) + return ret; + + /* beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, mt7921_wtbl_sta - 1); + if (idx) + return -enospc; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + dev->mt76.global_wcid.tx_info |= mt_wcid_tx_info_set; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +int mt7921_register_device(struct mt7921_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int ret; + + dev->phy.dev = dev; + dev->phy.mt76 = &dev->mt76.phy; + dev->mt76.phy.priv = &dev->phy; + init_list_head(&dev->phy.stats_list); + init_delayed_work(&dev->mphy.mac_work, mt7921_mac_work); + init_delayed_work(&dev->phy.scan_work, mt7921_scan_work); + skb_queue_head_init(&dev->phy.scan_event_list); + init_list_head(&dev->sta_poll_list); + spin_lock_init(&dev->sta_poll_lock); + + init_waitqueue_head(&dev->reset_wait); + init_work(&dev->reset_work, mt7921_mac_reset_work); + + ret = mt7921_init_hardware(dev); + if (ret) + return ret; + + mt7921_init_wiphy(hw); + dev->mphy.sband_2g.sband.ht_cap.cap |= + ieee80211_ht_cap_ldpc_coding | + ieee80211_ht_cap_max_amsdu; + dev->mphy.sband_5g.sband.ht_cap.cap |= + ieee80211_ht_cap_ldpc_coding | + ieee80211_ht_cap_max_amsdu; + dev->mphy.sband_5g.sband.vht_cap.cap 
|= + ieee80211_vht_cap_max_mpdu_length_7991 | + ieee80211_vht_cap_max_a_mpdu_length_exponent_mask; + dev->mphy.sband_5g.sband.vht_cap.cap |= + ieee80211_vht_cap_short_gi_160 | + ieee80211_vht_cap_supp_chan_width_160_80plus80mhz; + dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask; + dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask; + + mt76_set_stream_caps(&dev->mphy, true); + mt7921_set_stream_he_caps(&dev->phy); + + ret = mt76_register_device(&dev->mt76, true, mt7921_rates, + array_size(mt7921_rates)); + if (ret) + return ret; + + ieee80211_queue_work(mt76_hw(dev), &dev->init_work); + + return 0; +} + +void mt7921_unregister_device(struct mt7921_dev *dev) +{ + mt76_unregister_device(&dev->mt76); + mt7921_mcu_exit(dev); + mt7921_dma_cleanup(dev); + + mt7921_tx_token_put(dev); + + tasklet_disable(&dev->irq_tasklet); + mt76_free_device(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "mt7921.h" +#include "mac.h" +#include "../trace.h" + +static const struct pci_device_id mt7921_pci_device_table[] = { + { pci_device(0x14c3, 0x7961) }, + { }, +}; + +static void +mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + if (q == mt_rxq_main) + mt7921_irq_enable(dev, mt_int_rx_done_data); + else if (q == mt_rxq_mcu_wa) + mt7921_irq_enable(dev, mt_int_rx_done_wm2); + else + mt7921_irq_enable(dev, mt_int_rx_done_wm); +} + +static irqreturn_t mt7921_irq_handler(int irq, void *dev_instance) +{ + struct mt7921_dev *dev = dev_instance; + + mt76_wr(dev, mt_wfdma0_host_int_ena, 0); + + if (!test_bit(mt76_state_initialized, &dev->mphy.state)) + return irq_none; + + tasklet_schedule(&dev->irq_tasklet); + + return irq_handled; +} + +static void mt7921_irq_tasklet(unsigned long data) +{ + struct mt7921_dev *dev = (struct mt7921_dev *)data; + u32 intr, mask = 0; + + mt76_wr(dev, mt_wfdma0_host_int_ena, 0); + + intr = mt76_rr(dev, mt_wfdma0_host_int_sta); + intr &= dev->mt76.mmio.irqmask; + mt76_wr(dev, mt_wfdma0_host_int_sta, intr); + + trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask); + + mask |= intr & mt_int_rx_done_all; + if (intr & mt_int_tx_done_mcu) + mask |= mt_int_tx_done_mcu; + + mt76_set_irq_mask(&dev->mt76, mt_wfdma0_host_int_ena, mask, 0); + + if (intr & mt_int_tx_done_all) + napi_schedule(&dev->mt76.tx_napi); + + if (intr & mt_int_rx_done_wm) + napi_schedule(&dev->mt76.napi[mt_rxq_mcu]); + + if (intr & mt_int_rx_done_wm2) + napi_schedule(&dev->mt76.napi[mt_rxq_mcu_wa]); + + if (intr & mt_int_rx_done_data) + napi_schedule(&dev->mt76.napi[mt_rxq_main]); +} + +static int mt7921_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + /* txwi_size = txd size + txp size */ + .txwi_size = 
mt_txd_size + sizeof(struct mt7921_txp_common), + .drv_flags = mt_drv_txwi_no_free | mt_drv_hw_mgmt_txq | + mt_drv_amsdu_offload, + .survey_flags = survey_info_time_tx | + survey_info_time_rx | + survey_info_time_bss_rx, + .tx_prepare_skb = mt7921_tx_prepare_skb, + .tx_complete_skb = mt7921_tx_complete_skb, + .rx_skb = mt7921_queue_rx_skb, + .rx_poll_complete = mt7921_rx_poll_complete, + .sta_ps = mt7921_sta_ps, + .sta_add = mt7921_mac_sta_add, + .sta_remove = mt7921_mac_sta_remove, + .update_survey = mt7921_update_channel, + }; + struct mt7921_dev *dev; + struct mt76_dev *mdev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, bit(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, 1, 1, pci_irq_legacy); + if (ret < 0) + return ret; + + ret = pci_set_dma_mask(pdev, dma_bit_mask(32)); + if (ret) + goto err_free_pci_vec; + + mt76_pci_disable_aspm(pdev); + + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops, + &drv_ops); + if (!mdev) { + ret = -enomem; + goto err_free_pci_vec; + } + + dev = container_of(mdev, struct mt7921_dev, mt76); + + mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev); + mdev->rev = (mt7921_l1_rr(dev, mt_hw_chipid) << 16) | + (mt7921_l1_rr(dev, mt_hw_rev) & 0xff); + dev_err(mdev->dev, "asic revision: %04x ", mdev->rev); + + mt76_wr(dev, mt_wfdma0_host_int_ena, 0); + + mt7921_l1_wr(dev, mt_pcie_mac_int_enable, 0xff); + + ret = devm_request_irq(mdev->dev, pdev->irq, mt7921_irq_handler, + irqf_shared, kbuild_modname, dev); + if (ret) + goto err_free_dev; + + ret = mt7921_register_device(dev); + if (ret) + goto err_free_dev; + + return 0; + +err_free_dev: + mt76_free_device(&dev->mt76); +err_free_pci_vec: + pci_free_irq_vectors(pdev); + + return ret; +} + +static void mt7921_pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = 
pci_get_drvdata(pdev); + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + + mt7921_unregister_device(dev); + devm_free_irq(&pdev->dev, pdev->irq, dev); + pci_free_irq_vectors(pdev); +} + +struct pci_driver mt7921_pci_driver = { + .name = kbuild_modname, + .id_table = mt7921_pci_device_table, + .probe = mt7921_pci_probe, + .remove = mt7921_pci_remove, +}; + +module_pci_driver(mt7921_pci_driver); + +module_device_table(pci, mt7921_pci_device_table); +module_firmware(mt7921_firmware_wm); +module_firmware(mt7921_rom_patch); +module_author("sean wang <sean.wang@mediatek.com>"); +module_author("lorenzo bianconi <lorenzo@kernel.org>"); +module_license("dual bsd/gpl");
Networking
5c14a5f944b91371961548b1907802f74a4d2e5c
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: add debugfs support
add debugfs support to dump driver statistics and hardware details.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['c']
2
179
1
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +// spdx-license-identifier: isc +/* copyright (c) 2020 mediatek inc. */ + +#include "mt7921.h" +#include "eeprom.h" + +static int +mt7921_fw_debug_set(void *data, u64 val) +{ + struct mt7921_dev *dev = data; + + dev->fw_debug = (u8)val; + + mt7921_mcu_fw_log_2_host(dev, dev->fw_debug); + + return 0; +} + +static int +mt7921_fw_debug_get(void *data, u64 *val) +{ + struct mt7921_dev *dev = data; + + *val = dev->fw_debug; + + return 0; +} + +define_debugfs_attribute(fops_fw_debug, mt7921_fw_debug_get, + mt7921_fw_debug_set, "%lld "); + +static void +mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy, + struct seq_file *file) +{ + struct mt7921_dev *dev = file->private; + int bound[15], range[4], i; + + if (!phy) + return; + + /* tx ampdu stat */ + for (i = 0; i < array_size(range); i++) + range[i] = mt76_rr(dev, mt_mib_arng(0, i)); + + for (i = 0; i < array_size(bound); i++) + bound[i] = mt_mib_arncr_range(range[i / 4], i) + 1; + + seq_printf(file, " phy0 "); + + seq_printf(file, "length: %8d | ", bound[0]); + for (i = 0; i < array_size(bound) - 1; i++) + seq_printf(file, "%3d -%3d | ", + bound[i] + 1, bound[i + 1]); + + seq_puts(file, " count: "); + for (i = 0; i < array_size(bound); i++) + seq_printf(file, "%8d | ", dev->mt76.aggr_stats[i]); + seq_puts(file, " "); + + seq_printf(file, "ba miss count: %d ", phy->mib.ba_miss_cnt); +} + +static int +mt7921_tx_stats_read(struct seq_file *file, void *data) +{ + struct mt7921_dev *dev = file->private; + int stat[8], i, n; + + mt7921_ampdu_stat_read_phy(&dev->phy, file); + + /* tx amsdu info */ + seq_puts(file, "tx msdu stat: "); + for (i = 0, n = 0; i < array_size(stat); i++) { + stat[i] = mt76_rr(dev, mt_ple_amsdu_pack_msdu_cnt(i)); + n += stat[i]; + } + + for (i = 0; i < array_size(stat); i++) { + seq_printf(file, "amsdu pack 
count of %d msdu in txd: 0x%x ", + i + 1, stat[i]); + if (n != 0) + seq_printf(file, "(%d%%) ", stat[i] * 100 / n); + else + seq_puts(file, " "); + } + + return 0; +} + +static int +mt7921_tx_stats_open(struct inode *inode, struct file *f) +{ + return single_open(f, mt7921_tx_stats_read, inode->i_private); +} + +static const struct file_operations fops_tx_stats = { + .open = mt7921_tx_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = this_module, +}; + +static int +mt7921_queues_acq(struct seq_file *s, void *data) +{ + struct mt7921_dev *dev = dev_get_drvdata(s->private); + int i; + + for (i = 0; i < 16; i++) { + int j, acs = i / 4, index = i % 4; + u32 ctrl, val, qlen = 0; + + val = mt76_rr(dev, mt_ple_ac_qempty(acs, index)); + ctrl = bit(31) | bit(15) | (acs << 8); + + for (j = 0; j < 32; j++) { + if (val & bit(j)) + continue; + + mt76_wr(dev, mt_ple_fl_q0_ctrl, + ctrl | (j + (index << 5))); + qlen += mt76_get_field(dev, mt_ple_fl_q3_ctrl, + genmask(11, 0)); + } + seq_printf(s, "ac%d%d: queued=%d ", acs, index, qlen); + } + + return 0; +} + +static int +mt7921_queues_read(struct seq_file *s, void *data) +{ + struct mt7921_dev *dev = dev_get_drvdata(s->private); + struct { + struct mt76_queue *q; + char *queue; + } queue_map[] = { + { dev->mphy.q_tx[mt_txq_be], "wfdma0" }, + { dev->mt76.q_mcu[mt_mcuq_wm], "mcuwm" }, + { dev->mt76.q_mcu[mt_mcuq_fwdl], "mcufwq" }, + }; + int i; + + for (i = 0; i < array_size(queue_map); i++) { + struct mt76_queue *q = queue_map[i].q; + + if (!q) + continue; + + seq_printf(s, + "%s: queued=%d head=%d tail=%d ", + queue_map[i].queue, q->queued, q->head, + q->tail); + } + + return 0; +} + +int mt7921_init_debugfs(struct mt7921_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs(&dev->mt76); + if (!dir) + return -enomem; + + debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir, + mt7921_queues_read); + debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, + 
mt7921_queues_acq); + debugfs_create_file("tx_stats", 0400, dir, dev, &fops_tx_stats); + debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c - return 0; + return mt7921_init_debugfs(dev);
Networking
474a9f21e2e20ebe1cdaa093a77f0681273f4b03
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce schedule scan support
introduce schedule scan to control mt7921 firmware to do background scan in defined plan to see if the matched ssid is available.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
5
155
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c + wiphy->max_sched_scan_plan_interval = mt7921_max_sched_scan_interval; + wiphy->max_sched_scan_ie_len = ieee80211_max_data_len; + wiphy->max_sched_scan_ssids = mt7921_max_sched_scan_ssid; + wiphy->max_match_sets = mt7921_max_scan_match; + wiphy->max_sched_scan_reqs = 1; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +static int +mt7921_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mutex_lock(&dev->mt76.mutex); + + err = mt7921_mcu_sched_scan_req(mphy->priv, vif, req); + if (err < 0) + goto out; + + err = mt7921_mcu_sched_scan_enable(mphy->priv, vif, true); +out: + mutex_unlock(&dev->mt76.mutex); + + return err; +} + +static int +mt7921_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_phy *mphy = hw->priv; + int err; + + mutex_lock(&dev->mt76.mutex); + err = mt7921_mcu_sched_scan_enable(mphy->priv, vif, false); + mutex_unlock(&dev->mt76.mutex); + + return err; +} + + .sched_scan_start = mt7921_start_sched_scan, + .sched_scan_stop = mt7921_stop_sched_scan, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +int mt7921_mcu_sched_scan_req(struct mt7921_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request 
*sreq) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct ieee80211_channel **scan_list = sreq->channels; + struct mt7921_dev *dev = phy->dev; + struct mt7921_mcu_scan_channel *chan; + struct mt7921_sched_scan_req *req; + struct cfg80211_match_set *match; + struct cfg80211_ssid *ssid; + struct sk_buff *skb; + int i; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, + sizeof(*req) + sreq->ie_len); + if (!skb) + return -enomem; + + mvif->mt76.scan_seq_num = (mvif->mt76.scan_seq_num + 1) & 0x7f; + + req = (struct mt7921_sched_scan_req *)skb_put(skb, sizeof(*req)); + req->version = 1; + req->seq_num = mvif->mt76.scan_seq_num; + + req->ssids_num = sreq->n_ssids; + for (i = 0; i < req->ssids_num; i++) { + ssid = &sreq->ssids[i]; + memcpy(req->ssids[i].ssid, ssid->ssid, ssid->ssid_len); + req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len); + } + + req->match_num = sreq->n_match_sets; + for (i = 0; i < req->match_num; i++) { + match = &sreq->match_sets[i]; + memcpy(req->match[i].ssid, match->ssid.ssid, + match->ssid.ssid_len); + req->match[i].rssi_th = cpu_to_le32(match->rssi_thold); + req->match[i].ssid_len = match->ssid.ssid_len; + } + + req->channel_type = sreq->n_channels ? 4 : 0; + req->channels_num = min_t(u8, sreq->n_channels, 64); + for (i = 0; i < req->channels_num; i++) { + chan = &req->channels[i]; + chan->band = scan_list[i]->band == nl80211_band_2ghz ? 
1 : 2; + chan->channel_num = scan_list[i]->hw_value; + } + + req->intervals_num = sreq->n_scan_plans; + for (i = 0; i < req->intervals_num; i++) + req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval); + + if (sreq->ie_len > 0) { + req->ie_len = cpu_to_le16(sreq->ie_len); + memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len); + } + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_cmd_sched_scan_req, + false); +} + +int mt7921_mcu_sched_scan_enable(struct mt7921_phy *phy, + struct ieee80211_vif *vif, + bool enable) +{ + struct mt7921_dev *dev = phy->dev; + struct { + u8 active; /* 0: enabled 1: disabled */ + u8 rsv[3]; + } __packed req = { + .active = !enable, + }; + + if (enable) + set_bit(mt76_hw_sched_scanning, &phy->mt76->state); + else + clear_bit(mt76_hw_sched_scanning, &phy->mt76->state); + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_sched_scan_enable, &req, + sizeof(req), false); +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +struct mt7921_sched_scan_req { + u8 version; + u8 seq_num; + u8 stop_on_match; + u8 ssids_num; + u8 match_num; + u8 pad; + __le16 ie_len; + struct mt7921_mcu_scan_ssid ssids[mt7921_max_sched_scan_ssid]; + struct mt7921_mcu_scan_match match[mt7921_max_scan_match]; + u8 channel_type; + u8 channels_num; + u8 intervals_num; + u8 scan_func; + struct mt7921_mcu_scan_channel channels[64]; + __le16 intervals[mt7921_max_sched_scan_interval]; + u8 bss_idx; + u8 pad2[64]; +} __packed; + diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +#define mt7921_max_sched_scan_interval 10 +#define mt7921_max_sched_scan_ssid 10 +#define mt7921_max_scan_match 16 +int 
mt7921_mcu_sched_scan_req(struct mt7921_phy *phy, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *sreq); +int mt7921_mcu_sched_scan_enable(struct mt7921_phy *phy, + struct ieee80211_vif *vif, + bool enable);
Networking
29f9d8b08b8cfaaceb4cb6199e38fbe6630d9706
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce 802.11 ps support in sta mode
enable 802.11 power-save support available in mt7921 firmware
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
6
45
3
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c + ieee80211_hw_set(hw, supports_ps); + ieee80211_hw_set(hw, supports_dynamic_ps); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c - val = mt_txd3_sw_power_mgmt | - field_prep(mt_txd3_rem_tx_count, tx_count); + val = field_prep(mt_txd3_rem_tx_count, tx_count); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c + if (changed & bss_changed_ps) + mt7921_mcu_uni_bss_ps(dev, vif); + diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c + +int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct ps_tlv { + __le16 tag; + __le16 len; + u8 ps_state; /* 0: device awake + * 1: static power save + * 2: dynamic power saving + * 3: enter twt power saving + * 4: leave twt power saving + */ + u8 pad[3]; + } __packed ps; + } __packed ps_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .ps = { + .tag = cpu_to_le16(uni_bss_info_ps), + .len = cpu_to_le16(sizeof(struct ps_tlv)), + .ps_state = vif->bss_conf.ps ? 
2 : 0, + }, + }; + + if (vif->type != nl80211_iftype_station) + return -eopnotsupp; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, + &ps_req, sizeof(ps_req), true); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h - mcu_cmd_set_ps_profile = mcu_ce_prefix | 0x05, + uni_bss_info_ps = 21, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
Networking
56d965da1318f92705a349f7232524dbb93add43
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce support for hardware beacon filter
introduce support for hw beacon filter available in the mt7921 firmware.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
3
80
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c + +int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct bcnft_tlv { + __le16 tag; + __le16 len; + __le16 bcn_interval; + u8 dtim_period; + u8 pad; + } __packed bcnft; + } __packed bcnft_req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .bcnft = { + .tag = cpu_to_le16(uni_bss_info_bcnft), + .len = cpu_to_le16(sizeof(struct bcnft_tlv)), + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + .dtim_period = vif->bss_conf.dtim_period, + }, + }; + + if (vif->type != nl80211_iftype_station) + return 0; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, + &bcnft_req, sizeof(bcnft_req), true); +} + +int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool enable) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + u8 bss_idx; + u8 dtim_period; + __le16 aid; + __le16 bcn_interval; + __le16 atim_window; + u8 uapsd; + u8 bmc_delivered_ac; + u8 bmc_triggered_ac; + u8 pad; + } req = { + .bss_idx = mvif->mt76.idx, + .aid = cpu_to_le16(vif->bss_conf.aid), + .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + }; + struct { + u8 bss_idx; + u8 pad[3]; + } req_hdr = { + .bss_idx = mvif->mt76.idx, + }; + int err; + + if (vif->type != nl80211_iftype_station) + return 0; + + err = mt76_mcu_send_msg(&dev->mt76, mcu_cmd_set_bss_abort, &req_hdr, + sizeof(req_hdr), false); + if (err < 0 || !enable) + return err; + + return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_set_bss_connected, &req, + sizeof(req), false); +} diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h + uni_bss_info_bcnft = 22, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool enable); +int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool enable);
Networking
4086ee28e239a665397829e38d6d1714b7cf3369
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce beacon_loss mcu event
if device has enabled beacon hw filter rx beacons are not reported to the host. introduce beacon_loss mcu event to trigger mac80211 mlme connection state machine in this configuration. ieee80211_vif_beacon_filter has not been set in vif flags since hw beacon filter is not enabled yet
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
2
41
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +static void +mt7921_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_beacon_loss_event *event = priv; + + if (mvif->mt76.idx != event->bss_idx) + return; + + if (!(vif->driver_flags & ieee80211_vif_beacon_filter)) + return; + + ieee80211_beacon_loss(vif); +} + +static void +mt7921_mcu_beacon_loss_event(struct mt7921_dev *dev, struct sk_buff *skb) +{ + struct mt7921_beacon_loss_event *event; + struct mt76_phy *mphy; + u8 band_idx = 0; /* dbdc support */ + + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + event = (struct mt7921_beacon_loss_event *)skb->data; + if (band_idx && dev->mt76.phy2) + mphy = dev->mt76.phy2; + else + mphy = &dev->mt76.phy; + + ieee80211_iterate_active_interfaces_atomic(mphy->hw, + ieee80211_iface_iter_resume_all, + mt7921_mcu_beacon_loss_iter, event); +} + + mt7921_mcu_beacon_loss_event(dev, skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +struct mt7921_beacon_loss_event { + u8 bss_idx; + u8 reason; + u8 pad[2]; +} __packed; +
Networking
b88f5c6473aa92469a5be7a1fdf521b711ed40ba
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce pm support
introduce suspend/resume and wow (wake-on-wowlan) support to mt7921 driver to allow remote wake-up from the suspend state.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
5
589
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c +#ifdef config_pm +static int mt7921_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + int err; + + cancel_delayed_work_sync(&phy->scan_work); + cancel_delayed_work_sync(&phy->mt76->mac_work); + + mutex_lock(&dev->mt76.mutex); + + clear_bit(mt76_state_running, &phy->mt76->state); + + set_bit(mt76_state_suspend, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + ieee80211_iface_iter_resume_all, + mt7921_mcu_set_suspend_iter, phy); + + err = mt7921_mcu_set_hif_suspend(dev, true); + + mutex_unlock(&dev->mt76.mutex); + + return err; +} + +static int mt7921_resume(struct ieee80211_hw *hw) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + int err; + + mutex_lock(&dev->mt76.mutex); + + err = mt7921_mcu_set_hif_suspend(dev, false); + if (err < 0) + goto out; + + set_bit(mt76_state_running, &phy->mt76->state); + clear_bit(mt76_state_suspend, &phy->mt76->state); + ieee80211_iterate_active_interfaces(hw, + ieee80211_iface_iter_resume_all, + mt7921_mcu_set_suspend_iter, phy); + + ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work, + mt7921_watchdog_time); +out: + mutex_unlock(&dev->mt76.mutex); + + return err; +} + +static void mt7921_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt76_dev *mdev = &dev->mt76; + + device_set_wakeup_enable(mdev->dev, enabled); +} + +static void mt7921_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + + mutex_lock(&dev->mt76.mutex); + mt7921_mcu_update_gtk_rekey(hw, vif, data); + 
mutex_unlock(&dev->mt76.mutex); +} +#endif /* config_pm */ + +#ifdef config_pm + .suspend = mt7921_suspend, + .resume = mt7921_resume, + .set_wakeup = mt7921_set_wakeup, + .set_rekey_data = mt7921_set_rekey_data, +#endif /* config_pm */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +static const struct wiphy_wowlan_support mt7921_wowlan_support = { + .flags = wiphy_wowlan_magic_pkt | wiphy_wowlan_disconnect | + wiphy_wowlan_supports_gtk_rekey | wiphy_wowlan_net_detect, + .n_patterns = 1, + .pattern_min_len = 1, + .pattern_max_len = mt7921_wow_patten_max_len, + .max_nd_match_sets = 10, +}; + +#ifdef config_pm + dev->mt76.hw->wiphy->wowlan = &mt7921_wowlan_support; +#endif /* config_pm */ + + +#ifdef config_pm +int mt7921_mcu_set_hif_suspend(struct mt7921_dev *dev, bool suspend) +{ + struct { + struct { + u8 hif_type; /* 0x0: hif_sdio + * 0x1: hif_usb + * 0x2: hif_pcie + */ + u8 pad[3]; + } __packed hdr; + struct hif_suspend_tlv { + __le16 tag; + __le16 len; + u8 suspend; + } __packed hif_suspend; + } req = { + .hif_suspend = { + .tag = cpu_to_le16(0), /* 0: uni_hif_ctrl_basic */ + .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), + .suspend = suspend, + }, + }; + + if (mt76_is_mmio(&dev->mt76)) + req.hdr.hif_type = 2; + else if (mt76_is_usb(&dev->mt76)) + req.hdr.hif_type = 1; + else if (mt76_is_sdio(&dev->mt76)) + req.hdr.hif_type = 0; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_hif_ctrl, &req, + sizeof(req), true); +} +export_symbol_gpl(mt7921_mcu_set_hif_suspend); + +static int +mt7921_mcu_set_wow_ctrl(struct mt7921_phy *phy, struct ieee80211_vif *vif, + bool suspend, struct cfg80211_wowlan *wowlan) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = phy->dev; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct 
mt7921_wow_ctrl_tlv wow_ctrl_tlv; + struct mt7921_wow_gpio_param_tlv gpio_tlv; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .wow_ctrl_tlv = { + .tag = cpu_to_le16(uni_suspend_wow_ctrl), + .len = cpu_to_le16(sizeof(struct mt7921_wow_ctrl_tlv)), + .cmd = suspend ? 1 : 2, + }, + .gpio_tlv = { + .tag = cpu_to_le16(uni_suspend_wow_gpio_param), + .len = cpu_to_le16(sizeof(struct mt7921_wow_gpio_param_tlv)), + .gpio_pin = 0xff, /* follow fw about gpio pin */ + }, + }; + + if (wowlan->magic_pkt) + req.wow_ctrl_tlv.trigger |= bit(0); + if (wowlan->disconnect) + req.wow_ctrl_tlv.trigger |= bit(2); + if (wowlan->nd_config) { + mt7921_mcu_sched_scan_req(phy, vif, wowlan->nd_config); + req.wow_ctrl_tlv.trigger |= bit(5); + mt7921_mcu_sched_scan_enable(phy, vif, suspend); + } + + if (mt76_is_mmio(&dev->mt76)) + req.wow_ctrl_tlv.wakeup_hif = wow_pcie; + else if (mt76_is_usb(&dev->mt76)) + req.wow_ctrl_tlv.wakeup_hif = wow_usb; + else if (mt76_is_sdio(&dev->mt76)) + req.wow_ctrl_tlv.wakeup_hif = wow_gpio; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_suspend, &req, + sizeof(req), true); +} + +static int +mt7921_mcu_set_wow_pattern(struct mt7921_dev *dev, + struct ieee80211_vif *vif, + u8 index, bool enable, + struct cfg80211_pkt_pattern *pattern) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_wow_pattern_tlv *ptlv; + struct sk_buff *skb; + struct req_hdr { + u8 bss_idx; + u8 pad[3]; + } __packed hdr = { + .bss_idx = mvif->mt76.idx, + }; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, + sizeof(hdr) + sizeof(*ptlv)); + if (!skb) + return -enomem; + + skb_put_data(skb, &hdr, sizeof(hdr)); + ptlv = (struct mt7921_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv)); + ptlv->tag = cpu_to_le16(uni_suspend_wow_pattern); + ptlv->len = cpu_to_le16(sizeof(*ptlv)); + ptlv->data_len = pattern->pattern_len; + ptlv->enable = enable; + ptlv->index = index; + + memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len); + memcpy(ptlv->mask, 
pattern->mask, pattern->pattern_len / 8); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_uni_cmd_suspend, + true); +} + +static int +mt7921_mcu_set_suspend_mode(struct mt7921_dev *dev, + struct ieee80211_vif *vif, + bool enable, u8 mdtim, bool wow_suspend) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7921_suspend_tlv suspend_tlv; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .suspend_tlv = { + .tag = cpu_to_le16(uni_suspend_mode_setting), + .len = cpu_to_le16(sizeof(struct mt7921_suspend_tlv)), + .enable = enable, + .mdtim = mdtim, + .wow_suspend = wow_suspend, + }, + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_suspend, &req, + sizeof(req), true); +} + +static int +mt7921_mcu_set_gtk_rekey(struct mt7921_dev *dev, + struct ieee80211_vif *vif, + bool suspend) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7921_gtk_rekey_tlv gtk_tlv; + } __packed req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .gtk_tlv = { + .tag = cpu_to_le16(uni_offload_offload_gtk_rekey), + .len = cpu_to_le16(sizeof(struct mt7921_gtk_rekey_tlv)), + .rekey_mode = !suspend, + }, + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_offload, &req, + sizeof(req), true); +} + +static int +mt7921_mcu_set_arp_filter(struct mt7921_dev *dev, struct ieee80211_vif *vif, + bool suspend) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7921_arpns_tlv arpns; + } req = { + .hdr = { + .bss_idx = mvif->mt76.idx, + }, + .arpns = { + .tag = cpu_to_le16(uni_offload_offload_arp), + .len = cpu_to_le16(sizeof(struct mt7921_arpns_tlv)), + .mode = suspend, + }, + }; + + return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_offload, &req, + sizeof(req), true); +} + +void 
mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif) +{ + struct mt7921_phy *phy = priv; + bool suspend = test_bit(mt76_state_suspend, &phy->mt76->state); + struct ieee80211_hw *hw = phy->mt76->hw; + struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config; + int i; + + mt7921_mcu_set_gtk_rekey(phy->dev, vif, suspend); + mt7921_mcu_set_arp_filter(phy->dev, vif, suspend); + + mt7921_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true); + + for (i = 0; i < wowlan->n_patterns; i++) + mt7921_mcu_set_wow_pattern(phy->dev, vif, i, suspend, + &wowlan->patterns[i]); + mt7921_mcu_set_wow_ctrl(phy, vif, suspend, wowlan); +} + +static void +mt7921_mcu_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct ieee80211_key_conf *key, + void *data) +{ + struct mt7921_gtk_rekey_tlv *gtk_tlv = data; + u32 cipher; + + if (key->cipher != wlan_cipher_suite_aes_cmac && + key->cipher != wlan_cipher_suite_ccmp && + key->cipher != wlan_cipher_suite_tkip) + return; + + if (key->cipher == wlan_cipher_suite_tkip) { + gtk_tlv->proto = cpu_to_le32(nl80211_wpa_version_1); + cipher = bit(3); + } else { + gtk_tlv->proto = cpu_to_le32(nl80211_wpa_version_2); + cipher = bit(4); + } + + /* we are assuming here to have a single pairwise key */ + if (key->flags & ieee80211_key_flag_pairwise) { + gtk_tlv->pairwise_cipher = cpu_to_le32(cipher); + gtk_tlv->group_cipher = cpu_to_le32(cipher); + gtk_tlv->keyid = key->keyidx; + } +} + +int mt7921_mcu_update_gtk_rekey(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *key) +{ + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_gtk_rekey_tlv *gtk_tlv; + struct sk_buff *skb; + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr = { + .bss_idx = mvif->mt76.idx, + }; + + skb = mt76_mcu_msg_alloc(&dev->mt76, null, + sizeof(hdr) + sizeof(*gtk_tlv)); + if (!skb) + return -enomem; + + 
skb_put_data(skb, &hdr, sizeof(hdr)); + gtk_tlv = (struct mt7921_gtk_rekey_tlv *)skb_put(skb, + sizeof(*gtk_tlv)); + gtk_tlv->tag = cpu_to_le16(uni_offload_offload_gtk_rekey); + gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv)); + gtk_tlv->rekey_mode = 2; + gtk_tlv->option = 1; + + rcu_read_lock(); + ieee80211_iter_keys_rcu(hw, vif, mt7921_mcu_key_iter, gtk_tlv); + rcu_read_unlock(); + + memcpy(gtk_tlv->kek, key->kek, nl80211_kek_len); + memcpy(gtk_tlv->kck, key->kck, nl80211_kck_len); + memcpy(gtk_tlv->replay_ctr, key->replay_ctr, nl80211_replay_ctr_len); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_uni_cmd_offload, + true); +} +#endif /* config_pm */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +enum { + wow_usb = 1, + wow_pcie = 2, + wow_gpio = 3, +}; + +struct mt7921_wow_ctrl_tlv { + __le16 tag; + __le16 len; + u8 cmd; /* 0x1: pm_wowlan_req_start + * 0x2: pm_wowlan_req_stop + * 0x3: pm_wowlan_param_clear + */ + u8 trigger; /* 0: none + * bit(0): nl80211_wowlan_trig_magic_pkt + * bit(1): nl80211_wowlan_trig_any + * bit(2): nl80211_wowlan_trig_disconnect + * bit(3): nl80211_wowlan_trig_gtk_rekey_failure + * bit(4): beacon_lost + * bit(5): nl80211_wowlan_trig_net_detect + */ + u8 wakeup_hif; /* 0x0: hif_sdio + * 0x1: hif_usb + * 0x2: hif_pcie + * 0x3: hif_gpio + */ + u8 pad; + u8 rsv[4]; +} __packed; + +struct mt7921_wow_gpio_param_tlv { + __le16 tag; + __le16 len; + u8 gpio_pin; + u8 trigger_lvl; + u8 pad[2]; + __le32 gpio_interval; + u8 rsv[4]; +} __packed; + +#define mt7921_wow_mask_max_len 16 +#define mt7921_wow_patten_max_len 128 +struct mt7921_wow_pattern_tlv { + __le16 tag; + __le16 len; + u8 index; /* pattern index */ + u8 enable; /* 0: disable + * 1: enable + */ + u8 data_len; /* pattern length */ + u8 pad; + u8 mask[mt7921_wow_mask_max_len]; + u8 
pattern[mt7921_wow_patten_max_len]; + u8 rsv[4]; +} __packed; + +struct mt7921_suspend_tlv { + __le16 tag; + __le16 len; + u8 enable; /* 0: suspend mode disabled + * 1: suspend mode enabled + */ + u8 mdtim; /* lp parameter */ + u8 wow_suspend; /* 0: update by origin policy + * 1: update by wow dtim + */ + u8 pad[5]; +} __packed; + +struct mt7921_gtk_rekey_tlv { + __le16 tag; + __le16 len; + u8 kek[nl80211_kek_len]; + u8 kck[nl80211_kck_len]; + u8 replay_ctr[nl80211_replay_ctr_len]; + u8 rekey_mode; /* 0: rekey offload enable + * 1: rekey offload disable + * 2: rekey update + */ + u8 keyid; + u8 pad[2]; + __le32 proto; /* wpa-rsn-wapi-opsn */ + __le32 pairwise_cipher; + __le32 group_cipher; + __le32 key_mgmt; /* none-psk-ieee802.1x */ + __le32 mgmt_group_cipher; + u8 option; /* 1: rekey data update without enabling offload */ + u8 reserverd[3]; +} __packed; + +struct mt7921_arpns_tlv { + __le16 tag; + __le16 len; + u8 mode; + u8 ips_num; + u8 option; + u8 pad[1]; +} __packed; + diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +int mt7921_mcu_set_hif_suspend(struct mt7921_dev *dev, bool suspend); +void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, + struct ieee80211_vif *vif); +int mt7921_mcu_update_gtk_rekey(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *key); +int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +#ifdef config_pm +static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + 
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + bool hif_suspend; + int i, err; + + hif_suspend = !test_bit(mt76_state_suspend, &dev->mphy.state); + if (hif_suspend) { + err = mt7921_mcu_set_hif_suspend(dev, true); + if (err) + return err; + } + + napi_disable(&mdev->tx_napi); + mt76_worker_disable(&mdev->tx_worker); + + mt76_for_each_q_rx(mdev, i) { + napi_disable(&mdev->napi[i]); + } + tasklet_kill(&dev->irq_tasklet); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), true); + + /* wait until dma is idle */ + mt76_poll(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_busy | + mt_wfdma0_glo_cfg_rx_dma_busy, 0, 1000); + + /* put dma disabled */ + mt76_clear(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | mt_wfdma0_glo_cfg_rx_dma_en); + + /* disable interrupt */ + mt76_wr(dev, mt_wfdma0_host_int_ena, 0); + + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (err) + goto restore; + + return 0; + +restore: + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + if (hif_suspend) + mt7921_mcu_set_hif_suspend(dev, false); + + return err; +} + +static int mt7921_pci_resume(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); + int i, err; + + err = pci_set_power_state(pdev, pci_d0); + if (err) + return err; + + pci_restore_state(pdev); + + /* enable interrupt */ + mt7921_l1_wr(dev, mt_pcie_mac_int_enable, 0xff); + mt7921_irq_enable(dev, mt_int_rx_done_all | mt_int_tx_done_all | + mt_int_mcu_cmd); + + /* put dma enabled */ + mt76_set(dev, mt_wfdma0_glo_cfg, + mt_wfdma0_glo_cfg_tx_dma_en | mt_wfdma0_glo_cfg_rx_dma_en); + + mt76_worker_enable(&mdev->tx_worker); + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + + if (!test_bit(mt76_state_suspend, 
&dev->mphy.state)) + err = mt7921_mcu_set_hif_suspend(dev, false); + + return err; +} +#endif /* config_pm */ + +#ifdef config_pm + .suspend = mt7921_pci_suspend, + .resume = mt7921_pci_resume, +#endif /* config_pm */
Networking
ffa1bf97425bd511b105ce769976e20a845a71e9
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: rely on mt76_connac_mcu common library
rely on mt76_connac_mcu common library and remove duplicated code
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['kconfig', 'h', 'c']
10
411
1,811
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h +static inline bool is_mt7921(struct mt76_dev *dev) +{ + return mt76_chip(dev) == 0x7961; +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c + int cmd; - return mt76_mcu_send_msg(dev, mcu_cmd_target_address_len_req, &req, - sizeof(req), true); + if (is_mt7921(dev) && + (req.addr == cpu_to_le32(mcu_patch_address) || addr == 0x900000)) + cmd = mcu_cmd_patch_start_req; + else + cmd = mcu_cmd_target_address_len_req; + + return mt76_mcu_send_msg(dev, cmd, &req, sizeof(req), true); - .wlan_idx_lo = wcid ? wcid->idx : 0, + mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo, + &hdr.wlan_idx_hi); - .wlan_idx_lo = wcid ? 
wcid->idx : 0, + mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo, + &hdr.wlan_idx_hi); - eth_broadcast_addr(generic->peer_addr); + if (is_mt7921(dev) && + vif->type == nl80211_iftype_station) + memcpy(generic->peer_addr, vif->bss_conf.bssid, + eth_alen); + else + eth_broadcast_addr(generic->peer_addr); + + if (is_mt7921(dev)) + return; + +static void +mt76_connac_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + struct sta_rec_amsdu *amsdu; + struct tlv *tlv; + + if (vif->type != nl80211_iftype_ap && + vif->type != nl80211_iftype_station) + return; + + if (!sta->max_amsdu_len) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_hw_amsdu, sizeof(*amsdu)); + amsdu = (struct sta_rec_amsdu *)tlv; + amsdu->max_amsdu_num = 8; + amsdu->amsdu_en = true; + amsdu->max_mpdu_size = sta->max_amsdu_len >= + ieee80211_max_mpdu_len_vht_7991; + + wcid->amsdu = true; +} + +#define he_phy(p, c) u8_get_bits(c, ieee80211_he_phy_##p) +#define he_mac(m, c) u8_get_bits(c, ieee80211_he_mac_##m) +static void +mt76_connac_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; + struct sta_rec_he *he; + struct tlv *tlv; + u32 cap = 0; + + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_he, sizeof(*he)); + + he = (struct sta_rec_he *)tlv; + + if (elem->mac_cap_info[0] & ieee80211_he_mac_cap0_htc_he) + cap |= sta_rec_he_cap_htc; + + if (elem->mac_cap_info[2] & ieee80211_he_mac_cap2_bsr) + cap |= sta_rec_he_cap_bsr; + + if (elem->mac_cap_info[3] & ieee80211_he_mac_cap3_omi_control) + cap |= sta_rec_he_cap_om; + + if (elem->mac_cap_info[4] & ieee80211_he_mac_cap4_amdsu_in_ampdu) + cap |= sta_rec_he_cap_amsdu_in_ampdu; + + if (elem->mac_cap_info[4] & ieee80211_he_mac_cap4_bqr) + cap |= sta_rec_he_cap_bqr; + + if (elem->phy_cap_info[0] & + 
(ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_2g | + ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_5g)) + cap |= sta_rec_he_cap_bw20_ru242_support; + + if (elem->phy_cap_info[1] & + ieee80211_he_phy_cap1_ldpc_coding_in_payload) + cap |= sta_rec_he_cap_ldpc; + + if (elem->phy_cap_info[1] & + ieee80211_he_phy_cap1_he_ltf_and_gi_for_he_ppdus_0_8us) + cap |= sta_rec_he_cap_su_ppdu_1ltf_8us_gi; + + if (elem->phy_cap_info[2] & + ieee80211_he_phy_cap2_ndp_4x_ltf_and_3_2us) + cap |= sta_rec_he_cap_ndp_4ltf_3dot2ms_gi; + + if (elem->phy_cap_info[2] & + ieee80211_he_phy_cap2_stbc_tx_under_80mhz) + cap |= sta_rec_he_cap_le_eq_80m_tx_stbc; + + if (elem->phy_cap_info[2] & + ieee80211_he_phy_cap2_stbc_rx_under_80mhz) + cap |= sta_rec_he_cap_le_eq_80m_rx_stbc; + + if (elem->phy_cap_info[6] & + ieee80211_he_phy_cap6_partial_bw_ext_range) + cap |= sta_rec_he_cap_partial_bw_ext_range; + + if (elem->phy_cap_info[7] & + ieee80211_he_phy_cap7_he_su_mu_ppdu_4xltf_and_08_us_gi) + cap |= sta_rec_he_cap_su_mu_ppdu_4ltf_8us_gi; + + if (elem->phy_cap_info[7] & + ieee80211_he_phy_cap7_stbc_tx_above_80mhz) + cap |= sta_rec_he_cap_gt_80m_tx_stbc; + + if (elem->phy_cap_info[7] & + ieee80211_he_phy_cap7_stbc_rx_above_80mhz) + cap |= sta_rec_he_cap_gt_80m_rx_stbc; + + if (elem->phy_cap_info[8] & + ieee80211_he_phy_cap8_he_er_su_ppdu_4xltf_and_08_us_gi) + cap |= sta_rec_he_cap_er_su_ppdu_4ltf_8us_gi; + + if (elem->phy_cap_info[8] & + ieee80211_he_phy_cap8_he_er_su_1xltf_and_08_us_gi) + cap |= sta_rec_he_cap_er_su_ppdu_1ltf_8us_gi; + + if (elem->phy_cap_info[9] & + ieee80211_he_phy_cap9_non_triggered_cqi_feedback) + cap |= sta_rec_he_cap_trig_cqi_fk; + + if (elem->phy_cap_info[9] & + ieee80211_he_phy_cap9_tx_1024_qam_less_than_242_tone_ru) + cap |= sta_rec_he_cap_tx_1024qam_under_ru242; + + if (elem->phy_cap_info[9] & + ieee80211_he_phy_cap9_rx_1024_qam_less_than_242_tone_ru) + cap |= sta_rec_he_cap_rx_1024qam_under_ru242; + + he->he_cap = cpu_to_le32(cap); + + switch 
(sta->bandwidth) { + case ieee80211_sta_rx_bw_160: + if (elem->phy_cap_info[0] & + ieee80211_he_phy_cap0_channel_width_set_80plus80_mhz_in_5g) + he->max_nss_mcs[cmd_he_mcs_bw8080] = + he_cap->he_mcs_nss_supp.rx_mcs_80p80; + + he->max_nss_mcs[cmd_he_mcs_bw160] = + he_cap->he_mcs_nss_supp.rx_mcs_160; + fallthrough; + default: + he->max_nss_mcs[cmd_he_mcs_bw80] = + he_cap->he_mcs_nss_supp.rx_mcs_80; + break; + } + + he->t_frame_dur = + he_mac(cap1_tf_mac_pad_dur_mask, elem->mac_cap_info[1]); + he->max_ampdu_exp = + he_mac(cap3_max_ampdu_len_exp_mask, elem->mac_cap_info[3]); + + he->bw_set = + he_phy(cap0_channel_width_set_mask, elem->phy_cap_info[0]); + he->device_class = + he_phy(cap1_device_class_a, elem->phy_cap_info[1]); + he->punc_pream_rx = + he_phy(cap1_preamble_punc_rx_mask, elem->phy_cap_info[1]); + + he->dcm_tx_mode = + he_phy(cap3_dcm_max_const_tx_mask, elem->phy_cap_info[3]); + he->dcm_tx_max_nss = + he_phy(cap3_dcm_max_tx_nss_2, elem->phy_cap_info[3]); + he->dcm_rx_mode = + he_phy(cap3_dcm_max_const_rx_mask, elem->phy_cap_info[3]); + he->dcm_rx_max_nss = + he_phy(cap3_dcm_max_rx_nss_2, elem->phy_cap_info[3]); + he->dcm_rx_max_nss = + he_phy(cap8_dcm_max_ru_mask, elem->phy_cap_info[8]); + + he->pkt_ext = 2; +} + +static u8 +mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, + enum nl80211_band band, struct ieee80211_sta *sta) +{ + struct ieee80211_sta_ht_cap *ht_cap; + struct ieee80211_sta_vht_cap *vht_cap; + const struct ieee80211_sta_he_cap *he_cap; + u8 mode = 0; + + if (sta) { + ht_cap = &sta->ht_cap; + vht_cap = &sta->vht_cap; + he_cap = &sta->he_cap; + } else { + struct ieee80211_supported_band *sband; + + sband = mphy->hw->wiphy->bands[band]; + ht_cap = &sband->ht_cap; + vht_cap = &sband->vht_cap; + he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + } + + if (band == nl80211_band_2ghz) { + mode |= phy_type_bit_hr_dsss | phy_type_bit_erp; + + if (ht_cap->ht_supported) + mode |= phy_type_bit_ht; + + if 
(he_cap->has_he) + mode |= phy_type_bit_he; + } else if (band == nl80211_band_5ghz) { + mode |= phy_type_bit_ofdm; + + if (ht_cap->ht_supported) + mode |= phy_type_bit_ht; + + if (vht_cap->vht_supported) + mode |= phy_type_bit_vht; + + if (he_cap->has_he) + mode |= phy_type_bit_he; + } + + return mode; +} + + struct cfg80211_chan_def *chandef = &mphy->chandef; + enum nl80211_band band = chandef->chan->band; + struct mt76_dev *dev = mphy->dev; + struct sta_rec_ra_info *ra_info; + struct sta_rec_state *state; + struct sta_rec_phy *phy; + int len; - tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_vht, - sizeof(*vht) - 4); + len = is_mt7921(dev) ? sizeof(*vht) : sizeof(*vht) - 4; + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_vht, len); + + if (!is_mt7921(dev)) + return; + + if (sta->ht_cap.ht_supported) + mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif); + + /* starec he */ + if (sta->he_cap.has_he) + mt76_connac_mcu_sta_he_tlv(skb, sta); + + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_phy, sizeof(*phy)); + phy = (struct sta_rec_phy *)tlv; + phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta); + phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); + + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_ra, sizeof(*ra_info)); + ra_info = (struct sta_rec_ra_info *)tlv; + ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]); + + if (sta->ht_cap.ht_supported) + memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask, + ht_mcs_mask_num); + + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_state, sizeof(*state)); + state = (struct sta_rec_state *)tlv; + state->state = 2; + + if (sta->vht_cap.vht_supported) { + state->vht_opmode = sta->bandwidth; + state->vht_opmode |= (sta->rx_nss - 1) << + ieee80211_opmode_notif_rx_nss_shift; + } - if (sta->ht_cap.ht_supported) { + if (!is_mt7921(dev) && sta->ht_cap.ht_supported) { + if (is_mt7921(dev)) + return; + +static u8 +mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, + enum nl80211_band band, + struct 
ieee80211_sta *sta) +{ + struct mt76_dev *dev = phy->dev; + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_sta_vht_cap *vht_cap; + struct ieee80211_sta_ht_cap *ht_cap; + u8 mode = 0; + + if (!is_mt7921(dev)) + return 0x38; + + if (sta) { + ht_cap = &sta->ht_cap; + vht_cap = &sta->vht_cap; + he_cap = &sta->he_cap; + } else { + struct ieee80211_supported_band *sband; + + sband = phy->hw->wiphy->bands[band]; + ht_cap = &sband->ht_cap; + vht_cap = &sband->vht_cap; + he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); + } + + if (band == nl80211_band_2ghz) { + mode |= phy_mode_b | phy_mode_g; + + if (ht_cap->ht_supported) + mode |= phy_mode_gn; + + if (he_cap->has_he) + mode |= phy_mode_ax_24g; + } else if (band == nl80211_band_5ghz) { + mode |= phy_mode_a; + + if (ht_cap->ht_supported) + mode |= phy_mode_an; + + if (vht_cap->vht_supported) + mode |= phy_mode_ac; + + if (he_cap->has_he) + mode |= phy_mode_ax_5g; + } + + return mode; +} + + enum nl80211_band band = chandef->chan->band; - .phymode = 0x38, + .phymode = mt76_connac_get_phy_mode(phy, vif, band, null), diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h + mcu_cmd_fwlog_2_host = mcu_ce_prefix | 0xc5, + mcu_cmd_get_wtbl = mcu_ce_prefix | 0xcd, + uni_bss_info_ps = 21, + uni_bss_info_bcnft = 22, +#define to_wcid_lo(id) field_get(genmask(7, 0), (u16)id) +#define to_wcid_hi(id) field_get(genmask(9, 8), (u16)id) + +static inline void +mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid, + u8 *wlan_idx_lo, u8 *wlan_idx_hi) +{ + *wlan_idx_hi = 0; + + if (is_mt7921(dev)) { + *wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0; + *wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0; + } else { + *wlan_idx_lo = wcid ? 
wcid->idx : 0; + } +} + diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/kconfig --- a/drivers/net/wireless/mediatek/mt76/mt7921/kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7921/kconfig - select mt76_core + select mt76_connac_lib diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c +#include "mcu.h" - mt7921_mcu_set_rts_thresh(&dev->phy, 0x92b); + mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c - mt7921_mcu_set_mac(dev, 0, true, false); - mt7921_mcu_set_channel_domain(phy); + mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, true, false); + mt76_connac_mcu_set_channel_domain(phy->mt76); + - mt7921_mcu_set_mac(dev, 0, false, false); + mt76_connac_mcu_set_mac_enable(&dev->mt76, 0, false, false); - ret = mt7921_mcu_uni_add_dev(dev, vif, true); + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, + true); - mt7921_mcu_uni_add_dev(dev, vif, false); + mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); - mt7921_mcu_uni_add_bss(&dev->phy, vif, true); + mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, + true); + - ret = mt7921_mcu_uni_add_sta(dev, vif, sta, true); + ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, + true, mcu_uni_cmd_sta_rec_update); - mt7921_mcu_uni_add_sta(dev, vif, sta, false); + mt76_connac_mcu_add_sta_cmd(&dev->mphy, vif, sta, &msta->wcid, false, + mcu_uni_cmd_sta_rec_update); - if (vif->type == nl80211_iftype_station && !sta->tdls) - mt7921_mcu_uni_add_bss(&dev->phy, vif, false); + + if (vif->type == nl80211_iftype_station && 
!sta->tdls) { + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; + + mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, + false); + } - struct mt7921_phy *phy = mt7921_hw_phy(hw); - mt7921_mcu_set_rts_thresh(phy, val); + mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, 0); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c -#define mcu_patch_address 0x200000 - -#define he_phy(p, c) u8_get_bits(c, ieee80211_he_phy_##p) -#define he_mac(m, c) u8_get_bits(c, ieee80211_he_mac_##m) - -static const struct ieee80211_sta_he_cap * -mt7921_get_he_phy_cap(struct mt7921_phy *phy, struct ieee80211_vif *vif) -{ - struct ieee80211_supported_band *sband; - enum nl80211_band band; - - band = phy->mt76->chandef.chan->band; - sband = phy->mt76->hw->wiphy->bands[band]; - - return ieee80211_get_he_iftype_cap(sband, vif->type); -} - -static u8 -mt7921_get_phy_mode(struct mt7921_dev *dev, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) -{ - struct ieee80211_sta_ht_cap *ht_cap; - struct ieee80211_sta_vht_cap *vht_cap; - const struct ieee80211_sta_he_cap *he_cap; - u8 mode = 0; - - if (sta) { - ht_cap = &sta->ht_cap; - vht_cap = &sta->vht_cap; - he_cap = &sta->he_cap; - } else { - struct ieee80211_supported_band *sband; - struct mt7921_phy *phy = &dev->phy; - - sband = phy->mt76->hw->wiphy->bands[band]; - ht_cap = &sband->ht_cap; - vht_cap = &sband->vht_cap; - he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); - } - - if (band == nl80211_band_2ghz) { - mode |= phy_mode_b | phy_mode_g; - - if (ht_cap->ht_supported) - mode |= phy_mode_gn; - - if (he_cap->has_he) - mode |= phy_mode_ax_24g; - } else if (band == nl80211_band_5ghz) { - mode |= phy_mode_a; - - if (ht_cap->ht_supported) - mode |= phy_mode_an; - - if (vht_cap->vht_supported) - mode |= phy_mode_ac; - - if 
(he_cap->has_he) - mode |= phy_mode_ax_5g; - } - - return mode; -} - -static u8 -mt7921_get_phy_mode_v2(struct mt7921_dev *dev, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) -{ - struct ieee80211_sta_ht_cap *ht_cap; - struct ieee80211_sta_vht_cap *vht_cap; - const struct ieee80211_sta_he_cap *he_cap; - u8 mode = 0; - - if (sta) { - ht_cap = &sta->ht_cap; - vht_cap = &sta->vht_cap; - he_cap = &sta->he_cap; - } else { - struct ieee80211_supported_band *sband; - struct mt7921_phy *phy = &dev->phy; - - sband = phy->mt76->hw->wiphy->bands[band]; - ht_cap = &sband->ht_cap; - vht_cap = &sband->vht_cap; - he_cap = ieee80211_get_he_iftype_cap(sband, vif->type); - } - - if (band == nl80211_band_2ghz) { - mode |= phy_type_bit_hr_dsss | phy_type_bit_erp; - - if (ht_cap->ht_supported) - mode |= phy_type_bit_ht; - - if (he_cap->has_he) - mode |= phy_type_bit_he; - } else if (band == nl80211_band_5ghz) { - mode |= phy_type_bit_ofdm; - - if (ht_cap->ht_supported) - mode |= phy_type_bit_ht; - - if (vht_cap->vht_supported) - mode |= phy_type_bit_vht; - - if (he_cap->has_he) - mode |= phy_type_bit_he; - } - - return mode; -} - - case mcu_ext_cmd_thermal_ctrl: + case mcu_ext_cmd_get_temp: -static void -mt7921_mcu_beacon_loss_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_beacon_loss_event *event = priv; - - if (mvif->mt76.idx != event->bss_idx) - return; - - if (!(vif->driver_flags & ieee80211_vif_beacon_filter)) - return; - - ieee80211_beacon_loss(vif); -} - - struct mt7921_beacon_loss_event *event; + struct mt76_connac_beacon_loss_event *event; - event = (struct mt7921_beacon_loss_event *)skb->data; + event = (struct mt76_connac_beacon_loss_event *)skb->data; - mt7921_mcu_beacon_loss_iter, event); + mt76_connac_mcu_beacon_loss_iter, event); - struct mt7921_mcu_bss_event *event; + struct mt76_connac_mcu_bss_event *event; - event = (struct mt7921_mcu_bss_event 
*)(skb->data + - sizeof(struct mt7921_mcu_rxd)); + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + event = (struct mt76_connac_mcu_bss_event *)skb->data; -static struct sk_buff * -mt7921_mcu_alloc_sta_req(struct mt7921_dev *dev, struct mt7921_vif *mvif, - struct mt7921_sta *msta, int len) -{ - struct sta_req_hdr hdr = { - .bss_idx = mvif->mt76.idx, - .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0, - .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0, - .muar_idx = msta ? mvif->mt76.omac_idx : 0, - .is_tlv_append = 1, - }; - struct sk_buff *skb; - - skb = mt76_mcu_msg_alloc(&dev->mt76, null, len); - if (!skb) - return err_ptr(-enomem); - - skb_put_data(skb, &hdr, sizeof(hdr)); - - return skb; -} - -static struct wtbl_req_hdr * -mt7921_mcu_alloc_wtbl_req(struct mt7921_dev *dev, struct mt7921_sta *msta, - int cmd, void *sta_wtbl, struct sk_buff **skb) -{ - struct tlv *sta_hdr = sta_wtbl; - struct wtbl_req_hdr hdr = { - .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), - .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), - .operation = cmd, - }; - struct sk_buff *nskb = *skb; - - if (!nskb) { - nskb = mt76_mcu_msg_alloc(&dev->mt76, null, - mt7921_wtbl_update_ba_size); - if (!nskb) - return err_ptr(-enomem); - - *skb = nskb; - } - - if (sta_hdr) - sta_hdr->len = cpu_to_le16(sizeof(hdr)); - - return skb_put_data(nskb, &hdr, sizeof(hdr)); -} - -static struct tlv * -mt7921_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len, - void *sta_ntlv, void *sta_wtbl) -{ - struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv; - struct tlv *sta_hdr = sta_wtbl; - struct tlv *ptlv, tlv = { - .tag = cpu_to_le16(tag), - .len = cpu_to_le16(len), - }; - u16 ntlv; - - ptlv = skb_put(skb, len); - memcpy(ptlv, &tlv, sizeof(tlv)); - - ntlv = le16_to_cpu(ntlv_hdr->tlv_num); - ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1); - - if (sta_hdr) { - u16 size = le16_to_cpu(sta_hdr->len); - - sta_hdr->len = cpu_to_le16(size + len); - } - - return ptlv; -} - -static struct tlv * -mt7921_mcu_add_tlv(struct sk_buff *skb, 
int tag, int len) -{ - return mt7921_mcu_add_nested_tlv(skb, tag, len, skb->data, null); -} - -static void -mt7921_mcu_uni_bss_he_tlv(struct tlv *tlv, struct ieee80211_vif *vif, - struct mt7921_phy *phy) -{ -#define default_he_pe_duration 4 -#define default_he_duration_rts_thres 1023 - const struct ieee80211_sta_he_cap *cap; - struct bss_info_uni_he *he; - - cap = mt7921_get_he_phy_cap(phy, vif); - - he = (struct bss_info_uni_he *)tlv; - he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; - if (!he->he_pe_duration) - he->he_pe_duration = default_he_pe_duration; - - he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); - if (!he->he_rts_thres) - he->he_rts_thres = cpu_to_le16(default_he_duration_rts_thres); - - he->max_nss_mcs[cmd_he_mcs_bw80] = cap->he_mcs_nss_supp.tx_mcs_80; - he->max_nss_mcs[cmd_he_mcs_bw160] = cap->he_mcs_nss_supp.tx_mcs_160; - he->max_nss_mcs[cmd_he_mcs_bw8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; -} - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_key_v2, sizeof(*sec)); + tlv = mt76_connac_mcu_add_tlv(skb, sta_rec_key_v2, sizeof(*sec)); - int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec); - skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, len); + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, + &msta->wcid); -static void -mt7921_mcu_sta_ba_tlv(struct sk_buff *skb, - struct ieee80211_ampdu_params *params, - bool enable, bool tx) -{ - struct sta_rec_ba *ba; - struct tlv *tlv; - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_ba, sizeof(*ba)); - - ba = (struct sta_rec_ba *)tlv; - ba->ba_type = tx ? 
mt_ba_type_originator : mt_ba_type_recipient, - ba->winsize = cpu_to_le16(params->buf_size); - ba->ssn = cpu_to_le16(params->ssn); - ba->ba_en = enable << params->tid; - ba->amsdu = params->amsdu; - ba->tid = params->tid; -} - -static void -mt7921_mcu_wtbl_ba_tlv(struct sk_buff *skb, - struct ieee80211_ampdu_params *params, - bool enable, bool tx, void *sta_wtbl, - void *wtbl_tlv) -{ - struct wtbl_ba *ba; - struct tlv *tlv; - - tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_ba, sizeof(*ba), - wtbl_tlv, sta_wtbl); - - ba = (struct wtbl_ba *)tlv; - ba->tid = params->tid; - - if (tx) { - ba->ba_type = mt_ba_type_originator; - ba->sn = enable ? cpu_to_le16(params->ssn) : 0; - ba->ba_en = enable; - } else { - memcpy(ba->peer_addr, params->sta->addr, eth_alen); - ba->ba_type = mt_ba_type_recipient; - ba->rst_ba_tid = params->tid; - ba->rst_ba_sel = rst_ba_mac_tid_match; - ba->rst_ba_sb = 1; - } - - if (enable && tx) - ba->ba_winsize = cpu_to_le16(params->buf_size); -} - -static int -mt7921_mcu_sta_ba(struct mt7921_dev *dev, - struct ieee80211_ampdu_params *params, - bool enable, bool tx, int cmd) +int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, + struct ieee80211_ampdu_params *params, + bool enable) - struct mt7921_vif *mvif = msta->vif; - struct wtbl_req_hdr *wtbl_hdr; - struct tlv *sta_wtbl; - struct sk_buff *skb; - int ret; - if (enable && tx && !params->amsdu) + if (enable && !params->amsdu) - skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, - mt7921_sta_update_max_size); - if (is_err(skb)) - return ptr_err(skb); - - sta_wtbl = mt7921_mcu_add_tlv(skb, sta_rec_wtbl, sizeof(struct tlv)); - - wtbl_hdr = mt7921_mcu_alloc_wtbl_req(dev, msta, wtbl_set, sta_wtbl, - &skb); - mt7921_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); - - ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); - if (ret) - return ret; - - skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, - mt7921_sta_update_max_size); - if (is_err(skb)) - return ptr_err(skb); - - mt7921_mcu_sta_ba_tlv(skb, 
params, enable, tx); - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); -} - -int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev, - struct ieee80211_ampdu_params *params, - bool enable) -{ - return mt7921_mcu_sta_ba(dev, params, enable, true, mcu_uni_cmd_sta_rec_update); + return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, + enable, true); - return mt7921_mcu_sta_ba(dev, params, enable, false, mcu_uni_cmd_sta_rec_update); -} - -static void -mt7921_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, void *sta_wtbl, - void *wtbl_tlv) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct wtbl_generic *generic; - struct wtbl_rx *rx; - struct tlv *tlv; - - tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_generic, sizeof(*generic), - wtbl_tlv, sta_wtbl); - - generic = (struct wtbl_generic *)tlv; - - if (sta) { - if (vif->type == nl80211_iftype_station) - generic->partial_aid = cpu_to_le16(vif->bss_conf.aid); - else - generic->partial_aid = cpu_to_le16(sta->aid); - memcpy(generic->peer_addr, sta->addr, eth_alen); - generic->muar_idx = mvif->mt76.omac_idx; - generic->qos = sta->wme; - } else { - /* use bssid in station mode */ - if (vif->type == nl80211_iftype_station) - memcpy(generic->peer_addr, vif->bss_conf.bssid, - eth_alen); - else - eth_broadcast_addr(generic->peer_addr); - - generic->muar_idx = 0xe; - } - - tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_rx, sizeof(*rx), - wtbl_tlv, sta_wtbl); - - rx = (struct wtbl_rx *)tlv; - rx->rca1 = sta ? 
vif->type != nl80211_iftype_ap : 1; - rx->rca2 = 1; - rx->rv = 1; -} - -static void -mt7921_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) -{ -#define extra_info_ver bit(0) -#define extra_info_new bit(1) - struct sta_rec_basic *basic; - struct tlv *tlv; - int conn_type; - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_basic, sizeof(*basic)); - - basic = (struct sta_rec_basic *)tlv; - basic->extra_info = cpu_to_le16(extra_info_ver); - - if (enable) { - basic->extra_info |= cpu_to_le16(extra_info_new); - basic->conn_state = conn_state_port_secure; - } else { - basic->conn_state = conn_state_disconnect; - } - - if (!sta) { - basic->conn_type = cpu_to_le32(connection_infra_bc); - eth_broadcast_addr(basic->peer_addr); - return; - } - - switch (vif->type) { - case nl80211_iftype_mesh_point: - case nl80211_iftype_ap: - if (vif->p2p) - conn_type = connection_p2p_gc; - else - conn_type = connection_infra_sta; - basic->conn_type = cpu_to_le32(conn_type); - basic->aid = cpu_to_le16(sta->aid); - break; - case nl80211_iftype_station: - if (vif->p2p) - conn_type = connection_p2p_go; - else - conn_type = connection_infra_ap; - basic->conn_type = cpu_to_le32(conn_type); - basic->aid = cpu_to_le16(vif->bss_conf.aid); - break; - case nl80211_iftype_adhoc: - basic->conn_type = cpu_to_le32(connection_ibss_adhoc); - basic->aid = cpu_to_le16(sta->aid); - break; - default: - warn_on(1); - break; - } - - memcpy(basic->peer_addr, sta->addr, eth_alen); - basic->qos = sta->wme; -} - -static void -mt7921_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) -{ - struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; - struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem; - struct sta_rec_he *he; - struct tlv *tlv; - u32 cap = 0; - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_he, sizeof(*he)); - - he = (struct sta_rec_he *)tlv; - - if (elem->mac_cap_info[0] & ieee80211_he_mac_cap0_htc_he) - cap |= sta_rec_he_cap_htc; - - if 
(elem->mac_cap_info[2] & ieee80211_he_mac_cap2_bsr) - cap |= sta_rec_he_cap_bsr; - - if (elem->mac_cap_info[3] & ieee80211_he_mac_cap3_omi_control) - cap |= sta_rec_he_cap_om; - - if (elem->mac_cap_info[4] & ieee80211_he_mac_cap4_amdsu_in_ampdu) - cap |= sta_rec_he_cap_amsdu_in_ampdu; - - if (elem->mac_cap_info[4] & ieee80211_he_mac_cap4_bqr) - cap |= sta_rec_he_cap_bqr; - - if (elem->phy_cap_info[0] & - (ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_2g | - ieee80211_he_phy_cap0_channel_width_set_ru_mapping_in_5g)) - cap |= sta_rec_he_cap_bw20_ru242_support; - - if (elem->phy_cap_info[1] & - ieee80211_he_phy_cap1_ldpc_coding_in_payload) - cap |= sta_rec_he_cap_ldpc; - - if (elem->phy_cap_info[1] & - ieee80211_he_phy_cap1_he_ltf_and_gi_for_he_ppdus_0_8us) - cap |= sta_rec_he_cap_su_ppdu_1ltf_8us_gi; - - if (elem->phy_cap_info[2] & - ieee80211_he_phy_cap2_ndp_4x_ltf_and_3_2us) - cap |= sta_rec_he_cap_ndp_4ltf_3dot2ms_gi; - - if (elem->phy_cap_info[2] & - ieee80211_he_phy_cap2_stbc_tx_under_80mhz) - cap |= sta_rec_he_cap_le_eq_80m_tx_stbc; - - if (elem->phy_cap_info[2] & - ieee80211_he_phy_cap2_stbc_rx_under_80mhz) - cap |= sta_rec_he_cap_le_eq_80m_rx_stbc; - - if (elem->phy_cap_info[6] & - ieee80211_he_phy_cap6_partial_bw_ext_range) - cap |= sta_rec_he_cap_partial_bw_ext_range; - - if (elem->phy_cap_info[7] & - ieee80211_he_phy_cap7_he_su_mu_ppdu_4xltf_and_08_us_gi) - cap |= sta_rec_he_cap_su_mu_ppdu_4ltf_8us_gi; - - if (elem->phy_cap_info[7] & - ieee80211_he_phy_cap7_stbc_tx_above_80mhz) - cap |= sta_rec_he_cap_gt_80m_tx_stbc; - - if (elem->phy_cap_info[7] & - ieee80211_he_phy_cap7_stbc_rx_above_80mhz) - cap |= sta_rec_he_cap_gt_80m_rx_stbc; - - if (elem->phy_cap_info[8] & - ieee80211_he_phy_cap8_he_er_su_ppdu_4xltf_and_08_us_gi) - cap |= sta_rec_he_cap_er_su_ppdu_4ltf_8us_gi; - - if (elem->phy_cap_info[8] & - ieee80211_he_phy_cap8_he_er_su_1xltf_and_08_us_gi) - cap |= sta_rec_he_cap_er_su_ppdu_1ltf_8us_gi; - - if (elem->phy_cap_info[9] & - 
ieee80211_he_phy_cap9_non_triggered_cqi_feedback) - cap |= sta_rec_he_cap_trig_cqi_fk; - - if (elem->phy_cap_info[9] & - ieee80211_he_phy_cap9_tx_1024_qam_less_than_242_tone_ru) - cap |= sta_rec_he_cap_tx_1024qam_under_ru242; - - if (elem->phy_cap_info[9] & - ieee80211_he_phy_cap9_rx_1024_qam_less_than_242_tone_ru) - cap |= sta_rec_he_cap_rx_1024qam_under_ru242; - - he->he_cap = cpu_to_le32(cap); - - switch (sta->bandwidth) { - case ieee80211_sta_rx_bw_160: - if (elem->phy_cap_info[0] & - ieee80211_he_phy_cap0_channel_width_set_80plus80_mhz_in_5g) - he->max_nss_mcs[cmd_he_mcs_bw8080] = - he_cap->he_mcs_nss_supp.rx_mcs_80p80; - - he->max_nss_mcs[cmd_he_mcs_bw160] = - he_cap->he_mcs_nss_supp.rx_mcs_160; - fallthrough; - default: - he->max_nss_mcs[cmd_he_mcs_bw80] = - he_cap->he_mcs_nss_supp.rx_mcs_80; - break; - } - - he->t_frame_dur = - he_mac(cap1_tf_mac_pad_dur_mask, elem->mac_cap_info[1]); - he->max_ampdu_exp = - he_mac(cap3_max_ampdu_len_exp_mask, elem->mac_cap_info[3]); - - he->bw_set = - he_phy(cap0_channel_width_set_mask, elem->phy_cap_info[0]); - he->device_class = - he_phy(cap1_device_class_a, elem->phy_cap_info[1]); - he->punc_pream_rx = - he_phy(cap1_preamble_punc_rx_mask, elem->phy_cap_info[1]); - - he->dcm_tx_mode = - he_phy(cap3_dcm_max_const_tx_mask, elem->phy_cap_info[3]); - he->dcm_tx_max_nss = - he_phy(cap3_dcm_max_tx_nss_2, elem->phy_cap_info[3]); - he->dcm_rx_mode = - he_phy(cap3_dcm_max_const_rx_mask, elem->phy_cap_info[3]); - he->dcm_rx_max_nss = - he_phy(cap3_dcm_max_rx_nss_2, elem->phy_cap_info[3]); - he->dcm_rx_max_nss = - he_phy(cap8_dcm_max_ru_mask, elem->phy_cap_info[8]); - - he->pkt_ext = 2; -} - -static void -mt7921_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, - struct ieee80211_vif *vif) -{ - struct sta_rec_uapsd *uapsd; - struct tlv *tlv; - - if (vif->type != nl80211_iftype_ap || !sta->wme) - return; - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_apps, sizeof(*uapsd)); - uapsd = (struct sta_rec_uapsd *)tlv; - - if 
(sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_vo) { - uapsd->dac_map |= bit(3); - uapsd->tac_map |= bit(3); - } - if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_vi) { - uapsd->dac_map |= bit(2); - uapsd->tac_map |= bit(2); - } - if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_be) { - uapsd->dac_map |= bit(1); - uapsd->tac_map |= bit(1); - } - if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_bk) { - uapsd->dac_map |= bit(0); - uapsd->tac_map |= bit(0); - } - uapsd->max_sp = sta->max_sp; -} - -static void -mt7921_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) -{ - struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; - struct sta_rec_amsdu *amsdu; - struct tlv *tlv; - - if (!sta->max_amsdu_len) - return; - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_hw_amsdu, sizeof(*amsdu)); - amsdu = (struct sta_rec_amsdu *)tlv; - amsdu->max_amsdu_num = 8; - amsdu->amsdu_en = true; - amsdu->max_mpdu_size = sta->max_amsdu_len >= - ieee80211_max_mpdu_len_vht_7991; - msta->wcid.amsdu = true; -} - -static bool -mt7921_hw_amsdu_supported(struct ieee80211_vif *vif) -{ - switch (vif->type) { - case nl80211_iftype_ap: - case nl80211_iftype_station: - return true; - default: - return false; - } -} - -static void -mt7921_mcu_sta_tlv(struct mt7921_dev *dev, struct sk_buff *skb, - struct ieee80211_sta *sta, struct ieee80211_vif *vif) -{ - struct tlv *tlv; - struct sta_rec_state *state; - struct sta_rec_phy *phy; - struct sta_rec_ra_info *ra_info; - struct cfg80211_chan_def *chandef = &dev->mphy.chandef; - enum nl80211_band band = chandef->chan->band; - - /* starec ht */ - if (sta->ht_cap.ht_supported) { - struct sta_rec_ht *ht; - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_ht, sizeof(*ht)); - ht = (struct sta_rec_ht *)tlv; - ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); - - if (mt7921_hw_amsdu_supported(vif)) - mt7921_mcu_sta_amsdu_tlv(skb, sta); - } - - /* starec vht */ - if (sta->vht_cap.vht_supported) { - struct sta_rec_vht *vht; - - tlv = 
mt7921_mcu_add_tlv(skb, sta_rec_vht, sizeof(*vht)); - vht = (struct sta_rec_vht *)tlv; - vht->vht_cap = cpu_to_le32(sta->vht_cap.cap); - vht->vht_rx_mcs_map = sta->vht_cap.vht_mcs.rx_mcs_map; - vht->vht_tx_mcs_map = sta->vht_cap.vht_mcs.tx_mcs_map; - } - - /* starec he */ - if (sta->he_cap.has_he) - mt7921_mcu_sta_he_tlv(skb, sta); - - /* starec uapsd */ - mt7921_mcu_sta_uapsd_tlv(skb, sta, vif); - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_phy, sizeof(*phy)); - phy = (struct sta_rec_phy *)tlv; - phy->phy_type = mt7921_get_phy_mode_v2(dev, vif, band, sta); - phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_ra, sizeof(*ra_info)); - ra_info = (struct sta_rec_ra_info *)tlv; - ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]); - - if (sta->ht_cap.ht_supported) { - memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask, - ht_mcs_mask_num); - } - - tlv = mt7921_mcu_add_tlv(skb, sta_rec_state, sizeof(*state)); - state = (struct sta_rec_state *)tlv; - state->state = 2; - - if (sta->vht_cap.vht_supported) { - state->vht_opmode = sta->bandwidth; - state->vht_opmode |= (sta->rx_nss - 1) << - ieee80211_opmode_notif_rx_nss_shift; - } -} - -static void -mt7921_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, - void *sta_wtbl, void *wtbl_tlv) -{ - struct wtbl_smps *smps; - struct tlv *tlv; - - tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_smps, sizeof(*smps), - wtbl_tlv, sta_wtbl); - smps = (struct wtbl_smps *)tlv; - - if (sta->smps_mode == ieee80211_smps_dynamic) - smps->smps = true; -} - -static void -mt7921_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, - void *sta_wtbl, void *wtbl_tlv) -{ - struct wtbl_ht *ht = null; - struct tlv *tlv; - - /* wtbl ht */ - if (sta->ht_cap.ht_supported) { - tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_ht, sizeof(*ht), - wtbl_tlv, sta_wtbl); - ht = (struct wtbl_ht *)tlv; - ht->ldpc = !!(sta->ht_cap.cap & ieee80211_ht_cap_ldpc_coding); - ht->af = 
sta->ht_cap.ampdu_factor; - ht->mm = sta->ht_cap.ampdu_density; - ht->ht = true; - } - - /* wtbl vht */ - if (sta->vht_cap.vht_supported) { - struct wtbl_vht *vht; - u8 af; - - tlv = mt7921_mcu_add_nested_tlv(skb, wtbl_vht, sizeof(*vht), - wtbl_tlv, sta_wtbl); - vht = (struct wtbl_vht *)tlv; - vht->ldpc = !!(sta->vht_cap.cap & ieee80211_vht_cap_rxldpc); - vht->vht = true; - - af = field_get(ieee80211_vht_cap_max_a_mpdu_length_exponent_mask, - sta->vht_cap.cap); - if (ht) - ht->af = max_t(u8, ht->af, af); - } - - mt7921_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv); -} - -static int mt7921_mcu_start_firmware(struct mt7921_dev *dev, u32 addr, - u32 option) -{ - struct { - __le32 option; - __le32 addr; - } req = { - .option = cpu_to_le32(option), - .addr = cpu_to_le32(addr), - }; + struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv; - return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_fw_start_req, &req, - sizeof(req), true); + return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, + enable, false); -static int mt7921_mcu_patch_sem_ctrl(struct mt7921_dev *dev, bool get) -{ - struct { - __le32 op; - } req = { - .op = cpu_to_le32(get ? 
patch_sem_get : patch_sem_release), - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_patch_sem_control, &req, - sizeof(req), true); -} - -static int mt7921_mcu_start_patch(struct mt7921_dev *dev) -{ - struct { - u8 check_crc; - u8 reserved[3]; - } req = { - .check_crc = 0, - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_patch_finish_req, &req, - sizeof(req), true); -} - -static int mt7921_mcu_init_download(struct mt7921_dev *dev, u32 addr, - u32 len, u32 mode) -{ - struct { - __le32 addr; - __le32 len; - __le32 mode; - } req = { - .addr = cpu_to_le32(addr), - .len = cpu_to_le32(len), - .mode = cpu_to_le32(mode), - }; - int attr; - - if (req.addr == cpu_to_le32(mcu_patch_address) || addr == 0x900000) - attr = mcu_cmd_patch_start_req; - else - attr = mcu_cmd_target_address_len_req; - - return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true); -} - - sem = mt7921_mcu_patch_sem_ctrl(dev, 1); + sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); - ret = mt7921_mcu_init_download(dev, addr, len, - dl_mode_need_rsp); + ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len, + dl_mode_need_rsp); - ret = mt7921_mcu_start_patch(dev); + ret = mt76_connac_mcu_start_patch(&dev->mt76); - sem = mt7921_mcu_patch_sem_ctrl(dev, 0); + sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); - err = mt7921_mcu_init_download(dev, addr, len, mode); + err = mt76_connac_mcu_init_download(&dev->mt76, addr, len, + mode); - return mt7921_mcu_start_firmware(dev, override, option); + return mt76_connac_mcu_start_firmware(&dev->mt76, override, option); -int mt7921_mcu_set_mac(struct mt7921_dev *dev, int band, - bool enable, bool hdr_trans) -{ - struct { - u8 enable; - u8 band; - u8 rsv[2]; - } __packed req_mac = { - .enable = enable, - .band = band, - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd_mac_init_ctrl, - &req_mac, sizeof(req_mac), true); -} - -int mt7921_mcu_set_rts_thresh(struct mt7921_phy *phy, u32 val) -{ - struct mt7921_dev *dev = phy->dev; - 
struct { - u8 prot_idx; - u8 band; - u8 rsv[2]; - __le32 len_thresh; - __le32 pkt_thresh; - } __packed req = { - .prot_idx = 1, - .band = phy != &dev->phy, - .len_thresh = cpu_to_le32(val), - .pkt_thresh = cpu_to_le32(0x2), - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_ext_cmd_protect_ctrl, &req, - sizeof(req), true); -} - -int -mt7921_mcu_uni_add_dev(struct mt7921_dev *dev, - struct ieee80211_vif *vif, bool enable) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - u8 omac_idx = mvif->mt76.omac_idx; - struct { - struct { - u8 omac_idx; - u8 band_idx; - __le16 pad; - } __packed hdr; - struct req_tlv { - __le16 tag; - __le16 len; - u8 active; - u8 pad; - u8 omac_addr[eth_alen]; - } __packed tlv; - } dev_req = { - .hdr = { - .omac_idx = omac_idx, - .band_idx = mvif->mt76.band_idx, - }, - .tlv = { - .tag = cpu_to_le16(dev_info_active), - .len = cpu_to_le16(sizeof(struct req_tlv)), - .active = enable, - }, - }; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt7921_bss_basic_tlv basic; - } basic_req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .basic = { - .tag = cpu_to_le16(uni_bss_info_basic), - .len = cpu_to_le16(sizeof(struct mt7921_bss_basic_tlv)), - .omac_idx = omac_idx, - .band_idx = mvif->mt76.band_idx, - .wmm_idx = mvif->mt76.wmm_idx, - .active = enable, - .bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx), - .sta_idx = cpu_to_le16(mvif->sta.wcid.idx), - .conn_state = 1, - }, - }; - int err, idx, cmd, len; - void *data; - - switch (vif->type) { - case nl80211_iftype_mesh_point: - case nl80211_iftype_ap: - basic_req.basic.conn_type = cpu_to_le32(connection_infra_ap); - break; - case nl80211_iftype_station: - basic_req.basic.conn_type = cpu_to_le32(connection_infra_sta); - break; - case nl80211_iftype_adhoc: - basic_req.basic.conn_type = cpu_to_le32(connection_ibss_adhoc); - break; - default: - warn_on(1); - break; - } - - idx = omac_idx > ext_bssid_start ? 
hw_bssid_0 : omac_idx; - basic_req.basic.hw_bss_idx = idx; - - memcpy(dev_req.tlv.omac_addr, vif->addr, eth_alen); - - cmd = enable ? mcu_uni_cmd_dev_info_update : mcu_uni_cmd_bss_info_update; - data = enable ? (void *)&dev_req : (void *)&basic_req; - len = enable ? sizeof(dev_req) : sizeof(basic_req); - - err = mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true); - if (err < 0) - return err; - - cmd = enable ? mcu_uni_cmd_bss_info_update : mcu_uni_cmd_dev_info_update; - data = enable ? (void *)&basic_req : (void *)&dev_req; - len = enable ? sizeof(basic_req) : sizeof(dev_req); - - return mt76_mcu_send_msg(&dev->mt76, cmd, data, len, true); -} - -int -mt7921_mcu_uni_add_bss(struct mt7921_phy *phy, struct ieee80211_vif *vif, - bool enable) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; - int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; - struct mt7921_dev *dev = phy->dev; - enum nl80211_band band = chandef->chan->band; - u8 omac_idx = mvif->mt76.omac_idx; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt7921_bss_basic_tlv basic; - struct mt7921_bss_qos_tlv qos; - } basic_req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .basic = { - .tag = cpu_to_le16(uni_bss_info_basic), - .len = cpu_to_le16(sizeof(struct mt7921_bss_basic_tlv)), - .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), - .dtim_period = vif->bss_conf.dtim_period, - .omac_idx = omac_idx, - .band_idx = mvif->mt76.band_idx, - .wmm_idx = mvif->mt76.wmm_idx, - .active = true, /* keep bss deactivated */ - .phymode = mt7921_get_phy_mode(phy->dev, vif, band, null), - }, - .qos = { - .tag = cpu_to_le16(uni_bss_info_qbss), - .len = cpu_to_le16(sizeof(struct mt7921_bss_qos_tlv)), - .qos = vif->bss_conf.qos, - }, - }; - - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct bss_info_uni_he he; - } he_req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .he = { - 
.tag = cpu_to_le16(uni_bss_info_he_basic), - .len = cpu_to_le16(sizeof(struct bss_info_uni_he)), - }, - }; - - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct rlm_tlv { - __le16 tag; - __le16 len; - u8 control_channel; - u8 center_chan; - u8 center_chan2; - u8 bw; - u8 tx_streams; - u8 rx_streams; - u8 short_st; - u8 ht_op_info; - u8 sco; - u8 pad[3]; - } __packed rlm; - } __packed rlm_req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .rlm = { - .tag = cpu_to_le16(uni_bss_info_rlm), - .len = cpu_to_le16(sizeof(struct rlm_tlv)), - .control_channel = chandef->chan->hw_value, - .center_chan = ieee80211_frequency_to_channel(freq1), - .center_chan2 = ieee80211_frequency_to_channel(freq2), - .tx_streams = hweight8(phy->mt76->antenna_mask), - .rx_streams = phy->mt76->chainmask, - .short_st = true, - }, - }; - int err, conn_type; - u8 idx; - - idx = omac_idx > ext_bssid_start ? hw_bssid_0 : omac_idx; - basic_req.basic.hw_bss_idx = idx; - - switch (vif->type) { - case nl80211_iftype_mesh_point: - case nl80211_iftype_ap: - if (vif->p2p) - conn_type = connection_p2p_go; - else - conn_type = connection_infra_ap; - basic_req.basic.conn_type = cpu_to_le32(conn_type); - break; - case nl80211_iftype_station: - if (vif->p2p) - conn_type = connection_p2p_gc; - else - conn_type = connection_infra_sta; - basic_req.basic.conn_type = cpu_to_le32(conn_type); - break; - case nl80211_iftype_adhoc: - basic_req.basic.conn_type = cpu_to_le32(connection_ibss_adhoc); - break; - default: - warn_on(1); - break; - } - - memcpy(basic_req.basic.bssid, vif->bss_conf.bssid, eth_alen); - basic_req.basic.bmc_tx_wlan_idx = cpu_to_le16(mvif->sta.wcid.idx); - basic_req.basic.sta_idx = cpu_to_le16(mvif->sta.wcid.idx); - basic_req.basic.conn_state = !enable; - - err = mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, - &basic_req, sizeof(basic_req), true); - if (err < 0) - return err; - - if (vif->bss_conf.he_support) { - mt7921_mcu_uni_bss_he_tlv((struct tlv 
*)&he_req.he, vif, phy); - - err = mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, - &he_req, sizeof(he_req), true); - if (err < 0) - return err; - } - - switch (chandef->width) { - case nl80211_chan_width_40: - rlm_req.rlm.bw = cmd_cbw_40mhz; - break; - case nl80211_chan_width_80: - rlm_req.rlm.bw = cmd_cbw_80mhz; - break; - case nl80211_chan_width_80p80: - rlm_req.rlm.bw = cmd_cbw_8080mhz; - break; - case nl80211_chan_width_160: - rlm_req.rlm.bw = cmd_cbw_160mhz; - break; - case nl80211_chan_width_5: - rlm_req.rlm.bw = cmd_cbw_5mhz; - break; - case nl80211_chan_width_10: - rlm_req.rlm.bw = cmd_cbw_10mhz; - break; - case nl80211_chan_width_20_noht: - case nl80211_chan_width_20: - default: - rlm_req.rlm.bw = cmd_cbw_20mhz; - break; - } - - if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) - rlm_req.rlm.sco = 1; /* sca */ - else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) - rlm_req.rlm.sco = 3; /* scb */ - - return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_bss_info_update, - &rlm_req, sizeof(rlm_req), true); -} - -static int -mt7921_mcu_add_sta_cmd(struct mt7921_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable, int cmd) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct wtbl_req_hdr *wtbl_hdr; - struct mt7921_sta *msta; - struct tlv *sta_wtbl; - struct sk_buff *skb; - - msta = sta ? 
(struct mt7921_sta *)sta->drv_priv : &mvif->sta; - - skb = mt7921_mcu_alloc_sta_req(dev, mvif, msta, - mt7921_sta_update_max_size); - if (is_err(skb)) - return ptr_err(skb); - - mt7921_mcu_sta_basic_tlv(skb, vif, sta, enable); - if (enable && sta) - mt7921_mcu_sta_tlv(dev, skb, sta, vif); - - sta_wtbl = mt7921_mcu_add_tlv(skb, sta_rec_wtbl, sizeof(struct tlv)); - - wtbl_hdr = mt7921_mcu_alloc_wtbl_req(dev, msta, wtbl_reset_and_set, - sta_wtbl, &skb); - if (enable) { - mt7921_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr); - if (sta) - mt7921_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr); - } - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); -} - -int -mt7921_mcu_uni_add_sta(struct mt7921_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable) -{ - return mt7921_mcu_add_sta_cmd(dev, vif, sta, enable, - mcu_uni_cmd_sta_rec_update); -} - -int mt7921_mcu_set_channel_domain(struct mt7921_phy *phy) -{ - struct mt76_phy *mphy = phy->mt76; - struct mt7921_dev *dev = phy->dev; - struct mt7921_mcu_channel_domain { - __le32 country_code; /* regulatory_request.alpha2 */ - u8 bw_2g; /* bw_20_40m 0 - * bw_20m 1 - * bw_20_40_80m 2 - * bw_20_40_80_160m 3 - * bw_20_40_80_8080m 4 - */ - u8 bw_5g; - __le16 pad; - u8 n_2ch; - u8 n_5ch; - __le16 pad2; - } __packed hdr = { - .bw_2g = 0, - .bw_5g = 3, - .n_2ch = mphy->sband_2g.sband.n_channels, - .n_5ch = mphy->sband_5g.sband.n_channels, - }; - struct mt7921_mcu_chan { - __le16 hw_value; - __le16 pad; - __le32 flags; - } __packed; - int i, n_channels = hdr.n_2ch + hdr.n_5ch; - int len = sizeof(hdr) + n_channels * sizeof(struct mt7921_mcu_chan); - struct sk_buff *skb; - - skb = mt76_mcu_msg_alloc(&dev->mt76, null, len); - if (!skb) - return -enomem; - - skb_put_data(skb, &hdr, sizeof(hdr)); - - for (i = 0; i < n_channels; i++) { - struct ieee80211_channel *chan; - struct mt7921_mcu_chan channel; - - if (i < hdr.n_2ch) - chan = &mphy->sband_2g.sband.channels[i]; - else - chan = 
&mphy->sband_5g.sband.channels[i - hdr.n_2ch]; - - channel.hw_value = cpu_to_le16(chan->hw_value); - channel.flags = cpu_to_le32(chan->flags); - channel.pad = 0; - - skb_put_data(skb, &channel, sizeof(channel)); - } - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_cmd_set_chan_domain, - false); -} - diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +#include "../mt76_connac_mcu.h" + -#define mcu_fw_prefix bit(31) -#define mcu_uni_prefix bit(30) -#define mcu_ce_prefix bit(29) -#define mcu_query_prefix bit(28) -#define mcu_cmd_mask ~(mcu_fw_prefix | mcu_uni_prefix | \ - mcu_ce_prefix | mcu_query_prefix) - -#define mcu_query_mask bit(16) - -enum { - mcu_cmd_target_address_len_req = mcu_fw_prefix | 0x01, - mcu_cmd_fw_start_req = mcu_fw_prefix | 0x02, - mcu_cmd_nic_power_ctrl = mcu_fw_prefix | 0x4, - mcu_cmd_patch_start_req = mcu_fw_prefix | 0x05, - mcu_cmd_patch_finish_req = mcu_fw_prefix | 0x07, - mcu_cmd_patch_sem_control = mcu_fw_prefix | 0x10, - mcu_cmd_ext_cid = 0xed, - mcu_cmd_fw_scatter = mcu_fw_prefix | 0xee, -}; - -enum { - mcu_ext_cmd_efuse_access = 0x01, - mcu_ext_cmd_channel_switch = 0x08, - mcu_ext_cmd_efuse_buffer_mode = 0x21, - mcu_ext_cmd_edca_update = 0x27, - mcu_ext_cmd_thermal_ctrl = 0x2c, - mcu_ext_cmd_wtbl_update = 0x32, - mcu_ext_cmd_protect_ctrl = 0x3e, - mcu_ext_cmd_mac_init_ctrl = 0x46, - mcu_ext_cmd_rx_hdr_trans = 0x47, - mcu_ext_cmd_set_rx_path = 0x4e, -}; - -enum { - mcu_uni_cmd_dev_info_update = mcu_uni_prefix | 0x01, - mcu_uni_cmd_bss_info_update = mcu_uni_prefix | 0x02, - mcu_uni_cmd_sta_rec_update = mcu_uni_prefix | 0x03, - mcu_uni_cmd_suspend = mcu_uni_prefix | 0x05, - mcu_uni_cmd_offload = mcu_uni_prefix | 0x06, - mcu_uni_cmd_hif_ctrl = mcu_uni_prefix | 0x07, -}; - -enum { - wow_usb = 1, - wow_pcie = 2, - wow_gpio = 3, -}; - -/* offload mcu commands */ -enum { - 
mcu_cmd_start_hw_scan = mcu_ce_prefix | 0x03, - mcu_cmd_set_chan_domain = mcu_ce_prefix | 0x0f, - mcu_cmd_set_bss_connected = mcu_ce_prefix | 0x16, - mcu_cmd_set_bss_abort = mcu_ce_prefix | 0x17, - mcu_cmd_cancel_hw_scan = mcu_ce_prefix | 0x1b, - mcu_cmd_sched_scan_enable = mcu_ce_prefix | 0x61, - mcu_cmd_sched_scan_req = mcu_ce_prefix | 0x62, - mcu_cmd_reg_write = mcu_ce_prefix | 0xc0, - mcu_cmd_reg_read = mcu_ce_prefix | mcu_query_mask | 0xc0, - mcu_cmd_fwlog_2_host = mcu_ce_prefix | 0xc5, - mcu_cmd_get_wtbl = mcu_ce_prefix | 0xcd, -}; - -#define mcu_cmd_ack bit(0) -#define mcu_cmd_uni bit(1) -#define mcu_cmd_query bit(2) - -#define mcu_cmd_uni_ext_ack (mcu_cmd_ack | mcu_cmd_uni | mcu_cmd_query) - -enum { - uni_bss_info_basic = 0, - uni_bss_info_rlm = 2, - uni_bss_info_he_basic = 5, - uni_bss_info_bcn_content = 7, - uni_bss_info_qbss = 15, - uni_bss_info_uapsd = 19, - uni_bss_info_ps = 21, - uni_bss_info_bcnft = 22, -}; - -enum { - uni_suspend_mode_setting, - uni_suspend_wow_ctrl, - uni_suspend_wow_gpio_param, - uni_suspend_wow_wakeup_port, - uni_suspend_wow_pattern, -}; - -enum { - uni_offload_offload_arp, - uni_offload_offload_nd, - uni_offload_offload_gtk_rekey, - uni_offload_offload_bmc_rpy_detect, -}; - -enum { - patch_sem_release, - patch_sem_get -}; - -#define conn_state_disconnect 0 -#define conn_state_connect 1 -#define conn_state_port_secure 2 - -enum { - dev_info_active, - dev_info_max_num -}; - -enum { - cmd_cbw_20mhz = ieee80211_sta_rx_bw_20, - cmd_cbw_40mhz = ieee80211_sta_rx_bw_40, - cmd_cbw_80mhz = ieee80211_sta_rx_bw_80, - cmd_cbw_160mhz = ieee80211_sta_rx_bw_160, - cmd_cbw_10mhz, - cmd_cbw_5mhz, - cmd_cbw_8080mhz, - - cmd_he_mcs_bw80 = 0, - cmd_he_mcs_bw160, - cmd_he_mcs_bw8080, - cmd_he_mcs_bw_num -}; - -struct tlv { - __le16 tag; - __le16 len; -} __packed; - -struct bss_info_uni_he { - __le16 tag; - __le16 len; - __le16 he_rts_thres; - u8 he_pe_duration; - u8 su_disable; - __le16 max_nss_mcs[cmd_he_mcs_bw_num]; - u8 rsv[2]; -} __packed; - 
-enum { - wtbl_reset_and_set = 1, - wtbl_set, - wtbl_query, - wtbl_reset_all -}; - -struct wtbl_req_hdr { - u8 wlan_idx_lo; - u8 operation; - __le16 tlv_num; - u8 wlan_idx_hi; - u8 rsv[3]; -} __packed; - -struct wtbl_generic { - __le16 tag; - __le16 len; - u8 peer_addr[eth_alen]; - u8 muar_idx; - u8 skip_tx; - u8 cf_ack; - u8 qos; - u8 mesh; - u8 adm; - __le16 partial_aid; - u8 baf_en; - u8 aad_om; -} __packed; - -struct wtbl_rx { - __le16 tag; - __le16 len; - u8 rcid; - u8 rca1; - u8 rca2; - u8 rv; - u8 rsv[4]; -} __packed; - -struct wtbl_ht { - __le16 tag; - __le16 len; - u8 ht; - u8 ldpc; - u8 af; - u8 mm; - u8 rsv[4]; -} __packed; - -struct wtbl_vht { - __le16 tag; - __le16 len; - u8 ldpc; - u8 dyn_bw; - u8 vht; - u8 txop_ps; - u8 rsv[4]; -} __packed; - -struct wtbl_hdr_trans { - __le16 tag; - __le16 len; - u8 to_ds; - u8 from_ds; - u8 no_rx_trans; - u8 _rsv; -}; - -enum { - mt_ba_type_invalid, - mt_ba_type_originator, - mt_ba_type_recipient -}; - -enum { - rst_ba_mac_tid_match, - rst_ba_mac_match, - rst_ba_no_match -}; - -struct wtbl_ba { - __le16 tag; - __le16 len; - /* common */ - u8 tid; - u8 ba_type; - u8 rsv0[2]; - /* originator only */ - __le16 sn; - u8 ba_en; - u8 ba_winsize_idx; - __le16 ba_winsize; - /* recipient only */ - u8 peer_addr[eth_alen]; - u8 rst_ba_tid; - u8 rst_ba_sel; - u8 rst_ba_sb; - u8 band_idx; - u8 rsv1[4]; -} __packed; - -struct wtbl_smps { - __le16 tag; - __le16 len; - u8 smps; - u8 rsv[3]; -} __packed; - -enum { - wtbl_generic, - wtbl_rx, - wtbl_ht, - wtbl_vht, - wtbl_peer_ps, /* not used */ - wtbl_tx_ps, - wtbl_hdr_trans, - wtbl_sec_key, - wtbl_ba, - wtbl_rdg, /* obsoleted */ - wtbl_protect, /* not used */ - wtbl_clear, /* not used */ - wtbl_bf, - wtbl_smps, - wtbl_raw_data, /* debug only */ - wtbl_pn, - wtbl_spe, - wtbl_max_num -}; - -struct sta_ntlv_hdr { - u8 rsv[2]; - __le16 tlv_num; -} __packed; - -struct sta_req_hdr { - u8 bss_idx; - u8 wlan_idx_lo; - __le16 tlv_num; - u8 is_tlv_append; - u8 muar_idx; - u8 wlan_idx_hi; - u8 
rsv; -} __packed; - -struct sta_rec_basic { - __le16 tag; - __le16 len; - __le32 conn_type; - u8 conn_state; - u8 qos; - __le16 aid; - u8 peer_addr[eth_alen]; - __le16 extra_info; -} __packed; - -struct sta_rec_ht { - __le16 tag; - __le16 len; - __le16 ht_cap; - u16 rsv; -} __packed; - -struct sta_rec_vht { - __le16 tag; - __le16 len; - __le32 vht_cap; - __le16 vht_rx_mcs_map; - __le16 vht_tx_mcs_map; - u8 rts_bw_sig; - u8 rsv[3]; -} __packed; - -struct sta_rec_uapsd { - __le16 tag; - __le16 len; - u8 dac_map; - u8 tac_map; - u8 max_sp; - u8 rsv0; - __le16 listen_interval; - u8 rsv1[2]; -} __packed; - -struct sta_rec_he { - __le16 tag; - __le16 len; - - __le32 he_cap; - - u8 t_frame_dur; - u8 max_ampdu_exp; - u8 bw_set; - u8 device_class; - u8 dcm_tx_mode; - u8 dcm_tx_max_nss; - u8 dcm_rx_mode; - u8 dcm_rx_max_nss; - u8 dcm_max_ru; - u8 punc_pream_rx; - u8 pkt_ext; - u8 rsv1; - - __le16 max_nss_mcs[cmd_he_mcs_bw_num]; - - u8 rsv2[2]; -} __packed; - -struct sta_rec_ba { - __le16 tag; - __le16 len; - u8 tid; - u8 ba_type; - u8 amsdu; - u8 ba_en; - __le16 ssn; - __le16 winsize; -} __packed; - -struct sta_rec_amsdu { - __le16 tag; - __le16 len; - u8 max_amsdu_num; - u8 max_mpdu_size; - u8 amsdu_en; - u8 rsv; -} __packed; - -struct sta_rec_state { - __le16 tag; - __le16 len; - __le32 flags; - u8 state; - u8 vht_opmode; - u8 action; - u8 rsv[1]; -} __packed; - -#define ht_mcs_mask_num 10 - -struct sta_rec_ra_info { - __le16 tag; - __le16 len; - __le16 legacy; - u8 rx_mcs_bitmask[ht_mcs_mask_num]; -} __packed; - -struct sta_rec_phy { - __le16 tag; - __le16 len; - __le16 basic_rate; - u8 phy_type; - u8 ampdu; - u8 rts_policy; - u8 rcpi; - u8 rsv[2]; -} __packed; - -enum { - sta_rec_basic, - sta_rec_ra, - sta_rec_ra_cmm_info, - sta_rec_ra_update, - sta_rec_bf, - sta_rec_amsdu, - sta_rec_ba, - sta_rec_state, - sta_rec_tx_proc, /* for hdr trans and cso in cr4 */ - sta_rec_ht, - sta_rec_vht, - sta_rec_apps, - sta_rec_key, - sta_rec_wtbl, - sta_rec_he, - sta_rec_hw_amsdu, - 
sta_rec_wtbl_aadom, - sta_rec_key_v2, - sta_rec_muru, - sta_rec_muedca, - sta_rec_bfee, - sta_rec_phy = 0x15, - sta_rec_max_num -}; - -#define phy_mode_a bit(0) -#define phy_mode_b bit(1) -#define phy_mode_g bit(2) -#define phy_mode_gn bit(3) -#define phy_mode_an bit(4) -#define phy_mode_ac bit(5) -#define phy_mode_ax_24g bit(6) -#define phy_mode_ax_5g bit(7) -#define phy_mode_ax_6g bit(8) - -#define mode_cck bit(0) -#define mode_ofdm bit(1) -#define mode_ht bit(2) -#define mode_vht bit(3) -#define mode_he bit(4) - -/* he mac */ -#define sta_rec_he_cap_htc bit(0) -#define sta_rec_he_cap_bqr bit(1) -#define sta_rec_he_cap_bsr bit(2) -#define sta_rec_he_cap_om bit(3) -#define sta_rec_he_cap_amsdu_in_ampdu bit(4) -/* he phy */ -#define sta_rec_he_cap_dual_band bit(5) -#define sta_rec_he_cap_ldpc bit(6) -#define sta_rec_he_cap_trig_cqi_fk bit(7) -#define sta_rec_he_cap_partial_bw_ext_range bit(8) -/* stbc */ -#define sta_rec_he_cap_le_eq_80m_tx_stbc bit(9) -#define sta_rec_he_cap_le_eq_80m_rx_stbc bit(10) -#define sta_rec_he_cap_gt_80m_tx_stbc bit(11) -#define sta_rec_he_cap_gt_80m_rx_stbc bit(12) -/* gi */ -#define sta_rec_he_cap_su_ppdu_1ltf_8us_gi bit(13) -#define sta_rec_he_cap_su_mu_ppdu_4ltf_8us_gi bit(14) -#define sta_rec_he_cap_er_su_ppdu_1ltf_8us_gi bit(15) -#define sta_rec_he_cap_er_su_ppdu_4ltf_8us_gi bit(16) -#define sta_rec_he_cap_ndp_4ltf_3dot2ms_gi bit(17) -/* 242 tone */ -#define sta_rec_he_cap_bw20_ru242_support bit(18) -#define sta_rec_he_cap_tx_1024qam_under_ru242 bit(19) -#define sta_rec_he_cap_rx_1024qam_under_ru242 bit(20) - -struct mt7921_bss_basic_tlv { - __le16 tag; - __le16 len; - u8 active; - u8 omac_idx; - u8 hw_bss_idx; - u8 band_idx; - __le32 conn_type; - u8 conn_state; - u8 wmm_idx; - u8 bssid[eth_alen]; - __le16 bmc_tx_wlan_idx; - __le16 bcn_interval; - u8 dtim_period; - u8 phymode; /* bit(0): a - * bit(1): b - * bit(2): g - * bit(3): gn - * bit(4): an - * bit(5): ac - */ - __le16 sta_idx; - u8 nonht_basic_phy; - u8 pad[3]; -} __packed; 
- -struct mt7921_bss_qos_tlv { - __le16 tag; - __le16 len; - u8 qos; - u8 pad[3]; -} __packed; - -struct mt7921_beacon_loss_event { - u8 bss_idx; - u8 reason; - u8 pad[2]; -} __packed; - -struct mt7921_mcu_bss_event { - u8 bss_idx; - u8 is_absent; - u8 free_quota; - u8 pad; -} __packed; - -enum { - phy_type_hr_dsss_index = 0, - phy_type_erp_index, - phy_type_erp_p2p_index, - phy_type_ofdm_index, - phy_type_ht_index, - phy_type_vht_index, - phy_type_he_index, - phy_type_index_num -}; - -#define phy_type_bit_hr_dsss bit(phy_type_hr_dsss_index) -#define phy_type_bit_erp bit(phy_type_erp_index) -#define phy_type_bit_ofdm bit(phy_type_ofdm_index) -#define phy_type_bit_ht bit(phy_type_ht_index) -#define phy_type_bit_vht bit(phy_type_vht_index) -#define phy_type_bit_he bit(phy_type_he_index) - -#define mt_wtbl_rate_tx_mode genmask(9, 6) -#define mt_wtbl_rate_mcs genmask(5, 0) -#define mt_wtbl_rate_nss genmask(12, 10) -#define mt_wtbl_rate_he_gi genmask(7, 4) -#define mt_wtbl_rate_gi genmask(3, 0) - diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h -enum { - hw_bssid_0 = 0x0, - hw_bssid_1, - hw_bssid_2, - hw_bssid_3, - hw_bssid_max = hw_bssid_3, - ext_bssid_start = 0x10, - ext_bssid_1, - ext_bssid_15 = 0x1f, - ext_bssid_max = ext_bssid_15, - repeater_bssid_start = 0x20, - repeater_bssid_max = 0x3f, -}; - -int mt7921_mcu_set_mac(struct mt7921_dev *dev, int band, bool enable, - bool hdr_trans); -int mt7921_mcu_set_rts_thresh(struct mt7921_phy *phy, u32 val); +int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct rate_info *rate); -static inline bool is_mt7921(struct mt76_dev *dev) -{ - return mt76_chip(dev) == 0x7961; -} - -int -mt7921_mcu_uni_add_dev(struct mt7921_dev *dev, - struct ieee80211_vif *vif, bool enable); -int 
-mt7921_mcu_uni_add_bss(struct mt7921_phy *phy, struct ieee80211_vif *vif, - bool enable); - -int -mt7921_mcu_uni_add_sta(struct mt7921_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable); + -int mt7921_mcu_set_channel_domain(struct mt7921_phy *phy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +#include "mcu.h"
Networking
67aa27431c7f871962fccdb70ae1f3883691e958
lorenzo bianconi
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: rely on mt76_connac_mcu module for sched_scan and hw_scan
rely on mt76_connac_mcu module for sched_scan and hw_scan and remove duplicated code
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
5
15
334
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c - wiphy->max_scan_ie_len = mt7921_scan_ie_len; + wiphy->max_scan_ie_len = mt76_connac_scan_ie_len; - wiphy->max_sched_scan_plan_interval = mt7921_max_sched_scan_interval; + wiphy->max_sched_scan_plan_interval = + mt76_connac_max_sched_scan_interval; - wiphy->max_sched_scan_ssids = mt7921_max_sched_scan_ssid; - wiphy->max_match_sets = mt7921_max_scan_match; + wiphy->max_sched_scan_ssids = mt76_connac_max_sched_scan_ssid; + wiphy->max_match_sets = mt76_connac_max_scan_match; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c - err = mt7921_mcu_hw_scan(mphy->priv, vif, req); + err = mt76_connac_mcu_hw_scan(mphy, vif, req); - mt7921_mcu_cancel_hw_scan(mphy->priv, vif); + mt76_connac_mcu_cancel_hw_scan(mphy, vif); - err = mt7921_mcu_sched_scan_req(mphy->priv, vif, req); + err = mt76_connac_mcu_sched_scan_req(mphy, vif, req); - err = mt7921_mcu_sched_scan_enable(mphy->priv, vif, true); + err = mt76_connac_mcu_sched_scan_enable(mphy, vif, true); - err = mt7921_mcu_sched_scan_enable(mphy->priv, vif, false); + err = mt76_connac_mcu_sched_scan_enable(mphy, vif, false); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c -#define mt7921_scan_channel_time 60 -int mt7921_mcu_hw_scan(struct mt7921_phy *phy, struct ieee80211_vif *vif, - struct ieee80211_scan_request *scan_req) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct cfg80211_scan_request *sreq = &scan_req->req; - int n_ssids = 0, err, i, 
duration = mt7921_scan_channel_time; - int ext_channels_num = max_t(int, sreq->n_channels - 32, 0); - struct ieee80211_channel **scan_list = sreq->channels; - struct mt7921_dev *dev = phy->dev; - struct mt7921_mcu_scan_channel *chan; - struct mt7921_hw_scan_req *req; - struct sk_buff *skb; - - skb = mt76_mcu_msg_alloc(&dev->mt76, null, sizeof(*req)); - if (!skb) - return -enomem; - - set_bit(mt76_hw_scanning, &phy->mt76->state); - mvif->mt76.scan_seq_num = (mvif->mt76.scan_seq_num + 1) & 0x7f; - - req = (struct mt7921_hw_scan_req *)skb_put(skb, sizeof(*req)); - - req->seq_num = mvif->mt76.scan_seq_num; - req->bss_idx = mvif->mt76.idx; - req->scan_type = sreq->n_ssids ? 1 : 0; - req->probe_req_num = sreq->n_ssids ? 2 : 0; - req->version = 1; - - for (i = 0; i < sreq->n_ssids; i++) { - if (!sreq->ssids[i].ssid_len) - continue; - - req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid, - sreq->ssids[i].ssid_len); - n_ssids++; - } - req->ssid_type = n_ssids ? bit(2) : bit(0); - req->ssid_type_ext = n_ssids ? bit(0) : 0; - req->ssids_num = n_ssids; - - /* increase channel time for passive scan */ - if (!sreq->n_ssids) - duration *= 2; - req->timeout_value = cpu_to_le16(sreq->n_channels * duration); - req->channel_min_dwell_time = cpu_to_le16(duration); - req->channel_dwell_time = cpu_to_le16(duration); - - req->channels_num = min_t(u8, sreq->n_channels, 32); - req->ext_channels_num = min_t(u8, ext_channels_num, 32); - for (i = 0; i < req->channels_num + req->ext_channels_num; i++) { - if (i >= 32) - chan = &req->ext_channels[i - 32]; - else - chan = &req->channels[i]; - - chan->band = scan_list[i]->band == nl80211_band_2ghz ? 1 : 2; - chan->channel_num = scan_list[i]->hw_value; - } - req->channel_type = sreq->n_channels ? 
4 : 0; - - if (sreq->ie_len > 0) { - memcpy(req->ies, sreq->ie, sreq->ie_len); - req->ies_len = cpu_to_le16(sreq->ie_len); - } - - memcpy(req->bssid, sreq->bssid, eth_alen); - if (sreq->flags & nl80211_scan_flag_random_addr) { - get_random_mask_addr(req->random_mac, sreq->mac_addr, - sreq->mac_addr_mask); - req->scan_func = 1; - } - - err = mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_cmd_start_hw_scan, - false); - if (err < 0) - clear_bit(mt76_hw_scanning, &phy->mt76->state); - - return err; -} - -int mt7921_mcu_cancel_hw_scan(struct mt7921_phy *phy, - struct ieee80211_vif *vif) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = phy->dev; - struct { - u8 seq_num; - u8 is_ext_channel; - u8 rsv[2]; - } __packed req = { - .seq_num = mvif->mt76.scan_seq_num, - }; - - if (test_and_clear_bit(mt76_hw_scanning, &phy->mt76->state)) { - struct cfg80211_scan_info info = { - .aborted = true, - }; - - ieee80211_scan_completed(phy->mt76->hw, &info); - } - - return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_cancel_hw_scan, &req, - sizeof(req), false); -} - -int mt7921_mcu_sched_scan_req(struct mt7921_phy *phy, - struct ieee80211_vif *vif, - struct cfg80211_sched_scan_request *sreq) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct ieee80211_channel **scan_list = sreq->channels; - struct mt7921_dev *dev = phy->dev; - struct mt7921_mcu_scan_channel *chan; - struct mt7921_sched_scan_req *req; - struct cfg80211_match_set *match; - struct cfg80211_ssid *ssid; - struct sk_buff *skb; - int i; - - skb = mt76_mcu_msg_alloc(&dev->mt76, null, - sizeof(*req) + sreq->ie_len); - if (!skb) - return -enomem; - - mvif->mt76.scan_seq_num = (mvif->mt76.scan_seq_num + 1) & 0x7f; - - req = (struct mt7921_sched_scan_req *)skb_put(skb, sizeof(*req)); - req->version = 1; - req->seq_num = mvif->mt76.scan_seq_num; - - req->ssids_num = sreq->n_ssids; - for (i = 0; i < req->ssids_num; i++) { - ssid = &sreq->ssids[i]; - memcpy(req->ssids[i].ssid, 
ssid->ssid, ssid->ssid_len); - req->ssids[i].ssid_len = cpu_to_le32(ssid->ssid_len); - } - - req->match_num = sreq->n_match_sets; - for (i = 0; i < req->match_num; i++) { - match = &sreq->match_sets[i]; - memcpy(req->match[i].ssid, match->ssid.ssid, - match->ssid.ssid_len); - req->match[i].rssi_th = cpu_to_le32(match->rssi_thold); - req->match[i].ssid_len = match->ssid.ssid_len; - } - - req->channel_type = sreq->n_channels ? 4 : 0; - req->channels_num = min_t(u8, sreq->n_channels, 64); - for (i = 0; i < req->channels_num; i++) { - chan = &req->channels[i]; - chan->band = scan_list[i]->band == nl80211_band_2ghz ? 1 : 2; - chan->channel_num = scan_list[i]->hw_value; - } - - req->intervals_num = sreq->n_scan_plans; - for (i = 0; i < req->intervals_num; i++) - req->intervals[i] = cpu_to_le16(sreq->scan_plans[i].interval); - - if (sreq->ie_len > 0) { - req->ie_len = cpu_to_le16(sreq->ie_len); - memcpy(skb_put(skb, sreq->ie_len), sreq->ie, sreq->ie_len); - } - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_cmd_sched_scan_req, - false); -} - -int mt7921_mcu_sched_scan_enable(struct mt7921_phy *phy, - struct ieee80211_vif *vif, - bool enable) -{ - struct mt7921_dev *dev = phy->dev; - struct { - u8 active; /* 0: enabled 1: disabled */ - u8 rsv[3]; - } __packed req = { - .active = !enable, - }; - - if (enable) - set_bit(mt76_hw_sched_scanning, &phy->mt76->state); - else - clear_bit(mt76_hw_sched_scanning, &phy->mt76->state); - - return mt76_mcu_send_msg(&dev->mt76, mcu_cmd_sched_scan_enable, &req, - sizeof(req), false); -} - -u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u16 wlan_idx) +u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx) - mt7921_mcu_sched_scan_req(phy, vif, wowlan->nd_config); + mt76_connac_mcu_sched_scan_req(&dev->mphy, vif, + wowlan->nd_config); - mt7921_mcu_sched_scan_enable(phy, vif, suspend); + mt76_connac_mcu_sched_scan_enable(&dev->mphy, vif, suspend); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h 
b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h -struct mt7921_mcu_scan_ssid { - __le32 ssid_len; - u8 ssid[ieee80211_max_ssid_len]; -} __packed; - -struct mt7921_mcu_scan_channel { - u8 band; /* 1: 2.4ghz - * 2: 5.0ghz - * others: reserved - */ - u8 channel_num; -} __packed; - -struct mt7921_mcu_scan_match { - __le32 rssi_th; - u8 ssid[ieee80211_max_ssid_len]; - u8 ssid_len; - u8 rsv[3]; -} __packed; - -struct mt7921_hw_scan_req { - u8 seq_num; - u8 bss_idx; - u8 scan_type; /* 0: passive scan - * 1: active scan - */ - u8 ssid_type; /* bit(0) wildcard ssid - * bit(1) p2p wildcard ssid - * bit(2) specified ssid + wildcard ssid - * bit(2) + ssid_type_ext bit(0) specified ssid only - */ - u8 ssids_num; - u8 probe_req_num; /* number of probe request for each ssid */ - u8 scan_func; /* bit(0) enable random mac scan - * bit(1) disable dbdc scan type 1~3. - * bit(2) use dbdc scan type 3 (dedicated one rf to scan). - */ - u8 version; /* 0: not support fields after ies. - * 1: support fields after ies. - */ - struct mt7921_mcu_scan_ssid ssids[4]; - __le16 probe_delay_time; - __le16 channel_dwell_time; /* channel dwell interval */ - __le16 timeout_value; - u8 channel_type; /* 0: full channels - * 1: only 2.4ghz channels - * 2: only 5ghz channels - * 3: p2p social channel only (channel #1, #6 and #11) - * 4: specified channels - * others: reserved - */ - u8 channels_num; /* valid when channel_type is 4 */ - /* valid when channels_num is set */ - struct mt7921_mcu_scan_channel channels[32]; - __le16 ies_len; - u8 ies[mt7921_scan_ie_len]; - /* following fields are valid if version > 0 */ - u8 ext_channels_num; - u8 ext_ssids_num; - __le16 channel_min_dwell_time; - struct mt7921_mcu_scan_channel ext_channels[32]; - struct mt7921_mcu_scan_ssid ext_ssids[6]; - u8 bssid[eth_alen]; - u8 random_mac[eth_alen]; /* valid when bit(1) in scan_func is set. 
*/ - u8 pad[63]; - u8 ssid_type_ext; -} __packed; - -#define scan_done_event_max_channel_num 64 -struct mt7921_hw_scan_done { - u8 seq_num; - u8 sparse_channel_num; - struct mt7921_mcu_scan_channel sparse_channel; - u8 complete_channel_num; - u8 current_state; - u8 version; - u8 pad; - __le32 beacon_scan_num; - u8 pno_enabled; - u8 pad2[3]; - u8 sparse_channel_valid_num; - u8 pad3[3]; - u8 channel_num[scan_done_event_max_channel_num]; - /* idle format for channel_idle_time - * 0: first bytes: idle time(ms) 2nd byte: dwell time(ms) - * 1: first bytes: idle time(8ms) 2nd byte: dwell time(8ms) - * 2: dwell time (16us) - */ - __le16 channel_idle_time[scan_done_event_max_channel_num]; - /* beacon and probe response count */ - u8 beacon_probe_num[scan_done_event_max_channel_num]; - u8 mdrdy_count[scan_done_event_max_channel_num]; - __le32 beacon_2g_num; - __le32 beacon_5g_num; -} __packed; - -struct mt7921_sched_scan_req { - u8 version; - u8 seq_num; - u8 stop_on_match; - u8 ssids_num; - u8 match_num; - u8 pad; - __le16 ie_len; - struct mt7921_mcu_scan_ssid ssids[mt7921_max_sched_scan_ssid]; - struct mt7921_mcu_scan_match match[mt7921_max_scan_match]; - u8 channel_type; - u8 channels_num; - u8 intervals_num; - u8 scan_func; - struct mt7921_mcu_scan_channel channels[64]; - __le16 intervals[mt7921_max_sched_scan_interval]; - u8 bss_idx; - u8 pad2[64]; -} __packed; - diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h -#define mt7921_scan_ie_len 600 -#define mt7921_max_sched_scan_interval 10 -#define mt7921_max_sched_scan_ssid 10 -#define mt7921_max_scan_match 16 - -int mt7921_mcu_hw_scan(struct mt7921_phy *phy, struct ieee80211_vif *vif, - struct ieee80211_scan_request *scan_req); -int mt7921_mcu_sched_scan_req(struct mt7921_phy *phy, - struct ieee80211_vif *vif, - struct 
cfg80211_sched_scan_request *sreq); -int mt7921_mcu_sched_scan_enable(struct mt7921_phy *phy, - struct ieee80211_vif *vif, - bool enable); -int mt7921_mcu_cancel_hw_scan(struct mt7921_phy *phy, - struct ieee80211_vif *vif); -u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u16 wlan_idx); +u32 mt7921_get_wtbl_info(struct mt7921_dev *dev, u32 wlan_idx);
Networking
80fc1e37c0eb0115c980a5bbc011724fa41bfdb3
lorenzo bianconi
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: rely on mt76_connac_mcu module for suspend and wow support
rely on mt76_connac_mcu module for suspend and wow support and remove duplicated code
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
5
11
412
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c - mt7921_mcu_set_suspend_iter, phy); + mt76_connac_mcu_set_suspend_iter, + &dev->mphy); - err = mt7921_mcu_set_hif_suspend(dev, true); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, true); - err = mt7921_mcu_set_hif_suspend(dev, false); + err = mt76_connac_mcu_set_hif_suspend(&dev->mt76, false); - mt7921_mcu_set_suspend_iter, phy); + mt76_connac_mcu_set_suspend_iter, + &dev->mphy); - mt7921_mcu_update_gtk_rekey(hw, vif, data); + mt76_connac_mcu_update_gtk_rekey(hw, vif, data); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c -static const struct wiphy_wowlan_support mt7921_wowlan_support = { - .flags = wiphy_wowlan_magic_pkt | wiphy_wowlan_disconnect | - wiphy_wowlan_supports_gtk_rekey | wiphy_wowlan_net_detect, - .n_patterns = 1, - .pattern_min_len = 1, - .pattern_max_len = mt7921_wow_patten_max_len, - .max_nd_match_sets = 10, -}; - - dev->mt76.hw->wiphy->wowlan = &mt7921_wowlan_support; + dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; - -#ifdef config_pm -int mt7921_mcu_set_hif_suspend(struct mt7921_dev *dev, bool suspend) -{ - struct { - struct { - u8 hif_type; /* 0x0: hif_sdio - * 0x1: hif_usb - * 0x2: hif_pcie - */ - u8 pad[3]; - } __packed hdr; - struct hif_suspend_tlv { - __le16 tag; - __le16 len; - u8 suspend; - } __packed hif_suspend; - } req = { - .hif_suspend = { - .tag = cpu_to_le16(0), /* 0: uni_hif_ctrl_basic */ - .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), - .suspend = suspend, - }, - }; - - if (mt76_is_mmio(&dev->mt76)) - req.hdr.hif_type = 2; - else if (mt76_is_usb(&dev->mt76)) - req.hdr.hif_type = 1; - else if 
(mt76_is_sdio(&dev->mt76)) - req.hdr.hif_type = 0; - - return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_hif_ctrl, &req, - sizeof(req), true); -} -export_symbol_gpl(mt7921_mcu_set_hif_suspend); - -static int -mt7921_mcu_set_wow_ctrl(struct mt7921_phy *phy, struct ieee80211_vif *vif, - bool suspend, struct cfg80211_wowlan *wowlan) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = phy->dev; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt7921_wow_ctrl_tlv wow_ctrl_tlv; - struct mt7921_wow_gpio_param_tlv gpio_tlv; - } req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .wow_ctrl_tlv = { - .tag = cpu_to_le16(uni_suspend_wow_ctrl), - .len = cpu_to_le16(sizeof(struct mt7921_wow_ctrl_tlv)), - .cmd = suspend ? 1 : 2, - }, - .gpio_tlv = { - .tag = cpu_to_le16(uni_suspend_wow_gpio_param), - .len = cpu_to_le16(sizeof(struct mt7921_wow_gpio_param_tlv)), - .gpio_pin = 0xff, /* follow fw about gpio pin */ - }, - }; - - if (wowlan->magic_pkt) - req.wow_ctrl_tlv.trigger |= bit(0); - if (wowlan->disconnect) - req.wow_ctrl_tlv.trigger |= bit(2); - if (wowlan->nd_config) { - mt76_connac_mcu_sched_scan_req(&dev->mphy, vif, - wowlan->nd_config); - req.wow_ctrl_tlv.trigger |= bit(5); - mt76_connac_mcu_sched_scan_enable(&dev->mphy, vif, suspend); - } - - if (mt76_is_mmio(&dev->mt76)) - req.wow_ctrl_tlv.wakeup_hif = wow_pcie; - else if (mt76_is_usb(&dev->mt76)) - req.wow_ctrl_tlv.wakeup_hif = wow_usb; - else if (mt76_is_sdio(&dev->mt76)) - req.wow_ctrl_tlv.wakeup_hif = wow_gpio; - - return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_suspend, &req, - sizeof(req), true); -} - -static int -mt7921_mcu_set_wow_pattern(struct mt7921_dev *dev, - struct ieee80211_vif *vif, - u8 index, bool enable, - struct cfg80211_pkt_pattern *pattern) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_wow_pattern_tlv *ptlv; - struct sk_buff *skb; - struct req_hdr { - u8 bss_idx; - u8 pad[3]; - } __packed 
hdr = { - .bss_idx = mvif->mt76.idx, - }; - - skb = mt76_mcu_msg_alloc(&dev->mt76, null, - sizeof(hdr) + sizeof(*ptlv)); - if (!skb) - return -enomem; - - skb_put_data(skb, &hdr, sizeof(hdr)); - ptlv = (struct mt7921_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv)); - ptlv->tag = cpu_to_le16(uni_suspend_wow_pattern); - ptlv->len = cpu_to_le16(sizeof(*ptlv)); - ptlv->data_len = pattern->pattern_len; - ptlv->enable = enable; - ptlv->index = index; - - memcpy(ptlv->pattern, pattern->pattern, pattern->pattern_len); - memcpy(ptlv->mask, pattern->mask, pattern->pattern_len / 8); - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_uni_cmd_suspend, - true); -} - -static int -mt7921_mcu_set_suspend_mode(struct mt7921_dev *dev, - struct ieee80211_vif *vif, - bool enable, u8 mdtim, bool wow_suspend) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt7921_suspend_tlv suspend_tlv; - } req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .suspend_tlv = { - .tag = cpu_to_le16(uni_suspend_mode_setting), - .len = cpu_to_le16(sizeof(struct mt7921_suspend_tlv)), - .enable = enable, - .mdtim = mdtim, - .wow_suspend = wow_suspend, - }, - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_suspend, &req, - sizeof(req), true); -} - -static int -mt7921_mcu_set_gtk_rekey(struct mt7921_dev *dev, - struct ieee80211_vif *vif, - bool suspend) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt7921_gtk_rekey_tlv gtk_tlv; - } __packed req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .gtk_tlv = { - .tag = cpu_to_le16(uni_offload_offload_gtk_rekey), - .len = cpu_to_le16(sizeof(struct mt7921_gtk_rekey_tlv)), - .rekey_mode = !suspend, - }, - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_offload, &req, - sizeof(req), true); -} - -static int -mt7921_mcu_set_arp_filter(struct mt7921_dev *dev, 
struct ieee80211_vif *vif, - bool suspend) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct mt7921_arpns_tlv arpns; - } req = { - .hdr = { - .bss_idx = mvif->mt76.idx, - }, - .arpns = { - .tag = cpu_to_le16(uni_offload_offload_arp), - .len = cpu_to_le16(sizeof(struct mt7921_arpns_tlv)), - .mode = suspend, - }, - }; - - return mt76_mcu_send_msg(&dev->mt76, mcu_uni_cmd_offload, &req, - sizeof(req), true); -} - -void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mt7921_phy *phy = priv; - bool suspend = test_bit(mt76_state_suspend, &phy->mt76->state); - struct ieee80211_hw *hw = phy->mt76->hw; - struct cfg80211_wowlan *wowlan = hw->wiphy->wowlan_config; - int i; - - mt7921_mcu_set_gtk_rekey(phy->dev, vif, suspend); - mt7921_mcu_set_arp_filter(phy->dev, vif, suspend); - - mt7921_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true); - - for (i = 0; i < wowlan->n_patterns; i++) - mt7921_mcu_set_wow_pattern(phy->dev, vif, i, suspend, - &wowlan->patterns[i]); - mt7921_mcu_set_wow_ctrl(phy, vif, suspend, wowlan); -} - -static void -mt7921_mcu_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, struct ieee80211_key_conf *key, - void *data) -{ - struct mt7921_gtk_rekey_tlv *gtk_tlv = data; - u32 cipher; - - if (key->cipher != wlan_cipher_suite_aes_cmac && - key->cipher != wlan_cipher_suite_ccmp && - key->cipher != wlan_cipher_suite_tkip) - return; - - if (key->cipher == wlan_cipher_suite_tkip) { - gtk_tlv->proto = cpu_to_le32(nl80211_wpa_version_1); - cipher = bit(3); - } else { - gtk_tlv->proto = cpu_to_le32(nl80211_wpa_version_2); - cipher = bit(4); - } - - /* we are assuming here to have a single pairwise key */ - if (key->flags & ieee80211_key_flag_pairwise) { - gtk_tlv->pairwise_cipher = cpu_to_le32(cipher); - gtk_tlv->group_cipher = cpu_to_le32(cipher); - gtk_tlv->keyid = key->keyidx; - } -} - 
-int mt7921_mcu_update_gtk_rekey(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *key) -{ - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; - struct mt7921_dev *dev = mt7921_hw_dev(hw); - struct mt7921_gtk_rekey_tlv *gtk_tlv; - struct sk_buff *skb; - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr = { - .bss_idx = mvif->mt76.idx, - }; - - skb = mt76_mcu_msg_alloc(&dev->mt76, null, - sizeof(hdr) + sizeof(*gtk_tlv)); - if (!skb) - return -enomem; - - skb_put_data(skb, &hdr, sizeof(hdr)); - gtk_tlv = (struct mt7921_gtk_rekey_tlv *)skb_put(skb, - sizeof(*gtk_tlv)); - gtk_tlv->tag = cpu_to_le16(uni_offload_offload_gtk_rekey); - gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv)); - gtk_tlv->rekey_mode = 2; - gtk_tlv->option = 1; - - rcu_read_lock(); - ieee80211_iter_keys_rcu(hw, vif, mt7921_mcu_key_iter, gtk_tlv); - rcu_read_unlock(); - - memcpy(gtk_tlv->kek, key->kek, nl80211_kek_len); - memcpy(gtk_tlv->kck, key->kck, nl80211_kck_len); - memcpy(gtk_tlv->replay_ctr, key->replay_ctr, nl80211_replay_ctr_len); - - return mt76_mcu_skb_send_msg(&dev->mt76, skb, mcu_uni_cmd_offload, - true); -} -#endif /* config_pm */ diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h -struct mt7921_wow_ctrl_tlv { - __le16 tag; - __le16 len; - u8 cmd; /* 0x1: pm_wowlan_req_start - * 0x2: pm_wowlan_req_stop - * 0x3: pm_wowlan_param_clear - */ - u8 trigger; /* 0: none - * bit(0): nl80211_wowlan_trig_magic_pkt - * bit(1): nl80211_wowlan_trig_any - * bit(2): nl80211_wowlan_trig_disconnect - * bit(3): nl80211_wowlan_trig_gtk_rekey_failure - * bit(4): beacon_lost - * bit(5): nl80211_wowlan_trig_net_detect - */ - u8 wakeup_hif; /* 0x0: hif_sdio - * 0x1: hif_usb - * 0x2: hif_pcie - * 0x3: hif_gpio - */ - u8 pad; - u8 rsv[4]; -} __packed; - -struct mt7921_wow_gpio_param_tlv { - __le16 
tag; - __le16 len; - u8 gpio_pin; - u8 trigger_lvl; - u8 pad[2]; - __le32 gpio_interval; - u8 rsv[4]; -} __packed; - -#define mt7921_wow_mask_max_len 16 -#define mt7921_wow_patten_max_len 128 -struct mt7921_wow_pattern_tlv { - __le16 tag; - __le16 len; - u8 index; /* pattern index */ - u8 enable; /* 0: disable - * 1: enable - */ - u8 data_len; /* pattern length */ - u8 pad; - u8 mask[mt7921_wow_mask_max_len]; - u8 pattern[mt7921_wow_patten_max_len]; - u8 rsv[4]; -} __packed; - -struct mt7921_suspend_tlv { - __le16 tag; - __le16 len; - u8 enable; /* 0: suspend mode disabled - * 1: suspend mode enabled - */ - u8 mdtim; /* lp parameter */ - u8 wow_suspend; /* 0: update by origin policy - * 1: update by wow dtim - */ - u8 pad[5]; -} __packed; - -struct mt7921_gtk_rekey_tlv { - __le16 tag; - __le16 len; - u8 kek[nl80211_kek_len]; - u8 kck[nl80211_kck_len]; - u8 replay_ctr[nl80211_replay_ctr_len]; - u8 rekey_mode; /* 0: rekey offload enable - * 1: rekey offload disable - * 2: rekey update - */ - u8 keyid; - u8 pad[2]; - __le32 proto; /* wpa-rsn-wapi-opsn */ - __le32 pairwise_cipher; - __le32 group_cipher; - __le32 key_mgmt; /* none-psk-ieee802.1x */ - __le32 mgmt_group_cipher; - u8 option; /* 1: rekey data update without enabling offload */ - u8 reserverd[3]; -} __packed; - -struct mt7921_arpns_tlv { - __le16 tag; - __le16 len; - u8 mode; - u8 ips_num; - u8 option; - u8 pad[1]; -} __packed; - diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h -int mt7921_mcu_set_hif_suspend(struct mt7921_dev *dev, bool suspend); -void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, - struct ieee80211_vif *vif); -int mt7921_mcu_update_gtk_rekey(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *key); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c 
b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c - err = mt7921_mcu_set_hif_suspend(dev, true); + err = mt76_connac_mcu_set_hif_suspend(mdev, true); - mt7921_mcu_set_hif_suspend(dev, false); + mt76_connac_mcu_set_hif_suspend(mdev, false); - err = mt7921_mcu_set_hif_suspend(dev, false); + err = mt76_connac_mcu_set_hif_suspend(mdev, false);
Networking
022159b0e13fba711aabe549e6b3631b1d33dc66
lorenzo bianconi
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce runtime pm support
introduce runtime pm to mt7921 driver
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'c']
8
372
54
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +static int +mt7921_pm_set(void *data, u64 val) +{ + struct mt7921_dev *dev = data; + struct mt76_phy *mphy = dev->phy.mt76; + int ret = 0; + + mt7921_mutex_acquire(dev); + + dev->pm.enable = val; + + ieee80211_iterate_active_interfaces(mphy->hw, + ieee80211_iface_iter_resume_all, + mt7921_pm_interface_iter, mphy->priv); + mt7921_mutex_release(dev); + + return ret; +} + +static int +mt7921_pm_get(void *data, u64 *val) +{ + struct mt7921_dev *dev = data; + + *val = dev->pm.enable; + + return 0; +} + +define_debugfs_attribute(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld "); + +static int +mt7921_pm_idle_timeout_set(void *data, u64 val) +{ + struct mt7921_dev *dev = data; + + dev->pm.idle_timeout = msecs_to_jiffies(val); + + return 0; +} + +static int +mt7921_pm_idle_timeout_get(void *data, u64 *val) +{ + struct mt7921_dev *dev = data; + + *val = jiffies_to_msecs(dev->pm.idle_timeout); + + return 0; +} + +define_debugfs_attribute(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get, + mt7921_pm_idle_timeout_set, "%lld "); + + debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); + debugfs_create_file("idle-timeout", 0600, dir, dev, + &fops_pm_idle_timeout); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c + + init_delayed_work(&dev->pm.ps_work, mt7921_pm_power_save_work); + init_work(&dev->pm.wake_work, mt7921_pm_wake_work); + init_completion(&dev->pm.wake_cmpl); + spin_lock_init(&dev->pm.txq_lock); + set_bit(mt76_state_pm, &dev->mphy.state); + dev->pm.idle_timeout = mt7921_pm_timeout; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c 
b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c - mt7921_mac_sta_poll(dev); - - mt76_worker_schedule(&dev->mt76.tx_worker); - + + if (test_bit(mt76_state_pm, &dev->phy.mt76->state)) + return; + + mt7921_mac_sta_poll(dev); + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); + + mt76_worker_schedule(&dev->mt76.tx_worker); + if (mt76_connac_pm_wake(&dev->mphy, &dev->pm)) + return; + + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&mphy->dev->mutex); + if (test_bit(mt76_state_pm, &mphy->state)) + goto out; + + mt7921_mutex_acquire(phy->dev); - mutex_unlock(&mphy->dev->mutex); + mt7921_mutex_release(phy->dev); - ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, +out: + ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work, + +void mt7921_pm_wake_work(struct work_struct *work) +{ + struct mt7921_dev *dev; + struct mt76_phy *mphy; + + dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev, + pm.wake_work); + mphy = dev->phy.mt76; + + if (!mt7921_mcu_drv_pmctrl(dev)) + mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); + else + dev_err(mphy->dev->dev, "failed to wake device "); + + ieee80211_wake_queues(mphy->hw); + complete_all(&dev->pm.wake_cmpl); +} + +void mt7921_pm_power_save_work(struct work_struct *work) +{ + struct mt7921_dev *dev; + unsigned long delta; + + dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev, + pm.ps_work.work); + + delta = dev->pm.idle_timeout; + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { + delta = dev->pm.last_activity + delta - jiffies; + goto out; + } + + if (!mt7921_mcu_fw_pmctrl(dev)) + return; +out: + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); +} + +int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy, + struct ieee80211_vif 
*vif, + bool enable) +{ + struct mt7921_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + int err; + + if (!dev->pm.enable) + return -eopnotsupp; + + err = mt7921_mcu_set_bss_pm(dev, vif, enable); + if (err) + return err; + + if (enable) { + vif->driver_flags |= ieee80211_vif_beacon_filter; + mt76_set(dev, mt_wf_rfcr(ext_phy), + mt_wf_rfcr_drop_other_beacon); + } else { + vif->driver_flags &= ~ieee80211_vif_beacon_filter; + mt76_clear(dev, mt_wf_rfcr(ext_phy), + mt_wf_rfcr_drop_other_beacon); + } + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + cancel_delayed_work_sync(&dev->pm.ps_work); + cancel_work_sync(&dev->pm.wake_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, null); + + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); + if (dev->pm.enable) { + ret = mt7921_mcu_set_bss_pm(dev, vif, true); + if (ret) + goto out; + + vif->driver_flags |= ieee80211_vif_beacon_filter; + mt76_set(dev, mt_wf_rfcr(0), mt_wf_rfcr_drop_other_beacon); + } + - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + + if (dev->pm.enable) { + mt7921_mcu_set_bss_pm(dev, vif, false); + mt76_clear(dev, mt_wf_rfcr(0), + mt_wf_rfcr_drop_other_beacon); + } + - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - 
mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); + ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (ret) + return ret; + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); + + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + mt76_connac_pm_wake(&dev->mphy, &dev->pm); + + + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); +} + +static void +mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +{ + struct mt7921_dev *dev = mt7921_hw_dev(hw); + struct mt7921_phy *phy = mt7921_hw_phy(hw); + struct mt76_phy *mphy = phy->mt76; + + if (!test_bit(mt76_state_running, &mphy->state)) + return; + + if (test_bit(mt76_state_pm, &mphy->state)) { + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return; + } + + dev->pm.last_activity = jiffies; + mt76_worker_schedule(&dev->mt76.tx_worker); + int qid; - mt76_tx(mphy, control->sta, wcid, skb); + if (!test_bit(mt76_state_pm, &mphy->state)) { + dev->pm.last_activity = jiffies; + mt76_tx(mphy, control->sta, wcid, skb); + return; + } + + qid = skb_get_queue_mapping(skb); + if (qid >= mt_txq_psd) { + qid = ieee80211_ac_be; + skb_set_queue_mapping(skb, qid); + } + + mt76_connac_pm_queue_skb(hw, &dev->pm, wcid, skb); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + 
mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + cancel_delayed_work_sync(&dev->pm.ps_work); + mt76_connac_free_pending_tx_skbs(&dev->pm, null); + + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + + mt7921_mutex_release(dev); - mutex_lock(&dev->mt76.mutex); + mt7921_mutex_acquire(dev); - mutex_unlock(&dev->mt76.mutex); + mt7921_mutex_release(dev); - .wake_tx_queue = mt76_wake_tx_queue, + .wake_tx_queue = mt7921_wake_tx_queue, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c + clear_bit(mt76_state_pm, &dev->mphy.state); + + +int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + int i; + + if (!test_and_clear_bit(mt76_state_pm, &mphy->state)) + goto out; + + for (i = 0; i < mt7921_drv_own_retry_count; i++) { + mt76_wr(dev, mt_conn_on_lpctl, pcie_lpcr_host_clr_own); + if (mt76_poll_msec(dev, mt_conn_on_lpctl, + pcie_lpcr_host_own_sync, 0, 50)) + break; + } + + if (i == mt7921_drv_own_retry_count) { + dev_err(dev->mt76.dev, "driver own failed 
"); + return -eio; + } + +out: + dev->pm.last_activity = jiffies; + + return 0; +} + +int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) +{ + struct mt76_phy *mphy = &dev->mt76.phy; + int i; + + if (test_and_set_bit(mt76_state_pm, &mphy->state)) + return 0; + + for (i = 0; i < mt7921_drv_own_retry_count; i++) { + mt76_wr(dev, mt_conn_on_lpctl, pcie_lpcr_host_set_own); + if (mt76_poll_msec(dev, mt_conn_on_lpctl, + pcie_lpcr_host_own_sync, 4, 50)) + break; + } + + if (i == mt7921_drv_own_retry_count) { + dev_err(dev->mt76.dev, "firmware own failed "); + return -eio; + } + + return 0; +} + +void +mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt7921_phy *phy = priv; + struct mt7921_dev *dev = phy->dev; + + if (mt7921_mcu_set_bss_pm(dev, vif, dev->pm.enable)) + return; + + if (dev->pm.enable) { + vif->driver_flags |= ieee80211_vif_beacon_filter; + mt76_set(dev, mt_wf_rfcr(0), mt_wf_rfcr_drop_other_beacon); + } else { + vif->driver_flags &= ~ieee80211_vif_beacon_filter; + mt76_clear(dev, mt_wf_rfcr(0), mt_wf_rfcr_drop_other_beacon); + } +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h -#include "../mt76.h" +#include "../mt76_connac.h" +#define mt7921_pm_timeout (hz / 12) +#define mt7921_drv_own_retry_count 10 + + + struct mt76_connac_pm pm; +#define mt7921_mutex_acquire(dev) \ + mt76_connac_mutex_acquire(&(dev)->mt76, &(dev)->pm) +#define mt7921_mutex_release(dev) \ + mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm) + +int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); +int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev); +void mt7921_pm_wake_work(struct work_struct *work); +void mt7921_pm_power_save_work(struct work_struct *work); +bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev); +int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy, + struct 
ieee80211_vif *vif, + bool enable); +void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c + err = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (err < 0) + return err; + + err = mt7921_mcu_drv_pmctrl(dev); + if (err) + goto restore; + + err = mt7921_mcu_fw_pmctrl(dev); + if (err < 0) + return err; + diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h +#define mt_conn_on_lpctl 0x7c060010 +#define pcie_lpcr_host_own_sync bit(2) +#define pcie_lpcr_host_clr_own bit(1) +#define pcie_lpcr_host_set_own bit(0) +
Networking
1d8efc741df80be940e1584b5ac613dc03d58bd6
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: introduce regdomain notifier support
register regdomain notifier to determine the channel domain the hw scan should rely on.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['c']
1
16
0
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c +static void +mt7921_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt7921_dev *dev = mt7921_hw_dev(hw); + + memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); + dev->mt76.region = request->dfs_region; + + mt7921_mutex_acquire(dev); + mt76_connac_mcu_set_channel_domain(hw->priv); + mt7921_mutex_release(dev); +} + + wiphy->reg_notifier = mt7921_regd_notifier;
Networking
2c25f4e4cdc924c82385b83b09476db6a6fcd4f4
lorenzo bianconi
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: enable msi interrupts
enable msi interrupts for mt7921 driver
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['c']
1
1
1
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c - ret = pci_alloc_irq_vectors(pdev, 1, 1, pci_irq_legacy); + ret = pci_alloc_irq_vectors(pdev, 1, 1, pci_irq_all_types);
Networking
eaafabd2850d782366ca0558d432857d5e3d472a
lorenzo bianconi
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7921: add coredump support
introduce coredump support to mt7921 driver.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['kconfig', 'h', 'c']
10
117
1
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h --- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h +#define mt76_connac_coredump_timeout (hz / 20) +#define mt76_connac_coredump_sz (128 * 1024) + +struct mt76_connac_coredump { + struct sk_buff_head msg_list; + struct delayed_work work; + unsigned long last_activity; +}; + diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +int mt76_connac_mcu_chip_config(struct mt76_dev *dev) +{ + struct { + __le16 id; + u8 type; + u8 resp_type; + __le16 data_size; + __le16 resv; + u8 data[320]; + } req = { + .resp_type = 0, + }; + + memcpy(req.data, "assert", 7); + + return mt76_mcu_send_msg(dev, mcu_cmd_chip_config, &req, sizeof(req), + false); +} +export_symbol_gpl(mt76_connac_mcu_chip_config); + +void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, + struct mt76_connac_coredump *coredump) +{ + spin_lock_bh(&dev->lock); + __skb_queue_tail(&coredump->msg_list, skb); + spin_unlock_bh(&dev->lock); + + coredump->last_activity = jiffies; + + queue_delayed_work(dev->wq, &coredump->work, + mt76_connac_coredump_timeout); +} +export_symbol_gpl(mt76_connac_mcu_coredump_event); + diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h + mcu_cmd_chip_config = mcu_ce_prefix | 0xca, +int mt76_connac_mcu_chip_config(struct mt76_dev *dev); +void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, + struct mt76_connac_coredump *coredump); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/kconfig 
b/drivers/net/wireless/mediatek/mt76/mt7921/kconfig --- a/drivers/net/wireless/mediatek/mt76/mt7921/kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7921/kconfig + select want_dev_coredump diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c +static int mt7921_config(void *data, u64 val) +{ + struct mt7921_dev *dev = data; + int ret; + + mt7921_mutex_acquire(dev); + ret = mt76_connac_mcu_chip_config(&dev->mt76); + mt7921_mutex_release(dev); + + return ret; +} + +define_debugfs_attribute(fops_config, null, mt7921_config, "%lld "); + + debugfs_create_file("chip_config", 0600, dir, dev, &fops_config); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c + init_delayed_work(&dev->coredump.work, mt7921_coredump_work); + skb_queue_head_init(&dev->coredump.msg_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +#include <linux/devcoredump.h> +#include "mcu.h" + +void mt7921_coredump_work(struct work_struct *work) +{ + struct mt7921_dev *dev; + char *dump, *data; + + dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev, + coredump.work.work); + + if (time_is_after_jiffies(dev->coredump.last_activity + + 4 * mt76_connac_coredump_timeout)) { + queue_delayed_work(dev->mt76.wq, &dev->coredump.work, + mt76_connac_coredump_timeout); + return; + } + + dump = vzalloc(mt76_connac_coredump_sz); + data = dump; + + while (true) { + struct sk_buff *skb; + + spin_lock_bh(&dev->mt76.lock); + skb = __skb_dequeue(&dev->coredump.msg_list); + 
spin_unlock_bh(&dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7921_mcu_rxd)); + if (data + skb->len - dump > mt76_connac_coredump_sz) + break; + + memcpy(data, skb->data, skb->len); + data += skb->len; + + dev_kfree_skb(skb); + } + dev_coredumpv(dev->mt76.dev, dump, mt76_connac_coredump_sz, + gfp_kernel); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c + case mcu_event_coredump: + mt76_connac_mcu_coredump_event(&dev->mt76, skb, + &dev->coredump); + return; + rxd->eid == mcu_event_coredump || diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h + mcu_event_coredump = 0xf0, diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h --- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h -#include "../mt76_connac.h" +#include "../mt76_connac_mcu.h" + struct mt76_connac_coredump coredump; +void mt7921_coredump_work(struct work_struct *work);
Networking
0da3c795d07bf005d4b0be8d6cdc4714aa51a988
sean wang
drivers
net
mediatek, mt76, mt7921, wireless
mt76: mt7663: introduce coredump support
similar to mt7921 devices, introduce coredump support for mt7663 chipset
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
introduce mt7921e support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mt76 ']
['h', 'kconfig', 'c']
7
72
1
--- diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/kconfig --- a/drivers/net/wireless/mediatek/mt76/mt7615/kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/kconfig + select want_dev_coredump diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +static int mt7615_config(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + int ret; + + mt7615_mutex_acquire(dev); + ret = mt76_connac_mcu_chip_config(&dev->mt76); + mt7615_mutex_release(dev); + + return ret; +} + +define_debugfs_attribute(fops_config, null, mt7615_config, "%lld "); + + if (is_mt7663(&dev->mt76)) + debugfs_create_file("chip_config", 0600, dir, dev, + &fops_config); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c + init_delayed_work(&dev->coredump.work, mt7615_coredump_work); + skb_queue_head_init(&dev->coredump.msg_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +#include <linux/devcoredump.h> +#include "mcu.h" + +void mt7615_coredump_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + char *dump, *data; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + coredump.work.work); + + if (time_is_after_jiffies(dev->coredump.last_activity + + 4 * mt76_connac_coredump_timeout)) { + queue_delayed_work(dev->mt76.wq, &dev->coredump.work, + mt76_connac_coredump_timeout); + return; + } + + dump = vzalloc(mt76_connac_coredump_sz); + data = dump; + + while (true) { + struct sk_buff *skb; + + 
spin_lock_bh(&dev->mt76.lock); + skb = __skb_dequeue(&dev->coredump.msg_list); + spin_unlock_bh(&dev->mt76.lock); + + if (!skb) + break; + + skb_pull(skb, sizeof(struct mt7615_mcu_rxd)); + if (data + skb->len - dump > mt76_connac_coredump_sz) + break; + + memcpy(data, skb->data, skb->len); + data += skb->len; + + dev_kfree_skb(skb); + } + dev_coredumpv(dev->mt76.dev, dump, mt76_connac_coredump_sz, + gfp_kernel); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c + case mcu_event_coredump: + mt76_connac_mcu_coredump_event(&dev->mt76, skb, + &dev->coredump); + return; + rxd->eid == mcu_event_coredump || diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h + mcu_event_coredump = 0xf0, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h -#include "../mt76_connac.h" +#include "../mt76_connac_mcu.h" + struct mt76_connac_coredump coredump; +void mt7615_coredump_work(struct work_struct *work);
Networking
d2bf7959d9c0f631ef860edaf834d55773fdedff
lorenzo bianconi
drivers
net
mediatek, mt76, mt7615, wireless
net: mvneta: implement mqprio support
implement a basic mqprio support, inserting rules in rx that translate the tc to prio mapping into vlan prio to queues.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
implement mqprio support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvneta']
['c']
1
61
0
--- diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c +#define mvneta_vlan_prio_to_rxq 0x2440 +#define mvneta_vlan_prio_rxq_map(prio, rxq) ((rxq) << ((prio) * 3)) + u8 prio_tc_map[8]; +static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) +{ + mvreg_write(pp, mvneta_vlan_prio_to_rxq, 0); +} + +static void mvneta_setup_rx_prio_map(struct mvneta_port *pp) +{ + u32 val = 0; + int i; + + for (i = 0; i < rxq_number; i++) + val |= mvneta_vlan_prio_rxq_map(i, pp->prio_tc_map[i]); + + mvreg_write(pp, mvneta_vlan_prio_to_rxq, val); +} + +static int mvneta_setup_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *qopt) +{ + struct mvneta_port *pp = netdev_priv(dev); + u8 num_tc; + int i; + + qopt->hw = tc_mqprio_hw_offload_tcs; + num_tc = qopt->num_tc; + + if (num_tc > rxq_number) + return -einval; + + if (!num_tc) { + mvneta_clear_rx_prio_map(pp); + netdev_reset_tc(dev); + return 0; + } + + memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map)); + + mvneta_setup_rx_prio_map(pp); + + netdev_set_num_tc(dev, qopt->num_tc); + for (i = 0; i < qopt->num_tc; i++) + netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]); + + return 0; +} + +static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case tc_setup_qdisc_mqprio: + return mvneta_setup_mqprio(dev, type_data); + default: + return -eopnotsupp; + } +} + + .ndo_setup_tc = mvneta_setup_tc,
Networking
4906887a8ae5f1296f8079bcf4565a6092a8e402
maxime chevallier
drivers
net
ethernet, marvell
doc: marvell: add cm3 address space and ppv2.3 description
patch adds cm3 address space and ppv2.3 description.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['txt']
1
4
2
--- diff --git a/documentation/devicetree/bindings/net/marvell-pp2.txt b/documentation/devicetree/bindings/net/marvell-pp2.txt --- a/documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/documentation/devicetree/bindings/net/marvell-pp2.txt + marvell cn913x ethernet controller (ppv2.3) - common controller registers - lms registers - one register area per ethernet port - for "marvell,armada-7k-pp2", must contain the following register + for "marvell,armada-7k-pp2" used by 7k/8k and cn913x, must contain the following register - packet processor registers - networking interfaces registers + - cm3 address space used for tx flow control - clocks: pointers to the reference clocks for this device, consequently: - main controller clock (for both armada-375-pp2 and armada-7k-pp2) - reg = <0x0 0x100000>, <0x129000 0xb000>; + reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>;
Networking
1c2b4812b7daee6560d32c0d1c0963466f006942
stefan chulski marcin wojtas mw semihalf com
documentation
devicetree
bindings, net
dts: marvell: add cm3 sram memory to cp11x ethernet device tree
cm3 sram address space will be used for flow control configuration.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['dtsi']
1
1
1
--- diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi --- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi - reg = <0x0 0x100000>, <0x129000 0xb000>; + reg = <0x0 0x100000>, <0x129000 0xb000>, <0x220000 0x800>;
Networking
60523583b07cddc474522cdd94523cad9b80c5a9
konstantin porotchkin marcin wojtas mw semihalf com rob herring robh kernel org
arch
arm64
boot, dts, marvell
net: mvpp2: add cm3 sram memory map
this patch adds cm3 memory map.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
27
0
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h + void __iomem *cm3_base; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +static int mvpp2_get_sram(struct platform_device *pdev, + struct mvpp2 *priv) +{ + struct resource *res; + + res = platform_get_resource(pdev, ioresource_mem, 2); + if (!res) { + if (has_acpi_companion(&pdev->dev)) + dev_warn(&pdev->dev, "acpi is too old, flow control not supported "); + else + dev_warn(&pdev->dev, "dt is too old, flow control not supported "); + return 0; + } + + priv->cm3_base = devm_ioremap_resource(&pdev->dev, res); + if (is_err(priv->cm3_base)) + return ptr_err(priv->cm3_base); + + return 0; +} + + + /* map cm3 sram */ + err = mvpp2_get_sram(pdev, priv); + if (err) + dev_warn(&pdev->dev, "fail to alloc cm3 sram ");
Networking
e54ad1e01c00d35dcae8eff7954221fc8c700888
stefan chulski marcin wojtas mw semihalf com andrew lunn andrew lunn ch
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: always compare hw-version vs mvpp21
currently we have pp2v1 and pp2v2 hw-versions, with some different handlers depending upon condition hw_version = mvpp21/mvpp22. in a future there will be also pp2v3. let's use now the generic "if equal/notequal mvpp21" for all cases instead of "if mvpp22".
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['c']
1
19
19
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c - if (priv->hw_version == mvpp22 && queue_mode == mvpp2_qdist_single_mode) + if (priv->hw_version != mvpp21 && queue_mode == mvpp2_qdist_single_mode) - if (priv->hw_version == mvpp22) { + if (priv->hw_version != mvpp21) { - if (port->priv->hw_version == mvpp22) { + if (port->priv->hw_version != mvpp21) { - if (port->priv->hw_version != mvpp22) + if (port->priv->hw_version == mvpp21) - return !(port->priv->hw_version == mvpp22 && port->gop_id == 0); + return !(port->priv->hw_version != mvpp21 && port->gop_id == 0); - if (port->priv->hw_version == mvpp22 && port->gop_id == 0) { + if (port->priv->hw_version != mvpp21 && port->gop_id == 0) { - if (port->priv->hw_version != mvpp22 || port->gop_id != 0) + if (port->priv->hw_version == mvpp21 || port->gop_id != 0) - if (port->priv->hw_version != mvpp22 || port->gop_id != 0) + if (port->priv->hw_version == mvpp21 || port->gop_id != 0) - if (port->priv->hw_version == mvpp22) + if (port->priv->hw_version != mvpp21) - if (priv->hw_version == mvpp22 && port->port_irq) { + if (priv->hw_version != mvpp21 && port->port_irq) { - if (port->priv->hw_version == mvpp22) { + if (port->priv->hw_version != mvpp21) { - if (port->priv->hw_version == mvpp22 && + if (port->priv->hw_version != mvpp21 && - if (priv->hw_version == mvpp22) + if (priv->hw_version != mvpp21) - if (priv->hw_version == mvpp22 && dev_of_node(&pdev->dev)) { + if (priv->hw_version != mvpp21 && dev_of_node(&pdev->dev)) { - if (priv->hw_version == mvpp22 && + if (priv->hw_version != mvpp21 && - if (priv->hw_version == mvpp22) { + if (priv->hw_version != mvpp21) { - if (priv->hw_version == mvpp22) { + if (priv->hw_version != mvpp21) { - if (priv->hw_version == mvpp22) + if (priv->hw_version != mvpp21) - if (priv->hw_version == mvpp22) + if 
(priv->hw_version != mvpp21)
Networking
60dcd6b7d96e63221f41b3c68b19dd8c88eeda75
stefan chulski
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: add ppv23 version definition
this patch adds the ppv23 version definition. ppv23 is a new packet processor in cp115. everything that is supported by ppv22 is also supported by ppv23. no functional changes at this stage.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
24
15
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +#define mvpp2_ver_id_reg 0x50b0 +#define mvpp2_ver_pp22 0x10 +#define mvpp2_ver_pp23 0x11 -/* per-port xgmac registers. ppv2.2 only, only for gop port 0, +/* per-port xgmac registers. ppv2.2 and ppv2.3, only for gop port 0, -/* smi registers. ppv2.2 only, relative to priv->iface_base. */ +/* smi registers. ppv2.2 and ppv2.3, relative to priv->iface_base. */ -/* xpcs registers. ppv2.2 only */ +/* xpcs registers.ppv2.2 and ppv2.3 */ -/* xpcs registers. ppv2.2 only */ +/* xpcs registers. ppv2.2 and ppv2.3 */ - /* on ppv2.2, each "software thread" can access the base + /* on ppv2.2 and ppv2.3, each "software thread" can access the base - /* on ppv2.2, some port control registers are located into the system - * controller space. these registers are accessible through a regmap. + /* on ppv2.2 and ppv2.3, some port control registers are located into + * the system controller space. these registers are accessible + * through a regmap. - enum { mvpp21, mvpp22 } hw_version; + enum { mvpp21, mvpp22, mvpp23 } hw_version; -/* hw tx descriptor for ppv2.2 */ +/* hw tx descriptor for ppv2.2 and ppv2.3 */ -/* hw rx descriptor for ppv2.2 */ +/* hw rx descriptor for ppv2.2 and ppv2.3 */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c - /* ppv2.1 needs 8 bytes per buffer pointer, ppv2.2 needs 16 + /* ppv2.1 needs 8 bytes per buffer pointer, ppv2.2 and ppv2.3 needs 16 - /* handle the more complicated ppv2.2 case */ + /* handle the more complicated ppv2.2 and ppv2.3 case */ - * - ppv2.2: + * - ppv2.2 and ppv2.3: -/* initialize tx fifo's: the total fifo size is 48kb on ppv2.2. 
+/* initialize tx fifo's: the total fifo size is 48kb on ppv2.2 and ppv2.3. -/* initialize tx fifo's: the total fifo size is 19kb on ppv2.2. +/* initialize tx fifo's: the total fifo size is 19kb on ppv2.2 and ppv2.3. + if (priv->hw_version != mvpp21) { + if (mvpp2_read(priv, mvpp2_ver_id_reg) == mvpp2_ver_pp23) + priv->hw_version = mvpp23; + } +
Networking
6af27a1dc4224f77a8a651f21c80b5075f44aca3
stefan chulski marcin wojtas mw semihalf com russell king rmk kernel armlinux org uk
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: increase bm pool and rxq size
bm pool and rxq sizes are increased to support firmware flow control. the minimum depletion thresholds to support fc are 1024 buffers. the bm pool size is increased to 2048 to leave about 1024 buffers of headroom between the depletion thresholds and the bm pool size.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h']
1
4
4
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h -#define mvpp2_max_rxd_max 1024 -#define mvpp2_max_rxd_dflt 128 +#define mvpp2_max_rxd_max 2048 +#define mvpp2_max_rxd_dflt 1024 -#define mvpp2_bm_jumbo_buf_num 512 -#define mvpp2_bm_long_buf_num 1024 +#define mvpp2_bm_jumbo_buf_num 2048 +#define mvpp2_bm_long_buf_num 2048
Networking
d07ea73f37f9845f37d40fd897e695003a37e276
stefan chulski
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: add fca periodic timer configurations
the flow control periodic timer is used, when the port is in xoff, to transmit periodic xoff frames.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
58
0
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +/* fca registers. ppv2.2 and ppv2.3 */ +#define mvpp22_fca_base(port) (0x7600 + (port) * 0x1000) +#define mvpp22_fca_reg_size 16 +#define mvpp22_fca_reg_mask 0xffff +#define mvpp22_fca_control_reg 0x0 +#define mvpp22_fca_enable_periodic bit(11) +#define mvpp22_periodic_counter_lsb_reg (0x110) +#define mvpp22_periodic_counter_msb_reg (0x114) + +/* mss flow control */ +#define fc_quanta 0xffff +#define fc_clk_divider 100 + diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en) +{ + struct mvpp2 *priv = port->priv; + void __iomem *fca = priv->iface_base + mvpp22_fca_base(port->gop_id); + u32 val; + + val = readl(fca + mvpp22_fca_control_reg); + val &= ~mvpp22_fca_enable_periodic; + if (en) + val |= mvpp22_fca_enable_periodic; + writel(val, fca + mvpp22_fca_control_reg); +} + +static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer) +{ + struct mvpp2 *priv = port->priv; + void __iomem *fca = priv->iface_base + mvpp22_fca_base(port->gop_id); + u32 lsb, msb; + + lsb = timer & mvpp22_fca_reg_mask; + msb = timer >> mvpp22_fca_reg_size; + + writel(lsb, fca + mvpp22_periodic_counter_lsb_reg); + writel(msb, fca + mvpp22_periodic_counter_msb_reg); +} + +/* set flow control timer x100 faster than pause quanta to ensure that link + * partner won't send traffic if port is in xoff mode. 
+ */ +static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port) +{ + u32 timer; + + timer = (port->priv->tclk / (usec_per_sec * fc_clk_divider)) + * fc_quanta; + + mvpp22_gop_fca_enable_periodic(port, false); + + mvpp22_gop_fca_set_timer(port, timer); + + mvpp22_gop_fca_enable_periodic(port, true); +} + + mvpp22_gop_fca_set_periodic_timer(port); +
Networking
2788d8418af5a88db754cc8e7c16a7455934fc44
stefan chulski marcin wojtas mw semihalf com
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: add fca rxq non occupied descriptor threshold
the firmware needs to monitor the rx non-occupied descriptor bits for flow control to move to xoff mode. these bits need to be unmasked to be functional, but they will not raise interrupts as we leave the rx exception summary bit in mvpp2_isr_rx_tx_mask_reg clear.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
40
7
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +#define mvpp2_isr_rx_err_cause_reg(port) (0x5520 + 4 * (port)) +#define mvpp2_isr_rx_err_cause_nonocc_mask 0x00ff +#define mss_threshold_stop 768 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c + int cpu = smp_processor_id(); + u32 thread; - if (smp_processor_id() > port->priv->nthreads) + if (cpu > port->priv->nthreads) - mvpp2_thread_write(port->priv, - mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + thread = mvpp2_cpu_to_thread(port->priv, cpu); + + mvpp2_thread_write(port->priv, thread, + mvpp2_thread_write(port->priv, thread, + mvpp2_isr_rx_err_cause_reg(port->id), 0); - u32 val; + int cpu = smp_processor_id(); + u32 val, thread; - if (smp_processor_id() > port->priv->nthreads) + if (cpu > port->priv->nthreads) + thread = mvpp2_cpu_to_thread(port->priv, cpu); + - mvpp2_thread_write(port->priv, - mvpp2_cpu_to_thread(port->priv, smp_processor_id()), + mvpp2_thread_write(port->priv, thread, + mvpp2_thread_write(port->priv, thread, + mvpp2_isr_rx_err_cause_reg(port->id), + mvpp2_isr_rx_err_cause_nonocc_mask); + mvpp2_thread_write(port->priv, v->sw_thread_id, + mvpp2_isr_rx_err_cause_reg(port->id), + mvpp2_isr_rx_err_cause_nonocc_mask); +/* set the number of non-occupied descriptors threshold */ +static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + u32 val; + + mvpp2_write(port->priv, mvpp2_rxq_num_reg, rxq->id); + + val = mvpp2_read(port->priv, mvpp2_rxq_thresh_reg); + val &= ~mvpp2_rxq_non_occupied_mask; + val |= mss_threshold_stop << mvpp2_rxq_non_occupied_offset; + mvpp2_write(port->priv, mvpp2_rxq_thresh_reg, val); +} + + /* set the number of 
non occupied descriptors threshold */ + mvpp2_set_rxq_free_tresh(port, rxq); +
Networking
bf270fa3c445faee7c2ca46f862f82f88d7fec3f
stefan chulski marcin wojtas mw semihalf com
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: enable global flow control
this patch enables global flow control in fw and in the phylink validate mask.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
37
4
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h -#define fc_quanta 0xffff -#define fc_clk_divider 100 -#define mss_threshold_stop 768 +#define mss_fc_com_reg 0 +#define flow_control_enable_bit bit(0) +#define fc_quanta 0xffff +#define fc_clk_divider 100 +#define mss_threshold_stop 768 + + /* global tx flow control config */ + bool global_tx_fc; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data) +{ + writel(data, priv->cm3_base + offset); +} + +static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset) +{ + return readl(priv->cm3_base + offset); +} + + if (port->priv->global_tx_fc) { + phylink_set(mask, pause); + phylink_set(mask, asym_pause); + } + - int err; + int err, val; + + /* enable global flow control only if handler to sram not null */ + if (priv->cm3_base) + priv->global_tx_fc = true; + /* enable global flow control. in this stage global + * flow control enabled, but still disabled per port. + */ + if (priv->global_tx_fc && priv->hw_version != mvpp21) { + val = mvpp2_cm3_read(priv, mss_fc_com_reg); + val |= flow_control_enable_bit; + mvpp2_cm3_write(priv, mss_fc_com_reg, val); + } +
Networking
a59d354208a784d277cf057e8be8d17a7f5bf38e
stefan chulski marcin wojtas mw semihalf com
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: add rxq flow control configurations
this patch adds rxq flow control configurations. flow control is disabled by default. the minimum ring size is limited to 1024 descriptors.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
150
1
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +#define flow_control_update_command_bit bit(31) -#define mss_threshold_stop 768 + +#define mss_rxq_tresh_base 0x200 +#define mss_rxq_tresh_offs 4 +#define mss_rxq_tresh_reg(q, fq) (mss_rxq_tresh_base + (((q) + (fq)) \ + * mss_rxq_tresh_offs)) + +#define mss_rxq_tresh_start_mask 0xffff +#define mss_rxq_tresh_stop_mask (0xffff << mss_rxq_tresh_stop_offs) +#define mss_rxq_tresh_stop_offs 16 + +#define mss_rxq_ass_base 0x80 +#define mss_rxq_ass_offs 4 +#define mss_rxq_ass_per_reg 4 +#define mss_rxq_ass_per_offs 8 +#define mss_rxq_ass_portid_offs 0 +#define mss_rxq_ass_portid_mask 0x3 +#define mss_rxq_ass_hostid_offs 2 +#define mss_rxq_ass_hostid_mask 0x3f + +#define mss_rxq_ass_q_base(q, fq) ((((q) + (fq)) % mss_rxq_ass_per_reg) \ + * mss_rxq_ass_per_offs) +#define mss_rxq_ass_pq_base(q, fq) ((((q) + (fq)) / mss_rxq_ass_per_reg) \ + * mss_rxq_ass_offs) +#define mss_rxq_ass_reg(q, fq) (mss_rxq_ass_base + mss_rxq_ass_pq_base(q, fq)) + +#define mss_threshold_stop 768 +#define mss_threshold_start 1024 + + /* spinlocks for cm3 shared memory configuration */ + spinlock_t mss_spinlock; + + /* firmware tx flow control */ + bool tx_fc; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +/* routine enable flow control for rxqs condition */ +static void mvpp2_rxq_enable_fc(struct mvpp2_port *port) +{ + int val, cm3_state, host_id, q; + int fq = port->first_rxq; + unsigned long flags; + + spin_lock_irqsave(&port->priv->mss_spinlock, flags); + + /* remove flow control enable bit to prevent race between fw and kernel + * if flow control was enabled, it would be re-enabled. 
+ */ + val = mvpp2_cm3_read(port->priv, mss_fc_com_reg); + cm3_state = (val & flow_control_enable_bit); + val &= ~flow_control_enable_bit; + mvpp2_cm3_write(port->priv, mss_fc_com_reg, val); + + /* set same flow control for all rxqs */ + for (q = 0; q < port->nrxqs; q++) { + /* set stop and start flow control rxq thresholds */ + val = mss_threshold_start; + val |= (mss_threshold_stop << mss_rxq_tresh_stop_offs); + mvpp2_cm3_write(port->priv, mss_rxq_tresh_reg(q, fq), val); + + val = mvpp2_cm3_read(port->priv, mss_rxq_ass_reg(q, fq)); + /* set rxq port id */ + val &= ~(mss_rxq_ass_portid_mask << mss_rxq_ass_q_base(q, fq)); + val |= (port->id << mss_rxq_ass_q_base(q, fq)); + val &= ~(mss_rxq_ass_hostid_mask << (mss_rxq_ass_q_base(q, fq) + + mss_rxq_ass_hostid_offs)); + + /* calculate rxq host id: + * in single queue mode: host id equal to host id used for + * shared rx interrupt + * in multi queue mode: host id equal to number of + * rxq id / number of cos queues + * in single resource mode: host id always equal to 0 + */ + if (queue_mode == mvpp2_qdist_single_mode) + host_id = port->nqvecs; + else if (queue_mode == mvpp2_qdist_multi_mode) + host_id = q; + else + host_id = 0; + + /* set rxq host id */ + val |= (host_id << (mss_rxq_ass_q_base(q, fq) + + mss_rxq_ass_hostid_offs)); + + mvpp2_cm3_write(port->priv, mss_rxq_ass_reg(q, fq), val); + } + + /* notify firmware that flow control config space ready for update */ + val = mvpp2_cm3_read(port->priv, mss_fc_com_reg); + val |= flow_control_update_command_bit; + val |= cm3_state; + mvpp2_cm3_write(port->priv, mss_fc_com_reg, val); + + spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); +} + +/* routine disable flow control for rxqs condition */ +static void mvpp2_rxq_disable_fc(struct mvpp2_port *port) +{ + int val, cm3_state, q; + unsigned long flags; + int fq = port->first_rxq; + + spin_lock_irqsave(&port->priv->mss_spinlock, flags); + + /* remove flow control enable bit to prevent race between fw and kernel + 
* if flow control was enabled, it would be re-enabled. + */ + val = mvpp2_cm3_read(port->priv, mss_fc_com_reg); + cm3_state = (val & flow_control_enable_bit); + val &= ~flow_control_enable_bit; + mvpp2_cm3_write(port->priv, mss_fc_com_reg, val); + + /* disable flow control for all rxqs */ + for (q = 0; q < port->nrxqs; q++) { + /* set threshold 0 to disable flow control */ + val = 0; + val |= (0 << mss_rxq_tresh_stop_offs); + mvpp2_cm3_write(port->priv, mss_rxq_tresh_reg(q, fq), val); + + val = mvpp2_cm3_read(port->priv, mss_rxq_ass_reg(q, fq)); + + val &= ~(mss_rxq_ass_portid_mask << mss_rxq_ass_q_base(q, fq)); + + val &= ~(mss_rxq_ass_hostid_mask << (mss_rxq_ass_q_base(q, fq) + + mss_rxq_ass_hostid_offs)); + + mvpp2_cm3_write(port->priv, mss_rxq_ass_reg(q, fq), val); + } + + /* notify firmware that flow control config space ready for update */ + val = mvpp2_cm3_read(port->priv, mss_fc_com_reg); + val |= flow_control_update_command_bit; + val |= cm3_state; + mvpp2_cm3_write(port->priv, mss_fc_com_reg, val); + + spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); +} + + + if (port->tx_fc) + mvpp2_rxq_disable_fc(port); + + if (port->tx_fc) + mvpp2_rxq_enable_fc(port); + + else if (ring->rx_pending < mss_threshold_start) + new_rx_pending = mss_threshold_start; + /* init mss lock */ + spin_lock_init(&priv->mss_spinlock); +
Networking
3bd17fdc08e99c40044aed061e8f6599a1e20710
stefan chulski marcin wojtas mw semihalf com
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: add ethtool flow control configuration support
this patch adds ethtool flow control configuration support.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
111
0
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +#define mss_buf_pool_base 0x40 +#define mss_buf_pool_offs 4 +#define mss_buf_pool_reg(id) (mss_buf_pool_base \ + + (id) * mss_buf_pool_offs) + +#define mss_buf_pool_stop_mask 0xfff +#define mss_buf_pool_start_mask (0xfff << mss_buf_pool_start_offs) +#define mss_buf_pool_start_offs 12 +#define mss_buf_pool_ports_mask (0xf << mss_buf_pool_ports_offs) +#define mss_buf_pool_ports_offs 24 +#define mss_buf_pool_port_offs(id) (0x1 << \ + ((id) + mss_buf_pool_ports_offs)) + diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +/* routine disable/enable flow control for bm pool condition */ +static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port, + struct mvpp2_bm_pool *pool, + bool en) +{ + int val, cm3_state; + unsigned long flags; + + spin_lock_irqsave(&port->priv->mss_spinlock, flags); + + /* remove flow control enable bit to prevent race between fw and kernel + * if flow control were enabled, it would be re-enabled. 
+ */ + val = mvpp2_cm3_read(port->priv, mss_fc_com_reg); + cm3_state = (val & flow_control_enable_bit); + val &= ~flow_control_enable_bit; + mvpp2_cm3_write(port->priv, mss_fc_com_reg, val); + + /* check if bm pool should be enabled/disable */ + if (en) { + /* set bm pool start and stop thresholds per port */ + val = mvpp2_cm3_read(port->priv, mss_buf_pool_reg(pool->id)); + val |= mss_buf_pool_port_offs(port->id); + val &= ~mss_buf_pool_start_mask; + val |= (mss_threshold_start << mss_buf_pool_start_offs); + val &= ~mss_buf_pool_stop_mask; + val |= mss_threshold_stop; + mvpp2_cm3_write(port->priv, mss_buf_pool_reg(pool->id), val); + } else { + /* remove bm pool from the port */ + val = mvpp2_cm3_read(port->priv, mss_buf_pool_reg(pool->id)); + val &= ~mss_buf_pool_port_offs(port->id); + + /* zero bm pool start and stop thresholds to disable pool + * flow control if pool empty (not used by any port) + */ + if (!pool->buf_num) { + val &= ~mss_buf_pool_start_mask; + val &= ~mss_buf_pool_stop_mask; + } + + mvpp2_cm3_write(port->priv, mss_buf_pool_reg(pool->id), val); + } + + /* notify firmware that flow control config space ready for update */ + val = mvpp2_cm3_read(port->priv, mss_fc_com_reg); + val |= flow_control_update_command_bit; + val |= cm3_state; + mvpp2_cm3_write(port->priv, mss_fc_com_reg, val); + + spin_unlock_irqrestore(&port->priv->mss_spinlock, flags); +} + + if (port->tx_fc) { + if (pkt_size > mvpp2_bm_long_pkt_size) + mvpp2_bm_pool_update_fc(port, + port->pool_short, + false); + else + mvpp2_bm_pool_update_fc(port, port->pool_long, + false); + } + + + if (port->tx_fc) { + if (pkt_size > mvpp2_bm_long_pkt_size) + mvpp2_bm_pool_update_fc(port, port->pool_long, + true); + else + mvpp2_bm_pool_update_fc(port, port->pool_short, + true); + } + + /* update l4 checksum when jumbo enable/disable on port */ + if (new_long_pool == mvpp2_bm_jumbo && port->id != 0) { + dev->features &= ~(netif_f_ip_csum | netif_f_ipv6_csum); + dev->hw_features &= ~(netif_f_ip_csum | 
+ netif_f_ipv6_csum); + } else { + dev->features |= netif_f_ip_csum | netif_f_ipv6_csum; + dev->hw_features |= netif_f_ip_csum | netif_f_ipv6_csum; + } + int i; + if (port->priv->global_tx_fc) { + port->tx_fc = tx_pause; + if (tx_pause) + mvpp2_rxq_enable_fc(port); + else + mvpp2_rxq_disable_fc(port); + if (port->priv->percpu_pools) { + for (i = 0; i < port->nrxqs; i++) + mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause); + } else { + mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause); + mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause); + } + } +
Networking
76055831cf84b8fc202f7e5c6b6639817832eef3
stefan chulski
drivers
net
ethernet, marvell, mvpp2
net: mvpp2: add bm protection underrun feature support
the pp2v23 hardware supports a feature allowing to double the size of bppi by decreasing number of pools from 16 to 8. increasing of bppi size protect bm drop from bppi underrun. underrun could occurred due to stress on ddr and as result slow buffer transition from bppe to bppi. new bppi threshold recommended by spec is: bppi low threshold - 640 buffers bppi high threshold - 832 buffers supported only in ppv23.
this release allows to map an uid to a different one in a mount; it also adds support for selecting the preemption model at runtime; support for a low-overhead memory error detector designed to be used in production; support for the acrn hypervisor designed for embedded systems; btrfs initial support for zoned devices, subpage blocks sizes and performance improvements; support for eager nfs writes; support for a thermal power management to control the surface temperature of embedded devices in an unified way; the napi polling can be moved to a kernel thread; and support for non-blocking path lookups. as always, there are many other features, new drivers, improvements and fixes.
add tx flow control support
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'cryptography', 'security', 'networking', 'architectures x86 arm risc-v powerpc mips csky s390 pa-risc c6x']
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'cpu frequency scaling', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'cryptography hardware acceleration', 'pci', 'non-transparent bridge (ntb)', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'cxl (compute express link)', 'various']
['mvpp2 ']
['h', 'c']
2
34
0
--- diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +#define mvpp2_bm_bppi_high_thresh 0x1e +#define mvpp2_bm_bppi_low_thresh 0x1c +#define mvpp23_bm_bppi_high_thresh 0x34 +#define mvpp23_bm_bppi_low_thresh 0x28 +#define mvpp22_bm_pool_base_addr_high_reg 0x6310 +#define mvpp22_bm_pool_base_addr_high_mask 0xff +#define mvpp23_bm_8pool_mode bit(8) + diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c + + val &= ~mvpp2_bm_low_thresh_mask; + val &= ~mvpp2_bm_high_thresh_mask; + + /* set 8 pools bppi threshold for mvpp23 */ + if (priv->hw_version == mvpp23) { + val |= mvpp2_bm_low_thresh_value(mvpp23_bm_bppi_low_thresh); + val |= mvpp2_bm_high_thresh_value(mvpp23_bm_bppi_high_thresh); + } else { + val |= mvpp2_bm_low_thresh_value(mvpp2_bm_bppi_low_thresh); + val |= mvpp2_bm_high_thresh_value(mvpp2_bm_bppi_high_thresh); + } + +/* routine enable ppv23 8 pool mode */ +static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv) +{ + int val; + + val = mvpp2_read(priv, mvpp22_bm_pool_base_addr_high_reg); + val |= mvpp23_bm_8pool_mode; + mvpp2_write(priv, mvpp22_bm_pool_base_addr_high_reg, val); +} + + if (priv->hw_version == mvpp23) + mvpp23_bm_set_8pool_mode(priv); +
Networking
eb30b269549a0cd27c3b9a67676f6a39c77fcfa0
stefan chulski marcin wojtas mw semihalf com
drivers
net
ethernet, marvell, mvpp2