commit_title
stringlengths 13
124
| commit_body
stringlengths 0
1.9k
| release_summary
stringclasses 52
values | changes_summary
stringlengths 1
758
| release_affected_domains
stringclasses 33
values | release_affected_drivers
stringclasses 51
values | domain_of_changes
stringlengths 2
571
| language_set
stringclasses 983
values | diffstat_files
int64 1
300
| diffstat_insertions
int64 0
309k
| diffstat_deletions
int64 0
168k
| commit_diff
stringlengths 92
23.4M
| category
stringclasses 108
values | commit_hash
stringlengths 34
40
| related_people
stringlengths 0
370
| domain
stringclasses 21
values | subdomain
stringclasses 241
values | leaf_module
stringlengths 0
912
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
drm/meson: store the framebuffer width for plane commit
|
also store the framebuffer width in the private common struct to be used by the afbc decoder module driver when committing the afbc plane.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['h', 'c']
| 2
| 2
| 0
|
--- diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h + uint32_t osd1_width; diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c + priv->viu.osd1_width = fb->width;
|
Graphics
|
ce7cb472108c37884c6084b317ceb07592906133
|
neil armstrong kevin hilman khilman baylibre com
|
drivers
|
gpu
|
drm, meson
|
drm/meson: add rdma module driver
|
the vpu embeds a "register dma" that can write a sequence of registers on the vpu ahb bus, either manually or triggered by an internal irq event like vsync or a line input counter.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['h', 'c', 'makefile']
| 4
| 163
| 0
|
--- diff --git a/drivers/gpu/drm/meson/makefile b/drivers/gpu/drm/meson/makefile --- a/drivers/gpu/drm/meson/makefile +++ b/drivers/gpu/drm/meson/makefile +meson-drm-y += meson_rdma.o diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h + + struct { + dma_addr_t addr_phys; + uint32_t *addr; + unsigned int offset; + } rdma; diff --git a/drivers/gpu/drm/meson/meson_rdma.c b/drivers/gpu/drm/meson/meson_rdma.c --- /dev/null +++ b/drivers/gpu/drm/meson/meson_rdma.c +// spdx-license-identifier: gpl-2.0+ +/* + * copyright (c) 2019 baylibre, sas + * author: neil armstrong <narmstrong@baylibre.com> + */ + +#include <linux/bitfield.h> +#include <linux/dma-mapping.h> + +#include "meson_drv.h" +#include "meson_registers.h" +#include "meson_rdma.h" + +/* + * the vpu embeds a "register dma" that can write a sequence of registers + * on the vpu ahb bus, either manually or triggered by an internal irq + * event like vsync or a line input counter. + * the initial implementation handles a single channel (over 8), triggered + * by the vsync irq and does not handle the rdma irq. 
+ */ + +#define rdma_desc_size (sizeof(uint32_t) * 2) + +int meson_rdma_init(struct meson_drm *priv) +{ + if (!priv->rdma.addr) { + /* allocate a page buffer */ + priv->rdma.addr = + dma_alloc_coherent(priv->dev, sz_4k, + &priv->rdma.addr_phys, + gfp_kernel); + if (!priv->rdma.addr) + return -enomem; + } + + priv->rdma.offset = 0; + + writel_relaxed(rdma_ctrl_sw_reset, + priv->io_base + _reg(rdma_ctrl)); + writel_relaxed(rdma_default_config | + field_prep(rdma_ctrl_ahb_wr_burst, 3) | + field_prep(rdma_ctrl_ahb_rd_burst, 0), + priv->io_base + _reg(rdma_ctrl)); + + return 0; +} + +void meson_rdma_free(struct meson_drm *priv) +{ + if (!priv->rdma.addr && !priv->rdma.addr_phys) + return; + + meson_rdma_stop(priv); + + dma_free_coherent(priv->dev, sz_4k, + priv->rdma.addr, priv->rdma.addr_phys); + + priv->rdma.addr = null; + priv->rdma.addr_phys = (dma_addr_t)null; +} + +void meson_rdma_setup(struct meson_drm *priv) +{ + /* channel 1: write flag, no address increment */ + writel_bits_relaxed(rdma_access_rw_flag_chan1 | + rdma_access_addr_inc_chan1, + rdma_access_rw_flag_chan1, + priv->io_base + _reg(rdma_access_auto)); +} + +void meson_rdma_stop(struct meson_drm *priv) +{ + writel_bits_relaxed(rdma_irq_clear_chan1, + rdma_irq_clear_chan1, + priv->io_base + _reg(rdma_ctrl)); + + /* stop channel 1 */ + writel_bits_relaxed(rdma_access_trigger_chan1, + field_prep(rdma_access_addr_inc_chan1, + rdma_access_trigger_stop), + priv->io_base + _reg(rdma_access_auto)); +} + +void meson_rdma_reset(struct meson_drm *priv) +{ + meson_rdma_stop(priv); + + priv->rdma.offset = 0; +} + +static void meson_rdma_writel(struct meson_drm *priv, uint32_t val, + uint32_t reg) +{ + if (priv->rdma.offset >= (sz_4k / rdma_desc_size)) { + dev_warn_once(priv->dev, "%s: overflow ", __func__); + return; + } + + priv->rdma.addr[priv->rdma.offset++] = reg; + priv->rdma.addr[priv->rdma.offset++] = val; +} + +/* + * this will add the register to the rdma buffer and write it to the + * hardware at the same 
time. + * when meson_rdma_flush is called, the rdma will replay the register + * writes in order. + */ +void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg) +{ + meson_rdma_writel(priv, val, reg); + + writel_relaxed(val, priv->io_base + _reg(reg)); +} + +void meson_rdma_flush(struct meson_drm *priv) +{ + meson_rdma_stop(priv); + + /* start of channel 1 register writes buffer */ + writel(priv->rdma.addr_phys, + priv->io_base + _reg(rdma_ahb_start_addr_1)); + + /* last byte on channel 1 register writes buffer */ + writel(priv->rdma.addr_phys + (priv->rdma.offset * rdma_desc_size) - 1, + priv->io_base + _reg(rdma_ahb_end_addr_1)); + + /* trigger channel 1 on vsync event */ + writel_bits_relaxed(rdma_access_trigger_chan1, + field_prep(rdma_access_trigger_chan1, + rdma_access_trigger_vsync), + priv->io_base + _reg(rdma_access_auto)); + + priv->rdma.offset = 0; +} diff --git a/drivers/gpu/drm/meson/meson_rdma.h b/drivers/gpu/drm/meson/meson_rdma.h --- /dev/null +++ b/drivers/gpu/drm/meson/meson_rdma.h +/* spdx-license-identifier: gpl-2.0+ */ +/* + * copyright (c) 2019 baylibre, sas + * author: neil armstrong <narmstrong@baylibre.com> + */ + +#ifndef __meson_rdma_h +#define __meson_rdma_h + +#include "meson_drv.h" + +int meson_rdma_init(struct meson_drm *priv); +void meson_rdma_free(struct meson_drm *priv); +void meson_rdma_setup(struct meson_drm *priv); +void meson_rdma_reset(struct meson_drm *priv); +void meson_rdma_stop(struct meson_drm *priv); + +void meson_rdma_writel_sync(struct meson_drm *priv, uint32_t val, uint32_t reg); +void meson_rdma_flush(struct meson_drm *priv); + +#endif /* __meson_rdma_h */
|
Graphics
|
63fba242c464ba3417d33f97bc0d47bbf46d0721
|
neil armstrong
|
drivers
|
gpu
|
drm, meson
|
drm/meson: add afbcd module driver
|
this adds the driver for the arm framebuffer compression decoders found in the amlogic gxm and g12a socs.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['c', 'makefile', 'h']
| 5
| 473
| 8
|
--- diff --git a/drivers/gpu/drm/meson/makefile b/drivers/gpu/drm/meson/makefile --- a/drivers/gpu/drm/meson/makefile +++ b/drivers/gpu/drm/meson/makefile -meson-drm-y += meson_rdma.o +meson-drm-y += meson_rdma.o meson_osd_afbcd.o diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c +#include "meson_osd_afbcd.h" +#include "meson_rdma.h" + const struct meson_drm_match_data *match; + match = of_device_get_match_data(dev); + if (!match) + return -enodev; + - - priv->compat = (enum vpu_compatible)of_device_get_match_data(priv->dev); + priv->compat = match->compat; + priv->afbcd.ops = match->afbcd_ops; + if (priv->afbcd.ops) { + ret = priv->afbcd.ops->init(priv); + if (ret) + return ret; + } + if (priv->afbcd.ops) { + priv->afbcd.ops->reset(priv); + meson_rdma_free(priv); + } + - + if (priv->afbcd.ops) + priv->afbcd.ops->init(priv); +static struct meson_drm_match_data meson_drm_gxbb_data = { + .compat = vpu_compatible_gxbb, +}; + +static struct meson_drm_match_data meson_drm_gxl_data = { + .compat = vpu_compatible_gxl, +}; + +static struct meson_drm_match_data meson_drm_gxm_data = { + .compat = vpu_compatible_gxm, + .afbcd_ops = &meson_afbcd_gxm_ops, +}; + +static struct meson_drm_match_data meson_drm_g12a_data = { + .compat = vpu_compatible_g12a, + .afbcd_ops = &meson_afbcd_g12a_ops, +}; + - .data = (void *)vpu_compatible_gxbb }, + .data = (void *)&meson_drm_gxbb_data }, - .data = (void *)vpu_compatible_gxl }, + .data = (void *)&meson_drm_gxl_data }, - .data = (void *)vpu_compatible_gxm }, + .data = (void *)&meson_drm_gxm_data }, - .data = (void *)vpu_compatible_g12a }, + .data = (void *)&meson_drm_g12a_data }, diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h +struct meson_afbcd_ops; +struct meson_drm_match_data { + enum vpu_compatible compat; + 
struct meson_afbcd_ops *afbcd_ops; +}; + + + struct { + struct meson_afbcd_ops *ops; + u64 modifier; + u32 format; + } afbcd; diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.c b/drivers/gpu/drm/meson/meson_osd_afbcd.c --- /dev/null +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.c +// spdx-license-identifier: gpl-2.0+ +/* + * copyright (c) 2019 baylibre, sas + * author: neil armstrong <narmstrong@baylibre.com> + */ + +#include <linux/bitfield.h> + +#include <drm/drm_print.h> +#include <drm/drm_fourcc.h> + +#include "meson_drv.h" +#include "meson_registers.h" +#include "meson_viu.h" +#include "meson_rdma.h" +#include "meson_osd_afbcd.h" + +/* + * doc: driver for the arm framebuffer compression decoders + * + * the amlogic gxm and g12a soc families embeds an afbc decoder, + * to decode compressed buffers generated by the arm mali gpu. + * + * for the gxm family, amlogic designed their own decoder, named in + * the vendor source as "meson_afbc", and a single decoder is available + * for the 2 osd planes. + * this decoder is compatible with the afbc 1.0 specifications and the + * mali t820 gpu capabilities. + * it supports : + * - basic afbc buffer for rgb32 only, thus ytr feature is mandatory + * - sparse layout and split layout + * - only 16x16 superblock + * + * the decoder reads the data from the sdram, decodes and sends the + * decoded pixel stream to the osd1 plane pixel composer. + * + * for the g12a family, amlogic integrated an arm afbc decoder, named + * in the vendor source as "mali_afbc", and the decoder can decode up + * to 4 surfaces, one for each of the 4 available osds. + * this decoder is compatible with the afbc 1.2 specifications for the + * mali g31 and g52 gpus. 
+ * is supports : + * - basic afbc buffer for multiple rgb and yuv pixel formats + * - sparse layout and split layout + * - 16x16 and 32x8 "wideblk" superblocks + * - tiled header + * + * the arm afbc decoder independent from the vpu pixel pipeline, so + * the arm afbc decoder reads the data from the sdram then decodes + * into a private internal physical address where the osd1 plane pixel + * composer unpacks the decoded data. + */ + +/* amlogic afbc decoder for gxm family */ + +#define osd1_afbcd_rgb32 0x15 + +static int meson_gxm_afbcd_pixel_fmt(u64 modifier, uint32_t format) +{ + switch (format) { + case drm_format_xbgr8888: + case drm_format_abgr8888: + return osd1_afbcd_rgb32; + /* tofix support mode formats */ + default: + drm_debug("unsupported afbc format[%08x] ", format); + return -einval; + } +} + +static bool meson_gxm_afbcd_supported_fmt(u64 modifier, uint32_t format) +{ + if (modifier & afbc_format_mod_block_size_32x8) + return false; + + if (!(modifier & afbc_format_mod_ytr)) + return false; + + return meson_gxm_afbcd_pixel_fmt(modifier, format) >= 0; +} + +static int meson_gxm_afbcd_init(struct meson_drm *priv) +{ + return 0; +} + +static int meson_gxm_afbcd_reset(struct meson_drm *priv) +{ + writel_relaxed(viu_sw_reset_osd1_afbcd, + priv->io_base + _reg(viu_sw_reset)); + writel_relaxed(0, priv->io_base + _reg(viu_sw_reset)); + + return 0; +} + +static int meson_gxm_afbcd_enable(struct meson_drm *priv) +{ + writel_relaxed(field_prep(osd1_afbcd_id_fifo_thrd, 0x40) | + osd1_afbcd_dec_enable, + priv->io_base + _reg(osd1_afbcd_enable)); + + return 0; +} + +static int meson_gxm_afbcd_disable(struct meson_drm *priv) +{ + writel_bits_relaxed(osd1_afbcd_dec_enable, 0, + priv->io_base + _reg(osd1_afbcd_enable)); + + return 0; +} + +static int meson_gxm_afbcd_setup(struct meson_drm *priv) +{ + u32 conv_lbuf_len; + u32 mode = field_prep(osd1_afbcd_mif_urgent, 3) | + field_prep(osd1_afbcd_hold_line_num, 4) | + field_prep(osd1_afbcd_rgba_exchan_ctrl, 0x34) | + 
meson_gxm_afbcd_pixel_fmt(priv->afbcd.modifier, + priv->afbcd.format); + + if (priv->afbcd.modifier & afbc_format_mod_sparse) + mode |= osd1_afbcd_hreg_half_block; + + if (priv->afbcd.modifier & afbc_format_mod_split) + mode |= osd1_afbcd_hreg_block_split; + + writel_relaxed(mode, priv->io_base + _reg(osd1_afbcd_mode)); + + writel_relaxed(field_prep(osd1_afbcd_hreg_vsize_in, + priv->viu.osd1_width) | + field_prep(osd1_afbcd_hreg_hsize_in, + priv->viu.osd1_height), + priv->io_base + _reg(osd1_afbcd_size_in)); + + writel_relaxed(priv->viu.osd1_addr >> 4, + priv->io_base + _reg(osd1_afbcd_hdr_ptr)); + writel_relaxed(priv->viu.osd1_addr >> 4, + priv->io_base + _reg(osd1_afbcd_frame_ptr)); + /* tofix: bits 31:24 are not documented, nor the meaning of 0xe4 */ + writel_relaxed((0xe4 << 24) | (priv->viu.osd1_addr & 0xffffff), + priv->io_base + _reg(osd1_afbcd_chroma_ptr)); + + if (priv->viu.osd1_width <= 128) + conv_lbuf_len = 32; + else if (priv->viu.osd1_width <= 256) + conv_lbuf_len = 64; + else if (priv->viu.osd1_width <= 512) + conv_lbuf_len = 128; + else if (priv->viu.osd1_width <= 1024) + conv_lbuf_len = 256; + else if (priv->viu.osd1_width <= 2048) + conv_lbuf_len = 512; + else + conv_lbuf_len = 1024; + + writel_relaxed(conv_lbuf_len, + priv->io_base + _reg(osd1_afbcd_conv_ctrl)); + + writel_relaxed(field_prep(osd1_afbcd_dec_pixel_bgn_h, 0) | + field_prep(osd1_afbcd_dec_pixel_end_h, + priv->viu.osd1_width - 1), + priv->io_base + _reg(osd1_afbcd_pixel_hscope)); + + writel_relaxed(field_prep(osd1_afbcd_dec_pixel_bgn_v, 0) | + field_prep(osd1_afbcd_dec_pixel_end_v, + priv->viu.osd1_height - 1), + priv->io_base + _reg(osd1_afbcd_pixel_vscope)); + + return 0; +} + +struct meson_afbcd_ops meson_afbcd_gxm_ops = { + .init = meson_gxm_afbcd_init, + .reset = meson_gxm_afbcd_reset, + .enable = meson_gxm_afbcd_enable, + .disable = meson_gxm_afbcd_disable, + .setup = meson_gxm_afbcd_setup, + .supported_fmt = meson_gxm_afbcd_supported_fmt, +}; + +/* arm afbc decoder for g12a 
family */ + +/* amlogic g12a mali afbc decoder supported formats */ +enum { + mafbc_fmt_rgb565 = 0, + mafbc_fmt_rgba5551, + mafbc_fmt_rgba1010102, + mafbc_fmt_yuv420_10b, + mafbc_fmt_rgb888, + mafbc_fmt_rgba8888, + mafbc_fmt_rgba4444, + mafbc_fmt_r8, + mafbc_fmt_rg88, + mafbc_fmt_yuv420_8b, + mafbc_fmt_yuv422_8b = 11, + mafbc_fmt_yuv422_10b = 14, +}; + +static int meson_g12a_afbcd_pixel_fmt(u64 modifier, uint32_t format) +{ + switch (format) { + case drm_format_xrgb8888: + case drm_format_argb8888: + /* ytr is forbidden for non xbgr formats */ + if (modifier & afbc_format_mod_ytr) + return -einval; + /* fall through */ + case drm_format_xbgr8888: + case drm_format_abgr8888: + return mafbc_fmt_rgba8888; + case drm_format_rgb888: + /* ytr is forbidden for non xbgr formats */ + if (modifier & afbc_format_mod_ytr) + return -einval; + return mafbc_fmt_rgb888; + case drm_format_rgb565: + /* ytr is forbidden for non xbgr formats */ + if (modifier & afbc_format_mod_ytr) + return -einval; + return mafbc_fmt_rgb565; + /* tofix support mode formats */ + default: + drm_debug("unsupported afbc format[%08x] ", format); + return -einval; + } +} + +static int meson_g12a_afbcd_bpp(uint32_t format) +{ + switch (format) { + case drm_format_xrgb8888: + case drm_format_argb8888: + case drm_format_xbgr8888: + case drm_format_abgr8888: + return 32; + case drm_format_rgb888: + return 24; + case drm_format_rgb565: + return 16; + /* tofix support mode formats */ + default: + drm_error("unsupported afbc format[%08x] ", format); + return 0; + } +} + +static int meson_g12a_afbcd_fmt_to_blk_mode(u64 modifier, uint32_t format) +{ + switch (format) { + case drm_format_xrgb8888: + case drm_format_argb8888: + case drm_format_xbgr8888: + case drm_format_abgr8888: + return osd_mali_color_mode_rgba8888; + case drm_format_rgb888: + return osd_mali_color_mode_rgb888; + case drm_format_rgb565: + return osd_mali_color_mode_rgb565; + /* tofix support mode formats */ + default: + drm_debug("unsupported afbc 
format[%08x] ", format); + return -einval; + } +} + +static bool meson_g12a_afbcd_supported_fmt(u64 modifier, uint32_t format) +{ + return meson_g12a_afbcd_pixel_fmt(modifier, format) >= 0; +} + +static int meson_g12a_afbcd_init(struct meson_drm *priv) +{ + int ret; + + ret = meson_rdma_init(priv); + if (ret) + return ret; + + meson_rdma_setup(priv); + + /* handle afbc decoder reset manually */ + writel_bits_relaxed(mali_afbcd_manual_reset, mali_afbcd_manual_reset, + priv->io_base + _reg(mali_afbcd_top_ctrl)); + + return 0; +} + +static int meson_g12a_afbcd_reset(struct meson_drm *priv) +{ + meson_rdma_reset(priv); + + meson_rdma_writel_sync(priv, viu_sw_reset_g12a_afbc_arb | + viu_sw_reset_g12a_osd1_afbcd, + viu_sw_reset); + meson_rdma_writel_sync(priv, 0, viu_sw_reset); + + return 0; +} + +static int meson_g12a_afbcd_enable(struct meson_drm *priv) +{ + meson_rdma_writel_sync(priv, vpu_mafbc_irq_surfaces_completed | + vpu_mafbc_irq_configuration_swapped | + vpu_mafbc_irq_decode_error | + vpu_mafbc_irq_detiling_error, + vpu_mafbc_irq_mask); + + meson_rdma_writel_sync(priv, vpu_mafbc_s0_enable, + vpu_mafbc_surface_cfg); + + meson_rdma_writel_sync(priv, vpu_mafbc_direct_swap, + vpu_mafbc_command); + + /* this will enable the rdma replaying the register writes on vsync */ + meson_rdma_flush(priv); + + return 0; +} + +static int meson_g12a_afbcd_disable(struct meson_drm *priv) +{ + writel_bits_relaxed(vpu_mafbc_s0_enable, 0, + priv->io_base + _reg(vpu_mafbc_surface_cfg)); + + return 0; +} + +static int meson_g12a_afbcd_setup(struct meson_drm *priv) +{ + u32 format = meson_g12a_afbcd_pixel_fmt(priv->afbcd.modifier, + priv->afbcd.format); + + if (priv->afbcd.modifier & afbc_format_mod_ytr) + format |= vpu_mafbc_yuv_transform; + + if (priv->afbcd.modifier & afbc_format_mod_split) + format |= vpu_mafbc_block_split; + + if (priv->afbcd.modifier & afbc_format_mod_tiled) + format |= vpu_mafbc_tiled_header_en; + + if ((priv->afbcd.modifier & afbc_format_mod_block_size_mask) == 
+ afbc_format_mod_block_size_32x8) + format |= field_prep(vpu_mafbc_super_block_aspect, 1); + + meson_rdma_writel_sync(priv, format, + vpu_mafbc_format_specifier_s0); + + meson_rdma_writel_sync(priv, priv->viu.osd1_addr, + vpu_mafbc_header_buf_addr_low_s0); + meson_rdma_writel_sync(priv, 0, + vpu_mafbc_header_buf_addr_high_s0); + + meson_rdma_writel_sync(priv, priv->viu.osd1_width, + vpu_mafbc_buffer_width_s0); + meson_rdma_writel_sync(priv, align(priv->viu.osd1_height, 32), + vpu_mafbc_buffer_height_s0); + + meson_rdma_writel_sync(priv, 0, + vpu_mafbc_bounding_box_x_start_s0); + meson_rdma_writel_sync(priv, priv->viu.osd1_width - 1, + vpu_mafbc_bounding_box_x_end_s0); + meson_rdma_writel_sync(priv, 0, + vpu_mafbc_bounding_box_y_start_s0); + meson_rdma_writel_sync(priv, priv->viu.osd1_height - 1, + vpu_mafbc_bounding_box_y_end_s0); + + meson_rdma_writel_sync(priv, meson_g12a_afbcd_out_addr, + vpu_mafbc_output_buf_addr_low_s0); + meson_rdma_writel_sync(priv, 0, + vpu_mafbc_output_buf_addr_high_s0); + + meson_rdma_writel_sync(priv, priv->viu.osd1_width * + (meson_g12a_afbcd_bpp(priv->afbcd.format) / 8), + vpu_mafbc_output_buf_stride_s0); + + return 0; +} + +struct meson_afbcd_ops meson_afbcd_g12a_ops = { + .init = meson_g12a_afbcd_init, + .reset = meson_g12a_afbcd_reset, + .enable = meson_g12a_afbcd_enable, + .disable = meson_g12a_afbcd_disable, + .setup = meson_g12a_afbcd_setup, + .fmt_to_blk_mode = meson_g12a_afbcd_fmt_to_blk_mode, + .supported_fmt = meson_g12a_afbcd_supported_fmt, +}; diff --git a/drivers/gpu/drm/meson/meson_osd_afbcd.h b/drivers/gpu/drm/meson/meson_osd_afbcd.h --- /dev/null +++ b/drivers/gpu/drm/meson/meson_osd_afbcd.h +/* spdx-license-identifier: gpl-2.0+ */ +/* + * copyright (c) 2019 baylibre, sas + * author: neil armstrong <narmstrong@baylibre.com> + */ + +#ifndef __meson_osd_afbcd_h +#define __meson_osd_afbcd_h + +#include "meson_drv.h" + +/* this is an internal address used to transfer pixel from afbc to the viu */ +#define 
meson_g12a_afbcd_out_addr 0x1000000 + +struct meson_afbcd_ops { + int (*init)(struct meson_drm *priv); + int (*reset)(struct meson_drm *priv); + int (*enable)(struct meson_drm *priv); + int (*disable)(struct meson_drm *priv); + int (*setup)(struct meson_drm *priv); + int (*fmt_to_blk_mode)(u64 modifier, uint32_t format); + bool (*supported_fmt)(u64 modifier, uint32_t format); +}; + +extern struct meson_afbcd_ops meson_afbcd_gxm_ops; +extern struct meson_afbcd_ops meson_afbcd_g12a_ops; + +#endif /* __meson_osd_afbcd_h */
|
Graphics
|
d1b5e41e13a7e9bde3e736df9b8693b0325e41bc
|
neil armstrong
|
drivers
|
gpu
|
drm, meson
|
drm/meson: plane: add support for afbc mode for osd1 plane
|
this adds all the osd configuration plumbing to support the afbc decoders path to display of the osd1 plane.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['c', 'h']
| 3
| 203
| 31
|
--- diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c + writel_relaxed(priv->viu.osd1_ctrl_stat2, + priv->io_base + _reg(viu_osd1_ctrl_stat2)); diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h --- a/drivers/gpu/drm/meson/meson_drv.h +++ b/drivers/gpu/drm/meson/meson_drv.h + bool osd1_afbcd; + uint32_t osd1_ctrl_stat2; + uint32_t osd1_blk1_cfg4; + uint32_t osd1_blk2_cfg4; diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c --- a/drivers/gpu/drm/meson/meson_plane.c +++ b/drivers/gpu/drm/meson/meson_plane.c +#include "meson_osd_afbcd.h" +#define meson_mod_afbc_valid_bits (afbc_format_mod_block_size_16x16 | \ + afbc_format_mod_block_size_32x8 | \ + afbc_format_mod_ytr | \ + afbc_format_mod_sparse | \ + afbc_format_mod_split) + +static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv) +{ + u32 line_stride = 0; + + switch (priv->afbcd.format) { + case drm_format_rgb565: + line_stride = ((priv->viu.osd1_width << 4) + 127) >> 7; + break; + case drm_format_rgb888: + case drm_format_xrgb8888: + case drm_format_argb8888: + case drm_format_xbgr8888: + case drm_format_abgr8888: + line_stride = ((priv->viu.osd1_width << 5) + 127) >> 7; + break; + } + + return ((line_stride + 1) >> 1) << 1; +} + + /* check if afbc decoder is required for this buffer */ + if ((meson_vpu_is_compatible(priv, vpu_compatible_gxm) || + meson_vpu_is_compatible(priv, vpu_compatible_g12a)) && + fb->modifier & drm_format_mod_arm_afbc(meson_mod_afbc_valid_bits)) + priv->viu.osd1_afbcd = true; + else + priv->viu.osd1_afbcd = false; + + priv->viu.osd1_ctrl_stat2 = readl(priv->io_base + + _reg(viu_osd1_ctrl_stat2)); + - priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << osd_canvas_sel) | - osd_endianness_le); + priv->viu.osd1_blk0_cfg[0] = canvas_id_osd1 << osd_canvas_sel; + + if (priv->viu.osd1_afbcd) { + if 
(meson_vpu_is_compatible(priv, vpu_compatible_g12a)) { + /* this is the internal decoding memory address */ + priv->viu.osd1_blk1_cfg4 = meson_g12a_afbcd_out_addr; + priv->viu.osd1_blk0_cfg[0] |= osd_endianness_be; + priv->viu.osd1_ctrl_stat2 |= osd_pending_stat_clean; + priv->viu.osd1_ctrl_stat |= viu_osd1_cfg_syn_en; + } + + if (meson_vpu_is_compatible(priv, vpu_compatible_gxm)) { + priv->viu.osd1_blk0_cfg[0] |= osd_endianness_le; + priv->viu.osd1_ctrl_stat2 |= osd_dpath_mali_afbcd; + } + } else { + priv->viu.osd1_blk0_cfg[0] |= osd_endianness_le; + + if (meson_vpu_is_compatible(priv, vpu_compatible_gxm)) + priv->viu.osd1_ctrl_stat2 &= ~osd_dpath_mali_afbcd; + } + if (priv->viu.osd1_afbcd && + meson_vpu_is_compatible(priv, vpu_compatible_g12a)) { + priv->viu.osd1_blk0_cfg[0] |= osd_mali_src_en | + priv->afbcd.ops->fmt_to_blk_mode(fb->modifier, + fb->format->format); + } else { + switch (fb->format->format) { + case drm_format_xrgb8888: + case drm_format_argb8888: + priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_32 | + osd_color_matrix_32_argb; + break; + case drm_format_xbgr8888: + case drm_format_abgr8888: + priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_32 | + osd_color_matrix_32_abgr; + break; + case drm_format_rgb888: + priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_24 | + osd_color_matrix_24_rgb; + break; + case drm_format_rgb565: + priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_16 | + osd_color_matrix_16_rgb565; + break; + }; + } + - /* for xrgb, replace the pixel's alpha by 0xff */ - writel_bits_relaxed(osd_replace_en, osd_replace_en, - priv->io_base + _reg(viu_osd1_ctrl_stat2)); - priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_32 | - osd_color_matrix_32_argb; - break; - writel_bits_relaxed(osd_replace_en, osd_replace_en, - priv->io_base + _reg(viu_osd1_ctrl_stat2)); - priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_32 | - osd_color_matrix_32_abgr; + priv->viu.osd1_ctrl_stat2 |= osd_replace_en; - /* for argb, use the pixel's alpha */ - writel_bits_relaxed(osd_replace_en, 0, - 
priv->io_base + _reg(viu_osd1_ctrl_stat2)); - priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_32 | - osd_color_matrix_32_argb; - break; - writel_bits_relaxed(osd_replace_en, 0, - priv->io_base + _reg(viu_osd1_ctrl_stat2)); - priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_32 | - osd_color_matrix_32_abgr; - break; - case drm_format_rgb888: - priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_24 | - osd_color_matrix_24_rgb; - break; - case drm_format_rgb565: - priv->viu.osd1_blk0_cfg[0] |= osd_blk_mode_16 | - osd_color_matrix_16_rgb565; + priv->viu.osd1_ctrl_stat2 &= ~osd_replace_en; + if (priv->viu.osd1_afbcd) { + priv->afbcd.modifier = fb->modifier; + priv->afbcd.format = fb->format->format; + + /* calculate decoder write stride */ + if (meson_vpu_is_compatible(priv, vpu_compatible_g12a)) + priv->viu.osd1_blk2_cfg4 = + meson_g12a_afbcd_line_stride(priv); + } + + if (priv->afbcd.ops) { + priv->afbcd.ops->reset(priv); + priv->afbcd.ops->disable(priv); + } + +static bool meson_plane_format_mod_supported(struct drm_plane *plane, + u32 format, u64 modifier) +{ + struct meson_plane *meson_plane = to_meson_plane(plane); + struct meson_drm *priv = meson_plane->priv; + int i; + + if (modifier == drm_format_mod_invalid) + return false; + + if (modifier == drm_format_mod_linear) + return true; + + if (!meson_vpu_is_compatible(priv, vpu_compatible_gxm) && + !meson_vpu_is_compatible(priv, vpu_compatible_g12a)) + return false; + + if (modifier & ~drm_format_mod_arm_afbc(meson_mod_afbc_valid_bits)) + return false; + + for (i = 0 ; i < plane->modifier_count ; ++i) + if (plane->modifiers[i] == modifier) + break; + + if (i == plane->modifier_count) { + drm_debug_kms("unsupported modifier "); + return false; + } + + if (priv->afbcd.ops && priv->afbcd.ops->supported_fmt) + return priv->afbcd.ops->supported_fmt(modifier, format); + + drm_debug_kms("afbc unsupported "); + return false; +} + + .format_mod_supported = meson_plane_format_mod_supported, +static const uint64_t format_modifiers_afbc_gxm[] = { 
+ drm_format_mod_arm_afbc(afbc_format_mod_block_size_16x16 | + afbc_format_mod_sparse | + afbc_format_mod_ytr), + /* split mandates sparse, rgb modes mandates ytr */ + drm_format_mod_arm_afbc(afbc_format_mod_block_size_16x16 | + afbc_format_mod_ytr | + afbc_format_mod_sparse | + afbc_format_mod_split), + drm_format_mod_linear, + drm_format_mod_invalid, +}; + +static const uint64_t format_modifiers_afbc_g12a[] = { + /* + * - tofix support afbc modifiers for yuv formats (16x16 + tiled) + * - split is mandatory for performances reasons when in 16x16 + * block size + * - 32x8 block size + split is mandatory with 4k frame size + * for performances reasons + */ + drm_format_mod_arm_afbc(afbc_format_mod_block_size_16x16 | + afbc_format_mod_sparse | + afbc_format_mod_split), + drm_format_mod_arm_afbc(afbc_format_mod_block_size_16x16 | + afbc_format_mod_ytr | + afbc_format_mod_sparse | + afbc_format_mod_split), + drm_format_mod_arm_afbc(afbc_format_mod_block_size_32x8 | + afbc_format_mod_sparse), + drm_format_mod_arm_afbc(afbc_format_mod_block_size_32x8 | + afbc_format_mod_ytr | + afbc_format_mod_sparse), + drm_format_mod_arm_afbc(afbc_format_mod_block_size_32x8 | + afbc_format_mod_sparse | + afbc_format_mod_split), + drm_format_mod_arm_afbc(afbc_format_mod_block_size_32x8 | + afbc_format_mod_ytr | + afbc_format_mod_sparse | + afbc_format_mod_split), + drm_format_mod_linear, + drm_format_mod_invalid, +}; + +static const uint64_t format_modifiers_default[] = { + drm_format_mod_linear, + drm_format_mod_invalid, +}; + + const uint64_t *format_modifiers = format_modifiers_default; + if (meson_vpu_is_compatible(priv, vpu_compatible_gxm)) + format_modifiers = format_modifiers_afbc_gxm; + else if (meson_vpu_is_compatible(priv, vpu_compatible_g12a)) + format_modifiers = format_modifiers_afbc_g12a; + - null, + format_modifiers,
|
Graphics
|
68e2f64ee4603aeab9c3bb907d19f5cd30d1c6ff
|
neil armstrong
|
drivers
|
gpu
|
drm, meson
|
drm/meson: viu: add afbc modules routing functions
|
the amlogic g12a afbc decoder pixel input need to be routed diferently than the amlogic gxm afbc decoder, this adds support for routing the viu osd1 pixel source to the afbc "mali unpack" module.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['c', 'h']
| 2
| 85
| 0
|
--- diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c +#include <linux/bitfield.h> + +#include <drm/drm_fourcc.h> +#define osd1_mali_order_abgr \ + (field_prep(viu_osd1_mali_afbcd_a_reorder, \ + viu_osd1_mali_reorder_a) | \ + field_prep(viu_osd1_mali_afbcd_b_reorder, \ + viu_osd1_mali_reorder_b) | \ + field_prep(viu_osd1_mali_afbcd_g_reorder, \ + viu_osd1_mali_reorder_g) | \ + field_prep(viu_osd1_mali_afbcd_r_reorder, \ + viu_osd1_mali_reorder_r)) + +#define osd1_mali_order_argb \ + (field_prep(viu_osd1_mali_afbcd_a_reorder, \ + viu_osd1_mali_reorder_a) | \ + field_prep(viu_osd1_mali_afbcd_b_reorder, \ + viu_osd1_mali_reorder_r) | \ + field_prep(viu_osd1_mali_afbcd_g_reorder, \ + viu_osd1_mali_reorder_g) | \ + field_prep(viu_osd1_mali_afbcd_r_reorder, \ + viu_osd1_mali_reorder_b)) + +void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv) +{ + u32 afbc_order = osd1_mali_order_argb; + + /* enable mali afbc unpack */ + writel_bits_relaxed(viu_osd1_mali_unpack_en, + viu_osd1_mali_unpack_en, + priv->io_base + _reg(viu_osd1_mali_unpack_ctrl)); + + switch (priv->afbcd.format) { + case drm_format_xbgr8888: + case drm_format_abgr8888: + afbc_order = osd1_mali_order_abgr; + break; + } + + /* setup rgba reordering */ + writel_bits_relaxed(viu_osd1_mali_afbcd_a_reorder | + viu_osd1_mali_afbcd_b_reorder | + viu_osd1_mali_afbcd_g_reorder | + viu_osd1_mali_afbcd_r_reorder, + afbc_order, + priv->io_base + _reg(viu_osd1_mali_unpack_ctrl)); + + /* select afbcd path for osd1 */ + writel_bits_relaxed(osd_path_osd_axi_sel_osd1_afbcd, + osd_path_osd_axi_sel_osd1_afbcd, + priv->io_base + _reg(osd_path_misc_ctrl)); +} + +void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv) +{ + /* disable afbcd path for osd1 */ + writel_bits_relaxed(osd_path_osd_axi_sel_osd1_afbcd, 0, + priv->io_base + _reg(osd_path_misc_ctrl)); + + /* disable afbcd unpack */ + 
writel_bits_relaxed(viu_osd1_mali_unpack_en, 0, + priv->io_base + _reg(viu_osd1_mali_unpack_ctrl)); +} + +void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv) +{ + writel_bits_relaxed(mali_afbc_misc, field_prep(mali_afbc_misc, 0x90), + priv->io_base + _reg(viu_misc_ctrl1)); +} + +void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv) +{ + writel_bits_relaxed(mali_afbc_misc, field_prep(mali_afbc_misc, 0x00), + priv->io_base + _reg(viu_misc_ctrl1)); +} + + + meson_viu_g12a_disable_osd1_afbc(priv); + if (meson_vpu_is_compatible(priv, vpu_compatible_gxm)) + meson_viu_gxm_disable_osd1_afbc(priv); + diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h --- a/drivers/gpu/drm/meson/meson_viu.h +++ b/drivers/gpu/drm/meson/meson_viu.h +void meson_viu_g12a_enable_osd1_afbc(struct meson_drm *priv); +void meson_viu_g12a_disable_osd1_afbc(struct meson_drm *priv); +void meson_viu_gxm_enable_osd1_afbc(struct meson_drm *priv); +void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv);
|
Graphics
|
1b85270ff156d567bb4e8e235fa7069edd6c7b1f
|
neil armstrong
|
drivers
|
gpu
|
drm, meson
|
drm/meson: hold 32 lines after vsync to give time for afbc start
|
when using an afbc encoded frame, the afbc decoder must be reset, configured and enabled at each vsync irq.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['c']
| 1
| 1
| 1
|
--- diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c --- a/drivers/gpu/drm/meson/meson_viu.c +++ b/drivers/gpu/drm/meson/meson_viu.c - viu_osd_hold_fifo_lines(4) | + viu_osd_hold_fifo_lines(31) |
|
Graphics
|
24e0d4058eff7cdf66976c66be42ac89f94d1d16
|
neil armstrong
|
drivers
|
gpu
|
drm, meson
|
drm/meson: crtc: add osd1 plane afbc commit
|
finally, setup the viu registers and start the afbc decoder to support displaying afbc encoded buffers on amlogic gxm and g12a socs.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
osd1 plane afbc commit
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['meson ']
|
['c']
| 1
| 72
| 5
|
--- diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c --- a/drivers/gpu/drm/meson/meson_crtc.c +++ b/drivers/gpu/drm/meson/meson_crtc.c +#include "meson_rdma.h" +#include "meson_osd_afbcd.h" + void (*enable_osd1_afbc)(struct meson_drm *priv); + void (*disable_osd1_afbc)(struct meson_drm *priv); + bool vsync_forced; + bool vsync_disabled; + meson_crtc->vsync_disabled = false; - meson_venc_disable_vsync(priv); + if (!meson_crtc->vsync_forced) { + meson_crtc->vsync_disabled = true; + meson_venc_disable_vsync(priv); + } +static void meson_crtc_g12a_enable_osd1_afbc(struct meson_drm *priv) +{ + writel_relaxed(priv->viu.osd1_blk2_cfg4, + priv->io_base + _reg(viu_osd1_blk2_cfg_w4)); + + writel_bits_relaxed(osd_mem_linear_addr, osd_mem_linear_addr, + priv->io_base + _reg(viu_osd1_ctrl_stat)); + + writel_relaxed(priv->viu.osd1_blk1_cfg4, + priv->io_base + _reg(viu_osd1_blk1_cfg_w4)); + + meson_viu_g12a_enable_osd1_afbc(priv); + + writel_bits_relaxed(osd_mem_linear_addr, osd_mem_linear_addr, + priv->io_base + _reg(viu_osd1_ctrl_stat)); + + writel_bits_relaxed(osd_mali_src_en, osd_mali_src_en, + priv->io_base + _reg(viu_osd1_blk0_cfg_w0)); +} + + + if (priv->viu.osd1_afbcd) { + if (meson_crtc->enable_osd1_afbc) + meson_crtc->enable_osd1_afbc(priv); + } else { + if (meson_crtc->disable_osd1_afbc) + meson_crtc->disable_osd1_afbc(priv); + if (priv->afbcd.ops) { + priv->afbcd.ops->reset(priv); + priv->afbcd.ops->disable(priv); + } + meson_crtc->vsync_forced = false; + } + - meson_canvas_config(priv->canvas, priv->canvas_id_osd1, - priv->viu.osd1_addr, priv->viu.osd1_stride, - priv->viu.osd1_height, meson_canvas_wrap_none, - meson_canvas_blkmode_linear, 0); + if (!priv->viu.osd1_afbcd) + meson_canvas_config(priv->canvas, priv->canvas_id_osd1, + priv->viu.osd1_addr, + priv->viu.osd1_stride, + priv->viu.osd1_height, + meson_canvas_wrap_none, + meson_canvas_blkmode_linear, 0); + if (priv->viu.osd1_afbcd) { + priv->afbcd.ops->reset(priv); + 
priv->afbcd.ops->setup(priv); + priv->afbcd.ops->enable(priv); + meson_crtc->vsync_forced = true; + } + + if (meson_crtc->vsync_disabled) + return; + + meson_crtc->enable_osd1_afbc = + meson_crtc_g12a_enable_osd1_afbc; + meson_crtc->disable_osd1_afbc = + meson_viu_g12a_disable_osd1_afbc; + if (meson_vpu_is_compatible(priv, vpu_compatible_gxm)) { + meson_crtc->enable_osd1_afbc = + meson_viu_gxm_enable_osd1_afbc; + meson_crtc->disable_osd1_afbc = + meson_viu_gxm_disable_osd1_afbc; + }
|
Graphics
|
c96bcb635a5ed9bc072c3efcda70dfd24a771749
|
neil armstrong
|
drivers
|
gpu
|
drm, meson
|
drm/gma500: add page flip support on psb/cdv
|
legacy (non-atomic) page flip support is added to the driver by using the mode_set_base crtc function, that allows configuring a new framebuffer for display. since the function requires the primary plane's fb to be set already, this is done prior to calling the function in the page flip helper and reverted if the flip fails.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add pageflip support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['gma500 ']
|
['c', 'h']
| 6
| 72
| 3
|
--- diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c + .page_flip = gma_crtc_page_flip, diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c +int gma_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + struct drm_modeset_acquire_ctx *ctx) +{ + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + struct drm_framebuffer *current_fb = crtc->primary->fb; + struct drm_framebuffer *old_fb = crtc->primary->old_fb; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + struct drm_device *dev = crtc->dev; + unsigned long flags; + int ret; + + if (!crtc_funcs->mode_set_base) + return -einval; + + /* using mode_set_base requires the new fb to be set already. */ + crtc->primary->fb = fb; + + if (event) { + spin_lock_irqsave(&dev->event_lock, flags); + + warn_on(drm_crtc_vblank_get(crtc) != 0); + + gma_crtc->page_flip_event = event; + + /* call this locked if we want an event at vblank interrupt. */ + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); + if (ret) { + gma_crtc->page_flip_event = null; + drm_crtc_vblank_put(crtc); + } + + spin_unlock_irqrestore(&dev->event_lock, flags); + } else { + ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb); + } + + /* restore previous fb in case of failure. 
*/ + if (ret) + crtc->primary->fb = current_fb; + + return ret; +} + diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h +#include <drm/drm_vblank.h> +extern int gma_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags, + struct drm_modeset_acquire_ctx *ctx); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c + .page_flip = gma_crtc_page_flip, diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h +#include <drm/drm_vblank.h> + + struct drm_pending_vblank_event *page_flip_event; diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c - if (pipe_stat_val & pipe_vblank_status) - drm_handle_vblank(dev, pipe); + if (pipe_stat_val & pipe_vblank_status || + (is_mfld(dev) && pipe_stat_val & pipe_te_status)) { + struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe); + struct gma_crtc *gma_crtc = to_gma_crtc(crtc); + unsigned long flags; - if (pipe_stat_val & pipe_te_status) + + spin_lock_irqsave(&dev->event_lock, flags); + if (gma_crtc->page_flip_event) { + drm_crtc_send_vblank_event(crtc, + gma_crtc->page_flip_event); + gma_crtc->page_flip_event = null; + drm_crtc_vblank_put(crtc); + } + spin_unlock_irqrestore(&dev->event_lock, flags); + }
|
Graphics
|
f76c22ce8fbbd03394eb9e2cd8c490d9ad2a116c
|
paul kocialkowski
|
drivers
|
gpu
|
drm, gma500
|
drm/komeda: enable new product d32 support
|
d32 is simple version of d71, the difference is: - only has one pipeline - drop the periph block and merge it to gcu
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable new product d32 support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['komeda ']
|
['h', 'c']
| 5
| 42
| 16
|
- only has one pipeline - drop the periph block and merge it to gcu --- diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h --- a/drivers/gpu/drm/arm/display/include/malidp_product.h +++ b/drivers/gpu/drm/arm/display/include/malidp_product.h -#define malidp_d71_product_id 0x0071 +#define malidp_d71_product_id 0x0071 +#define malidp_d32_product_id 0x0032 diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c - ctrlr->supports_dual_link = true; + ctrlr->supports_dual_link = d71->supports_dual_link; diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c - /* probe periph */ + /* only the legacy hw has the periph block, the newer merges the periph + * into gcu + */ - if (block_info_blk_type(value) != d71_blk_type_periph) { - drm_error("access blk periph but got blk: %d. ", - block_info_blk_type(value)); - err = -einval; - goto err_cleanup; + if (block_info_blk_type(value) != d71_blk_type_periph) + d71->periph_addr = null; + + if (d71->periph_addr) { + /* probe peripheral in legacy hw */ + value = malidp_read32(d71->periph_addr, periph_configuration_id); + + d71->max_line_size = value & periph_max_line_size ? 4096 : 2048; + d71->max_vsize = 4096; + d71->num_rich_layers = value & periph_num_rich_layers ? 
2 : 1; + d71->supports_dual_link = !!(value & periph_split_en); + d71->integrates_tbu = !!(value & periph_tbu_en); + } else { + value = malidp_read32(d71->gcu_addr, gcu_configuration_id0); + d71->max_line_size = gcu_max_line_size(value); + d71->max_vsize = gcu_max_num_lines(value); + + value = malidp_read32(d71->gcu_addr, gcu_configuration_id1); + d71->num_rich_layers = gcu_num_rich_layers(value); + d71->supports_dual_link = gcu_display_split_en(value); + d71->integrates_tbu = gcu_display_tbu_en(value); - value = malidp_read32(d71->periph_addr, periph_configuration_id); - - d71->max_line_size = value & periph_max_line_size ? 4096 : 2048; - d71->max_vsize = 4096; - d71->num_rich_layers = value & periph_num_rich_layers ? 2 : 1; - d71->supports_dual_link = value & periph_split_en ? true : false; - d71->integrates_tbu = value & periph_tbu_en ? true : false; - + case malidp_d32_product_id: diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h +/* gcu_configuration registers */ +#define gcu_configuration_id0 0x100 +#define gcu_configuration_id1 0x104 + +/* gcu configuration */ +#define gcu_max_line_size(x) ((x) & 0xffff) +#define gcu_max_num_lines(x) ((x) >> 16) +#define gcu_num_rich_layers(x) ((x) & 0x7) +#define gcu_num_pipelines(x) (((x) >> 3) & 0x7) +#define gcu_num_scalers(x) (((x) >> 6) & 0x7) +#define gcu_display_split_en(x) (((x) >> 16) & 0x1) +#define gcu_display_tbu_en(x) (((x) >> 17) & 0x1) + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c + { .compatible = "arm,mali-d32", .data = d71_identify, },
|
Graphics
|
17cfcb68af3bc7d5e8ae08779b1853310a2949f3
|
james qian wang arm technology china
|
drivers
|
gpu
|
arm, d71, display, drm, include, komeda
|
drm/komeda: add debugfs node to control error verbosity
|
named 'err_verbosity', currently with only 1 active bit in that replicates the existing level - print error events once per flip.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add debugfs node to control error verbosity
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['komeda ']
|
['c', 'h']
| 4
| 24
| 5
|
--- diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c + debugfs_create_x16("err_verbosity", 0664, mdev->debugfs_root, + &mdev->err_verbosity); + mdev->err_verbosity = komeda_dev_print_err_events; + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h + /** + * @err_verbosity: bitmask for how much extra info to print on error + * + * see komeda_dev_* macros for details. + */ + u16 err_verbosity; + /* print a single line per error per frame with error events. */ +#define komeda_dev_print_err_events bit(0) -void komeda_print_events(struct komeda_events *evts); +void komeda_print_events(struct komeda_events *evts, struct drm_device *dev); -static inline void komeda_print_events(struct komeda_events *evts) {} +static inline void komeda_print_events(struct komeda_events *evts, + struct drm_device *dev) +{} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c --- a/drivers/gpu/drm/arm/display/komeda/komeda_event.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c -void komeda_print_events(struct komeda_events *evts) +void komeda_print_events(struct komeda_events *evts, struct drm_device *dev) - u64 print_evts = komeda_err_events; + u64 print_evts = 0; + struct komeda_dev *mdev = dev->dev_private; + u16 const err_verbosity = mdev->err_verbosity; + if (err_verbosity & komeda_dev_print_err_events) + print_evts |= komeda_err_events; + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c - komeda_print_events(&evts); + 
komeda_print_events(&evts, drm);
|
Graphics
|
8894cd5824e500c530db5f6f399d22edbcddea73
|
mihail atanassov liviu dudau liviu dudau arm com james qian wang arm technology china james qian wang arm com
|
drivers
|
gpu
|
arm, display, drm, komeda
|
drm/komeda: adds gamma and color-transform support for dou-ips
|
adds gamma and color-transform support for dou-ips. adds two caps members fgamma_coeffs and ctm_coeffs to komeda_improc_state. if color management changed, set gamma and color-transform accordingly.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
adds gamma and color-transform support for dou-ips
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['komeda ']
|
['c', 'h']
| 4
| 31
| 0
|
--- diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c + struct drm_crtc_state *crtc_st = state->crtc->state; + struct d71_pipeline *pipe = to_d71_pipeline(c->pipeline); + if (crtc_st->color_mgmt_changed) { + mask |= ips_ctrl_ft | ips_ctrl_rgb; + + if (crtc_st->gamma_lut) { + malidp_write_group(pipe->dou_ft_coeff_addr, ft_coeff0, + komeda_n_gamma_coeffs, + st->fgamma_coeffs); + ctrl |= ips_ctrl_ft; /* enable gamma */ + } + + if (crtc_st->ctm) { + malidp_write_group(reg, ips_rgb_rgb_coeff0, + komeda_n_ctm_coeffs, + st->ctm_coeffs); + ctrl |= ips_ctrl_rgb; /* enable gamut */ + } + } + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c + drm_crtc_enable_color_mgmt(crtc, 0, true, komeda_color_lut_size); + diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +#include "komeda_color_mgmt.h" + u32 fgamma_coeffs[komeda_n_gamma_coeffs]; + u32 ctm_coeffs[komeda_n_ctm_coeffs]; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c + if (kcrtc_st->base.color_mgmt_changed) { + drm_lut_to_fgamma_coeffs(kcrtc_st->base.gamma_lut, + st->fgamma_coeffs); + drm_ctm_to_coeffs(kcrtc_st->base.ctm, st->ctm_coeffs); + } +
|
Graphics
|
db9cd76d099a0e4337d30e22c89fe019332bcec1
|
lowry li arm technology china
|
drivers
|
gpu
|
arm, d71, display, drm, komeda
|
drm/tilcdc: remove obsolete bundled tilcdc tfp410 driver
|
remove obsolete bundled tfp410 driver with its "ti,tilcdc,tfp410" devicetree binding. no platform has ever used this driver in the mainline kernel and if anybody connects tfp410 to tilcdc he or she should use the generic drm tfp410 bridge driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
remove obsolete bundled tilcdc tfp410 driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['tilcdc ']
|
['c', 'makefile', 'h', 'txt']
| 5
| 0
| 421
|
--- diff --git a/documentation/devicetree/bindings/display/tilcdc/tfp410.txt b/documentation/devicetree/bindings/display/tilcdc/tfp410.txt --- a/documentation/devicetree/bindings/display/tilcdc/tfp410.txt +++ /dev/null -device-tree bindings for tilcdc drm tfp410 output driver - -required properties: - - compatible: value should be "ti,tilcdc,tfp410". - - i2c: the phandle for the i2c device to use for ddc - -recommended properties: - - pinctrl-names, pinctrl-0: the pincontrol settings to configure - muxing properly for pins that connect to tfp410 device - - powerdn-gpio: the powerdown gpio, pulled low to power down the - tfp410 device (for dpms_off) - -example: - - dvicape { - compatible = "ti,tilcdc,tfp410"; - i2c = <&i2c2>; - pinctrl-names = "default"; - pinctrl-0 = <&bone_dvi_cape_dvi_00a1_pins>; - powerdn-gpio = <&gpio2 31 0>; - }; diff --git a/drivers/gpu/drm/tilcdc/makefile b/drivers/gpu/drm/tilcdc/makefile --- a/drivers/gpu/drm/tilcdc/makefile +++ b/drivers/gpu/drm/tilcdc/makefile - tilcdc_tfp410.o \ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c -#include "tilcdc_tfp410.h" - tilcdc_tfp410_init(); - tilcdc_tfp410_fini(); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-only -/* - * copyright (c) 2012 texas instruments - * author: rob clark <robdclark@gmail.com> - */ - -#include <linux/gpio.h> -#include <linux/mod_devicetable.h> -#include <linux/of_gpio.h> -#include <linux/pinctrl/consumer.h> -#include <linux/platform_device.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_encoder.h> -#include <drm/drm_modeset_helper_vtables.h> -#include <drm/drm_probe_helper.h> - -#include "tilcdc_drv.h" -#include "tilcdc_tfp410.h" - -struct tfp410_module { - struct tilcdc_module base; - struct i2c_adapter 
*i2c; - int gpio; -}; -#define to_tfp410_module(x) container_of(x, struct tfp410_module, base) - - -static const struct tilcdc_panel_info dvi_info = { - .ac_bias = 255, - .ac_bias_intrpt = 0, - .dma_burst_sz = 16, - .bpp = 16, - .fdd = 0x80, - .tft_alt_mode = 0, - .sync_edge = 0, - .sync_ctrl = 1, - .raster_order = 0, -}; - -/* - * encoder: - */ - -struct tfp410_encoder { - struct drm_encoder base; - struct tfp410_module *mod; - int dpms; -}; -#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base) - -static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode) -{ - struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder); - - if (tfp410_encoder->dpms == mode) - return; - - if (mode == drm_mode_dpms_on) { - dbg("power on"); - gpio_direction_output(tfp410_encoder->mod->gpio, 1); - } else { - dbg("power off"); - gpio_direction_output(tfp410_encoder->mod->gpio, 0); - } - - tfp410_encoder->dpms = mode; -} - -static void tfp410_encoder_prepare(struct drm_encoder *encoder) -{ - tfp410_encoder_dpms(encoder, drm_mode_dpms_off); -} - -static void tfp410_encoder_commit(struct drm_encoder *encoder) -{ - tfp410_encoder_dpms(encoder, drm_mode_dpms_on); -} - -static void tfp410_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* nothing needed */ -} - -static const struct drm_encoder_funcs tfp410_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = { - .dpms = tfp410_encoder_dpms, - .prepare = tfp410_encoder_prepare, - .commit = tfp410_encoder_commit, - .mode_set = tfp410_encoder_mode_set, -}; - -static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev, - struct tfp410_module *mod) -{ - struct tfp410_encoder *tfp410_encoder; - struct drm_encoder *encoder; - int ret; - - tfp410_encoder = devm_kzalloc(dev->dev, sizeof(*tfp410_encoder), - gfp_kernel); - if (!tfp410_encoder) 
- return null; - - tfp410_encoder->dpms = drm_mode_dpms_off; - tfp410_encoder->mod = mod; - - encoder = &tfp410_encoder->base; - encoder->possible_crtcs = 1; - - ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs, - drm_mode_encoder_tmds, null); - if (ret < 0) - goto fail; - - drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs); - - return encoder; - -fail: - drm_encoder_cleanup(encoder); - return null; -} - -/* - * connector: - */ - -struct tfp410_connector { - struct drm_connector base; - - struct drm_encoder *encoder; /* our connected encoder */ - struct tfp410_module *mod; -}; -#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base) - - -static void tfp410_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - -static enum drm_connector_status tfp410_connector_detect( - struct drm_connector *connector, - bool force) -{ - struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); - - if (drm_probe_ddc(tfp410_connector->mod->i2c)) - return connector_status_connected; - - return connector_status_unknown; -} - -static int tfp410_connector_get_modes(struct drm_connector *connector) -{ - struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); - struct edid *edid; - int ret = 0; - - edid = drm_get_edid(connector, tfp410_connector->mod->i2c); - - drm_connector_update_edid_property(connector, edid); - - if (edid) { - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - return ret; -} - -static struct drm_encoder *tfp410_connector_best_encoder( - struct drm_connector *connector) -{ - struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector); - return tfp410_connector->encoder; -} - -static const struct drm_connector_funcs tfp410_connector_funcs = { - .destroy = tfp410_connector_destroy, - .detect = tfp410_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .reset 
= drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = { - .get_modes = tfp410_connector_get_modes, - .best_encoder = tfp410_connector_best_encoder, -}; - -static struct drm_connector *tfp410_connector_create(struct drm_device *dev, - struct tfp410_module *mod, struct drm_encoder *encoder) -{ - struct tfp410_connector *tfp410_connector; - struct drm_connector *connector; - int ret; - - tfp410_connector = devm_kzalloc(dev->dev, sizeof(*tfp410_connector), - gfp_kernel); - if (!tfp410_connector) - return null; - - tfp410_connector->encoder = encoder; - tfp410_connector->mod = mod; - - connector = &tfp410_connector->base; - - drm_connector_init_with_ddc(dev, connector, - &tfp410_connector_funcs, - drm_mode_connector_dvid, - mod->i2c); - drm_connector_helper_add(connector, &tfp410_connector_helper_funcs); - - connector->polled = drm_connector_poll_connect | - drm_connector_poll_disconnect; - - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) - goto fail; - - return connector; - -fail: - tfp410_connector_destroy(connector); - return null; -} - -/* - * module: - */ - -static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev) -{ - struct tfp410_module *tfp410_mod = to_tfp410_module(mod); - struct tilcdc_drm_private *priv = dev->dev_private; - struct drm_encoder *encoder; - struct drm_connector *connector; - - encoder = tfp410_encoder_create(dev, tfp410_mod); - if (!encoder) - return -enomem; - - connector = tfp410_connector_create(dev, tfp410_mod, encoder); - if (!connector) - return -enomem; - - priv->encoders[priv->num_encoders++] = encoder; - priv->connectors[priv->num_connectors++] = connector; - - tilcdc_crtc_set_panel_info(priv->crtc, &dvi_info); 
- return 0; -} - -static const struct tilcdc_module_ops tfp410_module_ops = { - .modeset_init = tfp410_modeset_init, -}; - -/* - * device: - */ - -static int tfp410_probe(struct platform_device *pdev) -{ - struct device_node *node = pdev->dev.of_node; - struct device_node *i2c_node; - struct tfp410_module *tfp410_mod; - struct tilcdc_module *mod; - struct pinctrl *pinctrl; - uint32_t i2c_phandle; - int ret = -einval; - - /* bail out early if no dt data: */ - if (!node) { - dev_err(&pdev->dev, "device-tree data is missing "); - return -enxio; - } - - tfp410_mod = devm_kzalloc(&pdev->dev, sizeof(*tfp410_mod), gfp_kernel); - if (!tfp410_mod) - return -enomem; - - mod = &tfp410_mod->base; - pdev->dev.platform_data = mod; - - tilcdc_module_init(mod, "tfp410", &tfp410_module_ops); - - pinctrl = devm_pinctrl_get_select_default(&pdev->dev); - if (is_err(pinctrl)) - dev_warn(&pdev->dev, "pins are not configured "); - - if (of_property_read_u32(node, "i2c", &i2c_phandle)) { - dev_err(&pdev->dev, "could not get i2c bus phandle "); - goto fail; - } - - i2c_node = of_find_node_by_phandle(i2c_phandle); - if (!i2c_node) { - dev_err(&pdev->dev, "could not get i2c bus node "); - goto fail; - } - - tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node); - if (!tfp410_mod->i2c) { - dev_err(&pdev->dev, "could not get i2c "); - of_node_put(i2c_node); - goto fail; - } - - of_node_put(i2c_node); - - tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio", - 0, null); - if (tfp410_mod->gpio < 0) { - dev_warn(&pdev->dev, "no power down gpio "); - } else { - ret = gpio_request(tfp410_mod->gpio, "dvi_pdn"); - if (ret) { - dev_err(&pdev->dev, "could not get dvi_pdn gpio "); - goto fail_adapter; - } - } - - return 0; - -fail_adapter: - i2c_put_adapter(tfp410_mod->i2c); - -fail: - tilcdc_module_cleanup(mod); - return ret; -} - -static int tfp410_remove(struct platform_device *pdev) -{ - struct tilcdc_module *mod = dev_get_platdata(&pdev->dev); - struct tfp410_module *tfp410_mod = 
to_tfp410_module(mod); - - i2c_put_adapter(tfp410_mod->i2c); - gpio_free(tfp410_mod->gpio); - - tilcdc_module_cleanup(mod); - - return 0; -} - -static const struct of_device_id tfp410_of_match[] = { - { .compatible = "ti,tilcdc,tfp410", }, - { }, -}; - -struct platform_driver tfp410_driver = { - .probe = tfp410_probe, - .remove = tfp410_remove, - .driver = { - .owner = this_module, - .name = "tfp410", - .of_match_table = tfp410_of_match, - }, -}; - -int __init tilcdc_tfp410_init(void) -{ - return platform_driver_register(&tfp410_driver); -} - -void __exit tilcdc_tfp410_fini(void) -{ - platform_driver_unregister(&tfp410_driver); -} diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0-only */ -/* - * copyright (c) 2012 texas instruments - * author: rob clark <robdclark@gmail.com> - */ - -#ifndef __tilcdc_tfp410_h__ -#define __tilcdc_tfp410_h__ - -/* sub-module for tfp410 dvi adaptor */ - -int tilcdc_tfp410_init(void); -void tilcdc_tfp410_fini(void); - -#endif /* __tilcdc_tfp410_h__ */
|
Graphics
|
2156873f08c7893811f34177aa923ab1ea486591
|
jyri sarha laurent pinchart laurent pinchart ideasonboard com
|
documentation
|
devicetree
|
bindings, display, drm, tilcdc
|
drm: arc: pgu: add argb8888 format to supported format list
|
as we ignore first 8 bit of 32 bit pixel value we can add argb8888 format as alias of xrgb8888.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add argb8888 format to supported format list
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['arc ', 'pgu']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c + drm_format_argb8888,
|
Graphics
|
0ff916e2ef6fb742e4906aac26c470314b59bae8
|
eugeniy paltsev
|
drivers
|
gpu
|
arc, drm
|
drm/edid: add cta-861-g modes with vic >= 193
|
add a second table to the cea modes with vic >= 193.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add cta-861-g modes with vic >= 193
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['edid ']
|
['c']
| 1
| 149
| 2
|
--- diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c +/* + * from cea/cta-861 spec. + * + * do not access directly, instead always use cea_mode_for_vic(). + */ +static const struct drm_display_mode edid_cea_modes_193[] = { + /* 193 - 5120x2160@120hz 64:27 */ + { drm_mode("5120x2160", drm_mode_type_driver, 1485000, 5120, 5284, + 5372, 5500, 0, 2160, 2168, 2178, 2250, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 120, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 194 - 7680x4320@24hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 1188000, 7680, 10232, + 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 24, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 195 - 7680x4320@25hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 1188000, 7680, 10032, + 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 25, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 196 - 7680x4320@30hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 1188000, 7680, 8232, + 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 30, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 197 - 7680x4320@48hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 2376000, 7680, 10232, + 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 48, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 198 - 7680x4320@50hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 2376000, 7680, 10032, + 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 50, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 199 - 7680x4320@60hz 16:9 */ + { drm_mode("7680x4320", 
drm_mode_type_driver, 2376000, 7680, 8232, + 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 60, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 200 - 7680x4320@100hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 4752000, 7680, 9792, + 9968, 10560, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 100, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 201 - 7680x4320@120hz 16:9 */ + { drm_mode("7680x4320", drm_mode_type_driver, 4752000, 7680, 8032, + 8208, 8800, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 120, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, + /* 202 - 7680x4320@24hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 1188000, 7680, 10232, + 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 24, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 203 - 7680x4320@25hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 1188000, 7680, 10032, + 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 25, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 204 - 7680x4320@30hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 1188000, 7680, 8232, + 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 30, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 205 - 7680x4320@48hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 2376000, 7680, 10232, + 10408, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 48, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 206 - 7680x4320@50hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 2376000, 7680, 10032, + 10208, 10800, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | 
drm_mode_flag_pvsync), + .vrefresh = 50, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 207 - 7680x4320@60hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 2376000, 7680, 8232, + 8408, 9000, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 60, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 208 - 7680x4320@100hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 4752000, 7680, 9792, + 9968, 10560, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 100, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 209 - 7680x4320@120hz 64:27 */ + { drm_mode("7680x4320", drm_mode_type_driver, 4752000, 7680, 8032, + 8208, 8800, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 120, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 210 - 10240x4320@24hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 1485000, 10240, 11732, + 11908, 12500, 0, 4320, 4336, 4356, 4950, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 24, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 211 - 10240x4320@25hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 1485000, 10240, 12732, + 12908, 13500, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 25, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 212 - 10240x4320@30hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 1485000, 10240, 10528, + 10704, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 30, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 213 - 10240x4320@48hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 2970000, 10240, 11732, + 11908, 12500, 0, 4320, 4336, 4356, 4950, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 48, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, 
+ /* 214 - 10240x4320@50hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 2970000, 10240, 12732, + 12908, 13500, 0, 4320, 4336, 4356, 4400, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 50, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 215 - 10240x4320@60hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 2970000, 10240, 10528, + 10704, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 60, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 216 - 10240x4320@100hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 5940000, 10240, 12432, + 12608, 13200, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 100, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 217 - 10240x4320@120hz 64:27 */ + { drm_mode("10240x4320", drm_mode_type_driver, 5940000, 10240, 10528, + 10704, 11000, 0, 4320, 4336, 4356, 4500, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 120, .picture_aspect_ratio = hdmi_picture_aspect_64_27, }, + /* 218 - 4096x2160@100hz 256:135 */ + { drm_mode("4096x2160", drm_mode_type_driver, 1188000, 4096, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 100, .picture_aspect_ratio = hdmi_picture_aspect_256_135, }, + /* 219 - 4096x2160@120hz 256:135 */ + { drm_mode("4096x2160", drm_mode_type_driver, 1188000, 4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + drm_mode_flag_phsync | drm_mode_flag_pvsync), + .vrefresh = 120, .picture_aspect_ratio = hdmi_picture_aspect_256_135, }, +}; + + if (vic >= 193 && vic < 193 + array_size(edid_cea_modes_193)) + return &edid_cea_modes_193[vic - 193]; - return array_size(edid_cea_modes_0); + return 193 + array_size(edid_cea_modes_193); - return vic + 1; + if (++vic == array_size(edid_cea_modes_0)) + vic = 193; + return vic;
|
Graphics
|
f7655d42fceefea7e4c2d07eb04381d086b7090e
|
ville syrj l
|
drivers
|
gpu
|
drm
|
drm/edid: add aspect ratios to hdmi 4k modes
|
[why] hdmi 2.0 adds aspect ratio attribute to distinguish different 4k modes. according to appendix e of hdmi 2.0 spec, source should use vsif to indicate video mode only when the mode is one defined in hdmi 1.4b 4k modes. otherwise, use avi infoframes to convey vic.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add aspect ratios to hdmi 4k modes
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['edid ']
|
['c']
| 1
| 35
| 10
|
--- diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c - .vrefresh = 30, }, + .vrefresh = 30, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, - .vrefresh = 25, }, + .vrefresh = 25, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, - .vrefresh = 24, }, + .vrefresh = 24, .picture_aspect_ratio = hdmi_picture_aspect_16_9, }, - .vrefresh = 24, }, + .vrefresh = 24, .picture_aspect_ratio = hdmi_picture_aspect_256_135, }, +static enum hdmi_picture_aspect drm_get_hdmi_aspect_ratio(const u8 video_code) +{ + return edid_4k_modes[video_code].picture_aspect_ratio; +} + + if (to_match->picture_aspect_ratio) + match_flags |= drm_mode_match_aspect_ratio; + + if (to_match->picture_aspect_ratio) + match_flags |= drm_mode_match_aspect_ratio; + + u8 vic, hdmi_vic; - frame->video_code = drm_mode_cea_vic(connector, mode); + vic = drm_mode_cea_vic(connector, mode); + hdmi_vic = drm_mode_hdmi_vic(connector, mode); - * user input (if specified) or from the cea mode list. + * user input (if specified) or from the cea/hdmi mode lists. - if (picture_aspect == hdmi_picture_aspect_none) - picture_aspect = drm_get_cea_aspect_ratio(frame->video_code); + if (picture_aspect == hdmi_picture_aspect_none) { + if (vic) + picture_aspect = drm_get_cea_aspect_ratio(vic); + else if (hdmi_vic) + picture_aspect = drm_get_hdmi_aspect_ratio(hdmi_vic); + } - if (picture_aspect != - drm_get_cea_aspect_ratio(frame->video_code)) + if (vic) { + if (picture_aspect != drm_get_cea_aspect_ratio(vic)) + return -einval; + } else if (hdmi_vic) { + if (picture_aspect != drm_get_hdmi_aspect_ratio(hdmi_vic)) + return -einval; + } else { + } + + frame->video_code = vic;
|
Graphics
|
d2b434730f301a7ad74473eb66422f0008186306
|
wayne lin
|
drivers
|
gpu
|
drm
|
drm/edid: add alternate clock for smpte 4k
|
[why] in hdmi_mode_alternate_clock(), it adds an exception for vic 4 mode (4096x2160@24) due to there is no alternate clock defined for that mode in hdmi1.4b. but hdmi2.0 adds 23.98hz for that mode.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add aspect ratios to hdmi 4k modes
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['edid ']
|
['c']
| 1
| 0
| 7
|
--- diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c - * - * it's almost like cea_mode_alternate_clock(), we just need to add an - * exception for the vic 4 mode (4096x2160@24hz): no alternate clock for this - * one. - if (hdmi_mode->vdisplay == 4096 && hdmi_mode->hdisplay == 2160) - return hdmi_mode->clock; -
|
Graphics
|
304a94a2e6debadd55c4e73cbec432dd57832856
|
wayne lin
|
drivers
|
gpu
|
drm
|
drm/bridge: repurpose lvds-encoder.c
|
lvds-encoder.c implementation is also suitable for lvds decoders, not just lvds encoders. instead of creating a new driver for addressing support for transparent lvds decoders, repurpose lvds-encoder.c for the greater good with this patch.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
lvds decoder support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
[]
|
['c', 'kconfig', 'makefile']
| 4
| 159
| 160
|
--- diff --git a/drivers/gpu/drm/bridge/kconfig b/drivers/gpu/drm/bridge/kconfig --- a/drivers/gpu/drm/bridge/kconfig +++ b/drivers/gpu/drm/bridge/kconfig -config drm_lvds_encoder - tristate "transparent parallel to lvds encoder support" +config drm_lvds_codec + tristate "transparent lvds encoders and decoders support" - support for transparent parallel to lvds encoders that don't require - any configuration. + support for transparent lvds encoders and decoders that don't + require any configuration. diff --git a/drivers/gpu/drm/bridge/makefile b/drivers/gpu/drm/bridge/makefile --- a/drivers/gpu/drm/bridge/makefile +++ b/drivers/gpu/drm/bridge/makefile -obj-$(config_drm_lvds_encoder) += lvds-encoder.o +obj-$(config_drm_lvds_codec) += lvds-codec.o diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c --- /dev/null +++ b/drivers/gpu/drm/bridge/lvds-codec.c +// spdx-license-identifier: gpl-2.0-or-later +/* + * copyright (c) 2019 renesas electronics corporation + * copyright (c) 2016 laurent pinchart <laurent.pinchart@ideasonboard.com> + */ + +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> + +#include <drm/drm_bridge.h> +#include <drm/drm_panel.h> + +struct lvds_codec { + struct drm_bridge bridge; + struct drm_bridge *panel_bridge; + struct gpio_desc *powerdown_gpio; +}; + +static int lvds_codec_attach(struct drm_bridge *bridge) +{ + struct lvds_codec *lvds_codec = container_of(bridge, + struct lvds_codec, bridge); + + return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge, + bridge); +} + +static void lvds_codec_enable(struct drm_bridge *bridge) +{ + struct lvds_codec *lvds_codec = container_of(bridge, + struct lvds_codec, bridge); + + if (lvds_codec->powerdown_gpio) + gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0); +} + +static void lvds_codec_disable(struct drm_bridge *bridge) +{ + struct lvds_codec *lvds_codec = 
container_of(bridge, + struct lvds_codec, bridge); + + if (lvds_codec->powerdown_gpio) + gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1); +} + +static struct drm_bridge_funcs funcs = { + .attach = lvds_codec_attach, + .enable = lvds_codec_enable, + .disable = lvds_codec_disable, +}; + +static int lvds_codec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *port; + struct device_node *endpoint; + struct device_node *panel_node; + struct drm_panel *panel; + struct lvds_codec *lvds_codec; + + lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), gfp_kernel); + if (!lvds_codec) + return -enomem; + + lvds_codec->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", + gpiod_out_high); + if (is_err(lvds_codec->powerdown_gpio)) { + int err = ptr_err(lvds_codec->powerdown_gpio); + + if (err != -eprobe_defer) + dev_err(dev, "powerdown gpio failure: %d ", err); + return err; + } + + /* locate the panel dt node. */ + port = of_graph_get_port_by_id(dev->of_node, 1); + if (!port) { + dev_dbg(dev, "port 1 not found "); + return -enxio; + } + + endpoint = of_get_child_by_name(port, "endpoint"); + of_node_put(port); + if (!endpoint) { + dev_dbg(dev, "no endpoint for port 1 "); + return -enxio; + } + + panel_node = of_graph_get_remote_port_parent(endpoint); + of_node_put(endpoint); + if (!panel_node) { + dev_dbg(dev, "no remote endpoint for port 1 "); + return -enxio; + } + + panel = of_drm_find_panel(panel_node); + of_node_put(panel_node); + if (is_err(panel)) { + dev_dbg(dev, "panel not found, deferring probe "); + return ptr_err(panel); + } + + lvds_codec->panel_bridge = + devm_drm_panel_bridge_add_typed(dev, panel, + drm_mode_connector_lvds); + if (is_err(lvds_codec->panel_bridge)) + return ptr_err(lvds_codec->panel_bridge); + + /* + * the panel_bridge bridge is attached to the panel's of_node, + * but we need a bridge attached to our of_node for our user + * to look up. 
+ */ + lvds_codec->bridge.of_node = dev->of_node; + lvds_codec->bridge.funcs = &funcs; + drm_bridge_add(&lvds_codec->bridge); + + platform_set_drvdata(pdev, lvds_codec); + + return 0; +} + +static int lvds_codec_remove(struct platform_device *pdev) +{ + struct lvds_codec *lvds_codec = platform_get_drvdata(pdev); + + drm_bridge_remove(&lvds_codec->bridge); + + return 0; +} + +static const struct of_device_id lvds_codec_match[] = { + { .compatible = "lvds-encoder" }, + { .compatible = "thine,thc63lvdm83d" }, + {}, +}; +module_device_table(of, lvds_codec_match); + +static struct platform_driver lvds_codec_driver = { + .probe = lvds_codec_probe, + .remove = lvds_codec_remove, + .driver = { + .name = "lvds-codec", + .of_match_table = lvds_codec_match, + }, +}; +module_platform_driver(lvds_codec_driver); + +module_author("laurent pinchart <laurent.pinchart@ideasonboard.com>"); +module_description("lvds encoders and decoders"); +module_license("gpl"); diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * copyright (c) 2016 laurent pinchart <laurent.pinchart@ideasonboard.com> - */ - -#include <linux/gpio/consumer.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_graph.h> -#include <linux/platform_device.h> - -#include <drm/drm_bridge.h> -#include <drm/drm_panel.h> - -struct lvds_encoder { - struct drm_bridge bridge; - struct drm_bridge *panel_bridge; - struct gpio_desc *powerdown_gpio; -}; - -static int lvds_encoder_attach(struct drm_bridge *bridge) -{ - struct lvds_encoder *lvds_encoder = container_of(bridge, - struct lvds_encoder, - bridge); - - return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, - bridge); -} - -static void lvds_encoder_enable(struct drm_bridge *bridge) -{ - struct lvds_encoder *lvds_encoder = container_of(bridge, - struct lvds_encoder, - bridge); - - if 
(lvds_encoder->powerdown_gpio) - gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 0); -} - -static void lvds_encoder_disable(struct drm_bridge *bridge) -{ - struct lvds_encoder *lvds_encoder = container_of(bridge, - struct lvds_encoder, - bridge); - - if (lvds_encoder->powerdown_gpio) - gpiod_set_value_cansleep(lvds_encoder->powerdown_gpio, 1); -} - -static struct drm_bridge_funcs funcs = { - .attach = lvds_encoder_attach, - .enable = lvds_encoder_enable, - .disable = lvds_encoder_disable, -}; - -static int lvds_encoder_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *port; - struct device_node *endpoint; - struct device_node *panel_node; - struct drm_panel *panel; - struct lvds_encoder *lvds_encoder; - - lvds_encoder = devm_kzalloc(dev, sizeof(*lvds_encoder), gfp_kernel); - if (!lvds_encoder) - return -enomem; - - lvds_encoder->powerdown_gpio = devm_gpiod_get_optional(dev, "powerdown", - gpiod_out_high); - if (is_err(lvds_encoder->powerdown_gpio)) { - int err = ptr_err(lvds_encoder->powerdown_gpio); - - if (err != -eprobe_defer) - dev_err(dev, "powerdown gpio failure: %d ", err); - return err; - } - - /* locate the panel dt node. 
*/ - port = of_graph_get_port_by_id(dev->of_node, 1); - if (!port) { - dev_dbg(dev, "port 1 not found "); - return -enxio; - } - - endpoint = of_get_child_by_name(port, "endpoint"); - of_node_put(port); - if (!endpoint) { - dev_dbg(dev, "no endpoint for port 1 "); - return -enxio; - } - - panel_node = of_graph_get_remote_port_parent(endpoint); - of_node_put(endpoint); - if (!panel_node) { - dev_dbg(dev, "no remote endpoint for port 1 "); - return -enxio; - } - - panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); - if (is_err(panel)) { - dev_dbg(dev, "panel not found, deferring probe "); - return ptr_err(panel); - } - - lvds_encoder->panel_bridge = - devm_drm_panel_bridge_add_typed(dev, panel, - drm_mode_connector_lvds); - if (is_err(lvds_encoder->panel_bridge)) - return ptr_err(lvds_encoder->panel_bridge); - - /* the panel_bridge bridge is attached to the panel's of_node, - * but we need a bridge attached to our of_node for our user - * to look up. - */ - lvds_encoder->bridge.of_node = dev->of_node; - lvds_encoder->bridge.funcs = &funcs; - drm_bridge_add(&lvds_encoder->bridge); - - platform_set_drvdata(pdev, lvds_encoder); - - return 0; -} - -static int lvds_encoder_remove(struct platform_device *pdev) -{ - struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); - - drm_bridge_remove(&lvds_encoder->bridge); - - return 0; -} - -static const struct of_device_id lvds_encoder_match[] = { - { .compatible = "lvds-encoder" }, - { .compatible = "thine,thc63lvdm83d" }, - {}, -}; -module_device_table(of, lvds_encoder_match); - -static struct platform_driver lvds_encoder_driver = { - .probe = lvds_encoder_probe, - .remove = lvds_encoder_remove, - .driver = { - .name = "lvds-encoder", - .of_match_table = lvds_encoder_match, - }, -}; -module_platform_driver(lvds_encoder_driver); - -module_author("laurent pinchart <laurent.pinchart@ideasonboard.com>"); -module_description("transparent parallel to lvds encoder"); -module_license("gpl");
|
Graphics
|
0d60131a3b5dcb4c6980eb30cbca3c6fb62d7b6f
|
fabrizio castro
|
drivers
|
gpu
|
bridge, drm
|
drm/bridge: add analogix anx6345 support
|
the anx6345 is an ultra-low power displaypower/edp transmitter designed for portable devices. this driver adds initial support for rgb to edp mode, without hpd and interrupts.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add analogix anx6345 support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bridge ']
|
['c', 'kconfig', 'makefile']
| 3
| 806
| 0
|
--- diff --git a/drivers/gpu/drm/bridge/analogix/kconfig b/drivers/gpu/drm/bridge/analogix/kconfig --- a/drivers/gpu/drm/bridge/analogix/kconfig +++ b/drivers/gpu/drm/bridge/analogix/kconfig +config drm_analogix_anx6345 + tristate "analogix anx6345 bridge" + select drm_analogix_dp + select drm_kms_helper + select regmap_i2c + help + anx6345 is an ultra-low full-hd displayport/edp + transmitter designed for portable devices. the + anx6345 transforms the lvttl rgb output of an + application processor to edp or displayport. + + select drm_analogix_dp diff --git a/drivers/gpu/drm/bridge/analogix/makefile b/drivers/gpu/drm/bridge/analogix/makefile --- a/drivers/gpu/drm/bridge/analogix/makefile +++ b/drivers/gpu/drm/bridge/analogix/makefile +obj-$(config_drm_analogix_anx6345) += analogix-anx6345.o diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +/* spdx-license-identifier: gpl-2.0-only */ +/* + * copyright(c) 2016, analogix semiconductor. + * copyright(c) 2017, icenowy zheng <icenowy@aosc.io> + * + * based on anx7808 driver obtained from chromeos with copyright: + * copyright(c) 2013, google inc. 
+ */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/types.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> + +#include "analogix-i2c-dptx.h" +#include "analogix-i2c-txcommon.h" + +#define poll_delay 50000 /* us */ +#define poll_timeout 5000000 /* us */ + +#define i2c_idx_dptx 0 +#define i2c_idx_txcom 1 + +static const u8 anx6345_i2c_addresses[] = { + [i2c_idx_dptx] = 0x70, + [i2c_idx_txcom] = 0x72, +}; +#define i2c_num_addresses array_size(anx6345_i2c_addresses) + +struct anx6345 { + struct drm_dp_aux aux; + struct drm_bridge bridge; + struct i2c_client *client; + struct edid *edid; + struct drm_connector connector; + struct drm_dp_link link; + struct drm_panel *panel; + struct regulator *dvdd12; + struct regulator *dvdd25; + struct gpio_desc *gpiod_reset; + struct mutex lock; /* protect edid access */ + + /* i2c slave addresses of anx6345 are mapped as dptx and sys */ + struct i2c_client *i2c_clients[i2c_num_addresses]; + struct regmap *map[i2c_num_addresses]; + + u16 chipid; + u8 dpcd[dp_receiver_cap_size]; + + bool powered; +}; + +static inline struct anx6345 *connector_to_anx6345(struct drm_connector *c) +{ + return container_of(c, struct anx6345, connector); +} + +static inline struct anx6345 *bridge_to_anx6345(struct drm_bridge *bridge) +{ + return container_of(bridge, struct anx6345, bridge); +} + +static int anx6345_set_bits(struct regmap *map, u8 reg, u8 mask) +{ + return regmap_update_bits(map, reg, mask, mask); +} + +static int anx6345_clear_bits(struct regmap *map, u8 reg, 
u8 mask) +{ + return regmap_update_bits(map, reg, mask, 0); +} + +static ssize_t anx6345_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct anx6345 *anx6345 = container_of(aux, struct anx6345, aux); + + return anx_dp_aux_transfer(anx6345->map[i2c_idx_dptx], msg); +} + +static int anx6345_dp_link_training(struct anx6345 *anx6345) +{ + unsigned int value; + u8 dp_bw; + int err; + + err = anx6345_clear_bits(anx6345->map[i2c_idx_txcom], + sp_powerdown_ctrl_reg, + sp_total_pd); + if (err) + return err; + + err = drm_dp_dpcd_readb(&anx6345->aux, dp_max_link_rate, &dp_bw); + if (err < 0) + return err; + + switch (dp_bw) { + case dp_link_bw_1_62: + case dp_link_bw_2_7: + break; + + default: + drm_debug_kms("dp bandwidth (%#02x) not supported ", dp_bw); + return -einval; + } + + err = anx6345_set_bits(anx6345->map[i2c_idx_txcom], sp_vid_ctrl1_reg, + sp_video_mute); + if (err) + return err; + + err = anx6345_clear_bits(anx6345->map[i2c_idx_txcom], + sp_vid_ctrl1_reg, sp_video_en); + if (err) + return err; + + /* get dpcd info */ + err = drm_dp_dpcd_read(&anx6345->aux, dp_dpcd_rev, + &anx6345->dpcd, dp_receiver_cap_size); + if (err < 0) { + drm_error("failed to read dpcd: %d ", err); + return err; + } + + /* clear channel x serdes power down */ + err = anx6345_clear_bits(anx6345->map[i2c_idx_dptx], + sp_dp_analog_power_down_reg, sp_ch0_pd); + if (err) + return err; + + /* check link capabilities */ + err = drm_dp_link_probe(&anx6345->aux, &anx6345->link); + if (err < 0) { + drm_error("failed to probe link capabilities: %d ", err); + return err; + } + + /* power up the sink */ + err = drm_dp_link_power_up(&anx6345->aux, &anx6345->link); + if (err < 0) { + drm_error("failed to power up displayport link: %d ", err); + return err; + } + + /* possibly enable downspread on the sink */ + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_downspread_ctrl1_reg, 0); + if (err) + return err; + + if (anx6345->dpcd[dp_max_downspread] & 
dp_max_downspread_0_5) { + drm_debug("enable downspread on the sink "); + /* 4000ppm */ + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_downspread_ctrl1_reg, 8); + if (err) + return err; + + err = drm_dp_dpcd_writeb(&anx6345->aux, dp_downspread_ctrl, + dp_spread_amp_0_5); + if (err < 0) + return err; + } else { + err = drm_dp_dpcd_writeb(&anx6345->aux, dp_downspread_ctrl, 0); + if (err < 0) + return err; + } + + /* set the lane count and the link rate on the sink */ + if (drm_dp_enhanced_frame_cap(anx6345->dpcd)) + err = anx6345_set_bits(anx6345->map[i2c_idx_dptx], + sp_dp_system_ctrl_base + 4, + sp_enhanced_mode); + else + err = anx6345_clear_bits(anx6345->map[i2c_idx_dptx], + sp_dp_system_ctrl_base + 4, + sp_enhanced_mode); + if (err) + return err; + + value = drm_dp_link_rate_to_bw_code(anx6345->link.rate); + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_main_link_bw_set_reg, value); + if (err) + return err; + + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_lane_count_set_reg, anx6345->link.num_lanes); + if (err) + return err; + + err = drm_dp_link_configure(&anx6345->aux, &anx6345->link); + if (err < 0) { + drm_error("failed to configure displayport link: %d ", err); + return err; + } + + /* start training on the source */ + err = regmap_write(anx6345->map[i2c_idx_dptx], sp_dp_lt_ctrl_reg, + sp_lt_en); + if (err) + return err; + + return regmap_read_poll_timeout(anx6345->map[i2c_idx_dptx], + sp_dp_lt_ctrl_reg, + value, !(value & sp_dp_lt_inprogress), + poll_delay, poll_timeout); +} + +static int anx6345_tx_initialization(struct anx6345 *anx6345) +{ + int err, i; + + /* fixme: colordepth is hardcoded for now */ + err = regmap_write(anx6345->map[i2c_idx_txcom], sp_vid_ctrl2_reg, + sp_in_bpc_6bit << sp_in_bpc_shift); + if (err) + return err; + + err = regmap_write(anx6345->map[i2c_idx_dptx], sp_dp_pll_ctrl_reg, 0); + if (err) + return err; + + err = regmap_write(anx6345->map[i2c_idx_txcom], + sp_analog_debug1_reg, 0); + if (err) + return 
err; + + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_link_debug_ctrl_reg, + sp_new_prbs7 | sp_m_vid_debug); + if (err) + return err; + + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_analog_power_down_reg, 0); + if (err) + return err; + + /* force hpd */ + err = anx6345_set_bits(anx6345->map[i2c_idx_dptx], + sp_dp_system_ctrl_base + 3, + sp_hpd_force | sp_hpd_ctrl); + if (err) + return err; + + for (i = 0; i < 4; i++) { + /* 4 lanes */ + err = regmap_write(anx6345->map[i2c_idx_dptx], + sp_dp_lane0_lt_ctrl_reg + i, 0); + if (err) + return err; + } + + /* reset aux */ + err = anx6345_set_bits(anx6345->map[i2c_idx_txcom], + sp_reset_ctrl2_reg, sp_aux_rst); + if (err) + return err; + + return anx6345_clear_bits(anx6345->map[i2c_idx_txcom], + sp_reset_ctrl2_reg, sp_aux_rst); +} + +static void anx6345_poweron(struct anx6345 *anx6345) +{ + int err; + + /* ensure reset is asserted before starting power on sequence */ + gpiod_set_value_cansleep(anx6345->gpiod_reset, 1); + usleep_range(1000, 2000); + + err = regulator_enable(anx6345->dvdd12); + if (err) { + drm_error("failed to enable dvdd12 regulator: %d ", + err); + return; + } + + /* t1 - delay between vdd12 and vdd25 should be 0-2ms */ + usleep_range(1000, 2000); + + err = regulator_enable(anx6345->dvdd25); + if (err) { + drm_error("failed to enable dvdd25 regulator: %d ", + err); + return; + } + + /* t2 - delay between resetn and all power rail stable, + * should be 2-5ms + */ + usleep_range(2000, 5000); + + gpiod_set_value_cansleep(anx6345->gpiod_reset, 0); + + /* power on registers module */ + anx6345_set_bits(anx6345->map[i2c_idx_txcom], sp_powerdown_ctrl_reg, + sp_hdcp_pd | sp_audio_pd | sp_video_pd | sp_link_pd); + anx6345_clear_bits(anx6345->map[i2c_idx_txcom], sp_powerdown_ctrl_reg, + sp_register_pd | sp_total_pd); + + if (anx6345->panel) + drm_panel_prepare(anx6345->panel); + + anx6345->powered = true; +} + +static void anx6345_poweroff(struct anx6345 *anx6345) +{ + int err; + + 
gpiod_set_value_cansleep(anx6345->gpiod_reset, 1); + usleep_range(1000, 2000); + + if (anx6345->panel) + drm_panel_unprepare(anx6345->panel); + + err = regulator_disable(anx6345->dvdd25); + if (err) { + drm_error("failed to disable dvdd25 regulator: %d ", + err); + return; + } + + usleep_range(5000, 10000); + + err = regulator_disable(anx6345->dvdd12); + if (err) { + drm_error("failed to disable dvdd12 regulator: %d ", + err); + return; + } + + usleep_range(1000, 2000); + + anx6345->powered = false; +} + +static int anx6345_start(struct anx6345 *anx6345) +{ + int err; + + if (!anx6345->powered) + anx6345_poweron(anx6345); + + /* power on needed modules */ + err = anx6345_clear_bits(anx6345->map[i2c_idx_txcom], + sp_powerdown_ctrl_reg, + sp_video_pd | sp_link_pd); + + err = anx6345_tx_initialization(anx6345); + if (err) { + drm_error("failed edp transmitter initialization: %d ", err); + anx6345_poweroff(anx6345); + return err; + } + + err = anx6345_dp_link_training(anx6345); + if (err) { + drm_error("failed link training: %d ", err); + anx6345_poweroff(anx6345); + return err; + } + + /* + * this delay seems to help keep the hardware in a good state. without + * it, there are times where it fails silently. 
+ */ + usleep_range(10000, 15000); + + return 0; +} + +static int anx6345_config_dp_output(struct anx6345 *anx6345) +{ + int err; + + err = anx6345_clear_bits(anx6345->map[i2c_idx_txcom], sp_vid_ctrl1_reg, + sp_video_mute); + if (err) + return err; + + /* enable dp output */ + err = anx6345_set_bits(anx6345->map[i2c_idx_txcom], sp_vid_ctrl1_reg, + sp_video_en); + if (err) + return err; + + /* force stream valid */ + return anx6345_set_bits(anx6345->map[i2c_idx_dptx], + sp_dp_system_ctrl_base + 3, + sp_strm_force | sp_strm_ctrl); +} + +static int anx6345_get_downstream_info(struct anx6345 *anx6345) +{ + u8 value; + int err; + + err = drm_dp_dpcd_readb(&anx6345->aux, dp_sink_count, &value); + if (err < 0) { + drm_error("get sink count failed %d ", err); + return err; + } + + if (!dp_get_sink_count(value)) { + drm_error("downstream disconnected "); + return -eio; + } + + return 0; +} + +static int anx6345_get_modes(struct drm_connector *connector) +{ + struct anx6345 *anx6345 = connector_to_anx6345(connector); + int err, num_modes = 0; + bool power_off = false; + + mutex_lock(&anx6345->lock); + + if (!anx6345->edid) { + if (!anx6345->powered) { + anx6345_poweron(anx6345); + power_off = true; + } + + err = anx6345_get_downstream_info(anx6345); + if (err) { + drm_error("failed to get downstream info: %d ", err); + goto unlock; + } + + anx6345->edid = drm_get_edid(connector, &anx6345->aux.ddc); + if (!anx6345->edid) + drm_error("failed to read edid from panel "); + + err = drm_connector_update_edid_property(connector, + anx6345->edid); + if (err) { + drm_error("failed to update edid property: %d ", err); + goto unlock; + } + } + + num_modes += drm_add_edid_modes(connector, anx6345->edid); + +unlock: + if (power_off) + anx6345_poweroff(anx6345); + + mutex_unlock(&anx6345->lock); + + if (!num_modes && anx6345->panel) + num_modes += drm_panel_get_modes(anx6345->panel); + + return num_modes; +} + +static const struct drm_connector_helper_funcs anx6345_connector_helper_funcs 
= { + .get_modes = anx6345_get_modes, +}; + +static void +anx6345_connector_destroy(struct drm_connector *connector) +{ + struct anx6345 *anx6345 = connector_to_anx6345(connector); + + if (anx6345->panel) + drm_panel_detach(anx6345->panel); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs anx6345_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = anx6345_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int anx6345_bridge_attach(struct drm_bridge *bridge) +{ + struct anx6345 *anx6345 = bridge_to_anx6345(bridge); + int err; + + if (!bridge->encoder) { + drm_error("parent encoder object not found"); + return -enodev; + } + + /* register aux channel */ + anx6345->aux.name = "dp-aux"; + anx6345->aux.dev = &anx6345->client->dev; + anx6345->aux.transfer = anx6345_aux_transfer; + + err = drm_dp_aux_register(&anx6345->aux); + if (err < 0) { + drm_error("failed to register aux channel: %d ", err); + return err; + } + + err = drm_connector_init(bridge->dev, &anx6345->connector, + &anx6345_connector_funcs, + drm_mode_connector_edp); + if (err) { + drm_error("failed to initialize connector: %d ", err); + return err; + } + + drm_connector_helper_add(&anx6345->connector, + &anx6345_connector_helper_funcs); + + err = drm_connector_register(&anx6345->connector); + if (err) { + drm_error("failed to register connector: %d ", err); + return err; + } + + anx6345->connector.polled = drm_connector_poll_hpd; + + err = drm_connector_attach_encoder(&anx6345->connector, + bridge->encoder); + if (err) { + drm_error("failed to link up connector to encoder: %d ", err); + return err; + } + + if (anx6345->panel) { + err = drm_panel_attach(anx6345->panel, &anx6345->connector); + if (err) { + drm_error("failed to attach panel: %d ", err); + return err; + } + 
} + + return 0; +} + +static enum drm_mode_status +anx6345_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_mode *mode) +{ + if (mode->flags & drm_mode_flag_interlace) + return mode_no_interlace; + + /* max 1200p at 5.4 ghz, one lane */ + if (mode->clock > 154000) + return mode_clock_high; + + return mode_ok; +} + +static void anx6345_bridge_disable(struct drm_bridge *bridge) +{ + struct anx6345 *anx6345 = bridge_to_anx6345(bridge); + + /* power off all modules except configuration registers access */ + anx6345_set_bits(anx6345->map[i2c_idx_txcom], sp_powerdown_ctrl_reg, + sp_hdcp_pd | sp_audio_pd | sp_video_pd | sp_link_pd); + if (anx6345->panel) + drm_panel_disable(anx6345->panel); + + if (anx6345->powered) + anx6345_poweroff(anx6345); +} + +static void anx6345_bridge_enable(struct drm_bridge *bridge) +{ + struct anx6345 *anx6345 = bridge_to_anx6345(bridge); + int err; + + if (anx6345->panel) + drm_panel_enable(anx6345->panel); + + err = anx6345_start(anx6345); + if (err) { + drm_error("failed to initialize: %d ", err); + return; + } + + err = anx6345_config_dp_output(anx6345); + if (err) + drm_error("failed to enable dp output: %d ", err); +} + +static const struct drm_bridge_funcs anx6345_bridge_funcs = { + .attach = anx6345_bridge_attach, + .mode_valid = anx6345_bridge_mode_valid, + .disable = anx6345_bridge_disable, + .enable = anx6345_bridge_enable, +}; + +static void unregister_i2c_dummy_clients(struct anx6345 *anx6345) +{ + unsigned int i; + + for (i = 1; i < array_size(anx6345->i2c_clients); i++) + if (anx6345->i2c_clients[i] && + anx6345->i2c_clients[i]->addr != anx6345->client->addr) + i2c_unregister_device(anx6345->i2c_clients[i]); +} + +static const struct regmap_config anx6345_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0xff, + .cache_type = regcache_none, +}; + +static const u16 anx6345_chipid_list[] = { + 0x6345, +}; + +static bool anx6345_get_chip_id(struct anx6345 *anx6345) +{ + unsigned int i, 
idl, idh, version; + + if (regmap_read(anx6345->map[i2c_idx_txcom], sp_device_idl_reg, &idl)) + return false; + + if (regmap_read(anx6345->map[i2c_idx_txcom], sp_device_idh_reg, &idh)) + return false; + + anx6345->chipid = (u8)idl | ((u8)idh << 8); + + if (regmap_read(anx6345->map[i2c_idx_txcom], sp_device_version_reg, + &version)) + return false; + + for (i = 0; i < array_size(anx6345_chipid_list); i++) { + if (anx6345->chipid == anx6345_chipid_list[i]) { + drm_info("found anx%x (ver. %d) edp transmitter ", + anx6345->chipid, version); + return true; + } + } + + drm_error("anx%x (ver. %d) not supported by this driver ", + anx6345->chipid, version); + + return false; +} + +static int anx6345_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct anx6345 *anx6345; + struct device *dev; + int i, err; + + anx6345 = devm_kzalloc(&client->dev, sizeof(*anx6345), gfp_kernel); + if (!anx6345) + return -enomem; + + mutex_init(&anx6345->lock); + + anx6345->bridge.of_node = client->dev.of_node; + + anx6345->client = client; + i2c_set_clientdata(client, anx6345); + + dev = &anx6345->client->dev; + + err = drm_of_find_panel_or_bridge(client->dev.of_node, 1, 0, + &anx6345->panel, null); + if (err == -eprobe_defer) + return err; + + if (err) + drm_debug("no panel found "); + + /* 1.2v digital core power regulator */ + anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply"); + if (is_err(anx6345->dvdd12)) { + drm_error("dvdd12-supply not found "); + return ptr_err(anx6345->dvdd12); + } + + /* 2.5v digital core power regulator */ + anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply"); + if (is_err(anx6345->dvdd25)) { + drm_error("dvdd25-supply not found "); + return ptr_err(anx6345->dvdd25); + } + + /* gpio for chip reset */ + anx6345->gpiod_reset = devm_gpiod_get(dev, "reset", gpiod_out_low); + if (is_err(anx6345->gpiod_reset)) { + drm_error("reset gpio not found "); + return ptr_err(anx6345->gpiod_reset); + } + + /* map slave addresses of 
anx6345 */ + for (i = 0; i < i2c_num_addresses; i++) { + if (anx6345_i2c_addresses[i] >> 1 != client->addr) + anx6345->i2c_clients[i] = i2c_new_dummy(client->adapter, + anx6345_i2c_addresses[i] >> 1); + else + anx6345->i2c_clients[i] = client; + + if (!anx6345->i2c_clients[i]) { + err = -enomem; + drm_error("failed to reserve i2c bus %02x ", + anx6345_i2c_addresses[i]); + goto err_unregister_i2c; + } + + anx6345->map[i] = devm_regmap_init_i2c(anx6345->i2c_clients[i], + &anx6345_regmap_config); + if (is_err(anx6345->map[i])) { + err = ptr_err(anx6345->map[i]); + drm_error("failed regmap initialization %02x ", + anx6345_i2c_addresses[i]); + goto err_unregister_i2c; + } + } + + /* look for supported chip id */ + anx6345_poweron(anx6345); + if (anx6345_get_chip_id(anx6345)) { + anx6345->bridge.funcs = &anx6345_bridge_funcs; + drm_bridge_add(&anx6345->bridge); + + return 0; + } else { + anx6345_poweroff(anx6345); + err = -enodev; + } + +err_unregister_i2c: + unregister_i2c_dummy_clients(anx6345); + return err; +} + +static int anx6345_i2c_remove(struct i2c_client *client) +{ + struct anx6345 *anx6345 = i2c_get_clientdata(client); + + drm_bridge_remove(&anx6345->bridge); + + unregister_i2c_dummy_clients(anx6345); + + kfree(anx6345->edid); + + mutex_destroy(&anx6345->lock); + + return 0; +} + +static const struct i2c_device_id anx6345_id[] = { + { "anx6345", 0 }, + { /* sentinel */ } +}; +module_device_table(i2c, anx6345_id); + +static const struct of_device_id anx6345_match_table[] = { + { .compatible = "analogix,anx6345", }, + { /* sentinel */ }, +}; +module_device_table(of, anx6345_match_table); + +static struct i2c_driver anx6345_driver = { + .driver = { + .name = "anx6345", + .of_match_table = of_match_ptr(anx6345_match_table), + }, + .probe = anx6345_i2c_probe, + .remove = anx6345_i2c_remove, + .id_table = anx6345_id, +}; +module_i2c_driver(anx6345_driver); + +module_description("anx6345 edp transmitter driver"); +module_author("icenowy zheng <icenowy@aosc.io>"); 
+module_license("gpl v2");
|
Graphics
|
6aa192698089b450b06d609355fc9c82c07856d2
|
icenowy zheng
|
drivers
|
gpu
|
analogix, bridge, drm
|
drm/hisilicon/hibmc: export vram mm information to debugfs
|
this change makes information about vram consumption available on debugfs. see
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
export vram mm information to debugfs
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['hisilicon/hibmc ']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c + .debugfs_init = drm_vram_mm_debugfs_init,
|
Graphics
|
de2318f69366cdbb285253d5f40b129b09b3515b
|
thomas zimmermann
|
drivers
|
gpu
|
drm, hibmc, hisilicon
|
drm/mgag200: add module parameter to pin all buffers at offset 0
|
for hardware that does not interpret the startadd field correctly, add the module parameter 'hw_bug_no_startadd', which enables the workaround.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add module parameter to pin all buffers at offset 0
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['mgag200 ']
|
['c']
| 1
| 14
| 0
|
--- diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c +int mgag200_hw_bug_no_startadd = -1; +module_parm_desc(modeset, "hw does not interpret scanout-buffer start address correctly"); +module_param_named(hw_bug_no_startadd, mgag200_hw_bug_no_startadd, int, 0400); + + if (mgag200_hw_bug_no_startadd > 0) { + drm_warn_once("option hw_bug_no_startradd is enabled. please " + "report the output of 'lspci -vvnn' to " + "<dri-devel@lists.freedesktop.org> if this " + "option is required to make mgag200 work " + "correctly on your system. "); + return true; + } else if (!mgag200_hw_bug_no_startadd) { + return false; + }
|
Graphics
|
3cacb2086e41bbdf4a43e494d47d05db356992b9
|
thomas zimmermann
|
drivers
|
gpu
|
drm, mgag200
|
drm/scheduler: improve job distribution with multiple queues
|
this patch uses score based logic to select a new rq for better loadbalance between multiple rq/scheds instead of num_jobs.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
improve job distribution with multiple queues
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scheduler']
|
['c', 'h']
| 3
| 12
| 10
|
--- diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c - unsigned int min_jobs = uint_max, num_jobs; + unsigned int min_score = uint_max, num_score; - num_jobs = atomic_read(&sched->num_jobs); - if (num_jobs < min_jobs) { - min_jobs = num_jobs; + num_score = atomic_read(&sched->score); + if (num_score < min_score) { + min_score = num_score; - atomic_inc(&entity->rq->sched->num_jobs); + atomic_inc(&entity->rq->sched->score); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c + atomic_inc(&rq->sched->score); + atomic_dec(&rq->sched->score); - atomic_dec(&sched->num_jobs); + atomic_dec(&sched->score); - atomic_set(&sched->num_jobs, 0); + atomic_set(&sched->score, 0); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h - * @num_jobs: the number of jobs in queue in the scheduler + * @score: score to help loadbalancer pick a idle sched - atomic_t num_jobs; - bool ready; + atomic_t score; + bool ready;
|
Graphics
|
56822db194232c089601728d68ed078dccb97f8b
|
nirmoy das
|
include
|
drm
|
drm, scheduler
|
efi: allow disabling pci busmastering on bridges during boot
|
add an option to disable the busmaster bit in the control register on all pci bridges before calling exitbootservices() and passing control to the runtime kernel. system firmware may configure the iommu to prevent malicious pci devices from being able to attack the os via dma. however, since firmware can't guarantee that the os is iommu-aware, it will tear down iommu configuration when exitbootservices() is called. this leaves a window between where a hostile device could still cause damage before linux configures the iommu again.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
allow disabling pci busmastering on bridges during boot
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['efi']
|
['c', 'kconfig', 'txt', 'makefile', 'h']
| 7
| 168
| 3
|
--- diff --git a/documentation/admin-guide/kernel-parameters.txt b/documentation/admin-guide/kernel-parameters.txt --- a/documentation/admin-guide/kernel-parameters.txt +++ b/documentation/admin-guide/kernel-parameters.txt - "nosoftreserve" } + "nosoftreserve", "disable_early_pci_dma", + "no_disable_early_pci_dma" } + disable_early_pci_dma: disable the busmaster bit on all + pci bridges while in the efi boot stub + no_disable_early_pci_dma: leave the busmaster bit set + on all pci bridges while in the efi boot stub diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h +/* pci i/o */ +#define __efi64_argmap_get_location(protocol, seg, bus, dev, func) \ + ((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus), \ + efi64_zero_upper(dev), efi64_zero_upper(func)) + diff --git a/drivers/firmware/efi/kconfig b/drivers/firmware/efi/kconfig --- a/drivers/firmware/efi/kconfig +++ b/drivers/firmware/efi/kconfig +config efi_disable_pci_dma + bool "clear busmaster bit on pci bridges during exitbootservices()" + help + disable the busmaster bit in the control register on all pci bridges + while calling exitbootservices() and passing control to the runtime + kernel. system firmware may configure the iommu to prevent malicious + pci devices from being able to attack the os via dma. however, since + firmware can't guarantee that the os is iommu-aware, it will tear + down iommu configuration when exitbootservices() is called. this + leaves a window between where a hostile device could still cause + damage before linux configures the iommu again. + + if you say y here, the efi stub will clear the busmaster bit on all + pci bridges before exitbootservices() is called. this will prevent + any malicious pci devices from being able to perform dma until the + kernel reenables busmastering after configuring the iommu. 
+ + this option will cause failures with some poorly behaved hardware + and should not be enabled without testing. the kernel commandline + options "efi=disable_early_pci_dma" or "efi=no_disable_early_pci_dma" + may be used to override this option. + diff --git a/drivers/firmware/efi/libstub/makefile b/drivers/firmware/efi/libstub/makefile --- a/drivers/firmware/efi/libstub/makefile +++ b/drivers/firmware/efi/libstub/makefile - random.o + random.o pci.o diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c +static bool __efistub_global efi_disable_pci_dma = + is_enabled(config_efi_disable_pci_dma); + if (!strncmp(str, "disable_early_pci_dma", 21)) { + str += strlen("disable_early_pci_dma"); + efi_disable_pci_dma = true; + } + + if (!strncmp(str, "no_disable_early_pci_dma", 24)) { + str += strlen("no_disable_early_pci_dma"); + efi_disable_pci_dma = false; + } + + if (efi_disable_pci_dma) + efi_pci_disable_bridge_busmaster(); + diff --git a/drivers/firmware/efi/libstub/pci.c b/drivers/firmware/efi/libstub/pci.c --- /dev/null +++ b/drivers/firmware/efi/libstub/pci.c +// spdx-license-identifier: gpl-2.0 +/* + * pci-related functions used by the efi stub on multiple + * architectures. 
+ * + * copyright 2019 google, llc + */ + +#include <linux/efi.h> +#include <linux/pci.h> + +#include <asm/efi.h> + +#include "efistub.h" + +void efi_pci_disable_bridge_busmaster(void) +{ + efi_guid_t pci_proto = efi_pci_io_protocol_guid; + unsigned long pci_handle_size = 0; + efi_handle_t *pci_handle = null; + efi_handle_t handle; + efi_status_t status; + u16 class, command; + int i; + + status = efi_bs_call(locate_handle, efi_locate_by_protocol, &pci_proto, + null, &pci_handle_size, null); + + if (status != efi_buffer_too_small) { + if (status != efi_success && status != efi_not_found) + pr_efi_err("failed to locate pci i/o handles' "); + return; + } + + status = efi_bs_call(allocate_pool, efi_loader_data, pci_handle_size, + (void **)&pci_handle); + if (status != efi_success) { + pr_efi_err("failed to allocate memory for 'pci_handle' "); + return; + } + + status = efi_bs_call(locate_handle, efi_locate_by_protocol, &pci_proto, + null, &pci_handle_size, pci_handle); + if (status != efi_success) { + pr_efi_err("failed to locate pci i/o handles' "); + goto free_handle; + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + unsigned long segment_nr, bus_nr, device_nr, func_nr; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != efi_success) + continue; + + /* + * disregard devices living on bus 0 - these are not behind a + * bridge so no point in disconnecting them from their drivers. + */ + status = efi_call_proto(pci, get_location, &segment_nr, &bus_nr, + &device_nr, &func_nr); + if (status != efi_success || bus_nr == 0) + continue; + + /* + * don't disconnect vga controllers so we don't risk losing + * access to the framebuffer. 
drivers for true pcie graphics + * controllers that are behind a pcie root port do not use + * dma to implement the gop framebuffer anyway [although they + * may use it in their implentation of gop->blt()], and so + * disabling dma in the pci bridge should not interfere with + * normal operation of the device. + */ + status = efi_call_proto(pci, pci.read, efipciiowidthuint16, + pci_class_device, 1, &class); + if (status != efi_success || class == pci_class_display_vga) + continue; + + /* disconnect this handle from all its drivers */ + efi_bs_call(disconnect_controller, handle, null, null); + } + + for_each_efi_handle(handle, pci_handle, pci_handle_size, i) { + efi_pci_io_protocol_t *pci; + + status = efi_bs_call(handle_protocol, handle, &pci_proto, + (void **)&pci); + if (status != efi_success || !pci) + continue; + + status = efi_call_proto(pci, pci.read, efipciiowidthuint16, + pci_class_device, 1, &class); + + if (status != efi_success || class != pci_class_bridge_pci) + continue; + + /* disable busmastering */ + status = efi_call_proto(pci, pci.read, efipciiowidthuint16, + pci_command, 1, &command); + if (status != efi_success || !(command & pci_command_master)) + continue; + + command &= ~pci_command_master; + status = efi_call_proto(pci, pci.write, efipciiowidthuint16, + pci_command, 1, &command); + if (status != efi_success) + pr_efi_err("failed to disable pci busmastering "); + } + +free_handle: + efi_bs_call(free_pool, pci_handle); +} diff --git a/include/linux/efi.h b/include/linux/efi.h --- a/include/linux/efi.h +++ b/include/linux/efi.h - void *disconnect_controller; + efi_status_t (__efiapi *disconnect_controller)(efi_handle_t, + efi_handle_t, + efi_handle_t); +void efi_pci_disable_bridge_busmaster(void); +
|
Power Management
|
4444f8541dad16fefd9b8807ad1451e806ef1d94
|
matthew garrett
|
drivers
|
firmware
|
asm, efi, include, libstub
|
acpi: dptf: add tiger lake acpi device ids
|
tiger lake has new unique acpi device ids that need to be added to the dptf drivers to support it.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add tiger lake acpi device ids
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['acpi', 'dptf']
|
['c']
| 2
| 5
| 0
|
--- diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c + {"int1047", 0}, diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c + {"int1040"}, + {"int1043"}, + {"int1044"}, + {"int1047"},
|
Power Management
|
55cfe6a5c58223c51ee095693958738ee0d4c942
|
gayatri kammela
|
drivers
|
acpi
|
dptf
|
acpi: fan: add tiger lake acpi device id
|
tiger lake has a new unique acpi device id for the acpi fan that needs to be added to the fan driver and to the blacklist in acpi_dev_pm_attach() to support it.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add tiger lake acpi device id
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['acpi', 'fan']
|
['c']
| 2
| 2
| 0
|
--- diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c + {"int1044", }, /* fan for tiger lake generation */ diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c + {"int1044", 0},
|
Power Management
|
c248dfe7e0caa86e1cdfceddcf96649746dd171a
|
gayatri kammela
|
drivers
|
acpi
| |
thermal: add bcm2711 thermal driver
|
this adds the thermal sensor driver for the broadcom bcm2711 soc, which is placed on the raspberry pi 4. the driver only provides soc temperature reading so far.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add bcm2711 thermal driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ']
|
['c', 'kconfig', 'makefile']
| 3
| 131
| 0
|
--- diff --git a/drivers/thermal/broadcom/kconfig b/drivers/thermal/broadcom/kconfig --- a/drivers/thermal/broadcom/kconfig +++ b/drivers/thermal/broadcom/kconfig +config bcm2711_thermal + tristate "broadcom avs ro thermal sensor driver" + depends on arch_bcm2835 || compile_test + depends on thermal_of && mfd_syscon + help + support for thermal sensors on broadcom bcm2711 socs. + diff --git a/drivers/thermal/broadcom/makefile b/drivers/thermal/broadcom/makefile --- a/drivers/thermal/broadcom/makefile +++ b/drivers/thermal/broadcom/makefile +obj-$(config_bcm2711_thermal) += bcm2711_thermal.o diff --git a/drivers/thermal/broadcom/bcm2711_thermal.c b/drivers/thermal/broadcom/bcm2711_thermal.c --- /dev/null +++ b/drivers/thermal/broadcom/bcm2711_thermal.c +// spdx-license-identifier: gpl-2.0+ +/* + * broadcom avs ro thermal sensor driver + * + * based on brcmstb_thermal + * + * copyright (c) 2020 stefan wahren + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/thermal.h> + +#include "../thermal_hwmon.h" + +#define avs_ro_temp_status 0x200 +#define avs_ro_temp_status_valid_msk (bit(16) | bit(10)) +#define avs_ro_temp_status_data_msk genmask(9, 0) + +struct bcm2711_thermal_priv { + struct regmap *regmap; + struct thermal_zone_device *thermal; +}; + +static int bcm2711_get_temp(void *data, int *temp) +{ + struct bcm2711_thermal_priv *priv = data; + int slope = thermal_zone_get_slope(priv->thermal); + int offset = thermal_zone_get_offset(priv->thermal); + u32 val; + int ret; + long t; + + ret = regmap_read(priv->regmap, avs_ro_temp_status, &val); + if (ret) + return ret; + + if (!(val & avs_ro_temp_status_valid_msk)) + return -eio; + + val &= avs_ro_temp_status_data_msk; + + /* convert a hw code to a 
temperature reading (millidegree celsius) */ + t = slope * val + offset; + + *temp = t < 0 ? 0 : t; + + return 0; +} + +static const struct thermal_zone_of_device_ops bcm2711_thermal_of_ops = { + .get_temp = bcm2711_get_temp, +}; + +static const struct of_device_id bcm2711_thermal_id_table[] = { + { .compatible = "brcm,bcm2711-thermal" }, + {}, +}; +module_device_table(of, bcm2711_thermal_id_table); + +static int bcm2711_thermal_probe(struct platform_device *pdev) +{ + struct thermal_zone_device *thermal; + struct bcm2711_thermal_priv *priv; + struct device *dev = &pdev->dev; + struct device_node *parent; + struct regmap *regmap; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), gfp_kernel); + if (!priv) + return -enomem; + + /* get regmap from syscon node */ + parent = of_get_parent(dev->of_node); /* parent should be syscon node */ + regmap = syscon_node_to_regmap(parent); + of_node_put(parent); + if (is_err(regmap)) { + ret = ptr_err(regmap); + dev_err(dev, "failed to get regmap: %d ", ret); + return ret; + } + priv->regmap = regmap; + + thermal = devm_thermal_zone_of_sensor_register(dev, 0, priv, + &bcm2711_thermal_of_ops); + if (is_err(thermal)) { + ret = ptr_err(thermal); + dev_err(dev, "could not register sensor: %d ", ret); + return ret; + } + + priv->thermal = thermal; + + thermal->tzp->no_hwmon = false; + ret = thermal_add_hwmon_sysfs(thermal); + if (ret) + return ret; + + return 0; +} + +static struct platform_driver bcm2711_thermal_driver = { + .probe = bcm2711_thermal_probe, + .driver = { + .name = "bcm2711_thermal", + .of_match_table = bcm2711_thermal_id_table, + }, +}; +module_platform_driver(bcm2711_thermal_driver); + +module_license("gpl"); +module_author("stefan wahren"); +module_description("broadcom avs ro thermal sensor driver");
|
Power Management
|
59b781352dc4cb9ae27a8ddae0cda979d29d8af7
|
stefan wahren florian fainelli f fainelli gmail com nicolas saenz julienne nsaenzjulienne suse de nicolas saenz julienne nsaenzjulienne suse de
|
drivers
|
thermal
|
broadcom
|
thermal: int340x: processor_thermal: add jasper lake support
|
added new pci id for jasper lake processor thermal device.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add jasper lake support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'int340x', 'processor_thermal']
|
['c']
| 1
| 4
| 0
|
--- diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +/* jasperlake thermal reporting device */ +#define pci_device_id_proc_jsl_thermal 0x4503 + + { pci_device(pci_vendor_id_intel, pci_device_id_proc_jsl_thermal)},
|
Power Management
|
f64a6583d3f527b297b88441e1c20e6ed45f8f56
|
swaminathan nivedita zhang rui rui zhang intel com
|
drivers
|
thermal
|
int340x_thermal, intel
|
thermal: int340x_thermal: add tiger lake acpi device ids
|
tiger lake has new unique acpi device ids for thermal devices that need to be added to the intel thermal driver to suport it.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add tiger lake acpi device ids
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'int340x_thermal']
|
['c']
| 2
| 2
| 0
|
--- diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c + {"int1040", 0}, diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c + {"int1043", 0},
|
Power Management
|
9b1b5535dfc98a9d783199d4bcd8580e15daa23c
|
gayatri kammela zhang rui rui zhang intel com pandruvada srinivas srinivas pandruvada intel com
|
drivers
|
thermal
|
int340x_thermal, intel
|
thermal: intel: intel_pch_thermal: add comet lake (cml) platform support
|
add comet lake to the list of the platforms to support intel_pch_thermal driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add comet lake (cml) platform support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'intel', 'intel_pch_thermal']
|
['c']
| 1
| 8
| 0
|
--- diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c --- a/drivers/thermal/intel/intel_pch_thermal.c +++ b/drivers/thermal/intel/intel_pch_thermal.c +#define pch_thermal_did_cml_h 0x06f9 /* cml-h pch */ + board_cml, + [board_cml] = { + .name = "pch_cometlake", + .ops = &pch_dev_ops_wpt, + } + { pci_device(pci_vendor_id_intel, pch_thermal_did_cml_h), + .driver_data = board_cml, },
|
Power Management
|
35709c4ee772afc3818cd6d42d123d608feeaa33
|
gayatri kammela zhang rui rui zhang intel com
|
drivers
|
thermal
|
intel
|
thermal: qoriq: add hwmon support
|
expose thermal readings as a hwmon device, so that it could be accessed using lm-sensors.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add hwmon support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'qoriq']
|
['c']
| 1
| 6
| 0
|
--- diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c +#include "thermal_hwmon.h" + + if (devm_thermal_add_hwmon_sysfs(tzd)) + dev_warn(dev, + "failed to add hwmon sysfs attributes "); +
|
Power Management
|
fd8433099c5b78c2a1915e1b9911ecfdfc041103
|
andrey smirnov daniel lezcano daniel lezcano linaro org lucas stach l stach pengutronix de
|
drivers
|
thermal
| |
thermal: rockchip: enable hwmon
|
by default, of-based thermal drivers do not enable hwmon. explicitly enable hwmon for both, the soc and gpu temperature sensor.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable hwmon
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'rockchip']
|
['c']
| 1
| 11
| 1
|
--- diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c +#include "thermal_hwmon.h" + - for (i = 0; i < thermal->chip->chn_num; i++) + for (i = 0; i < thermal->chip->chn_num; i++) { + thermal->sensors[i].tzd->tzp->no_hwmon = false; + error = thermal_add_hwmon_sysfs(thermal->sensors[i].tzd); + if (error) + dev_warn(&pdev->dev, + "failed to register sensor %d with hwmon: %d ", + i, error); + } + thermal_remove_hwmon_sysfs(sensor->tzd);
|
Power Management
|
d27970b82a0f552f70e76fab154855b3192aac23
|
stefan schaeckeler daniel lezcano daniel lezcano linaro org
|
drivers
|
thermal
| |
thermal: sun8i: add hwmon support
|
expose sun8i thermal as a hwmon device.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add hwmon support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'sun8i']
|
['c']
| 1
| 6
| 0
|
--- diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c --- a/drivers/thermal/sun8i_thermal.c +++ b/drivers/thermal/sun8i_thermal.c +#include "thermal_hwmon.h" + + + if (devm_thermal_add_hwmon_sysfs(tmdev->sensor[i].tzd)) + dev_warn(tmdev->dev, + "failed to add hwmon sysfs attributes ");
|
Power Management
|
85f0ad221317c18e6032b6735f6b36c8a6a96ea9
|
yangtao li
|
drivers
|
thermal
| |
thermal/drivers/sun8i: add thermal driver for h6/h5/h3/a64/a83t/r40
|
this patch adds the support for allwinner thermal sensor, within allwinner soc. it will register sensors for thermal framework and use device tree to bind cooling device.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add thermal driver for h6/h5/h3/a64/a83t/r40
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['thermal ', 'sun8i']
|
['c', 'kconfig', 'makefile', 'maintainers']
| 4
| 662
| 0
|
--- diff --git a/maintainers b/maintainers --- a/maintainers +++ b/maintainers +allwinner thermal driver +m: vasily khoruzhick <anarsoul@gmail.com> +m: yangtao li <tiny.windzz@gmail.com> +l: linux-pm@vger.kernel.org +s: maintained +f: documentation/devicetree/bindings/thermal/allwinner,sun8i-a83t-ths.yaml +f: drivers/thermal/sun8i_thermal.c + diff --git a/drivers/thermal/kconfig b/drivers/thermal/kconfig --- a/drivers/thermal/kconfig +++ b/drivers/thermal/kconfig +config sun8i_thermal + tristate "allwinner sun8i thermal driver" + depends on arch_sunxi || compile_test + depends on has_iomem + depends on nvmem + depends on of + depends on reset_controller + help + support for the sun8i thermal sensor driver into the linux thermal + framework. + + to compile this driver as a module, choose m here: the + module will be called sun8i-thermal. + diff --git a/drivers/thermal/makefile b/drivers/thermal/makefile --- a/drivers/thermal/makefile +++ b/drivers/thermal/makefile +obj-$(config_sun8i_thermal) += sun8i_thermal.o diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c --- /dev/null +++ b/drivers/thermal/sun8i_thermal.c +// spdx-license-identifier: gpl-2.0 +/* + * thermal sensor driver for allwinner soc + * copyright (c) 2019 yangtao li + * + * based on the work of icenowy zheng <icenowy@aosc.io> + * based on the work of ondrej jirman <megous@megous.com> + * based on the work of josef gajdusek <atx@atx.name> + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <linux/thermal.h> + +#define max_sensor_num 4 + +#define ft_temp_mask genmask(11, 0) +#define temp_calib_mask genmask(11, 0) +#define calibrate_default 0x800 + +#define sun8i_ths_ctrl0 0x00 +#define sun8i_ths_ctrl2 0x40 +#define sun8i_ths_ic 
0x44 +#define sun8i_ths_is 0x48 +#define sun8i_ths_mfc 0x70 +#define sun8i_ths_temp_calib 0x74 +#define sun8i_ths_temp_data 0x80 + +#define sun50i_ths_ctrl0 0x00 +#define sun50i_h6_ths_enable 0x04 +#define sun50i_h6_ths_pc 0x08 +#define sun50i_h6_ths_dic 0x10 +#define sun50i_h6_ths_dis 0x20 +#define sun50i_h6_ths_mfc 0x30 +#define sun50i_h6_ths_temp_calib 0xa0 +#define sun50i_h6_ths_temp_data 0xc0 + +#define sun8i_ths_ctrl0_t_acq0(x) (genmask(15, 0) & (x)) +#define sun8i_ths_ctrl2_t_acq1(x) ((genmask(15, 0) & (x)) << 16) +#define sun8i_ths_data_irq_sts(x) bit(x + 8) + +#define sun50i_ths_ctrl0_t_acq(x) ((genmask(15, 0) & (x)) << 16) +#define sun50i_ths_filter_en bit(2) +#define sun50i_ths_filter_type(x) (genmask(1, 0) & (x)) +#define sun50i_h6_ths_pc_temp_period(x) ((genmask(19, 0) & (x)) << 12) +#define sun50i_h6_ths_data_irq_sts(x) bit(x) + +/* millidegree celsius */ +#define ths_efuse_cp_ft_mask 0x3000 +#define ths_efuse_cp_ft_bit 12 +#define ths_calibration_in_ft 1 + +struct tsensor { + struct ths_device *tmdev; + struct thermal_zone_device *tzd; + int id; +}; + +struct ths_thermal_chip { + bool has_mod_clk; + bool has_bus_clk_reset; + int sensor_num; + int offset; + int scale; + int ft_deviation; + int temp_data_base; + int (*calibrate)(struct ths_device *tmdev, + u16 *caldata, int callen); + int (*init)(struct ths_device *tmdev); + int (*irq_ack)(struct ths_device *tmdev); + int (*calc_temp)(struct ths_device *tmdev, + int id, int reg); +}; + +struct ths_device { + const struct ths_thermal_chip *chip; + struct device *dev; + struct regmap *regmap; + struct reset_control *reset; + struct clk *bus_clk; + struct clk *mod_clk; + struct tsensor sensor[max_sensor_num]; + u32 cp_ft_flag; +}; + +/* temp unit: millidegree celsius */ +static int sun8i_ths_calc_temp(struct ths_device *tmdev, + int id, int reg) +{ + return tmdev->chip->offset - (reg * tmdev->chip->scale / 10); +} + +static int sun50i_h5_calc_temp(struct ths_device *tmdev, + int id, int reg) +{ + if (reg 
>= 0x500) + return -1191 * reg / 10 + 223000; + else if (!id) + return -1452 * reg / 10 + 259000; + else + return -1590 * reg / 10 + 276000; +} + +static int sun8i_ths_get_temp(void *data, int *temp) +{ + struct tsensor *s = data; + struct ths_device *tmdev = s->tmdev; + int val = 0; + + regmap_read(tmdev->regmap, tmdev->chip->temp_data_base + + 0x4 * s->id, &val); + + /* ths have no data yet */ + if (!val) + return -eagain; + + *temp = tmdev->chip->calc_temp(tmdev, s->id, val); + /* + * according to the original sdk, there are some platforms(rarely) + * that add a fixed offset value after calculating the temperature + * value. we can't simply put it on the formula for calculating the + * temperature above, because the formula for calculating the + * temperature above is also used when the sensor is calibrated. if + * do this, the correct calibration formula is hard to know. + */ + *temp += tmdev->chip->ft_deviation; + + return 0; +} + +static const struct thermal_zone_of_device_ops ths_ops = { + .get_temp = sun8i_ths_get_temp, +}; + +static const struct regmap_config config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .fast_io = true, + .max_register = 0xfc, +}; + +static int sun8i_h3_irq_ack(struct ths_device *tmdev) +{ + int i, state, ret = 0; + + regmap_read(tmdev->regmap, sun8i_ths_is, &state); + + for (i = 0; i < tmdev->chip->sensor_num; i++) { + if (state & sun8i_ths_data_irq_sts(i)) { + regmap_write(tmdev->regmap, sun8i_ths_is, + sun8i_ths_data_irq_sts(i)); + ret |= bit(i); + } + } + + return ret; +} + +static int sun50i_h6_irq_ack(struct ths_device *tmdev) +{ + int i, state, ret = 0; + + regmap_read(tmdev->regmap, sun50i_h6_ths_dis, &state); + + for (i = 0; i < tmdev->chip->sensor_num; i++) { + if (state & sun50i_h6_ths_data_irq_sts(i)) { + regmap_write(tmdev->regmap, sun50i_h6_ths_dis, + sun50i_h6_ths_data_irq_sts(i)); + ret |= bit(i); + } + } + + return ret; +} + +static irqreturn_t sun8i_irq_thread(int irq, void *data) +{ + struct 
ths_device *tmdev = data; + int i, state; + + state = tmdev->chip->irq_ack(tmdev); + + for (i = 0; i < tmdev->chip->sensor_num; i++) { + if (state & bit(i)) + thermal_zone_device_update(tmdev->sensor[i].tzd, + thermal_event_unspecified); + } + + return irq_handled; +} + +static int sun8i_h3_ths_calibrate(struct ths_device *tmdev, + u16 *caldata, int callen) +{ + int i; + + if (!caldata[0] || callen < 2 * tmdev->chip->sensor_num) + return -einval; + + for (i = 0; i < tmdev->chip->sensor_num; i++) { + int offset = (i % 2) << 4; + + regmap_update_bits(tmdev->regmap, + sun8i_ths_temp_calib + (4 * (i >> 1)), + 0xfff << offset, + caldata[i] << offset); + } + + return 0; +} + +static int sun50i_h6_ths_calibrate(struct ths_device *tmdev, + u16 *caldata, int callen) +{ + struct device *dev = tmdev->dev; + int i, ft_temp; + + if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num) + return -einval; + + /* + * efuse layout: + * + * 0 11 16 32 + * +-------+-------+-------+ + * |temp| |sensor0|sensor1| + * +-------+-------+-------+ + * + * the calibration data on the h6 is the ambient temperature and + * sensor values that are filled during the factory test stage. + * + * the unit of stored ft temperature is 0.1 degreee celusis. + * + * we need to calculate a delta between measured and caluclated + * register values and this will become a calibration offset. 
+ */ + ft_temp = (caldata[0] & ft_temp_mask) * 100; + tmdev->cp_ft_flag = (caldata[0] & ths_efuse_cp_ft_mask) + >> ths_efuse_cp_ft_bit; + + for (i = 0; i < tmdev->chip->sensor_num; i++) { + int sensor_reg = caldata[i + 1]; + int cdata, offset; + int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg); + + /* + * calibration data is calibrate_default - (calculated + * temperature from sensor reading at factory temperature + * minus actual factory temperature) * 14.88 (scale from + * temperature to register values) + */ + cdata = calibrate_default - + ((sensor_temp - ft_temp) * 10 / tmdev->chip->scale); + if (cdata & ~temp_calib_mask) { + /* + * calibration value more than 12-bit, but calibration + * register is 12-bit. in this case, ths hardware can + * still work without calibration, although the data + * won't be so accurate. + */ + dev_warn(dev, "sensor%d is not calibrated. ", i); + continue; + } + + offset = (i % 2) * 16; + regmap_update_bits(tmdev->regmap, + sun50i_h6_ths_temp_calib + (i / 2 * 4), + 0xfff << offset, + cdata << offset); + } + + return 0; +} + +static int sun8i_ths_calibrate(struct ths_device *tmdev) +{ + struct nvmem_cell *calcell; + struct device *dev = tmdev->dev; + u16 *caldata; + size_t callen; + int ret = 0; + + calcell = devm_nvmem_cell_get(dev, "calibration"); + if (is_err(calcell)) { + if (ptr_err(calcell) == -eprobe_defer) + return -eprobe_defer; + /* + * even if the external calibration data stored in sid is + * not accessible, the ths hardware can still work, although + * the data won't be so accurate. + * + * the default value of calibration register is 0x800 for + * every sensor, and the calibration value is usually 0x7xx + * or 0x8xx, so they won't be away from the default value + * for a lot. + * + * so here we do not return error if the calibartion data is + * not available, except the probe needs deferring. 
+ */ + goto out; + } + + caldata = nvmem_cell_read(calcell, &callen); + if (is_err(caldata)) { + ret = ptr_err(caldata); + goto out; + } + + tmdev->chip->calibrate(tmdev, caldata, callen); + + kfree(caldata); +out: + return ret; +} + +static int sun8i_ths_resource_init(struct ths_device *tmdev) +{ + struct device *dev = tmdev->dev; + struct platform_device *pdev = to_platform_device(dev); + void __iomem *base; + int ret; + + base = devm_platform_ioremap_resource(pdev, 0); + if (is_err(base)) + return ptr_err(base); + + tmdev->regmap = devm_regmap_init_mmio(dev, base, &config); + if (is_err(tmdev->regmap)) + return ptr_err(tmdev->regmap); + + if (tmdev->chip->has_bus_clk_reset) { + tmdev->reset = devm_reset_control_get(dev, 0); + if (is_err(tmdev->reset)) + return ptr_err(tmdev->reset); + + tmdev->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (is_err(tmdev->bus_clk)) + return ptr_err(tmdev->bus_clk); + } + + if (tmdev->chip->has_mod_clk) { + tmdev->mod_clk = devm_clk_get(&pdev->dev, "mod"); + if (is_err(tmdev->mod_clk)) + return ptr_err(tmdev->mod_clk); + } + + ret = reset_control_deassert(tmdev->reset); + if (ret) + return ret; + + ret = clk_prepare_enable(tmdev->bus_clk); + if (ret) + goto assert_reset; + + ret = clk_set_rate(tmdev->mod_clk, 24000000); + if (ret) + goto bus_disable; + + ret = clk_prepare_enable(tmdev->mod_clk); + if (ret) + goto bus_disable; + + ret = sun8i_ths_calibrate(tmdev); + if (ret) + goto mod_disable; + + return 0; + +mod_disable: + clk_disable_unprepare(tmdev->mod_clk); +bus_disable: + clk_disable_unprepare(tmdev->bus_clk); +assert_reset: + reset_control_assert(tmdev->reset); + + return ret; +} + +static int sun8i_h3_thermal_init(struct ths_device *tmdev) +{ + int val; + + /* average over 4 samples */ + regmap_write(tmdev->regmap, sun8i_ths_mfc, + sun50i_ths_filter_en | + sun50i_ths_filter_type(1)); + /* + * clkin = 24mhz + * filter_samples = 4 + * period = 0.25s + * + * x = period * clkin / 4096 / filter_samples - 1 + * = 365 + */ + 
val = genmask(7 + tmdev->chip->sensor_num, 8); + regmap_write(tmdev->regmap, sun8i_ths_ic, + sun50i_h6_ths_pc_temp_period(365) | val); + /* + * t_acq = 20us + * clkin = 24mhz + * + * x = t_acq * clkin - 1 + * = 479 + */ + regmap_write(tmdev->regmap, sun8i_ths_ctrl0, + sun8i_ths_ctrl0_t_acq0(479)); + val = genmask(tmdev->chip->sensor_num - 1, 0); + regmap_write(tmdev->regmap, sun8i_ths_ctrl2, + sun8i_ths_ctrl2_t_acq1(479) | val); + + return 0; +} + +/* + * without this undocummented value, the returned temperatures would + * be higher than real ones by about 20c. + */ +#define sun50i_h6_ctrl0_unk 0x0000002f + +static int sun50i_h6_thermal_init(struct ths_device *tmdev) +{ + int val; + + /* + * t_acq = 20us + * clkin = 24mhz + * + * x = t_acq * clkin - 1 + * = 479 + */ + regmap_write(tmdev->regmap, sun50i_ths_ctrl0, + sun50i_h6_ctrl0_unk | sun50i_ths_ctrl0_t_acq(479)); + /* average over 4 samples */ + regmap_write(tmdev->regmap, sun50i_h6_ths_mfc, + sun50i_ths_filter_en | + sun50i_ths_filter_type(1)); + /* + * clkin = 24mhz + * filter_samples = 4 + * period = 0.25s + * + * x = period * clkin / 4096 / filter_samples - 1 + * = 365 + */ + regmap_write(tmdev->regmap, sun50i_h6_ths_pc, + sun50i_h6_ths_pc_temp_period(365)); + /* enable sensor */ + val = genmask(tmdev->chip->sensor_num - 1, 0); + regmap_write(tmdev->regmap, sun50i_h6_ths_enable, val); + /* thermal data interrupt enable */ + val = genmask(tmdev->chip->sensor_num - 1, 0); + regmap_write(tmdev->regmap, sun50i_h6_ths_dic, val); + + return 0; +} + +static int sun8i_ths_register(struct ths_device *tmdev) +{ + int i; + + for (i = 0; i < tmdev->chip->sensor_num; i++) { + tmdev->sensor[i].tmdev = tmdev; + tmdev->sensor[i].id = i; + tmdev->sensor[i].tzd = + devm_thermal_zone_of_sensor_register(tmdev->dev, + i, + &tmdev->sensor[i], + &ths_ops); + if (is_err(tmdev->sensor[i].tzd)) + return ptr_err(tmdev->sensor[i].tzd); + } + + return 0; +} + +static int sun8i_ths_probe(struct platform_device *pdev) +{ + struct 
ths_device *tmdev; + struct device *dev = &pdev->dev; + int ret, irq; + + tmdev = devm_kzalloc(dev, sizeof(*tmdev), gfp_kernel); + if (!tmdev) + return -enomem; + + tmdev->dev = dev; + tmdev->chip = of_device_get_match_data(&pdev->dev); + if (!tmdev->chip) + return -einval; + + platform_set_drvdata(pdev, tmdev); + + ret = sun8i_ths_resource_init(tmdev); + if (ret) + return ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = tmdev->chip->init(tmdev); + if (ret) + return ret; + + ret = sun8i_ths_register(tmdev); + if (ret) + return ret; + + /* + * avoid entering the interrupt handler, the thermal device is not + * registered yet, we deffer the registration of the interrupt to + * the end. + */ + ret = devm_request_threaded_irq(dev, irq, null, + sun8i_irq_thread, + irqf_oneshot, "ths", tmdev); + if (ret) + return ret; + + return 0; +} + +static int sun8i_ths_remove(struct platform_device *pdev) +{ + struct ths_device *tmdev = platform_get_drvdata(pdev); + + clk_disable_unprepare(tmdev->mod_clk); + clk_disable_unprepare(tmdev->bus_clk); + reset_control_assert(tmdev->reset); + + return 0; +} + +static const struct ths_thermal_chip sun8i_a83t_ths = { + .sensor_num = 3, + .scale = 705, + .offset = 191668, + .temp_data_base = sun8i_ths_temp_data, + .calibrate = sun8i_h3_ths_calibrate, + .init = sun8i_h3_thermal_init, + .irq_ack = sun8i_h3_irq_ack, + .calc_temp = sun8i_ths_calc_temp, +}; + +static const struct ths_thermal_chip sun8i_h3_ths = { + .sensor_num = 1, + .scale = 1211, + .offset = 217000, + .has_mod_clk = true, + .has_bus_clk_reset = true, + .temp_data_base = sun8i_ths_temp_data, + .calibrate = sun8i_h3_ths_calibrate, + .init = sun8i_h3_thermal_init, + .irq_ack = sun8i_h3_irq_ack, + .calc_temp = sun8i_ths_calc_temp, +}; + +static const struct ths_thermal_chip sun8i_r40_ths = { + .sensor_num = 3, + .offset = 251086, + .scale = 1130, + .has_mod_clk = true, + .has_bus_clk_reset = true, + .temp_data_base = sun8i_ths_temp_data, + .calibrate 
= sun8i_h3_ths_calibrate, + .init = sun8i_h3_thermal_init, + .irq_ack = sun8i_h3_irq_ack, + .calc_temp = sun8i_ths_calc_temp, +}; + +static const struct ths_thermal_chip sun50i_a64_ths = { + .sensor_num = 3, + .offset = 260890, + .scale = 1170, + .has_mod_clk = true, + .has_bus_clk_reset = true, + .temp_data_base = sun8i_ths_temp_data, + .calibrate = sun8i_h3_ths_calibrate, + .init = sun8i_h3_thermal_init, + .irq_ack = sun8i_h3_irq_ack, + .calc_temp = sun8i_ths_calc_temp, +}; + +static const struct ths_thermal_chip sun50i_h5_ths = { + .sensor_num = 2, + .has_mod_clk = true, + .has_bus_clk_reset = true, + .temp_data_base = sun8i_ths_temp_data, + .calibrate = sun8i_h3_ths_calibrate, + .init = sun8i_h3_thermal_init, + .irq_ack = sun8i_h3_irq_ack, + .calc_temp = sun50i_h5_calc_temp, +}; + +static const struct ths_thermal_chip sun50i_h6_ths = { + .sensor_num = 2, + .has_bus_clk_reset = true, + .ft_deviation = 7000, + .offset = 187744, + .scale = 672, + .temp_data_base = sun50i_h6_ths_temp_data, + .calibrate = sun50i_h6_ths_calibrate, + .init = sun50i_h6_thermal_init, + .irq_ack = sun50i_h6_irq_ack, + .calc_temp = sun8i_ths_calc_temp, +}; + +static const struct of_device_id of_ths_match[] = { + { .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths }, + { .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths }, + { .compatible = "allwinner,sun8i-r40-ths", .data = &sun8i_r40_ths }, + { .compatible = "allwinner,sun50i-a64-ths", .data = &sun50i_a64_ths }, + { .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths }, + { .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths }, + { /* sentinel */ }, +}; +module_device_table(of, of_ths_match); + +static struct platform_driver ths_driver = { + .probe = sun8i_ths_probe, + .remove = sun8i_ths_remove, + .driver = { + .name = "sun8i-thermal", + .of_match_table = of_ths_match, + }, +}; +module_platform_driver(ths_driver); + +module_description("thermal sensor driver for allwinner soc"); 
+module_license("gpl v2");
|
Power Management
|
dccc5c3b6f30f27ed0f1bea82221e18face20bef
|
yangtao li maxime ripard mripard kernel org
|
drivers
|
thermal
| |
scsi: soc: mediatek: add header for sip service interface
|
add a common header for the sip service interface in mediatek chipsets.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add mediatek vendor implementations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'ufs']
|
['h']
| 1
| 25
| 0
|
--- diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h --- /dev/null +++ b/include/linux/soc/mediatek/mtk_sip_svc.h +/* spdx-license-identifier: gpl-2.0 */ +/* + * copyright (c) 2019 mediatek inc. + */ +#ifndef __mtk_sip_svc_h +#define __mtk_sip_svc_h + +/* error code */ +#define sip_svc_e_success 0 +#define sip_svc_e_not_supported -1 +#define sip_svc_e_invalid_params -2 +#define sip_svc_e_invalid_range -3 +#define sip_svc_e_permission_denied -4 + +#ifdef config_arm64 +#define mtk_sip_smc_convention arm_smccc_smc_64 +#else +#define mtk_sip_smc_convention arm_smccc_smc_32 +#endif + +#define mtk_sip_smc_cmd(fn_id) \ + arm_smccc_call_val(arm_smccc_fast_call, mtk_sip_smc_convention, \ + arm_smccc_owner_sip, fn_id) + +#endif
|
Storage
|
fef093ff7e9654eec77e79b8170485241f01d027
|
stanley chu
|
include
|
linux
|
mediatek, soc
|
scsi: ufs-mediatek: add device reset implementation
|
add device reset vops implementation in mediatek ufs driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add mediatek vendor implementations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'ufs']
|
['c', 'h']
| 2
| 42
| 0
|
--- diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c +#include <linux/arm-smccc.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> +#define ufs_mtk_smc(cmd, val, res) \ + arm_smccc_smc(mtk_sip_ufs_control, \ + cmd, val, 0, 0, 0, 0, 0, &(res)) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(ufs_mtk_sip_device_reset, high, res) + +static void ufs_mtk_device_reset(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_device_reset_ctrl(0, res); + + /* + * the reset signal is active low. ufs devices shall detect + * more than or equal to 1us of positive or negative rst_n + * pulse width. + * + * to be on safe side, keep the reset low for at least 10us. + */ + usleep_range(10, 15); + + ufs_mtk_device_reset_ctrl(1, res); + + /* some devices may need time to respond to rst_n */ + usleep_range(10000, 15000); + + dev_info(hba->dev, "device reset done "); +} + + .device_reset = ufs_mtk_device_reset, diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h --- a/drivers/scsi/ufs/ufs-mediatek.h +++ b/drivers/scsi/ufs/ufs-mediatek.h +#include <linux/bitops.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> + +/* + * sip commands + */ +#define mtk_sip_ufs_control mtk_sip_smc_cmd(0x276) +#define ufs_mtk_sip_device_reset bit(1) +
|
Storage
|
b0d077ed389cb75b95396886517664c2500b0c10
|
stanley chu avri altman avri altman wdc com
|
drivers
|
scsi
|
ufs
|
scsi: ufs-mediatek: introduce reference clock control
|
introduce reference clock control in mediatek chipset in order to disable it if it is not necessary by ufs device to save system power.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add mediatek vendor implementations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'ufs']
|
['c', 'h']
| 2
| 78
| 6
|
--- diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c +#define ufs_mtk_ref_clk_notify(on, res) \ + ufs_mtk_smc(ufs_mtk_sip_ref_clk_notification, on, res) + +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + unsigned long timeout; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + if (on) { + ufs_mtk_ref_clk_notify(on, res); + ufshcd_writel(hba, refclk_request, reg_ufs_refclk_ctrl); + } else { + ufshcd_writel(hba, refclk_release, reg_ufs_refclk_ctrl); + } + + /* wait for ack */ + timeout = jiffies + msecs_to_jiffies(refclk_req_timeout_ms); + do { + value = ufshcd_readl(hba, reg_ufs_refclk_ctrl); + + /* wait until ack bit equals to req bit */ + if (((value & refclk_ack) >> 1) == (value & refclk_request)) + goto out; + + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x ", value); + + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res); + + return -etimedout; + +out: + host->ref_clk_enabled = on; + if (!on) + ufs_mtk_ref_clk_notify(on, res); + + return 0; +} + - if (!on) + if (!on) { + ufs_mtk_setup_ref_clk(hba, on); + } - if (on) + if (on) { + ufs_mtk_setup_ref_clk(hba, on); + } - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + ufs_mtk_setup_ref_clk(hba, false); + } - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + ufs_mtk_setup_ref_clk(hba, true); + } diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h --- a/drivers/scsi/ufs/ufs-mediatek.h +++ b/drivers/scsi/ufs/ufs-mediatek.h +/* + * vendor specific ufshci registers + */ +#define reg_ufs_refclk_ctrl 0x144 + +/* + * ref-clk control + * + * values for register reg_ufs_refclk_ctrl + */ +#define refclk_release 0x0 +#define refclk_request bit(0) +#define 
refclk_ack bit(1) + +#define refclk_req_timeout_ms 3 + -#define mtk_sip_ufs_control mtk_sip_smc_cmd(0x276) -#define ufs_mtk_sip_device_reset bit(1) +#define mtk_sip_ufs_control mtk_sip_smc_cmd(0x276) +#define ufs_mtk_sip_device_reset bit(1) +#define ufs_mtk_sip_ref_clk_notification bit(3) + bool ref_clk_enabled;
|
Storage
|
97347214bce8d740ce4d64e22783b50384cd2e6f
|
stanley chu
|
drivers
|
scsi
|
ufs
|
scsi: ufs: export ufshcd_auto_hibern8_update for vendor usage
|
export ufshcd_auto_hibern8_update to allow vendors to use common interface to customize auto-hibernate timer.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add mediatek vendor implementations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'ufs']
|
['c', 'h']
| 3
| 19
| 20
|
--- diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c -static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) -{ - unsigned long flags; - - if (!ufshcd_is_auto_hibern8_supported(hba)) - return; - - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ahit != ahit) - hba->ahit = ahit; - spin_unlock_irqrestore(hba->host->host_lock, flags); - if (!pm_runtime_suspended(hba->dev)) { - pm_runtime_get_sync(hba->dev); - ufshcd_hold(hba, false); - ufshcd_auto_hibern8_enable(hba); - ufshcd_release(hba); - pm_runtime_put(hba->dev); - } -} - diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) +{ + unsigned long flags; + + if (!(hba->capabilities & mask_auto_hibern8_support)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ahit == ahit) + goto out_unlock; + hba->ahit = ahit; + if (!pm_runtime_suspended(hba->dev)) + ufshcd_writel(hba, hba->ahit, reg_auto_hibernate_idle_timer); +out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +} +export_symbol_gpl(ufshcd_auto_hibern8_update); + diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
|
Storage
|
ba7af5ec5126dca17cdc8cfdb6740cdfb4bad70c
|
stanley chu asutosh das asutoshd codeaurora org alim akhtar alim akhtar samsung com
|
drivers
|
scsi
|
ufs
|
scsi: ufs-mediatek: configure customized auto-hibern8 timer
|
configure customized auto-hibern8 timer in mediatek chipsets.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add mediatek vendor implementations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'ufs']
|
['c']
| 1
| 8
| 0
|
--- diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c +#include <linux/bitfield.h> + /* configure auto-hibern8 timer to 10ms */ + if (ufshcd_is_auto_hibern8_supported(hba)) { + ufshcd_auto_hibern8_update(hba, + field_prep(ufshci_ahibern8_timer_mask, 10) | + field_prep(ufshci_ahibern8_scale_mask, 3)); + } +
|
Storage
|
8588c6b032176feb5fcef8f56a1140feded5d6c4
|
stanley chu alim akhtar alim akhtar samsung com
|
drivers
|
scsi
|
ufs
|
scsi: ufs-mediatek: configure and enable clk-gating
|
enable clk-gating with customized delayed timer value in mediatek chipsets.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add mediatek vendor implementations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'ufs']
|
['c']
| 1
| 22
| 0
|
--- diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c + /* enable clock-gating */ + hba->caps |= ufshcd_cap_clk_gating; + +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + unsigned long flags; + u32 ah_ms; + + if (ufshcd_is_clkgating_allowed(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) + ah_ms = field_get(ufshci_ahibern8_timer_mask, + hba->ahit); + else + ah_ms = 10; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = ah_ms + 5; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } +} + + ufs_mtk_setup_clk_gating(hba); +
|
Storage
|
5d74e18edd7bdb1fcc35bd115af720ebfb8c5cf0
|
stanley chu alim akhtar alim akhtar samsung com
|
drivers
|
scsi
|
ufs
|
scsi: mpt3sas: update mpi headers to v02.00.57
|
update mpi headers to version 02.00.57.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['h']
| 4
| 34
| 6
|
--- diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h + * 06-24-19 02.00.55 bumped mpi2_header_version_unit + * 08-01-19 02.00.56 bumped mpi2_header_version_unit + * 10-02-19 02.00.57 bumped mpi2_header_version_unit -#define mpi2_header_version_unit (0x36) +#define mpi2_header_version_unit (0x39) +#define mpi2_ioc_state_coredump (0x50000000) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h + * 08-01-19 02.00.49 add mpi26_manpage7_flag_x2_x4_slot_info_valid + * add mpi26_iounitpage1_nvme_wrcache_shift +#define mpi26_manpage7_flag_conn_lane_use_pinout (0x00000020) +#define mpi26_manpage7_flag_x2_x4_slot_info_valid (0x00000010) -#define mpi26_iounitpage1_nvme_wrcache_enable (0x00000000) -#define mpi26_iounitpage1_nvme_wrcache_disable (0x00010000) -#define mpi26_iounitpage1_nvme_wrcache_no_change (0x00020000) +#define mpi26_iounitpage1_nvme_wrcache_shift (16) +#define mpi26_iounitpage1_nvme_wrcache_no_change (0x00000000) +#define mpi26_iounitpage1_nvme_wrcache_enable (0x00010000) +#define mpi26_iounitpage1_nvme_wrcache_disable (0x00020000) - u16 reserved2; /* 0x16 */ + u16 shutdownlatency; /* 0x16 */ + u16 vendorid; /* 0x18 */ + u16 deviceid; /* 0x1a */ + u16 subsystemvendorid; /* 0x1c */ + u16 subsystemid; /* 0x1e */ + u8 revisionid; /* 0x20 */ + u8 reserved21[3]; /* 0x21 */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h --- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h + * 06-24-19 02.06.05 whitespace adjustments to help with identifier + * checking tool. 
+ * 10-02-19 02.06.06 added mpi26_image_header_sig1_coredump + * added mpi2_flash_region_coredump +/* little-endian "dump" */ +#define mpi26_image_header_sig1_coredump (0x504d5544) +#define mpi2_flash_region_coredump (0x17) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h + * 10-02-19 02.00.38 added mpi26_iocinit_cfgflags_coredump_enable + * added mpi26_iocfacts_capability_coredump_enabled + * added mpi2_fw_download_itype_coredump + * added mpi2_fw_upload_itype_coredump +#define mpi26_iocinit_cfgflags_coredump_enable (0x0002) +#define mpi26_iocfacts_capability_coredump_enabled (0x00200000) +#define mpi2_fw_download_itype_coredump (0x17) -#define mpi2_fw_download_itype_terminate (0xff)
|
Storage
|
1ade26b616cc2da0b7277a97e3799c99bae0655b
|
sreekanth reddy
|
drivers
|
scsi
|
mpi, mpt3sas
|
scsi: mpt3sas: add support for nvme shutdown
|
introduce function _scsih_nvme_shutdown() to issue io unit control message to ioc firmware with operation code 'shutdown'. this causes ioc firmware to issue nvme shutdown commands to all nvme drives attached to it.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['h', 'c']
| 2
| 142
| 1
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +#define io_unit_control_shutdown_timeout 6 + * @shutdown_latency: nvme device's rtd3 entry latency + u16 shutdown_latency; + * @max_shutdown_latency: timeout value for nvme shutdown operation, + * which is equal that nvme drive's rtd3 entry latency + * which has reported maximum rtd3 entry latency value + * among attached nvme drives. - + u16 max_shutdown_latency; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c +/** + * _scsih_set_nvme_max_shutdown_latency - update max_shutdown_latency. + * @ioc: per adapter object + * context: this function will acquire ioc->pcie_device_lock + * + * update ioc->max_shutdown_latency to that nvme drives rtd3 entry latency + * which has reported maximum among all available nvme drives. + * minimum max_shutdown_latency will be six seconds. + */ +static void +_scsih_set_nvme_max_shutdown_latency(struct mpt3sas_adapter *ioc) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + u16 shutdown_latency = io_unit_control_shutdown_timeout; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (pcie_device->shutdown_latency) { + if (shutdown_latency < pcie_device->shutdown_latency) + shutdown_latency = + pcie_device->shutdown_latency; + } + } + ioc->max_shutdown_latency = shutdown_latency; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + + u8 update_latency = 0; + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; + + /* + * this device's rtd3 entry latency matches ioc's + * max_shutdown_latency. recalculate ioc's max_shutdown_latency + * from the available drives as current drive is getting removed. 
+ */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); + u8 update_latency = 0; + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; + + /* + * this device's rtd3 entry latency matches ioc's + * max_shutdown_latency. recalculate ioc's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); + pcie_device->shutdown_latency = + le16_to_cpu(pcie_device_pg2.shutdownlatency); + /* + * set ioc's max_shutdown_latency to drive's rtd3 entry latency + * if drive's rtd3 entry latency is greater then ioc's + * max_shutdown_latency. + */ + if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) + ioc->max_shutdown_latency = + pcie_device->shutdown_latency; + _scsih_set_nvme_max_shutdown_latency(ioc); +/** + * _scsih_nvme_shutdown - nvme shutdown notification + * @ioc: per adapter object + * + * sending iounitcontrol request with shutdown operation code to alert ioc that + * the host system is shutting down so that ioc can issue nvme shutdown to + * nvme drives attached to it. + */ +static void +_scsih_nvme_shutdown(struct mpt3sas_adapter *ioc) +{ + mpi26iounitcontrolrequest_t *mpi_request; + mpi26iounitcontrolreply_t *mpi_reply; + u16 smid; + + /* are there any nvme devices ? 
*/ + if (list_empty(&ioc->pcie_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != mpt3_cmd_not_used) { + ioc_err(ioc, "%s: scsih_cmd in use ", __func__); + goto out; + } + + ioc->scsih_cmds.status = mpt3_cmd_pending; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, + "%s: failed obtaining a smid ", __func__); + ioc->scsih_cmds.status = mpt3_cmd_not_used; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(mpi26iounitcontrolrequest_t)); + mpi_request->function = mpi2_function_io_unit_control; + mpi_request->operation = mpi26_ctrl_op_shutdown; + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + /* wait for max_shutdown_latency seconds */ + ioc_info(ioc, + "io unit control shutdown (sending), shutdown latency %d sec ", + ioc->max_shutdown_latency); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + ioc->max_shutdown_latency*hz); + + if (!(ioc->scsih_cmds.status & mpt3_cmd_complete)) { + ioc_err(ioc, "%s: timeout ", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & mpt3_cmd_reply_valid) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_info(ioc, "io unit control shutdown (complete):" + "ioc_status(0x%04x), loginfo(0x%08x) ", + le16_to_cpu(mpi_reply->iocstatus), + le32_to_cpu(mpi_reply->iocloginfo)); + } + out: + ioc->scsih_cmds.status = mpt3_cmd_not_used; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + + + _scsih_nvme_shutdown(ioc); + /* host waits for minimum of six seconds */ + ioc->max_shutdown_latency = io_unit_control_shutdown_timeout; + _scsih_nvme_shutdown(ioc);
|
Storage
|
d3f623ae8e0323ca434ee9029100312a8be37773
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: renamed _base_after_reset_handler function
|
renamed _base_after_reset_handler function to _base_clear_outstanding_commands so that it can be used in multiple scenarios with suitable name which matches with the operation it does.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c', 'h']
| 4
| 29
| 14
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c - * _base_after_reset_handler - after reset handler + * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands -static void _base_after_reset_handler(struct mpt3sas_adapter *ioc) +static void +_base_clear_outstanding_mpt_commands(struct mpt3sas_adapter *ioc) - mpt3sas_scsih_after_reset_handler(ioc); - mpt3sas_ctl_after_reset_handler(ioc); - dtmprintk(ioc, ioc_info(ioc, "%s: mpt3_ioc_after_reset ", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding mpt cmds ", __func__)); +/** + * _base_clear_outstanding_commands - clear all outstanding commands + * @ioc: per adapter object + */ +static void _base_clear_outstanding_commands(struct mpt3sas_adapter *ioc) +{ + mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + mpt3sas_ctl_clear_outstanding_ioctls(ioc); + _base_clear_outstanding_mpt_commands(ioc); +} + - _base_after_reset_handler(ioc); + _base_clear_outstanding_commands(ioc); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h -void mpt3sas_scsih_after_reset_handler(struct mpt3sas_adapter *ioc); +void mpt3sas_scsih_clear_outstanding_scsi_tm_commands( + struct mpt3sas_adapter *ioc); -void mpt3sas_ctl_after_reset_handler(struct mpt3sas_adapter *ioc); +void mpt3sas_ctl_clear_outstanding_ioctls(struct mpt3sas_adapter *ioc); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) + * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd. 
-void mpt3sas_ctl_after_reset_handler(struct mpt3sas_adapter *ioc) +void mpt3sas_ctl_clear_outstanding_ioctls(struct mpt3sas_adapter *ioc) - dtmprintk(ioc, ioc_info(ioc, "%s: mpt3_ioc_after_reset ", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding ioctl cmd ", __func__)); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c - * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih) + * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding + * scsi & tm cmds. -mpt3sas_scsih_after_reset_handler(struct mpt3sas_adapter *ioc) +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct mpt3sas_adapter *ioc) - dtmprintk(ioc, ioc_info(ioc, "%s: mpt3_ioc_after_reset ", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding scsi & tm cmds ", __func__));
|
Storage
|
36c6c7f75b0998f5a4b5c79cbb94ee1ab4ee35c0
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: add support iocs new state named coredump
|
new feature is added in hba firmware where it copies the collected firmware logs in flash region named 'coredump' whenever hba firmware faults occur.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c', 'h']
| 2
| 118
| 4
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +static int +_base_wait_on_iocstate(struct mpt3sas_adapter *ioc, + u32 ioc_state, int timeout); +/** + * mpt3sas_base_coredump_info - verbose translation of firmware coredump state + * @ioc: per adapter object + * @fault_code: fault code + * + * return nothing. + */ +void +mpt3sas_base_coredump_info(struct mpt3sas_adapter *ioc, u16 fault_code) +{ + ioc_err(ioc, "coredump_state(0x%04x)! ", fault_code); +} + +/** + * mpt3sas_base_wait_for_coredump_completion - wait until coredump + * completes or times out + * @ioc: per adapter object + * + * returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_wait_for_coredump_completion(struct mpt3sas_adapter *ioc, + const char *caller) +{ + u8 timeout = (ioc->manu_pg11.coredumptosec) ? + ioc->manu_pg11.coredumptosec : + mpt3sas_default_coredump_timeout_seconds; + + int ioc_state = _base_wait_on_iocstate(ioc, mpi2_ioc_state_fault, + timeout); + + if (ioc_state) + ioc_err(ioc, + "%s: coredump timed out. (ioc_state=0x%x) ", + caller, ioc_state); + else + ioc_info(ioc, + "%s: coredump completed. 
(ioc_state=0x%x) ", + caller, ioc_state); + + return ioc_state; +} + - if ((doorbell & mpi2_ioc_state_mask) == mpi2_ioc_state_fault) - mpt3sas_base_fault_info(ioc , doorbell); - else { + if ((doorbell & mpi2_ioc_state_mask) == mpi2_ioc_state_fault) { + mpt3sas_base_fault_info(ioc, doorbell & + mpi2_doorbell_data_mask); + } else if ((doorbell & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) { + mpt3sas_base_coredump_info(ioc, doorbell & + mpi2_doorbell_data_mask); + } else { + } else if ((ioc_state & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) { + mpt3sas_base_coredump_info(ioc, ioc_state & + mpi2_doorbell_data_mask); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + rc = _base_diag_reset(ioc); + if (count && current_state == mpi2_ioc_state_coredump) + break; + if ((doorbell & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) { + mpt3sas_base_coredump_info(ioc, doorbell); + return -efault; + } + unsigned long flags; + + if (r != 0) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + /* + * wait for ioc state coredump to clear only during + * hba initialization & release time. + */ + if ((ioc_state & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump && (ioc->is_driver_loading == 1 || + ioc->fault_reset_work_q == null)) { + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_base_coredump_info(ioc, ioc_state); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + } + } else if ((ioc_state & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) { + ioc_info(ioc, + "%s: skipping the diag reset here. (ioc_state=0x%x) ", + __func__, ioc_state); + return -efault; + /* + * set the flag to enable coredump state feature in ioc firmware. 
+ */ + mpi_request.configurationflags |= + cpu_to_le16(mpi26_iocinit_cfgflags_coredump_enable); + + if ((ioc_state & mpi2_ioc_state_mask) == mpi2_ioc_state_coredump) { + mpt3sas_base_coredump_info(ioc, ioc_state & + mpi2_doorbell_data_mask); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + goto issue_diag_reset; + } + diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +/* coredump: default timeout */ +#define mpt3sas_default_coredump_timeout_seconds (15) /*15 seconds*/ + - __le32 reserved10[2]; /* 54h - 5bh */ + u8 coredumptosec; /* 54h */ + u8 reserved8; /* 55h */ + u16 reserved9; /* 56h */ + __le32 reserved10; /* 58h */ +void mpt3sas_base_coredump_info(struct mpt3sas_adapter *ioc, u16 fault_code); +int mpt3sas_base_wait_for_coredump_completion(struct mpt3sas_adapter *ioc, + const char *caller);
|
Storage
|
e8c2307e6a690db9aaff84153b2857c5c4dfd969
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: handle coredump state from watchdog thread
|
watchdog thread polls for ioc state every 1 second. if it detects that ioc state is in coredump state then it immediately stops the ios and also clears the outstanding commands issued to the hba firmware and then it will poll for ioc state to be out of coredump state and once it detects that ioc state is changed from coredump state to fault state (or) coredumptosec number of seconds are elapsed then it will issue host reset operation and moves the ioc state to operational state and resumes the ios.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c', 'h']
| 3
| 91
| 7
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +static void +_base_mask_interrupts(struct mpt3sas_adapter *ioc); +static void +_base_clear_outstanding_commands(struct mpt3sas_adapter *ioc); - if (ioc->shost_recovery || ioc->pci_error_recovery) + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery) - ioc->non_operational_loop = 0; + if ((doorbell & mpi2_ioc_state_mask) == mpi2_ioc_state_coredump) { + u8 timeout = (ioc->manu_pg11.coredumptosec) ? + ioc->manu_pg11.coredumptosec : + mpt3sas_default_coredump_timeout_seconds; + + timeout /= (fault_polling_interval/1000); + + if (ioc->ioc_coredump_loop == 0) { + mpt3sas_base_coredump_info(ioc, + doorbell & mpi2_doorbell_data_mask); + /* do not accept any ios and disable the interrupts */ + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + _base_mask_interrupts(ioc); + _base_clear_outstanding_commands(ioc); + } + + ioc_info(ioc, "%s: coredump loop %d.", + __func__, ioc->ioc_coredump_loop); + + /* wait until coredump completes or times out */ + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + goto rearm_timer; + } + } + if (ioc->ioc_coredump_loop) { + if ((doorbell & mpi2_ioc_state_mask) != mpi2_ioc_state_coredump) + ioc_err(ioc, "%s: coredump completed. loopcount: %d", + __func__, ioc->ioc_coredump_loop); + else + ioc_err(ioc, "%s: coredump timed out. 
loopcount: %d", + __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = mpt3sas_coredump_loop_done; + } + ioc->non_operational_loop = 0; - if ((doorbell & mpi2_ioc_state_mask) == mpi2_ioc_state_fault) + if ((doorbell & mpi2_ioc_state_mask) == mpi2_ioc_state_fault) { + } else if ((doorbell & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) + mpt3sas_base_coredump_info(ioc, doorbell & + mpi2_doorbell_data_mask); + ioc->ioc_coredump_loop = 0; - mpt3sas_base_coredump_info(ioc, ioc_state & - mpi2_doorbell_data_mask); - mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + /* + * if host reset is invoked while watch dog thread is waiting + * for ioc state to be changed to fault state then driver has + * to wait here for coredump state to clear otherwise reset + * will be issued to the fw and fw move the ioc state to + * reset state without copying the fw logs to coredump region. + */ + if (ioc->ioc_coredump_loop != mpt3sas_coredump_loop_done) { + mpt3sas_base_coredump_info(ioc, ioc_state & + mpi2_doorbell_data_mask); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + } + ioc->ioc_coredump_loop = 0; - if ((ioc_state & mpi2_ioc_state_mask) == mpi2_ioc_state_fault) + if ((ioc_state & mpi2_ioc_state_mask) == mpi2_ioc_state_fault || + (ioc_state & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +#define mpt3sas_coredump_loop_done (0xff) + * @ioc_coredump_loop: will have non-zero value when fw is in coredump state + u8 ioc_coredump_loop; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c + } else if ((ioc_state & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) { + mpt3sas_base_coredump_info(ioc, ioc_state & + mpi2_doorbell_data_mask); + rc = 
mpt3sas_base_hard_reset_handler(ioc, force_big_hammer); + return (!rc) ? success : failed; + u32 doorbell; + if (ioc->hba_mpi_version_belonged != mpi2_version) { + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & mpi2_ioc_state_mask) == + mpi2_ioc_state_fault) { + mpt3sas_base_fault_info(ioc, + doorbell & mpi2_doorbell_data_mask); + } else if ((doorbell & mpi2_ioc_state_mask) == + mpi2_ioc_state_coredump) { + mpt3sas_base_coredump_info(ioc, + doorbell & mpi2_doorbell_data_mask); + } + }
|
Storage
|
fce0aa08792b3ae725395fa25d44507dee0b603b
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: print in which path firmware fault occurred
|
when firmware fault occurs then print in which path firmware fault has occurred. this will be useful while debugging the firmware fault issues.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c', 'h']
| 3
| 25
| 17
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c - mpt3sas_base_coredump_info(ioc, + mpt3sas_print_coredump_info(ioc, - mpt3sas_base_fault_info(ioc, doorbell & + mpt3sas_print_fault_code(ioc, doorbell & - mpt3sas_base_coredump_info(ioc, doorbell & + mpt3sas_print_coredump_info(ioc, doorbell & - mpt3sas_base_fault_info(ioc, doorbell & + mpt3sas_print_fault_code(ioc, doorbell & - mpt3sas_base_coredump_info(ioc, doorbell & + mpt3sas_print_coredump_info(ioc, doorbell & - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & - mpt3sas_base_coredump_info(ioc, ioc_state & + mpt3sas_print_coredump_info(ioc, ioc_state & - mpt3sas_base_fault_info(ioc , doorbell); + mpt3sas_print_fault_code(ioc, doorbell); - mpt3sas_base_coredump_info(ioc, doorbell); + mpt3sas_print_coredump_info(ioc, doorbell); - mpt3sas_base_coredump_info(ioc, ioc_state); + mpt3sas_print_coredump_info(ioc, ioc_state); - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & - mpt3sas_base_coredump_info(ioc, ioc_state & + mpt3sas_print_coredump_info(ioc, ioc_state & diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +#define mpt3sas_print_fault_code(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s ", ioc->name, __func__); \ + mpt3sas_base_fault_info(ioc, fault_code); } while (0) + +#define mpt3sas_print_coredump_info(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s ", ioc->name, __func__); \ + mpt3sas_base_coredump_info(ioc, fault_code); } while (0) + diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ 
b/drivers/scsi/mpt3sas/mpt3sas_scsih.c - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & - mpt3sas_base_coredump_info(ioc, ioc_state & + mpt3sas_print_coredump_info(ioc, ioc_state & - mpt3sas_base_fault_info(ioc, + mpt3sas_print_fault_code(ioc, - mpt3sas_base_coredump_info(ioc, + mpt3sas_print_coredump_info(ioc,
|
Storage
|
c59777189433621392f6f5c82ecfc62f00a1232d
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: optimize mpt3sas driver logging
|
this improves mpt3sas driver default debug information collection and allows for a higher percentage of issues being able to be resolved with a first-time data capture. however, this improvement to balance the amount of debug data captured with the performance of driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c']
| 5
| 110
| 78
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c + ioc_info(ioc, + "combined replyqueue is off, enabling msix load balance "); - dfailprintk(ioc, - ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!! ", - r)); + ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!! ", r); - ioc_err(ioc, "unable to map adapter memory! or resource not found "); + ioc_err(ioc, + "unable to map adapter memory! or resource not found "); - dfailprintk(ioc, - ioc_warn(ioc, "allocation for reply post register index failed!!! ")); + ioc_err(ioc, + "allocation for replypostregisterindex failed! "); - ioc_err(ioc, "failure at %s:%d/%s()! ", + ioc_err(ioc, + "memory allocation for fwpkg data failed at %s:%d/%s()! ", - dinitprintk(ioc, - ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d) ", - ioc->max_sges_in_main_message, - ioc->max_sges_in_chain_message, - ioc->shost->sg_tablesize, - ioc->chains_needed_per_io)); + ioc_info(ioc, + "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), " + "sge_per_io(%d), chains_per_io(%d) ", + ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, + ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); - dinitprintk(ioc, - ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kb) ", - ioc->request, ioc->hba_queue_depth, - ioc->request_sz, - (ioc->hba_queue_depth * ioc->request_sz) / 1024)); + ioc_info(ioc, + "request pool(0x%p) - dma(0x%llx): " + "depth(%d), frame_size(%d), pool_size(%d kb) ", + ioc->request, (unsigned long long) ioc->request_dma, + ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); - dinitprintk(ioc, - ioc_info(ioc, "request pool: dma(0x%llx) ", - (unsigned long long)ioc->request_dma)); - dinitprintk(ioc, - ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kb) ", 
- ioc->sense, ioc->scsiio_depth, - scsi_sense_buffersize, sz / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "sense_dma(0x%llx) ", - (unsigned long long)ioc->sense_dma)); + ioc_info(ioc, + "sense pool(0x%p)- dma(0x%llx): depth(%d)," + "element_size(%d), pool_size(%d kb) ", + ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth, + scsi_sense_buffersize, sz / 1024); + - dinitprintk(ioc, - ioc_info(ioc, "config page(0x%p): size(%d) ", - ioc->config_page, ioc->config_page_sz)); - dinitprintk(ioc, - ioc_info(ioc, "config_page_dma(0x%llx) ", - (unsigned long long)ioc->config_page_dma)); + + ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d) ", + ioc->config_page, (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); - pr_info(" [0x%02x]:%08x ", i*4, + ioc_info(ioc, " [0x%02x]:%08x ", i*4, - pr_info(" offset:data "); + ioc_info(ioc, " offset:data "); - pr_info(" [0x%02x]:%08x ", i*4, + ioc_info(ioc, " [0x%02x]:%08x ", i*4, - if (count++ > 20) + if (count++ > 20) { + ioc_info(ioc, + "stop writing magic sequence after 20 retries "); + } - if (host_diagnostic == 0xffffffff) + if (host_diagnostic == 0xffffffff) { + ioc_info(ioc, + "invalid host diagnostic register value "); + } - dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active! ")); + ioc_info(ioc, "unexpected doorbell active! "); - dfailprintk(ioc, - ioc_info(ioc, "allocation for cpu_msix_table failed!!! ")); + ioc_info(ioc, "allocation for cpu_msix_table failed!!! "); - dfailprintk(ioc, - ioc_info(ioc, "allocation for reply_post_host_index failed!!! ")); + ioc_info(ioc, "allocation for reply_post_host_index failed!!! "); - dtmprintk(ioc, - ioc_info(ioc, "%s: %s ", - __func__, r == 0 ? "success" : "failed")); + ioc_info(ioc, "%s: %s ", __func__, r == 0 ? 
"success" : "failed"); diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c - if (!(ioc->logging_level & mpt_debug_config)) - return; - - _config_display_some_debug(ioc, smid, "config_done", mpi_reply); + if (ioc->logging_level & mpt_debug_config) + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); - _config_display_some_debug(ioc, smid, "config_request", null); + if (ioc->logging_level & mpt_debug_config) + _config_display_some_debug(ioc, smid, "config_request", null); + if (!(ioc->logging_level & mpt_debug_config)) + _config_display_some_debug(ioc, + smid, "config_request", null); + if (!(ioc->logging_level & mpt_debug_config)) + _config_display_some_debug(ioc, + smid, "config_request", null); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + if (!(ioc->logging_level & mpt_debug_config)) + _config_display_some_debug(ioc, + smid, "config_request", null); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + if (!(ioc->logging_level & mpt_debug_config)) + _config_display_some_debug(ioc, + smid, "config_request", null); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); + if (!(ioc->logging_level & mpt_debug_config)) + _config_display_some_debug(ioc, + smid, "config_request", null); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c + case mpi2_function_toolbox: + desc = "toolbox"; + break; + case mpi2_function_nvme_encapsulated: + desc = "nvme_encapsulated"; + break; - ioc_info(ioc, "host reset: %s ", ((!retval) ? 
"success" : "failed")); + ioc_info(ioc, + "ioctl: host reset: %s ", ((!retval) ? "success" : "failed")); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c - return scsi_change_queue_depth(sdev, qdepth); + scsi_change_queue_depth(sdev, qdepth); + sdev_printk(kern_info, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d) ", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); + return sdev->queue_depth; - sdev_printk(kern_info, scmd->device, - "attempting task abort! scmd(%p) ", scmd); + sdev_printk(kern_info, scmd->device, "attempting task abort!" + "scmd(0x%p), outstanding for %u ms & timeout %u ms ", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scmd->request->timeout / hz) * 1000); - "device been deleted! scmd(%p) ", scmd); + "device been deleted! scmd(0x%p) ", scmd); + sdev_printk(kern_info, scmd->device, "no reference found at " + "driver, assuming scmd(0x%p) might have completed ", scmd); - sdev_printk(kern_info, scmd->device, "task abort: %s scmd(%p) ", + sdev_printk(kern_info, scmd->device, "task abort: %s scmd(0x%p) ", - "attempting device reset! scmd(%p) ", scmd); + "attempting device reset! scmd(0x%p) ", scmd); - "device been deleted! scmd(%p) ", scmd); + "device been deleted! scmd(0x%p) ", scmd); - sdev_printk(kern_info, scmd->device, "device reset: %s scmd(%p) ", + sdev_printk(kern_info, scmd->device, "device reset: %s scmd(0x%p) ", - starget_printk(kern_info, starget, "attempting target reset! scmd(%p) ", - scmd); + starget_printk(kern_info, starget, + "attempting target reset! scmd(0x%p) ", scmd); - starget_printk(kern_info, starget, "target been deleted! scmd(%p) ", - scmd); + starget_printk(kern_info, starget, + "target been deleted! 
scmd(0x%p) ", scmd); - starget_printk(kern_info, starget, "target reset: %s scmd(%p) ", + starget_printk(kern_info, starget, "target reset: %s scmd(0x%p) ", - ioc_info(ioc, "attempting host reset! scmd(%p) ", scmd); + ioc_info(ioc, "attempting host reset! scmd(0x%p) ", scmd); - ioc_info(ioc, "host reset: %s scmd(%p) ", + ioc_info(ioc, "host reset: %s scmd(0x%p) ", - + ioc_info(ioc, "issuing hard reset as part of os resume "); + ioc_info(ioc, "issuing hard reset as part of pci slot reset "); diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c - if ((ioc->logging_level & mpt_debug_transport)) - dev_printk(kern_info, &rphy->dev, - "add: handle(0x%04x), sas_addr(0x%016llx) ", - handle, (unsigned long long) - mpt3sas_port->remote_identify.sas_address); + dev_info(&rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx) ", handle, + (unsigned long long)mpt3sas_port->remote_identify.sas_address); + + ioc_info(ioc, "%s: removed: sas_addr(0x%016llx) ", + __func__, (unsigned long long)sas_address);
|
Storage
|
5b061980e362820894d7d884370b37005bed23ec
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: print function name in which cmd timed out
|
print the function name in which mpt command got timed out. this will facilitate debugging in which path corresponding mpt command got timeout in first failure instance of log itself.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c', 'h']
| 5
| 38
| 33
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(mpi2sasiounitcontrolrequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, + mpi_request, sizeof(mpi2sasiounitcontrolrequest_t)/4, + issue_reset); - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(mpi2seprequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(mpi2seprequest_t)/4, issue_reset); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h +#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ +do { ioc_err(ioc, "in func: %s ", __func__); \ + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \ + status, mpi_request, sz); } while (0) + diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c + u8 issue_reset = 0; - mpt3sas_base_check_cmd_timeout(ioc, - ioc->config_cmds.status, mpi_request, - sizeof(mpi2configrequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->config_cmds.status, mpi_request, + sizeof(mpi2configrequest_t)/4, issue_reset); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - karg.data_sge_offset); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + karg.data_sge_offset, issue_reset); - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - 
sizeof(mpi2diagbufferpostrequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(mpi2diagbufferpostrequest_t)/4, issue_reset); + u8 reset_needed = 0; + - *issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(mpi2diagreleaserequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(mpi2diagreleaserequest_t)/4, reset_needed); + *issue_reset = reset_needed; - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(mpi2diagbufferpostrequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(mpi2diagbufferpostrequest_t)/4, issue_reset); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c + u8 issue_reset = 0; - if (mpt3sas_base_check_cmd_timeout(ioc, - ioc->tm_cmds.status, mpi_request, - sizeof(mpi2scsitaskmanagementrequest_t)/4)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof(mpi2scsitaskmanagementrequest_t)/4, issue_reset); + if (issue_reset) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->scsih_cmds.status, mpi_request, - sizeof(mpi2raidactionrequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(mpi2raidactionrequest_t)/4, issue_reset);
|
Storage
|
c6bdb6a10892d1130638a5e28d1523a813e45d5e
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: remove usage of device_busy counter
|
remove usage of device_busy counter from driver. instead of device_busy counter now driver uses 'nr_active' counter of request_queue to get the number of inflight request for a lun.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['c']
| 1
| 17
| 1
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c +/** + * _base_sdev_nr_inflight_request -get number of inflight requests + * of a request queue. + * @q: request_queue object + * + * returns number of inflight request of a request queue. + */ +inline unsigned long +_base_sdev_nr_inflight_request(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0]; + + return atomic_read(&hctx->nr_active); +} + + - if (atomic_read(&scmd->device->device_busy) > + if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
|
Storage
|
c50ed99cd56ee725d9e14dffec8e8f1641b8ca30
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: mpt3sas: update drive version to 33.100.00.00
|
update mpt3sas driver version from 32.100.00.00 to 33.100.00.00
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enhancements of phase14
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'mpt3sas']
|
['h']
| 1
| 2
| 2
|
--- diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h -#define mpt3sas_driver_version "32.100.00.00" -#define mpt3sas_major_version 32 +#define mpt3sas_driver_version "33.100.00.00" +#define mpt3sas_major_version 33
|
Storage
|
c53cf10ef6d9faeee9baa1fab824139c6f10a134
|
sreekanth reddy
|
drivers
|
scsi
|
mpt3sas
|
scsi: esp_scsi: add support for fsc chip
|
the fsc (ncr53cf9x-2 / sym53cf9x-2) has a different family code than qlogic or emulex parts. this caused it to be detected as a fas100a.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for fsc chip
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['scsi ', 'esp_scsi']
|
['c', 'h']
| 2
| 28
| 16
|
--- diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c - u8 family_code, version; - - version = esp_read8(esp_uid); - family_code = (version & 0xf8) >> 3; - if (family_code == 0x02) + u8 family_code = esp_family(esp_read8(esp_uid)); + + if (family_code == esp_uid_f236) { - else if (family_code == 0x0a) + } else if (family_code == esp_uid_hme) { - else + } else if (family_code == esp_uid_fsc) { + esp->rev = fsc; + /* enable active negation */ + esp_write8(esp_config4_rade, esp_cfg4); + } else { + } - /* fast 236, am53c974 or hme */ + case fsc: + "53cf9x-2", diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h -/* esp config register 4 read-write, found only on am53c974 chips */ -#define esp_config4_rade 0x04 /* active negation */ -#define esp_config4_rae 0x08 /* active negation on req and ack */ -#define esp_config4_pwd 0x20 /* reduced power feature */ -#define esp_config4_ge0 0x40 /* glitch eater bit 0 */ -#define esp_config4_ge1 0x80 /* glitch eater bit 1 */ +/* esp config register 4 read-write */ +#define esp_config4_bbte 0x01 /* back-to-back transfers (fsc) */ +#define esp_congig4_test 0x02 /* transfer counter test mode (fsc) */ +#define esp_config4_rade 0x04 /* active negation (am53c974/fsc) */ +#define esp_config4_rae 0x08 /* act. 
negation req/ack (am53c974) */ +#define esp_config4_pwd 0x20 /* reduced power feature (am53c974) */ +#define esp_config4_ge0 0x40 /* glitch eater bit 0 (am53c974) */ +#define esp_config4_ge1 0x80 /* glitch eater bit 1 (am53c974) */ +#define esp_uid_fam 0xf8 /* esp family bitmask */ + +#define esp_family(uid) (((uid) & esp_uid_fam) >> 3) + +/* values for the esp family bits */ -#define esp_uid_rev 0x07 /* esp revision */ -#define esp_uid_fam 0xf8 /* esp family */ +#define esp_uid_hme 0x0a /* fas hme */ +#define esp_uid_fsc 0x14 /* ncr/symbios logic 53cf9x-2 */ + fsc, /* ncr/symbios logic 53cf9x-2 */
|
Storage
|
bd40726153c646ed28f830e22a27f5e831b77017
|
kars de jong
|
drivers
|
scsi
| |
pata_pcmia: add sandisk high (>8g) cf card to supported list
|
add new sandisk high (>8g) cf cards to the pata_pcmcia driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add sandisk high (>8g) cf card to supported list
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['pata_pcmia']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c + pcmcia_device_manf_card(0x00f1, 0x0101), /* sandisk high (>8g) cfa */
|
Storage
|
73e4eab0a8dacd0ad6921a1717eb38a959b53f09
|
christian zigotzky
|
drivers
|
ata
| |
media: meson: vdec: add g12a platform
|
add support for the g12a platform by: - adding the g12a codec support, here mpeg1 & mpeg2 - getting the new hevcf clock for the upcoming hevc/vp9 decoding support
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add g12a platform
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media', 'meson', 'vdec']
|
['c', 'h']
| 4
| 44
| 2
|
- adding the g12a codec support, here mpeg1 & mpeg2 - getting the new hevcf clock for the upcoming hevc/vp9 decoding support --- diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c + { .compatible = "amlogic,g12a-vdec", + .data = &vdec_platform_g12a }, + of_id = of_match_node(vdec_dt_match, dev->of_node); + core->platform = of_id->data; + + if (core->platform->revision == vdec_revision_g12a) { + core->vdec_hevcf_clk = devm_clk_get(dev, "vdec_hevcf"); + if (is_err(core->vdec_hevcf_clk)) + return -eprobe_defer; + } + - of_id = of_match_node(vdec_dt_match, dev->of_node); - core->platform = of_id->data; diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h --- a/drivers/staging/media/meson/vdec/vdec.h +++ b/drivers/staging/media/meson/vdec/vdec.h + struct clk *vdec_hevcf_clk; diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c --- a/drivers/staging/media/meson/vdec/vdec_platform.c +++ b/drivers/staging/media/meson/vdec/vdec_platform.c +static const struct amvdec_format vdec_formats_g12a[] = { + { + .pixfmt = v4l2_pix_fmt_mpeg1, + .min_buffers = 8, + .max_buffers = 8, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_mpeg12_ops, + .firmware_path = "meson/vdec/gxl_mpeg12.bin", + .pixfmts_cap = { v4l2_pix_fmt_nv12m, v4l2_pix_fmt_yuv420m, 0 }, + }, { + .pixfmt = v4l2_pix_fmt_mpeg2, + .min_buffers = 8, + .max_buffers = 8, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_mpeg12_ops, + .firmware_path = "meson/vdec/gxl_mpeg12.bin", + .pixfmts_cap = { v4l2_pix_fmt_nv12m, v4l2_pix_fmt_yuv420m, 0 }, + }, +}; + + +const struct vdec_platform vdec_platform_g12a = { + .formats = vdec_formats_g12a, + .num_formats = array_size(vdec_formats_g12a), + .revision = 
vdec_revision_g12a, +}; diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h --- a/drivers/staging/media/meson/vdec/vdec_platform.h +++ b/drivers/staging/media/meson/vdec/vdec_platform.h + vdec_revision_g12a, +extern const struct vdec_platform vdec_platform_g12a;
|
Drivers in the Staging area
|
8299c65339797a3b751f7f869842ed22a433fcb1
|
maxime jourdan
|
drivers
|
staging
|
media, meson, vdec
|
media: meson: vdec: add sm1 platform
|
add support for the amlogic sm1 platform for the current mpeg1 & mpeg2 support.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add sm1 platform
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media', 'meson', 'vdec']
|
['c', 'h']
| 4
| 59
| 7
|
--- diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c + { .compatible = "amlogic,sm1-vdec", + .data = &vdec_platform_sm1 }, - if (core->platform->revision == vdec_revision_g12a) { + if (core->platform->revision == vdec_revision_g12a || + core->platform->revision == vdec_revision_sm1) { diff --git a/drivers/staging/media/meson/vdec/vdec_1.c b/drivers/staging/media/meson/vdec/vdec_1.c --- a/drivers/staging/media/meson/vdec/vdec_1.c +++ b/drivers/staging/media/meson/vdec/vdec_1.c + #define gen_pwr_vdec_1_sm1 (bit(1)) - regmap_write(core->regmap_ao, ao_rti_gen_pwr_iso0, 0xc0); + if (core->platform->revision == vdec_revision_sm1) + regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_iso0, + gen_pwr_vdec_1_sm1, gen_pwr_vdec_1_sm1); + else + regmap_write(core->regmap_ao, ao_rti_gen_pwr_iso0, 0xc0); - regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_sleep0, - gen_pwr_vdec_1, gen_pwr_vdec_1); + if (core->platform->revision == vdec_revision_sm1) + regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_sleep0, + gen_pwr_vdec_1_sm1, gen_pwr_vdec_1_sm1); + else + regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_sleep0, + gen_pwr_vdec_1, gen_pwr_vdec_1); - regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_sleep0, - gen_pwr_vdec_1, 0); + if (core->platform->revision == vdec_revision_sm1) + regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_sleep0, + gen_pwr_vdec_1_sm1, 0); + else + regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_sleep0, + gen_pwr_vdec_1, 0); - regmap_write(core->regmap_ao, ao_rti_gen_pwr_iso0, 0); + if (core->platform->revision == vdec_revision_sm1) + regmap_update_bits(core->regmap_ao, ao_rti_gen_pwr_iso0, + gen_pwr_vdec_1_sm1, 0); + else + regmap_write(core->regmap_ao, ao_rti_gen_pwr_iso0, 0); diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c --- 
a/drivers/staging/media/meson/vdec/vdec_platform.c +++ b/drivers/staging/media/meson/vdec/vdec_platform.c +static const struct amvdec_format vdec_formats_sm1[] = { + { + .pixfmt = v4l2_pix_fmt_mpeg1, + .min_buffers = 8, + .max_buffers = 8, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_mpeg12_ops, + .firmware_path = "meson/vdec/gxl_mpeg12.bin", + .pixfmts_cap = { v4l2_pix_fmt_nv12m, v4l2_pix_fmt_yuv420m, 0 }, + }, { + .pixfmt = v4l2_pix_fmt_mpeg2, + .min_buffers = 8, + .max_buffers = 8, + .max_width = 1920, + .max_height = 1080, + .vdec_ops = &vdec_1_ops, + .codec_ops = &codec_mpeg12_ops, + .firmware_path = "meson/vdec/gxl_mpeg12.bin", + .pixfmts_cap = { v4l2_pix_fmt_nv12m, v4l2_pix_fmt_yuv420m, 0 }, + }, +}; + + +const struct vdec_platform vdec_platform_sm1 = { + .formats = vdec_formats_sm1, + .num_formats = array_size(vdec_formats_sm1), + .revision = vdec_revision_sm1, +}; diff --git a/drivers/staging/media/meson/vdec/vdec_platform.h b/drivers/staging/media/meson/vdec/vdec_platform.h --- a/drivers/staging/media/meson/vdec/vdec_platform.h +++ b/drivers/staging/media/meson/vdec/vdec_platform.h + vdec_revision_sm1, +extern const struct vdec_platform vdec_platform_sm1;
|
Drivers in the Staging area
|
fee586a20f48bf646118dc553993b3b0d98a5f06
|
neil armstrong
|
drivers
|
staging
|
media, meson, vdec
|
media: hantro: support color conversion via post-processing
|
the hantro g1 decoder is able to enable a post-processor on the decoding pipeline, which can be used to perform scaling and color conversion.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
support color conversion via post-processing
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media', 'hantro']
|
['h', 'c', 'makefile']
| 11
| 343
| 11
|
--- diff --git a/drivers/staging/media/hantro/makefile b/drivers/staging/media/hantro/makefile --- a/drivers/staging/media/hantro/makefile +++ b/drivers/staging/media/hantro/makefile + hantro_postproc.o \ diff --git a/drivers/staging/media/hantro/hantro.h b/drivers/staging/media/hantro/hantro.h --- a/drivers/staging/media/hantro/hantro.h +++ b/drivers/staging/media/hantro/hantro.h + * @postproc_fmts: post-processor formats. + * @num_postproc_fmts: number of post-processor formats. + * @postproc_regs: &struct hantro_postproc_regs pointer + const struct hantro_fmt *postproc_fmts; + unsigned int num_postproc_fmts; + const struct hantro_postproc_regs *postproc_regs; + * @postproc: post-processing context. + struct hantro_postproc_ctx postproc; +struct hantro_postproc_regs { + struct hantro_reg pipeline_en; + struct hantro_reg max_burst; + struct hantro_reg clk_gate; + struct hantro_reg out_swap32; + struct hantro_reg out_endian; + struct hantro_reg out_luma_base; + struct hantro_reg input_width; + struct hantro_reg input_height; + struct hantro_reg output_width; + struct hantro_reg output_height; + struct hantro_reg input_fmt; + struct hantro_reg output_fmt; + struct hantro_reg orig_width; + struct hantro_reg display_width; +}; + -static inline void hantro_reg_write(struct hantro_dev *vpu, - const struct hantro_reg *reg, - u32 val) +static inline u32 vdpu_read_mask(struct hantro_dev *vpu, + const struct hantro_reg *reg, + u32 val) - vdpu_write_relaxed(vpu, v, reg->base); + return v; +} + +static inline void hantro_reg_write(struct hantro_dev *vpu, + const struct hantro_reg *reg, + u32 val) +{ + vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base); +} + +static inline void hantro_reg_write_s(struct hantro_dev *vpu, + const struct hantro_reg *reg, + u32 val) +{ + vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base); +static inline bool +hantro_needs_postproc(struct hantro_ctx *ctx, const struct hantro_fmt *fmt) +{ + return fmt->fourcc != 
v4l2_pix_fmt_nv12; +} + +static inline dma_addr_t +hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb) +{ + if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) + return ctx->postproc.dec_q[vb->index].dma; + return vb2_dma_contig_plane_dma_addr(vb, 0); +} + +void hantro_postproc_disable(struct hantro_ctx *ctx); +void hantro_postproc_enable(struct hantro_ctx *ctx); +void hantro_postproc_free(struct hantro_ctx *ctx); +int hantro_postproc_alloc(struct hantro_ctx *ctx); + diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c --- a/drivers/staging/media/hantro/hantro_drv.c +++ b/drivers/staging/media/hantro/hantro_drv.c - return vb2_dma_contig_plane_dma_addr(buf, 0); + return hantro_get_dec_buf_addr(ctx, buf); + + if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) + hantro_postproc_enable(ctx); + else + hantro_postproc_disable(ctx); diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c --- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c +++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c - dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf); diff --git a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c --- a/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c +++ b/drivers/staging/media/hantro/hantro_g1_mpeg2_dec.c - addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + addr = hantro_get_dec_buf_addr(ctx, dst_buf); diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h --- a/drivers/staging/media/hantro/hantro_g1_regs.h +++ b/drivers/staging/media/hantro/hantro_g1_regs.h +#define g1_swreg(nr) ((nr) * 4) + +/* post-processor registers. 
*/ +#define g1_reg_pp_interrupt g1_swreg(60) +#define g1_reg_pp_ready_irq bit(12) +#define g1_reg_pp_irq bit(8) +#define g1_reg_pp_irq_dis bit(4) +#define g1_reg_pp_pipeline_en bit(1) +#define g1_reg_pp_external_trigger bit(0) +#define g1_reg_pp_dev_config g1_swreg(61) +#define g1_reg_pp_axi_rd_id(v) (((v) << 24) & genmask(31, 24)) +#define g1_reg_pp_axi_wr_id(v) (((v) << 16) & genmask(23, 16)) +#define g1_reg_pp_inswap32_e(v) ((v) ? bit(10) : 0) +#define g1_reg_pp_data_disc_e(v) ((v) ? bit(9) : 0) +#define g1_reg_pp_clk_gate_e(v) ((v) ? bit(8) : 0) +#define g1_reg_pp_in_endian(v) ((v) ? bit(7) : 0) +#define g1_reg_pp_out_endian(v) ((v) ? bit(6) : 0) +#define g1_reg_pp_outswap32_e(v) ((v) ? bit(5) : 0) +#define g1_reg_pp_max_burst(v) (((v) << 0) & genmask(4, 0)) +#define g1_reg_pp_in_luma_base g1_swreg(63) +#define g1_reg_pp_in_cb_base g1_swreg(64) +#define g1_reg_pp_in_cr_base g1_swreg(65) +#define g1_reg_pp_out_luma_base g1_swreg(66) +#define g1_reg_pp_out_chroma_base g1_swreg(67) +#define g1_reg_pp_contrast_adjust g1_swreg(68) +#define g1_reg_pp_color_conversion g1_swreg(69) +#define g1_reg_pp_color_conversion0 g1_swreg(70) +#define g1_reg_pp_color_conversion1 g1_swreg(71) +#define g1_reg_pp_input_size g1_swreg(72) +#define g1_reg_pp_input_size_height(v) (((v) << 9) & genmask(16, 9)) +#define g1_reg_pp_input_size_width(v) (((v) << 0) & genmask(8, 0)) +#define g1_reg_pp_scaling0 g1_swreg(79) +#define g1_reg_pp_padd_r(v) (((v) << 23) & genmask(27, 23)) +#define g1_reg_pp_padd_g(v) (((v) << 18) & genmask(22, 18)) +#define g1_reg_pp_rangemap_y(v) ((v) ? bit(31) : 0) +#define g1_reg_pp_rangemap_c(v) ((v) ? bit(30) : 0) +#define g1_reg_pp_ycbcr_range(v) ((v) ? bit(29) : 0) +#define g1_reg_pp_rgb_16(v) ((v) ? 
bit(28) : 0) +#define g1_reg_pp_scaling1 g1_swreg(80) +#define g1_reg_pp_padd_b(v) (((v) << 18) & genmask(22, 18)) +#define g1_reg_pp_mask_r g1_swreg(82) +#define g1_reg_pp_mask_g g1_swreg(83) +#define g1_reg_pp_mask_b g1_swreg(84) +#define g1_reg_pp_control g1_swreg(85) +#define g1_reg_pp_control_in_fmt(v) (((v) << 29) & genmask(31, 29)) +#define g1_reg_pp_control_out_fmt(v) (((v) << 26) & genmask(28, 26)) +#define g1_reg_pp_control_out_height(v) (((v) << 15) & genmask(25, 15)) +#define g1_reg_pp_control_out_width(v) (((v) << 4) & genmask(14, 4)) +#define g1_reg_pp_mask1_orig_width g1_swreg(88) +#define g1_reg_pp_orig_width(v) (((v) << 23) & genmask(31, 23)) +#define g1_reg_pp_display_width g1_swreg(92) +#define g1_reg_pp_fuse g1_swreg(99) + diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c --- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c +++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c - dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0); + dst_dma = hantro_get_dec_buf_addr(ctx, &vb2_dst->vb2_buf); diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h --- a/drivers/staging/media/hantro/hantro_hw.h +++ b/drivers/staging/media/hantro/hantro_hw.h + * @attrs: attributes of the dma mapping. + unsigned long attrs; +/** + * struct hantro_postproc_ctx + * + * @dec_q: references buffers, in decoder format. + */ +struct hantro_postproc_ctx { + struct hantro_aux_buf dec_q[vb2_max_frame]; +}; + +extern const struct hantro_postproc_regs hantro_g1_postproc_regs; + diff --git a/drivers/staging/media/hantro/hantro_postproc.c b/drivers/staging/media/hantro/hantro_postproc.c --- /dev/null +++ b/drivers/staging/media/hantro/hantro_postproc.c +// spdx-license-identifier: gpl-2.0 +/* + * hantro g1 post-processor support + * + * copyright (c) 2019 collabora, ltd. 
+ */ + +#include <linux/dma-mapping.h> +#include <linux/types.h> + +#include "hantro.h" +#include "hantro_hw.h" +#include "hantro_g1_regs.h" + +#define hantro_pp_reg_write(vpu, reg_name, val) \ +{ \ + hantro_reg_write((vpu), \ + &((vpu)->variant->postproc_regs->reg_name), \ + (val)); \ +} + +#define hantro_pp_reg_write_s(vpu, reg_name, val) \ +{ \ + hantro_reg_write_s((vpu), \ + &((vpu)->variant->postproc_regs->reg_name), \ + (val)); \ +} + +#define vpu_pp_in_yuyv 0x0 +#define vpu_pp_in_nv12 0x1 +#define vpu_pp_in_yuv420 0x2 +#define vpu_pp_in_yuv240_tiled 0x5 +#define vpu_pp_out_rgb 0x0 +#define vpu_pp_out_yuyv 0x3 + +const struct hantro_postproc_regs hantro_g1_postproc_regs = { + .pipeline_en = {g1_reg_pp_interrupt, 1, 0x1}, + .max_burst = {g1_reg_pp_dev_config, 0, 0x1f}, + .clk_gate = {g1_reg_pp_dev_config, 1, 0x1}, + .out_swap32 = {g1_reg_pp_dev_config, 5, 0x1}, + .out_endian = {g1_reg_pp_dev_config, 6, 0x1}, + .out_luma_base = {g1_reg_pp_out_luma_base, 0, 0xffffffff}, + .input_width = {g1_reg_pp_input_size, 0, 0x1ff}, + .input_height = {g1_reg_pp_input_size, 9, 0x1ff}, + .output_width = {g1_reg_pp_control, 4, 0x7ff}, + .output_height = {g1_reg_pp_control, 15, 0x7ff}, + .input_fmt = {g1_reg_pp_control, 29, 0x7}, + .output_fmt = {g1_reg_pp_control, 26, 0x7}, + .orig_width = {g1_reg_pp_mask1_orig_width, 23, 0x1ff}, + .display_width = {g1_reg_pp_display_width, 0, 0xfff}, +}; + +void hantro_postproc_enable(struct hantro_ctx *ctx) +{ + struct hantro_dev *vpu = ctx->dev; + struct vb2_v4l2_buffer *dst_buf; + u32 src_pp_fmt, dst_pp_fmt; + dma_addr_t dst_dma; + + /* turn on pipeline mode. must be done first. 
*/ + hantro_pp_reg_write_s(vpu, pipeline_en, 0x1); + + src_pp_fmt = vpu_pp_in_nv12; + + switch (ctx->vpu_dst_fmt->fourcc) { + case v4l2_pix_fmt_yuyv: + dst_pp_fmt = vpu_pp_out_yuyv; + break; + default: + warn(1, "output format %d not supported by the post-processor, this wasn't expected.", + ctx->vpu_dst_fmt->fourcc); + dst_pp_fmt = 0; + break; + } + + dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); + dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + + hantro_pp_reg_write(vpu, clk_gate, 0x1); + hantro_pp_reg_write(vpu, out_endian, 0x1); + hantro_pp_reg_write(vpu, out_swap32, 0x1); + hantro_pp_reg_write(vpu, max_burst, 16); + hantro_pp_reg_write(vpu, out_luma_base, dst_dma); + hantro_pp_reg_write(vpu, input_width, mb_width(ctx->dst_fmt.width)); + hantro_pp_reg_write(vpu, input_height, mb_height(ctx->dst_fmt.height)); + hantro_pp_reg_write(vpu, input_fmt, src_pp_fmt); + hantro_pp_reg_write(vpu, output_fmt, dst_pp_fmt); + hantro_pp_reg_write(vpu, output_width, ctx->dst_fmt.width); + hantro_pp_reg_write(vpu, output_height, ctx->dst_fmt.height); + hantro_pp_reg_write(vpu, orig_width, mb_width(ctx->dst_fmt.width)); + hantro_pp_reg_write(vpu, display_width, ctx->dst_fmt.width); +} + +void hantro_postproc_free(struct hantro_ctx *ctx) +{ + struct hantro_dev *vpu = ctx->dev; + unsigned int i; + + for (i = 0; i < vb2_max_frame; ++i) { + struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i]; + + if (priv->cpu) { + dma_free_attrs(vpu->dev, priv->size, priv->cpu, + priv->dma, priv->attrs); + priv->cpu = null; + } + } +} + +int hantro_postproc_alloc(struct hantro_ctx *ctx) +{ + struct hantro_dev *vpu = ctx->dev; + struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; + struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q; + unsigned int num_buffers = cap_queue->num_buffers; + unsigned int i, buf_size; + + buf_size = ctx->dst_fmt.plane_fmt[0].sizeimage; + + for (i = 0; i < num_buffers; ++i) { + struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i]; + + /* + * the buffers 
on this queue are meant as intermediate + * buffers for the decoder, so no mapping is needed. + */ + priv->attrs = dma_attr_no_kernel_mapping; + priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma, + gfp_kernel, priv->attrs); + if (!priv->cpu) + return -enomem; + priv->size = buf_size; + } + return 0; +} + +void hantro_postproc_disable(struct hantro_ctx *ctx) +{ + struct hantro_dev *vpu = ctx->dev; + + hantro_pp_reg_write_s(vpu, pipeline_en, 0x0); +} diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c +static const struct hantro_fmt * +hantro_get_postproc_formats(const struct hantro_ctx *ctx, + unsigned int *num_fmts) +{ + if (hantro_is_encoder_ctx(ctx)) { + *num_fmts = 0; + return null; + } + + *num_fmts = ctx->dev->variant->num_postproc_fmts; + return ctx->dev->variant->postproc_fmts; +} + + formats = hantro_get_postproc_formats(ctx, &num_fmts); + for (i = 0; i < num_fmts; i++) + if (formats[i].fourcc == fourcc) + return &formats[i]; ++j; + + /* + * enumerate post-processed formats. as per the specification, + * we enumerated these formats after natively decoded formats + * as a hint for applications on what's the preferred fomat. + */ + if (!capture) + return -einval; + formats = hantro_get_postproc_formats(ctx, &num_fmts); + for (i = 0; i < num_fmts; i++) { + if (j == f->index) { + fmt = &formats[i]; + f->pixelformat = fmt->fourcc; + return 0; + } + ++j; + } + - * frames. + * frames and only if the format is non-post-processed nv12. 
- if (ctx->vpu_src_fmt->fourcc == v4l2_pix_fmt_h264_slice) + if (ctx->vpu_src_fmt->fourcc == v4l2_pix_fmt_h264_slice && + !hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) - if (ctx->codec_ops->init) + if (ctx->codec_ops->init) { + if (ret) + return ret; + } + + if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) { + ret = hantro_postproc_alloc(ctx); + if (ret) + goto err_codec_exit; + } + return ret; +err_codec_exit: + if (ctx->codec_ops && ctx->codec_ops->exit) + ctx->codec_ops->exit(ctx); + hantro_postproc_free(ctx); diff --git a/drivers/staging/media/hantro/rk3288_vpu_hw.c b/drivers/staging/media/hantro/rk3288_vpu_hw.c --- a/drivers/staging/media/hantro/rk3288_vpu_hw.c +++ b/drivers/staging/media/hantro/rk3288_vpu_hw.c +static const struct hantro_fmt rk3288_vpu_postproc_fmts[] = { + { + .fourcc = v4l2_pix_fmt_yuyv, + .codec_mode = hantro_mode_none, + }, +}; + + .postproc_fmts = rk3288_vpu_postproc_fmts, + .num_postproc_fmts = array_size(rk3288_vpu_postproc_fmts), + .postproc_regs = &hantro_g1_postproc_regs,
|
Drivers in the Staging area
|
8c2d66b036c778480787e395097e0d04bc383db4
|
ezequiel garcia
|
drivers
|
staging
|
hantro, media
|
media: staging: phy-rockchip-dphy-rx0: add rockchip mipi synopsys dphy rx0 driver
|
add driver for rockchip mipi synopsys dphy driver
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['c', 'kconfig', 'makefile', 'todo']
| 6
| 412
| 0
|
--- diff --git a/drivers/staging/media/kconfig b/drivers/staging/media/kconfig --- a/drivers/staging/media/kconfig +++ b/drivers/staging/media/kconfig +source "drivers/staging/media/phy-rockchip-dphy-rx0/kconfig" + diff --git a/drivers/staging/media/makefile b/drivers/staging/media/makefile --- a/drivers/staging/media/makefile +++ b/drivers/staging/media/makefile +obj-$(config_phy_rockchip_dphy_rx0) += phy-rockchip-dphy-rx0/ diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/kconfig b/drivers/staging/media/phy-rockchip-dphy-rx0/kconfig --- /dev/null +++ b/drivers/staging/media/phy-rockchip-dphy-rx0/kconfig +# spdx-license-identifier: gpl-2.0-only + +config phy_rockchip_dphy_rx0 + tristate "rockchip mipi synopsys dphy rx0 driver" + depends on (arch_rockchip || compile_test) && of + select generic_phy_mipi_dphy + select generic_phy + help + enable this to support the rockchip mipi synopsys dphy rx0 + associated to the rockchip isp module present in rk3399 socs. + + to compile this driver as a module, choose m here: the module + will be called phy-rockchip-dphy-rx0. diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/makefile b/drivers/staging/media/phy-rockchip-dphy-rx0/makefile --- /dev/null +++ b/drivers/staging/media/phy-rockchip-dphy-rx0/makefile +# spdx-license-identifier: gpl-2.0 +obj-$(config_phy_rockchip_dphy_rx0) += phy-rockchip-dphy-rx0.o diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/todo b/drivers/staging/media/phy-rockchip-dphy-rx0/todo --- /dev/null +++ b/drivers/staging/media/phy-rockchip-dphy-rx0/todo +the main reason for keeping this in staging is because the only driver +that uses this is rkisp1, which is also in staging. it should be moved together +with rkisp1. + +please cc patches to linux media <linux-media@vger.kernel.org> and +helen koike <helen.koike@collabora.com>. 
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c --- /dev/null +++ b/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip mipi synopsys dphy rx0 driver + * + * copyright (c) 2019 collabora, ltd. + * + * based on: + * + * drivers/media/platform/rockchip/isp1/mipi_dphy_sy.c + * in https://chromium.googlesource.com/chromiumos/third_party/kernel, + * chromeos-4.4 branch. + * + * copyright (c) 2017 fuzhou rockchip electronics co., ltd. + * jacob chen <jacob2.chen@rock-chips.com> + * shunqian zheng <zhengsq@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-mipi-dphy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define rk3399_grf_soc_con9 0x6224 +#define rk3399_grf_soc_con21 0x6254 +#define rk3399_grf_soc_con22 0x6258 +#define rk3399_grf_soc_con23 0x625c +#define rk3399_grf_soc_con24 0x6260 +#define rk3399_grf_soc_con25 0x6264 +#define rk3399_grf_soc_status1 0xe2a4 + +#define clock_lane_hs_rx_control 0x34 +#define lane0_hs_rx_control 0x44 +#define lane1_hs_rx_control 0x54 +#define lane2_hs_rx_control 0x84 +#define lane3_hs_rx_control 0x94 +#define lanes_ths_settle_control 0x75 +#define ths_settle_counter_threshold 0x04 + +struct hsfreq_range { + u16 range_h; + u8 cfg_bit; +}; + +static const struct hsfreq_range rk3399_mipidphy_hsfreq_ranges[] = { + { 89, 0x00 }, { 99, 0x10 }, { 109, 0x20 }, { 129, 0x01 }, + { 139, 0x11 }, { 149, 0x21 }, { 169, 0x02 }, { 179, 0x12 }, + { 199, 0x22 }, { 219, 0x03 }, { 239, 0x13 }, { 249, 0x23 }, + { 269, 0x04 }, { 299, 0x14 }, { 329, 0x05 }, { 359, 0x15 }, + { 399, 0x25 }, { 449, 0x06 }, { 499, 0x16 }, { 549, 0x07 }, + { 599, 0x17 }, { 649, 0x08 }, { 699, 
0x18 }, { 749, 0x09 }, + { 799, 0x19 }, { 849, 0x29 }, { 899, 0x39 }, { 949, 0x0a }, + { 999, 0x1a }, { 1049, 0x2a }, { 1099, 0x3a }, { 1149, 0x0b }, + { 1199, 0x1b }, { 1249, 0x2b }, { 1299, 0x3b }, { 1349, 0x0c }, + { 1399, 0x1c }, { 1449, 0x2c }, { 1500, 0x3c } +}; + +static const char * const rk3399_mipidphy_clks[] = { + "dphy-ref", + "dphy-cfg", + "grf", +}; + +enum dphy_reg_id { + grf_dphy_rx0_turndisable = 0, + grf_dphy_rx0_forcerxmode, + grf_dphy_rx0_forcetxstopmode, + grf_dphy_rx0_enable, + grf_dphy_rx0_testclr, + grf_dphy_rx0_testclk, + grf_dphy_rx0_testen, + grf_dphy_rx0_testdin, + grf_dphy_rx0_turnrequest, + grf_dphy_rx0_testdout, + grf_dphy_tx0_turndisable, + grf_dphy_tx0_forcerxmode, + grf_dphy_tx0_forcetxstopmode, + grf_dphy_tx0_turnrequest, + grf_dphy_tx1rx1_turndisable, + grf_dphy_tx1rx1_forcerxmode, + grf_dphy_tx1rx1_forcetxstopmode, + grf_dphy_tx1rx1_enable, + grf_dphy_tx1rx1_masterslavez, + grf_dphy_tx1rx1_basedir, + grf_dphy_tx1rx1_enableclk, + grf_dphy_tx1rx1_turnrequest, + grf_dphy_rx1_src_sel, + /* rk3288 only */ + grf_con_disable_isp, + grf_con_isp_dphy_sel, + grf_dsi_csi_testbus_sel, + grf_dvp_v18sel, + /* below is for rk3399 only */ + grf_dphy_rx0_clk_inv_sel, + grf_dphy_rx1_clk_inv_sel, +}; + +struct dphy_reg { + u16 offset; + u8 mask; + u8 shift; +}; + +#define phy_reg(_offset, _width, _shift) \ + { .offset = _offset, .mask = bit(_width) - 1, .shift = _shift, } + +static const struct dphy_reg rk3399_grf_dphy_regs[] = { + [grf_dphy_rx0_turnrequest] = phy_reg(rk3399_grf_soc_con9, 4, 0), + [grf_dphy_rx0_clk_inv_sel] = phy_reg(rk3399_grf_soc_con9, 1, 10), + [grf_dphy_rx1_clk_inv_sel] = phy_reg(rk3399_grf_soc_con9, 1, 11), + [grf_dphy_rx0_enable] = phy_reg(rk3399_grf_soc_con21, 4, 0), + [grf_dphy_rx0_forcerxmode] = phy_reg(rk3399_grf_soc_con21, 4, 4), + [grf_dphy_rx0_forcetxstopmode] = phy_reg(rk3399_grf_soc_con21, 4, 8), + [grf_dphy_rx0_turndisable] = phy_reg(rk3399_grf_soc_con21, 4, 12), + [grf_dphy_tx0_forcerxmode] = 
phy_reg(rk3399_grf_soc_con22, 4, 0), + [grf_dphy_tx0_forcetxstopmode] = phy_reg(rk3399_grf_soc_con22, 4, 4), + [grf_dphy_tx0_turndisable] = phy_reg(rk3399_grf_soc_con22, 4, 8), + [grf_dphy_tx0_turnrequest] = phy_reg(rk3399_grf_soc_con22, 4, 12), + [grf_dphy_tx1rx1_enable] = phy_reg(rk3399_grf_soc_con23, 4, 0), + [grf_dphy_tx1rx1_forcerxmode] = phy_reg(rk3399_grf_soc_con23, 4, 4), + [grf_dphy_tx1rx1_forcetxstopmode] = phy_reg(rk3399_grf_soc_con23, 4, 8), + [grf_dphy_tx1rx1_turndisable] = phy_reg(rk3399_grf_soc_con23, 4, 12), + [grf_dphy_tx1rx1_turnrequest] = phy_reg(rk3399_grf_soc_con24, 4, 0), + [grf_dphy_rx1_src_sel] = phy_reg(rk3399_grf_soc_con24, 1, 4), + [grf_dphy_tx1rx1_basedir] = phy_reg(rk3399_grf_soc_con24, 1, 5), + [grf_dphy_tx1rx1_enableclk] = phy_reg(rk3399_grf_soc_con24, 1, 6), + [grf_dphy_tx1rx1_masterslavez] = phy_reg(rk3399_grf_soc_con24, 1, 7), + [grf_dphy_rx0_testdin] = phy_reg(rk3399_grf_soc_con25, 8, 0), + [grf_dphy_rx0_testen] = phy_reg(rk3399_grf_soc_con25, 1, 8), + [grf_dphy_rx0_testclk] = phy_reg(rk3399_grf_soc_con25, 1, 9), + [grf_dphy_rx0_testclr] = phy_reg(rk3399_grf_soc_con25, 1, 10), + [grf_dphy_rx0_testdout] = phy_reg(rk3399_grf_soc_status1, 8, 0), +}; + +struct rk_dphy_drv_data { + const char * const *clks; + unsigned int num_clks; + const struct hsfreq_range *hsfreq_ranges; + unsigned int num_hsfreq_ranges; + const struct dphy_reg *regs; +}; + +struct rk_dphy { + struct device *dev; + struct regmap *grf; + struct clk_bulk_data *clks; + + const struct rk_dphy_drv_data *drv_data; + struct phy_configure_opts_mipi_dphy config; + + u8 hsfreq; +}; + +static inline void rk_dphy_write_grf(struct rk_dphy *priv, + unsigned int index, u8 value) +{ + const struct dphy_reg *reg = &priv->drv_data->regs[index]; + /* update high word */ + unsigned int val = (value << reg->shift) | + (reg->mask << (reg->shift + 16)); + + if (warn_on(!reg->offset)) + return; + regmap_write(priv->grf, reg->offset, val); +} + +static void rk_dphy_write(struct rk_dphy 
*priv, u8 test_code, u8 test_data) +{ + rk_dphy_write_grf(priv, grf_dphy_rx0_testdin, test_code); + rk_dphy_write_grf(priv, grf_dphy_rx0_testen, 1); + /* + * with the falling edge on testclk, the testdin[7:0] signal content + * is latched internally as the current test code. test data is + * programmed internally by rising edge on testclk. + * this code assumes that testclk is already 1. + */ + rk_dphy_write_grf(priv, grf_dphy_rx0_testclk, 0); + rk_dphy_write_grf(priv, grf_dphy_rx0_testen, 0); + rk_dphy_write_grf(priv, grf_dphy_rx0_testdin, test_data); + rk_dphy_write_grf(priv, grf_dphy_rx0_testclk, 1); +} + +static void rk_dphy_enable(struct rk_dphy *priv) +{ + rk_dphy_write_grf(priv, grf_dphy_rx0_forcerxmode, 0); + rk_dphy_write_grf(priv, grf_dphy_rx0_forcetxstopmode, 0); + + /* disable lane turn around, which is ignored in receive mode */ + rk_dphy_write_grf(priv, grf_dphy_rx0_turnrequest, 0); + rk_dphy_write_grf(priv, grf_dphy_rx0_turndisable, 0xf); + + rk_dphy_write_grf(priv, grf_dphy_rx0_enable, + genmask(priv->config.lanes - 1, 0)); + + /* dphy start */ + rk_dphy_write_grf(priv, grf_dphy_rx0_testclk, 1); + rk_dphy_write_grf(priv, grf_dphy_rx0_testclr, 1); + usleep_range(100, 150); + rk_dphy_write_grf(priv, grf_dphy_rx0_testclr, 0); + usleep_range(100, 150); + + /* set clock lane */ + /* hs hsfreq_range & lane 0 settle bypass */ + rk_dphy_write(priv, clock_lane_hs_rx_control, 0); + /* hs rx control of lane0 */ + rk_dphy_write(priv, lane0_hs_rx_control, priv->hsfreq << 1); + /* hs rx control of lane1 */ + rk_dphy_write(priv, lane1_hs_rx_control, priv->hsfreq << 1); + /* hs rx control of lane2 */ + rk_dphy_write(priv, lane2_hs_rx_control, priv->hsfreq << 1); + /* hs rx control of lane3 */ + rk_dphy_write(priv, lane3_hs_rx_control, priv->hsfreq << 1); + /* hs rx data lanes settle state time control */ + rk_dphy_write(priv, lanes_ths_settle_control, + ths_settle_counter_threshold); + + /* normal operation */ + rk_dphy_write(priv, 0x0, 0); +} + +static int 
rk_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + struct rk_dphy *priv = phy_get_drvdata(phy); + const struct rk_dphy_drv_data *drv_data = priv->drv_data; + struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy; + unsigned int hsfreq = 0; + unsigned int i; + u64 data_rate_mbps; + int ret; + + /* pass with phy_mipi_dphy_get_default_config (with pixel rate?) */ + ret = phy_mipi_dphy_config_validate(config); + if (ret) + return ret; + + data_rate_mbps = div_u64(config->hs_clk_rate, 1000 * 1000); + + dev_dbg(priv->dev, "lanes %d - data_rate_mbps %llu ", + config->lanes, data_rate_mbps); + for (i = 0; i < drv_data->num_hsfreq_ranges; i++) { + if (drv_data->hsfreq_ranges[i].range_h >= data_rate_mbps) { + hsfreq = drv_data->hsfreq_ranges[i].cfg_bit; + break; + } + } + if (!hsfreq) + return -einval; + + priv->hsfreq = hsfreq; + priv->config = *config; + return 0; +} + +static int rk_dphy_power_on(struct phy *phy) +{ + struct rk_dphy *priv = phy_get_drvdata(phy); + int ret; + + ret = clk_bulk_enable(priv->drv_data->num_clks, priv->clks); + if (ret) + return ret; + + rk_dphy_enable(priv); + + return 0; +} + +static int rk_dphy_power_off(struct phy *phy) +{ + struct rk_dphy *priv = phy_get_drvdata(phy); + + rk_dphy_write_grf(priv, grf_dphy_rx0_enable, 0); + clk_bulk_disable(priv->drv_data->num_clks, priv->clks); + return 0; +} + +static int rk_dphy_init(struct phy *phy) +{ + struct rk_dphy *priv = phy_get_drvdata(phy); + + return clk_bulk_prepare(priv->drv_data->num_clks, priv->clks); +} + +static int rk_dphy_exit(struct phy *phy) +{ + struct rk_dphy *priv = phy_get_drvdata(phy); + + clk_bulk_unprepare(priv->drv_data->num_clks, priv->clks); + return 0; +} + +static const struct phy_ops rk_dphy_ops = { + .power_on = rk_dphy_power_on, + .power_off = rk_dphy_power_off, + .init = rk_dphy_init, + .exit = rk_dphy_exit, + .configure = rk_dphy_configure, + .owner = this_module, +}; + +static const struct rk_dphy_drv_data rk3399_mipidphy_drv_data = { + 
.clks = rk3399_mipidphy_clks, + .num_clks = array_size(rk3399_mipidphy_clks), + .hsfreq_ranges = rk3399_mipidphy_hsfreq_ranges, + .num_hsfreq_ranges = array_size(rk3399_mipidphy_hsfreq_ranges), + .regs = rk3399_grf_dphy_regs, +}; + +static const struct of_device_id rk_dphy_dt_ids[] = { + { + .compatible = "rockchip,rk3399-mipi-dphy-rx0", + .data = &rk3399_mipidphy_drv_data, + }, + {} +}; +module_device_table(of, rk_dphy_dt_ids); + +static int rk_dphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct rk_dphy_drv_data *drv_data; + struct phy_provider *phy_provider; + const struct of_device_id *of_id; + struct rk_dphy *priv; + struct phy *phy; + unsigned int i; + int ret; + + if (!dev->parent || !dev->parent->of_node) + return -enodev; + + priv = devm_kzalloc(dev, sizeof(*priv), gfp_kernel); + if (!priv) + return -enomem; + priv->dev = dev; + + priv->grf = syscon_node_to_regmap(dev->parent->of_node); + if (is_err(priv->grf)) { + dev_err(dev, "can't find grf syscon "); + return -enodev; + } + + of_id = of_match_device(rk_dphy_dt_ids, dev); + if (!of_id) + return -einval; + + drv_data = of_id->data; + priv->drv_data = drv_data; + priv->clks = devm_kcalloc(&pdev->dev, drv_data->num_clks, + sizeof(*priv->clks), gfp_kernel); + if (!priv->clks) + return -enomem; + for (i = 0; i < drv_data->num_clks; i++) + priv->clks[i].id = drv_data->clks[i]; + ret = devm_clk_bulk_get(&pdev->dev, drv_data->num_clks, priv->clks); + if (ret) + return ret; + + phy = devm_phy_create(dev, np, &rk_dphy_ops); + if (is_err(phy)) { + dev_err(dev, "failed to create phy "); + return ptr_err(phy); + } + phy_set_drvdata(phy, priv); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return ptr_err_or_zero(phy_provider); +} + +static struct platform_driver rk_dphy_driver = { + .probe = rk_dphy_probe, + .driver = { + .name = "rockchip-mipi-dphy-rx0", + .of_match_table = rk_dphy_dt_ids, + }, +}; 
+module_platform_driver(rk_dphy_driver); + +module_author("ezequiel garcia <ezequiel@collabora.com>"); +module_description("rockchip mipi synopsys dphy rx0 driver"); +module_license("dual mit/gpl");
|
Drivers in the Staging area
|
32abcc4491c62906e2023b7fc43cffc06064b2f0
|
ezequiel garcia laurent pinchart laurent pinchart ideasonboard com
|
drivers
|
staging
|
media, phy-rockchip-dphy-rx0
|
media: staging: rkisp1: add rockchip isp1 base driver
|
add base driver for rockchip image signal processing v1 unit, with isp subdevice and sensor biddings.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['c', 'kconfig', 'makefile', 'h']
| 9
| 3,124
| 0
|
--- diff --git a/drivers/staging/media/kconfig b/drivers/staging/media/kconfig --- a/drivers/staging/media/kconfig +++ b/drivers/staging/media/kconfig +source "drivers/staging/media/rkisp1/kconfig" + diff --git a/drivers/staging/media/makefile b/drivers/staging/media/makefile --- a/drivers/staging/media/makefile +++ b/drivers/staging/media/makefile +obj-$(config_video_rockchip_isp1) += rkisp1/ diff --git a/drivers/staging/media/rkisp1/kconfig b/drivers/staging/media/rkisp1/kconfig --- /dev/null +++ b/drivers/staging/media/rkisp1/kconfig +# spdx-license-identifier: gpl-2.0-only + +config video_rockchip_isp1 + tristate "rockchip image signal processing v1 unit driver" + depends on video_v4l2 && video_v4l2_subdev_api + depends on arch_rockchip || compile_test + select videobuf2_dma_contig + select videobuf2_vmalloc + select v4l2_fwnode + select phy_rockchip_dphy_rx0 + default n + help + enable this to support the image signal processing (isp) module + present in rk3399 socs. + + to compile this driver as a module, choose m here: the module + will be called rockchip-isp1. diff --git a/drivers/staging/media/rkisp1/makefile b/drivers/staging/media/rkisp1/makefile --- /dev/null +++ b/drivers/staging/media/rkisp1/makefile +obj-$(config_video_rockchip_isp1) += rockchip-isp1.o +rockchip-isp1-objs += rkisp1-common.o \ + rkisp1-dev.o \ + rkisp1-isp.o diff --git a/drivers/staging/media/rkisp1/rkisp1-common.c b/drivers/staging/media/rkisp1/rkisp1-common.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-common.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - common definitions + * + * copyright (c) 2019 collabora, ltd. 
+ */ + +#include <media/v4l2-rect.h> + +#include "rkisp1-common.h" + +static const struct v4l2_rect rkisp1_sd_min_crop = { + .width = rkisp1_isp_min_width, + .height = rkisp1_isp_min_height, + .top = 0, + .left = 0, +}; + +void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop, + const struct v4l2_rect *bounds) +{ + v4l2_rect_set_min_size(crop, &rkisp1_sd_min_crop); + v4l2_rect_map_inside(crop, bounds); +} + +void rkisp1_sd_adjust_crop(struct v4l2_rect *crop, + const struct v4l2_mbus_framefmt *bounds) +{ + struct v4l2_rect crop_bounds = { + .left = 0, + .top = 0, + .width = bounds->width, + .height = bounds->height, + }; + + rkisp1_sd_adjust_crop_rect(crop, &crop_bounds); +} diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-common.h +/* spdx-license-identifier: (gpl-2.0+ or mit) */ +/* + * rockchip isp1 driver - common definitions + * + * copyright (c) 2019 collabora, ltd. + * + * based on rockchip isp1 driver by rockchip electronics co., ltd. + * copyright (c) 2017 rockchip electronics co., ltd. 
+ */ + +#ifndef _rkisp1_common_h +#define _rkisp1_common_h + +#include <linux/clk.h> +#include <linux/mutex.h> +#include <media/media-device.h> +#include <media/media-entity.h> +#include <media/v4l2-ctrls.h> +#include <media/v4l2-device.h> +#include <media/videobuf2-v4l2.h> + +#include "rkisp1-regs.h" + +#define rkisp1_isp_max_width 4032 +#define rkisp1_isp_max_height 3024 +#define rkisp1_isp_min_width 32 +#define rkisp1_isp_min_height 32 + +#define rkisp1_rsz_mp_src_max_width 4416 +#define rkisp1_rsz_mp_src_max_height 3312 +#define rkisp1_rsz_sp_src_max_width 1920 +#define rkisp1_rsz_sp_src_max_height 1920 +#define rkisp1_rsz_src_min_width 32 +#define rkisp1_rsz_src_min_height 16 + +#define rkisp1_default_width 800 +#define rkisp1_default_height 600 + +#define rkisp1_driver_name "rkisp1" +#define rkisp1_bus_info "platform:" rkisp1_driver_name + +#define rkisp1_max_bus_clk 8 + +enum rkisp1_fmt_pix_type { + rkisp1_fmt_yuv, + rkisp1_fmt_rgb, + rkisp1_fmt_bayer, + rkisp1_fmt_jpeg, +}; + +enum rkisp1_fmt_raw_pat_type { + rkisp1_raw_rggb = 0, + rkisp1_raw_grbg, + rkisp1_raw_gbrg, + rkisp1_raw_bggr, +}; + +enum rkisp1_isp_pad { + rkisp1_isp_pad_sink_video, + rkisp1_isp_pad_sink_params, + rkisp1_isp_pad_source_video, + rkisp1_isp_pad_source_stats, + rkisp1_isp_pad_max +}; + +/* + * struct rkisp1_sensor_async - sensor information + * @mbus: media bus configuration + */ +struct rkisp1_sensor_async { + struct v4l2_async_subdev asd; + struct v4l2_mbus_config mbus; + unsigned int lanes; + struct v4l2_subdev *sd; + struct v4l2_ctrl *pixel_rate_ctrl; + struct phy *dphy; +}; + +/* + * struct rkisp1_isp - isp sub-device + * + * see cropping regions of isp in rkisp1.c for details + * @sink_frm: input size, don't have to be equal to sensor size + * @sink_fmt: input format + * @sink_crop: crop for sink pad + * @src_fmt: output format + * @src_crop: output size + * + * @is_dphy_errctrl_disabled : if dphy errctrl is disabled (avoid endless interrupt) + * @frame_sequence: used to 
synchronize frame_id between video devices. + * @quantization: output quantization + */ +struct rkisp1_isp { + struct v4l2_subdev sd; + struct media_pad pads[rkisp1_isp_pad_max]; + struct v4l2_subdev_pad_config pad_cfg[rkisp1_isp_pad_max]; + const struct rkisp1_isp_mbus_info *sink_fmt; + const struct rkisp1_isp_mbus_info *src_fmt; + bool is_dphy_errctrl_disabled; + atomic_t frame_sequence; +}; + +struct rkisp1_vdev_node { + struct vb2_queue buf_queue; + struct mutex vlock; /* ioctl serialization mutex */ + struct video_device vdev; + struct media_pad pad; +}; + +struct rkisp1_buffer { + struct vb2_v4l2_buffer vb; + struct list_head queue; + union { + u32 buff_addr[video_max_planes]; + void *vaddr[video_max_planes]; + }; +}; + +struct rkisp1_dummy_buffer { + void *vaddr; + dma_addr_t dma_addr; + u32 size; +}; + +struct rkisp1_device; + +struct rkisp1_debug { + struct dentry *debugfs_dir; + unsigned long data_loss; + unsigned long pic_size_error; + unsigned long mipi_error; +}; + +/* + * struct rkisp1_device - isp platform device + * @base_addr: base register address + * @active_sensor: sensor in-use, set when streaming on + * @isp: isp sub-device + */ +struct rkisp1_device { + void __iomem *base_addr; + int irq; + struct device *dev; + unsigned int clk_size; + struct clk_bulk_data clks[rkisp1_max_bus_clk]; + struct v4l2_device v4l2_dev; + struct v4l2_ctrl_handler ctrl_handler; + struct media_device media_dev; + struct v4l2_async_notifier notifier; + struct rkisp1_sensor_async *active_sensor; + struct rkisp1_isp isp; + struct media_pipeline pipe; + struct vb2_alloc_ctx *alloc_ctx; + struct rkisp1_debug debug; +}; + +/* + * struct rkisp1_isp_mbus_info - isp pad format info + * + * translate mbus_code to hardware format values + * + * @bus_width: used for parallel + */ +struct rkisp1_isp_mbus_info { + u32 mbus_code; + enum rkisp1_fmt_pix_type fmt_type; + u32 mipi_dt; + u32 yuv_seq; + u8 bus_width; + enum rkisp1_fmt_raw_pat_type bayer_pat; + unsigned int direction; +}; 
+ +static inline void +rkisp1_write(struct rkisp1_device *rkisp1, u32 val, unsigned int addr) +{ + writel(val, rkisp1->base_addr + addr); +} + +static inline u32 rkisp1_read(struct rkisp1_device *rkisp1, unsigned int addr) +{ + return readl(rkisp1->base_addr + addr); +} + +void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop, + const struct v4l2_rect *bounds); + +void rkisp1_sd_adjust_crop(struct v4l2_rect *crop, + const struct v4l2_mbus_framefmt *bounds); + +int rkisp1_isp_register(struct rkisp1_device *rkisp1, + struct v4l2_device *v4l2_dev); +void rkisp1_isp_unregister(struct rkisp1_device *rkisp1); + +const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code); + +void rkisp1_isp_isr(struct rkisp1_device *rkisp1); +void rkisp1_mipi_isr(struct rkisp1_device *rkisp1); + +#endif /* _rkisp1_common_h */ diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-dev.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - base driver + * + * copyright (c) 2019 collabora, ltd. + * + * based on rockchip isp1 driver by rockchip electronics co., ltd. + * copyright (c) 2017 rockchip electronics co., ltd. 
+ */ + +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-mipi-dphy.h> +#include <media/v4l2-fwnode.h> + +#include "rkisp1-common.h" + +/* + * isp details + * ----------- + * + * isp comprises with: + * mipi serial camera interface + * image signal processing + * many image enhancement blocks + * crop + * resizer + * rbg display ready image + * image rotation + * + * isp block diagram + * ----------------- + * rkisp1-isp.c main picture path + * |==========================| |===============================================| + * +-----------+ +--+--+--+--+ +--------+ +--------+ +-----------+ + * | | | | | | | | | | | | | + * +--------+ |\ | | | | | | | -->| crop |->| rsz |------------->| | + * | mipi |--->| \ | | | | | | | | | | | | | | + * +--------+ | | | | |ie|ie|ie|ie| | +--------+ +--------+ | memory | + * |mux|--->| isp |->|0 |1 |2 |3 |---+ | interface | + * +--------+ | | | | | | | | | | +--------+ +--------+ +--------+ | | + * |parallel|--->| / | | | | | | | | | | | | | | | | + * +--------+ |/ | | | | | | | -->| crop |->| rsz |->| rgb |->| | + * | | | | | | | | | | | | rotate | | | + * +-----------+ +--+--+--+--+ +--------+ +--------+ +--------+ +-----------+ + * ^ + * +--------+ | |===============================================| + * | dma |------------------------------------+ self picture path + * +--------+ + * + * + * media topology + * -------------- + * +----------+ +----------+ + * | sensor 2 | | sensor x | + * ------------ ... 
------------ + * | 0 | | 0 | + * +----------+ +----------+ + * \ | + * \ | + * +----------+ \ | + * | sensor 1 | v v + * ------------ +------+------+ + * | 0 |----->| 0 | 1 | + * +----------+ |------+------| + * | isp | + * |------+------| + * | 2 | 3 | + * +------+------+ + */ + +struct rkisp1_match_data { + const char * const *clks; + unsigned int size; +}; + +/* ---------------------------------------------------------------------------- + * sensor dt bindings + */ + +static int rkisp1_create_links(struct rkisp1_device *rkisp1) +{ + unsigned int flags, source_pad; + struct v4l2_subdev *sd; + int ret; + + /* sensor links */ + flags = media_lnk_fl_enabled; + list_for_each_entry(sd, &rkisp1->v4l2_dev.subdevs, list) { + if (sd == &rkisp1->isp.sd) + continue; + + ret = media_entity_get_fwnode_pad(&sd->entity, sd->fwnode, + media_pad_fl_source); + if (ret) { + dev_err(sd->dev, "failed to find src pad for %s ", + sd->name); + return ret; + } + source_pad = ret; + + ret = media_create_pad_link(&sd->entity, source_pad, + &rkisp1->isp.sd.entity, + rkisp1_isp_pad_sink_video, + flags); + if (ret) + return ret; + + flags = 0; + } + + return 0; +} + +static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_subdev *asd) +{ + struct rkisp1_device *rkisp1 = + container_of(notifier, struct rkisp1_device, notifier); + struct rkisp1_sensor_async *s_asd = + container_of(asd, struct rkisp1_sensor_async, asd); + + s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler, + v4l2_cid_pixel_rate); + s_asd->sd = sd; + s_asd->dphy = devm_phy_get(rkisp1->dev, "dphy"); + if (is_err(s_asd->dphy)) { + if (ptr_err(s_asd->dphy) != -eprobe_defer) + dev_err(rkisp1->dev, "couldn't get the mipi d-phy "); + return ptr_err(s_asd->dphy); + } + + phy_init(s_asd->dphy); + + return 0; +} + +static void rkisp1_subdev_notifier_unbind(struct v4l2_async_notifier *notifier, + struct v4l2_subdev *sd, + struct v4l2_async_subdev *asd) +{ + struct 
rkisp1_sensor_async *s_asd = + container_of(asd, struct rkisp1_sensor_async, asd); + + phy_exit(s_asd->dphy); +} + +static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier) +{ + struct rkisp1_device *rkisp1 = + container_of(notifier, struct rkisp1_device, notifier); + int ret; + + mutex_lock(&rkisp1->media_dev.graph_mutex); + ret = rkisp1_create_links(rkisp1); + if (ret) + goto unlock; + ret = v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev); + if (ret) + goto unlock; + + dev_dbg(rkisp1->dev, "async subdev notifier completed "); + +unlock: + mutex_unlock(&rkisp1->media_dev.graph_mutex); + return ret; +} + +static int rkisp1_fwnode_parse(struct device *dev, + struct v4l2_fwnode_endpoint *vep, + struct v4l2_async_subdev *asd) +{ + struct rkisp1_sensor_async *s_asd = + container_of(asd, struct rkisp1_sensor_async, asd); + + if (vep->bus_type != v4l2_mbus_csi2_dphy) { + dev_err(dev, "only csi2 bus type is currently supported "); + return -einval; + } + + if (vep->base.port != 0) { + dev_err(dev, "the isp has only port 0 "); + return -einval; + } + + s_asd->mbus.type = vep->bus_type; + s_asd->mbus.flags = vep->bus.mipi_csi2.flags; + s_asd->lanes = vep->bus.mipi_csi2.num_data_lanes; + + switch (vep->bus.mipi_csi2.num_data_lanes) { + case 1: + s_asd->mbus.flags |= v4l2_mbus_csi2_1_lane; + break; + case 2: + s_asd->mbus.flags |= v4l2_mbus_csi2_2_lane; + break; + case 3: + s_asd->mbus.flags |= v4l2_mbus_csi2_3_lane; + break; + case 4: + s_asd->mbus.flags |= v4l2_mbus_csi2_4_lane; + break; + default: + return -einval; + } + + return 0; +} + +static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops = { + .bound = rkisp1_subdev_notifier_bound, + .unbind = rkisp1_subdev_notifier_unbind, + .complete = rkisp1_subdev_notifier_complete, +}; + +static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1) +{ + struct v4l2_async_notifier *ntf = &rkisp1->notifier; + struct device *dev = rkisp1->dev; + int ret; + + 
v4l2_async_notifier_init(ntf); + + ret = v4l2_async_notifier_parse_fwnode_endpoints_by_port(dev, ntf, + sizeof(struct rkisp1_sensor_async), + 0, rkisp1_fwnode_parse); + if (ret) + return ret; + + if (list_empty(&ntf->asd_list)) + return -enodev; + + ntf->ops = &rkisp1_subdev_notifier_ops; + + return v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf); +} + +/* ---------------------------------------------------------------------------- + * power + */ + +static int __maybe_unused rkisp1_runtime_suspend(struct device *dev) +{ + struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks); + return pinctrl_pm_select_sleep_state(dev); +} + +static int __maybe_unused rkisp1_runtime_resume(struct device *dev) +{ + struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; + ret = clk_bulk_prepare_enable(rkisp1->clk_size, rkisp1->clks); + if (ret) + return ret; + + return 0; +} + +static const struct dev_pm_ops rkisp1_pm_ops = { + set_system_sleep_pm_ops(pm_runtime_force_suspend, + pm_runtime_force_resume) + set_runtime_pm_ops(rkisp1_runtime_suspend, rkisp1_runtime_resume, null) +}; + +/* ---------------------------------------------------------------------------- + * core + */ + +static int rkisp1_entities_register(struct rkisp1_device *rkisp1) +{ + int ret; + + ret = rkisp1_isp_register(rkisp1, &rkisp1->v4l2_dev); + if (ret) + return ret; + + ret = rkisp1_subdev_notifier(rkisp1); + if (ret) { + dev_err(rkisp1->dev, + "failed to register subdev notifier(%d) ", ret); + rkisp1_isp_unregister(rkisp1); + return ret; + } + + return 0; +} + +static irqreturn_t rkisp1_isr(int irq, void *ctx) +{ + struct device *dev = ctx; + struct rkisp1_device *rkisp1 = dev_get_drvdata(dev); + + rkisp1_isp_isr(rkisp1); + rkisp1_mipi_isr(rkisp1); + + return irq_handled; +} + +static const char * const rk3399_isp_clks[] = { + "clk_isp", + "aclk_isp", + "hclk_isp", 
+ "aclk_isp_wrap", + "hclk_isp_wrap", +}; + +static const struct rkisp1_match_data rk3399_isp_clk_data = { + .clks = rk3399_isp_clks, + .size = array_size(rk3399_isp_clks), +}; + +static const struct of_device_id rkisp1_of_match[] = { + { + .compatible = "rockchip,rk3399-cif-isp", + .data = &rk3399_isp_clk_data, + }, + {}, +}; +module_device_table(of, rkisp1_of_match); + +static void rkisp1_debug_init(struct rkisp1_device *rkisp1) +{ + struct rkisp1_debug *debug = &rkisp1->debug; + + debug->debugfs_dir = debugfs_create_dir(rkisp1_driver_name, null); + if (!debug->debugfs_dir) { + dev_dbg(rkisp1->dev, "failed to create debugfs directory "); + return; + } + debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir, + &debug->data_loss); + debugfs_create_ulong("pic_size_error", 0444, debug->debugfs_dir, + &debug->pic_size_error); + debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir, + &debug->mipi_error); +} + +static int rkisp1_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct rkisp1_match_data *clk_data; + const struct of_device_id *match; + struct device *dev = &pdev->dev; + struct rkisp1_device *rkisp1; + struct v4l2_device *v4l2_dev; + unsigned int i; + int ret, irq; + + match = of_match_node(rkisp1_of_match, node); + rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), gfp_kernel); + if (!rkisp1) + return -enomem; + + dev_set_drvdata(dev, rkisp1); + rkisp1->dev = dev; + + rkisp1->base_addr = devm_platform_ioremap_resource(pdev, 0); + if (is_err(rkisp1->base_addr)) + return ptr_err(rkisp1->base_addr); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, rkisp1_isr, irqf_shared, + dev_driver_string(dev), dev); + if (ret) { + dev_err(dev, "request irq failed: %d ", ret); + return ret; + } + + rkisp1->irq = irq; + clk_data = match->data; + + for (i = 0; i < clk_data->size; i++) + rkisp1->clks[i].id = clk_data->clks[i]; + ret = devm_clk_bulk_get(dev, clk_data->size, 
rkisp1->clks); + if (ret) + return ret; + rkisp1->clk_size = clk_data->size; + + pm_runtime_enable(&pdev->dev); + + strscpy(rkisp1->media_dev.model, rkisp1_driver_name, + sizeof(rkisp1->media_dev.model)); + rkisp1->media_dev.dev = &pdev->dev; + strscpy(rkisp1->media_dev.bus_info, + "platform: " rkisp1_driver_name, + sizeof(rkisp1->media_dev.bus_info)); + media_device_init(&rkisp1->media_dev); + + v4l2_dev = &rkisp1->v4l2_dev; + v4l2_dev->mdev = &rkisp1->media_dev; + strscpy(v4l2_dev->name, rkisp1_driver_name, sizeof(v4l2_dev->name)); + + ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev); + if (ret) + return ret; + + ret = media_device_register(&rkisp1->media_dev); + if (ret) { + dev_err(dev, "failed to register media device: %d ", ret); + goto err_unreg_v4l2_dev; + } + + ret = rkisp1_entities_register(rkisp1); + if (ret) + goto err_unreg_media_dev; + + rkisp1_debug_init(rkisp1); + + return 0; + +err_unreg_media_dev: + media_device_unregister(&rkisp1->media_dev); +err_unreg_v4l2_dev: + v4l2_device_unregister(&rkisp1->v4l2_dev); + pm_runtime_disable(&pdev->dev); + return ret; +} + +static int rkisp1_remove(struct platform_device *pdev) +{ + struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev); + + v4l2_async_notifier_unregister(&rkisp1->notifier); + v4l2_async_notifier_cleanup(&rkisp1->notifier); + + rkisp1_isp_unregister(rkisp1); + + media_device_unregister(&rkisp1->media_dev); + v4l2_device_unregister(&rkisp1->v4l2_dev); + + pm_runtime_disable(&pdev->dev); + + debugfs_remove_recursive(rkisp1->debug.debugfs_dir); + return 0; +} + +static struct platform_driver rkisp1_drv = { + .driver = { + .name = rkisp1_driver_name, + .of_match_table = of_match_ptr(rkisp1_of_match), + .pm = &rkisp1_pm_ops, + }, + .probe = rkisp1_probe, + .remove = rkisp1_remove, +}; + +module_platform_driver(rkisp1_drv); +module_description("rockchip isp1 platform driver"); +module_license("dual mit/gpl"); diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c 
b/drivers/staging/media/rkisp1/rkisp1-isp.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-isp.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - isp subdevice + * + * copyright (c) 2019 collabora, ltd. + * + * based on rockchip isp1 driver by rockchip electronics co., ltd. + * copyright (c) 2017 rockchip electronics co., ltd. + */ + +#include <linux/iopoll.h> +#include <linux/phy/phy.h> +#include <linux/phy/phy-mipi-dphy.h> +#include <linux/pm_runtime.h> +#include <linux/videodev2.h> +#include <linux/vmalloc.h> +#include <media/v4l2-event.h> + +#include "rkisp1-common.h" + +#define rkisp1_def_sink_pad_fmt media_bus_fmt_srggb10_1x10 +#define rkisp1_def_src_pad_fmt media_bus_fmt_yuyv8_2x8 + +#define rkisp1_isp_dev_name rkisp1_driver_name "_isp" + +#define rkisp1_dir_src bit(0) +#define rkisp1_dir_sink bit(1) +#define rkisp1_dir_sink_src (rkisp1_dir_sink | rkisp1_dir_src) + +/* + * note: mipi controller and input mux are also configured in this file, + * because isp subdev is not only describe isp submodule(input size,format, + * output size, format), but also a virtual route device. + */ + +/* + * there are many variables named with format/frame in below code, + * please see here for their meaning. + * cropping in the sink pad defines the image region from the sensor. 
+ * cropping in the source pad defines the region for the image stabilizer (is) + * + * cropping regions of isp + * + * +---------------------------------------------------------+ + * | sensor image | + * | +---------------------------------------------------+ | + * | | cif_isp_acq (for black level) | | + * | | sink pad format | | + * | | +--------------------------------------------+ | | + * | | | cif_isp_out | | | + * | | | sink pad crop | | | + * | | | +---------------------------------+ | | | + * | | | | cif_isp_is | | | | + * | | | | source pad crop and format | | | | + * | | | +---------------------------------+ | | | + * | | +--------------------------------------------+ | | + * | +---------------------------------------------------+ | + * +---------------------------------------------------------+ + */ + +static const struct rkisp1_isp_mbus_info rkisp1_isp_formats[] = { + { + .mbus_code = media_bus_fmt_yuyv8_2x8, + .fmt_type = rkisp1_fmt_yuv, + .direction = rkisp1_dir_src, + }, { + .mbus_code = media_bus_fmt_srggb10_1x10, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw10, + .bayer_pat = rkisp1_raw_rggb, + .bus_width = 10, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sbggr10_1x10, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw10, + .bayer_pat = rkisp1_raw_bggr, + .bus_width = 10, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sgbrg10_1x10, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw10, + .bayer_pat = rkisp1_raw_gbrg, + .bus_width = 10, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sgrbg10_1x10, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw10, + .bayer_pat = rkisp1_raw_grbg, + .bus_width = 10, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_srggb12_1x12, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw12, + .bayer_pat = rkisp1_raw_rggb, + .bus_width = 12, 
+ .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sbggr12_1x12, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw12, + .bayer_pat = rkisp1_raw_bggr, + .bus_width = 12, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sgbrg12_1x12, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw12, + .bayer_pat = rkisp1_raw_gbrg, + .bus_width = 12, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sgrbg12_1x12, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw12, + .bayer_pat = rkisp1_raw_grbg, + .bus_width = 12, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_srggb8_1x8, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw8, + .bayer_pat = rkisp1_raw_rggb, + .bus_width = 8, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sbggr8_1x8, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw8, + .bayer_pat = rkisp1_raw_bggr, + .bus_width = 8, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sgbrg8_1x8, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw8, + .bayer_pat = rkisp1_raw_gbrg, + .bus_width = 8, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_sgrbg8_1x8, + .fmt_type = rkisp1_fmt_bayer, + .mipi_dt = rkisp1_cif_csi2_dt_raw8, + .bayer_pat = rkisp1_raw_grbg, + .bus_width = 8, + .direction = rkisp1_dir_sink_src, + }, { + .mbus_code = media_bus_fmt_yuyv8_1x16, + .fmt_type = rkisp1_fmt_yuv, + .mipi_dt = rkisp1_cif_csi2_dt_yuv422_8b, + .yuv_seq = rkisp1_cif_isp_acq_prop_ycbycr, + .bus_width = 16, + .direction = rkisp1_dir_sink, + }, { + .mbus_code = media_bus_fmt_yvyu8_1x16, + .fmt_type = rkisp1_fmt_yuv, + .mipi_dt = rkisp1_cif_csi2_dt_yuv422_8b, + .yuv_seq = rkisp1_cif_isp_acq_prop_ycrycb, + .bus_width = 16, + .direction = rkisp1_dir_sink, + }, { + .mbus_code = media_bus_fmt_uyvy8_1x16, + .fmt_type = rkisp1_fmt_yuv, + .mipi_dt 
= rkisp1_cif_csi2_dt_yuv422_8b, + .yuv_seq = rkisp1_cif_isp_acq_prop_cbycry, + .bus_width = 16, + .direction = rkisp1_dir_sink, + }, { + .mbus_code = media_bus_fmt_vyuy8_1x16, + .fmt_type = rkisp1_fmt_yuv, + .mipi_dt = rkisp1_cif_csi2_dt_yuv422_8b, + .yuv_seq = rkisp1_cif_isp_acq_prop_crycby, + .bus_width = 16, + .direction = rkisp1_dir_sink, + }, +}; + +/* ---------------------------------------------------------------------------- + * helpers + */ + +const struct rkisp1_isp_mbus_info *rkisp1_isp_mbus_info_get(u32 mbus_code) +{ + unsigned int i; + + for (i = 0; i < array_size(rkisp1_isp_formats); i++) { + const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i]; + + if (fmt->mbus_code == mbus_code) + return fmt; + } + + return null; +} + +static struct v4l2_subdev *rkisp1_get_remote_sensor(struct v4l2_subdev *sd) +{ + struct media_pad *local, *remote; + struct media_entity *sensor_me; + + local = &sd->entity.pads[rkisp1_isp_pad_sink_video]; + remote = media_entity_remote_pad(local); + if (!remote) { + dev_warn(sd->dev, "no link between isp and sensor "); + return null; + } + + sensor_me = remote->entity; + return media_entity_to_v4l2_subdev(sensor_me); +} + +static struct v4l2_mbus_framefmt * +rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, u32 which) +{ + if (which == v4l2_subdev_format_try) + return v4l2_subdev_get_try_format(&isp->sd, cfg, pad); + else + return v4l2_subdev_get_try_format(&isp->sd, isp->pad_cfg, pad); +} + +static struct v4l2_rect * +rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, u32 which) +{ + if (which == v4l2_subdev_format_try) + return v4l2_subdev_get_try_crop(&isp->sd, cfg, pad); + else + return v4l2_subdev_get_try_crop(&isp->sd, isp->pad_cfg, pad); +} + +/* ---------------------------------------------------------------------------- + * camera interface registers configurations + */ + +/* + * image stabilization. 
+ * this should only be called when configuring cif + * or at the frame end interrupt + */ +static void rkisp1_config_ism(struct rkisp1_device *rkisp1) +{ + struct v4l2_rect *src_crop = + rkisp1_isp_get_pad_crop(&rkisp1->isp, null, + rkisp1_isp_pad_source_video, + v4l2_subdev_format_active); + u32 val; + + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_is_recenter); + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_is_max_dx); + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_is_max_dy); + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_is_displace); + rkisp1_write(rkisp1, src_crop->left, rkisp1_cif_isp_is_h_offs); + rkisp1_write(rkisp1, src_crop->top, rkisp1_cif_isp_is_v_offs); + rkisp1_write(rkisp1, src_crop->width, rkisp1_cif_isp_is_h_size); + rkisp1_write(rkisp1, src_crop->height, rkisp1_cif_isp_is_v_size); + + /* is(image stabilization) is always on, working as output crop */ + rkisp1_write(rkisp1, 1, rkisp1_cif_isp_is_ctrl); + val = rkisp1_read(rkisp1, rkisp1_cif_isp_ctrl); + val |= rkisp1_cif_isp_ctrl_isp_cfg_upd; + rkisp1_write(rkisp1, val, rkisp1_cif_isp_ctrl); +} + +/* + * configure isp blocks with input format, size...... 
+ */ +static int rkisp1_config_isp(struct rkisp1_device *rkisp1) +{ + u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, signal = 0; + const struct rkisp1_isp_mbus_info *src_fmt, *sink_fmt; + struct rkisp1_sensor_async *sensor; + struct v4l2_mbus_framefmt *sink_frm; + struct v4l2_rect *sink_crop; + + sensor = rkisp1->active_sensor; + sink_fmt = rkisp1->isp.sink_fmt; + src_fmt = rkisp1->isp.src_fmt; + sink_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, null, + rkisp1_isp_pad_sink_video, + v4l2_subdev_format_active); + sink_crop = rkisp1_isp_get_pad_crop(&rkisp1->isp, null, + rkisp1_isp_pad_sink_video, + v4l2_subdev_format_active); + + if (sink_fmt->fmt_type == rkisp1_fmt_bayer) { + acq_mult = 1; + if (src_fmt->fmt_type == rkisp1_fmt_bayer) { + if (sensor->mbus.type == v4l2_mbus_bt656) + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_raw_pict_itu656; + else + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_raw_pict; + } else { + rkisp1_write(rkisp1, rkisp1_cif_isp_demosaic_th(0xc), + rkisp1_cif_isp_demosaic); + + if (sensor->mbus.type == v4l2_mbus_bt656) + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_bayer_itu656; + else + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_bayer_itu601; + } + } else if (sink_fmt->fmt_type == rkisp1_fmt_yuv) { + acq_mult = 2; + if (sensor->mbus.type == v4l2_mbus_csi2_dphy) { + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_itu601; + } else { + if (sensor->mbus.type == v4l2_mbus_bt656) + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_itu656; + else + isp_ctrl = rkisp1_cif_isp_ctrl_isp_mode_itu601; + } + + irq_mask |= rkisp1_cif_isp_data_loss; + } + + /* set up input acquisition properties */ + if (sensor->mbus.type == v4l2_mbus_bt656 || + sensor->mbus.type == v4l2_mbus_parallel) { + if (sensor->mbus.flags & v4l2_mbus_pclk_sample_rising) + signal = rkisp1_cif_isp_acq_prop_pos_edge; + } + + if (sensor->mbus.type == v4l2_mbus_parallel) { + if (sensor->mbus.flags & v4l2_mbus_vsync_active_low) + signal |= rkisp1_cif_isp_acq_prop_vsync_low; + + if (sensor->mbus.flags & 
v4l2_mbus_hsync_active_low) + signal |= rkisp1_cif_isp_acq_prop_hsync_low; + } + + rkisp1_write(rkisp1, isp_ctrl, rkisp1_cif_isp_ctrl); + rkisp1_write(rkisp1, signal | sink_fmt->yuv_seq | + rkisp1_cif_isp_acq_prop_bayer_pat(sink_fmt->bayer_pat) | + rkisp1_cif_isp_acq_prop_field_sel_all, + rkisp1_cif_isp_acq_prop); + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_acq_nr_frames); + + /* acquisition size */ + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_acq_h_offs); + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_acq_v_offs); + rkisp1_write(rkisp1, + acq_mult * sink_frm->width, rkisp1_cif_isp_acq_h_size); + rkisp1_write(rkisp1, sink_frm->height, rkisp1_cif_isp_acq_v_size); + + /* isp out area */ + rkisp1_write(rkisp1, sink_crop->left, rkisp1_cif_isp_out_h_offs); + rkisp1_write(rkisp1, sink_crop->top, rkisp1_cif_isp_out_v_offs); + rkisp1_write(rkisp1, sink_crop->width, rkisp1_cif_isp_out_h_size); + rkisp1_write(rkisp1, sink_crop->height, rkisp1_cif_isp_out_v_size); + + irq_mask |= rkisp1_cif_isp_frame | rkisp1_cif_isp_v_start | + rkisp1_cif_isp_pic_size_error | rkisp1_cif_isp_frame_in; + rkisp1_write(rkisp1, irq_mask, rkisp1_cif_isp_imsc); + + return 0; +} + +static int rkisp1_config_dvp(struct rkisp1_device *rkisp1) +{ + const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt; + u32 val, input_sel; + + switch (sink_fmt->bus_width) { + case 8: + input_sel = rkisp1_cif_isp_acq_prop_in_sel_8b_zero; + break; + case 10: + input_sel = rkisp1_cif_isp_acq_prop_in_sel_10b_zero; + break; + case 12: + input_sel = rkisp1_cif_isp_acq_prop_in_sel_12b; + break; + default: + dev_err(rkisp1->dev, "invalid bus width "); + return -einval; + } + + val = rkisp1_read(rkisp1, rkisp1_cif_isp_acq_prop); + rkisp1_write(rkisp1, val | input_sel, rkisp1_cif_isp_acq_prop); + + return 0; +} + +static int rkisp1_config_mipi(struct rkisp1_device *rkisp1) +{ + const struct rkisp1_isp_mbus_info *sink_fmt = rkisp1->isp.sink_fmt; + unsigned int lanes; + u32 mipi_ctrl; + + /* + * rkisp1->active_sensor->mbus is set 
in isp or d-phy notifier_bound + * function + */ + switch (rkisp1->active_sensor->mbus.flags & v4l2_mbus_csi2_lanes) { + case v4l2_mbus_csi2_4_lane: + lanes = 4; + break; + case v4l2_mbus_csi2_3_lane: + lanes = 3; + break; + case v4l2_mbus_csi2_2_lane: + lanes = 2; + break; + case v4l2_mbus_csi2_1_lane: + lanes = 1; + break; + default: + return -einval; + } + + mipi_ctrl = rkisp1_cif_mipi_ctrl_num_lanes(lanes - 1) | + rkisp1_cif_mipi_ctrl_shutdownlanes(0xf) | + rkisp1_cif_mipi_ctrl_err_sot_sync_hs_skip | + rkisp1_cif_mipi_ctrl_clocklane_ena; + + rkisp1_write(rkisp1, mipi_ctrl, rkisp1_cif_mipi_ctrl); + + /* configure data type and virtual channel */ + rkisp1_write(rkisp1, + rkisp1_cif_mipi_data_sel_dt(sink_fmt->mipi_dt) | + rkisp1_cif_mipi_data_sel_vc(0), + rkisp1_cif_mipi_img_data_sel); + + /* clear mipi interrupts */ + rkisp1_write(rkisp1, ~0, rkisp1_cif_mipi_icr); + /* + * disable rkisp1_cif_mipi_err_dphy interrupt here temporary for + * isp bus may be dead when switch isp. + */ + rkisp1_write(rkisp1, + rkisp1_cif_mipi_frame_end | rkisp1_cif_mipi_err_csi | + rkisp1_cif_mipi_err_dphy | + rkisp1_cif_mipi_sync_fifo_ovflw(0x03) | + rkisp1_cif_mipi_add_data_ovflw, + rkisp1_cif_mipi_imsc); + + dev_dbg(rkisp1->dev, " mipi_ctrl 0x%08x " + " mipi_img_data_sel 0x%08x " + " mipi_status 0x%08x " + " mipi_imsc 0x%08x ", + rkisp1_read(rkisp1, rkisp1_cif_mipi_ctrl), + rkisp1_read(rkisp1, rkisp1_cif_mipi_img_data_sel), + rkisp1_read(rkisp1, rkisp1_cif_mipi_status), + rkisp1_read(rkisp1, rkisp1_cif_mipi_imsc)); + + return 0; +} + +/* configure mux */ +static int rkisp1_config_path(struct rkisp1_device *rkisp1) +{ + struct rkisp1_sensor_async *sensor = rkisp1->active_sensor; + u32 dpcl = rkisp1_read(rkisp1, rkisp1_cif_vi_dpcl); + int ret = 0; + + if (sensor->mbus.type == v4l2_mbus_bt656 || + sensor->mbus.type == v4l2_mbus_parallel) { + ret = rkisp1_config_dvp(rkisp1); + dpcl |= rkisp1_cif_vi_dpcl_if_sel_parallel; + } else if (sensor->mbus.type == v4l2_mbus_csi2_dphy) { + ret = 
rkisp1_config_mipi(rkisp1); + dpcl |= rkisp1_cif_vi_dpcl_if_sel_mipi; + } + + rkisp1_write(rkisp1, dpcl, rkisp1_cif_vi_dpcl); + + return ret; +} + +/* hardware configure entry */ +static int rkisp1_config_cif(struct rkisp1_device *rkisp1) +{ + u32 cif_id; + int ret; + + cif_id = rkisp1_read(rkisp1, rkisp1_cif_vi_id); + dev_dbg(rkisp1->dev, "cif_id 0x%08x ", cif_id); + + ret = rkisp1_config_isp(rkisp1); + if (ret) + return ret; + ret = rkisp1_config_path(rkisp1); + if (ret) + return ret; + rkisp1_config_ism(rkisp1); + + return 0; +} + +static int rkisp1_isp_stop(struct rkisp1_device *rkisp1) +{ + u32 val; + + /* + * isp(mi) stop in mi frame end -> stop isp(mipi) -> + * stop isp(isp) ->wait for isp isp off + */ + /* stop and clear mi, mipi, and isp interrupts */ + rkisp1_write(rkisp1, 0, rkisp1_cif_mipi_imsc); + rkisp1_write(rkisp1, ~0, rkisp1_cif_mipi_icr); + + rkisp1_write(rkisp1, 0, rkisp1_cif_isp_imsc); + rkisp1_write(rkisp1, ~0, rkisp1_cif_isp_icr); + + rkisp1_write(rkisp1, 0, rkisp1_cif_mi_imsc); + rkisp1_write(rkisp1, ~0, rkisp1_cif_mi_icr); + val = rkisp1_read(rkisp1, rkisp1_cif_mipi_ctrl); + rkisp1_write(rkisp1, val & (~rkisp1_cif_mipi_ctrl_output_ena), + rkisp1_cif_mipi_ctrl); + /* stop isp */ + val = rkisp1_read(rkisp1, rkisp1_cif_isp_ctrl); + val &= ~(rkisp1_cif_isp_ctrl_isp_inform_enable | + rkisp1_cif_isp_ctrl_isp_enable); + rkisp1_write(rkisp1, val, rkisp1_cif_isp_ctrl); + + val = rkisp1_read(rkisp1, rkisp1_cif_isp_ctrl); + rkisp1_write(rkisp1, val | rkisp1_cif_isp_ctrl_isp_cfg_upd, + rkisp1_cif_isp_ctrl); + + readx_poll_timeout(readl, rkisp1->base_addr + rkisp1_cif_isp_ris, + val, val & rkisp1_cif_isp_off, 20, 100); + rkisp1_write(rkisp1, + rkisp1_cif_ircl_mipi_sw_rst | rkisp1_cif_ircl_isp_sw_rst, + rkisp1_cif_ircl); + rkisp1_write(rkisp1, 0x0, rkisp1_cif_ircl); + + return 0; +} + +static void rkisp1_config_clk(struct rkisp1_device *rkisp1) +{ + u32 val = rkisp1_cif_iccl_isp_clk | rkisp1_cif_iccl_cp_clk | + rkisp1_cif_iccl_mrsz_clk | 
rkisp1_cif_iccl_srsz_clk | + rkisp1_cif_iccl_jpeg_clk | rkisp1_cif_iccl_mi_clk | + rkisp1_cif_iccl_ie_clk | rkisp1_cif_iccl_mipi_clk | + rkisp1_cif_iccl_dcrop_clk; + + rkisp1_write(rkisp1, val, rkisp1_cif_iccl); +} + +static int rkisp1_isp_start(struct rkisp1_device *rkisp1) +{ + struct rkisp1_sensor_async *sensor = rkisp1->active_sensor; + u32 val; + + rkisp1_config_clk(rkisp1); + + /* activate mipi */ + if (sensor->mbus.type == v4l2_mbus_csi2_dphy) { + val = rkisp1_read(rkisp1, rkisp1_cif_mipi_ctrl); + rkisp1_write(rkisp1, val | rkisp1_cif_mipi_ctrl_output_ena, + rkisp1_cif_mipi_ctrl); + } + /* activate isp */ + val = rkisp1_read(rkisp1, rkisp1_cif_isp_ctrl); + val |= rkisp1_cif_isp_ctrl_isp_cfg_upd | + rkisp1_cif_isp_ctrl_isp_enable | + rkisp1_cif_isp_ctrl_isp_inform_enable; + rkisp1_write(rkisp1, val, rkisp1_cif_isp_ctrl); + + /* + * cif spec says to wait for sufficient time after enabling + * the mipi interface and before starting the sensor output. + */ + usleep_range(1000, 1200); + + return 0; +} + +/* ---------------------------------------------------------------------------- + * subdev pad operations + */ + +static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + unsigned int i, dir; + int pos = 0; + + if (code->pad == rkisp1_isp_pad_sink_video) { + dir = rkisp1_dir_sink; + } else if (code->pad == rkisp1_isp_pad_source_video) { + dir = rkisp1_dir_src; + } else { + if (code->index > 0) + return -einval; + code->code = media_bus_fmt_fixed; + return 0; + } + + if (code->index >= array_size(rkisp1_isp_formats)) + return -einval; + + for (i = 0; i < array_size(rkisp1_isp_formats); i++) { + const struct rkisp1_isp_mbus_info *fmt = &rkisp1_isp_formats[i]; + + if (fmt->direction & dir) + pos++; + + if (code->index == pos - 1) { + code->code = fmt->mbus_code; + return 0; + } + } + + return -einval; +} + +static int rkisp1_isp_init_config(struct v4l2_subdev *sd, + struct 
v4l2_subdev_pad_config *cfg) +{ + struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; + struct v4l2_rect *sink_crop, *src_crop; + + sink_fmt = v4l2_subdev_get_try_format(sd, cfg, + rkisp1_isp_pad_sink_video); + sink_fmt->width = rkisp1_default_width; + sink_fmt->height = rkisp1_default_height; + sink_fmt->field = v4l2_field_none; + sink_fmt->code = rkisp1_def_sink_pad_fmt; + + sink_crop = v4l2_subdev_get_try_crop(sd, cfg, + rkisp1_isp_pad_sink_video); + sink_crop->width = rkisp1_default_width; + sink_crop->height = rkisp1_default_height; + sink_crop->left = 0; + sink_crop->top = 0; + + src_fmt = v4l2_subdev_get_try_format(sd, cfg, + rkisp1_isp_pad_source_video); + *src_fmt = *sink_fmt; + src_fmt->code = rkisp1_def_src_pad_fmt; + src_fmt->quantization = v4l2_quantization_full_range; + + src_crop = v4l2_subdev_get_try_crop(sd, cfg, + rkisp1_isp_pad_source_video); + *src_crop = *sink_crop; + + sink_fmt = v4l2_subdev_get_try_format(sd, cfg, + rkisp1_isp_pad_sink_params); + src_fmt = v4l2_subdev_get_try_format(sd, cfg, + rkisp1_isp_pad_source_stats); + sink_fmt->width = rkisp1_default_width; + sink_fmt->height = rkisp1_default_height; + sink_fmt->field = v4l2_field_none; + sink_fmt->code = media_bus_fmt_fixed; + *src_fmt = *sink_fmt; + + return 0; +} + +static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_mbus_framefmt *format, + unsigned int which) +{ + const struct rkisp1_isp_mbus_info *mbus_info; + struct v4l2_mbus_framefmt *src_fmt; + const struct v4l2_rect *src_crop; + + src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, + rkisp1_isp_pad_source_video, which); + src_crop = rkisp1_isp_get_pad_crop(isp, cfg, + rkisp1_isp_pad_source_video, which); + + src_fmt->code = format->code; + mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code); + if (!mbus_info) { + src_fmt->code = rkisp1_def_src_pad_fmt; + mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code); + } + if (which == v4l2_subdev_format_active) + isp->src_fmt = mbus_info; + 
src_fmt->width = src_crop->width; + src_fmt->height = src_crop->height; + src_fmt->quantization = format->quantization; + /* full range by default */ + if (!src_fmt->quantization) + src_fmt->quantization = v4l2_quantization_full_range; + + *format = *src_fmt; +} + +static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *r, unsigned int which) +{ + struct v4l2_mbus_framefmt *src_fmt; + const struct v4l2_rect *sink_crop; + struct v4l2_rect *src_crop; + + src_crop = rkisp1_isp_get_pad_crop(isp, cfg, + rkisp1_isp_pad_source_video, + which); + sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, + rkisp1_isp_pad_sink_video, + which); + + src_crop->left = align(r->left, 2); + src_crop->width = align(r->width, 2); + src_crop->top = r->top; + src_crop->height = r->height; + rkisp1_sd_adjust_crop_rect(src_crop, sink_crop); + + *r = *src_crop; + + /* propagate to out format */ + src_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, + rkisp1_isp_pad_source_video, which); + rkisp1_isp_set_src_fmt(isp, cfg, src_fmt, which); +} + +static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *r, unsigned int which) +{ + struct v4l2_rect *sink_crop, *src_crop; + struct v4l2_mbus_framefmt *sink_fmt; + + sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, rkisp1_isp_pad_sink_video, + which); + sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, rkisp1_isp_pad_sink_video, + which); + + sink_crop->left = align(r->left, 2); + sink_crop->width = align(r->width, 2); + sink_crop->top = r->top; + sink_crop->height = r->height; + rkisp1_sd_adjust_crop(sink_crop, sink_fmt); + + *r = *sink_crop; + + /* propagate to out crop */ + src_crop = rkisp1_isp_get_pad_crop(isp, cfg, + rkisp1_isp_pad_source_video, which); + rkisp1_isp_set_src_crop(isp, cfg, src_crop, which); +} + +static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_mbus_framefmt *format, + 
unsigned int which) +{ + const struct rkisp1_isp_mbus_info *mbus_info; + struct v4l2_mbus_framefmt *sink_fmt; + struct v4l2_rect *sink_crop; + + sink_fmt = rkisp1_isp_get_pad_fmt(isp, cfg, rkisp1_isp_pad_sink_video, + which); + sink_fmt->code = format->code; + mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); + if (!mbus_info) { + sink_fmt->code = rkisp1_def_sink_pad_fmt; + mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); + } + if (which == v4l2_subdev_format_active) + isp->sink_fmt = mbus_info; + + sink_fmt->width = clamp_t(u32, format->width, + rkisp1_isp_min_width, + rkisp1_isp_max_width); + sink_fmt->height = clamp_t(u32, format->height, + rkisp1_isp_min_height, + rkisp1_isp_max_height); + + *format = *sink_fmt; + + /* propagate to in crop */ + sink_crop = rkisp1_isp_get_pad_crop(isp, cfg, rkisp1_isp_pad_sink_video, + which); + rkisp1_isp_set_sink_crop(isp, cfg, sink_crop, which); +} + +static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + + fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, fmt->which); + return 0; +} + +static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + + if (fmt->pad == rkisp1_isp_pad_sink_video) + rkisp1_isp_set_sink_fmt(isp, cfg, &fmt->format, fmt->which); + else if (fmt->pad == rkisp1_isp_pad_source_video) + rkisp1_isp_set_src_fmt(isp, cfg, &fmt->format, fmt->which); + else + fmt->format = *rkisp1_isp_get_pad_fmt(isp, cfg, fmt->pad, + fmt->which); + + return 0; +} + +static int rkisp1_isp_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + + if (sel->pad != rkisp1_isp_pad_source_video && + sel->pad != 
rkisp1_isp_pad_sink_video) + return -einval; + + switch (sel->target) { + case v4l2_sel_tgt_crop_bounds: + if (sel->pad == rkisp1_isp_pad_sink_video) { + struct v4l2_mbus_framefmt *fmt; + + fmt = rkisp1_isp_get_pad_fmt(isp, cfg, sel->pad, + sel->which); + sel->r.height = fmt->height; + sel->r.width = fmt->width; + sel->r.left = 0; + sel->r.top = 0; + } else { + sel->r = *rkisp1_isp_get_pad_crop(isp, cfg, + rkisp1_isp_pad_sink_video, + sel->which); + } + break; + case v4l2_sel_tgt_crop: + sel->r = *rkisp1_isp_get_pad_crop(isp, cfg, sel->pad, + sel->which); + break; + default: + return -einval; + } + + return 0; +} + +static int rkisp1_isp_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct rkisp1_device *rkisp1 = + container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev); + struct rkisp1_isp *isp = container_of(sd, struct rkisp1_isp, sd); + + if (sel->target != v4l2_sel_tgt_crop) + return -einval; + + dev_dbg(rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d ", __func__, + sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height); + + if (sel->pad == rkisp1_isp_pad_sink_video) + rkisp1_isp_set_sink_crop(isp, cfg, &sel->r, sel->which); + else if (sel->pad == rkisp1_isp_pad_source_video) + rkisp1_isp_set_src_crop(isp, cfg, &sel->r, sel->which); + else + return -einval; + + return 0; +} + +static int rkisp1_subdev_link_validate(struct media_link *link) +{ + if (link->sink->index == rkisp1_isp_pad_sink_params) + return 0; + + return v4l2_subdev_link_validate(link); +} + +static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = { + .enum_mbus_code = rkisp1_isp_enum_mbus_code, + .get_selection = rkisp1_isp_get_selection, + .set_selection = rkisp1_isp_set_selection, + .init_cfg = rkisp1_isp_init_config, + .get_fmt = rkisp1_isp_get_fmt, + .set_fmt = rkisp1_isp_set_fmt, + .link_validate = v4l2_subdev_link_validate_default, +}; + +/* 
---------------------------------------------------------------------------- + * stream operations + */ + +static int rkisp1_mipi_csi2_start(struct rkisp1_isp *isp, + struct rkisp1_sensor_async *sensor) +{ + union phy_configure_opts opts; + struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy; + s64 pixel_clock; + + if (!sensor->pixel_rate_ctrl) { + dev_warn(sensor->sd->dev, "no pixel rate control in subdev "); + return -epipe; + } + + pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl); + if (!pixel_clock) { + dev_err(sensor->sd->dev, "invalid pixel rate value "); + return -einval; + } + + phy_mipi_dphy_get_default_config(pixel_clock, isp->sink_fmt->bus_width, + sensor->lanes, cfg); + phy_set_mode(sensor->dphy, phy_mode_mipi_dphy); + phy_configure(sensor->dphy, &opts); + phy_power_on(sensor->dphy); + + return 0; +} + +static void rkisp1_mipi_csi2_stop(struct rkisp1_sensor_async *sensor) +{ + phy_power_off(sensor->dphy); +} + +static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct rkisp1_device *rkisp1 = + container_of(sd->v4l2_dev, struct rkisp1_device, v4l2_dev); + struct v4l2_subdev *sensor_sd; + int ret = 0; + + if (!enable) { + ret = rkisp1_isp_stop(rkisp1); + if (ret) + return ret; + rkisp1_mipi_csi2_stop(rkisp1->active_sensor); + return 0; + } + + sensor_sd = rkisp1_get_remote_sensor(sd); + if (!sensor_sd) + return -enodev; + rkisp1->active_sensor = container_of(sensor_sd->asd, + struct rkisp1_sensor_async, asd); + + atomic_set(&rkisp1->isp.frame_sequence, -1); + ret = rkisp1_config_cif(rkisp1); + if (ret) + return ret; + + if (rkisp1->active_sensor->mbus.type != v4l2_mbus_csi2_dphy) + return -einval; + + ret = rkisp1_mipi_csi2_start(&rkisp1->isp, rkisp1->active_sensor); + if (ret) + return ret; + + ret = rkisp1_isp_start(rkisp1); + if (ret) + rkisp1_mipi_csi2_stop(rkisp1->active_sensor); + + return ret; +} + +static int rkisp1_isp_subs_evt(struct v4l2_subdev *sd, struct v4l2_fh *fh, + struct v4l2_event_subscription 
*sub) +{ + if (sub->type != v4l2_event_frame_sync) + return -einval; + + /* v4l2_event_frame_sync doesn't require an id, so zero should be set */ + if (sub->id != 0) + return -einval; + + return v4l2_event_subscribe(fh, sub, 0, null); +} + +static const struct media_entity_operations rkisp1_isp_media_ops = { + .link_validate = rkisp1_subdev_link_validate, +}; + +static const struct v4l2_subdev_video_ops rkisp1_isp_video_ops = { + .s_stream = rkisp1_isp_s_stream, +}; + +static const struct v4l2_subdev_core_ops rkisp1_isp_core_ops = { + .subscribe_event = rkisp1_isp_subs_evt, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, +}; + +static const struct v4l2_subdev_ops rkisp1_isp_ops = { + .core = &rkisp1_isp_core_ops, + .video = &rkisp1_isp_video_ops, + .pad = &rkisp1_isp_pad_ops, +}; + +int rkisp1_isp_register(struct rkisp1_device *rkisp1, + struct v4l2_device *v4l2_dev) +{ + struct rkisp1_isp *isp = &rkisp1->isp; + struct media_pad *pads = isp->pads; + struct v4l2_subdev *sd = &isp->sd; + int ret; + + v4l2_subdev_init(sd, &rkisp1_isp_ops); + sd->flags |= v4l2_subdev_fl_has_devnode | v4l2_subdev_fl_has_events; + sd->entity.ops = &rkisp1_isp_media_ops; + sd->entity.function = media_ent_f_proc_video_pixel_formatter; + sd->owner = this_module; + strscpy(sd->name, rkisp1_isp_dev_name, sizeof(sd->name)); + + pads[rkisp1_isp_pad_sink_video].flags = media_pad_fl_sink | + media_pad_fl_must_connect; + pads[rkisp1_isp_pad_sink_params].flags = media_pad_fl_sink; + pads[rkisp1_isp_pad_source_video].flags = media_pad_fl_source; + pads[rkisp1_isp_pad_source_stats].flags = media_pad_fl_source; + + isp->sink_fmt = rkisp1_isp_mbus_info_get(rkisp1_def_sink_pad_fmt); + isp->src_fmt = rkisp1_isp_mbus_info_get(rkisp1_def_src_pad_fmt); + + ret = media_entity_pads_init(&sd->entity, rkisp1_isp_pad_max, pads); + if (ret) + return ret; + + ret = v4l2_device_register_subdev(v4l2_dev, sd); + if (ret) { + dev_err(sd->dev, "failed to register isp subdev "); + goto err_cleanup_media_entity; + 
} + + rkisp1_isp_init_config(sd, rkisp1->isp.pad_cfg); + return 0; + +err_cleanup_media_entity: + media_entity_cleanup(&sd->entity); + + return ret; +} + +void rkisp1_isp_unregister(struct rkisp1_device *rkisp1) +{ + struct v4l2_subdev *sd = &rkisp1->isp.sd; + + v4l2_device_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); +} + +/* ---------------------------------------------------------------------------- + * interrupt handlers + */ + +void rkisp1_mipi_isr(struct rkisp1_device *rkisp1) +{ + u32 val, status; + + status = rkisp1_read(rkisp1, rkisp1_cif_mipi_mis); + if (!status) + return; + + rkisp1_write(rkisp1, status, rkisp1_cif_mipi_icr); + + /* + * disable dphy errctrl interrupt, because this dphy + * erctrl signal is asserted until the next changes + * of line state. this time is may be too long and cpu + * is hold in this interrupt. + */ + if (status & rkisp1_cif_mipi_err_ctrl(0x0f)) { + val = rkisp1_read(rkisp1, rkisp1_cif_mipi_imsc); + rkisp1_write(rkisp1, val & ~rkisp1_cif_mipi_err_ctrl(0x0f), + rkisp1_cif_mipi_imsc); + rkisp1->isp.is_dphy_errctrl_disabled = true; + } + + /* + * enable dphy errctrl interrupt again, if mipi have receive + * the whole frame without any error. + */ + if (status == rkisp1_cif_mipi_frame_end) { + /* + * enable dphy errctrl interrupt again, if mipi have receive + * the whole frame without any error. + */ + if (rkisp1->isp.is_dphy_errctrl_disabled) { + val = rkisp1_read(rkisp1, rkisp1_cif_mipi_imsc); + val |= rkisp1_cif_mipi_err_ctrl(0x0f); + rkisp1_write(rkisp1, val, rkisp1_cif_mipi_imsc); + rkisp1->isp.is_dphy_errctrl_disabled = false; + } + } else { + rkisp1->debug.mipi_error++; + } +} + +static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp) +{ + struct v4l2_event event = { + .type = v4l2_event_frame_sync, + }; + + /* + * increment the frame sequence on the vsync signal. + * this will allow applications to detect dropped. 
+ * note that there is a debugfs counter for dropped + * frames, but using this event is more accurate. + */ + event.u.frame_sync.frame_sequence = + atomic_inc_return(&isp->frame_sequence); + v4l2_event_queue(isp->sd.devnode, &event); +} + +void rkisp1_isp_isr(struct rkisp1_device *rkisp1) +{ + u32 status, isp_err; + + status = rkisp1_read(rkisp1, rkisp1_cif_isp_mis); + if (!status) + return; + + rkisp1_write(rkisp1, status, rkisp1_cif_isp_icr); + + /* vertical sync signal, starting generating new frame */ + if (status & rkisp1_cif_isp_v_start) + rkisp1_isp_queue_event_sof(&rkisp1->isp); + + if (status & rkisp1_cif_isp_pic_size_error) { + /* clear pic_size_error */ + isp_err = rkisp1_read(rkisp1, rkisp1_cif_isp_err); + rkisp1_write(rkisp1, isp_err, rkisp1_cif_isp_err_clr); + rkisp1->debug.pic_size_error++; + } else if (status & rkisp1_cif_isp_data_loss) { + /* keep track of data_loss in debugfs */ + rkisp1->debug.data_loss++; + } +} diff --git a/drivers/staging/media/rkisp1/rkisp1-regs.h b/drivers/staging/media/rkisp1/rkisp1-regs.h --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-regs.h +/* spdx-license-identifier: (gpl-2.0+ or mit) */ +/* + * rockchip isp1 driver - registers header + * + * copyright (c) 2017 rockchip electronics co., ltd. 
+ */ + +#ifndef _rkisp1_regs_h +#define _rkisp1_regs_h + +/* isp_ctrl */ +#define rkisp1_cif_isp_ctrl_isp_enable bit(0) +#define rkisp1_cif_isp_ctrl_isp_mode_raw_pict (0 << 1) +#define rkisp1_cif_isp_ctrl_isp_mode_itu656 bit(1) +#define rkisp1_cif_isp_ctrl_isp_mode_itu601 (2 << 1) +#define rkisp1_cif_isp_ctrl_isp_mode_bayer_itu601 (3 << 1) +#define rkisp1_cif_isp_ctrl_isp_mode_data_mode (4 << 1) +#define rkisp1_cif_isp_ctrl_isp_mode_bayer_itu656 (5 << 1) +#define rkisp1_cif_isp_ctrl_isp_mode_raw_pict_itu656 (6 << 1) +#define rkisp1_cif_isp_ctrl_isp_inform_enable bit(4) +#define rkisp1_cif_isp_ctrl_isp_gamma_in_ena bit(6) +#define rkisp1_cif_isp_ctrl_isp_awb_ena bit(7) +#define rkisp1_cif_isp_ctrl_isp_cfg_upd_permanent bit(8) +#define rkisp1_cif_isp_ctrl_isp_cfg_upd bit(9) +#define rkisp1_cif_isp_ctrl_isp_gen_cfg_upd bit(10) +#define rkisp1_cif_isp_ctrl_isp_gamma_out_ena bit(11) +#define rkisp1_cif_isp_ctrl_isp_flash_mode_ena bit(12) +#define rkisp1_cif_isp_ctrl_isp_csm_y_full_ena bit(13) +#define rkisp1_cif_isp_ctrl_isp_csm_c_full_ena bit(14) + +/* isp_acq_prop */ +#define rkisp1_cif_isp_acq_prop_pos_edge bit(0) +#define rkisp1_cif_isp_acq_prop_hsync_low bit(1) +#define rkisp1_cif_isp_acq_prop_vsync_low bit(2) +#define rkisp1_cif_isp_acq_prop_bayer_pat_rggb (0 << 3) +#define rkisp1_cif_isp_acq_prop_bayer_pat_grbg bit(3) +#define rkisp1_cif_isp_acq_prop_bayer_pat_gbrg (2 << 3) +#define rkisp1_cif_isp_acq_prop_bayer_pat_bggr (3 << 3) +#define rkisp1_cif_isp_acq_prop_bayer_pat(pat) ((pat) << 3) +#define rkisp1_cif_isp_acq_prop_ycbycr (0 << 7) +#define rkisp1_cif_isp_acq_prop_ycrycb bit(7) +#define rkisp1_cif_isp_acq_prop_cbycry (2 << 7) +#define rkisp1_cif_isp_acq_prop_crycby (3 << 7) +#define rkisp1_cif_isp_acq_prop_field_sel_all (0 << 9) +#define rkisp1_cif_isp_acq_prop_field_sel_even bit(9) +#define rkisp1_cif_isp_acq_prop_field_sel_odd (2 << 9) +#define rkisp1_cif_isp_acq_prop_in_sel_12b (0 << 12) +#define rkisp1_cif_isp_acq_prop_in_sel_10b_zero bit(12) +#define 
rkisp1_cif_isp_acq_prop_in_sel_10b_msb (2 << 12) +#define rkisp1_cif_isp_acq_prop_in_sel_8b_zero (3 << 12) +#define rkisp1_cif_isp_acq_prop_in_sel_8b_msb (4 << 12) + +/* vi_dpcl */ +#define rkisp1_cif_vi_dpcl_dma_jpeg (0 << 0) +#define rkisp1_cif_vi_dpcl_mp_mux_mrsz_mi bit(0) +#define rkisp1_cif_vi_dpcl_mp_mux_mrsz_jpeg (2 << 0) +#define rkisp1_cif_vi_dpcl_chan_mode_mp bit(2) +#define rkisp1_cif_vi_dpcl_chan_mode_sp (2 << 2) +#define rkisp1_cif_vi_dpcl_chan_mode_mpsp (3 << 2) +#define rkisp1_cif_vi_dpcl_dma_sw_spmux (0 << 4) +#define rkisp1_cif_vi_dpcl_dma_sw_si bit(4) +#define rkisp1_cif_vi_dpcl_dma_sw_ie (2 << 4) +#define rkisp1_cif_vi_dpcl_dma_sw_jpeg (3 << 4) +#define rkisp1_cif_vi_dpcl_dma_sw_isp (4 << 4) +#define rkisp1_cif_vi_dpcl_if_sel_parallel (0 << 8) +#define rkisp1_cif_vi_dpcl_if_sel_smia bit(8) +#define rkisp1_cif_vi_dpcl_if_sel_mipi (2 << 8) +#define rkisp1_cif_vi_dpcl_dma_ie_mux_dma bit(10) +#define rkisp1_cif_vi_dpcl_dma_sp_mux_dma bit(11) + +/* isp_imsc - isp_mis - isp_ris - isp_icr - isp_isr */ +#define rkisp1_cif_isp_off bit(0) +#define rkisp1_cif_isp_frame bit(1) +#define rkisp1_cif_isp_data_loss bit(2) +#define rkisp1_cif_isp_pic_size_error bit(3) +#define rkisp1_cif_isp_awb_done bit(4) +#define rkisp1_cif_isp_frame_in bit(5) +#define rkisp1_cif_isp_v_start bit(6) +#define rkisp1_cif_isp_h_start bit(7) +#define rkisp1_cif_isp_flash_on bit(8) +#define rkisp1_cif_isp_flash_off bit(9) +#define rkisp1_cif_isp_shutter_on bit(10) +#define rkisp1_cif_isp_shutter_off bit(11) +#define rkisp1_cif_isp_afm_sum_of bit(12) +#define rkisp1_cif_isp_afm_lum_of bit(13) +#define rkisp1_cif_isp_afm_fin bit(14) +#define rkisp1_cif_isp_hist_measure_rdy bit(15) +#define rkisp1_cif_isp_flash_cap bit(17) +#define rkisp1_cif_isp_exp_end bit(18) +#define rkisp1_cif_isp_vsm_end bit(19) + +/* isp_err */ +#define rkisp1_cif_isp_err_inform_size bit(0) +#define rkisp1_cif_isp_err_is_size bit(1) +#define rkisp1_cif_isp_err_outform_size bit(2) + +/* mi_ctrl */ +#define 
rkisp1_cif_mi_ctrl_mp_enable bit(0) +#define rkisp1_cif_mi_ctrl_sp_enable (2 << 0) +#define rkisp1_cif_mi_ctrl_jpeg_enable (4 << 0) +#define rkisp1_cif_mi_ctrl_raw_enable (8 << 0) +#define rkisp1_cif_mi_ctrl_hflip bit(4) +#define rkisp1_cif_mi_ctrl_vflip bit(5) +#define rkisp1_cif_mi_ctrl_rot bit(6) +#define rkisp1_cif_mi_byte_swap bit(7) +#define rkisp1_cif_mi_sp_y_full_yuv2rgb bit(8) +#define rkisp1_cif_mi_sp_cbcr_full_yuv2rgb bit(9) +#define rkisp1_cif_mi_sp_422noncositeed bit(10) +#define rkisp1_cif_mi_mp_pingpong_enabel bit(11) +#define rkisp1_cif_mi_sp_pingpong_enabel bit(12) +#define rkisp1_cif_mi_mp_autoupdate_enable bit(13) +#define rkisp1_cif_mi_sp_autoupdate_enable bit(14) +#define rkisp1_cif_mi_last_pixel_sig_enable bit(15) +#define rkisp1_cif_mi_ctrl_burst_len_lum_16 (0 << 16) +#define rkisp1_cif_mi_ctrl_burst_len_lum_32 bit(16) +#define rkisp1_cif_mi_ctrl_burst_len_lum_64 (2 << 16) +#define rkisp1_cif_mi_ctrl_burst_len_chrom_16 (0 << 18) +#define rkisp1_cif_mi_ctrl_burst_len_chrom_32 bit(18) +#define rkisp1_cif_mi_ctrl_burst_len_chrom_64 (2 << 18) +#define rkisp1_cif_mi_ctrl_init_base_en bit(20) +#define rkisp1_cif_mi_ctrl_init_offset_en bit(21) +#define rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8 (0 << 22) +#define rkisp1_mi_ctrl_mp_write_yuv_spla bit(22) +#define rkisp1_mi_ctrl_mp_write_yuvint (2 << 22) +#define rkisp1_mi_ctrl_mp_write_raw12 (2 << 22) +#define rkisp1_mi_ctrl_sp_write_pla (0 << 24) +#define rkisp1_mi_ctrl_sp_write_spla bit(24) +#define rkisp1_mi_ctrl_sp_write_int (2 << 24) +#define rkisp1_mi_ctrl_sp_input_yuv400 (0 << 26) +#define rkisp1_mi_ctrl_sp_input_yuv420 bit(26) +#define rkisp1_mi_ctrl_sp_input_yuv422 (2 << 26) +#define rkisp1_mi_ctrl_sp_input_yuv444 (3 << 26) +#define rkisp1_mi_ctrl_sp_output_yuv400 (0 << 28) +#define rkisp1_mi_ctrl_sp_output_yuv420 bit(28) +#define rkisp1_mi_ctrl_sp_output_yuv422 (2 << 28) +#define rkisp1_mi_ctrl_sp_output_yuv444 (3 << 28) +#define rkisp1_mi_ctrl_sp_output_rgb565 (4 << 28) +#define 
rkisp1_mi_ctrl_sp_output_rgb666 (5 << 28) +#define rkisp1_mi_ctrl_sp_output_rgb888 (6 << 28) + +#define rkisp1_mi_ctrl_mp_fmt_mask genmask(23, 22) +#define rkisp1_mi_ctrl_sp_fmt_mask genmask(30, 24) + +/* mi_init */ +#define rkisp1_cif_mi_init_skip bit(2) +#define rkisp1_cif_mi_init_soft_upd bit(4) + +/* mi_ctrl_shd */ +#define rkisp1_cif_mi_ctrl_shd_mp_in_enabled bit(0) +#define rkisp1_cif_mi_ctrl_shd_sp_in_enabled bit(1) +#define rkisp1_cif_mi_ctrl_shd_jpeg_in_enabled bit(2) +#define rkisp1_cif_mi_ctrl_shd_raw_in_enabled bit(3) +#define rkisp1_cif_mi_ctrl_shd_mp_out_enabled bit(16) +#define rkisp1_cif_mi_ctrl_shd_sp_out_enabled bit(17) +#define rkisp1_cif_mi_ctrl_shd_jpeg_out_enabled bit(18) +#define rkisp1_cif_mi_ctrl_shd_raw_out_enabled bit(19) + +/* rsz_ctrl */ +#define rkisp1_cif_rsz_ctrl_scale_hy_enable bit(0) +#define rkisp1_cif_rsz_ctrl_scale_hc_enable bit(1) +#define rkisp1_cif_rsz_ctrl_scale_vy_enable bit(2) +#define rkisp1_cif_rsz_ctrl_scale_vc_enable bit(3) +#define rkisp1_cif_rsz_ctrl_scale_hy_up bit(4) +#define rkisp1_cif_rsz_ctrl_scale_hc_up bit(5) +#define rkisp1_cif_rsz_ctrl_scale_vy_up bit(6) +#define rkisp1_cif_rsz_ctrl_scale_vc_up bit(7) +#define rkisp1_cif_rsz_ctrl_cfg_upd bit(8) +#define rkisp1_cif_rsz_ctrl_cfg_upd_auto bit(9) +#define rkisp1_cif_rsz_scaler_factor bit(16) + +/* mi_imsc - mi_mis - mi_ris - mi_icr - mi_isr */ +#define rkisp1_cif_mi_frame(stream) bit((stream)->id) +#define rkisp1_cif_mi_mblk_line bit(2) +#define rkisp1_cif_mi_fill_mp_y bit(3) +#define rkisp1_cif_mi_wrap_mp_y bit(4) +#define rkisp1_cif_mi_wrap_mp_cb bit(5) +#define rkisp1_cif_mi_wrap_mp_cr bit(6) +#define rkisp1_cif_mi_wrap_sp_y bit(7) +#define rkisp1_cif_mi_wrap_sp_cb bit(8) +#define rkisp1_cif_mi_wrap_sp_cr bit(9) +#define rkisp1_cif_mi_dma_ready bit(11) + +/* mi_status */ +#define rkisp1_cif_mi_status_mp_y_fifo_full bit(0) +#define rkisp1_cif_mi_status_sp_y_fifo_full bit(4) + +/* mi_dma_ctrl */ +#define rkisp1_cif_mi_dma_ctrl_burst_len_lum_16 (0 << 0) +#define 
rkisp1_cif_mi_dma_ctrl_burst_len_lum_32 bit(0) +#define rkisp1_cif_mi_dma_ctrl_burst_len_lum_64 (2 << 0) +#define rkisp1_cif_mi_dma_ctrl_burst_len_chrom_16 (0 << 2) +#define rkisp1_cif_mi_dma_ctrl_burst_len_chrom_32 bit(2) +#define rkisp1_cif_mi_dma_ctrl_burst_len_chrom_64 (2 << 2) +#define rkisp1_cif_mi_dma_ctrl_read_fmt_planar (0 << 4) +#define rkisp1_cif_mi_dma_ctrl_read_fmt_splanar bit(4) +#define rkisp1_cif_mi_dma_ctrl_fmt_yuv400 (0 << 6) +#define rkisp1_cif_mi_dma_ctrl_fmt_yuv420 bit(6) +#define rkisp1_cif_mi_dma_ctrl_read_fmt_packed (2 << 4) +#define rkisp1_cif_mi_dma_ctrl_fmt_yuv422 (2 << 6) +#define rkisp1_cif_mi_dma_ctrl_fmt_yuv444 (3 << 6) +#define rkisp1_cif_mi_dma_ctrl_byte_swap bit(8) +#define rkisp1_cif_mi_dma_ctrl_continuous_ena bit(9) +#define rkisp1_cif_mi_dma_ctrl_rgb_bayer_no (0 << 12) +#define rkisp1_cif_mi_dma_ctrl_rgb_bayer_8bit bit(12) +#define rkisp1_cif_mi_dma_ctrl_rgb_bayer_16bit (2 << 12) +/* mi_dma_start */ +#define rkisp1_cif_mi_dma_start_enable bit(0) +/* mi_xtd_format_ctrl */ +#define rkisp1_cif_mi_xtd_fmt_ctrl_mp_cb_cr_swap bit(0) +#define rkisp1_cif_mi_xtd_fmt_ctrl_sp_cb_cr_swap bit(1) +#define rkisp1_cif_mi_xtd_fmt_ctrl_dma_cb_cr_swap bit(2) + +/* ccl */ +#define rkisp1_cif_ccl_cif_clk_dis bit(2) +/* iccl */ +#define rkisp1_cif_iccl_isp_clk bit(0) +#define rkisp1_cif_iccl_cp_clk bit(1) +#define rkisp1_cif_iccl_res_2 bit(2) +#define rkisp1_cif_iccl_mrsz_clk bit(3) +#define rkisp1_cif_iccl_srsz_clk bit(4) +#define rkisp1_cif_iccl_jpeg_clk bit(5) +#define rkisp1_cif_iccl_mi_clk bit(6) +#define rkisp1_cif_iccl_res_7 bit(7) +#define rkisp1_cif_iccl_ie_clk bit(8) +#define rkisp1_cif_iccl_simp_clk bit(9) +#define rkisp1_cif_iccl_smia_clk bit(10) +#define rkisp1_cif_iccl_mipi_clk bit(11) +#define rkisp1_cif_iccl_dcrop_clk bit(12) +/* ircl */ +#define rkisp1_cif_ircl_isp_sw_rst bit(0) +#define rkisp1_cif_ircl_cp_sw_rst bit(1) +#define rkisp1_cif_ircl_ycs_sw_rst bit(2) +#define rkisp1_cif_ircl_mrsz_sw_rst bit(3) +#define 
rkisp1_cif_ircl_srsz_sw_rst bit(4) +#define rkisp1_cif_ircl_jpeg_sw_rst bit(5) +#define rkisp1_cif_ircl_mi_sw_rst bit(6) +#define rkisp1_cif_ircl_cif_sw_rst bit(7) +#define rkisp1_cif_ircl_ie_sw_rst bit(8) +#define rkisp1_cif_ircl_si_sw_rst bit(9) +#define rkisp1_cif_ircl_mipi_sw_rst bit(11) + +/* c_proc_ctr */ +#define rkisp1_cif_c_proc_ctr_enable bit(0) +#define rkisp1_cif_c_proc_yout_full bit(1) +#define rkisp1_cif_c_proc_yin_full bit(2) +#define rkisp1_cif_c_proc_cout_full bit(3) +#define rkisp1_cif_c_proc_ctrl_reserved 0xfffffffe +#define rkisp1_cif_c_proc_contrast_reserved 0xffffff00 +#define rkisp1_cif_c_proc_brightness_reserved 0xffffff00 +#define rkisp1_cif_c_proc_hue_reserved 0xffffff00 +#define rkisp1_cif_c_proc_saturation_reserved 0xffffff00 +#define rkisp1_cif_c_proc_macc_reserved 0xe000e000 +#define rkisp1_cif_c_proc_tone_reserved 0xf000 +/* dual_crop_ctrl */ +#define rkisp1_cif_dual_crop_mp_mode_bypass (0 << 0) +#define rkisp1_cif_dual_crop_mp_mode_yuv bit(0) +#define rkisp1_cif_dual_crop_mp_mode_raw (2 << 0) +#define rkisp1_cif_dual_crop_sp_mode_bypass (0 << 2) +#define rkisp1_cif_dual_crop_sp_mode_yuv bit(2) +#define rkisp1_cif_dual_crop_sp_mode_raw (2 << 2) +#define rkisp1_cif_dual_crop_cfg_upd_permanent bit(4) +#define rkisp1_cif_dual_crop_cfg_upd bit(5) +#define rkisp1_cif_dual_crop_gen_cfg_upd bit(6) + +/* img_eff_ctrl */ +#define rkisp1_cif_img_eff_ctrl_enable bit(0) +#define rkisp1_cif_img_eff_ctrl_mode_blackwhite (0 << 1) +#define rkisp1_cif_img_eff_ctrl_mode_negative bit(1) +#define rkisp1_cif_img_eff_ctrl_mode_sepia (2 << 1) +#define rkisp1_cif_img_eff_ctrl_mode_color_sel (3 << 1) +#define rkisp1_cif_img_eff_ctrl_mode_emboss (4 << 1) +#define rkisp1_cif_img_eff_ctrl_mode_sketch (5 << 1) +#define rkisp1_cif_img_eff_ctrl_mode_sharpen (6 << 1) +#define rkisp1_cif_img_eff_ctrl_cfg_upd bit(4) +#define rkisp1_cif_img_eff_ctrl_ycbcr_full bit(5) + +#define rkisp1_cif_img_eff_ctrl_mode_blackwhite_shift 0 +#define 
rkisp1_cif_img_eff_ctrl_mode_negative_shift 1 +#define rkisp1_cif_img_eff_ctrl_mode_sepia_shift 2 +#define rkisp1_cif_img_eff_ctrl_mode_color_sel_shift 3 +#define rkisp1_cif_img_eff_ctrl_mode_emboss_shift 4 +#define rkisp1_cif_img_eff_ctrl_mode_sketch_shift 5 +#define rkisp1_cif_img_eff_ctrl_mode_sharpen_shift 6 +#define rkisp1_cif_img_eff_ctrl_mode_mask 0xe + +/* img_eff_color_sel */ +#define rkisp1_cif_img_eff_color_rgb 0 +#define rkisp1_cif_img_eff_color_b bit(0) +#define rkisp1_cif_img_eff_color_g (2 << 0) +#define rkisp1_cif_img_eff_color_gb (3 << 0) +#define rkisp1_cif_img_eff_color_r (4 << 0) +#define rkisp1_cif_img_eff_color_rb (5 << 0) +#define rkisp1_cif_img_eff_color_rg (6 << 0) +#define rkisp1_cif_img_eff_color_rgb2 (7 << 0) + +/* mipi_ctrl */ +#define rkisp1_cif_mipi_ctrl_output_ena bit(0) +#define rkisp1_cif_mipi_ctrl_shutdownlanes(a) (((a) & 0xf) << 8) +#define rkisp1_cif_mipi_ctrl_num_lanes(a) (((a) & 0x3) << 12) +#define rkisp1_cif_mipi_ctrl_err_sot_hs_skip bit(16) +#define rkisp1_cif_mipi_ctrl_err_sot_sync_hs_skip bit(17) +#define rkisp1_cif_mipi_ctrl_clocklane_ena bit(18) + +/* mipi_data_sel */ +#define rkisp1_cif_mipi_data_sel_vc(a) (((a) & 0x3) << 6) +#define rkisp1_cif_mipi_data_sel_dt(a) (((a) & 0x3f) << 0) +/* mipi data_type */ +#define rkisp1_cif_csi2_dt_yuv420_8b 0x18 +#define rkisp1_cif_csi2_dt_yuv420_10b 0x19 +#define rkisp1_cif_csi2_dt_yuv422_8b 0x1e +#define rkisp1_cif_csi2_dt_yuv422_10b 0x1f +#define rkisp1_cif_csi2_dt_rgb565 0x22 +#define rkisp1_cif_csi2_dt_rgb666 0x23 +#define rkisp1_cif_csi2_dt_rgb888 0x24 +#define rkisp1_cif_csi2_dt_raw8 0x2a +#define rkisp1_cif_csi2_dt_raw10 0x2b +#define rkisp1_cif_csi2_dt_raw12 0x2c + +/* mipi_imsc, mipi_ris, mipi_mis, mipi_icr, mipi_isr */ +#define rkisp1_cif_mipi_sync_fifo_ovflw(a) (((a) & 0xf) << 0) +#define rkisp1_cif_mipi_err_sot(a) (((a) & 0xf) << 4) +#define rkisp1_cif_mipi_err_sot_sync(a) (((a) & 0xf) << 8) +#define rkisp1_cif_mipi_err_eot_sync(a) (((a) & 0xf) << 12) +#define 
rkisp1_cif_mipi_err_ctrl(a) (((a) & 0xf) << 16) +#define rkisp1_cif_mipi_err_protocol bit(20) +#define rkisp1_cif_mipi_err_ecc1 bit(21) +#define rkisp1_cif_mipi_err_ecc2 bit(22) +#define rkisp1_cif_mipi_err_cs bit(23) +#define rkisp1_cif_mipi_frame_end bit(24) +#define rkisp1_cif_mipi_add_data_ovflw bit(25) +#define rkisp1_cif_mipi_add_data_water_mark bit(26) + +#define rkisp1_cif_mipi_err_csi (rkisp1_cif_mipi_err_protocol | \ + rkisp1_cif_mipi_err_ecc1 | \ + rkisp1_cif_mipi_err_ecc2 | \ + rkisp1_cif_mipi_err_cs) + +#define rkisp1_cif_mipi_err_dphy (rkisp1_cif_mipi_err_sot(3) | \ + rkisp1_cif_mipi_err_sot_sync(3) | \ + rkisp1_cif_mipi_err_eot_sync(3) | \ + rkisp1_cif_mipi_err_ctrl(3)) + +/* super_impose */ +#define rkisp1_cif_super_imp_ctrl_normal_mode bit(0) +#define rkisp1_cif_super_imp_ctrl_ref_img_mem bit(1) +#define rkisp1_cif_super_imp_ctrl_transp_dis bit(2) + +/* isp histogram calculation : isp_hist_prop */ +#define rkisp1_cif_isp_hist_prop_mode_dis (0 << 0) +#define rkisp1_cif_isp_hist_prop_mode_rgb bit(0) +#define rkisp1_cif_isp_hist_prop_mode_red (2 << 0) +#define rkisp1_cif_isp_hist_prop_mode_green (3 << 0) +#define rkisp1_cif_isp_hist_prop_mode_blue (4 << 0) +#define rkisp1_cif_isp_hist_prop_mode_lum (5 << 0) +#define rkisp1_cif_isp_hist_prop_mode_mask 0x7 +#define rkisp1_cif_isp_hist_prediv_set(x) (((x) & 0x7f) << 3) +#define rkisp1_cif_isp_hist_weight_set(v0, v1, v2, v3) \ + (((v0) & 0x1f) | (((v1) & 0x1f) << 8) |\ + (((v2) & 0x1f) << 16) | \ + (((v3) & 0x1f) << 24)) + +#define rkisp1_cif_isp_hist_window_offset_reserved 0xfffff000 +#define rkisp1_cif_isp_hist_window_size_reserved 0xfffff800 +#define rkisp1_cif_isp_hist_weight_reserved 0xe0e0e0e0 +#define rkisp1_cif_isp_max_hist_predivider 0x0000007f +#define rkisp1_cif_isp_hist_row_num 5 +#define rkisp1_cif_isp_hist_column_num 5 + +/* auto focus measurement: isp_afm_ctrl */ +#define rkisp1_isp_afm_ctrl_enable bit(0) + +/* shutter control */ +#define rkisp1_cif_isp_sh_ctrl_sh_ena bit(0) +#define 
rkisp1_cif_isp_sh_ctrl_rep_en bit(1) +#define rkisp1_cif_isp_sh_ctrl_src_sh_trig bit(2) +#define rkisp1_cif_isp_sh_ctrl_edge_pos bit(3) +#define rkisp1_cif_isp_sh_ctrl_pol_low bit(4) + +/* flash module */ +/* isp_flash_cmd */ +#define rkisp1_cifflash_cmd_prelight_on bit(0) +#define rkisp1_cifflash_cmd_flash_on bit(1) +#define rkisp1_cifflash_cmd_pre_flash_on bit(2) +/* isp_flash_config */ +#define rkisp1_cifflash_config_prelight_end bit(0) +#define rkisp1_cifflash_config_vsync_pos bit(1) +#define rkisp1_cifflash_config_prelight_low bit(2) +#define rkisp1_cifflash_config_src_fl_trig bit(3) +#define rkisp1_cifflash_config_delay(a) (((a) & 0xf) << 4) + +/* demosaic: isp_demosaic */ +#define rkisp1_cif_isp_demosaic_bypass bit(10) +#define rkisp1_cif_isp_demosaic_th(x) ((x) & 0xff) + +/* awb */ +/* isp_awb_prop */ +#define rkisp1_cif_isp_awb_ymax_cmp_en bit(2) +#define rkisp1_cif_isp_awb_ymax_read(x) (((x) >> 2) & 1) +#define rkisp1_cif_isp_awb_mode_rgb_en ((1 << 31) | (0x2 << 0)) +#define rkisp1_cif_isp_awb_mode_ycbcr_en ((0 << 31) | (0x2 << 0)) +#define rkisp1_cif_isp_awb_mode_ycbcr_en ((0 << 31) | (0x2 << 0)) +#define rkisp1_cif_isp_awb_mode_mask_none 0xfffffffc +#define rkisp1_cif_isp_awb_mode_read(x) ((x) & 3) +/* isp_awb_gain_rb, isp_awb_gain_g */ +#define rkisp1_cif_isp_awb_gain_r_set(x) (((x) & 0x3ff) << 16) +#define rkisp1_cif_isp_awb_gain_r_read(x) (((x) >> 16) & 0x3ff) +#define rkisp1_cif_isp_awb_gain_b_set(x) ((x) & 0x3fff) +#define rkisp1_cif_isp_awb_gain_b_read(x) ((x) & 0x3fff) +/* isp_awb_ref */ +#define rkisp1_cif_isp_awb_ref_cr_set(x) (((x) & 0xff) << 8) +#define rkisp1_cif_isp_awb_ref_cr_read(x) (((x) >> 8) & 0xff) +#define rkisp1_cif_isp_awb_ref_cb_read(x) ((x) & 0xff) +/* isp_awb_thresh */ +#define rkisp1_cif_isp_awb_max_cs_set(x) (((x) & 0xff) << 8) +#define rkisp1_cif_isp_awb_max_cs_read(x) (((x) >> 8) & 0xff) +#define rkisp1_cif_isp_awb_min_c_read(x) ((x) & 0xff) +#define rkisp1_cif_isp_awb_min_y_set(x) (((x) & 0xff) << 16) +#define 
rkisp1_cif_isp_awb_min_y_read(x) (((x) >> 16) & 0xff) +#define rkisp1_cif_isp_awb_max_y_set(x) (((x) & 0xff) << 24) +#define rkisp1_cif_isp_awb_max_y_read(x) (((x) >> 24) & 0xff) +/* isp_awb_mean */ +#define rkisp1_cif_isp_awb_get_mean_cr_r(x) ((x) & 0xff) +#define rkisp1_cif_isp_awb_get_mean_cb_b(x) (((x) >> 8) & 0xff) +#define rkisp1_cif_isp_awb_get_mean_y_g(x) (((x) >> 16) & 0xff) +/* isp_awb_white_cnt */ +#define rkisp1_cif_isp_awb_get_pixel_cnt(x) ((x) & 0x3ffffff) + +#define rkisp1_cif_isp_awb_gains_max_val 0x000003ff +#define rkisp1_cif_isp_awb_window_offset_max 0x00000fff +#define rkisp1_cif_isp_awb_window_max_size 0x00001fff +#define rkisp1_cif_isp_awb_cbcr_max_ref 0x000000ff +#define rkisp1_cif_isp_awb_thres_max_yc 0x000000ff + +/* ae */ +/* isp_exp_ctrl */ +#define rkisp1_cif_isp_exp_ena bit(0) +#define rkisp1_cif_isp_exp_ctrl_autostop bit(1) +/* + *'1' luminance calculation according to y=(r+g+b) x 0.332 (85/256) + *'0' luminance calculation according to y=16+0.25r+0.5g+0.1094b + */ +#define rkisp1_cif_isp_exp_ctrl_measmode_1 bit(31) + +/* isp_exp_h_size */ +#define rkisp1_cif_isp_exp_h_size_set(x) ((x) & 0x7ff) +#define rkisp1_cif_isp_exp_height_mask 0x000007ff +/* isp_exp_v_size : vertical size must be a multiple of 2). 
*/ +#define rkisp1_cif_isp_exp_v_size_set(x) ((x) & 0x7fe) + +/* isp_exp_h_offset */ +#define rkisp1_cif_isp_exp_h_offset_set(x) ((x) & 0x1fff) +#define rkisp1_cif_isp_exp_max_hoffs 2424 +/* isp_exp_v_offset */ +#define rkisp1_cif_isp_exp_v_offset_set(x) ((x) & 0x1fff) +#define rkisp1_cif_isp_exp_max_voffs 1806 + +#define rkisp1_cif_isp_exp_row_num 5 +#define rkisp1_cif_isp_exp_column_num 5 +#define rkisp1_cif_isp_exp_num_luma_regs \ + (rkisp1_cif_isp_exp_row_num * rkisp1_cif_isp_exp_column_num) +#define rkisp1_cif_isp_exp_block_max_hsize 516 +#define rkisp1_cif_isp_exp_block_min_hsize 35 +#define rkisp1_cif_isp_exp_block_max_vsize 390 +#define rkisp1_cif_isp_exp_block_min_vsize 28 +#define rkisp1_cif_isp_exp_max_hsize \ + (rkisp1_cif_isp_exp_block_max_hsize * rkisp1_cif_isp_exp_column_num + 1) +#define rkisp1_cif_isp_exp_min_hsize \ + (rkisp1_cif_isp_exp_block_min_hsize * rkisp1_cif_isp_exp_column_num + 1) +#define rkisp1_cif_isp_exp_max_vsize \ + (rkisp1_cif_isp_exp_block_max_vsize * rkisp1_cif_isp_exp_row_num + 1) +#define rkisp1_cif_isp_exp_min_vsize \ + (rkisp1_cif_isp_exp_block_min_vsize * rkisp1_cif_isp_exp_row_num + 1) + +/* lsc: isp_lsc_ctrl */ +#define rkisp1_cif_isp_lsc_ctrl_ena bit(0) +#define rkisp1_cif_isp_lsc_sect_size_reserved 0xfc00fc00 +#define rkisp1_cif_isp_lsc_grad_reserved 0xf000f000 +#define rkisp1_cif_isp_lsc_sample_reserved 0xf000f000 +#define rkisp1_cif_isp_lsc_sectors_max 17 +#define rkisp1_cif_isp_lsc_table_data(v0, v1) \ + (((v0) & 0xfff) | (((v1) & 0xfff) << 12)) +#define rkisp1_cif_isp_lsc_sect_size(v0, v1) \ + (((v0) & 0xfff) | (((v1) & 0xfff) << 16)) +#define rkisp1_cif_isp_lsc_grad_size(v0, v1) \ + (((v0) & 0xfff) | (((v1) & 0xfff) << 16)) + +/* lsc: isp_lsc_table_sel */ +#define rkisp1_cif_isp_lsc_table_0 0 +#define rkisp1_cif_isp_lsc_table_1 1 + +/* lsc: isp_lsc_status */ +#define rkisp1_cif_isp_lsc_active_table bit(1) +#define rkisp1_cif_isp_lsc_table_address_0 0 +#define rkisp1_cif_isp_lsc_table_address_153 153 + +/* flt */ +/* 
isp_filt_mode */ +#define rkisp1_cif_isp_flt_ena bit(0) + +/* + * 0: green filter static mode (active filter factor = filt_fac_mid) + * 1: dynamic noise reduction/sharpen default + */ +#define rkisp1_cif_isp_flt_mode_dnr bit(1) +#define rkisp1_cif_isp_flt_mode_max 1 +#define rkisp1_cif_isp_flt_chroma_v_mode(x) (((x) & 0x3) << 4) +#define rkisp1_cif_isp_flt_chroma_h_mode(x) (((x) & 0x3) << 6) +#define rkisp1_cif_isp_flt_chroma_mode_max 3 +#define rkisp1_cif_isp_flt_green_stage1(x) (((x) & 0xf) << 8) +#define rkisp1_cif_isp_flt_green_stage1_max 8 +#define rkisp1_cif_isp_flt_thread_reserved 0xfffffc00 +#define rkisp1_cif_isp_flt_fac_reserved 0xffffffc0 +#define rkisp1_cif_isp_flt_lum_weight_reserved 0xfff80000 + +#define rkisp1_cif_isp_ctk_coeff_reserved 0xfffff800 +#define rkisp1_cif_isp_xtalk_offset_reserved 0xfffff000 + +/* goc */ +#define rkisp1_cif_isp_gamma_out_mode_equ bit(0) +#define rkisp1_cif_isp_goc_mode_max 1 +#define rkisp1_cif_isp_goc_reserved 0xfffff800 +/* isp_ctrl bit 11*/ +#define rkisp1_cif_isp_ctrl_isp_gamma_out_ena_read(x) (((x) >> 11) & 1) + +/* dpcc */ +/* isp_dpcc_mode */ +#define rkisp1_cif_isp_dpcc_ena bit(0) +#define rkisp1_cif_isp_dpcc_mode_max 0x07 +#define rkisp1_cif_isp_dpcc_outputmode_max 0x0f +#define rkisp1_cif_isp_dpcc_setuse_max 0x0f +#define rkisp1_cif_isp_dpcc_methods_set_reserved 0xffffe000 +#define rkisp1_cif_isp_dpcc_line_thresh_reserved 0xffff0000 +#define rkisp1_cif_isp_dpcc_line_mad_fac_reserved 0xffffc0c0 +#define rkisp1_cif_isp_dpcc_pg_fac_reserved 0xffffc0c0 +#define rkisp1_cif_isp_dpcc_rnd_thresh_reserved 0xffff0000 +#define rkisp1_cif_isp_dpcc_rg_fac_reserved 0xffffc0c0 +#define rkisp1_cif_isp_dpcc_ro_limit_reserved 0xfffff000 +#define rkisp1_cif_isp_dpcc_rnd_offs_reserved 0xfffff000 + +/* bls */ +/* isp_bls_ctrl */ +#define rkisp1_cif_isp_bls_ena bit(0) +#define rkisp1_cif_isp_bls_mode_measured bit(1) +#define rkisp1_cif_isp_bls_mode_fixed 0 +#define rkisp1_cif_isp_bls_window_1 bit(2) +#define 
rkisp1_cif_isp_bls_window_2 (2 << 2) + +/* gamma-in */ +#define rkisp1_cifisp_degamma_x_reserved \ + ((1 << 31) | (1 << 27) | (1 << 23) | (1 << 19) |\ + (1 << 15) | (1 << 11) | (1 << 7) | (1 << 3)) +#define rkisp1_cifisp_degamma_y_reserved 0xfffff000 + +/* afm */ +#define rkisp1_cif_isp_afm_ena bit(0) +#define rkisp1_cif_isp_afm_thres_reserved 0xffff0000 +#define rkisp1_cif_isp_afm_var_shift_reserved 0xfff8fff8 +#define rkisp1_cif_isp_afm_window_x_reserved 0xe000 +#define rkisp1_cif_isp_afm_window_y_reserved 0xf000 +#define rkisp1_cif_isp_afm_window_x_min 0x5 +#define rkisp1_cif_isp_afm_window_y_min 0x2 +#define rkisp1_cif_isp_afm_window_x(x) (((x) & 0x1fff) << 16) +#define rkisp1_cif_isp_afm_window_y(x) ((x) & 0x1fff) + +/* dpf */ +#define rkisp1_cif_isp_dpf_mode_en bit(0) +#define rkisp1_cif_isp_dpf_mode_b_flt_dis bit(1) +#define rkisp1_cif_isp_dpf_mode_gb_flt_dis bit(2) +#define rkisp1_cif_isp_dpf_mode_gr_flt_dis bit(3) +#define rkisp1_cif_isp_dpf_mode_r_flt_dis bit(4) +#define rkisp1_cif_isp_dpf_mode_rb_fltsize_9x9 bit(5) +#define rkisp1_cif_isp_dpf_mode_nll_segmentation bit(6) +#define rkisp1_cif_isp_dpf_mode_awb_gain_comp bit(7) +#define rkisp1_cif_isp_dpf_mode_lsc_gain_comp bit(8) +#define rkisp1_cif_isp_dpf_mode_use_nf_gain bit(9) +#define rkisp1_cif_isp_dpf_nf_gain_reserved 0xfffff000 +#define rkisp1_cif_isp_dpf_spatial_coeff_max 0x1f +#define rkisp1_cif_isp_dpf_nll_coeff_n_max 0x3ff + +/* =================================================================== */ +/* cif registers */ +/* =================================================================== */ +#define rkisp1_cif_ctrl_base 0x00000000 +#define rkisp1_cif_ccl (rkisp1_cif_ctrl_base + 0x00000000) +#define rkisp1_cif_vi_id (rkisp1_cif_ctrl_base + 0x00000008) +#define rkisp1_cif_iccl (rkisp1_cif_ctrl_base + 0x00000010) +#define rkisp1_cif_ircl (rkisp1_cif_ctrl_base + 0x00000014) +#define rkisp1_cif_vi_dpcl (rkisp1_cif_ctrl_base + 0x00000018) + +#define rkisp1_cif_img_eff_base 0x00000200 +#define 
rkisp1_cif_img_eff_ctrl (rkisp1_cif_img_eff_base + 0x00000000) +#define rkisp1_cif_img_eff_color_sel (rkisp1_cif_img_eff_base + 0x00000004) +#define rkisp1_cif_img_eff_mat_1 (rkisp1_cif_img_eff_base + 0x00000008) +#define rkisp1_cif_img_eff_mat_2 (rkisp1_cif_img_eff_base + 0x0000000c) +#define rkisp1_cif_img_eff_mat_3 (rkisp1_cif_img_eff_base + 0x00000010) +#define rkisp1_cif_img_eff_mat_4 (rkisp1_cif_img_eff_base + 0x00000014) +#define rkisp1_cif_img_eff_mat_5 (rkisp1_cif_img_eff_base + 0x00000018) +#define rkisp1_cif_img_eff_tint (rkisp1_cif_img_eff_base + 0x0000001c) +#define rkisp1_cif_img_eff_ctrl_shd (rkisp1_cif_img_eff_base + 0x00000020) +#define rkisp1_cif_img_eff_sharpen (rkisp1_cif_img_eff_base + 0x00000024) + +#define rkisp1_cif_super_imp_base 0x00000300 +#define rkisp1_cif_super_imp_ctrl (rkisp1_cif_super_imp_base + 0x00000000) +#define rkisp1_cif_super_imp_offset_x (rkisp1_cif_super_imp_base + 0x00000004) +#define rkisp1_cif_super_imp_offset_y (rkisp1_cif_super_imp_base + 0x00000008) +#define rkisp1_cif_super_imp_color_y (rkisp1_cif_super_imp_base + 0x0000000c) +#define rkisp1_cif_super_imp_color_cb (rkisp1_cif_super_imp_base + 0x00000010) +#define rkisp1_cif_super_imp_color_cr (rkisp1_cif_super_imp_base + 0x00000014) + +#define rkisp1_cif_isp_base 0x00000400 +#define rkisp1_cif_isp_ctrl (rkisp1_cif_isp_base + 0x00000000) +#define rkisp1_cif_isp_acq_prop (rkisp1_cif_isp_base + 0x00000004) +#define rkisp1_cif_isp_acq_h_offs (rkisp1_cif_isp_base + 0x00000008) +#define rkisp1_cif_isp_acq_v_offs (rkisp1_cif_isp_base + 0x0000000c) +#define rkisp1_cif_isp_acq_h_size (rkisp1_cif_isp_base + 0x00000010) +#define rkisp1_cif_isp_acq_v_size (rkisp1_cif_isp_base + 0x00000014) +#define rkisp1_cif_isp_acq_nr_frames (rkisp1_cif_isp_base + 0x00000018) +#define rkisp1_cif_isp_gamma_dx_lo (rkisp1_cif_isp_base + 0x0000001c) +#define rkisp1_cif_isp_gamma_dx_hi (rkisp1_cif_isp_base + 0x00000020) +#define rkisp1_cif_isp_gamma_r_y0 (rkisp1_cif_isp_base + 0x00000024) +#define 
rkisp1_cif_isp_gamma_r_y1 (rkisp1_cif_isp_base + 0x00000028) +#define rkisp1_cif_isp_gamma_r_y2 (rkisp1_cif_isp_base + 0x0000002c) +#define rkisp1_cif_isp_gamma_r_y3 (rkisp1_cif_isp_base + 0x00000030) +#define rkisp1_cif_isp_gamma_r_y4 (rkisp1_cif_isp_base + 0x00000034) +#define rkisp1_cif_isp_gamma_r_y5 (rkisp1_cif_isp_base + 0x00000038) +#define rkisp1_cif_isp_gamma_r_y6 (rkisp1_cif_isp_base + 0x0000003c) +#define rkisp1_cif_isp_gamma_r_y7 (rkisp1_cif_isp_base + 0x00000040) +#define rkisp1_cif_isp_gamma_r_y8 (rkisp1_cif_isp_base + 0x00000044) +#define rkisp1_cif_isp_gamma_r_y9 (rkisp1_cif_isp_base + 0x00000048) +#define rkisp1_cif_isp_gamma_r_y10 (rkisp1_cif_isp_base + 0x0000004c) +#define rkisp1_cif_isp_gamma_r_y11 (rkisp1_cif_isp_base + 0x00000050) +#define rkisp1_cif_isp_gamma_r_y12 (rkisp1_cif_isp_base + 0x00000054) +#define rkisp1_cif_isp_gamma_r_y13 (rkisp1_cif_isp_base + 0x00000058) +#define rkisp1_cif_isp_gamma_r_y14 (rkisp1_cif_isp_base + 0x0000005c) +#define rkisp1_cif_isp_gamma_r_y15 (rkisp1_cif_isp_base + 0x00000060) +#define rkisp1_cif_isp_gamma_r_y16 (rkisp1_cif_isp_base + 0x00000064) +#define rkisp1_cif_isp_gamma_g_y0 (rkisp1_cif_isp_base + 0x00000068) +#define rkisp1_cif_isp_gamma_g_y1 (rkisp1_cif_isp_base + 0x0000006c) +#define rkisp1_cif_isp_gamma_g_y2 (rkisp1_cif_isp_base + 0x00000070) +#define rkisp1_cif_isp_gamma_g_y3 (rkisp1_cif_isp_base + 0x00000074) +#define rkisp1_cif_isp_gamma_g_y4 (rkisp1_cif_isp_base + 0x00000078) +#define rkisp1_cif_isp_gamma_g_y5 (rkisp1_cif_isp_base + 0x0000007c) +#define rkisp1_cif_isp_gamma_g_y6 (rkisp1_cif_isp_base + 0x00000080) +#define rkisp1_cif_isp_gamma_g_y7 (rkisp1_cif_isp_base + 0x00000084) +#define rkisp1_cif_isp_gamma_g_y8 (rkisp1_cif_isp_base + 0x00000088) +#define rkisp1_cif_isp_gamma_g_y9 (rkisp1_cif_isp_base + 0x0000008c) +#define rkisp1_cif_isp_gamma_g_y10 (rkisp1_cif_isp_base + 0x00000090) +#define rkisp1_cif_isp_gamma_g_y11 (rkisp1_cif_isp_base + 0x00000094) +#define rkisp1_cif_isp_gamma_g_y12 
(rkisp1_cif_isp_base + 0x00000098) +#define rkisp1_cif_isp_gamma_g_y13 (rkisp1_cif_isp_base + 0x0000009c) +#define rkisp1_cif_isp_gamma_g_y14 (rkisp1_cif_isp_base + 0x000000a0) +#define rkisp1_cif_isp_gamma_g_y15 (rkisp1_cif_isp_base + 0x000000a4) +#define rkisp1_cif_isp_gamma_g_y16 (rkisp1_cif_isp_base + 0x000000a8) +#define rkisp1_cif_isp_gamma_b_y0 (rkisp1_cif_isp_base + 0x000000ac) +#define rkisp1_cif_isp_gamma_b_y1 (rkisp1_cif_isp_base + 0x000000b0) +#define rkisp1_cif_isp_gamma_b_y2 (rkisp1_cif_isp_base + 0x000000b4) +#define rkisp1_cif_isp_gamma_b_y3 (rkisp1_cif_isp_base + 0x000000b8) +#define rkisp1_cif_isp_gamma_b_y4 (rkisp1_cif_isp_base + 0x000000bc) +#define rkisp1_cif_isp_gamma_b_y5 (rkisp1_cif_isp_base + 0x000000c0) +#define rkisp1_cif_isp_gamma_b_y6 (rkisp1_cif_isp_base + 0x000000c4) +#define rkisp1_cif_isp_gamma_b_y7 (rkisp1_cif_isp_base + 0x000000c8) +#define rkisp1_cif_isp_gamma_b_y8 (rkisp1_cif_isp_base + 0x000000cc) +#define rkisp1_cif_isp_gamma_b_y9 (rkisp1_cif_isp_base + 0x000000d0) +#define rkisp1_cif_isp_gamma_b_y10 (rkisp1_cif_isp_base + 0x000000d4) +#define rkisp1_cif_isp_gamma_b_y11 (rkisp1_cif_isp_base + 0x000000d8) +#define rkisp1_cif_isp_gamma_b_y12 (rkisp1_cif_isp_base + 0x000000dc) +#define rkisp1_cif_isp_gamma_b_y13 (rkisp1_cif_isp_base + 0x000000e0) +#define rkisp1_cif_isp_gamma_b_y14 (rkisp1_cif_isp_base + 0x000000e4) +#define rkisp1_cif_isp_gamma_b_y15 (rkisp1_cif_isp_base + 0x000000e8) +#define rkisp1_cif_isp_gamma_b_y16 (rkisp1_cif_isp_base + 0x000000ec) +#define rkisp1_cif_isp_awb_prop (rkisp1_cif_isp_base + 0x00000110) +#define rkisp1_cif_isp_awb_wnd_h_offs (rkisp1_cif_isp_base + 0x00000114) +#define rkisp1_cif_isp_awb_wnd_v_offs (rkisp1_cif_isp_base + 0x00000118) +#define rkisp1_cif_isp_awb_wnd_h_size (rkisp1_cif_isp_base + 0x0000011c) +#define rkisp1_cif_isp_awb_wnd_v_size (rkisp1_cif_isp_base + 0x00000120) +#define rkisp1_cif_isp_awb_frames (rkisp1_cif_isp_base + 0x00000124) +#define rkisp1_cif_isp_awb_ref 
(rkisp1_cif_isp_base + 0x00000128) +#define rkisp1_cif_isp_awb_thresh (rkisp1_cif_isp_base + 0x0000012c) +#define rkisp1_cif_isp_awb_gain_g (rkisp1_cif_isp_base + 0x00000138) +#define rkisp1_cif_isp_awb_gain_rb (rkisp1_cif_isp_base + 0x0000013c) +#define rkisp1_cif_isp_awb_white_cnt (rkisp1_cif_isp_base + 0x00000140) +#define rkisp1_cif_isp_awb_mean (rkisp1_cif_isp_base + 0x00000144) +#define rkisp1_cif_isp_cc_coeff_0 (rkisp1_cif_isp_base + 0x00000170) +#define rkisp1_cif_isp_cc_coeff_1 (rkisp1_cif_isp_base + 0x00000174) +#define rkisp1_cif_isp_cc_coeff_2 (rkisp1_cif_isp_base + 0x00000178) +#define rkisp1_cif_isp_cc_coeff_3 (rkisp1_cif_isp_base + 0x0000017c) +#define rkisp1_cif_isp_cc_coeff_4 (rkisp1_cif_isp_base + 0x00000180) +#define rkisp1_cif_isp_cc_coeff_5 (rkisp1_cif_isp_base + 0x00000184) +#define rkisp1_cif_isp_cc_coeff_6 (rkisp1_cif_isp_base + 0x00000188) +#define rkisp1_cif_isp_cc_coeff_7 (rkisp1_cif_isp_base + 0x0000018c) +#define rkisp1_cif_isp_cc_coeff_8 (rkisp1_cif_isp_base + 0x00000190) +#define rkisp1_cif_isp_out_h_offs (rkisp1_cif_isp_base + 0x00000194) +#define rkisp1_cif_isp_out_v_offs (rkisp1_cif_isp_base + 0x00000198) +#define rkisp1_cif_isp_out_h_size (rkisp1_cif_isp_base + 0x0000019c) +#define rkisp1_cif_isp_out_v_size (rkisp1_cif_isp_base + 0x000001a0) +#define rkisp1_cif_isp_demosaic (rkisp1_cif_isp_base + 0x000001a4) +#define rkisp1_cif_isp_flags_shd (rkisp1_cif_isp_base + 0x000001a8) +#define rkisp1_cif_isp_out_h_offs_shd (rkisp1_cif_isp_base + 0x000001ac) +#define rkisp1_cif_isp_out_v_offs_shd (rkisp1_cif_isp_base + 0x000001b0) +#define rkisp1_cif_isp_out_h_size_shd (rkisp1_cif_isp_base + 0x000001b4) +#define rkisp1_cif_isp_out_v_size_shd (rkisp1_cif_isp_base + 0x000001b8) +#define rkisp1_cif_isp_imsc (rkisp1_cif_isp_base + 0x000001bc) +#define rkisp1_cif_isp_ris (rkisp1_cif_isp_base + 0x000001c0) +#define rkisp1_cif_isp_mis (rkisp1_cif_isp_base + 0x000001c4) +#define rkisp1_cif_isp_icr (rkisp1_cif_isp_base + 0x000001c8) +#define 
rkisp1_cif_isp_isr (rkisp1_cif_isp_base + 0x000001cc) +#define rkisp1_cif_isp_ct_coeff_0 (rkisp1_cif_isp_base + 0x000001d0) +#define rkisp1_cif_isp_ct_coeff_1 (rkisp1_cif_isp_base + 0x000001d4) +#define rkisp1_cif_isp_ct_coeff_2 (rkisp1_cif_isp_base + 0x000001d8) +#define rkisp1_cif_isp_ct_coeff_3 (rkisp1_cif_isp_base + 0x000001dc) +#define rkisp1_cif_isp_ct_coeff_4 (rkisp1_cif_isp_base + 0x000001e0) +#define rkisp1_cif_isp_ct_coeff_5 (rkisp1_cif_isp_base + 0x000001e4) +#define rkisp1_cif_isp_ct_coeff_6 (rkisp1_cif_isp_base + 0x000001e8) +#define rkisp1_cif_isp_ct_coeff_7 (rkisp1_cif_isp_base + 0x000001ec) +#define rkisp1_cif_isp_ct_coeff_8 (rkisp1_cif_isp_base + 0x000001f0) +#define rkisp1_cif_isp_gamma_out_mode (rkisp1_cif_isp_base + 0x000001f4) +#define rkisp1_cif_isp_gamma_out_y_0 (rkisp1_cif_isp_base + 0x000001f8) +#define rkisp1_cif_isp_gamma_out_y_1 (rkisp1_cif_isp_base + 0x000001fc) +#define rkisp1_cif_isp_gamma_out_y_2 (rkisp1_cif_isp_base + 0x00000200) +#define rkisp1_cif_isp_gamma_out_y_3 (rkisp1_cif_isp_base + 0x00000204) +#define rkisp1_cif_isp_gamma_out_y_4 (rkisp1_cif_isp_base + 0x00000208) +#define rkisp1_cif_isp_gamma_out_y_5 (rkisp1_cif_isp_base + 0x0000020c) +#define rkisp1_cif_isp_gamma_out_y_6 (rkisp1_cif_isp_base + 0x00000210) +#define rkisp1_cif_isp_gamma_out_y_7 (rkisp1_cif_isp_base + 0x00000214) +#define rkisp1_cif_isp_gamma_out_y_8 (rkisp1_cif_isp_base + 0x00000218) +#define rkisp1_cif_isp_gamma_out_y_9 (rkisp1_cif_isp_base + 0x0000021c) +#define rkisp1_cif_isp_gamma_out_y_10 (rkisp1_cif_isp_base + 0x00000220) +#define rkisp1_cif_isp_gamma_out_y_11 (rkisp1_cif_isp_base + 0x00000224) +#define rkisp1_cif_isp_gamma_out_y_12 (rkisp1_cif_isp_base + 0x00000228) +#define rkisp1_cif_isp_gamma_out_y_13 (rkisp1_cif_isp_base + 0x0000022c) +#define rkisp1_cif_isp_gamma_out_y_14 (rkisp1_cif_isp_base + 0x00000230) +#define rkisp1_cif_isp_gamma_out_y_15 (rkisp1_cif_isp_base + 0x00000234) +#define rkisp1_cif_isp_gamma_out_y_16 (rkisp1_cif_isp_base + 
0x00000238) +#define rkisp1_cif_isp_err (rkisp1_cif_isp_base + 0x0000023c) +#define rkisp1_cif_isp_err_clr (rkisp1_cif_isp_base + 0x00000240) +#define rkisp1_cif_isp_frame_count (rkisp1_cif_isp_base + 0x00000244) +#define rkisp1_cif_isp_ct_offset_r (rkisp1_cif_isp_base + 0x00000248) +#define rkisp1_cif_isp_ct_offset_g (rkisp1_cif_isp_base + 0x0000024c) +#define rkisp1_cif_isp_ct_offset_b (rkisp1_cif_isp_base + 0x00000250) + +#define rkisp1_cif_isp_flash_base 0x00000660 +#define rkisp1_cif_isp_flash_cmd (rkisp1_cif_isp_flash_base + 0x00000000) +#define rkisp1_cif_isp_flash_config (rkisp1_cif_isp_flash_base + 0x00000004) +#define rkisp1_cif_isp_flash_prediv (rkisp1_cif_isp_flash_base + 0x00000008) +#define rkisp1_cif_isp_flash_delay (rkisp1_cif_isp_flash_base + 0x0000000c) +#define rkisp1_cif_isp_flash_time (rkisp1_cif_isp_flash_base + 0x00000010) +#define rkisp1_cif_isp_flash_maxp (rkisp1_cif_isp_flash_base + 0x00000014) + +#define rkisp1_cif_isp_sh_base 0x00000680 +#define rkisp1_cif_isp_sh_ctrl (rkisp1_cif_isp_sh_base + 0x00000000) +#define rkisp1_cif_isp_sh_prediv (rkisp1_cif_isp_sh_base + 0x00000004) +#define rkisp1_cif_isp_sh_delay (rkisp1_cif_isp_sh_base + 0x00000008) +#define rkisp1_cif_isp_sh_time (rkisp1_cif_isp_sh_base + 0x0000000c) + +#define rkisp1_cif_c_proc_base 0x00000800 +#define rkisp1_cif_c_proc_ctrl (rkisp1_cif_c_proc_base + 0x00000000) +#define rkisp1_cif_c_proc_contrast (rkisp1_cif_c_proc_base + 0x00000004) +#define rkisp1_cif_c_proc_brightness (rkisp1_cif_c_proc_base + 0x00000008) +#define rkisp1_cif_c_proc_saturation (rkisp1_cif_c_proc_base + 0x0000000c) +#define rkisp1_cif_c_proc_hue (rkisp1_cif_c_proc_base + 0x00000010) + +#define rkisp1_cif_dual_crop_base 0x00000880 +#define rkisp1_cif_dual_crop_ctrl (rkisp1_cif_dual_crop_base + 0x00000000) +#define rkisp1_cif_dual_crop_m_h_offs (rkisp1_cif_dual_crop_base + 0x00000004) +#define rkisp1_cif_dual_crop_m_v_offs (rkisp1_cif_dual_crop_base + 0x00000008) +#define rkisp1_cif_dual_crop_m_h_size 
(rkisp1_cif_dual_crop_base + 0x0000000c) +#define rkisp1_cif_dual_crop_m_v_size (rkisp1_cif_dual_crop_base + 0x00000010) +#define rkisp1_cif_dual_crop_s_h_offs (rkisp1_cif_dual_crop_base + 0x00000014) +#define rkisp1_cif_dual_crop_s_v_offs (rkisp1_cif_dual_crop_base + 0x00000018) +#define rkisp1_cif_dual_crop_s_h_size (rkisp1_cif_dual_crop_base + 0x0000001c) +#define rkisp1_cif_dual_crop_s_v_size (rkisp1_cif_dual_crop_base + 0x00000020) +#define rkisp1_cif_dual_crop_m_h_offs_shd (rkisp1_cif_dual_crop_base + 0x00000024) +#define rkisp1_cif_dual_crop_m_v_offs_shd (rkisp1_cif_dual_crop_base + 0x00000028) +#define rkisp1_cif_dual_crop_m_h_size_shd (rkisp1_cif_dual_crop_base + 0x0000002c) +#define rkisp1_cif_dual_crop_m_v_size_shd (rkisp1_cif_dual_crop_base + 0x00000030) +#define rkisp1_cif_dual_crop_s_h_offs_shd (rkisp1_cif_dual_crop_base + 0x00000034) +#define rkisp1_cif_dual_crop_s_v_offs_shd (rkisp1_cif_dual_crop_base + 0x00000038) +#define rkisp1_cif_dual_crop_s_h_size_shd (rkisp1_cif_dual_crop_base + 0x0000003c) +#define rkisp1_cif_dual_crop_s_v_size_shd (rkisp1_cif_dual_crop_base + 0x00000040) + +#define rkisp1_cif_mrsz_base 0x00000c00 +#define rkisp1_cif_mrsz_ctrl (rkisp1_cif_mrsz_base + 0x00000000) +#define rkisp1_cif_mrsz_scale_hy (rkisp1_cif_mrsz_base + 0x00000004) +#define rkisp1_cif_mrsz_scale_hcb (rkisp1_cif_mrsz_base + 0x00000008) +#define rkisp1_cif_mrsz_scale_hcr (rkisp1_cif_mrsz_base + 0x0000000c) +#define rkisp1_cif_mrsz_scale_vy (rkisp1_cif_mrsz_base + 0x00000010) +#define rkisp1_cif_mrsz_scale_vc (rkisp1_cif_mrsz_base + 0x00000014) +#define rkisp1_cif_mrsz_phase_hy (rkisp1_cif_mrsz_base + 0x00000018) +#define rkisp1_cif_mrsz_phase_hc (rkisp1_cif_mrsz_base + 0x0000001c) +#define rkisp1_cif_mrsz_phase_vy (rkisp1_cif_mrsz_base + 0x00000020) +#define rkisp1_cif_mrsz_phase_vc (rkisp1_cif_mrsz_base + 0x00000024) +#define rkisp1_cif_mrsz_scale_lut_addr (rkisp1_cif_mrsz_base + 0x00000028) +#define rkisp1_cif_mrsz_scale_lut (rkisp1_cif_mrsz_base + 
0x0000002c) +#define rkisp1_cif_mrsz_ctrl_shd (rkisp1_cif_mrsz_base + 0x00000030) +#define rkisp1_cif_mrsz_scale_hy_shd (rkisp1_cif_mrsz_base + 0x00000034) +#define rkisp1_cif_mrsz_scale_hcb_shd (rkisp1_cif_mrsz_base + 0x00000038) +#define rkisp1_cif_mrsz_scale_hcr_shd (rkisp1_cif_mrsz_base + 0x0000003c) +#define rkisp1_cif_mrsz_scale_vy_shd (rkisp1_cif_mrsz_base + 0x00000040) +#define rkisp1_cif_mrsz_scale_vc_shd (rkisp1_cif_mrsz_base + 0x00000044) +#define rkisp1_cif_mrsz_phase_hy_shd (rkisp1_cif_mrsz_base + 0x00000048) +#define rkisp1_cif_mrsz_phase_hc_shd (rkisp1_cif_mrsz_base + 0x0000004c) +#define rkisp1_cif_mrsz_phase_vy_shd (rkisp1_cif_mrsz_base + 0x00000050) +#define rkisp1_cif_mrsz_phase_vc_shd (rkisp1_cif_mrsz_base + 0x00000054) + +#define rkisp1_cif_srsz_base 0x00001000 +#define rkisp1_cif_srsz_ctrl (rkisp1_cif_srsz_base + 0x00000000) +#define rkisp1_cif_srsz_scale_hy (rkisp1_cif_srsz_base + 0x00000004) +#define rkisp1_cif_srsz_scale_hcb (rkisp1_cif_srsz_base + 0x00000008) +#define rkisp1_cif_srsz_scale_hcr (rkisp1_cif_srsz_base + 0x0000000c) +#define rkisp1_cif_srsz_scale_vy (rkisp1_cif_srsz_base + 0x00000010) +#define rkisp1_cif_srsz_scale_vc (rkisp1_cif_srsz_base + 0x00000014) +#define rkisp1_cif_srsz_phase_hy (rkisp1_cif_srsz_base + 0x00000018) +#define rkisp1_cif_srsz_phase_hc (rkisp1_cif_srsz_base + 0x0000001c) +#define rkisp1_cif_srsz_phase_vy (rkisp1_cif_srsz_base + 0x00000020) +#define rkisp1_cif_srsz_phase_vc (rkisp1_cif_srsz_base + 0x00000024) +#define rkisp1_cif_srsz_scale_lut_addr (rkisp1_cif_srsz_base + 0x00000028) +#define rkisp1_cif_srsz_scale_lut (rkisp1_cif_srsz_base + 0x0000002c) +#define rkisp1_cif_srsz_ctrl_shd (rkisp1_cif_srsz_base + 0x00000030) +#define rkisp1_cif_srsz_scale_hy_shd (rkisp1_cif_srsz_base + 0x00000034) +#define rkisp1_cif_srsz_scale_hcb_shd (rkisp1_cif_srsz_base + 0x00000038) +#define rkisp1_cif_srsz_scale_hcr_shd (rkisp1_cif_srsz_base + 0x0000003c) +#define rkisp1_cif_srsz_scale_vy_shd (rkisp1_cif_srsz_base + 
0x00000040) +#define rkisp1_cif_srsz_scale_vc_shd (rkisp1_cif_srsz_base + 0x00000044) +#define rkisp1_cif_srsz_phase_hy_shd (rkisp1_cif_srsz_base + 0x00000048) +#define rkisp1_cif_srsz_phase_hc_shd (rkisp1_cif_srsz_base + 0x0000004c) +#define rkisp1_cif_srsz_phase_vy_shd (rkisp1_cif_srsz_base + 0x00000050) +#define rkisp1_cif_srsz_phase_vc_shd (rkisp1_cif_srsz_base + 0x00000054) + +#define rkisp1_cif_mi_base 0x00001400 +#define rkisp1_cif_mi_ctrl (rkisp1_cif_mi_base + 0x00000000) +#define rkisp1_cif_mi_init (rkisp1_cif_mi_base + 0x00000004) +#define rkisp1_cif_mi_mp_y_base_ad_init (rkisp1_cif_mi_base + 0x00000008) +#define rkisp1_cif_mi_mp_y_size_init (rkisp1_cif_mi_base + 0x0000000c) +#define rkisp1_cif_mi_mp_y_offs_cnt_init (rkisp1_cif_mi_base + 0x00000010) +#define rkisp1_cif_mi_mp_y_offs_cnt_start (rkisp1_cif_mi_base + 0x00000014) +#define rkisp1_cif_mi_mp_y_irq_offs_init (rkisp1_cif_mi_base + 0x00000018) +#define rkisp1_cif_mi_mp_cb_base_ad_init (rkisp1_cif_mi_base + 0x0000001c) +#define rkisp1_cif_mi_mp_cb_size_init (rkisp1_cif_mi_base + 0x00000020) +#define rkisp1_cif_mi_mp_cb_offs_cnt_init (rkisp1_cif_mi_base + 0x00000024) +#define rkisp1_cif_mi_mp_cb_offs_cnt_start (rkisp1_cif_mi_base + 0x00000028) +#define rkisp1_cif_mi_mp_cr_base_ad_init (rkisp1_cif_mi_base + 0x0000002c) +#define rkisp1_cif_mi_mp_cr_size_init (rkisp1_cif_mi_base + 0x00000030) +#define rkisp1_cif_mi_mp_cr_offs_cnt_init (rkisp1_cif_mi_base + 0x00000034) +#define rkisp1_cif_mi_mp_cr_offs_cnt_start (rkisp1_cif_mi_base + 0x00000038) +#define rkisp1_cif_mi_sp_y_base_ad_init (rkisp1_cif_mi_base + 0x0000003c) +#define rkisp1_cif_mi_sp_y_size_init (rkisp1_cif_mi_base + 0x00000040) +#define rkisp1_cif_mi_sp_y_offs_cnt_init (rkisp1_cif_mi_base + 0x00000044) +#define rkisp1_cif_mi_sp_y_offs_cnt_start (rkisp1_cif_mi_base + 0x00000048) +#define rkisp1_cif_mi_sp_y_llength (rkisp1_cif_mi_base + 0x0000004c) +#define rkisp1_cif_mi_sp_cb_base_ad_init (rkisp1_cif_mi_base + 0x00000050) +#define 
rkisp1_cif_mi_sp_cb_size_init (rkisp1_cif_mi_base + 0x00000054) +#define rkisp1_cif_mi_sp_cb_offs_cnt_init (rkisp1_cif_mi_base + 0x00000058) +#define rkisp1_cif_mi_sp_cb_offs_cnt_start (rkisp1_cif_mi_base + 0x0000005c) +#define rkisp1_cif_mi_sp_cr_base_ad_init (rkisp1_cif_mi_base + 0x00000060) +#define rkisp1_cif_mi_sp_cr_size_init (rkisp1_cif_mi_base + 0x00000064) +#define rkisp1_cif_mi_sp_cr_offs_cnt_init (rkisp1_cif_mi_base + 0x00000068) +#define rkisp1_cif_mi_sp_cr_offs_cnt_start (rkisp1_cif_mi_base + 0x0000006c) +#define rkisp1_cif_mi_byte_cnt (rkisp1_cif_mi_base + 0x00000070) +#define rkisp1_cif_mi_ctrl_shd (rkisp1_cif_mi_base + 0x00000074) +#define rkisp1_cif_mi_mp_y_base_ad_shd (rkisp1_cif_mi_base + 0x00000078) +#define rkisp1_cif_mi_mp_y_size_shd (rkisp1_cif_mi_base + 0x0000007c) +#define rkisp1_cif_mi_mp_y_offs_cnt_shd (rkisp1_cif_mi_base + 0x00000080) +#define rkisp1_cif_mi_mp_y_irq_offs_shd (rkisp1_cif_mi_base + 0x00000084) +#define rkisp1_cif_mi_mp_cb_base_ad_shd (rkisp1_cif_mi_base + 0x00000088) +#define rkisp1_cif_mi_mp_cb_size_shd (rkisp1_cif_mi_base + 0x0000008c) +#define rkisp1_cif_mi_mp_cb_offs_cnt_shd (rkisp1_cif_mi_base + 0x00000090) +#define rkisp1_cif_mi_mp_cr_base_ad_shd (rkisp1_cif_mi_base + 0x00000094) +#define rkisp1_cif_mi_mp_cr_size_shd (rkisp1_cif_mi_base + 0x00000098) +#define rkisp1_cif_mi_mp_cr_offs_cnt_shd (rkisp1_cif_mi_base + 0x0000009c) +#define rkisp1_cif_mi_sp_y_base_ad_shd (rkisp1_cif_mi_base + 0x000000a0) +#define rkisp1_cif_mi_sp_y_size_shd (rkisp1_cif_mi_base + 0x000000a4) +#define rkisp1_cif_mi_sp_y_offs_cnt_shd (rkisp1_cif_mi_base + 0x000000a8) +#define rkisp1_cif_mi_sp_cb_base_ad_shd (rkisp1_cif_mi_base + 0x000000b0) +#define rkisp1_cif_mi_sp_cb_size_shd (rkisp1_cif_mi_base + 0x000000b4) +#define rkisp1_cif_mi_sp_cb_offs_cnt_shd (rkisp1_cif_mi_base + 0x000000b8) +#define rkisp1_cif_mi_sp_cr_base_ad_shd (rkisp1_cif_mi_base + 0x000000bc) +#define rkisp1_cif_mi_sp_cr_size_shd (rkisp1_cif_mi_base + 0x000000c0) +#define 
rkisp1_cif_mi_sp_cr_offs_cnt_shd (rkisp1_cif_mi_base + 0x000000c4) +#define rkisp1_cif_mi_dma_y_pic_start_ad (rkisp1_cif_mi_base + 0x000000c8) +#define rkisp1_cif_mi_dma_y_pic_width (rkisp1_cif_mi_base + 0x000000cc) +#define rkisp1_cif_mi_dma_y_llength (rkisp1_cif_mi_base + 0x000000d0) +#define rkisp1_cif_mi_dma_y_pic_size (rkisp1_cif_mi_base + 0x000000d4) +#define rkisp1_cif_mi_dma_cb_pic_start_ad (rkisp1_cif_mi_base + 0x000000d8) +#define rkisp1_cif_mi_dma_cr_pic_start_ad (rkisp1_cif_mi_base + 0x000000e8) +#define rkisp1_cif_mi_imsc (rkisp1_cif_mi_base + 0x000000f8) +#define rkisp1_cif_mi_ris (rkisp1_cif_mi_base + 0x000000fc) +#define rkisp1_cif_mi_mis (rkisp1_cif_mi_base + 0x00000100) +#define rkisp1_cif_mi_icr (rkisp1_cif_mi_base + 0x00000104) +#define rkisp1_cif_mi_isr (rkisp1_cif_mi_base + 0x00000108) +#define rkisp1_cif_mi_status (rkisp1_cif_mi_base + 0x0000010c) +#define rkisp1_cif_mi_status_clr (rkisp1_cif_mi_base + 0x00000110) +#define rkisp1_cif_mi_sp_y_pic_width (rkisp1_cif_mi_base + 0x00000114) +#define rkisp1_cif_mi_sp_y_pic_height (rkisp1_cif_mi_base + 0x00000118) +#define rkisp1_cif_mi_sp_y_pic_size (rkisp1_cif_mi_base + 0x0000011c) +#define rkisp1_cif_mi_dma_ctrl (rkisp1_cif_mi_base + 0x00000120) +#define rkisp1_cif_mi_dma_start (rkisp1_cif_mi_base + 0x00000124) +#define rkisp1_cif_mi_dma_status (rkisp1_cif_mi_base + 0x00000128) +#define rkisp1_cif_mi_pixel_count (rkisp1_cif_mi_base + 0x0000012c) +#define rkisp1_cif_mi_mp_y_base_ad_init2 (rkisp1_cif_mi_base + 0x00000130) +#define rkisp1_cif_mi_mp_cb_base_ad_init2 (rkisp1_cif_mi_base + 0x00000134) +#define rkisp1_cif_mi_mp_cr_base_ad_init2 (rkisp1_cif_mi_base + 0x00000138) +#define rkisp1_cif_mi_sp_y_base_ad_init2 (rkisp1_cif_mi_base + 0x0000013c) +#define rkisp1_cif_mi_sp_cb_base_ad_init2 (rkisp1_cif_mi_base + 0x00000140) +#define rkisp1_cif_mi_sp_cr_base_ad_init2 (rkisp1_cif_mi_base + 0x00000144) +#define rkisp1_cif_mi_xtd_format_ctrl (rkisp1_cif_mi_base + 0x00000148) + +#define 
rkisp1_cif_smia_base 0x00001a00 +#define rkisp1_cif_smia_ctrl (rkisp1_cif_smia_base + 0x00000000) +#define rkisp1_cif_smia_status (rkisp1_cif_smia_base + 0x00000004) +#define rkisp1_cif_smia_imsc (rkisp1_cif_smia_base + 0x00000008) +#define rkisp1_cif_smia_ris (rkisp1_cif_smia_base + 0x0000000c) +#define rkisp1_cif_smia_mis (rkisp1_cif_smia_base + 0x00000010) +#define rkisp1_cif_smia_icr (rkisp1_cif_smia_base + 0x00000014) +#define rkisp1_cif_smia_isr (rkisp1_cif_smia_base + 0x00000018) +#define rkisp1_cif_smia_data_format_sel (rkisp1_cif_smia_base + 0x0000001c) +#define rkisp1_cif_smia_sof_emb_data_lines (rkisp1_cif_smia_base + 0x00000020) +#define rkisp1_cif_smia_emb_hstart (rkisp1_cif_smia_base + 0x00000024) +#define rkisp1_cif_smia_emb_hsize (rkisp1_cif_smia_base + 0x00000028) +#define rkisp1_cif_smia_emb_vstart (rkisp1_cif_smia_base + 0x0000002c) +#define rkisp1_cif_smia_num_lines (rkisp1_cif_smia_base + 0x00000030) +#define rkisp1_cif_smia_emb_data_fifo (rkisp1_cif_smia_base + 0x00000034) +#define rkisp1_cif_smia_emb_data_watermark (rkisp1_cif_smia_base + 0x00000038) + +#define rkisp1_cif_mipi_base 0x00001c00 +#define rkisp1_cif_mipi_ctrl (rkisp1_cif_mipi_base + 0x00000000) +#define rkisp1_cif_mipi_status (rkisp1_cif_mipi_base + 0x00000004) +#define rkisp1_cif_mipi_imsc (rkisp1_cif_mipi_base + 0x00000008) +#define rkisp1_cif_mipi_ris (rkisp1_cif_mipi_base + 0x0000000c) +#define rkisp1_cif_mipi_mis (rkisp1_cif_mipi_base + 0x00000010) +#define rkisp1_cif_mipi_icr (rkisp1_cif_mipi_base + 0x00000014) +#define rkisp1_cif_mipi_isr (rkisp1_cif_mipi_base + 0x00000018) +#define rkisp1_cif_mipi_cur_data_id (rkisp1_cif_mipi_base + 0x0000001c) +#define rkisp1_cif_mipi_img_data_sel (rkisp1_cif_mipi_base + 0x00000020) +#define rkisp1_cif_mipi_add_data_sel_1 (rkisp1_cif_mipi_base + 0x00000024) +#define rkisp1_cif_mipi_add_data_sel_2 (rkisp1_cif_mipi_base + 0x00000028) +#define rkisp1_cif_mipi_add_data_sel_3 (rkisp1_cif_mipi_base + 0x0000002c) +#define 
rkisp1_cif_mipi_add_data_sel_4 (rkisp1_cif_mipi_base + 0x00000030) +#define rkisp1_cif_mipi_add_data_fifo (rkisp1_cif_mipi_base + 0x00000034) +#define rkisp1_cif_mipi_fifo_fill_level (rkisp1_cif_mipi_base + 0x00000038) +#define rkisp1_cif_mipi_compressed_mode (rkisp1_cif_mipi_base + 0x0000003c) +#define rkisp1_cif_mipi_frame (rkisp1_cif_mipi_base + 0x00000040) +#define rkisp1_cif_mipi_gen_short_dt (rkisp1_cif_mipi_base + 0x00000044) +#define rkisp1_cif_mipi_gen_short_8_9 (rkisp1_cif_mipi_base + 0x00000048) +#define rkisp1_cif_mipi_gen_short_a_b (rkisp1_cif_mipi_base + 0x0000004c) +#define rkisp1_cif_mipi_gen_short_c_d (rkisp1_cif_mipi_base + 0x00000050) +#define rkisp1_cif_mipi_gen_short_e_f (rkisp1_cif_mipi_base + 0x00000054) + +#define rkisp1_cif_isp_afm_base 0x00002000 +#define rkisp1_cif_isp_afm_ctrl (rkisp1_cif_isp_afm_base + 0x00000000) +#define rkisp1_cif_isp_afm_lt_a (rkisp1_cif_isp_afm_base + 0x00000004) +#define rkisp1_cif_isp_afm_rb_a (rkisp1_cif_isp_afm_base + 0x00000008) +#define rkisp1_cif_isp_afm_lt_b (rkisp1_cif_isp_afm_base + 0x0000000c) +#define rkisp1_cif_isp_afm_rb_b (rkisp1_cif_isp_afm_base + 0x00000010) +#define rkisp1_cif_isp_afm_lt_c (rkisp1_cif_isp_afm_base + 0x00000014) +#define rkisp1_cif_isp_afm_rb_c (rkisp1_cif_isp_afm_base + 0x00000018) +#define rkisp1_cif_isp_afm_thres (rkisp1_cif_isp_afm_base + 0x0000001c) +#define rkisp1_cif_isp_afm_var_shift (rkisp1_cif_isp_afm_base + 0x00000020) +#define rkisp1_cif_isp_afm_sum_a (rkisp1_cif_isp_afm_base + 0x00000024) +#define rkisp1_cif_isp_afm_sum_b (rkisp1_cif_isp_afm_base + 0x00000028) +#define rkisp1_cif_isp_afm_sum_c (rkisp1_cif_isp_afm_base + 0x0000002c) +#define rkisp1_cif_isp_afm_lum_a (rkisp1_cif_isp_afm_base + 0x00000030) +#define rkisp1_cif_isp_afm_lum_b (rkisp1_cif_isp_afm_base + 0x00000034) +#define rkisp1_cif_isp_afm_lum_c (rkisp1_cif_isp_afm_base + 0x00000038) + +#define rkisp1_cif_isp_lsc_base 0x00002200 +#define rkisp1_cif_isp_lsc_ctrl (rkisp1_cif_isp_lsc_base + 0x00000000) 
+#define rkisp1_cif_isp_lsc_r_table_addr (rkisp1_cif_isp_lsc_base + 0x00000004) +#define rkisp1_cif_isp_lsc_gr_table_addr (rkisp1_cif_isp_lsc_base + 0x00000008) +#define rkisp1_cif_isp_lsc_b_table_addr (rkisp1_cif_isp_lsc_base + 0x0000000c) +#define rkisp1_cif_isp_lsc_gb_table_addr (rkisp1_cif_isp_lsc_base + 0x00000010) +#define rkisp1_cif_isp_lsc_r_table_data (rkisp1_cif_isp_lsc_base + 0x00000014) +#define rkisp1_cif_isp_lsc_gr_table_data (rkisp1_cif_isp_lsc_base + 0x00000018) +#define rkisp1_cif_isp_lsc_b_table_data (rkisp1_cif_isp_lsc_base + 0x0000001c) +#define rkisp1_cif_isp_lsc_gb_table_data (rkisp1_cif_isp_lsc_base + 0x00000020) +#define rkisp1_cif_isp_lsc_xgrad_01 (rkisp1_cif_isp_lsc_base + 0x00000024) +#define rkisp1_cif_isp_lsc_xgrad_23 (rkisp1_cif_isp_lsc_base + 0x00000028) +#define rkisp1_cif_isp_lsc_xgrad_45 (rkisp1_cif_isp_lsc_base + 0x0000002c) +#define rkisp1_cif_isp_lsc_xgrad_67 (rkisp1_cif_isp_lsc_base + 0x00000030) +#define rkisp1_cif_isp_lsc_ygrad_01 (rkisp1_cif_isp_lsc_base + 0x00000034) +#define rkisp1_cif_isp_lsc_ygrad_23 (rkisp1_cif_isp_lsc_base + 0x00000038) +#define rkisp1_cif_isp_lsc_ygrad_45 (rkisp1_cif_isp_lsc_base + 0x0000003c) +#define rkisp1_cif_isp_lsc_ygrad_67 (rkisp1_cif_isp_lsc_base + 0x00000040) +#define rkisp1_cif_isp_lsc_xsize_01 (rkisp1_cif_isp_lsc_base + 0x00000044) +#define rkisp1_cif_isp_lsc_xsize_23 (rkisp1_cif_isp_lsc_base + 0x00000048) +#define rkisp1_cif_isp_lsc_xsize_45 (rkisp1_cif_isp_lsc_base + 0x0000004c) +#define rkisp1_cif_isp_lsc_xsize_67 (rkisp1_cif_isp_lsc_base + 0x00000050) +#define rkisp1_cif_isp_lsc_ysize_01 (rkisp1_cif_isp_lsc_base + 0x00000054) +#define rkisp1_cif_isp_lsc_ysize_23 (rkisp1_cif_isp_lsc_base + 0x00000058) +#define rkisp1_cif_isp_lsc_ysize_45 (rkisp1_cif_isp_lsc_base + 0x0000005c) +#define rkisp1_cif_isp_lsc_ysize_67 (rkisp1_cif_isp_lsc_base + 0x00000060) +#define rkisp1_cif_isp_lsc_table_sel (rkisp1_cif_isp_lsc_base + 0x00000064) +#define rkisp1_cif_isp_lsc_status (rkisp1_cif_isp_lsc_base + 
0x00000068) + +#define rkisp1_cif_isp_is_base 0x00002300 +#define rkisp1_cif_isp_is_ctrl (rkisp1_cif_isp_is_base + 0x00000000) +#define rkisp1_cif_isp_is_recenter (rkisp1_cif_isp_is_base + 0x00000004) +#define rkisp1_cif_isp_is_h_offs (rkisp1_cif_isp_is_base + 0x00000008) +#define rkisp1_cif_isp_is_v_offs (rkisp1_cif_isp_is_base + 0x0000000c) +#define rkisp1_cif_isp_is_h_size (rkisp1_cif_isp_is_base + 0x00000010) +#define rkisp1_cif_isp_is_v_size (rkisp1_cif_isp_is_base + 0x00000014) +#define rkisp1_cif_isp_is_max_dx (rkisp1_cif_isp_is_base + 0x00000018) +#define rkisp1_cif_isp_is_max_dy (rkisp1_cif_isp_is_base + 0x0000001c) +#define rkisp1_cif_isp_is_displace (rkisp1_cif_isp_is_base + 0x00000020) +#define rkisp1_cif_isp_is_h_offs_shd (rkisp1_cif_isp_is_base + 0x00000024) +#define rkisp1_cif_isp_is_v_offs_shd (rkisp1_cif_isp_is_base + 0x00000028) +#define rkisp1_cif_isp_is_h_size_shd (rkisp1_cif_isp_is_base + 0x0000002c) +#define rkisp1_cif_isp_is_v_size_shd (rkisp1_cif_isp_is_base + 0x00000030) + +#define rkisp1_cif_isp_hist_base 0x00002400 + +#define rkisp1_cif_isp_hist_prop (rkisp1_cif_isp_hist_base + 0x00000000) +#define rkisp1_cif_isp_hist_h_offs (rkisp1_cif_isp_hist_base + 0x00000004) +#define rkisp1_cif_isp_hist_v_offs (rkisp1_cif_isp_hist_base + 0x00000008) +#define rkisp1_cif_isp_hist_h_size (rkisp1_cif_isp_hist_base + 0x0000000c) +#define rkisp1_cif_isp_hist_v_size (rkisp1_cif_isp_hist_base + 0x00000010) +#define rkisp1_cif_isp_hist_bin_0 (rkisp1_cif_isp_hist_base + 0x00000014) +#define rkisp1_cif_isp_hist_bin_1 (rkisp1_cif_isp_hist_base + 0x00000018) +#define rkisp1_cif_isp_hist_bin_2 (rkisp1_cif_isp_hist_base + 0x0000001c) +#define rkisp1_cif_isp_hist_bin_3 (rkisp1_cif_isp_hist_base + 0x00000020) +#define rkisp1_cif_isp_hist_bin_4 (rkisp1_cif_isp_hist_base + 0x00000024) +#define rkisp1_cif_isp_hist_bin_5 (rkisp1_cif_isp_hist_base + 0x00000028) +#define rkisp1_cif_isp_hist_bin_6 (rkisp1_cif_isp_hist_base + 0x0000002c) +#define rkisp1_cif_isp_hist_bin_7 
(rkisp1_cif_isp_hist_base + 0x00000030) +#define rkisp1_cif_isp_hist_bin_8 (rkisp1_cif_isp_hist_base + 0x00000034) +#define rkisp1_cif_isp_hist_bin_9 (rkisp1_cif_isp_hist_base + 0x00000038) +#define rkisp1_cif_isp_hist_bin_10 (rkisp1_cif_isp_hist_base + 0x0000003c) +#define rkisp1_cif_isp_hist_bin_11 (rkisp1_cif_isp_hist_base + 0x00000040) +#define rkisp1_cif_isp_hist_bin_12 (rkisp1_cif_isp_hist_base + 0x00000044) +#define rkisp1_cif_isp_hist_bin_13 (rkisp1_cif_isp_hist_base + 0x00000048) +#define rkisp1_cif_isp_hist_bin_14 (rkisp1_cif_isp_hist_base + 0x0000004c) +#define rkisp1_cif_isp_hist_bin_15 (rkisp1_cif_isp_hist_base + 0x00000050) +#define rkisp1_cif_isp_hist_weight_00to30 (rkisp1_cif_isp_hist_base + 0x00000054) +#define rkisp1_cif_isp_hist_weight_40to21 (rkisp1_cif_isp_hist_base + 0x00000058) +#define rkisp1_cif_isp_hist_weight_31to12 (rkisp1_cif_isp_hist_base + 0x0000005c) +#define rkisp1_cif_isp_hist_weight_22to03 (rkisp1_cif_isp_hist_base + 0x00000060) +#define rkisp1_cif_isp_hist_weight_13to43 (rkisp1_cif_isp_hist_base + 0x00000064) +#define rkisp1_cif_isp_hist_weight_04to34 (rkisp1_cif_isp_hist_base + 0x00000068) +#define rkisp1_cif_isp_hist_weight_44 (rkisp1_cif_isp_hist_base + 0x0000006c) + +#define rkisp1_cif_isp_filt_base 0x00002500 +#define rkisp1_cif_isp_filt_mode (rkisp1_cif_isp_filt_base + 0x00000000) +#define rkisp1_cif_isp_filt_thresh_bl0 (rkisp1_cif_isp_filt_base + 0x00000028) +#define rkisp1_cif_isp_filt_thresh_bl1 (rkisp1_cif_isp_filt_base + 0x0000002c) +#define rkisp1_cif_isp_filt_thresh_sh0 (rkisp1_cif_isp_filt_base + 0x00000030) +#define rkisp1_cif_isp_filt_thresh_sh1 (rkisp1_cif_isp_filt_base + 0x00000034) +#define rkisp1_cif_isp_filt_lum_weight (rkisp1_cif_isp_filt_base + 0x00000038) +#define rkisp1_cif_isp_filt_fac_sh1 (rkisp1_cif_isp_filt_base + 0x0000003c) +#define rkisp1_cif_isp_filt_fac_sh0 (rkisp1_cif_isp_filt_base + 0x00000040) +#define rkisp1_cif_isp_filt_fac_mid (rkisp1_cif_isp_filt_base + 0x00000044) +#define 
rkisp1_cif_isp_filt_fac_bl0 (rkisp1_cif_isp_filt_base + 0x00000048) +#define rkisp1_cif_isp_filt_fac_bl1 (rkisp1_cif_isp_filt_base + 0x0000004c) + +#define rkisp1_cif_isp_cac_base 0x00002580 +#define rkisp1_cif_isp_cac_ctrl (rkisp1_cif_isp_cac_base + 0x00000000) +#define rkisp1_cif_isp_cac_count_start (rkisp1_cif_isp_cac_base + 0x00000004) +#define rkisp1_cif_isp_cac_a (rkisp1_cif_isp_cac_base + 0x00000008) +#define rkisp1_cif_isp_cac_b (rkisp1_cif_isp_cac_base + 0x0000000c) +#define rkisp1_cif_isp_cac_c (rkisp1_cif_isp_cac_base + 0x00000010) +#define rkisp1_cif_isp_x_norm (rkisp1_cif_isp_cac_base + 0x00000014) +#define rkisp1_cif_isp_y_norm (rkisp1_cif_isp_cac_base + 0x00000018) + +#define rkisp1_cif_isp_exp_base 0x00002600 +#define rkisp1_cif_isp_exp_ctrl (rkisp1_cif_isp_exp_base + 0x00000000) +#define rkisp1_cif_isp_exp_h_offset (rkisp1_cif_isp_exp_base + 0x00000004) +#define rkisp1_cif_isp_exp_v_offset (rkisp1_cif_isp_exp_base + 0x00000008) +#define rkisp1_cif_isp_exp_h_size (rkisp1_cif_isp_exp_base + 0x0000000c) +#define rkisp1_cif_isp_exp_v_size (rkisp1_cif_isp_exp_base + 0x00000010) +#define rkisp1_cif_isp_exp_mean_00 (rkisp1_cif_isp_exp_base + 0x00000014) +#define rkisp1_cif_isp_exp_mean_10 (rkisp1_cif_isp_exp_base + 0x00000018) +#define rkisp1_cif_isp_exp_mean_20 (rkisp1_cif_isp_exp_base + 0x0000001c) +#define rkisp1_cif_isp_exp_mean_30 (rkisp1_cif_isp_exp_base + 0x00000020) +#define rkisp1_cif_isp_exp_mean_40 (rkisp1_cif_isp_exp_base + 0x00000024) +#define rkisp1_cif_isp_exp_mean_01 (rkisp1_cif_isp_exp_base + 0x00000028) +#define rkisp1_cif_isp_exp_mean_11 (rkisp1_cif_isp_exp_base + 0x0000002c) +#define rkisp1_cif_isp_exp_mean_21 (rkisp1_cif_isp_exp_base + 0x00000030) +#define rkisp1_cif_isp_exp_mean_31 (rkisp1_cif_isp_exp_base + 0x00000034) +#define rkisp1_cif_isp_exp_mean_41 (rkisp1_cif_isp_exp_base + 0x00000038) +#define rkisp1_cif_isp_exp_mean_02 (rkisp1_cif_isp_exp_base + 0x0000003c) +#define rkisp1_cif_isp_exp_mean_12 (rkisp1_cif_isp_exp_base + 
0x00000040) +#define rkisp1_cif_isp_exp_mean_22 (rkisp1_cif_isp_exp_base + 0x00000044) +#define rkisp1_cif_isp_exp_mean_32 (rkisp1_cif_isp_exp_base + 0x00000048) +#define rkisp1_cif_isp_exp_mean_42 (rkisp1_cif_isp_exp_base + 0x0000004c) +#define rkisp1_cif_isp_exp_mean_03 (rkisp1_cif_isp_exp_base + 0x00000050) +#define rkisp1_cif_isp_exp_mean_13 (rkisp1_cif_isp_exp_base + 0x00000054) +#define rkisp1_cif_isp_exp_mean_23 (rkisp1_cif_isp_exp_base + 0x00000058) +#define rkisp1_cif_isp_exp_mean_33 (rkisp1_cif_isp_exp_base + 0x0000005c) +#define rkisp1_cif_isp_exp_mean_43 (rkisp1_cif_isp_exp_base + 0x00000060) +#define rkisp1_cif_isp_exp_mean_04 (rkisp1_cif_isp_exp_base + 0x00000064) +#define rkisp1_cif_isp_exp_mean_14 (rkisp1_cif_isp_exp_base + 0x00000068) +#define rkisp1_cif_isp_exp_mean_24 (rkisp1_cif_isp_exp_base + 0x0000006c) +#define rkisp1_cif_isp_exp_mean_34 (rkisp1_cif_isp_exp_base + 0x00000070) +#define rkisp1_cif_isp_exp_mean_44 (rkisp1_cif_isp_exp_base + 0x00000074) + +#define rkisp1_cif_isp_bls_base 0x00002700 +#define rkisp1_cif_isp_bls_ctrl (rkisp1_cif_isp_bls_base + 0x00000000) +#define rkisp1_cif_isp_bls_samples (rkisp1_cif_isp_bls_base + 0x00000004) +#define rkisp1_cif_isp_bls_h1_start (rkisp1_cif_isp_bls_base + 0x00000008) +#define rkisp1_cif_isp_bls_h1_stop (rkisp1_cif_isp_bls_base + 0x0000000c) +#define rkisp1_cif_isp_bls_v1_start (rkisp1_cif_isp_bls_base + 0x00000010) +#define rkisp1_cif_isp_bls_v1_stop (rkisp1_cif_isp_bls_base + 0x00000014) +#define rkisp1_cif_isp_bls_h2_start (rkisp1_cif_isp_bls_base + 0x00000018) +#define rkisp1_cif_isp_bls_h2_stop (rkisp1_cif_isp_bls_base + 0x0000001c) +#define rkisp1_cif_isp_bls_v2_start (rkisp1_cif_isp_bls_base + 0x00000020) +#define rkisp1_cif_isp_bls_v2_stop (rkisp1_cif_isp_bls_base + 0x00000024) +#define rkisp1_cif_isp_bls_a_fixed (rkisp1_cif_isp_bls_base + 0x00000028) +#define rkisp1_cif_isp_bls_b_fixed (rkisp1_cif_isp_bls_base + 0x0000002c) +#define rkisp1_cif_isp_bls_c_fixed (rkisp1_cif_isp_bls_base + 
0x00000030) +#define rkisp1_cif_isp_bls_d_fixed (rkisp1_cif_isp_bls_base + 0x00000034) +#define rkisp1_cif_isp_bls_a_measured (rkisp1_cif_isp_bls_base + 0x00000038) +#define rkisp1_cif_isp_bls_b_measured (rkisp1_cif_isp_bls_base + 0x0000003c) +#define rkisp1_cif_isp_bls_c_measured (rkisp1_cif_isp_bls_base + 0x00000040) +#define rkisp1_cif_isp_bls_d_measured (rkisp1_cif_isp_bls_base + 0x00000044) + +#define rkisp1_cif_isp_dpf_base 0x00002800 +#define rkisp1_cif_isp_dpf_mode (rkisp1_cif_isp_dpf_base + 0x00000000) +#define rkisp1_cif_isp_dpf_strength_r (rkisp1_cif_isp_dpf_base + 0x00000004) +#define rkisp1_cif_isp_dpf_strength_g (rkisp1_cif_isp_dpf_base + 0x00000008) +#define rkisp1_cif_isp_dpf_strength_b (rkisp1_cif_isp_dpf_base + 0x0000000c) +#define rkisp1_cif_isp_dpf_s_weight_g_1_4 (rkisp1_cif_isp_dpf_base + 0x00000010) +#define rkisp1_cif_isp_dpf_s_weight_g_5_6 (rkisp1_cif_isp_dpf_base + 0x00000014) +#define rkisp1_cif_isp_dpf_s_weight_rb_1_4 (rkisp1_cif_isp_dpf_base + 0x00000018) +#define rkisp1_cif_isp_dpf_s_weight_rb_5_6 (rkisp1_cif_isp_dpf_base + 0x0000001c) +#define rkisp1_cif_isp_dpf_null_coeff_0 (rkisp1_cif_isp_dpf_base + 0x00000020) +#define rkisp1_cif_isp_dpf_null_coeff_1 (rkisp1_cif_isp_dpf_base + 0x00000024) +#define rkisp1_cif_isp_dpf_null_coeff_2 (rkisp1_cif_isp_dpf_base + 0x00000028) +#define rkisp1_cif_isp_dpf_null_coeff_3 (rkisp1_cif_isp_dpf_base + 0x0000002c) +#define rkisp1_cif_isp_dpf_null_coeff_4 (rkisp1_cif_isp_dpf_base + 0x00000030) +#define rkisp1_cif_isp_dpf_null_coeff_5 (rkisp1_cif_isp_dpf_base + 0x00000034) +#define rkisp1_cif_isp_dpf_null_coeff_6 (rkisp1_cif_isp_dpf_base + 0x00000038) +#define rkisp1_cif_isp_dpf_null_coeff_7 (rkisp1_cif_isp_dpf_base + 0x0000003c) +#define rkisp1_cif_isp_dpf_null_coeff_8 (rkisp1_cif_isp_dpf_base + 0x00000040) +#define rkisp1_cif_isp_dpf_null_coeff_9 (rkisp1_cif_isp_dpf_base + 0x00000044) +#define rkisp1_cif_isp_dpf_null_coeff_10 (rkisp1_cif_isp_dpf_base + 0x00000048) +#define 
rkisp1_cif_isp_dpf_null_coeff_11 (rkisp1_cif_isp_dpf_base + 0x0000004c) +#define rkisp1_cif_isp_dpf_null_coeff_12 (rkisp1_cif_isp_dpf_base + 0x00000050) +#define rkisp1_cif_isp_dpf_null_coeff_13 (rkisp1_cif_isp_dpf_base + 0x00000054) +#define rkisp1_cif_isp_dpf_null_coeff_14 (rkisp1_cif_isp_dpf_base + 0x00000058) +#define rkisp1_cif_isp_dpf_null_coeff_15 (rkisp1_cif_isp_dpf_base + 0x0000005c) +#define rkisp1_cif_isp_dpf_null_coeff_16 (rkisp1_cif_isp_dpf_base + 0x00000060) +#define rkisp1_cif_isp_dpf_nf_gain_r (rkisp1_cif_isp_dpf_base + 0x00000064) +#define rkisp1_cif_isp_dpf_nf_gain_gr (rkisp1_cif_isp_dpf_base + 0x00000068) +#define rkisp1_cif_isp_dpf_nf_gain_gb (rkisp1_cif_isp_dpf_base + 0x0000006c) +#define rkisp1_cif_isp_dpf_nf_gain_b (rkisp1_cif_isp_dpf_base + 0x00000070) + +#define rkisp1_cif_isp_dpcc_base 0x00002900 +#define rkisp1_cif_isp_dpcc_mode (rkisp1_cif_isp_dpcc_base + 0x00000000) +#define rkisp1_cif_isp_dpcc_output_mode (rkisp1_cif_isp_dpcc_base + 0x00000004) +#define rkisp1_cif_isp_dpcc_set_use (rkisp1_cif_isp_dpcc_base + 0x00000008) +#define rkisp1_cif_isp_dpcc_methods_set_1 (rkisp1_cif_isp_dpcc_base + 0x0000000c) +#define rkisp1_cif_isp_dpcc_methods_set_2 (rkisp1_cif_isp_dpcc_base + 0x00000010) +#define rkisp1_cif_isp_dpcc_methods_set_3 (rkisp1_cif_isp_dpcc_base + 0x00000014) +#define rkisp1_cif_isp_dpcc_line_thresh_1 (rkisp1_cif_isp_dpcc_base + 0x00000018) +#define rkisp1_cif_isp_dpcc_line_mad_fac_1 (rkisp1_cif_isp_dpcc_base + 0x0000001c) +#define rkisp1_cif_isp_dpcc_pg_fac_1 (rkisp1_cif_isp_dpcc_base + 0x00000020) +#define rkisp1_cif_isp_dpcc_rnd_thresh_1 (rkisp1_cif_isp_dpcc_base + 0x00000024) +#define rkisp1_cif_isp_dpcc_rg_fac_1 (rkisp1_cif_isp_dpcc_base + 0x00000028) +#define rkisp1_cif_isp_dpcc_line_thresh_2 (rkisp1_cif_isp_dpcc_base + 0x0000002c) +#define rkisp1_cif_isp_dpcc_line_mad_fac_2 (rkisp1_cif_isp_dpcc_base + 0x00000030) +#define rkisp1_cif_isp_dpcc_pg_fac_2 (rkisp1_cif_isp_dpcc_base + 0x00000034) +#define 
rkisp1_cif_isp_dpcc_rnd_thresh_2 (rkisp1_cif_isp_dpcc_base + 0x00000038) +#define rkisp1_cif_isp_dpcc_rg_fac_2 (rkisp1_cif_isp_dpcc_base + 0x0000003c) +#define rkisp1_cif_isp_dpcc_line_thresh_3 (rkisp1_cif_isp_dpcc_base + 0x00000040) +#define rkisp1_cif_isp_dpcc_line_mad_fac_3 (rkisp1_cif_isp_dpcc_base + 0x00000044) +#define rkisp1_cif_isp_dpcc_pg_fac_3 (rkisp1_cif_isp_dpcc_base + 0x00000048) +#define rkisp1_cif_isp_dpcc_rnd_thresh_3 (rkisp1_cif_isp_dpcc_base + 0x0000004c) +#define rkisp1_cif_isp_dpcc_rg_fac_3 (rkisp1_cif_isp_dpcc_base + 0x00000050) +#define rkisp1_cif_isp_dpcc_ro_limits (rkisp1_cif_isp_dpcc_base + 0x00000054) +#define rkisp1_cif_isp_dpcc_rnd_offs (rkisp1_cif_isp_dpcc_base + 0x00000058) +#define rkisp1_cif_isp_dpcc_bpt_ctrl (rkisp1_cif_isp_dpcc_base + 0x0000005c) +#define rkisp1_cif_isp_dpcc_bpt_number (rkisp1_cif_isp_dpcc_base + 0x00000060) +#define rkisp1_cif_isp_dpcc_bpt_addr (rkisp1_cif_isp_dpcc_base + 0x00000064) +#define rkisp1_cif_isp_dpcc_bpt_data (rkisp1_cif_isp_dpcc_base + 0x00000068) + +#define rkisp1_cif_isp_wdr_base 0x00002a00 +#define rkisp1_cif_isp_wdr_ctrl (rkisp1_cif_isp_wdr_base + 0x00000000) +#define rkisp1_cif_isp_wdr_tonecurve_1 (rkisp1_cif_isp_wdr_base + 0x00000004) +#define rkisp1_cif_isp_wdr_tonecurve_2 (rkisp1_cif_isp_wdr_base + 0x00000008) +#define rkisp1_cif_isp_wdr_tonecurve_3 (rkisp1_cif_isp_wdr_base + 0x0000000c) +#define rkisp1_cif_isp_wdr_tonecurve_4 (rkisp1_cif_isp_wdr_base + 0x00000010) +#define rkisp1_cif_isp_wdr_tonecurve_ym_0 (rkisp1_cif_isp_wdr_base + 0x00000014) +#define rkisp1_cif_isp_wdr_tonecurve_ym_1 (rkisp1_cif_isp_wdr_base + 0x00000018) +#define rkisp1_cif_isp_wdr_tonecurve_ym_2 (rkisp1_cif_isp_wdr_base + 0x0000001c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_3 (rkisp1_cif_isp_wdr_base + 0x00000020) +#define rkisp1_cif_isp_wdr_tonecurve_ym_4 (rkisp1_cif_isp_wdr_base + 0x00000024) +#define rkisp1_cif_isp_wdr_tonecurve_ym_5 (rkisp1_cif_isp_wdr_base + 0x00000028) +#define rkisp1_cif_isp_wdr_tonecurve_ym_6 
(rkisp1_cif_isp_wdr_base + 0x0000002c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_7 (rkisp1_cif_isp_wdr_base + 0x00000030) +#define rkisp1_cif_isp_wdr_tonecurve_ym_8 (rkisp1_cif_isp_wdr_base + 0x00000034) +#define rkisp1_cif_isp_wdr_tonecurve_ym_9 (rkisp1_cif_isp_wdr_base + 0x00000038) +#define rkisp1_cif_isp_wdr_tonecurve_ym_10 (rkisp1_cif_isp_wdr_base + 0x0000003c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_11 (rkisp1_cif_isp_wdr_base + 0x00000040) +#define rkisp1_cif_isp_wdr_tonecurve_ym_12 (rkisp1_cif_isp_wdr_base + 0x00000044) +#define rkisp1_cif_isp_wdr_tonecurve_ym_13 (rkisp1_cif_isp_wdr_base + 0x00000048) +#define rkisp1_cif_isp_wdr_tonecurve_ym_14 (rkisp1_cif_isp_wdr_base + 0x0000004c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_15 (rkisp1_cif_isp_wdr_base + 0x00000050) +#define rkisp1_cif_isp_wdr_tonecurve_ym_16 (rkisp1_cif_isp_wdr_base + 0x00000054) +#define rkisp1_cif_isp_wdr_tonecurve_ym_17 (rkisp1_cif_isp_wdr_base + 0x00000058) +#define rkisp1_cif_isp_wdr_tonecurve_ym_18 (rkisp1_cif_isp_wdr_base + 0x0000005c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_19 (rkisp1_cif_isp_wdr_base + 0x00000060) +#define rkisp1_cif_isp_wdr_tonecurve_ym_20 (rkisp1_cif_isp_wdr_base + 0x00000064) +#define rkisp1_cif_isp_wdr_tonecurve_ym_21 (rkisp1_cif_isp_wdr_base + 0x00000068) +#define rkisp1_cif_isp_wdr_tonecurve_ym_22 (rkisp1_cif_isp_wdr_base + 0x0000006c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_23 (rkisp1_cif_isp_wdr_base + 0x00000070) +#define rkisp1_cif_isp_wdr_tonecurve_ym_24 (rkisp1_cif_isp_wdr_base + 0x00000074) +#define rkisp1_cif_isp_wdr_tonecurve_ym_25 (rkisp1_cif_isp_wdr_base + 0x00000078) +#define rkisp1_cif_isp_wdr_tonecurve_ym_26 (rkisp1_cif_isp_wdr_base + 0x0000007c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_27 (rkisp1_cif_isp_wdr_base + 0x00000080) +#define rkisp1_cif_isp_wdr_tonecurve_ym_28 (rkisp1_cif_isp_wdr_base + 0x00000084) +#define rkisp1_cif_isp_wdr_tonecurve_ym_29 (rkisp1_cif_isp_wdr_base + 0x00000088) +#define rkisp1_cif_isp_wdr_tonecurve_ym_30 
(rkisp1_cif_isp_wdr_base + 0x0000008c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_31 (rkisp1_cif_isp_wdr_base + 0x00000090) +#define rkisp1_cif_isp_wdr_tonecurve_ym_32 (rkisp1_cif_isp_wdr_base + 0x00000094) +#define rkisp1_cif_isp_wdr_offset (rkisp1_cif_isp_wdr_base + 0x00000098) +#define rkisp1_cif_isp_wdr_deltamin (rkisp1_cif_isp_wdr_base + 0x0000009c) +#define rkisp1_cif_isp_wdr_tonecurve_1_shd (rkisp1_cif_isp_wdr_base + 0x000000a0) +#define rkisp1_cif_isp_wdr_tonecurve_2_shd (rkisp1_cif_isp_wdr_base + 0x000000a4) +#define rkisp1_cif_isp_wdr_tonecurve_3_shd (rkisp1_cif_isp_wdr_base + 0x000000a8) +#define rkisp1_cif_isp_wdr_tonecurve_4_shd (rkisp1_cif_isp_wdr_base + 0x000000ac) +#define rkisp1_cif_isp_wdr_tonecurve_ym_0_shd (rkisp1_cif_isp_wdr_base + 0x000000b0) +#define rkisp1_cif_isp_wdr_tonecurve_ym_1_shd (rkisp1_cif_isp_wdr_base + 0x000000b4) +#define rkisp1_cif_isp_wdr_tonecurve_ym_2_shd (rkisp1_cif_isp_wdr_base + 0x000000b8) +#define rkisp1_cif_isp_wdr_tonecurve_ym_3_shd (rkisp1_cif_isp_wdr_base + 0x000000bc) +#define rkisp1_cif_isp_wdr_tonecurve_ym_4_shd (rkisp1_cif_isp_wdr_base + 0x000000c0) +#define rkisp1_cif_isp_wdr_tonecurve_ym_5_shd (rkisp1_cif_isp_wdr_base + 0x000000c4) +#define rkisp1_cif_isp_wdr_tonecurve_ym_6_shd (rkisp1_cif_isp_wdr_base + 0x000000c8) +#define rkisp1_cif_isp_wdr_tonecurve_ym_7_shd (rkisp1_cif_isp_wdr_base + 0x000000cc) +#define rkisp1_cif_isp_wdr_tonecurve_ym_8_shd (rkisp1_cif_isp_wdr_base + 0x000000d0) +#define rkisp1_cif_isp_wdr_tonecurve_ym_9_shd (rkisp1_cif_isp_wdr_base + 0x000000d4) +#define rkisp1_cif_isp_wdr_tonecurve_ym_10_shd (rkisp1_cif_isp_wdr_base + 0x000000d8) +#define rkisp1_cif_isp_wdr_tonecurve_ym_11_shd (rkisp1_cif_isp_wdr_base + 0x000000dc) +#define rkisp1_cif_isp_wdr_tonecurve_ym_12_shd (rkisp1_cif_isp_wdr_base + 0x000000e0) +#define rkisp1_cif_isp_wdr_tonecurve_ym_13_shd (rkisp1_cif_isp_wdr_base + 0x000000e4) +#define rkisp1_cif_isp_wdr_tonecurve_ym_14_shd (rkisp1_cif_isp_wdr_base + 0x000000e8) +#define 
rkisp1_cif_isp_wdr_tonecurve_ym_15_shd (rkisp1_cif_isp_wdr_base + 0x000000ec) +#define rkisp1_cif_isp_wdr_tonecurve_ym_16_shd (rkisp1_cif_isp_wdr_base + 0x000000f0) +#define rkisp1_cif_isp_wdr_tonecurve_ym_17_shd (rkisp1_cif_isp_wdr_base + 0x000000f4) +#define rkisp1_cif_isp_wdr_tonecurve_ym_18_shd (rkisp1_cif_isp_wdr_base + 0x000000f8) +#define rkisp1_cif_isp_wdr_tonecurve_ym_19_shd (rkisp1_cif_isp_wdr_base + 0x000000fc) +#define rkisp1_cif_isp_wdr_tonecurve_ym_20_shd (rkisp1_cif_isp_wdr_base + 0x00000100) +#define rkisp1_cif_isp_wdr_tonecurve_ym_21_shd (rkisp1_cif_isp_wdr_base + 0x00000104) +#define rkisp1_cif_isp_wdr_tonecurve_ym_22_shd (rkisp1_cif_isp_wdr_base + 0x00000108) +#define rkisp1_cif_isp_wdr_tonecurve_ym_23_shd (rkisp1_cif_isp_wdr_base + 0x0000010c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_24_shd (rkisp1_cif_isp_wdr_base + 0x00000110) +#define rkisp1_cif_isp_wdr_tonecurve_ym_25_shd (rkisp1_cif_isp_wdr_base + 0x00000114) +#define rkisp1_cif_isp_wdr_tonecurve_ym_26_shd (rkisp1_cif_isp_wdr_base + 0x00000118) +#define rkisp1_cif_isp_wdr_tonecurve_ym_27_shd (rkisp1_cif_isp_wdr_base + 0x0000011c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_28_shd (rkisp1_cif_isp_wdr_base + 0x00000120) +#define rkisp1_cif_isp_wdr_tonecurve_ym_29_shd (rkisp1_cif_isp_wdr_base + 0x00000124) +#define rkisp1_cif_isp_wdr_tonecurve_ym_30_shd (rkisp1_cif_isp_wdr_base + 0x00000128) +#define rkisp1_cif_isp_wdr_tonecurve_ym_31_shd (rkisp1_cif_isp_wdr_base + 0x0000012c) +#define rkisp1_cif_isp_wdr_tonecurve_ym_32_shd (rkisp1_cif_isp_wdr_base + 0x00000130) + +#define rkisp1_cif_isp_vsm_base 0x00002f00 +#define rkisp1_cif_isp_vsm_mode (rkisp1_cif_isp_vsm_base + 0x00000000) +#define rkisp1_cif_isp_vsm_h_offs (rkisp1_cif_isp_vsm_base + 0x00000004) +#define rkisp1_cif_isp_vsm_v_offs (rkisp1_cif_isp_vsm_base + 0x00000008) +#define rkisp1_cif_isp_vsm_h_size (rkisp1_cif_isp_vsm_base + 0x0000000c) +#define rkisp1_cif_isp_vsm_v_size (rkisp1_cif_isp_vsm_base + 0x00000010) +#define 
rkisp1_cif_isp_vsm_h_segments (rkisp1_cif_isp_vsm_base + 0x00000014) +#define rkisp1_cif_isp_vsm_v_segments (rkisp1_cif_isp_vsm_base + 0x00000018) +#define rkisp1_cif_isp_vsm_delta_h (rkisp1_cif_isp_vsm_base + 0x0000001c) +#define rkisp1_cif_isp_vsm_delta_v (rkisp1_cif_isp_vsm_base + 0x00000020) + +#endif /* _rkisp1_regs_h */
|
Drivers in the Staging area
|
d65dd85281fbf7fc66a936b0aa8979614a7ba150
|
helen koike
|
drivers
|
staging
|
media, rkisp1
|
media: staging: rkisp1: add streaming paths
|
add v4l2 capture device interface to rkisp1 driver, allowing users to get frames from isp1. isp1 has two major streaming paths, mainpah and selfpah, with different capabilities. each one has an independent crop and resizer, thus add a capture video device and a resizer subdevice for each of the paths.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['c', 'makefile', 'h']
| 5
| 2,364
| 7
|
--- diff --git a/drivers/staging/media/rkisp1/makefile b/drivers/staging/media/rkisp1/makefile --- a/drivers/staging/media/rkisp1/makefile +++ b/drivers/staging/media/rkisp1/makefile -rockchip-isp1-objs += rkisp1-common.o \ +rockchip-isp1-objs += rkisp1-capture.o \ + rkisp1-common.o \ - rkisp1-isp.o + rkisp1-isp.o \ + rkisp1-resizer.o diff --git a/drivers/staging/media/rkisp1/rkisp1-capture.c b/drivers/staging/media/rkisp1/rkisp1-capture.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-capture.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - v4l capture device + * + * copyright (c) 2019 collabora, ltd. + * + * based on rockchip isp1 driver by rockchip electronics co., ltd. + * copyright (c) 2017 rockchip electronics co., ltd. + */ + +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <media/v4l2-common.h> +#include <media/v4l2-event.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-mc.h> +#include <media/v4l2-subdev.h> +#include <media/videobuf2-dma-contig.h> + +#include "rkisp1-common.h" + +/* + * note: there are two capture video devices in rkisp1, selfpath and mainpath. + * + * differences between selfpath and mainpath + * available mp sink input: isp + * available sp sink input : isp, dma(todo) + * available mp sink pad fmts: yuv422, raw + * available sp sink pad fmts: yuv422, yuv420...... 
+ * available mp source fmts: yuv, raw, jpeg(todo) + * available sp source fmts: yuv, rgb + */ + +#define rkisp1_sp_dev_name rkisp1_driver_name "_selfpath" +#define rkisp1_mp_dev_name rkisp1_driver_name "_mainpath" + +#define rkisp1_min_buffers_needed 3 + +enum rkisp1_plane { + rkisp1_plane_y = 0, + rkisp1_plane_cb = 1, + rkisp1_plane_cr = 2 +}; + +/* + * @fourcc: pixel format + * @fmt_type: helper filed for pixel format + * @uv_swap: if cb cr swaped, for yuv + * @write_format: defines how ycbcr self picture data is written to memory + * @output_format: defines sp output format + */ +struct rkisp1_capture_fmt_cfg { + u32 fourcc; + u8 fmt_type; + u8 uv_swap; + u32 write_format; + u32 output_format; +}; + +struct rkisp1_capture_ops { + void (*config)(struct rkisp1_capture *cap); + void (*stop)(struct rkisp1_capture *cap); + void (*enable)(struct rkisp1_capture *cap); + void (*disable)(struct rkisp1_capture *cap); + void (*set_data_path)(struct rkisp1_capture *cap); + bool (*is_stopped)(struct rkisp1_capture *cap); +}; + +struct rkisp1_capture_config { + const struct rkisp1_capture_fmt_cfg *fmts; + int fmt_size; + struct { + u32 y_size_init; + u32 cb_size_init; + u32 cr_size_init; + u32 y_base_ad_init; + u32 cb_base_ad_init; + u32 cr_base_ad_init; + u32 y_offs_cnt_init; + u32 cb_offs_cnt_init; + u32 cr_offs_cnt_init; + } mi; +}; + +static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = { + /* yuv422 */ + { + .fourcc = v4l2_pix_fmt_yuyv, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuvint, + }, { + .fourcc = v4l2_pix_fmt_yvyu, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_mp_write_yuvint, + }, { + .fourcc = v4l2_pix_fmt_vyuy, + .fmt_type = rkisp1_fmt_yuv, + .write_format = rkisp1_mi_ctrl_mp_write_yuvint, + }, { + .fourcc = v4l2_pix_fmt_yuv422p, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, { + .fourcc = v4l2_pix_fmt_nv16, + 
.fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_spla, + }, { + .fourcc = v4l2_pix_fmt_nv61, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_spla, + }, { + .fourcc = v4l2_pix_fmt_yvu422m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, + /* yuv420 */ + { + .fourcc = v4l2_pix_fmt_nv21, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_spla, + }, { + .fourcc = v4l2_pix_fmt_nv12, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_spla, + }, { + .fourcc = v4l2_pix_fmt_nv21m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_spla, + }, { + .fourcc = v4l2_pix_fmt_nv12m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_spla, + }, { + .fourcc = v4l2_pix_fmt_yuv420, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, { + .fourcc = v4l2_pix_fmt_yvu420, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, + /* yuv444 */ + { + .fourcc = v4l2_pix_fmt_yuv444m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, + /* yuv400 */ + { + .fourcc = v4l2_pix_fmt_grey, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_mp_write_yuvint, + }, + /* raw */ + { + .fourcc = v4l2_pix_fmt_srggb8, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, { + .fourcc = v4l2_pix_fmt_sgrbg8, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, { + .fourcc = v4l2_pix_fmt_sgbrg8, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, { + .fourcc = v4l2_pix_fmt_sbggr8, + 
.fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_yuv_pla_or_raw8, + }, { + .fourcc = v4l2_pix_fmt_srggb10, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_sgrbg10, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_sgbrg10, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_sbggr10, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_srggb12, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_sgrbg12, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_sgbrg12, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, { + .fourcc = v4l2_pix_fmt_sbggr12, + .fmt_type = rkisp1_fmt_bayer, + .write_format = rkisp1_mi_ctrl_mp_write_raw12, + }, +}; + +static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = { + /* yuv422 */ + { + .fourcc = v4l2_pix_fmt_yuyv, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_int, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, { + .fourcc = v4l2_pix_fmt_yvyu, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_int, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, { + .fourcc = v4l2_pix_fmt_vyuy, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_int, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, { + .fourcc = v4l2_pix_fmt_yuv422p, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, { + .fourcc = v4l2_pix_fmt_nv16, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = 
rkisp1_mi_ctrl_sp_write_spla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, { + .fourcc = v4l2_pix_fmt_nv61, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_spla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, { + .fourcc = v4l2_pix_fmt_yvu422m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv422, + }, + /* yuv420 */ + { + .fourcc = v4l2_pix_fmt_nv21, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_spla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv420, + }, { + .fourcc = v4l2_pix_fmt_nv12, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_spla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv420, + }, { + .fourcc = v4l2_pix_fmt_nv21m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_spla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv420, + }, { + .fourcc = v4l2_pix_fmt_nv12m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_spla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv420, + }, { + .fourcc = v4l2_pix_fmt_yuv420, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv420, + }, { + .fourcc = v4l2_pix_fmt_yvu420, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 1, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv420, + }, + /* yuv444 */ + { + .fourcc = v4l2_pix_fmt_yuv444m, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_yuv444, + }, + /* yuv400 */ + { + .fourcc = v4l2_pix_fmt_grey, + .fmt_type = rkisp1_fmt_yuv, + .uv_swap = 0, + .write_format = rkisp1_mi_ctrl_sp_write_int, + .output_format = rkisp1_mi_ctrl_sp_output_yuv400, + }, + /* rgb */ + 
{ + .fourcc = v4l2_pix_fmt_rgb24, + .fmt_type = rkisp1_fmt_rgb, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_rgb888, + }, { + .fourcc = v4l2_pix_fmt_rgb565, + .fmt_type = rkisp1_fmt_rgb, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_rgb565, + }, { + .fourcc = v4l2_pix_fmt_bgr666, + .fmt_type = rkisp1_fmt_rgb, + .write_format = rkisp1_mi_ctrl_sp_write_pla, + .output_format = rkisp1_mi_ctrl_sp_output_rgb666, + }, +}; + +static const struct rkisp1_capture_config rkisp1_capture_config_mp = { + .fmts = rkisp1_mp_fmts, + .fmt_size = array_size(rkisp1_mp_fmts), + .mi = { + .y_size_init = rkisp1_cif_mi_mp_y_size_init, + .cb_size_init = rkisp1_cif_mi_mp_cb_size_init, + .cr_size_init = rkisp1_cif_mi_mp_cr_size_init, + .y_base_ad_init = rkisp1_cif_mi_mp_y_base_ad_init, + .cb_base_ad_init = rkisp1_cif_mi_mp_cb_base_ad_init, + .cr_base_ad_init = rkisp1_cif_mi_mp_cr_base_ad_init, + .y_offs_cnt_init = rkisp1_cif_mi_mp_y_offs_cnt_init, + .cb_offs_cnt_init = rkisp1_cif_mi_mp_cb_offs_cnt_init, + .cr_offs_cnt_init = rkisp1_cif_mi_mp_cr_offs_cnt_init, + }, +}; + +static const struct rkisp1_capture_config rkisp1_capture_config_sp = { + .fmts = rkisp1_sp_fmts, + .fmt_size = array_size(rkisp1_sp_fmts), + .mi = { + .y_size_init = rkisp1_cif_mi_sp_y_size_init, + .cb_size_init = rkisp1_cif_mi_sp_cb_size_init, + .cr_size_init = rkisp1_cif_mi_sp_cr_size_init, + .y_base_ad_init = rkisp1_cif_mi_sp_y_base_ad_init, + .cb_base_ad_init = rkisp1_cif_mi_sp_cb_base_ad_init, + .cr_base_ad_init = rkisp1_cif_mi_sp_cr_base_ad_init, + .y_offs_cnt_init = rkisp1_cif_mi_sp_y_offs_cnt_init, + .cb_offs_cnt_init = rkisp1_cif_mi_sp_cb_offs_cnt_init, + .cr_offs_cnt_init = rkisp1_cif_mi_sp_cr_offs_cnt_init, + }, +}; + +static inline struct rkisp1_vdev_node * +rkisp1_vdev_to_node(struct video_device *vdev) +{ + return container_of(vdev, struct rkisp1_vdev_node, vdev); +} + +/* 
---------------------------------------------------------------------------- + * stream operations for self-picture path (sp) and main-picture path (mp) + */ + +static void rkisp1_mi_config_ctrl(struct rkisp1_capture *cap) +{ + u32 mi_ctrl = rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl); + + mi_ctrl &= ~genmask(17, 16); + mi_ctrl |= rkisp1_cif_mi_ctrl_burst_len_lum_64; + + mi_ctrl &= ~genmask(19, 18); + mi_ctrl |= rkisp1_cif_mi_ctrl_burst_len_chrom_64; + + mi_ctrl |= rkisp1_cif_mi_ctrl_init_base_en | + rkisp1_cif_mi_ctrl_init_offset_en; + + rkisp1_write(cap->rkisp1, mi_ctrl, rkisp1_cif_mi_ctrl); +} + +static u32 rkisp1_pixfmt_comp_size(const struct v4l2_pix_format_mplane *pixm, + unsigned int component) +{ + /* + * if packed format, then plane_fmt[0].sizeimage is the sum of all + * components, so we need to calculate just the size of y component. + * see rkisp1_fill_pixfmt(). + */ + if (!component && pixm->num_planes == 1) + return pixm->plane_fmt[0].bytesperline * pixm->height; + return pixm->plane_fmt[component].sizeimage; +} + +static void rkisp1_irq_frame_end_enable(struct rkisp1_capture *cap) +{ + u32 mi_imsc = rkisp1_read(cap->rkisp1, rkisp1_cif_mi_imsc); + + mi_imsc |= rkisp1_cif_mi_frame(cap); + rkisp1_write(cap->rkisp1, mi_imsc, rkisp1_cif_mi_imsc); +} + +static void rkisp1_mp_config(struct rkisp1_capture *cap) +{ + const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt; + struct rkisp1_device *rkisp1 = cap->rkisp1; + u32 reg; + + rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_y), + cap->config->mi.y_size_init); + rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cb), + cap->config->mi.cb_size_init); + rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cr), + cap->config->mi.cr_size_init); + + rkisp1_irq_frame_end_enable(cap); + if (cap->pix.cfg->uv_swap) { + reg = rkisp1_read(rkisp1, rkisp1_cif_mi_xtd_format_ctrl); + + reg = (reg & ~bit(0)) | + rkisp1_cif_mi_xtd_fmt_ctrl_mp_cb_cr_swap; + rkisp1_write(rkisp1, 
reg, rkisp1_cif_mi_xtd_format_ctrl); + } + + rkisp1_mi_config_ctrl(cap); + + reg = rkisp1_read(rkisp1, rkisp1_cif_mi_ctrl); + reg &= ~rkisp1_mi_ctrl_mp_fmt_mask; + reg |= cap->pix.cfg->write_format; + rkisp1_write(rkisp1, reg, rkisp1_cif_mi_ctrl); + + reg = rkisp1_read(rkisp1, rkisp1_cif_mi_ctrl); + reg |= rkisp1_cif_mi_mp_autoupdate_enable; + rkisp1_write(rkisp1, reg, rkisp1_cif_mi_ctrl); +} + +static void rkisp1_sp_config(struct rkisp1_capture *cap) +{ + const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt; + struct rkisp1_device *rkisp1 = cap->rkisp1; + u32 mi_ctrl; + + rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_y), + cap->config->mi.y_size_init); + rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cb), + cap->config->mi.cb_size_init); + rkisp1_write(rkisp1, rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cr), + cap->config->mi.cr_size_init); + + rkisp1_write(rkisp1, pixm->width, rkisp1_cif_mi_sp_y_pic_width); + rkisp1_write(rkisp1, pixm->height, rkisp1_cif_mi_sp_y_pic_height); + rkisp1_write(rkisp1, cap->sp_y_stride, rkisp1_cif_mi_sp_y_llength); + + rkisp1_irq_frame_end_enable(cap); + if (cap->pix.cfg->uv_swap) { + u32 reg = rkisp1_read(rkisp1, rkisp1_cif_mi_xtd_format_ctrl); + + rkisp1_write(rkisp1, reg & ~bit(1), + rkisp1_cif_mi_xtd_format_ctrl); + } + + rkisp1_mi_config_ctrl(cap); + + mi_ctrl = rkisp1_read(rkisp1, rkisp1_cif_mi_ctrl); + mi_ctrl &= ~rkisp1_mi_ctrl_sp_fmt_mask; + mi_ctrl |= cap->pix.cfg->write_format | + rkisp1_mi_ctrl_sp_input_yuv422 | + cap->pix.cfg->output_format | + rkisp1_cif_mi_sp_autoupdate_enable; + rkisp1_write(rkisp1, mi_ctrl, rkisp1_cif_mi_ctrl); +} + +static void rkisp1_mp_disable(struct rkisp1_capture *cap) +{ + u32 mi_ctrl = rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl); + + mi_ctrl &= ~(rkisp1_cif_mi_ctrl_mp_enable | + rkisp1_cif_mi_ctrl_raw_enable); + rkisp1_write(cap->rkisp1, mi_ctrl, rkisp1_cif_mi_ctrl); +} + +static void rkisp1_sp_disable(struct rkisp1_capture *cap) +{ + u32 mi_ctrl = 
rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl); + + mi_ctrl &= ~rkisp1_cif_mi_ctrl_sp_enable; + rkisp1_write(cap->rkisp1, mi_ctrl, rkisp1_cif_mi_ctrl); +} + +static void rkisp1_mp_enable(struct rkisp1_capture *cap) +{ + const struct rkisp1_capture_fmt_cfg *isp_fmt = cap->pix.cfg; + u32 mi_ctrl; + + rkisp1_mp_disable(cap); + + mi_ctrl = rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl); + if (isp_fmt->fmt_type == rkisp1_fmt_bayer) + mi_ctrl |= rkisp1_cif_mi_ctrl_raw_enable; + /* yuv */ + else + mi_ctrl |= rkisp1_cif_mi_ctrl_mp_enable; + + rkisp1_write(cap->rkisp1, mi_ctrl, rkisp1_cif_mi_ctrl); +} + +static void rkisp1_sp_enable(struct rkisp1_capture *cap) +{ + u32 mi_ctrl = rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl); + + mi_ctrl |= rkisp1_cif_mi_ctrl_sp_enable; + rkisp1_write(cap->rkisp1, mi_ctrl, rkisp1_cif_mi_ctrl); +} + +static void rkisp1_mp_sp_stop(struct rkisp1_capture *cap) +{ + if (!cap->is_streaming) + return; + rkisp1_write(cap->rkisp1, + rkisp1_cif_mi_frame(cap), rkisp1_cif_mi_icr); + cap->ops->disable(cap); +} + +static bool rkisp1_mp_is_stopped(struct rkisp1_capture *cap) +{ + u32 en = rkisp1_cif_mi_ctrl_shd_mp_in_enabled | + rkisp1_cif_mi_ctrl_shd_raw_out_enabled; + + return !(rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl_shd) & en); +} + +static bool rkisp1_sp_is_stopped(struct rkisp1_capture *cap) +{ + return !(rkisp1_read(cap->rkisp1, rkisp1_cif_mi_ctrl_shd) & + rkisp1_cif_mi_ctrl_shd_sp_in_enabled); +} + +static void rkisp1_mp_set_data_path(struct rkisp1_capture *cap) +{ + u32 dpcl = rkisp1_read(cap->rkisp1, rkisp1_cif_vi_dpcl); + + dpcl = dpcl | rkisp1_cif_vi_dpcl_chan_mode_mp | + rkisp1_cif_vi_dpcl_mp_mux_mrsz_mi; + rkisp1_write(cap->rkisp1, dpcl, rkisp1_cif_vi_dpcl); +} + +static void rkisp1_sp_set_data_path(struct rkisp1_capture *cap) +{ + u32 dpcl = rkisp1_read(cap->rkisp1, rkisp1_cif_vi_dpcl); + + dpcl |= rkisp1_cif_vi_dpcl_chan_mode_sp; + rkisp1_write(cap->rkisp1, dpcl, rkisp1_cif_vi_dpcl); +} + +static struct rkisp1_capture_ops 
rkisp1_capture_ops_mp = { + .config = rkisp1_mp_config, + .enable = rkisp1_mp_enable, + .disable = rkisp1_mp_disable, + .stop = rkisp1_mp_sp_stop, + .set_data_path = rkisp1_mp_set_data_path, + .is_stopped = rkisp1_mp_is_stopped, +}; + +static struct rkisp1_capture_ops rkisp1_capture_ops_sp = { + .config = rkisp1_sp_config, + .enable = rkisp1_sp_enable, + .disable = rkisp1_sp_disable, + .stop = rkisp1_mp_sp_stop, + .set_data_path = rkisp1_sp_set_data_path, + .is_stopped = rkisp1_sp_is_stopped, +}; + +/* ---------------------------------------------------------------------------- + * frame buffer operations + */ + +static int rkisp1_dummy_buf_create(struct rkisp1_capture *cap) +{ + const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt; + struct rkisp1_dummy_buffer *dummy_buf = &cap->buf.dummy; + + dummy_buf->size = max3(rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_y), + rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cb), + rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cr)); + + /* the driver never access vaddr, no mapping is required */ + dummy_buf->vaddr = dma_alloc_attrs(cap->rkisp1->dev, + dummy_buf->size, + &dummy_buf->dma_addr, + gfp_kernel, + dma_attr_no_kernel_mapping); + if (!dummy_buf->vaddr) + return -enomem; + + return 0; +} + +static void rkisp1_dummy_buf_destroy(struct rkisp1_capture *cap) +{ + dma_free_attrs(cap->rkisp1->dev, + cap->buf.dummy.size, cap->buf.dummy.vaddr, + cap->buf.dummy.dma_addr, dma_attr_no_kernel_mapping); +} + +static void rkisp1_set_next_buf(struct rkisp1_capture *cap) +{ + /* + * use the dummy space allocated by dma_alloc_coherent to + * throw data if there is no available buffer. 
+ */ + if (cap->buf.next) { + u32 *buff_addr = cap->buf.next->buff_addr; + + rkisp1_write(cap->rkisp1, + buff_addr[rkisp1_plane_y], + cap->config->mi.y_base_ad_init); + rkisp1_write(cap->rkisp1, + buff_addr[rkisp1_plane_cb], + cap->config->mi.cb_base_ad_init); + rkisp1_write(cap->rkisp1, + buff_addr[rkisp1_plane_cr], + cap->config->mi.cr_base_ad_init); + } else { + rkisp1_write(cap->rkisp1, + cap->buf.dummy.dma_addr, + cap->config->mi.y_base_ad_init); + rkisp1_write(cap->rkisp1, + cap->buf.dummy.dma_addr, + cap->config->mi.cb_base_ad_init); + rkisp1_write(cap->rkisp1, + cap->buf.dummy.dma_addr, + cap->config->mi.cr_base_ad_init); + } + + /* set plane offsets */ + rkisp1_write(cap->rkisp1, 0, cap->config->mi.y_offs_cnt_init); + rkisp1_write(cap->rkisp1, 0, cap->config->mi.cb_offs_cnt_init); + rkisp1_write(cap->rkisp1, 0, cap->config->mi.cr_offs_cnt_init); +} + +/* + * this function is called when a frame end comes. the next frame + * is processing and we should set up buffer for next-next frame, + * otherwise it will overflow. 
+ */ +static void rkisp1_handle_buffer(struct rkisp1_capture *cap) +{ + struct rkisp1_isp *isp = &cap->rkisp1->isp; + struct rkisp1_buffer *curr_buf = cap->buf.curr; + unsigned long flags; + + spin_lock_irqsave(&cap->buf.lock, flags); + + if (curr_buf) { + curr_buf->vb.sequence = atomic_read(&isp->frame_sequence); + curr_buf->vb.vb2_buf.timestamp = ktime_get_boottime_ns(); + curr_buf->vb.field = v4l2_field_none; + vb2_buffer_done(&curr_buf->vb.vb2_buf, vb2_buf_state_done); + } else { + cap->rkisp1->debug.frame_drop[cap->id]++; + } + + cap->buf.curr = cap->buf.next; + cap->buf.next = null; + + if (!list_empty(&cap->buf.queue)) { + cap->buf.next = list_first_entry(&cap->buf.queue, + struct rkisp1_buffer, + queue); + list_del(&cap->buf.next->queue); + } + spin_unlock_irqrestore(&cap->buf.lock, flags); + + rkisp1_set_next_buf(cap); +} + +void rkisp1_capture_isr(struct rkisp1_device *rkisp1) +{ + unsigned int i; + u32 status; + + status = rkisp1_read(rkisp1, rkisp1_cif_mi_mis); + rkisp1_write(rkisp1, status, rkisp1_cif_mi_icr); + + for (i = 0; i < array_size(rkisp1->capture_devs); ++i) { + struct rkisp1_capture *cap = &rkisp1->capture_devs[i]; + + if (!(status & rkisp1_cif_mi_frame(cap))) + continue; + if (!cap->is_stopping) { + rkisp1_handle_buffer(cap); + continue; + } + /* + * make sure stream is actually stopped, whose state + * can be read from the shadow register, before + * wake_up() thread which would immediately free all + * frame buffers. stop() takes effect at the next + * frame end that sync the configurations to shadow + * regs. 
+ */ + if (!cap->ops->is_stopped(cap)) { + cap->ops->stop(cap); + continue; + } + cap->is_stopping = false; + cap->is_streaming = false; + wake_up(&cap->done); + } +} + +/* ---------------------------------------------------------------------------- + * vb2 operations + */ + +static int rkisp1_vb2_queue_setup(struct vb2_queue *queue, + unsigned int *num_buffers, + unsigned int *num_planes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct rkisp1_capture *cap = queue->drv_priv; + const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt; + unsigned int i; + + if (*num_planes) { + if (*num_planes != pixm->num_planes) + return -einval; + + for (i = 0; i < pixm->num_planes; i++) + if (sizes[i] < pixm->plane_fmt[i].sizeimage) + return -einval; + } else { + *num_planes = pixm->num_planes; + for (i = 0; i < pixm->num_planes; i++) + sizes[i] = pixm->plane_fmt[i].sizeimage; + } + + return 0; +} + +static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rkisp1_buffer *ispbuf = + container_of(vbuf, struct rkisp1_buffer, vb); + struct rkisp1_capture *cap = vb->vb2_queue->drv_priv; + const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt; + unsigned long flags; + unsigned int i; + + memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr)); + for (i = 0; i < pixm->num_planes; i++) + ispbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i); + + /* convert to non-mplane */ + if (pixm->num_planes == 1) { + ispbuf->buff_addr[rkisp1_plane_cb] = + ispbuf->buff_addr[rkisp1_plane_y] + + rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_y); + ispbuf->buff_addr[rkisp1_plane_cr] = + ispbuf->buff_addr[rkisp1_plane_cb] + + rkisp1_pixfmt_comp_size(pixm, rkisp1_plane_cb); + } + + spin_lock_irqsave(&cap->buf.lock, flags); + + /* + * if there's no next buffer assigned, queue this buffer directly + * as the next buffer, and update the memory interface. 
+ */ + if (cap->is_streaming && !cap->buf.next && + atomic_read(&cap->rkisp1->isp.frame_sequence) == -1) { + cap->buf.next = ispbuf; + rkisp1_set_next_buf(cap); + } else { + list_add_tail(&ispbuf->queue, &cap->buf.queue); + } + spin_unlock_irqrestore(&cap->buf.lock, flags); +} + +static int rkisp1_vb2_buf_prepare(struct vb2_buffer *vb) +{ + struct rkisp1_capture *cap = vb->vb2_queue->drv_priv; + unsigned int i; + + for (i = 0; i < cap->pix.fmt.num_planes; i++) { + unsigned long size = cap->pix.fmt.plane_fmt[i].sizeimage; + + if (vb2_plane_size(vb, i) < size) { + dev_err(cap->rkisp1->dev, + "user buffer too small (%ld < %ld) ", + vb2_plane_size(vb, i), size); + return -einval; + } + vb2_set_plane_payload(vb, i, size); + } + + return 0; +} + +static void rkisp1_return_all_buffers(struct rkisp1_capture *cap, + enum vb2_buffer_state state) +{ + unsigned long flags; + struct rkisp1_buffer *buf; + + spin_lock_irqsave(&cap->buf.lock, flags); + if (cap->buf.curr) { + vb2_buffer_done(&cap->buf.curr->vb.vb2_buf, state); + cap->buf.curr = null; + } + if (cap->buf.next) { + vb2_buffer_done(&cap->buf.next->vb.vb2_buf, state); + cap->buf.next = null; + } + while (!list_empty(&cap->buf.queue)) { + buf = list_first_entry(&cap->buf.queue, + struct rkisp1_buffer, queue); + list_del(&buf->queue); + vb2_buffer_done(&buf->vb.vb2_buf, state); + } + spin_unlock_irqrestore(&cap->buf.lock, flags); +} + +/* + * rkisp1_pipeline_sink_walk - walk through the pipeline and call cb + * @from: entity at which to start pipeline walk + * @until: entity at which to stop pipeline walk + * + * walk the entities chain starting at the pipeline video node and stop + * all subdevices in the chain. + * + * if the until argument isn't null, stop the pipeline walk when reaching the + * until entity. this is used to disable a partially started pipeline due to a + * subdev start error. 
+ */ +static int rkisp1_pipeline_sink_walk(struct media_entity *from, + struct media_entity *until, + int (*cb)(struct media_entity *from, + struct media_entity *curr)) +{ + struct media_entity *entity = from; + struct media_pad *pad; + unsigned int i; + int ret; + + while (1) { + pad = null; + /* find remote source pad */ + for (i = 0; i < entity->num_pads; i++) { + struct media_pad *spad = &entity->pads[i]; + + if (!(spad->flags & media_pad_fl_sink)) + continue; + pad = media_entity_remote_pad(spad); + if (pad && is_media_entity_v4l2_subdev(pad->entity)) + break; + } + if (!pad || !is_media_entity_v4l2_subdev(pad->entity)) + break; + + entity = pad->entity; + if (entity == until) + break; + + ret = cb(from, entity); + if (ret) + return ret; + } + + return 0; +} + +static int rkisp1_pipeline_disable_cb(struct media_entity *from, + struct media_entity *curr) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(curr); + + return v4l2_subdev_call(sd, video, s_stream, false); +} + +static int rkisp1_pipeline_enable_cb(struct media_entity *from, + struct media_entity *curr) +{ + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(curr); + + return v4l2_subdev_call(sd, video, s_stream, true); +} + +static void rkisp1_stream_stop(struct rkisp1_capture *cap) +{ + int ret; + + /* stream should stop in interrupt. if it dosn't, stop it by force. 
*/ + cap->is_stopping = true; + ret = wait_event_timeout(cap->done, + !cap->is_streaming, + msecs_to_jiffies(1000)); + if (!ret) { + cap->rkisp1->debug.stop_timeout[cap->id]++; + cap->ops->stop(cap); + cap->is_stopping = false; + cap->is_streaming = false; + } +} + +static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue) +{ + struct rkisp1_capture *cap = queue->drv_priv; + struct rkisp1_vdev_node *node = &cap->vnode; + struct rkisp1_device *rkisp1 = cap->rkisp1; + int ret; + + rkisp1_stream_stop(cap); + media_pipeline_stop(&node->vdev.entity); + ret = rkisp1_pipeline_sink_walk(&node->vdev.entity, null, + rkisp1_pipeline_disable_cb); + if (ret) + dev_err(rkisp1->dev, + "pipeline stream-off failed error:%d ", ret); + + rkisp1_return_all_buffers(cap, vb2_buf_state_error); + + ret = v4l2_pipeline_pm_use(&node->vdev.entity, 0); + if (ret) + dev_err(rkisp1->dev, "pipeline close failed error:%d ", ret); + + ret = pm_runtime_put(rkisp1->dev); + if (ret) + dev_err(rkisp1->dev, "power down failed error:%d ", ret); + + rkisp1_dummy_buf_destroy(cap); +} + +/* + * most of registers inside rockchip isp1 have shadow register since + * they must be not be changed during processing a frame. + * usually, each sub-module updates its shadow register after + * processing the last pixel of a frame. + */ +static void rkisp1_stream_start(struct rkisp1_capture *cap) +{ + struct rkisp1_device *rkisp1 = cap->rkisp1; + struct rkisp1_capture *other = &rkisp1->capture_devs[cap->id ^ 1]; + + cap->ops->set_data_path(cap); + cap->ops->config(cap); + + /* setup a buffer for the next frame */ + rkisp1_handle_buffer(cap); + cap->ops->enable(cap); + /* it's safe to config active and shadow regs for the + * first stream. while when the second is starting, do not + * force update because it also update the first one. + * + * the latter case would drop one more buf(that is 2) since + * there's not buf in shadow when the second fe received. 
this's + * also required because the second fe maybe corrupt especially + * when run at 120fps. + */ + if (!other->is_streaming) { + /* force cfg update */ + rkisp1_write(rkisp1, + rkisp1_cif_mi_init_soft_upd, rkisp1_cif_mi_init); + rkisp1_handle_buffer(cap); + } + cap->is_streaming = true; +} + +static int +rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) +{ + struct rkisp1_capture *cap = queue->drv_priv; + struct media_entity *entity = &cap->vnode.vdev.entity; + int ret; + + ret = rkisp1_dummy_buf_create(cap); + if (ret) + goto err_ret_buffers; + + ret = pm_runtime_get_sync(cap->rkisp1->dev); + if (ret) { + dev_err(cap->rkisp1->dev, "power up failed %d ", ret); + goto err_destroy_dummy; + } + ret = v4l2_pipeline_pm_use(entity, 1); + if (ret) { + dev_err(cap->rkisp1->dev, "open cif pipeline failed %d ", ret); + goto err_pipe_pm_put; + } + + rkisp1_stream_start(cap); + + /* start sub-devices */ + ret = rkisp1_pipeline_sink_walk(entity, null, + rkisp1_pipeline_enable_cb); + if (ret) + goto err_stop_stream; + + ret = media_pipeline_start(entity, &cap->rkisp1->pipe); + if (ret) { + dev_err(cap->rkisp1->dev, "start pipeline failed %d ", ret); + goto err_pipe_disable; + } + + return 0; + +err_pipe_disable: + rkisp1_pipeline_sink_walk(entity, null, rkisp1_pipeline_disable_cb); +err_stop_stream: + rkisp1_stream_stop(cap); + v4l2_pipeline_pm_use(entity, 0); +err_pipe_pm_put: + pm_runtime_put(cap->rkisp1->dev); +err_destroy_dummy: + rkisp1_dummy_buf_destroy(cap); +err_ret_buffers: + rkisp1_return_all_buffers(cap, vb2_buf_state_queued); + + return ret; +} + +static struct vb2_ops rkisp1_vb2_ops = { + .queue_setup = rkisp1_vb2_queue_setup, + .buf_queue = rkisp1_vb2_buf_queue, + .buf_prepare = rkisp1_vb2_buf_prepare, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .stop_streaming = rkisp1_vb2_stop_streaming, + .start_streaming = rkisp1_vb2_start_streaming, +}; + +/* 
---------------------------------------------------------------------------- + * ioctls operations + */ + +static const struct v4l2_format_info * +rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm, + enum rkisp1_stream_id id) +{ + struct v4l2_plane_pix_format *plane_y = &pixm->plane_fmt[0]; + const struct v4l2_format_info *info; + unsigned int i; + u32 stride; + + info = v4l2_format_info(pixm->pixelformat); + pixm->num_planes = info->mem_planes; + stride = info->bpp[0] * pixm->width; + /* self path supports custom stride but main path doesn't */ + if (id == rkisp1_mainpath || plane_y->bytesperline < stride) + plane_y->bytesperline = stride; + plane_y->sizeimage = plane_y->bytesperline * pixm->height; + + /* normalize stride to pixels per line */ + stride = div_round_up(plane_y->bytesperline, info->bpp[0]); + + for (i = 1; i < info->comp_planes; i++) { + struct v4l2_plane_pix_format *plane = &pixm->plane_fmt[i]; + + /* bytesperline for other components derive from y component */ + plane->bytesperline = div_round_up(stride, info->hdiv) * + info->bpp[i]; + plane->sizeimage = plane->bytesperline * + div_round_up(pixm->height, info->vdiv); + } + + /* + * if pixfmt is packed, then plane_fmt[0] should contain the total size + * considering all components. plane_fmt[i] for i > 0 should be ignored + * by userspace as mem_planes == 1, but we are keeping information there + * for convenience. 
+ */ + if (info->mem_planes == 1) + for (i = 1; i < info->comp_planes; i++) + plane_y->sizeimage += pixm->plane_fmt[i].sizeimage; + + return info; +} + +static const struct rkisp1_capture_fmt_cfg * +rkisp1_find_fmt_cfg(const struct rkisp1_capture *cap, const u32 pixelfmt) +{ + unsigned int i; + + for (i = 0; i < cap->config->fmt_size; i++) { + if (cap->config->fmts[i].fourcc == pixelfmt) + return &cap->config->fmts[i]; + } + return null; +} + +static void rkisp1_try_fmt(const struct rkisp1_capture *cap, + struct v4l2_pix_format_mplane *pixm, + const struct rkisp1_capture_fmt_cfg **fmt_cfg, + const struct v4l2_format_info **fmt_info) +{ + const struct rkisp1_capture_config *config = cap->config; + struct rkisp1_capture *other_cap = + &cap->rkisp1->capture_devs[cap->id ^ 1]; + const struct rkisp1_capture_fmt_cfg *fmt; + const struct v4l2_format_info *info; + const unsigned int max_widths[] = { rkisp1_rsz_mp_src_max_width, + rkisp1_rsz_sp_src_max_width }; + const unsigned int max_heights[] = { rkisp1_rsz_mp_src_max_height, + rkisp1_rsz_sp_src_max_height}; + + fmt = rkisp1_find_fmt_cfg(cap, pixm->pixelformat); + if (!fmt) { + fmt = config->fmts; + pixm->pixelformat = fmt->fourcc; + } + + pixm->width = clamp_t(u32, pixm->width, + rkisp1_rsz_src_min_width, max_widths[cap->id]); + pixm->height = clamp_t(u32, pixm->height, + rkisp1_rsz_src_min_height, max_heights[cap->id]); + + pixm->field = v4l2_field_none; + pixm->colorspace = v4l2_colorspace_default; + pixm->ycbcr_enc = v4l2_ycbcr_enc_default; + + info = rkisp1_fill_pixfmt(pixm, cap->id); + + /* can not change quantization when stream-on */ + if (other_cap->is_streaming) + pixm->quantization = other_cap->pix.fmt.quantization; + /* output full range by default, take effect in params */ + else if (!pixm->quantization || + pixm->quantization > v4l2_quantization_lim_range) + pixm->quantization = v4l2_quantization_full_range; + + if (fmt_cfg) + *fmt_cfg = fmt; + if (fmt_info) + *fmt_info = info; +} + +static void 
rkisp1_set_fmt(struct rkisp1_capture *cap, + struct v4l2_pix_format_mplane *pixm) +{ + rkisp1_try_fmt(cap, pixm, &cap->pix.cfg, &cap->pix.info); + cap->pix.fmt = *pixm; + + /* sp supports custom stride in number of pixels of the y plane */ + if (cap->id == rkisp1_selfpath) + cap->sp_y_stride = pixm->plane_fmt[0].bytesperline / + cap->pix.info->bpp[0]; +} + +static int rkisp1_try_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct rkisp1_capture *cap = video_drvdata(file); + + rkisp1_try_fmt(cap, &f->fmt.pix_mp, null, null); + + return 0; +} + +static int rkisp1_enum_fmt_vid_cap_mplane(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct rkisp1_capture *cap = video_drvdata(file); + const struct rkisp1_capture_fmt_cfg *fmt = null; + + if (f->index >= cap->config->fmt_size) + return -einval; + + fmt = &cap->config->fmts[f->index]; + f->pixelformat = fmt->fourcc; + + return 0; +} + +static int rkisp1_s_fmt_vid_cap_mplane(struct file *file, + void *priv, struct v4l2_format *f) +{ + struct rkisp1_capture *cap = video_drvdata(file); + struct rkisp1_vdev_node *node = + rkisp1_vdev_to_node(&cap->vnode.vdev); + + if (vb2_is_busy(&node->buf_queue)) + return -ebusy; + + rkisp1_set_fmt(cap, &f->fmt.pix_mp); + + return 0; +} + +static int rkisp1_g_fmt_vid_cap_mplane(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct rkisp1_capture *cap = video_drvdata(file); + + f->fmt.pix_mp = cap->pix.fmt; + + return 0; +} + +static int +rkisp1_querycap(struct file *file, void *priv, struct v4l2_capability *cap) +{ + struct rkisp1_capture *cap_dev = video_drvdata(file); + struct rkisp1_device *rkisp1 = cap_dev->rkisp1; + + strscpy(cap->driver, rkisp1->dev->driver->name, sizeof(cap->driver)); + strscpy(cap->card, rkisp1->dev->driver->name, sizeof(cap->card)); + strscpy(cap->bus_info, rkisp1_bus_info, sizeof(cap->bus_info)); + + return 0; +} + +static const struct v4l2_ioctl_ops rkisp1_v4l2_ioctl_ops = { + .vidioc_reqbufs = 
vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_try_fmt_vid_cap_mplane = rkisp1_try_fmt_vid_cap_mplane, + .vidioc_s_fmt_vid_cap_mplane = rkisp1_s_fmt_vid_cap_mplane, + .vidioc_g_fmt_vid_cap_mplane = rkisp1_g_fmt_vid_cap_mplane, + .vidioc_enum_fmt_vid_cap = rkisp1_enum_fmt_vid_cap_mplane, + .vidioc_querycap = rkisp1_querycap, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static int rkisp1_capture_link_validate(struct media_link *link) +{ + struct video_device *vdev = + media_entity_to_video_device(link->sink->entity); + struct v4l2_subdev *sd = + media_entity_to_v4l2_subdev(link->source->entity); + struct rkisp1_capture *cap = video_get_drvdata(vdev); + struct rkisp1_isp *isp = &cap->rkisp1->isp; + struct v4l2_subdev_format sd_fmt; + int ret; + + if (cap->id == rkisp1_selfpath && + isp->src_fmt->mbus_code != media_bus_fmt_yuyv8_2x8) { + dev_err(cap->rkisp1->dev, + "selfpath only supports media_bus_fmt_yuyv8_2x8 "); + return -epipe; + } + + if (cap->pix.cfg->fmt_type != isp->src_fmt->fmt_type) { + dev_err(cap->rkisp1->dev, + "format type mismatch in link '%s:%d->%s:%d' ", + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + return -epipe; + } + + sd_fmt.which = v4l2_subdev_format_active; + sd_fmt.pad = link->source->index; + ret = v4l2_subdev_call(sd, pad, get_fmt, null, &sd_fmt); + if (ret) + return ret; + + if (sd_fmt.format.height != cap->pix.fmt.height || + sd_fmt.format.width != cap->pix.fmt.width) + return -epipe; + + return 0; +} + +/* ---------------------------------------------------------------------------- + * core functions + */ + +static 
const struct media_entity_operations rkisp1_media_ops = { + .link_validate = rkisp1_capture_link_validate, +}; + +static const struct v4l2_file_operations rkisp1_fops = { + .open = v4l2_fh_open, + .release = vb2_fop_release, + .unlocked_ioctl = video_ioctl2, + .poll = vb2_fop_poll, + .mmap = vb2_fop_mmap, +}; + +static void rkisp1_unregister_capture(struct rkisp1_capture *cap) +{ + media_entity_cleanup(&cap->vnode.vdev.entity); + video_unregister_device(&cap->vnode.vdev); +} + +void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1) +{ + struct rkisp1_capture *mp = &rkisp1->capture_devs[rkisp1_mainpath]; + struct rkisp1_capture *sp = &rkisp1->capture_devs[rkisp1_selfpath]; + + rkisp1_unregister_capture(mp); + rkisp1_unregister_capture(sp); +} + +static int rkisp1_register_capture(struct rkisp1_capture *cap) +{ + const char * const dev_names[] = {rkisp1_mp_dev_name, + rkisp1_sp_dev_name}; + struct v4l2_device *v4l2_dev = &cap->rkisp1->v4l2_dev; + struct video_device *vdev = &cap->vnode.vdev; + struct rkisp1_vdev_node *node; + struct vb2_queue *q; + int ret; + + strscpy(vdev->name, dev_names[cap->id], sizeof(vdev->name)); + node = rkisp1_vdev_to_node(vdev); + mutex_init(&node->vlock); + + vdev->ioctl_ops = &rkisp1_v4l2_ioctl_ops; + vdev->release = video_device_release_empty; + vdev->fops = &rkisp1_fops; + vdev->minor = -1; + vdev->v4l2_dev = v4l2_dev; + vdev->lock = &node->vlock; + vdev->device_caps = v4l2_cap_video_capture_mplane | + v4l2_cap_streaming; + vdev->entity.ops = &rkisp1_media_ops; + video_set_drvdata(vdev, cap); + vdev->vfl_dir = vfl_dir_rx; + node->pad.flags = media_pad_fl_sink; + + q = &node->buf_queue; + q->type = v4l2_buf_type_video_capture_mplane; + q->io_modes = vb2_mmap | vb2_dmabuf | vb2_userptr; + q->drv_priv = cap; + q->ops = &rkisp1_vb2_ops; + q->mem_ops = &vb2_dma_contig_memops; + q->buf_struct_size = sizeof(struct rkisp1_buffer); + q->min_buffers_needed = rkisp1_min_buffers_needed; + q->timestamp_flags = 
v4l2_buf_flag_timestamp_monotonic; + q->lock = &node->vlock; + q->dev = cap->rkisp1->dev; + ret = vb2_queue_init(q); + if (ret) { + dev_err(cap->rkisp1->dev, + "vb2 queue init failed (err=%d) ", ret); + return ret; + } + + vdev->queue = q; + + ret = video_register_device(vdev, vfl_type_grabber, -1); + if (ret) { + dev_err(cap->rkisp1->dev, + "failed to register %s, ret=%d ", vdev->name, ret); + return ret; + } + v4l2_info(v4l2_dev, "registered %s as /dev/video%d ", vdev->name, + vdev->num); + + ret = media_entity_pads_init(&vdev->entity, 1, &node->pad); + if (ret) { + video_unregister_device(vdev); + return ret; + } + + return 0; +} + +static void +rkisp1_capture_init(struct rkisp1_device *rkisp1, enum rkisp1_stream_id id) +{ + struct rkisp1_capture *cap = &rkisp1->capture_devs[id]; + struct v4l2_pix_format_mplane pixm; + + memset(cap, 0, sizeof(*cap)); + cap->id = id; + cap->rkisp1 = rkisp1; + + init_list_head(&cap->buf.queue); + init_waitqueue_head(&cap->done); + spin_lock_init(&cap->buf.lock); + if (cap->id == rkisp1_selfpath) { + cap->ops = &rkisp1_capture_ops_sp; + cap->config = &rkisp1_capture_config_sp; + } else { + cap->ops = &rkisp1_capture_ops_mp; + cap->config = &rkisp1_capture_config_mp; + } + + cap->is_streaming = false; + + memset(&pixm, 0, sizeof(pixm)); + pixm.pixelformat = v4l2_pix_fmt_yuyv; + pixm.width = rkisp1_default_width; + pixm.height = rkisp1_default_height; + rkisp1_set_fmt(cap, &pixm); +} + +int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1) +{ + struct rkisp1_capture *cap; + unsigned int i, j; + int ret; + + for (i = 0; i < array_size(rkisp1->capture_devs); i++) { + rkisp1_capture_init(rkisp1, i); + cap = &rkisp1->capture_devs[i]; + cap->rkisp1 = rkisp1; + ret = rkisp1_register_capture(cap); + if (ret) + goto err_unreg_capture_devs; + } + + return 0; + +err_unreg_capture_devs: + for (j = 0; j < i; j++) { + cap = &rkisp1->capture_devs[j]; + rkisp1_unregister_capture(cap); + } + + return ret; +} diff --git 
a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h --- a/drivers/staging/media/rkisp1/rkisp1-common.h +++ b/drivers/staging/media/rkisp1/rkisp1-common.h +enum rkisp1_rsz_pad { + rkisp1_rsz_pad_sink, + rkisp1_rsz_pad_src, +}; + +enum rkisp1_stream_id { + rkisp1_mainpath, + rkisp1_selfpath, +}; + +/* + * struct rkisp1_capture - isp capture video device + * + * @pix.fmt: buffer format + * @pix.info: pixel information + * @pix.cfg: pixel configuration + * + * @buf.lock: lock to protect buf_queue + * @buf.queue: queued buffer list + * @buf.dummy: dummy space to store dropped data + * + * rkisp1 use shadowsock registers, so it need two buffer at a time + * @buf.curr: the buffer used for current frame + * @buf.next: the buffer used for next frame + */ +struct rkisp1_capture { + struct rkisp1_vdev_node vnode; + struct rkisp1_device *rkisp1; + enum rkisp1_stream_id id; + struct rkisp1_capture_ops *ops; + const struct rkisp1_capture_config *config; + bool is_streaming; + bool is_stopping; + wait_queue_head_t done; + unsigned int sp_y_stride; + struct { + /* protects queue, curr and next */ + spinlock_t lock; + struct list_head queue; + struct rkisp1_dummy_buffer dummy; + struct rkisp1_buffer *curr; + struct rkisp1_buffer *next; + } buf; + struct { + const struct rkisp1_capture_fmt_cfg *cfg; + const struct v4l2_format_info *info; + struct v4l2_pix_format_mplane fmt; + } pix; +}; + +struct rkisp1_resizer { + struct v4l2_subdev sd; + enum rkisp1_stream_id id; + struct rkisp1_device *rkisp1; + struct media_pad pads[rkisp1_isp_pad_max]; + struct v4l2_subdev_pad_config pad_cfg[rkisp1_isp_pad_max]; + const struct rkisp1_rsz_config *config; + enum rkisp1_fmt_pix_type fmt_type; +}; + + unsigned long stop_timeout[2]; + unsigned long frame_drop[2]; + * @rkisp1_capture: capture video device + struct rkisp1_resizer resizer_devs[2]; + struct rkisp1_capture capture_devs[2]; +void rkisp1_capture_isr(struct rkisp1_device *rkisp1); + +int 
rkisp1_capture_devs_register(struct rkisp1_device *rkisp1); +void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1); + +int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1); +void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1); diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c --- a/drivers/staging/media/rkisp1/rkisp1-dev.c +++ b/drivers/staging/media/rkisp1/rkisp1-dev.c + * rkisp1-resizer.c rkisp1-capture.c + * |====================| |=======================| - * | 2 | 3 | - * +------+------+ + * +-------------| 2 | 3 | + * | +------+------+ + * | | + * v v + * +- ---------+ +-----------+ + * | 0 | | 0 | + * ------------- ------------- + * | resizer | | resizer | + * ------------| ------------| + * | 1 | | 1 | + * +-----------+ +-----------+ + * | | + * v v + * +-----------+ +-----------+ + * | selfpath | | mainpath | + * | (capture) | | (capture) | + * +-----------+ +-----------+ + struct media_entity *source, *sink; + unsigned int i; - if (sd == &rkisp1->isp.sd) + if (sd == &rkisp1->isp.sd || + sd == &rkisp1->resizer_devs[rkisp1_mainpath].sd || + sd == &rkisp1->resizer_devs[rkisp1_selfpath].sd) + flags = media_lnk_fl_enabled; + + /* create isp->rsz->cap links */ + for (i = 0; i < 2; i++) { + source = &rkisp1->isp.sd.entity; + sink = &rkisp1->resizer_devs[i].sd.entity; + ret = media_create_pad_link(source, rkisp1_isp_pad_source_video, + sink, rkisp1_rsz_pad_sink, flags); + if (ret) + return ret; + + source = sink; + sink = &rkisp1->capture_devs[i].vnode.vdev.entity; + ret = media_create_pad_link(source, rkisp1_rsz_pad_src, + sink, 0, flags); + if (ret) + return ret; + } + + ret = rkisp1_resizer_devs_register(rkisp1); + if (ret) + goto err_unreg_isp_subdev; + + ret = rkisp1_capture_devs_register(rkisp1); + if (ret) + goto err_unreg_resizer_devs; + - rkisp1_isp_unregister(rkisp1); - return ret; + goto err_unreg_capture_devs; +err_unreg_capture_devs: + 
rkisp1_capture_devs_unregister(rkisp1); +err_unreg_resizer_devs: + rkisp1_resizer_devs_unregister(rkisp1); +err_unreg_isp_subdev: + rkisp1_isp_unregister(rkisp1); + return ret; + /* + * call rkisp1_capture_isr() first to handle the frame that + * potentially completed using the current frame_sequence number before + * it is potentially incremented by rkisp1_isp_isr() in the vertical + * sync. + */ + rkisp1_capture_isr(rkisp1); + debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir, + &debug->stop_timeout[rkisp1_mainpath]); + debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir, + &debug->stop_timeout[rkisp1_selfpath]); + debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir, + &debug->frame_drop[rkisp1_mainpath]); + debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir, + &debug->frame_drop[rkisp1_selfpath]); + rkisp1_capture_devs_unregister(rkisp1); + rkisp1_resizer_devs_unregister(rkisp1); diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - v4l resizer device + * + * copyright (c) 2019 collabora, ltd. + * + * based on rockchip isp1 driver by rockchip electronics co., ltd. + * copyright (c) 2017 rockchip electronics co., ltd. 
+ */ + +#include "rkisp1-common.h" + +#define rkisp1_rsz_sp_dev_name rkisp1_driver_name "_resizer_selfpath" +#define rkisp1_rsz_mp_dev_name rkisp1_driver_name "_resizer_mainpath" + +#define rkisp1_def_fmt media_bus_fmt_yuyv8_2x8 +#define rkisp1_def_fmt_type rkisp1_fmt_yuv + +#define rkisp1_mbus_fmt_hdiv 2 +#define rkisp1_mbus_fmt_vdiv 1 + +enum rkisp1_shadow_regs_when { + rkisp1_shadow_regs_sync, + rkisp1_shadow_regs_async, +}; + +struct rkisp1_rsz_config { + /* constrains */ + const int max_rsz_width; + const int max_rsz_height; + const int min_rsz_width; + const int min_rsz_height; + /* registers */ + struct { + u32 ctrl; + u32 ctrl_shd; + u32 scale_hy; + u32 scale_hcr; + u32 scale_hcb; + u32 scale_vy; + u32 scale_vc; + u32 scale_lut; + u32 scale_lut_addr; + u32 scale_hy_shd; + u32 scale_hcr_shd; + u32 scale_hcb_shd; + u32 scale_vy_shd; + u32 scale_vc_shd; + u32 phase_hy; + u32 phase_hc; + u32 phase_vy; + u32 phase_vc; + u32 phase_hy_shd; + u32 phase_hc_shd; + u32 phase_vy_shd; + u32 phase_vc_shd; + } rsz; + struct { + u32 ctrl; + u32 yuvmode_mask; + u32 rawmode_mask; + u32 h_offset; + u32 v_offset; + u32 h_size; + u32 v_size; + } dual_crop; +}; + +static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = { + /* constraints */ + .max_rsz_width = rkisp1_rsz_mp_src_max_width, + .max_rsz_height = rkisp1_rsz_mp_src_max_height, + .min_rsz_width = rkisp1_rsz_src_min_width, + .min_rsz_height = rkisp1_rsz_src_min_height, + /* registers */ + .rsz = { + .ctrl = rkisp1_cif_mrsz_ctrl, + .scale_hy = rkisp1_cif_mrsz_scale_hy, + .scale_hcr = rkisp1_cif_mrsz_scale_hcr, + .scale_hcb = rkisp1_cif_mrsz_scale_hcb, + .scale_vy = rkisp1_cif_mrsz_scale_vy, + .scale_vc = rkisp1_cif_mrsz_scale_vc, + .scale_lut = rkisp1_cif_mrsz_scale_lut, + .scale_lut_addr = rkisp1_cif_mrsz_scale_lut_addr, + .scale_hy_shd = rkisp1_cif_mrsz_scale_hy_shd, + .scale_hcr_shd = rkisp1_cif_mrsz_scale_hcr_shd, + .scale_hcb_shd = rkisp1_cif_mrsz_scale_hcb_shd, + .scale_vy_shd = rkisp1_cif_mrsz_scale_vy_shd, + 
.scale_vc_shd = rkisp1_cif_mrsz_scale_vc_shd, + .phase_hy = rkisp1_cif_mrsz_phase_hy, + .phase_hc = rkisp1_cif_mrsz_phase_hc, + .phase_vy = rkisp1_cif_mrsz_phase_vy, + .phase_vc = rkisp1_cif_mrsz_phase_vc, + .ctrl_shd = rkisp1_cif_mrsz_ctrl_shd, + .phase_hy_shd = rkisp1_cif_mrsz_phase_hy_shd, + .phase_hc_shd = rkisp1_cif_mrsz_phase_hc_shd, + .phase_vy_shd = rkisp1_cif_mrsz_phase_vy_shd, + .phase_vc_shd = rkisp1_cif_mrsz_phase_vc_shd, + }, + .dual_crop = { + .ctrl = rkisp1_cif_dual_crop_ctrl, + .yuvmode_mask = rkisp1_cif_dual_crop_mp_mode_yuv, + .rawmode_mask = rkisp1_cif_dual_crop_mp_mode_raw, + .h_offset = rkisp1_cif_dual_crop_m_h_offs, + .v_offset = rkisp1_cif_dual_crop_m_v_offs, + .h_size = rkisp1_cif_dual_crop_m_h_size, + .v_size = rkisp1_cif_dual_crop_m_v_size, + }, +}; + +static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = { + /* constraints */ + .max_rsz_width = rkisp1_rsz_sp_src_max_width, + .max_rsz_height = rkisp1_rsz_sp_src_max_height, + .min_rsz_width = rkisp1_rsz_src_min_width, + .min_rsz_height = rkisp1_rsz_src_min_height, + /* registers */ + .rsz = { + .ctrl = rkisp1_cif_srsz_ctrl, + .scale_hy = rkisp1_cif_srsz_scale_hy, + .scale_hcr = rkisp1_cif_srsz_scale_hcr, + .scale_hcb = rkisp1_cif_srsz_scale_hcb, + .scale_vy = rkisp1_cif_srsz_scale_vy, + .scale_vc = rkisp1_cif_srsz_scale_vc, + .scale_lut = rkisp1_cif_srsz_scale_lut, + .scale_lut_addr = rkisp1_cif_srsz_scale_lut_addr, + .scale_hy_shd = rkisp1_cif_srsz_scale_hy_shd, + .scale_hcr_shd = rkisp1_cif_srsz_scale_hcr_shd, + .scale_hcb_shd = rkisp1_cif_srsz_scale_hcb_shd, + .scale_vy_shd = rkisp1_cif_srsz_scale_vy_shd, + .scale_vc_shd = rkisp1_cif_srsz_scale_vc_shd, + .phase_hy = rkisp1_cif_srsz_phase_hy, + .phase_hc = rkisp1_cif_srsz_phase_hc, + .phase_vy = rkisp1_cif_srsz_phase_vy, + .phase_vc = rkisp1_cif_srsz_phase_vc, + .ctrl_shd = rkisp1_cif_srsz_ctrl_shd, + .phase_hy_shd = rkisp1_cif_srsz_phase_hy_shd, + .phase_hc_shd = rkisp1_cif_srsz_phase_hc_shd, + .phase_vy_shd = 
rkisp1_cif_srsz_phase_vy_shd, + .phase_vc_shd = rkisp1_cif_srsz_phase_vc_shd, + }, + .dual_crop = { + .ctrl = rkisp1_cif_dual_crop_ctrl, + .yuvmode_mask = rkisp1_cif_dual_crop_sp_mode_yuv, + .rawmode_mask = rkisp1_cif_dual_crop_sp_mode_raw, + .h_offset = rkisp1_cif_dual_crop_s_h_offs, + .v_offset = rkisp1_cif_dual_crop_s_v_offs, + .h_size = rkisp1_cif_dual_crop_s_h_size, + .v_size = rkisp1_cif_dual_crop_s_v_size, + }, +}; + +static struct v4l2_mbus_framefmt * +rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, u32 which) +{ + if (which == v4l2_subdev_format_try) + return v4l2_subdev_get_try_format(&rsz->sd, cfg, pad); + else + return v4l2_subdev_get_try_format(&rsz->sd, rsz->pad_cfg, pad); +} + +static struct v4l2_rect * +rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz, + struct v4l2_subdev_pad_config *cfg, + unsigned int pad, u32 which) +{ + if (which == v4l2_subdev_format_try) + return v4l2_subdev_get_try_crop(&rsz->sd, cfg, pad); + else + return v4l2_subdev_get_try_crop(&rsz->sd, rsz->pad_cfg, pad); +} + +/* ---------------------------------------------------------------------------- + * dual crop hw configs + */ + +static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz, + enum rkisp1_shadow_regs_when when) +{ + u32 dc_ctrl = rkisp1_read(rsz->rkisp1, rsz->config->dual_crop.ctrl); + u32 mask = ~(rsz->config->dual_crop.yuvmode_mask | + rsz->config->dual_crop.rawmode_mask); + + dc_ctrl &= mask; + if (when == rkisp1_shadow_regs_async) + dc_ctrl |= rkisp1_cif_dual_crop_gen_cfg_upd; + else + dc_ctrl |= rkisp1_cif_dual_crop_cfg_upd; + rkisp1_write(rsz->rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl); +} + +/* configure dual-crop unit */ +static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz) +{ + struct rkisp1_device *rkisp1 = rsz->rkisp1; + struct v4l2_mbus_framefmt *sink_fmt; + struct v4l2_rect *sink_crop; + u32 dc_ctrl; + + sink_crop = rkisp1_rsz_get_pad_crop(rsz, null, rkisp1_rsz_pad_sink, + 
v4l2_subdev_format_active); + sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, null, rkisp1_rsz_pad_sink, + v4l2_subdev_format_active); + + if (sink_crop->width == sink_fmt->width && + sink_crop->height == sink_fmt->height && + sink_crop->left == 0 && sink_crop->top == 0) { + rkisp1_dcrop_disable(rsz, rkisp1_shadow_regs_sync); + dev_dbg(rkisp1->dev, "capture %d crop disabled ", rsz->id); + return; + } + + dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl); + rkisp1_write(rkisp1, sink_crop->left, rsz->config->dual_crop.h_offset); + rkisp1_write(rkisp1, sink_crop->top, rsz->config->dual_crop.v_offset); + rkisp1_write(rkisp1, sink_crop->width, rsz->config->dual_crop.h_size); + rkisp1_write(rkisp1, sink_crop->height, rsz->config->dual_crop.v_size); + dc_ctrl |= rsz->config->dual_crop.yuvmode_mask; + dc_ctrl |= rkisp1_cif_dual_crop_cfg_upd; + rkisp1_write(rkisp1, dc_ctrl, rsz->config->dual_crop.ctrl); + + dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d ", rsz->id, + sink_fmt->width, sink_fmt->height, + sink_crop->width, sink_crop->height); +} + +/* ---------------------------------------------------------------------------- + * resizer hw configs + */ + +static void rkisp1_rsz_dump_regs(struct rkisp1_resizer *rsz) +{ + dev_dbg(rsz->rkisp1->dev, + "rsz_ctrl 0x%08x/0x%08x " + "rsz_scale_hy %d/%d " + "rsz_scale_hcb %d/%d " + "rsz_scale_hcr %d/%d " + "rsz_scale_vy %d/%d " + "rsz_scale_vc %d/%d " + "rsz_phase_hy %d/%d " + "rsz_phase_hc %d/%d " + "rsz_phase_vy %d/%d " + "rsz_phase_vc %d/%d ", + rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hy_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcb_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_hcr_shd), + rkisp1_read(rsz->rkisp1, 
rsz->config->rsz.scale_vy), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vy_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.scale_vc_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hy_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_hc_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vy_shd), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc), + rkisp1_read(rsz->rkisp1, rsz->config->rsz.phase_vc_shd)); +} + +static void rkisp1_rsz_update_shadow(struct rkisp1_resizer *rsz, + enum rkisp1_shadow_regs_when when) +{ + u32 ctrl_cfg = rkisp1_read(rsz->rkisp1, rsz->config->rsz.ctrl); + + if (when == rkisp1_shadow_regs_async) + ctrl_cfg |= rkisp1_cif_rsz_ctrl_cfg_upd_auto; + else + ctrl_cfg |= rkisp1_cif_rsz_ctrl_cfg_upd; + + rkisp1_write(rsz->rkisp1, ctrl_cfg, rsz->config->rsz.ctrl); +} + +static u32 rkisp1_rsz_calc_ratio(u32 len_sink, u32 len_src) +{ + if (len_sink < len_src) + return ((len_sink - 1) * rkisp1_cif_rsz_scaler_factor) / + (len_src - 1); + + return ((len_src - 1) * rkisp1_cif_rsz_scaler_factor) / + (len_sink - 1) + 1; +} + +static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz, + enum rkisp1_shadow_regs_when when) +{ + rkisp1_write(rsz->rkisp1, 0, rsz->config->rsz.ctrl); + + if (when == rkisp1_shadow_regs_sync) + rkisp1_rsz_update_shadow(rsz, when); +} + +static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz, + struct v4l2_rect *sink_y, + struct v4l2_rect *sink_c, + struct v4l2_rect *src_y, + struct v4l2_rect *src_c, + enum rkisp1_shadow_regs_when when) +{ + struct rkisp1_device *rkisp1 = rsz->rkisp1; + u32 ratio, rsz_ctrl = 0; + unsigned int i; + + /* no phase offset */ + rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hy); + rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_hc); + rkisp1_write(rkisp1, 
0, rsz->config->rsz.phase_vy); + rkisp1_write(rkisp1, 0, rsz->config->rsz.phase_vc); + + /* linear interpolation */ + for (i = 0; i < 64; i++) { + rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut_addr); + rkisp1_write(rkisp1, i, rsz->config->rsz.scale_lut); + } + + if (sink_y->width != src_y->width) { + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_hy_enable; + if (sink_y->width < src_y->width) + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_hy_up; + ratio = rkisp1_rsz_calc_ratio(sink_y->width, src_y->width); + rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hy); + } + + if (sink_c->width != src_c->width) { + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_hc_enable; + if (sink_c->width < src_c->width) + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_hc_up; + ratio = rkisp1_rsz_calc_ratio(sink_c->width, src_c->width); + rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcb); + rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_hcr); + } + + if (sink_y->height != src_y->height) { + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_vy_enable; + if (sink_y->height < src_y->height) + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_vy_up; + ratio = rkisp1_rsz_calc_ratio(sink_y->height, src_y->height); + rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vy); + } + + if (sink_c->height != src_c->height) { + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_vc_enable; + if (sink_c->height < src_c->height) + rsz_ctrl |= rkisp1_cif_rsz_ctrl_scale_vc_up; + ratio = rkisp1_rsz_calc_ratio(sink_c->height, src_c->height); + rkisp1_write(rkisp1, ratio, rsz->config->rsz.scale_vc); + } + + rkisp1_write(rkisp1, rsz_ctrl, rsz->config->rsz.ctrl); + + rkisp1_rsz_update_shadow(rsz, when); +} + +static void rkisp1_rsz_config(struct rkisp1_resizer *rsz, + enum rkisp1_shadow_regs_when when) +{ + u8 hdiv = rkisp1_mbus_fmt_hdiv, vdiv = rkisp1_mbus_fmt_vdiv; + struct v4l2_rect sink_y, sink_c, src_y, src_c; + struct v4l2_mbus_framefmt *src_fmt; + struct v4l2_rect *sink_crop; + + sink_crop = rkisp1_rsz_get_pad_crop(rsz, null, rkisp1_rsz_pad_sink, + 
v4l2_subdev_format_active); + src_fmt = rkisp1_rsz_get_pad_fmt(rsz, null, rkisp1_rsz_pad_src, + v4l2_subdev_format_active); + + if (rsz->fmt_type == rkisp1_fmt_bayer) { + rkisp1_rsz_disable(rsz, when); + return; + } + + sink_y.width = sink_crop->width; + sink_y.height = sink_crop->height; + src_y.width = src_fmt->width; + src_y.height = src_fmt->height; + + sink_c.width = sink_y.width / rkisp1_mbus_fmt_hdiv; + sink_c.height = sink_y.height / rkisp1_mbus_fmt_vdiv; + + if (rsz->fmt_type == rkisp1_fmt_yuv) { + struct rkisp1_capture *cap = + &rsz->rkisp1->capture_devs[rsz->id]; + const struct v4l2_format_info *pixfmt_info = + v4l2_format_info(cap->pix.fmt.pixelformat); + + hdiv = pixfmt_info->hdiv; + vdiv = pixfmt_info->vdiv; + } + src_c.width = src_y.width / hdiv; + src_c.height = src_y.height / vdiv; + + if (sink_c.width == src_c.width && sink_c.height == src_c.height) { + rkisp1_rsz_disable(rsz, when); + return; + } + + dev_dbg(rsz->rkisp1->dev, "stream %d rsz/scale: %dx%d -> %dx%d ", + rsz->id, sink_crop->width, sink_crop->height, + src_fmt->width, src_fmt->height); + dev_dbg(rsz->rkisp1->dev, "chroma scaling %dx%d -> %dx%d ", + sink_c.width, sink_c.height, src_c.width, src_c.height); + + /* set values in the hw */ + rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when); + + rkisp1_rsz_dump_regs(rsz); +} + +/* ---------------------------------------------------------------------------- + * subdev pad operations + */ + +static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + struct rkisp1_resizer *rsz = + container_of(sd, struct rkisp1_resizer, sd); + struct v4l2_subdev_pad_config dummy_cfg; + u32 pad = code->pad; + int ret; + + /* supported mbus codes are the same in isp sink pad */ + code->pad = rkisp1_isp_pad_sink_video; + ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code, + &dummy_cfg, code); + + /* restore pad */ + code->pad = pad; + return 
ret; +} + +static int rkisp1_rsz_init_config(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg) +{ + struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; + struct v4l2_rect *sink_crop; + + sink_fmt = v4l2_subdev_get_try_format(sd, cfg, rkisp1_rsz_pad_src); + sink_fmt->width = rkisp1_default_width; + sink_fmt->height = rkisp1_default_height; + sink_fmt->field = v4l2_field_none; + sink_fmt->code = rkisp1_def_fmt; + + sink_crop = v4l2_subdev_get_try_crop(sd, cfg, rkisp1_rsz_pad_sink); + sink_crop->width = rkisp1_default_width; + sink_crop->height = rkisp1_default_height; + sink_crop->left = 0; + sink_crop->top = 0; + + src_fmt = v4l2_subdev_get_try_format(sd, cfg, rkisp1_rsz_pad_sink); + *src_fmt = *sink_fmt; + + /* note: there is no crop in the source pad, only in the sink */ + + return 0; +} + +static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_mbus_framefmt *format, + unsigned int which) +{ + struct v4l2_mbus_framefmt *src_fmt; + + src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, rkisp1_rsz_pad_src, which); + src_fmt->width = clamp_t(u32, format->width, + rsz->config->min_rsz_width, + rsz->config->max_rsz_width); + src_fmt->height = clamp_t(u32, format->height, + rsz->config->min_rsz_height, + rsz->config->max_rsz_height); + + *format = *src_fmt; +} + +static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_rect *r, + unsigned int which) +{ + const struct rkisp1_isp_mbus_info *mbus_info; + struct v4l2_mbus_framefmt *sink_fmt; + struct v4l2_rect *sink_crop; + + sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, rkisp1_rsz_pad_sink, which); + sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, rkisp1_rsz_pad_sink, + which); + + /* not crop for mp bayer raw data */ + mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); + + if (rsz->id == rkisp1_mainpath && + mbus_info->fmt_type == rkisp1_fmt_bayer) { + sink_crop->left = 0; + sink_crop->top = 0; + 
sink_crop->width = sink_fmt->width; + sink_crop->height = sink_fmt->height; + return; + } + + sink_crop->left = align(r->left, 2); + sink_crop->width = align(r->width, 2); + sink_crop->top = r->top; + sink_crop->height = r->height; + rkisp1_sd_adjust_crop(sink_crop, sink_fmt); + + *r = *sink_crop; +} + +static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_mbus_framefmt *format, + unsigned int which) +{ + const struct rkisp1_isp_mbus_info *mbus_info; + struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; + struct v4l2_rect *sink_crop; + + sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, rkisp1_rsz_pad_sink, which); + src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, rkisp1_rsz_pad_src, which); + sink_crop = rkisp1_rsz_get_pad_crop(rsz, cfg, rkisp1_rsz_pad_sink, + which); + sink_fmt->code = format->code; + mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); + if (!mbus_info) { + sink_fmt->code = rkisp1_def_fmt; + mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code); + } + if (which == v4l2_subdev_format_active) + rsz->fmt_type = mbus_info->fmt_type; + + if (rsz->id == rkisp1_mainpath && + mbus_info->fmt_type == rkisp1_fmt_bayer) { + sink_crop->left = 0; + sink_crop->top = 0; + sink_crop->width = sink_fmt->width; + sink_crop->height = sink_fmt->height; + return; + } + + /* propagete to source pad */ + src_fmt->code = sink_fmt->code; + + sink_fmt->width = clamp_t(u32, format->width, + rsz->config->min_rsz_width, + rsz->config->max_rsz_width); + sink_fmt->height = clamp_t(u32, format->height, + rsz->config->min_rsz_height, + rsz->config->max_rsz_height); + + *format = *sink_fmt; + + /* update sink crop */ + rkisp1_rsz_set_sink_crop(rsz, cfg, sink_crop, which); +} + +static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct rkisp1_resizer *rsz = + container_of(sd, struct rkisp1_resizer, sd); + + fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, cfg, 
fmt->pad, fmt->which); + return 0; +} + +static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct rkisp1_resizer *rsz = + container_of(sd, struct rkisp1_resizer, sd); + + if (fmt->pad == rkisp1_rsz_pad_sink) + rkisp1_rsz_set_sink_fmt(rsz, cfg, &fmt->format, fmt->which); + else + rkisp1_rsz_set_src_fmt(rsz, cfg, &fmt->format, fmt->which); + + return 0; +} + +static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct rkisp1_resizer *rsz = + container_of(sd, struct rkisp1_resizer, sd); + struct v4l2_mbus_framefmt *mf_sink; + + if (sel->pad == rkisp1_rsz_pad_src) + return -einval; + + switch (sel->target) { + case v4l2_sel_tgt_crop_bounds: + mf_sink = rkisp1_rsz_get_pad_fmt(rsz, cfg, rkisp1_rsz_pad_sink, + sel->which); + sel->r.height = mf_sink->height; + sel->r.width = mf_sink->width; + sel->r.left = 0; + sel->r.top = 0; + break; + case v4l2_sel_tgt_crop: + sel->r = *rkisp1_rsz_get_pad_crop(rsz, cfg, rkisp1_rsz_pad_sink, + sel->which); + break; + default: + return -einval; + } + + return 0; +} + +static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_selection *sel) +{ + struct rkisp1_resizer *rsz = + container_of(sd, struct rkisp1_resizer, sd); + + if (sel->target != v4l2_sel_tgt_crop || sel->pad == rkisp1_rsz_pad_src) + return -einval; + + dev_dbg(sd->dev, "%s: pad: %d sel(%d,%d)/%dx%d ", __func__, + sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height); + + rkisp1_rsz_set_sink_crop(rsz, cfg, &sel->r, sel->which); + + return 0; +} + +static const struct media_entity_operations rkisp1_rsz_media_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = { + .enum_mbus_code = rkisp1_rsz_enum_mbus_code, + .get_selection = rkisp1_rsz_get_selection, + .set_selection = 
rkisp1_rsz_set_selection, + .init_cfg = rkisp1_rsz_init_config, + .get_fmt = rkisp1_rsz_get_fmt, + .set_fmt = rkisp1_rsz_set_fmt, + .link_validate = v4l2_subdev_link_validate_default, +}; + +/* ---------------------------------------------------------------------------- + * stream operations + */ + +static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable) +{ + struct rkisp1_resizer *rsz = + container_of(sd, struct rkisp1_resizer, sd); + struct rkisp1_device *rkisp1 = rsz->rkisp1; + struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1]; + enum rkisp1_shadow_regs_when when = rkisp1_shadow_regs_sync; + + if (!enable) { + rkisp1_dcrop_disable(rsz, rkisp1_shadow_regs_async); + rkisp1_rsz_disable(rsz, rkisp1_shadow_regs_async); + return 0; + } + + if (other->is_streaming) + when = rkisp1_shadow_regs_async; + + rkisp1_rsz_config(rsz, when); + rkisp1_dcrop_config(rsz); + + return 0; +} + +static const struct v4l2_subdev_video_ops rkisp1_rsz_video_ops = { + .s_stream = rkisp1_rsz_s_stream, +}; + +static const struct v4l2_subdev_ops rkisp1_rsz_ops = { + .video = &rkisp1_rsz_video_ops, + .pad = &rkisp1_rsz_pad_ops, +}; + +static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz) +{ + v4l2_device_unregister_subdev(&rsz->sd); + media_entity_cleanup(&rsz->sd.entity); +} + +static int rkisp1_rsz_register(struct rkisp1_resizer *rsz) +{ + const char * const dev_names[] = {rkisp1_rsz_mp_dev_name, + rkisp1_rsz_sp_dev_name}; + struct media_pad *pads = rsz->pads; + struct v4l2_subdev *sd = &rsz->sd; + int ret; + + if (rsz->id == rkisp1_selfpath) + rsz->config = &rkisp1_rsz_config_sp; + else + rsz->config = &rkisp1_rsz_config_mp; + + v4l2_subdev_init(sd, &rkisp1_rsz_ops); + sd->flags |= v4l2_subdev_fl_has_devnode; + sd->entity.ops = &rkisp1_rsz_media_ops; + sd->entity.function = media_ent_f_proc_video_scaler; + sd->owner = this_module; + strscpy(sd->name, dev_names[rsz->id], sizeof(sd->name)); + + pads[rkisp1_rsz_pad_sink].flags = media_pad_fl_sink | + 
media_pad_fl_must_connect; + pads[rkisp1_rsz_pad_src].flags = media_pad_fl_source | + media_pad_fl_must_connect; + + rsz->fmt_type = rkisp1_def_fmt_type; + + ret = media_entity_pads_init(&sd->entity, 2, pads); + if (ret) + return ret; + + ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd); + if (ret) { + dev_err(sd->dev, "failed to register resizer subdev "); + goto err_cleanup_media_entity; + } + + rkisp1_rsz_init_config(sd, rsz->pad_cfg); + return 0; + +err_cleanup_media_entity: + media_entity_cleanup(&sd->entity); + + return ret; +} + +int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1) +{ + struct rkisp1_resizer *rsz; + unsigned int i, j; + int ret; + + for (i = 0; i < array_size(rkisp1->resizer_devs); i++) { + rsz = &rkisp1->resizer_devs[i]; + rsz->rkisp1 = rkisp1; + rsz->id = i; + ret = rkisp1_rsz_register(rsz); + if (ret) + goto err_unreg_resizer_devs; + } + + return 0; + +err_unreg_resizer_devs: + for (j = 0; j < i; j++) { + rsz = &rkisp1->resizer_devs[j]; + rkisp1_rsz_unregister(rsz); + } + + return ret; +} + +void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1) +{ + struct rkisp1_resizer *mp = &rkisp1->resizer_devs[rkisp1_mainpath]; + struct rkisp1_resizer *sp = &rkisp1->resizer_devs[rkisp1_selfpath]; + + rkisp1_rsz_unregister(mp); + rkisp1_rsz_unregister(sp); +}
|
Drivers in the Staging area
|
56e3b29f9f6b27a063df2a31f148782073cbe5e9
|
helen koike
|
drivers
|
staging
|
media, rkisp1
|
media: staging: rkisp1: add user space abi definitions
|
add the header for userspace
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['h']
| 1
| 819
| 0
|
--- diff --git a/drivers/staging/media/rkisp1/uapi/rkisp1-config.h b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h --- /dev/null +++ b/drivers/staging/media/rkisp1/uapi/rkisp1-config.h +/* spdx-license-identifier: (gpl-2.0+ or mit) */ +/* + * rockchip isp1 userspace api + * copyright (c) 2017 rockchip electronics co., ltd. + */ + +/* + * todo: improve documentation, mostly regarding abbreviation and hardware + * specificities. reference: "ref_01 - isp_user_manual, rev 2.57" (not public) + */ + +#ifndef _uapi_rkisp1_config_h +#define _uapi_rkisp1_config_h + +#include <linux/types.h> + +/* vendor specific - used for rk_isp1 camera sub-system */ +#define v4l2_meta_fmt_rk_isp1_params v4l2_fourcc('r', 'k', '1', 'p') /* rockchip isp1 params */ +#define v4l2_meta_fmt_rk_isp1_stat_3a v4l2_fourcc('r', 'k', '1', 's') /* rockchip isp1 3a statistics */ + +#define rkisp1_cif_isp_module_dpcc bit(0) +#define rkisp1_cif_isp_module_bls bit(1) +#define rkisp1_cif_isp_module_sdg bit(2) +#define rkisp1_cif_isp_module_hst bit(3) +#define rkisp1_cif_isp_module_lsc bit(4) +#define rkisp1_cif_isp_module_awb_gain bit(5) +#define rkisp1_cif_isp_module_flt bit(6) +#define rkisp1_cif_isp_module_bdm bit(7) +#define rkisp1_cif_isp_module_ctk bit(8) +#define rkisp1_cif_isp_module_goc bit(9) +#define rkisp1_cif_isp_module_cproc bit(10) +#define rkisp1_cif_isp_module_afc bit(11) +#define rkisp1_cif_isp_module_awb bit(12) +#define rkisp1_cif_isp_module_ie bit(13) +#define rkisp1_cif_isp_module_aec bit(14) +#define rkisp1_cif_isp_module_wdr bit(15) +#define rkisp1_cif_isp_module_dpf bit(16) +#define rkisp1_cif_isp_module_dpf_strength bit(17) + +#define rkisp1_cif_isp_ctk_coeff_max 0x100 +#define rkisp1_cif_isp_ctk_offset_max 0x800 + +#define rkisp1_cif_isp_ae_mean_max 25 +#define rkisp1_cif_isp_hist_bin_n_max 16 +#define rkisp1_cif_isp_afm_max_windows 3 +#define rkisp1_cif_isp_degamma_curve_size 17 + +#define rkisp1_cif_isp_bdm_max_th 0xff + +/* + * black level compensation + */ +/* maximum 
value for horizontal start address */ +#define rkisp1_cif_isp_bls_start_h_max 0x00000fff +/* maximum value for horizontal stop address */ +#define rkisp1_cif_isp_bls_stop_h_max 0x00000fff +/* maximum value for vertical start address */ +#define rkisp1_cif_isp_bls_start_v_max 0x00000fff +/* maximum value for vertical stop address */ +#define rkisp1_cif_isp_bls_stop_v_max 0x00000fff +/* maximum is 2^18 = 262144*/ +#define rkisp1_cif_isp_bls_samples_max 0x00000012 +/* maximum value for fixed black level */ +#define rkisp1_cif_isp_bls_fix_sub_max 0x00000fff +/* minimum value for fixed black level */ +#define rkisp1_cif_isp_bls_fix_sub_min 0xfffff000 +/* 13 bit range (signed)*/ +#define rkisp1_cif_isp_bls_fix_mask 0x00001fff + +/* + * automatic white balance measurments + */ +#define rkisp1_cif_isp_awb_max_grid 1 +#define rkisp1_cif_isp_awb_max_frames 7 + +/* + * gamma out + */ +/* maximum number of color samples supported */ +#define rkisp1_cif_isp_gamma_out_max_samples 17 + +/* + * lens shade correction + */ +#define rkisp1_cif_isp_lsc_grad_tbl_size 8 +#define rkisp1_cif_isp_lsc_size_tbl_size 8 +/* + * the following matches the tuning process, + * not the max capabilities of the chip. + * last value unused. + */ +#define rkisp1_cif_isp_lsc_data_tbl_size 290 + +/* + * histogram calculation + */ +/* last 3 values unused. 
*/ +#define rkisp1_cif_isp_histogram_weight_grids_size 28 + +/* + * defect pixel cluster correction + */ +#define rkisp1_cif_isp_dpcc_methods_max 3 + +/* + * denoising pre filter + */ +#define rkisp1_cif_isp_dpf_max_nlf_coeffs 17 +#define rkisp1_cif_isp_dpf_max_spatial_coeffs 6 + +/* + * measurement types + */ +#define rkisp1_cif_isp_stat_awb bit(0) +#define rkisp1_cif_isp_stat_autoexp bit(1) +#define rkisp1_cif_isp_stat_afm_fin bit(2) +#define rkisp1_cif_isp_stat_hist bit(3) + +enum rkisp1_cif_isp_histogram_mode { + rkisp1_cif_isp_histogram_mode_disable, + rkisp1_cif_isp_histogram_mode_rgb_combined, + rkisp1_cif_isp_histogram_mode_r_histogram, + rkisp1_cif_isp_histogram_mode_g_histogram, + rkisp1_cif_isp_histogram_mode_b_histogram, + rkisp1_cif_isp_histogram_mode_y_histogram +}; + +enum rkisp1_cif_isp_awb_mode_type { + rkisp1_cif_isp_awb_mode_manual, + rkisp1_cif_isp_awb_mode_rgb, + rkisp1_cif_isp_awb_mode_ycbcr +}; + +enum rkisp1_cif_isp_flt_mode { + rkisp1_cif_isp_flt_static_mode, + rkisp1_cif_isp_flt_dynamic_mode +}; + +/** + * enum rkisp1_cif_isp_exp_ctrl_autostop - stop modes + * @rkisp1_cif_isp_exp_ctrl_autostop_0: continuous measurement + * @rkisp1_cif_isp_exp_ctrl_autostop_1: stop measuring after a complete frame + */ +enum rkisp1_cif_isp_exp_ctrl_autostop { + rkisp1_cif_isp_exp_ctrl_autostop_0 = 0, + rkisp1_cif_isp_exp_ctrl_autostop_1 = 1, +}; + +/** + * enum rkisp1_cif_isp_exp_meas_mode - exposure measure mode + * @rkisp1_cif_isp_exp_measuring_mode_0: y = 16 + 0.25r + 0.5g + 0.1094b + * @rkisp1_cif_isp_exp_measuring_mode_1: y = (r + g + b) x (85/256) + */ +enum rkisp1_cif_isp_exp_meas_mode { + rkisp1_cif_isp_exp_measuring_mode_0, + rkisp1_cif_isp_exp_measuring_mode_1, +}; + +/*---------- part1: input parameters ------------*/ + +struct rkisp1_cif_isp_window { + __u16 h_offs; + __u16 v_offs; + __u16 h_size; + __u16 v_size; +} __packed; + +/** + * struct rkisp1_cif_isp_bls_fixed_val - bls fixed subtraction values + * + * the values will be subtracted from 
the sensor + * values. therefore a negative value means addition instead of subtraction! + * + * @r: fixed (signed!) subtraction value for bayer pattern r + * @gr: fixed (signed!) subtraction value for bayer pattern gr + * @gb: fixed (signed!) subtraction value for bayer pattern gb + * @b: fixed (signed!) subtraction value for bayer pattern b + */ +struct rkisp1_cif_isp_bls_fixed_val { + __s16 r; + __s16 gr; + __s16 gb; + __s16 b; +} __packed; + +/** + * struct rkisp1_cif_isp_bls_config - configuration used by black level subtraction + * + * @enable_auto: automatic mode activated means that the measured values + * are subtracted. otherwise the fixed subtraction + * values will be subtracted. + * @en_windows: enabled window + * @bls_window1: measurement window 1 size + * @bls_window2: measurement window 2 size + * @bls_samples: set amount of measured pixels for each bayer position + * (a, b,c and d) to 2^bls_samples. + * @fixed_val: fixed subtraction values + */ +struct rkisp1_cif_isp_bls_config { + __u8 enable_auto; + __u8 en_windows; + struct rkisp1_cif_isp_window bls_window1; + struct rkisp1_cif_isp_window bls_window2; + __u8 bls_samples; + struct rkisp1_cif_isp_bls_fixed_val fixed_val; +} __packed; + +/** + * struct rkisp1_cif_isp_dpcc_methods_config - methods configuration used by dpcc + * + * methods configuration used by defect pixel cluster correction + * + * @method: method enable bits + * @line_thresh: line threshold + * @line_mad_fac: line mad factor + * @pg_fac: peak gradient factor + * @rnd_thresh: rank neighbor difference threshold + * @rg_fac: rank gradient factor + */ +struct rkisp1_cif_isp_dpcc_methods_config { + __u32 method; + __u32 line_thresh; + __u32 line_mad_fac; + __u32 pg_fac; + __u32 rnd_thresh; + __u32 rg_fac; +} __packed; + +/** + * struct rkisp1_cif_isp_dpcc_config - configuration used by dpcc + * + * configuration used by defect pixel cluster correction + * + * @mode: dpcc output mode + * @output_mode: whether use hard coded methods + * 
@set_use: stage1 methods set + * @methods: methods config + * @ro_limits: rank order limits + * @rnd_offs: differential rank offsets for rank neighbor difference + */ +struct rkisp1_cif_isp_dpcc_config { + __u32 mode; + __u32 output_mode; + __u32 set_use; + struct rkisp1_cif_isp_dpcc_methods_config methods[rkisp1_cif_isp_dpcc_methods_max]; + __u32 ro_limits; + __u32 rnd_offs; +} __packed; + +struct rkisp1_cif_isp_gamma_corr_curve { + __u16 gamma_y[rkisp1_cif_isp_degamma_curve_size]; +} __packed; + +struct rkisp1_cif_isp_gamma_curve_x_axis_pnts { + __u32 gamma_dx0; + __u32 gamma_dx1; +} __packed; + +/** + * struct rkisp1_cif_isp_sdg_config - configuration used by sensor degamma + * + * @curve_x: gamma curve point definition axis for x + * @xa_pnts: x increments + */ +struct rkisp1_cif_isp_sdg_config { + struct rkisp1_cif_isp_gamma_corr_curve curve_r; + struct rkisp1_cif_isp_gamma_corr_curve curve_g; + struct rkisp1_cif_isp_gamma_corr_curve curve_b; + struct rkisp1_cif_isp_gamma_curve_x_axis_pnts xa_pnts; +} __packed; + +/** + * struct rkisp1_cif_isp_lsc_config - configuration used by lens shading correction + * + * refer to ref_01 for details + */ +struct rkisp1_cif_isp_lsc_config { + __u32 r_data_tbl[rkisp1_cif_isp_lsc_data_tbl_size]; + __u32 gr_data_tbl[rkisp1_cif_isp_lsc_data_tbl_size]; + __u32 gb_data_tbl[rkisp1_cif_isp_lsc_data_tbl_size]; + __u32 b_data_tbl[rkisp1_cif_isp_lsc_data_tbl_size]; + + __u32 x_grad_tbl[rkisp1_cif_isp_lsc_grad_tbl_size]; + __u32 y_grad_tbl[rkisp1_cif_isp_lsc_grad_tbl_size]; + + __u32 x_size_tbl[rkisp1_cif_isp_lsc_size_tbl_size]; + __u32 y_size_tbl[rkisp1_cif_isp_lsc_size_tbl_size]; + __u16 config_width; + __u16 config_height; +} __packed; + +/** + * struct rkisp1_cif_isp_ie_config - configuration used by image effects + * + * @eff_mat_1: 3x3 matrix coefficients for emboss effect 1 + * @eff_mat_2: 3x3 matrix coefficients for emboss effect 2 + * @eff_mat_3: 3x3 matrix coefficients for emboss 3/sketch 1 + * @eff_mat_4: 3x3 matrix 
coefficients for sketch effect 2 + * @eff_mat_5: 3x3 matrix coefficients for sketch effect 3 + * @eff_tint: chrominance increment values of tint (used for sepia effect) + */ +struct rkisp1_cif_isp_ie_config { + __u16 effect; + __u16 color_sel; + __u16 eff_mat_1; + __u16 eff_mat_2; + __u16 eff_mat_3; + __u16 eff_mat_4; + __u16 eff_mat_5; + __u16 eff_tint; +} __packed; + +/** + * struct rkisp1_cif_isp_cproc_config - configuration used by color processing + * + * @c_out_range: chrominance pixel clipping range at output. + * (0 for limit, 1 for full) + * @y_in_range: luminance pixel clipping range at output. + * @y_out_range: luminance pixel clipping range at output. + * @contrast: 00~ff, 0.0~1.992 + * @brightness: 80~7f, -128~+127 + * @sat: saturation, 00~ff, 0.0~1.992 + * @hue: 80~7f, -90~+87.188 + */ +struct rkisp1_cif_isp_cproc_config { + __u8 c_out_range; + __u8 y_in_range; + __u8 y_out_range; + __u8 contrast; + __u8 brightness; + __u8 sat; + __u8 hue; +} __packed; + +/** + * struct rkisp1_cif_isp_awb_meas_config - configuration used by auto white balance + * + * @awb_wnd: white balance measurement window (in pixels) + * (from enum rkisp1_cif_isp_awb_mode_type) + * @max_y: only pixels values < max_y contribute to awb measurement, set to 0 + * to disable this feature + * @min_y: only pixels values > min_y contribute to awb measurement + * @max_csum: chrominance sum maximum value, only consider pixels with cb+cr, + * smaller than threshold for awb measurements + * @min_c: chrominance minimum value, only consider pixels with cb/cr + * each greater than threshold value for awb measurements + * @frames: number of frames - 1 used for mean value calculation + * (ucframes=0 means 1 frame) + * @awb_ref_cr: reference cr value for awb regulation, target for awb + * @awb_ref_cb: reference cb value for awb regulation, target for awb + */ +struct rkisp1_cif_isp_awb_meas_config { + /* + * note: currently the h and v offsets are mapped to grid offsets + */ + struct 
rkisp1_cif_isp_window awb_wnd; + __u32 awb_mode; + __u8 max_y; + __u8 min_y; + __u8 max_csum; + __u8 min_c; + __u8 frames; + __u8 awb_ref_cr; + __u8 awb_ref_cb; + __u8 enable_ymax_cmp; +} __packed; + +/** + * struct rkisp1_cif_isp_awb_gain_config - configuration used by auto white balance gain + * + * out_data_x = ( awb_geain_x * in_data + 128) >> 8 + */ +struct rkisp1_cif_isp_awb_gain_config { + __u16 gain_red; + __u16 gain_green_r; + __u16 gain_blue; + __u16 gain_green_b; +} __packed; + +/** + * struct rkisp1_cif_isp_flt_config - configuration used by isp filtering + * + * @mode: isp_filt_mode register fields (from enum rkisp1_cif_isp_flt_mode) + * @grn_stage1: isp_filt_mode register fields + * @chr_h_mode: isp_filt_mode register fields + * @chr_v_mode: isp_filt_mode register fields + * + * refer to ref_01 for details. + */ + +struct rkisp1_cif_isp_flt_config { + __u32 mode; + __u8 grn_stage1; + __u8 chr_h_mode; + __u8 chr_v_mode; + __u32 thresh_bl0; + __u32 thresh_bl1; + __u32 thresh_sh0; + __u32 thresh_sh1; + __u32 lum_weight; + __u32 fac_sh1; + __u32 fac_sh0; + __u32 fac_mid; + __u32 fac_bl0; + __u32 fac_bl1; +} __packed; + +/** + * struct rkisp1_cif_isp_bdm_config - configuration used by bayer demosaic + * + * @demosaic_th: threshod for bayer demosaicing texture detection + */ +struct rkisp1_cif_isp_bdm_config { + __u8 demosaic_th; +} __packed; + +/** + * struct rkisp1_cif_isp_ctk_config - configuration used by cross talk correction + * + * @coeff: color correction matrix + * @ct_offset_b: offset for the crosstalk correction matrix + */ +struct rkisp1_cif_isp_ctk_config { + __u16 coeff0; + __u16 coeff1; + __u16 coeff2; + __u16 coeff3; + __u16 coeff4; + __u16 coeff5; + __u16 coeff6; + __u16 coeff7; + __u16 coeff8; + __u16 ct_offset_r; + __u16 ct_offset_g; + __u16 ct_offset_b; +} __packed; + +enum rkisp1_cif_isp_goc_mode { + rkisp1_cif_isp_goc_mode_logarithmic, + rkisp1_cif_isp_goc_mode_equidistant +}; + +/** + * struct rkisp1_cif_isp_goc_config - configuration 
used by gamma out correction + * + * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode) + * @gamma_y: gamma out curve y-axis for all color components + */ +struct rkisp1_cif_isp_goc_config { + __u32 mode; + __u16 gamma_y[rkisp1_cif_isp_gamma_out_max_samples]; +} __packed; + +/** + * struct rkisp1_cif_isp_hst_config - configuration used by histogram + * + * @mode: histogram mode (from enum rkisp1_cif_isp_histogram_mode) + * @histogram_predivider: process every stepsize pixel, all other pixels are + * skipped + * @meas_window: coordinates of the measure window + * @hist_weight: weighting factor for sub-windows + */ +struct rkisp1_cif_isp_hst_config { + __u32 mode; + __u8 histogram_predivider; + struct rkisp1_cif_isp_window meas_window; + __u8 hist_weight[rkisp1_cif_isp_histogram_weight_grids_size]; +} __packed; + +/** + * struct rkisp1_cif_isp_aec_config - configuration used by auto exposure control + * + * @mode: exposure measure mode (from enum rkisp1_cif_isp_exp_meas_mode) + * @autostop: stop mode (from enum rkisp1_cif_isp_exp_ctrl_autostop) + * @meas_window: coordinates of the measure window + */ +struct rkisp1_cif_isp_aec_config { + __u32 mode; + __u32 autostop; + struct rkisp1_cif_isp_window meas_window; +} __packed; + +/** + * struct rkisp1_cif_isp_afc_config - configuration used by auto focus control + * + * @num_afm_win: max rkisp1_cif_isp_afm_max_windows + * @afm_win: coordinates of the meas window + * @thres: threshold used for minimizing the influence of noise + * @var_shift: the number of bits for the shift operation at the end of the + * calculation chain. 
+ */ +struct rkisp1_cif_isp_afc_config { + __u8 num_afm_win; + struct rkisp1_cif_isp_window afm_win[rkisp1_cif_isp_afm_max_windows]; + __u32 thres; + __u32 var_shift; +} __packed; + +/** + * enum rkisp1_cif_isp_dpf_gain_usage - dpf gain usage + * @rkisp1_cif_isp_dpf_gain_usage_disabled: don't use any gains in preprocessing stage + * @rkisp1_cif_isp_dpf_gain_usage_nf_gains: use only the noise function gains from + * registers dpf_nf_gain_r, ... + * @rkisp1_cif_isp_dpf_gain_usage_lsc_gains: use only the gains from lsc module + * @rkisp1_cif_isp_dpf_gain_usage_nf_lsc_gains: use the noise function gains and the + * gains from lsc module + * @rkisp1_cif_isp_dpf_gain_usage_awb_gains: use only the gains from awb module + * @rkisp1_cif_isp_dpf_gain_usage_awb_lsc_gains: use the gains from awb and lsc module + * @rkisp1_cif_isp_dpf_gain_usage_max: upper border (only for an internal evaluation) + */ +enum rkisp1_cif_isp_dpf_gain_usage { + rkisp1_cif_isp_dpf_gain_usage_disabled, + rkisp1_cif_isp_dpf_gain_usage_nf_gains, + rkisp1_cif_isp_dpf_gain_usage_lsc_gains, + rkisp1_cif_isp_dpf_gain_usage_nf_lsc_gains, + rkisp1_cif_isp_dpf_gain_usage_awb_gains, + rkisp1_cif_isp_dpf_gain_usage_awb_lsc_gains, + rkisp1_cif_isp_dpf_gain_usage_max +}; + +/** + * enum rkisp1_cif_isp_dpf_rb_filtersize - red and blue filter sizes + * @rkisp1_cif_isp_dpf_rb_filtersize_13x9: red and blue filter kernel size 13x9 + * (means 7x5 active pixel) + * @rkisp1_cif_isp_dpf_rb_filtersize_9x9: red and blue filter kernel size 9x9 + * (means 5x5 active pixel) + */ +enum rkisp1_cif_isp_dpf_rb_filtersize { + rkisp1_cif_isp_dpf_rb_filtersize_13x9, + rkisp1_cif_isp_dpf_rb_filtersize_9x9, +}; + +/** + * enum rkisp1_cif_isp_dpf_nll_scale_mode - dpf noise level scale mode + * @rkisp1_cif_isp_nll_scale_linear: use a linear scaling + * @rkisp1_cif_isp_nll_scale_logarithmic: use a logarithmic scaling + */ +enum rkisp1_cif_isp_dpf_nll_scale_mode { + rkisp1_cif_isp_nll_scale_linear, + rkisp1_cif_isp_nll_scale_logarithmic, 
+}; + +/** + * struct rkisp1_cif_isp_dpf_nll - noise level lookup + * + * @coeff: noise level lookup coefficient + * @scale_mode: dpf noise level scale mode (from enum rkisp1_cif_isp_dpf_nll_scale_mode) + */ +struct rkisp1_cif_isp_dpf_nll { + __u16 coeff[rkisp1_cif_isp_dpf_max_nlf_coeffs]; + __u32 scale_mode; +} __packed; + +/** + * struct rkisp1_cif_isp_dpf_rb_flt - red blue filter config + * + * @fltsize: the filter size for the red and blue pixels + * (from enum rkisp1_cif_isp_dpf_rb_filtersize) + * @spatial_coeff: spatial weights + * @r_enable: enable filter processing for red pixels + * @b_enable: enable filter processing for blue pixels + */ +struct rkisp1_cif_isp_dpf_rb_flt { + __u32 fltsize; + __u8 spatial_coeff[rkisp1_cif_isp_dpf_max_spatial_coeffs]; + __u8 r_enable; + __u8 b_enable; +} __packed; + +/** + * struct rkisp1_cif_isp_dpf_g_flt - green filter configuration + * + * @spatial_coeff: spatial weights + * @gr_enable: enable filter processing for green pixels in green/red lines + * @gb_enable: enable filter processing for green pixels in green/blue lines + */ +struct rkisp1_cif_isp_dpf_g_flt { + __u8 spatial_coeff[rkisp1_cif_isp_dpf_max_spatial_coeffs]; + __u8 gr_enable; + __u8 gb_enable; +} __packed; + +/** + * struct rkisp1_cif_isp_dpf_gain - noise function configuration + * + * @mode: dpf gain usage (from enum rkisp1_cif_isp_dpf_gain_usage) + * @nf_r_gain: noise function gain that replaces the awb gain for red pixels + * @nf_b_gain: noise function gain that replaces the awb gain for blue pixels + * @nf_gr_gain: noise function gain that replaces the awb gain + * for green pixels in a red line + * @nf_gb_gain: noise function gain that replaces the awb gain + * for green pixels in a blue line + */ +struct rkisp1_cif_isp_dpf_gain { + __u32 mode; + __u16 nf_r_gain; + __u16 nf_b_gain; + __u16 nf_gr_gain; + __u16 nf_gb_gain; +} __packed; + +/** + * struct rkisp1_cif_isp_dpf_config - configuration used by de-noising pre-filter + * + * @gain: noise function 
gain + * @g_flt: green filter config + * @rb_flt: red blue filter config + * @nll: noise level lookup + */ +struct rkisp1_cif_isp_dpf_config { + struct rkisp1_cif_isp_dpf_gain gain; + struct rkisp1_cif_isp_dpf_g_flt g_flt; + struct rkisp1_cif_isp_dpf_rb_flt rb_flt; + struct rkisp1_cif_isp_dpf_nll nll; +} __packed; + +/** + * struct rkisp1_cif_isp_dpf_strength_config - strength of the filter + * + * @r: filter strength of the red filter + * @g: filter strength of the green filter + * @b: filter strength of the blue filter + */ +struct rkisp1_cif_isp_dpf_strength_config { + __u8 r; + __u8 g; + __u8 b; +} __packed; + +/** + * struct rkisp1_cif_isp_isp_other_cfg - parameters for some blocks in rockchip isp1 + * + * @dpcc_config: defect pixel cluster correction config + * @bls_config: black level subtraction config + * @sdg_config: sensor degamma config + * @lsc_config: lens shade config + * @awb_gain_config: auto white balance gain config + * @flt_config: filter config + * @bdm_config: demosaic config + * @ctk_config: cross talk config + * @goc_config: gamma out config + * @bls_config: black level subtraction config + * @dpf_config: de-noising pre-filter config + * @dpf_strength_config: dpf strength config + * @cproc_config: color process config + * @ie_config: image effects config + */ +struct rkisp1_cif_isp_isp_other_cfg { + struct rkisp1_cif_isp_dpcc_config dpcc_config; + struct rkisp1_cif_isp_bls_config bls_config; + struct rkisp1_cif_isp_sdg_config sdg_config; + struct rkisp1_cif_isp_lsc_config lsc_config; + struct rkisp1_cif_isp_awb_gain_config awb_gain_config; + struct rkisp1_cif_isp_flt_config flt_config; + struct rkisp1_cif_isp_bdm_config bdm_config; + struct rkisp1_cif_isp_ctk_config ctk_config; + struct rkisp1_cif_isp_goc_config goc_config; + struct rkisp1_cif_isp_dpf_config dpf_config; + struct rkisp1_cif_isp_dpf_strength_config dpf_strength_config; + struct rkisp1_cif_isp_cproc_config cproc_config; + struct rkisp1_cif_isp_ie_config ie_config; +} __packed; 
+ +/** + * struct rkisp1_cif_isp_isp_meas_cfg - rockchip isp1 measure parameters + * + * @awb_meas_config: auto white balance config + * @hst_config: histogram config + * @aec_config: auto exposure config + * @afc_config: auto focus config + */ +struct rkisp1_cif_isp_isp_meas_cfg { + struct rkisp1_cif_isp_awb_meas_config awb_meas_config; + struct rkisp1_cif_isp_hst_config hst_config; + struct rkisp1_cif_isp_aec_config aec_config; + struct rkisp1_cif_isp_afc_config afc_config; +} __packed; + +/** + * struct rkisp1_params_cfg - rockchip isp1 input parameters meta data + * + * @module_en_update: mask the enable bits of which module should be updated + * @module_ens: mask the enable value of each module, only update the module + * which correspond bit was set in module_en_update + * @module_cfg_update: mask the config bits of which module should be updated + * @meas: measurement config + * @others: other config + */ +struct rkisp1_params_cfg { + __u32 module_en_update; + __u32 module_ens; + __u32 module_cfg_update; + + struct rkisp1_cif_isp_isp_meas_cfg meas; + struct rkisp1_cif_isp_isp_other_cfg others; +} __packed; + +/*---------- part2: measurement statistics ------------*/ + +/** + * struct rkisp1_cif_isp_awb_meas - awb measured values + * + * @cnt: white pixel count, number of "white pixels" found during last + * measurement + * @mean_y_or_g: mean value of y within window and frames, + * green if rgb is selected. + * @mean_cb_or_b: mean value of cb within window and frames, + * blue if rgb is selected. + * @mean_cr_or_r: mean value of cr within window and frames, + * red if rgb is selected. 
+ */ +struct rkisp1_cif_isp_awb_meas { + __u32 cnt; + __u8 mean_y_or_g; + __u8 mean_cb_or_b; + __u8 mean_cr_or_r; +} __packed; + +/** + * struct rkisp1_cif_isp_awb_stat - statistics automatic white balance data + * + * @awb_mean: mean measured data + */ +struct rkisp1_cif_isp_awb_stat { + struct rkisp1_cif_isp_awb_meas awb_mean[rkisp1_cif_isp_awb_max_grid]; +} __packed; + +/** + * struct rkisp1_cif_isp_bls_meas_val - bls measured values + * + * @meas_r: mean measured value for bayer pattern r + * @meas_gr: mean measured value for bayer pattern gr + * @meas_gb: mean measured value for bayer pattern gb + * @meas_b: mean measured value for bayer pattern b + */ +struct rkisp1_cif_isp_bls_meas_val { + __u16 meas_r; + __u16 meas_gr; + __u16 meas_gb; + __u16 meas_b; +} __packed; + +/** + * struct rkisp1_cif_isp_ae_stat - statistics auto exposure data + * + * @exp_mean: mean luminance value of block xx + * @bls_val: bls measured values + * + * image is divided into 5x5 blocks. + */ +struct rkisp1_cif_isp_ae_stat { + __u8 exp_mean[rkisp1_cif_isp_ae_mean_max]; + struct rkisp1_cif_isp_bls_meas_val bls_val; +} __packed; + +/** + * struct rkisp1_cif_isp_af_meas_val - af measured values + * + * @sum: sharpness, refer to ref_01 for definition + * @lum: luminance, refer to ref_01 for definition + */ +struct rkisp1_cif_isp_af_meas_val { + __u32 sum; + __u32 lum; +} __packed; + +/** + * struct rkisp1_cif_isp_af_stat - statistics auto focus data + * + * @window: af measured value of window x + * + * the module measures the sharpness in 3 windows of selectable size via + * register settings(isp_afm_*_a/b/c) + */ +struct rkisp1_cif_isp_af_stat { + struct rkisp1_cif_isp_af_meas_val window[rkisp1_cif_isp_afm_max_windows]; +} __packed; + +/** + * struct rkisp1_cif_isp_hist_stat - statistics histogram data + * + * @hist_bins: measured bin counters + * + * measurement window divided into 25 sub-windows, set + * with isp_hist_xxx + */ +struct rkisp1_cif_isp_hist_stat { + __u16 
hist_bins[rkisp1_cif_isp_hist_bin_n_max]; +} __packed; + +/** + * struct rkisp1_stat_buffer - rockchip isp1 statistics data + * + * @rkisp1_cif_isp_awb_stat: statistics data for automatic white balance + * @rkisp1_cif_isp_ae_stat: statistics data for auto exposure + * @rkisp1_cif_isp_af_stat: statistics data for auto focus + * @rkisp1_cif_isp_hist_stat: statistics histogram data + */ +struct rkisp1_cif_isp_stat { + struct rkisp1_cif_isp_awb_stat awb; + struct rkisp1_cif_isp_ae_stat ae; + struct rkisp1_cif_isp_af_stat af; + struct rkisp1_cif_isp_hist_stat hist; +} __packed; + +/** + * struct rkisp1_stat_buffer - rockchip isp1 statistics meta data + * + * @meas_type: measurement types (rkisp1_cif_isp_stat_ definitions) + * @frame_id: frame id for sync + * @params: statistics data + */ +struct rkisp1_stat_buffer { + __u32 meas_type; + __u32 frame_id; + struct rkisp1_cif_isp_stat params; +} __packed; + +#endif /* _uapi_rkisp1_config_h */
|
Drivers in the Staging area
|
8e2be317dcf5ce42c33d4e04ed7734c155487f31
|
jeffy chen
|
drivers
|
staging
|
media, rkisp1, uapi
|
media: staging: rkisp1: add capture device for statistics
|
add the capture video driver for rockchip isp1 statistics block.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['h', 'c', 'makefile']
| 5
| 605
| 11
|
--- diff --git a/drivers/staging/media/rkisp1/makefile b/drivers/staging/media/rkisp1/makefile --- a/drivers/staging/media/rkisp1/makefile +++ b/drivers/staging/media/rkisp1/makefile - rkisp1-resizer.o + rkisp1-resizer.o \ + rkisp1-stats.o diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h --- a/drivers/staging/media/rkisp1/rkisp1-common.h +++ b/drivers/staging/media/rkisp1/rkisp1-common.h +#include "uapi/rkisp1-config.h" +/* + * struct rkisp1_stats - isp statistics device + * + * @irq_lock: buffer queue lock + * @stat: stats buffer list + * @readout_wq: workqueue for statistics information read + */ +struct rkisp1_stats { + struct rkisp1_vdev_node vnode; + struct rkisp1_device *rkisp1; + + spinlock_t irq_lock; + struct list_head stat; + struct v4l2_format vdev_fmt; + bool is_streaming; + + struct workqueue_struct *readout_wq; + struct mutex wq_lock; +}; + + unsigned long stats_error; + * @stats: isp statistics output device + struct rkisp1_stats stats; +void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris); +int rkisp1_stats_register(struct rkisp1_stats *stats, + struct v4l2_device *v4l2_dev, + struct rkisp1_device *rkisp1); +void rkisp1_stats_unregister(struct rkisp1_stats *stats); + diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c --- a/drivers/staging/media/rkisp1/rkisp1-dev.c +++ b/drivers/staging/media/rkisp1/rkisp1-dev.c + * rkisp1-stats.c + * |===============| + * +---------------+ + * | | + * | isp | + * | | + * +---------------+ + * - * +-------------| 2 | 3 | - * | +------+------+ - * | | - * v v - * +- ---------+ +-----------+ - * | 0 | | 0 | - * ------------- ------------- - * | resizer | | resizer | + * +-------------| 2 | 3 |----------+ + * | +------+------+ | + * | | | + * v v v + * +- ---------+ +-----------+ +-----------+ + * | 0 | | 0 | | stats | + * ------------- ------------- | (capture) | + * | resizer | | resizer | +-----------+ - 
return 0; + /* 3a stats links */ + source = &rkisp1->isp.sd.entity; + sink = &rkisp1->stats.vnode.vdev.entity; + return media_create_pad_link(source, rkisp1_isp_pad_source_stats, + sink, 0, flags); + ret = rkisp1_stats_register(&rkisp1->stats, &rkisp1->v4l2_dev, rkisp1); + if (ret) + goto err_unreg_capture_devs; + - goto err_unreg_capture_devs; + goto err_unreg_stats; +err_unreg_stats: + rkisp1_stats_unregister(&rkisp1->stats); + debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir, + &debug->stats_error); + rkisp1_stats_unregister(&rkisp1->stats); diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c --- a/drivers/staging/media/rkisp1/rkisp1-isp.c +++ b/drivers/staging/media/rkisp1/rkisp1-isp.c + + if (status & rkisp1_cif_isp_frame) { + u32 isp_ris; + + /* new frame from the sensor received */ + isp_ris = rkisp1_read(rkisp1, rkisp1_cif_isp_ris); + if (isp_ris & (rkisp1_cif_isp_awb_done | + rkisp1_cif_isp_afm_fin | + rkisp1_cif_isp_exp_end | + rkisp1_cif_isp_hist_measure_rdy)) + rkisp1_stats_isr(&rkisp1->stats, isp_ris); + } diff --git a/drivers/staging/media/rkisp1/rkisp1-stats.c b/drivers/staging/media/rkisp1/rkisp1-stats.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-stats.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - stats subdevice + * + * copyright (c) 2017 rockchip electronics co., ltd. 
+ */ + +#include <media/v4l2-common.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-vmalloc.h> /* for isp statistics */ + +#include "rkisp1-common.h" + +#define rkisp1_stats_dev_name rkisp1_driver_name "_stats" + +#define rkisp1_isp_stats_req_bufs_min 2 +#define rkisp1_isp_stats_req_bufs_max 8 + +enum rkisp1_isp_readout_cmd { + rkisp1_isp_readout_meas, + rkisp1_isp_readout_meta, +}; + +struct rkisp1_isp_readout_work { + struct work_struct work; + struct rkisp1_stats *stats; + + unsigned int frame_id; + unsigned int isp_ris; + enum rkisp1_isp_readout_cmd readout; + struct vb2_buffer *vb; +}; + +static int rkisp1_stats_enum_fmt_meta_cap(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct video_device *video = video_devdata(file); + struct rkisp1_stats *stats = video_get_drvdata(video); + + if (f->index > 0 || f->type != video->queue->type) + return -einval; + + f->pixelformat = stats->vdev_fmt.fmt.meta.dataformat; + return 0; +} + +static int rkisp1_stats_g_fmt_meta_cap(struct file *file, void *priv, + struct v4l2_format *f) +{ + struct video_device *video = video_devdata(file); + struct rkisp1_stats *stats = video_get_drvdata(video); + struct v4l2_meta_format *meta = &f->fmt.meta; + + if (f->type != video->queue->type) + return -einval; + + memset(meta, 0, sizeof(*meta)); + meta->dataformat = stats->vdev_fmt.fmt.meta.dataformat; + meta->buffersize = stats->vdev_fmt.fmt.meta.buffersize; + + return 0; +} + +static int rkisp1_stats_querycap(struct file *file, + void *priv, struct v4l2_capability *cap) +{ + struct video_device *vdev = video_devdata(file); + + strscpy(cap->driver, rkisp1_driver_name, sizeof(cap->driver)); + strscpy(cap->card, vdev->name, sizeof(cap->card)); + strscpy(cap->bus_info, "platform: " rkisp1_driver_name, + sizeof(cap->bus_info)); + + return 0; +} + +/* isp video device ioctls */ +static const struct v4l2_ioctl_ops rkisp1_stats_ioctl = { + 
.vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_fmt_meta_cap = rkisp1_stats_enum_fmt_meta_cap, + .vidioc_g_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap, + .vidioc_s_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap, + .vidioc_try_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap, + .vidioc_querycap = rkisp1_stats_querycap, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static const struct v4l2_file_operations rkisp1_stats_fops = { + .mmap = vb2_fop_mmap, + .unlocked_ioctl = video_ioctl2, + .poll = vb2_fop_poll, + .open = v4l2_fh_open, + .release = vb2_fop_release +}; + +static int rkisp1_stats_vb2_queue_setup(struct vb2_queue *vq, + unsigned int *num_buffers, + unsigned int *num_planes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct rkisp1_stats *stats = vq->drv_priv; + + *num_planes = 1; + + *num_buffers = clamp_t(u32, *num_buffers, rkisp1_isp_stats_req_bufs_min, + rkisp1_isp_stats_req_bufs_max); + + sizes[0] = sizeof(struct rkisp1_stat_buffer); + + init_list_head(&stats->stat); + + return 0; +} + +static void rkisp1_stats_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct rkisp1_buffer *stats_buf = + container_of(vbuf, struct rkisp1_buffer, vb); + struct vb2_queue *vq = vb->vb2_queue; + struct rkisp1_stats *stats_dev = vq->drv_priv; + + stats_buf->vaddr[0] = vb2_plane_vaddr(vb, 0); + + mutex_lock(&stats_dev->wq_lock); + list_add_tail(&stats_buf->queue, &stats_dev->stat); + mutex_unlock(&stats_dev->wq_lock); +} + +static int rkisp1_stats_vb2_buf_prepare(struct vb2_buffer *vb) +{ + if (vb2_plane_size(vb, 0) < 
sizeof(struct rkisp1_stat_buffer)) + return -einval; + + vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_stat_buffer)); + + return 0; +} + +static void rkisp1_stats_vb2_stop_streaming(struct vb2_queue *vq) +{ + struct rkisp1_stats *stats = vq->drv_priv; + struct rkisp1_buffer *buf; + unsigned long flags; + unsigned int i; + + /* make sure no new work queued in isr before draining wq */ + spin_lock_irqsave(&stats->irq_lock, flags); + stats->is_streaming = false; + spin_unlock_irqrestore(&stats->irq_lock, flags); + + drain_workqueue(stats->readout_wq); + + mutex_lock(&stats->wq_lock); + for (i = 0; i < rkisp1_isp_stats_req_bufs_max; i++) { + if (list_empty(&stats->stat)) + break; + buf = list_first_entry(&stats->stat, + struct rkisp1_buffer, queue); + list_del(&buf->queue); + vb2_buffer_done(&buf->vb.vb2_buf, vb2_buf_state_error); + } + mutex_unlock(&stats->wq_lock); +} + +static int +rkisp1_stats_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) +{ + struct rkisp1_stats *stats = queue->drv_priv; + + stats->is_streaming = true; + + return 0; +} + +static const struct vb2_ops rkisp1_stats_vb2_ops = { + .queue_setup = rkisp1_stats_vb2_queue_setup, + .buf_queue = rkisp1_stats_vb2_buf_queue, + .buf_prepare = rkisp1_stats_vb2_buf_prepare, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .stop_streaming = rkisp1_stats_vb2_stop_streaming, + .start_streaming = rkisp1_stats_vb2_start_streaming, +}; + +static int +rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats) +{ + struct rkisp1_vdev_node *node; + + node = container_of(q, struct rkisp1_vdev_node, buf_queue); + + q->type = v4l2_buf_type_meta_capture; + q->io_modes = vb2_mmap | vb2_userptr | vb2_dmabuf; + q->drv_priv = stats; + q->ops = &rkisp1_stats_vb2_ops; + q->mem_ops = &vb2_vmalloc_memops; + q->buf_struct_size = sizeof(struct rkisp1_buffer); + q->timestamp_flags = v4l2_buf_flag_timestamp_monotonic; + q->lock = &node->vlock; + + return 
vb2_queue_init(q); +} + +static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats, + struct rkisp1_stat_buffer *pbuf) +{ + /* protect against concurrent access from isr? */ + struct rkisp1_device *rkisp1 = stats->rkisp1; + u32 reg_val; + + pbuf->meas_type |= rkisp1_cif_isp_stat_awb; + reg_val = rkisp1_read(rkisp1, rkisp1_cif_isp_awb_white_cnt); + pbuf->params.awb.awb_mean[0].cnt = + rkisp1_cif_isp_awb_get_pixel_cnt(reg_val); + reg_val = rkisp1_read(rkisp1, rkisp1_cif_isp_awb_mean); + + pbuf->params.awb.awb_mean[0].mean_cr_or_r = + rkisp1_cif_isp_awb_get_mean_cr_r(reg_val); + pbuf->params.awb.awb_mean[0].mean_cb_or_b = + rkisp1_cif_isp_awb_get_mean_cb_b(reg_val); + pbuf->params.awb.awb_mean[0].mean_y_or_g = + rkisp1_cif_isp_awb_get_mean_y_g(reg_val); +} + +static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats, + struct rkisp1_stat_buffer *pbuf) +{ + struct rkisp1_device *rkisp1 = stats->rkisp1; + unsigned int i; + + pbuf->meas_type |= rkisp1_cif_isp_stat_autoexp; + for (i = 0; i < rkisp1_cif_isp_ae_mean_max; i++) + pbuf->params.ae.exp_mean[i] = + (u8)rkisp1_read(rkisp1, + rkisp1_cif_isp_exp_mean_00 + i * 4); +} + +static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats, + struct rkisp1_stat_buffer *pbuf) +{ + struct rkisp1_device *rkisp1 = stats->rkisp1; + struct rkisp1_cif_isp_af_stat *af; + + pbuf->meas_type = rkisp1_cif_isp_stat_afm_fin; + + af = &pbuf->params.af; + af->window[0].sum = rkisp1_read(rkisp1, rkisp1_cif_isp_afm_sum_a); + af->window[0].lum = rkisp1_read(rkisp1, rkisp1_cif_isp_afm_lum_a); + af->window[1].sum = rkisp1_read(rkisp1, rkisp1_cif_isp_afm_sum_b); + af->window[1].lum = rkisp1_read(rkisp1, rkisp1_cif_isp_afm_lum_b); + af->window[2].sum = rkisp1_read(rkisp1, rkisp1_cif_isp_afm_sum_c); + af->window[2].lum = rkisp1_read(rkisp1, rkisp1_cif_isp_afm_lum_c); +} + +static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats, + struct rkisp1_stat_buffer *pbuf) +{ + struct rkisp1_device *rkisp1 = stats->rkisp1; + unsigned 
int i; + + pbuf->meas_type |= rkisp1_cif_isp_stat_hist; + for (i = 0; i < rkisp1_cif_isp_hist_bin_n_max; i++) + pbuf->params.hist.hist_bins[i] = + (u8)rkisp1_read(rkisp1, + rkisp1_cif_isp_hist_bin_0 + i * 4); +} + +static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats, + struct rkisp1_stat_buffer *pbuf) +{ + struct rkisp1_device *rkisp1 = stats->rkisp1; + const struct rkisp1_isp_mbus_info *in_fmt = rkisp1->isp.sink_fmt; + struct rkisp1_cif_isp_bls_meas_val *bls_val; + + bls_val = &pbuf->params.ae.bls_val; + if (in_fmt->bayer_pat == rkisp1_raw_bggr) { + bls_val->meas_b = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_a_measured); + bls_val->meas_gb = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_b_measured); + bls_val->meas_gr = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_c_measured); + bls_val->meas_r = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_d_measured); + } else if (in_fmt->bayer_pat == rkisp1_raw_gbrg) { + bls_val->meas_gb = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_a_measured); + bls_val->meas_b = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_b_measured); + bls_val->meas_r = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_c_measured); + bls_val->meas_gr = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_d_measured); + } else if (in_fmt->bayer_pat == rkisp1_raw_grbg) { + bls_val->meas_gr = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_a_measured); + bls_val->meas_r = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_b_measured); + bls_val->meas_b = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_c_measured); + bls_val->meas_gb = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_d_measured); + } else if (in_fmt->bayer_pat == rkisp1_raw_rggb) { + bls_val->meas_r = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_a_measured); + bls_val->meas_gr = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_b_measured); + bls_val->meas_gb = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_c_measured); + bls_val->meas_b = + rkisp1_read(rkisp1, rkisp1_cif_isp_bls_d_measured); + } +} + +static void +rkisp1_stats_send_measurement(struct rkisp1_stats *stats, 
+ struct rkisp1_isp_readout_work *meas_work) +{ + struct rkisp1_stat_buffer *cur_stat_buf; + struct rkisp1_buffer *cur_buf = null; + unsigned int frame_sequence = + atomic_read(&stats->rkisp1->isp.frame_sequence); + u64 timestamp = ktime_get_ns(); + + if (frame_sequence != meas_work->frame_id) { + dev_warn(stats->rkisp1->dev, + "measurement late(%d, %d) ", + frame_sequence, meas_work->frame_id); + frame_sequence = meas_work->frame_id; + } + + mutex_lock(&stats->wq_lock); + /* get one empty buffer */ + if (!list_empty(&stats->stat)) { + cur_buf = list_first_entry(&stats->stat, + struct rkisp1_buffer, queue); + list_del(&cur_buf->queue); + } + mutex_unlock(&stats->wq_lock); + + if (!cur_buf) + return; + + cur_stat_buf = + (struct rkisp1_stat_buffer *)(cur_buf->vaddr[0]); + + if (meas_work->isp_ris & rkisp1_cif_isp_awb_done) { + rkisp1_stats_get_awb_meas(stats, cur_stat_buf); + cur_stat_buf->meas_type |= rkisp1_cif_isp_stat_awb; + } + + if (meas_work->isp_ris & rkisp1_cif_isp_afm_fin) { + rkisp1_stats_get_afc_meas(stats, cur_stat_buf); + cur_stat_buf->meas_type |= rkisp1_cif_isp_stat_afm_fin; + } + + if (meas_work->isp_ris & rkisp1_cif_isp_exp_end) { + rkisp1_stats_get_aec_meas(stats, cur_stat_buf); + rkisp1_stats_get_bls_meas(stats, cur_stat_buf); + cur_stat_buf->meas_type |= rkisp1_cif_isp_stat_autoexp; + } + + if (meas_work->isp_ris & rkisp1_cif_isp_hist_measure_rdy) { + rkisp1_stats_get_hst_meas(stats, cur_stat_buf); + cur_stat_buf->meas_type |= rkisp1_cif_isp_stat_hist; + } + + vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0, + sizeof(struct rkisp1_stat_buffer)); + cur_buf->vb.sequence = frame_sequence; + cur_buf->vb.vb2_buf.timestamp = timestamp; + vb2_buffer_done(&cur_buf->vb.vb2_buf, vb2_buf_state_done); +} + +static void rkisp1_stats_readout_work(struct work_struct *work) +{ + struct rkisp1_isp_readout_work *readout_work = + container_of(work, struct rkisp1_isp_readout_work, work); + struct rkisp1_stats *stats = readout_work->stats; + + if 
(readout_work->readout == rkisp1_isp_readout_meas) + rkisp1_stats_send_measurement(stats, readout_work); + + kfree(readout_work); +} + +void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris) +{ + unsigned int frame_sequence = + atomic_read(&stats->rkisp1->isp.frame_sequence); + struct rkisp1_device *rkisp1 = stats->rkisp1; + struct rkisp1_isp_readout_work *work; + unsigned int isp_mis_tmp = 0; + u32 val; + + spin_lock(&stats->irq_lock); + + val = rkisp1_cif_isp_awb_done | rkisp1_cif_isp_afm_fin | + rkisp1_cif_isp_exp_end | rkisp1_cif_isp_hist_measure_rdy; + rkisp1_write(rkisp1, val, rkisp1_cif_isp_icr); + + isp_mis_tmp = rkisp1_read(rkisp1, rkisp1_cif_isp_mis); + if (isp_mis_tmp & + (rkisp1_cif_isp_awb_done | rkisp1_cif_isp_afm_fin | + rkisp1_cif_isp_exp_end | rkisp1_cif_isp_hist_measure_rdy)) + rkisp1->debug.stats_error++; + + if (!stats->is_streaming) + goto unlock; + if (isp_ris & (rkisp1_cif_isp_awb_done | + rkisp1_cif_isp_afm_fin | + rkisp1_cif_isp_exp_end | + rkisp1_cif_isp_hist_measure_rdy)) { + work = kzalloc(sizeof(*work), gfp_atomic); + if (work) { + init_work(&work->work, + rkisp1_stats_readout_work); + work->readout = rkisp1_isp_readout_meas; + work->stats = stats; + work->frame_id = frame_sequence; + work->isp_ris = isp_ris; + if (!queue_work(stats->readout_wq, + &work->work)) + kfree(work); + } else { + dev_err(stats->rkisp1->dev, + "could not allocate work "); + } + } + +unlock: + spin_unlock(&stats->irq_lock); +} + +static void rkisp1_init_stats(struct rkisp1_stats *stats) +{ + stats->vdev_fmt.fmt.meta.dataformat = + v4l2_meta_fmt_rk_isp1_stat_3a; + stats->vdev_fmt.fmt.meta.buffersize = + sizeof(struct rkisp1_stat_buffer); +} + +int rkisp1_stats_register(struct rkisp1_stats *stats, + struct v4l2_device *v4l2_dev, + struct rkisp1_device *rkisp1) +{ + struct rkisp1_vdev_node *node = &stats->vnode; + struct video_device *vdev = &node->vdev; + int ret; + + stats->rkisp1 = rkisp1; + mutex_init(&stats->wq_lock); + mutex_init(&node->vlock); + 
init_list_head(&stats->stat); + spin_lock_init(&stats->irq_lock); + + strscpy(vdev->name, rkisp1_stats_dev_name, sizeof(vdev->name)); + + video_set_drvdata(vdev, stats); + vdev->ioctl_ops = &rkisp1_stats_ioctl; + vdev->fops = &rkisp1_stats_fops; + vdev->release = video_device_release_empty; + vdev->lock = &node->vlock; + vdev->v4l2_dev = v4l2_dev; + vdev->queue = &node->buf_queue; + vdev->device_caps = v4l2_cap_meta_capture | v4l2_cap_streaming; + vdev->vfl_dir = vfl_dir_rx; + rkisp1_stats_init_vb2_queue(vdev->queue, stats); + rkisp1_init_stats(stats); + video_set_drvdata(vdev, stats); + + node->pad.flags = media_pad_fl_sink; + ret = media_entity_pads_init(&vdev->entity, 1, &node->pad); + if (ret) + goto err_release_queue; + + ret = video_register_device(vdev, vfl_type_grabber, -1); + if (ret) { + dev_err(&vdev->dev, + "failed to register %s, ret=%d ", vdev->name, ret); + goto err_cleanup_media_entity; + } + + stats->readout_wq = alloc_workqueue("measurement_queue", + wq_unbound | wq_mem_reclaim, + 1); + + if (!stats->readout_wq) { + ret = -enomem; + goto err_unreg_vdev; + } + + return 0; + +err_unreg_vdev: + video_unregister_device(vdev); +err_cleanup_media_entity: + media_entity_cleanup(&vdev->entity); +err_release_queue: + vb2_queue_release(vdev->queue); + mutex_destroy(&node->vlock); + mutex_destroy(&stats->wq_lock); + return ret; +} + +void rkisp1_stats_unregister(struct rkisp1_stats *stats) +{ + struct rkisp1_vdev_node *node = &stats->vnode; + struct video_device *vdev = &node->vdev; + + destroy_workqueue(stats->readout_wq); + video_unregister_device(vdev); + media_entity_cleanup(&vdev->entity); + vb2_queue_release(vdev->queue); + mutex_destroy(&node->vlock); + mutex_destroy(&stats->wq_lock); +}
|
Drivers in the Staging area
|
9a28dbd65a8882462079d66daf9282234d3fa1a0
|
jacob chen
|
drivers
|
staging
|
media, rkisp1
|
media: staging: rkisp1: add output device for parameters
|
add the output video driver that accept parameters from userspace.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['h', 'c', 'makefile']
| 5
| 1,717
| 16
|
--- diff --git a/drivers/staging/media/rkisp1/makefile b/drivers/staging/media/rkisp1/makefile --- a/drivers/staging/media/rkisp1/makefile +++ b/drivers/staging/media/rkisp1/makefile - rkisp1-stats.o + rkisp1-stats.o \ + rkisp1-params.o diff --git a/drivers/staging/media/rkisp1/rkisp1-common.h b/drivers/staging/media/rkisp1/rkisp1-common.h --- a/drivers/staging/media/rkisp1/rkisp1-common.h +++ b/drivers/staging/media/rkisp1/rkisp1-common.h +/* + * struct rkisp1_params - isp input parameters device + * + * @cur_params: current isp parameters + * @is_first_params: the first params should take effect immediately + */ +struct rkisp1_params { + struct rkisp1_vdev_node vnode; + struct rkisp1_device *rkisp1; + + spinlock_t config_lock; + struct list_head params; + struct rkisp1_params_cfg cur_params; + struct v4l2_format vdev_fmt; + bool is_streaming; + bool is_first_params; + + enum v4l2_quantization quantization; + enum rkisp1_fmt_raw_pat_type raw_type; +}; + + * @params: isp input parameters device + struct rkisp1_params params; +void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis); +void rkisp1_params_configure(struct rkisp1_params *params, + enum rkisp1_fmt_raw_pat_type bayer_pat, + enum v4l2_quantization quantization); +void rkisp1_params_disable(struct rkisp1_params *params); +int rkisp1_params_register(struct rkisp1_params *params, + struct v4l2_device *v4l2_dev, + struct rkisp1_device *rkisp1); +void rkisp1_params_unregister(struct rkisp1_params *params); + +void rkisp1_params_isr_handler(struct rkisp1_device *rkisp1, u32 isp_mis); + diff --git a/drivers/staging/media/rkisp1/rkisp1-dev.c b/drivers/staging/media/rkisp1/rkisp1-dev.c --- a/drivers/staging/media/rkisp1/rkisp1-dev.c +++ b/drivers/staging/media/rkisp1/rkisp1-dev.c - * rkisp1-stats.c - * |===============| - * +---------------+ - * | | - * | isp | - * | | - * +---------------+ + * rkisp1-stats.c rkisp1-params.c + * |===============| |===============| + * +---------------+ +---------------+ + 
* | | | | + * | isp | | isp | + * | | | | + * +---------------+ +---------------+ - * +----------+ +----------+ - * \ | - * \ | - * +----------+ \ | - * | sensor 1 | v v - * ------------ +------+------+ - * | 0 |----->| 0 | 1 | + * +----------+ +----------+ +-----------+ + * \ | | params | + * \ | | (output) | + * +----------+ \ | +-----------+ + * | sensor 1 | v v | + * ------------ +------+------+ | + * | 0 |----->| 0 | 1 |<---------+ + /* params links */ + source = &rkisp1->params.vnode.vdev.entity; + sink = &rkisp1->isp.sd.entity; + ret = media_create_pad_link(source, 0, sink, + rkisp1_isp_pad_sink_params, flags); + if (ret) + return ret; + + ret = rkisp1_params_register(&rkisp1->params, + &rkisp1->v4l2_dev, rkisp1); + if (ret) + goto err_unreg_stats; + - goto err_unreg_stats; + goto err_unreg_params; +err_unreg_params: + rkisp1_params_unregister(&rkisp1->params); + rkisp1_params_unregister(&rkisp1->params); diff --git a/drivers/staging/media/rkisp1/rkisp1-isp.c b/drivers/staging/media/rkisp1/rkisp1-isp.c --- a/drivers/staging/media/rkisp1/rkisp1-isp.c +++ b/drivers/staging/media/rkisp1/rkisp1-isp.c + if (src_fmt->fmt_type == rkisp1_fmt_bayer) { + rkisp1_params_disable(&rkisp1->params); + } else { + struct v4l2_mbus_framefmt *src_frm; + + src_frm = rkisp1_isp_get_pad_fmt(&rkisp1->isp, null, + rkisp1_isp_pad_sink_video, + v4l2_subdev_format_active); + rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat, + src_frm->quantization); + } + + + /* + * then update changed configs. some of them involve + * lot of register writes. do those only one per frame. + * do the updates in the order of the processing flow. 
+ */ + rkisp1_params_isr(rkisp1, status); diff --git a/drivers/staging/media/rkisp1/rkisp1-params.c b/drivers/staging/media/rkisp1/rkisp1-params.c --- /dev/null +++ b/drivers/staging/media/rkisp1/rkisp1-params.c +// spdx-license-identifier: (gpl-2.0+ or mit) +/* + * rockchip isp1 driver - params subdevice + * + * copyright (c) 2017 rockchip electronics co., ltd. + */ + +#include <media/v4l2-common.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> +#include <media/videobuf2-core.h> +#include <media/videobuf2-vmalloc.h> /* for isp params */ + +#include "rkisp1-common.h" + +#define rkisp1_params_dev_name rkisp1_driver_name "_params" + +#define rkisp1_isp_params_req_bufs_min 2 +#define rkisp1_isp_params_req_bufs_max 8 + +#define rkisp1_isp_dpcc_line_thresh(n) \ + (rkisp1_cif_isp_dpcc_line_thresh_1 + 0x14 * (n)) +#define rkisp1_isp_dpcc_line_mad_fac(n) \ + (rkisp1_cif_isp_dpcc_line_mad_fac_1 + 0x14 * (n)) +#define rkisp1_isp_dpcc_pg_fac(n) \ + (rkisp1_cif_isp_dpcc_pg_fac_1 + 0x14 * (n)) +#define rkisp1_isp_dpcc_rnd_thresh(n) \ + (rkisp1_cif_isp_dpcc_rnd_thresh_1 + 0x14 * (n)) +#define rkisp1_isp_dpcc_rg_fac(n) \ + (rkisp1_cif_isp_dpcc_rg_fac_1 + 0x14 * (n)) +#define rkisp1_isp_cc_coeff(n) \ + (rkisp1_cif_isp_cc_coeff_0 + (n) * 4) + +static inline void +rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask) +{ + u32 val; + + val = rkisp1_read(params->rkisp1, reg); + rkisp1_write(params->rkisp1, val | bit_mask, reg); +} + +static inline void +rkisp1_param_clear_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask) +{ + u32 val; + + val = rkisp1_read(params->rkisp1, reg); + rkisp1_write(params->rkisp1, val & ~bit_mask, reg); +} + +/* isp bp interface function */ +static void rkisp1_dpcc_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_dpcc_config *arg) +{ + unsigned int i; + u32 mode; + + /* avoid to override the old enable value */ + mode = rkisp1_read(params->rkisp1, rkisp1_cif_isp_dpcc_mode); + mode &= 
rkisp1_cif_isp_dpcc_ena; + mode |= arg->mode & ~rkisp1_cif_isp_dpcc_ena; + rkisp1_write(params->rkisp1, mode, rkisp1_cif_isp_dpcc_mode); + rkisp1_write(params->rkisp1, arg->output_mode, + rkisp1_cif_isp_dpcc_output_mode); + rkisp1_write(params->rkisp1, arg->set_use, + rkisp1_cif_isp_dpcc_set_use); + + rkisp1_write(params->rkisp1, arg->methods[0].method, + rkisp1_cif_isp_dpcc_methods_set_1); + rkisp1_write(params->rkisp1, arg->methods[1].method, + rkisp1_cif_isp_dpcc_methods_set_2); + rkisp1_write(params->rkisp1, arg->methods[2].method, + rkisp1_cif_isp_dpcc_methods_set_3); + for (i = 0; i < rkisp1_cif_isp_dpcc_methods_max; i++) { + rkisp1_write(params->rkisp1, arg->methods[i].line_thresh, + rkisp1_isp_dpcc_line_thresh(i)); + rkisp1_write(params->rkisp1, arg->methods[i].line_mad_fac, + rkisp1_isp_dpcc_line_mad_fac(i)); + rkisp1_write(params->rkisp1, arg->methods[i].pg_fac, + rkisp1_isp_dpcc_pg_fac(i)); + rkisp1_write(params->rkisp1, arg->methods[i].rnd_thresh, + rkisp1_isp_dpcc_rnd_thresh(i)); + rkisp1_write(params->rkisp1, arg->methods[i].rg_fac, + rkisp1_isp_dpcc_rg_fac(i)); + } + + rkisp1_write(params->rkisp1, arg->rnd_offs, + rkisp1_cif_isp_dpcc_rnd_offs); + rkisp1_write(params->rkisp1, arg->ro_limits, + rkisp1_cif_isp_dpcc_ro_limits); +} + +/* isp black level subtraction interface function */ +static void rkisp1_bls_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_bls_config *arg) +{ + /* avoid to override the old enable value */ + u32 new_control; + + new_control = rkisp1_read(params->rkisp1, rkisp1_cif_isp_bls_ctrl); + new_control &= rkisp1_cif_isp_bls_ena; + /* fixed subtraction values */ + if (!arg->enable_auto) { + const struct rkisp1_cif_isp_bls_fixed_val *pval = + &arg->fixed_val; + + switch (params->raw_type) { + case rkisp1_raw_bggr: + rkisp1_write(params->rkisp1, + pval->r, rkisp1_cif_isp_bls_d_fixed); + rkisp1_write(params->rkisp1, + pval->gr, rkisp1_cif_isp_bls_c_fixed); + rkisp1_write(params->rkisp1, + pval->gb, 
rkisp1_cif_isp_bls_b_fixed); + rkisp1_write(params->rkisp1, + pval->b, rkisp1_cif_isp_bls_a_fixed); + break; + case rkisp1_raw_gbrg: + rkisp1_write(params->rkisp1, + pval->r, rkisp1_cif_isp_bls_c_fixed); + rkisp1_write(params->rkisp1, + pval->gr, rkisp1_cif_isp_bls_d_fixed); + rkisp1_write(params->rkisp1, + pval->gb, rkisp1_cif_isp_bls_a_fixed); + rkisp1_write(params->rkisp1, + pval->b, rkisp1_cif_isp_bls_b_fixed); + break; + case rkisp1_raw_grbg: + rkisp1_write(params->rkisp1, + pval->r, rkisp1_cif_isp_bls_b_fixed); + rkisp1_write(params->rkisp1, + pval->gr, rkisp1_cif_isp_bls_a_fixed); + rkisp1_write(params->rkisp1, + pval->gb, rkisp1_cif_isp_bls_d_fixed); + rkisp1_write(params->rkisp1, + pval->b, rkisp1_cif_isp_bls_c_fixed); + break; + case rkisp1_raw_rggb: + rkisp1_write(params->rkisp1, + pval->r, rkisp1_cif_isp_bls_a_fixed); + rkisp1_write(params->rkisp1, + pval->gr, rkisp1_cif_isp_bls_b_fixed); + rkisp1_write(params->rkisp1, + pval->gb, rkisp1_cif_isp_bls_c_fixed); + rkisp1_write(params->rkisp1, + pval->b, rkisp1_cif_isp_bls_d_fixed); + break; + default: + break; + } + + } else { + if (arg->en_windows & bit(1)) { + rkisp1_write(params->rkisp1, arg->bls_window2.h_offs, + rkisp1_cif_isp_bls_h2_start); + rkisp1_write(params->rkisp1, arg->bls_window2.h_size, + rkisp1_cif_isp_bls_h2_stop); + rkisp1_write(params->rkisp1, arg->bls_window2.v_offs, + rkisp1_cif_isp_bls_v2_start); + rkisp1_write(params->rkisp1, arg->bls_window2.v_size, + rkisp1_cif_isp_bls_v2_stop); + new_control |= rkisp1_cif_isp_bls_window_2; + } + + if (arg->en_windows & bit(0)) { + rkisp1_write(params->rkisp1, arg->bls_window1.h_offs, + rkisp1_cif_isp_bls_h1_start); + rkisp1_write(params->rkisp1, arg->bls_window1.h_size, + rkisp1_cif_isp_bls_h1_stop); + rkisp1_write(params->rkisp1, arg->bls_window1.v_offs, + rkisp1_cif_isp_bls_v1_start); + rkisp1_write(params->rkisp1, arg->bls_window1.v_size, + rkisp1_cif_isp_bls_v1_stop); + new_control |= rkisp1_cif_isp_bls_window_1; + } + + 
rkisp1_write(params->rkisp1, arg->bls_samples, + rkisp1_cif_isp_bls_samples); + + new_control |= rkisp1_cif_isp_bls_mode_measured; + } + rkisp1_write(params->rkisp1, new_control, rkisp1_cif_isp_bls_ctrl); +} + +/* isp ls correction interface function */ +static void +rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_lsc_config *pconfig) +{ + unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data; + + isp_lsc_status = rkisp1_read(params->rkisp1, rkisp1_cif_isp_lsc_status); + + /* rkisp1_cif_isp_lsc_table_address_153 = ( 17 * 18 ) >> 1 */ + sram_addr = (isp_lsc_status & rkisp1_cif_isp_lsc_active_table) ? + rkisp1_cif_isp_lsc_table_address_0 : + rkisp1_cif_isp_lsc_table_address_153; + rkisp1_write(params->rkisp1, sram_addr, + rkisp1_cif_isp_lsc_r_table_addr); + rkisp1_write(params->rkisp1, sram_addr, + rkisp1_cif_isp_lsc_gr_table_addr); + rkisp1_write(params->rkisp1, sram_addr, + rkisp1_cif_isp_lsc_gb_table_addr); + rkisp1_write(params->rkisp1, sram_addr, + rkisp1_cif_isp_lsc_b_table_addr); + + /* program data tables (table size is 9 * 17 = 153) */ + for (i = 0; + i < rkisp1_cif_isp_lsc_sectors_max * rkisp1_cif_isp_lsc_sectors_max; + i += rkisp1_cif_isp_lsc_sectors_max) { + /* + * 17 sectors with 2 values in one dword = 9 + * dwords (2nd value of last dword unused) + */ + for (j = 0; j < rkisp1_cif_isp_lsc_sectors_max - 1; j += 2) { + data = rkisp1_cif_isp_lsc_table_data(pconfig->r_data_tbl[i + j], + pconfig->r_data_tbl[i + j + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_r_table_data); + + data = rkisp1_cif_isp_lsc_table_data(pconfig->gr_data_tbl[i + j], + pconfig->gr_data_tbl[i + j + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_gr_table_data); + + data = rkisp1_cif_isp_lsc_table_data(pconfig->gb_data_tbl[i + j], + pconfig->gb_data_tbl[i + j + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_gb_table_data); + + data = 
rkisp1_cif_isp_lsc_table_data(pconfig->b_data_tbl[i + j], + pconfig->b_data_tbl[i + j + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_b_table_data); + } + data = rkisp1_cif_isp_lsc_table_data(pconfig->r_data_tbl[i + j], 0); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_r_table_data); + + data = rkisp1_cif_isp_lsc_table_data(pconfig->gr_data_tbl[i + j], 0); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_gr_table_data); + + data = rkisp1_cif_isp_lsc_table_data(pconfig->gb_data_tbl[i + j], 0); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_gb_table_data); + + data = rkisp1_cif_isp_lsc_table_data(pconfig->b_data_tbl[i + j], 0); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_b_table_data); + } + isp_lsc_table_sel = (isp_lsc_status & rkisp1_cif_isp_lsc_active_table) ? + rkisp1_cif_isp_lsc_table_0 : + rkisp1_cif_isp_lsc_table_1; + rkisp1_write(params->rkisp1, isp_lsc_table_sel, + rkisp1_cif_isp_lsc_table_sel); +} + +static void rkisp1_lsc_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_lsc_config *arg) +{ + unsigned int i, data; + u32 lsc_ctrl; + + /* to config must be off , store the current status firstly */ + lsc_ctrl = rkisp1_read(params->rkisp1, rkisp1_cif_isp_lsc_ctrl); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_lsc_ctrl, + rkisp1_cif_isp_lsc_ctrl_ena); + rkisp1_lsc_correct_matrix_config(params, arg); + + for (i = 0; i < 4; i++) { + /* program x size tables */ + data = rkisp1_cif_isp_lsc_sect_size(arg->x_size_tbl[i * 2], + arg->x_size_tbl[i * 2 + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_xsize_01 + i * 4); + + /* program x grad tables */ + data = rkisp1_cif_isp_lsc_sect_size(arg->x_grad_tbl[i * 2], + arg->x_grad_tbl[i * 2 + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_xgrad_01 + i * 4); + + /* program y size tables */ + data = rkisp1_cif_isp_lsc_sect_size(arg->y_size_tbl[i * 2], + arg->y_size_tbl[i * 2 + 1]); + rkisp1_write(params->rkisp1, 
data, + rkisp1_cif_isp_lsc_ysize_01 + i * 4); + + /* program y grad tables */ + data = rkisp1_cif_isp_lsc_sect_size(arg->y_grad_tbl[i * 2], + arg->y_grad_tbl[i * 2 + 1]); + rkisp1_write(params->rkisp1, data, + rkisp1_cif_isp_lsc_ygrad_01 + i * 4); + } + + /* restore the lsc ctrl status */ + if (lsc_ctrl & rkisp1_cif_isp_lsc_ctrl_ena) { + rkisp1_param_set_bits(params, + rkisp1_cif_isp_lsc_ctrl, + rkisp1_cif_isp_lsc_ctrl_ena); + } else { + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_lsc_ctrl, + rkisp1_cif_isp_lsc_ctrl_ena); + } +} + +/* isp filtering function */ +static void rkisp1_flt_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_flt_config *arg) +{ + u32 filt_mode; + + rkisp1_write(params->rkisp1, + arg->thresh_bl0, rkisp1_cif_isp_filt_thresh_bl0); + rkisp1_write(params->rkisp1, + arg->thresh_bl1, rkisp1_cif_isp_filt_thresh_bl1); + rkisp1_write(params->rkisp1, + arg->thresh_sh0, rkisp1_cif_isp_filt_thresh_sh0); + rkisp1_write(params->rkisp1, + arg->thresh_sh1, rkisp1_cif_isp_filt_thresh_sh1); + rkisp1_write(params->rkisp1, arg->fac_bl0, rkisp1_cif_isp_filt_fac_bl0); + rkisp1_write(params->rkisp1, arg->fac_bl1, rkisp1_cif_isp_filt_fac_bl1); + rkisp1_write(params->rkisp1, arg->fac_mid, rkisp1_cif_isp_filt_fac_mid); + rkisp1_write(params->rkisp1, arg->fac_sh0, rkisp1_cif_isp_filt_fac_sh0); + rkisp1_write(params->rkisp1, arg->fac_sh1, rkisp1_cif_isp_filt_fac_sh1); + rkisp1_write(params->rkisp1, + arg->lum_weight, rkisp1_cif_isp_filt_lum_weight); + + rkisp1_write(params->rkisp1, + (arg->mode ? 
rkisp1_cif_isp_flt_mode_dnr : 0) | + rkisp1_cif_isp_flt_chroma_v_mode(arg->chr_v_mode) | + rkisp1_cif_isp_flt_chroma_h_mode(arg->chr_h_mode) | + rkisp1_cif_isp_flt_green_stage1(arg->grn_stage1), + rkisp1_cif_isp_filt_mode); + + /* avoid to override the old enable value */ + filt_mode = rkisp1_read(params->rkisp1, rkisp1_cif_isp_filt_mode); + filt_mode &= rkisp1_cif_isp_flt_ena; + if (arg->mode) + filt_mode |= rkisp1_cif_isp_flt_mode_dnr; + filt_mode |= rkisp1_cif_isp_flt_chroma_v_mode(arg->chr_v_mode) | + rkisp1_cif_isp_flt_chroma_h_mode(arg->chr_h_mode) | + rkisp1_cif_isp_flt_green_stage1(arg->grn_stage1); + rkisp1_write(params->rkisp1, filt_mode, rkisp1_cif_isp_filt_mode); +} + +/* isp demosaic interface function */ +static int rkisp1_bdm_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_bdm_config *arg) +{ + u32 bdm_th; + + /* avoid to override the old enable value */ + bdm_th = rkisp1_read(params->rkisp1, rkisp1_cif_isp_demosaic); + bdm_th &= rkisp1_cif_isp_demosaic_bypass; + bdm_th |= arg->demosaic_th & ~rkisp1_cif_isp_demosaic_bypass; + /* set demosaic threshold */ + rkisp1_write(params->rkisp1, bdm_th, rkisp1_cif_isp_demosaic); + return 0; +} + +/* isp gamma correction interface function */ +static void rkisp1_sdg_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_sdg_config *arg) +{ + unsigned int i; + + rkisp1_write(params->rkisp1, + arg->xa_pnts.gamma_dx0, rkisp1_cif_isp_gamma_dx_lo); + rkisp1_write(params->rkisp1, + arg->xa_pnts.gamma_dx1, rkisp1_cif_isp_gamma_dx_hi); + + for (i = 0; i < rkisp1_cif_isp_degamma_curve_size; i++) { + rkisp1_write(params->rkisp1, arg->curve_r.gamma_y[i], + rkisp1_cif_isp_gamma_r_y0 + i * 4); + rkisp1_write(params->rkisp1, arg->curve_g.gamma_y[i], + rkisp1_cif_isp_gamma_g_y0 + i * 4); + rkisp1_write(params->rkisp1, arg->curve_b.gamma_y[i], + rkisp1_cif_isp_gamma_b_y0 + i * 4); + } +} + +/* isp gamma correction interface function */ +static void rkisp1_goc_config(struct rkisp1_params *params, + 
const struct rkisp1_cif_isp_goc_config *arg) +{ + unsigned int i; + + rkisp1_param_clear_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_out_ena); + rkisp1_write(params->rkisp1, arg->mode, rkisp1_cif_isp_gamma_out_mode); + + for (i = 0; i < rkisp1_cif_isp_gamma_out_max_samples; i++) + rkisp1_write(params->rkisp1, arg->gamma_y[i], + rkisp1_cif_isp_gamma_out_y_0 + i * 4); +} + +/* isp cross talk */ +static void rkisp1_ctk_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_ctk_config *arg) +{ + rkisp1_write(params->rkisp1, arg->coeff0, rkisp1_cif_isp_ct_coeff_0); + rkisp1_write(params->rkisp1, arg->coeff1, rkisp1_cif_isp_ct_coeff_1); + rkisp1_write(params->rkisp1, arg->coeff2, rkisp1_cif_isp_ct_coeff_2); + rkisp1_write(params->rkisp1, arg->coeff3, rkisp1_cif_isp_ct_coeff_3); + rkisp1_write(params->rkisp1, arg->coeff4, rkisp1_cif_isp_ct_coeff_4); + rkisp1_write(params->rkisp1, arg->coeff5, rkisp1_cif_isp_ct_coeff_5); + rkisp1_write(params->rkisp1, arg->coeff6, rkisp1_cif_isp_ct_coeff_6); + rkisp1_write(params->rkisp1, arg->coeff7, rkisp1_cif_isp_ct_coeff_7); + rkisp1_write(params->rkisp1, arg->coeff8, rkisp1_cif_isp_ct_coeff_8); + rkisp1_write(params->rkisp1, arg->ct_offset_r, + rkisp1_cif_isp_ct_offset_r); + rkisp1_write(params->rkisp1, arg->ct_offset_g, + rkisp1_cif_isp_ct_offset_g); + rkisp1_write(params->rkisp1, arg->ct_offset_b, + rkisp1_cif_isp_ct_offset_b); +} + +static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en) +{ + if (en) + return; + + /* write back the default values. 
*/ + rkisp1_write(params->rkisp1, 0x80, rkisp1_cif_isp_ct_coeff_0); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_coeff_1); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_coeff_2); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_coeff_3); + rkisp1_write(params->rkisp1, 0x80, rkisp1_cif_isp_ct_coeff_4); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_coeff_5); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_coeff_6); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_coeff_7); + rkisp1_write(params->rkisp1, 0x80, rkisp1_cif_isp_ct_coeff_8); + + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_offset_r); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_offset_g); + rkisp1_write(params->rkisp1, 0, rkisp1_cif_isp_ct_offset_b); +} + +/* isp white balance mode */ +static void rkisp1_awb_meas_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_awb_meas_config *arg) +{ + u32 reg_val = 0; + /* based on the mode,configure the awb module */ + if (arg->awb_mode == rkisp1_cif_isp_awb_mode_ycbcr) { + /* reference cb and cr */ + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_awb_ref_cr_set(arg->awb_ref_cr) | + arg->awb_ref_cb, rkisp1_cif_isp_awb_ref); + /* yc threshold */ + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_awb_max_y_set(arg->max_y) | + rkisp1_cif_isp_awb_min_y_set(arg->min_y) | + rkisp1_cif_isp_awb_max_cs_set(arg->max_csum) | + arg->min_c, rkisp1_cif_isp_awb_thresh); + } + + reg_val = rkisp1_read(params->rkisp1, rkisp1_cif_isp_awb_prop); + if (arg->enable_ymax_cmp) + reg_val |= rkisp1_cif_isp_awb_ymax_cmp_en; + else + reg_val &= ~rkisp1_cif_isp_awb_ymax_cmp_en; + rkisp1_write(params->rkisp1, reg_val, rkisp1_cif_isp_awb_prop); + + /* window offset */ + rkisp1_write(params->rkisp1, + arg->awb_wnd.v_offs, rkisp1_cif_isp_awb_wnd_v_offs); + rkisp1_write(params->rkisp1, + arg->awb_wnd.h_offs, rkisp1_cif_isp_awb_wnd_h_offs); + /* awb window size */ + rkisp1_write(params->rkisp1, + arg->awb_wnd.v_size, rkisp1_cif_isp_awb_wnd_v_size); 
+ rkisp1_write(params->rkisp1, + arg->awb_wnd.h_size, rkisp1_cif_isp_awb_wnd_h_size); + /* number of frames */ + rkisp1_write(params->rkisp1, + arg->frames, rkisp1_cif_isp_awb_frames); +} + +static void +rkisp1_awb_meas_enable(struct rkisp1_params *params, + const struct rkisp1_cif_isp_awb_meas_config *arg, + bool en) +{ + u32 reg_val = rkisp1_read(params->rkisp1, rkisp1_cif_isp_awb_prop); + + /* switch off */ + reg_val &= rkisp1_cif_isp_awb_mode_mask_none; + + if (en) { + if (arg->awb_mode == rkisp1_cif_isp_awb_mode_rgb) + reg_val |= rkisp1_cif_isp_awb_mode_rgb_en; + else + reg_val |= rkisp1_cif_isp_awb_mode_ycbcr_en; + + rkisp1_write(params->rkisp1, reg_val, rkisp1_cif_isp_awb_prop); + + /* measurements require awb block be active. */ + rkisp1_param_set_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_awb_ena); + } else { + rkisp1_write(params->rkisp1, + reg_val, rkisp1_cif_isp_awb_prop); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_awb_ena); + } +} + +static void +rkisp1_awb_gain_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_awb_gain_config *arg) +{ + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_awb_gain_r_set(arg->gain_green_r) | + arg->gain_green_b, rkisp1_cif_isp_awb_gain_g); + + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_awb_gain_r_set(arg->gain_red) | + arg->gain_blue, rkisp1_cif_isp_awb_gain_rb); +} + +static void rkisp1_aec_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_aec_config *arg) +{ + unsigned int block_hsize, block_vsize; + u32 exp_ctrl; + + /* avoid to override the old enable value */ + exp_ctrl = rkisp1_read(params->rkisp1, rkisp1_cif_isp_exp_ctrl); + exp_ctrl &= rkisp1_cif_isp_exp_ena; + if (arg->autostop) + exp_ctrl |= rkisp1_cif_isp_exp_ctrl_autostop; + if (arg->mode == rkisp1_cif_isp_exp_measuring_mode_1) + exp_ctrl |= rkisp1_cif_isp_exp_ctrl_measmode_1; + rkisp1_write(params->rkisp1, exp_ctrl, rkisp1_cif_isp_exp_ctrl); + + 
rkisp1_write(params->rkisp1, + arg->meas_window.h_offs, rkisp1_cif_isp_exp_h_offset); + rkisp1_write(params->rkisp1, + arg->meas_window.v_offs, rkisp1_cif_isp_exp_v_offset); + + block_hsize = arg->meas_window.h_size / + rkisp1_cif_isp_exp_column_num - 1; + block_vsize = arg->meas_window.v_size / + rkisp1_cif_isp_exp_row_num - 1; + + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_exp_h_size_set(block_hsize), + rkisp1_cif_isp_exp_h_size); + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_exp_v_size_set(block_vsize), + rkisp1_cif_isp_exp_v_size); +} + +static void rkisp1_cproc_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_cproc_config *arg) +{ + struct rkisp1_cif_isp_isp_other_cfg *cur_other_cfg = + ¶ms->cur_params.others; + struct rkisp1_cif_isp_ie_config *cur_ie_config = + &cur_other_cfg->ie_config; + u32 effect = cur_ie_config->effect; + u32 quantization = params->quantization; + + rkisp1_write(params->rkisp1, arg->contrast, rkisp1_cif_c_proc_contrast); + rkisp1_write(params->rkisp1, arg->hue, rkisp1_cif_c_proc_hue); + rkisp1_write(params->rkisp1, arg->sat, rkisp1_cif_c_proc_saturation); + rkisp1_write(params->rkisp1, arg->brightness, + rkisp1_cif_c_proc_brightness); + + if (quantization != v4l2_quantization_full_range || + effect != v4l2_colorfx_none) { + rkisp1_param_clear_bits(params, rkisp1_cif_c_proc_ctrl, + rkisp1_cif_c_proc_yout_full | + rkisp1_cif_c_proc_yin_full | + rkisp1_cif_c_proc_cout_full); + } else { + rkisp1_param_set_bits(params, rkisp1_cif_c_proc_ctrl, + rkisp1_cif_c_proc_yout_full | + rkisp1_cif_c_proc_yin_full | + rkisp1_cif_c_proc_cout_full); + } +} + +static void rkisp1_hst_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_hst_config *arg) +{ + unsigned int block_hsize, block_vsize; + static const u32 hist_weight_regs[] = { + rkisp1_cif_isp_hist_weight_00to30, + rkisp1_cif_isp_hist_weight_40to21, + rkisp1_cif_isp_hist_weight_31to12, + rkisp1_cif_isp_hist_weight_22to03, + rkisp1_cif_isp_hist_weight_13to43, + 
rkisp1_cif_isp_hist_weight_04to34, + rkisp1_cif_isp_hist_weight_44, + }; + const u8 *weight; + unsigned int i; + u32 hist_prop; + + /* avoid to override the old enable value */ + hist_prop = rkisp1_read(params->rkisp1, rkisp1_cif_isp_hist_prop); + hist_prop &= rkisp1_cif_isp_hist_prop_mode_mask; + hist_prop |= rkisp1_cif_isp_hist_prediv_set(arg->histogram_predivider); + rkisp1_write(params->rkisp1, hist_prop, rkisp1_cif_isp_hist_prop); + rkisp1_write(params->rkisp1, + arg->meas_window.h_offs, + rkisp1_cif_isp_hist_h_offs); + rkisp1_write(params->rkisp1, + arg->meas_window.v_offs, + rkisp1_cif_isp_hist_v_offs); + + block_hsize = arg->meas_window.h_size / + rkisp1_cif_isp_hist_column_num - 1; + block_vsize = arg->meas_window.v_size / rkisp1_cif_isp_hist_row_num - 1; + + rkisp1_write(params->rkisp1, block_hsize, rkisp1_cif_isp_hist_h_size); + rkisp1_write(params->rkisp1, block_vsize, rkisp1_cif_isp_hist_v_size); + + weight = arg->hist_weight; + for (i = 0; i < array_size(hist_weight_regs); ++i, weight += 4) + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_hist_weight_set(weight[0], + weight[1], + weight[2], + weight[3]), + hist_weight_regs[i]); +} + +static void +rkisp1_hst_enable(struct rkisp1_params *params, + const struct rkisp1_cif_isp_hst_config *arg, bool en) +{ + if (en) { + u32 hist_prop = rkisp1_read(params->rkisp1, + rkisp1_cif_isp_hist_prop); + + hist_prop &= ~rkisp1_cif_isp_hist_prop_mode_mask; + hist_prop |= arg->mode; + rkisp1_param_set_bits(params, rkisp1_cif_isp_hist_prop, + hist_prop); + } else { + rkisp1_param_clear_bits(params, rkisp1_cif_isp_hist_prop, + rkisp1_cif_isp_hist_prop_mode_mask); + } +} + +static void rkisp1_afm_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_afc_config *arg) +{ + size_t num_of_win = min_t(size_t, array_size(arg->afm_win), + arg->num_afm_win); + u32 afm_ctrl = rkisp1_read(params->rkisp1, rkisp1_cif_isp_afm_ctrl); + unsigned int i; + + /* switch off to configure. 
*/ + rkisp1_param_clear_bits(params, rkisp1_cif_isp_afm_ctrl, + rkisp1_cif_isp_afm_ena); + + for (i = 0; i < num_of_win; i++) { + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_afm_window_x(arg->afm_win[i].h_offs) | + rkisp1_cif_isp_afm_window_y(arg->afm_win[i].v_offs), + rkisp1_cif_isp_afm_lt_a + i * 8); + rkisp1_write(params->rkisp1, + rkisp1_cif_isp_afm_window_x(arg->afm_win[i].h_size + + arg->afm_win[i].h_offs) | + rkisp1_cif_isp_afm_window_y(arg->afm_win[i].v_size + + arg->afm_win[i].v_offs), + rkisp1_cif_isp_afm_rb_a + i * 8); + } + rkisp1_write(params->rkisp1, arg->thres, rkisp1_cif_isp_afm_thres); + rkisp1_write(params->rkisp1, arg->var_shift, + rkisp1_cif_isp_afm_var_shift); + /* restore afm status */ + rkisp1_write(params->rkisp1, afm_ctrl, rkisp1_cif_isp_afm_ctrl); +} + +static void rkisp1_ie_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_ie_config *arg) +{ + u32 eff_ctrl; + + eff_ctrl = rkisp1_read(params->rkisp1, rkisp1_cif_img_eff_ctrl); + eff_ctrl &= ~rkisp1_cif_img_eff_ctrl_mode_mask; + + if (params->quantization == v4l2_quantization_full_range) + eff_ctrl |= rkisp1_cif_img_eff_ctrl_ycbcr_full; + + switch (arg->effect) { + case v4l2_colorfx_sepia: + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_sepia; + break; + case v4l2_colorfx_set_cbcr: + rkisp1_write(params->rkisp1, arg->eff_tint, + rkisp1_cif_img_eff_tint); + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_sepia; + break; + /* + * color selection is similar to water color(aqua): + * grayscale + selected color w threshold + */ + case v4l2_colorfx_aqua: + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_color_sel; + rkisp1_write(params->rkisp1, arg->color_sel, + rkisp1_cif_img_eff_color_sel); + break; + case v4l2_colorfx_emboss: + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_emboss; + rkisp1_write(params->rkisp1, arg->eff_mat_1, + rkisp1_cif_img_eff_mat_1); + rkisp1_write(params->rkisp1, arg->eff_mat_2, + rkisp1_cif_img_eff_mat_2); + rkisp1_write(params->rkisp1, arg->eff_mat_3, + 
rkisp1_cif_img_eff_mat_3); + break; + case v4l2_colorfx_sketch: + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_sketch; + rkisp1_write(params->rkisp1, arg->eff_mat_3, + rkisp1_cif_img_eff_mat_3); + rkisp1_write(params->rkisp1, arg->eff_mat_4, + rkisp1_cif_img_eff_mat_4); + rkisp1_write(params->rkisp1, arg->eff_mat_5, + rkisp1_cif_img_eff_mat_5); + break; + case v4l2_colorfx_bw: + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_blackwhite; + break; + case v4l2_colorfx_negative: + eff_ctrl |= rkisp1_cif_img_eff_ctrl_mode_negative; + break; + default: + break; + } + + rkisp1_write(params->rkisp1, eff_ctrl, rkisp1_cif_img_eff_ctrl); +} + +static void rkisp1_ie_enable(struct rkisp1_params *params, bool en) +{ + if (en) { + rkisp1_param_set_bits(params, rkisp1_cif_iccl, + rkisp1_cif_iccl_ie_clk); + rkisp1_write(params->rkisp1, rkisp1_cif_img_eff_ctrl_enable, + rkisp1_cif_img_eff_ctrl); + rkisp1_param_set_bits(params, rkisp1_cif_img_eff_ctrl, + rkisp1_cif_img_eff_ctrl_cfg_upd); + } else { + rkisp1_param_clear_bits(params, rkisp1_cif_img_eff_ctrl, + rkisp1_cif_img_eff_ctrl_enable); + rkisp1_param_clear_bits(params, rkisp1_cif_iccl, + rkisp1_cif_iccl_ie_clk); + } +} + +static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range) +{ + static const u16 full_range_coeff[] = { + 0x0026, 0x004b, 0x000f, + 0x01ea, 0x01d6, 0x0040, + 0x0040, 0x01ca, 0x01f6 + }; + static const u16 limited_range_coeff[] = { + 0x0021, 0x0040, 0x000d, + 0x01ed, 0x01db, 0x0038, + 0x0038, 0x01d1, 0x01f7, + }; + unsigned int i; + + if (full_range) { + for (i = 0; i < array_size(full_range_coeff); i++) + rkisp1_write(params->rkisp1, full_range_coeff[i], + rkisp1_cif_isp_cc_coeff_0 + i * 4); + + rkisp1_param_set_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_csm_y_full_ena | + rkisp1_cif_isp_ctrl_isp_csm_c_full_ena); + } else { + for (i = 0; i < array_size(limited_range_coeff); i++) + rkisp1_write(params->rkisp1, limited_range_coeff[i], + rkisp1_cif_isp_cc_coeff_0 + i * 4); + + 
rkisp1_param_clear_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_csm_y_full_ena | + rkisp1_cif_isp_ctrl_isp_csm_c_full_ena); + } +} + +/* isp de-noise pre-filter(dpf) function */ +static void rkisp1_dpf_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_dpf_config *arg) +{ + unsigned int isp_dpf_mode, spatial_coeff, i; + + switch (arg->gain.mode) { + case rkisp1_cif_isp_dpf_gain_usage_nf_gains: + isp_dpf_mode = rkisp1_cif_isp_dpf_mode_use_nf_gain | + rkisp1_cif_isp_dpf_mode_awb_gain_comp; + break; + case rkisp1_cif_isp_dpf_gain_usage_lsc_gains: + isp_dpf_mode = rkisp1_cif_isp_dpf_mode_lsc_gain_comp; + break; + case rkisp1_cif_isp_dpf_gain_usage_nf_lsc_gains: + isp_dpf_mode = rkisp1_cif_isp_dpf_mode_use_nf_gain | + rkisp1_cif_isp_dpf_mode_awb_gain_comp | + rkisp1_cif_isp_dpf_mode_lsc_gain_comp; + break; + case rkisp1_cif_isp_dpf_gain_usage_awb_gains: + isp_dpf_mode = rkisp1_cif_isp_dpf_mode_awb_gain_comp; + break; + case rkisp1_cif_isp_dpf_gain_usage_awb_lsc_gains: + isp_dpf_mode = rkisp1_cif_isp_dpf_mode_lsc_gain_comp | + rkisp1_cif_isp_dpf_mode_awb_gain_comp; + break; + case rkisp1_cif_isp_dpf_gain_usage_disabled: + default: + isp_dpf_mode = 0; + break; + } + + if (arg->nll.scale_mode == rkisp1_cif_isp_nll_scale_logarithmic) + isp_dpf_mode |= rkisp1_cif_isp_dpf_mode_nll_segmentation; + if (arg->rb_flt.fltsize == rkisp1_cif_isp_dpf_rb_filtersize_9x9) + isp_dpf_mode |= rkisp1_cif_isp_dpf_mode_rb_fltsize_9x9; + if (!arg->rb_flt.r_enable) + isp_dpf_mode |= rkisp1_cif_isp_dpf_mode_r_flt_dis; + if (!arg->rb_flt.b_enable) + isp_dpf_mode |= rkisp1_cif_isp_dpf_mode_b_flt_dis; + if (!arg->g_flt.gb_enable) + isp_dpf_mode |= rkisp1_cif_isp_dpf_mode_gb_flt_dis; + if (!arg->g_flt.gr_enable) + isp_dpf_mode |= rkisp1_cif_isp_dpf_mode_gr_flt_dis; + + rkisp1_param_set_bits(params, rkisp1_cif_isp_dpf_mode, + isp_dpf_mode); + rkisp1_write(params->rkisp1, arg->gain.nf_b_gain, + rkisp1_cif_isp_dpf_nf_gain_b); + rkisp1_write(params->rkisp1, 
arg->gain.nf_r_gain, + rkisp1_cif_isp_dpf_nf_gain_r); + rkisp1_write(params->rkisp1, arg->gain.nf_gb_gain, + rkisp1_cif_isp_dpf_nf_gain_gb); + rkisp1_write(params->rkisp1, arg->gain.nf_gr_gain, + rkisp1_cif_isp_dpf_nf_gain_gr); + + for (i = 0; i < rkisp1_cif_isp_dpf_max_nlf_coeffs; i++) { + rkisp1_write(params->rkisp1, arg->nll.coeff[i], + rkisp1_cif_isp_dpf_null_coeff_0 + i * 4); + } + + spatial_coeff = arg->g_flt.spatial_coeff[0] | + (arg->g_flt.spatial_coeff[1] << 8) | + (arg->g_flt.spatial_coeff[2] << 16) | + (arg->g_flt.spatial_coeff[3] << 24); + rkisp1_write(params->rkisp1, spatial_coeff, + rkisp1_cif_isp_dpf_s_weight_g_1_4); + + spatial_coeff = arg->g_flt.spatial_coeff[4] | + (arg->g_flt.spatial_coeff[5] << 8); + rkisp1_write(params->rkisp1, spatial_coeff, + rkisp1_cif_isp_dpf_s_weight_g_5_6); + + spatial_coeff = arg->rb_flt.spatial_coeff[0] | + (arg->rb_flt.spatial_coeff[1] << 8) | + (arg->rb_flt.spatial_coeff[2] << 16) | + (arg->rb_flt.spatial_coeff[3] << 24); + rkisp1_write(params->rkisp1, spatial_coeff, + rkisp1_cif_isp_dpf_s_weight_rb_1_4); + + spatial_coeff = arg->rb_flt.spatial_coeff[4] | + (arg->rb_flt.spatial_coeff[5] << 8); + rkisp1_write(params->rkisp1, spatial_coeff, + rkisp1_cif_isp_dpf_s_weight_rb_5_6); +} + +static void +rkisp1_dpf_strength_config(struct rkisp1_params *params, + const struct rkisp1_cif_isp_dpf_strength_config *arg) +{ + rkisp1_write(params->rkisp1, arg->b, rkisp1_cif_isp_dpf_strength_b); + rkisp1_write(params->rkisp1, arg->g, rkisp1_cif_isp_dpf_strength_g); + rkisp1_write(params->rkisp1, arg->r, rkisp1_cif_isp_dpf_strength_r); +} + +static void +rkisp1_isp_isr_other_config(struct rkisp1_params *params, + const struct rkisp1_params_cfg *new_params) +{ + unsigned int module_en_update, module_cfg_update, module_ens; + + module_en_update = new_params->module_en_update; + module_cfg_update = new_params->module_cfg_update; + module_ens = new_params->module_ens; + + if ((module_en_update & rkisp1_cif_isp_module_dpcc) || + 
(module_cfg_update & rkisp1_cif_isp_module_dpcc)) { + /*update dpc config */ + if ((module_cfg_update & rkisp1_cif_isp_module_dpcc)) + rkisp1_dpcc_config(params, + &new_params->others.dpcc_config); + + if (module_en_update & rkisp1_cif_isp_module_dpcc) { + if (!!(module_ens & rkisp1_cif_isp_module_dpcc)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_dpcc_mode, + rkisp1_cif_isp_dpcc_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_dpcc_mode, + rkisp1_cif_isp_dpcc_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_bls) || + (module_cfg_update & rkisp1_cif_isp_module_bls)) { + /* update bls config */ + if ((module_cfg_update & rkisp1_cif_isp_module_bls)) + rkisp1_bls_config(params, + &new_params->others.bls_config); + + if (module_en_update & rkisp1_cif_isp_module_bls) { + if (!!(module_ens & rkisp1_cif_isp_module_bls)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_bls_ctrl, + rkisp1_cif_isp_bls_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_bls_ctrl, + rkisp1_cif_isp_bls_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_sdg) || + (module_cfg_update & rkisp1_cif_isp_module_sdg)) { + /* update sdg config */ + if ((module_cfg_update & rkisp1_cif_isp_module_sdg)) + rkisp1_sdg_config(params, + &new_params->others.sdg_config); + + if (module_en_update & rkisp1_cif_isp_module_sdg) { + if (!!(module_ens & rkisp1_cif_isp_module_sdg)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_in_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_in_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_lsc) || + (module_cfg_update & rkisp1_cif_isp_module_lsc)) { + /* update lsc config */ + if ((module_cfg_update & rkisp1_cif_isp_module_lsc)) + rkisp1_lsc_config(params, + &new_params->others.lsc_config); + + if (module_en_update & rkisp1_cif_isp_module_lsc) { + if (!!(module_ens & rkisp1_cif_isp_module_lsc)) + 
rkisp1_param_set_bits(params, + rkisp1_cif_isp_lsc_ctrl, + rkisp1_cif_isp_lsc_ctrl_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_lsc_ctrl, + rkisp1_cif_isp_lsc_ctrl_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_awb_gain) || + (module_cfg_update & rkisp1_cif_isp_module_awb_gain)) { + /* update awb gains */ + if ((module_cfg_update & rkisp1_cif_isp_module_awb_gain)) + rkisp1_awb_gain_config(params, + &new_params->others.awb_gain_config); + + if (module_en_update & rkisp1_cif_isp_module_awb_gain) { + if (!!(module_ens & rkisp1_cif_isp_module_awb_gain)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_awb_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_awb_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_bdm) || + (module_cfg_update & rkisp1_cif_isp_module_bdm)) { + /* update bdm config */ + if ((module_cfg_update & rkisp1_cif_isp_module_bdm)) + rkisp1_bdm_config(params, + &new_params->others.bdm_config); + + if (module_en_update & rkisp1_cif_isp_module_bdm) { + if (!!(module_ens & rkisp1_cif_isp_module_bdm)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_demosaic, + rkisp1_cif_isp_demosaic_bypass); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_demosaic, + rkisp1_cif_isp_demosaic_bypass); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_flt) || + (module_cfg_update & rkisp1_cif_isp_module_flt)) { + /* update filter config */ + if ((module_cfg_update & rkisp1_cif_isp_module_flt)) + rkisp1_flt_config(params, + &new_params->others.flt_config); + + if (module_en_update & rkisp1_cif_isp_module_flt) { + if (!!(module_ens & rkisp1_cif_isp_module_flt)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_filt_mode, + rkisp1_cif_isp_flt_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_filt_mode, + rkisp1_cif_isp_flt_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_ctk) || + (module_cfg_update & 
rkisp1_cif_isp_module_ctk)) { + /* update ctk config */ + if ((module_cfg_update & rkisp1_cif_isp_module_ctk)) + rkisp1_ctk_config(params, + &new_params->others.ctk_config); + + if (module_en_update & rkisp1_cif_isp_module_ctk) + rkisp1_ctk_enable(params, + !!(module_ens & rkisp1_cif_isp_module_ctk)); + } + + if ((module_en_update & rkisp1_cif_isp_module_goc) || + (module_cfg_update & rkisp1_cif_isp_module_goc)) { + /* update goc config */ + if ((module_cfg_update & rkisp1_cif_isp_module_goc)) + rkisp1_goc_config(params, + &new_params->others.goc_config); + + if (module_en_update & rkisp1_cif_isp_module_goc) { + if (!!(module_ens & rkisp1_cif_isp_module_goc)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_out_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_out_ena); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_cproc) || + (module_cfg_update & rkisp1_cif_isp_module_cproc)) { + /* update cproc config */ + if ((module_cfg_update & rkisp1_cif_isp_module_cproc)) { + rkisp1_cproc_config(params, + &new_params->others.cproc_config); + } + + if (module_en_update & rkisp1_cif_isp_module_cproc) { + if (!!(module_ens & rkisp1_cif_isp_module_cproc)) + rkisp1_param_set_bits(params, + rkisp1_cif_c_proc_ctrl, + rkisp1_cif_c_proc_ctr_enable); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_c_proc_ctrl, + rkisp1_cif_c_proc_ctr_enable); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_ie) || + (module_cfg_update & rkisp1_cif_isp_module_ie)) { + /* update ie config */ + if ((module_cfg_update & rkisp1_cif_isp_module_ie)) + rkisp1_ie_config(params, + &new_params->others.ie_config); + + if (module_en_update & rkisp1_cif_isp_module_ie) + rkisp1_ie_enable(params, + !!(module_ens & rkisp1_cif_isp_module_ie)); + } + + if ((module_en_update & rkisp1_cif_isp_module_dpf) || + (module_cfg_update & rkisp1_cif_isp_module_dpf)) { + /* update dpf config */ + if 
((module_cfg_update & rkisp1_cif_isp_module_dpf)) + rkisp1_dpf_config(params, + &new_params->others.dpf_config); + + if (module_en_update & rkisp1_cif_isp_module_dpf) { + if (!!(module_ens & rkisp1_cif_isp_module_dpf)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_dpf_mode, + rkisp1_cif_isp_dpf_mode_en); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_dpf_mode, + rkisp1_cif_isp_dpf_mode_en); + } + } + + if ((module_en_update & rkisp1_cif_isp_module_dpf_strength) || + (module_cfg_update & rkisp1_cif_isp_module_dpf_strength)) { + /* update dpf strength config */ + rkisp1_dpf_strength_config(params, + &new_params->others.dpf_strength_config); + } +} + +static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params, + struct rkisp1_params_cfg *new_params) +{ + unsigned int module_en_update, module_cfg_update, module_ens; + + module_en_update = new_params->module_en_update; + module_cfg_update = new_params->module_cfg_update; + module_ens = new_params->module_ens; + + if ((module_en_update & rkisp1_cif_isp_module_awb) || + (module_cfg_update & rkisp1_cif_isp_module_awb)) { + /* update awb config */ + if ((module_cfg_update & rkisp1_cif_isp_module_awb)) + rkisp1_awb_meas_config(params, + &new_params->meas.awb_meas_config); + + if (module_en_update & rkisp1_cif_isp_module_awb) + rkisp1_awb_meas_enable(params, + &new_params->meas.awb_meas_config, + !!(module_ens & rkisp1_cif_isp_module_awb)); + } + + if ((module_en_update & rkisp1_cif_isp_module_afc) || + (module_cfg_update & rkisp1_cif_isp_module_afc)) { + /* update afc config */ + if ((module_cfg_update & rkisp1_cif_isp_module_afc)) + rkisp1_afm_config(params, + &new_params->meas.afc_config); + + if (module_en_update & rkisp1_cif_isp_module_afc) { + if (!!(module_ens & rkisp1_cif_isp_module_afc)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_afm_ctrl, + rkisp1_cif_isp_afm_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_afm_ctrl, + rkisp1_cif_isp_afm_ena); + } + } + + if 
((module_en_update & rkisp1_cif_isp_module_hst) || + (module_cfg_update & rkisp1_cif_isp_module_hst)) { + /* update hst config */ + if ((module_cfg_update & rkisp1_cif_isp_module_hst)) + rkisp1_hst_config(params, + &new_params->meas.hst_config); + + if (module_en_update & rkisp1_cif_isp_module_hst) + rkisp1_hst_enable(params, + &new_params->meas.hst_config, + !!(module_ens & rkisp1_cif_isp_module_hst)); + } + + if ((module_en_update & rkisp1_cif_isp_module_aec) || + (module_cfg_update & rkisp1_cif_isp_module_aec)) { + /* update aec config */ + if ((module_cfg_update & rkisp1_cif_isp_module_aec)) + rkisp1_aec_config(params, + &new_params->meas.aec_config); + + if (module_en_update & rkisp1_cif_isp_module_aec) { + if (!!(module_ens & rkisp1_cif_isp_module_aec)) + rkisp1_param_set_bits(params, + rkisp1_cif_isp_exp_ctrl, + rkisp1_cif_isp_exp_ena); + else + rkisp1_param_clear_bits(params, + rkisp1_cif_isp_exp_ctrl, + rkisp1_cif_isp_exp_ena); + } + } +} + +void rkisp1_params_isr(struct rkisp1_device *rkisp1, u32 isp_mis) +{ + unsigned int frame_sequence = atomic_read(&rkisp1->isp.frame_sequence); + struct rkisp1_params *params = &rkisp1->params; + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf = null; + + spin_lock(¶ms->config_lock); + if (!params->is_streaming) { + spin_unlock(¶ms->config_lock); + return; + } + + /* get one empty buffer */ + if (!list_empty(¶ms->params)) + cur_buf = list_first_entry(¶ms->params, + struct rkisp1_buffer, queue); + spin_unlock(¶ms->config_lock); + + if (!cur_buf) + return; + + new_params = (struct rkisp1_params_cfg *)(cur_buf->vaddr[0]); + + if (isp_mis & rkisp1_cif_isp_frame) { + u32 isp_ctrl; + + rkisp1_isp_isr_other_config(params, new_params); + rkisp1_isp_isr_meas_config(params, new_params); + + /* update shadow register immediately */ + isp_ctrl = rkisp1_read(params->rkisp1, rkisp1_cif_isp_ctrl); + isp_ctrl |= rkisp1_cif_isp_ctrl_isp_cfg_upd; + rkisp1_write(params->rkisp1, isp_ctrl, rkisp1_cif_isp_ctrl); + + 
spin_lock(¶ms->config_lock); + list_del(&cur_buf->queue); + spin_unlock(¶ms->config_lock); + + cur_buf->vb.sequence = frame_sequence; + vb2_buffer_done(&cur_buf->vb.vb2_buf, vb2_buf_state_done); + } +} + +static const struct rkisp1_cif_isp_awb_meas_config rkisp1_awb_params_default_config = { + { + 0, 0, rkisp1_default_width, rkisp1_default_height + }, + rkisp1_cif_isp_awb_mode_ycbcr, 200, 30, 20, 20, 0, 128, 128 +}; + +static const struct rkisp1_cif_isp_aec_config rkisp1_aec_params_default_config = { + rkisp1_cif_isp_exp_measuring_mode_0, + rkisp1_cif_isp_exp_ctrl_autostop_0, + { + rkisp1_default_width >> 2, rkisp1_default_height >> 2, + rkisp1_default_width >> 1, rkisp1_default_height >> 1 + } +}; + +static const struct rkisp1_cif_isp_hst_config rkisp1_hst_params_default_config = { + rkisp1_cif_isp_histogram_mode_rgb_combined, + 3, + { + rkisp1_default_width >> 2, rkisp1_default_height >> 2, + rkisp1_default_width >> 1, rkisp1_default_height >> 1 + }, + { + 0, /* to be filled in with 0x01 at runtime. 
*/ + } +}; + +static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config = { + 1, + { + { + 300, 225, 200, 150 + } + }, + 4, + 14 +}; + +static void rkisp1_params_config_parameter(struct rkisp1_params *params) +{ + struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config; + + spin_lock(¶ms->config_lock); + + rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config); + rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config, + true); + + rkisp1_aec_config(params, &rkisp1_aec_params_default_config); + rkisp1_param_set_bits(params, rkisp1_cif_isp_exp_ctrl, + rkisp1_cif_isp_exp_ena); + + rkisp1_afm_config(params, &rkisp1_afc_params_default_config); + rkisp1_param_set_bits(params, rkisp1_cif_isp_afm_ctrl, + rkisp1_cif_isp_afm_ena); + + memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight)); + rkisp1_hst_config(params, &hst); + rkisp1_param_set_bits(params, rkisp1_cif_isp_hist_prop, + ~rkisp1_cif_isp_hist_prop_mode_mask | + rkisp1_hst_params_default_config.mode); + + /* set the range */ + if (params->quantization == v4l2_quantization_full_range) + rkisp1_csm_config(params, true); + else + rkisp1_csm_config(params, false); + + /* override the default things */ + rkisp1_isp_isr_other_config(params, ¶ms->cur_params); + rkisp1_isp_isr_meas_config(params, ¶ms->cur_params); + + spin_unlock(¶ms->config_lock); +} + +/* not called when the camera active, thus not isr protection. */ +void rkisp1_params_configure(struct rkisp1_params *params, + enum rkisp1_fmt_raw_pat_type bayer_pat, + enum v4l2_quantization quantization) +{ + params->quantization = quantization; + params->raw_type = bayer_pat; + rkisp1_params_config_parameter(params); +} + +/* not called when the camera active, thus not isr protection. 
*/ +void rkisp1_params_disable(struct rkisp1_params *params) +{ + rkisp1_param_clear_bits(params, rkisp1_cif_isp_dpcc_mode, + rkisp1_cif_isp_dpcc_ena); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_lsc_ctrl, + rkisp1_cif_isp_lsc_ctrl_ena); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_bls_ctrl, + rkisp1_cif_isp_bls_ena); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_in_ena); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_gamma_out_ena); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_demosaic, + rkisp1_cif_isp_demosaic_bypass); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_filt_mode, + rkisp1_cif_isp_flt_ena); + rkisp1_awb_meas_enable(params, null, false); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_ctrl, + rkisp1_cif_isp_ctrl_isp_awb_ena); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_exp_ctrl, + rkisp1_cif_isp_exp_ena); + rkisp1_ctk_enable(params, false); + rkisp1_param_clear_bits(params, rkisp1_cif_c_proc_ctrl, + rkisp1_cif_c_proc_ctr_enable); + rkisp1_hst_enable(params, null, false); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_afm_ctrl, + rkisp1_cif_isp_afm_ena); + rkisp1_ie_enable(params, false); + rkisp1_param_clear_bits(params, rkisp1_cif_isp_dpf_mode, + rkisp1_cif_isp_dpf_mode_en); +} + +static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv, + struct v4l2_fmtdesc *f) +{ + struct video_device *video = video_devdata(file); + struct rkisp1_params *params = video_get_drvdata(video); + + if (f->index > 0 || f->type != video->queue->type) + return -einval; + + f->pixelformat = params->vdev_fmt.fmt.meta.dataformat; + + return 0; +} + +static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh, + struct v4l2_format *f) +{ + struct video_device *video = video_devdata(file); + struct rkisp1_params *params = video_get_drvdata(video); + struct v4l2_meta_format *meta = &f->fmt.meta; + + if (f->type != video->queue->type) + return -einval; + + 
memset(meta, 0, sizeof(*meta)); + meta->dataformat = params->vdev_fmt.fmt.meta.dataformat; + meta->buffersize = params->vdev_fmt.fmt.meta.buffersize; + + return 0; +} + +static int rkisp1_params_querycap(struct file *file, + void *priv, struct v4l2_capability *cap) +{ + struct video_device *vdev = video_devdata(file); + + strscpy(cap->driver, rkisp1_driver_name, sizeof(cap->driver)); + strscpy(cap->card, vdev->name, sizeof(cap->card)); + strscpy(cap->bus_info, rkisp1_bus_info, sizeof(cap->bus_info)); + + return 0; +} + +/* isp params video device ioctls */ +static const struct v4l2_ioctl_ops rkisp1_params_ioctl = { + .vidioc_reqbufs = vb2_ioctl_reqbufs, + .vidioc_querybuf = vb2_ioctl_querybuf, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_qbuf = vb2_ioctl_qbuf, + .vidioc_dqbuf = vb2_ioctl_dqbuf, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, + .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_streamon = vb2_ioctl_streamon, + .vidioc_streamoff = vb2_ioctl_streamoff, + .vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out, + .vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out, + .vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out, + .vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out, + .vidioc_querycap = rkisp1_params_querycap, + .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_unsubscribe_event = v4l2_event_unsubscribe, +}; + +static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq, + unsigned int *num_buffers, + unsigned int *num_planes, + unsigned int sizes[], + struct device *alloc_devs[]) +{ + struct rkisp1_params *params = vq->drv_priv; + + *num_buffers = clamp_t(u32, *num_buffers, + rkisp1_isp_params_req_bufs_min, + rkisp1_isp_params_req_bufs_max); + + *num_planes = 1; + + sizes[0] = sizeof(struct rkisp1_params_cfg); + + init_list_head(¶ms->params); + params->is_first_params = true; + + return 0; +} + +static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb) +{ + struct vb2_v4l2_buffer *vbuf = 
to_vb2_v4l2_buffer(vb); + struct rkisp1_buffer *params_buf = + container_of(vbuf, struct rkisp1_buffer, vb); + struct vb2_queue *vq = vb->vb2_queue; + struct rkisp1_params *params = vq->drv_priv; + struct rkisp1_params_cfg *new_params; + unsigned long flags; + unsigned int frame_sequence = + atomic_read(¶ms->rkisp1->isp.frame_sequence); + + if (params->is_first_params) { + new_params = (struct rkisp1_params_cfg *) + (vb2_plane_vaddr(vb, 0)); + vbuf->sequence = frame_sequence; + vb2_buffer_done(¶ms_buf->vb.vb2_buf, vb2_buf_state_done); + params->is_first_params = false; + params->cur_params = *new_params; + return; + } + + params_buf->vaddr[0] = vb2_plane_vaddr(vb, 0); + spin_lock_irqsave(¶ms->config_lock, flags); + list_add_tail(¶ms_buf->queue, ¶ms->params); + spin_unlock_irqrestore(¶ms->config_lock, flags); +} + +static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb) +{ + if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg)) + return -einval; + + vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg)); + + return 0; +} + +static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq) +{ + struct rkisp1_params *params = vq->drv_priv; + struct rkisp1_buffer *buf; + unsigned long flags; + unsigned int i; + + /* stop params input firstly */ + spin_lock_irqsave(¶ms->config_lock, flags); + params->is_streaming = false; + spin_unlock_irqrestore(¶ms->config_lock, flags); + + for (i = 0; i < rkisp1_isp_params_req_bufs_max; i++) { + spin_lock_irqsave(¶ms->config_lock, flags); + if (!list_empty(¶ms->params)) { + buf = list_first_entry(¶ms->params, + struct rkisp1_buffer, queue); + list_del(&buf->queue); + spin_unlock_irqrestore(¶ms->config_lock, + flags); + } else { + spin_unlock_irqrestore(¶ms->config_lock, + flags); + break; + } + + if (buf) + vb2_buffer_done(&buf->vb.vb2_buf, vb2_buf_state_error); + buf = null; + } +} + +static int +rkisp1_params_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) +{ + struct rkisp1_params *params 
= queue->drv_priv; + unsigned long flags; + + spin_lock_irqsave(¶ms->config_lock, flags); + params->is_streaming = true; + spin_unlock_irqrestore(¶ms->config_lock, flags); + + return 0; +} + +static struct vb2_ops rkisp1_params_vb2_ops = { + .queue_setup = rkisp1_params_vb2_queue_setup, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, + .buf_queue = rkisp1_params_vb2_buf_queue, + .buf_prepare = rkisp1_params_vb2_buf_prepare, + .start_streaming = rkisp1_params_vb2_start_streaming, + .stop_streaming = rkisp1_params_vb2_stop_streaming, + +}; + +static struct v4l2_file_operations rkisp1_params_fops = { + .mmap = vb2_fop_mmap, + .unlocked_ioctl = video_ioctl2, + .poll = vb2_fop_poll, + .open = v4l2_fh_open, + .release = vb2_fop_release +}; + +static int rkisp1_params_init_vb2_queue(struct vb2_queue *q, + struct rkisp1_params *params) +{ + struct rkisp1_vdev_node *node; + + node = container_of(q, struct rkisp1_vdev_node, buf_queue); + + q->type = v4l2_buf_type_meta_output; + q->io_modes = vb2_mmap | vb2_userptr | vb2_dmabuf; + q->drv_priv = params; + q->ops = &rkisp1_params_vb2_ops; + q->mem_ops = &vb2_vmalloc_memops; + q->buf_struct_size = sizeof(struct rkisp1_buffer); + q->timestamp_flags = v4l2_buf_flag_timestamp_monotonic; + q->lock = &node->vlock; + + return vb2_queue_init(q); +} + +static void rkisp1_init_params(struct rkisp1_params *params) +{ + params->vdev_fmt.fmt.meta.dataformat = + v4l2_meta_fmt_rk_isp1_params; + params->vdev_fmt.fmt.meta.buffersize = + sizeof(struct rkisp1_params_cfg); +} + +int rkisp1_params_register(struct rkisp1_params *params, + struct v4l2_device *v4l2_dev, + struct rkisp1_device *rkisp1) +{ + struct rkisp1_vdev_node *node = ¶ms->vnode; + struct video_device *vdev = &node->vdev; + int ret; + + params->rkisp1 = rkisp1; + mutex_init(&node->vlock); + spin_lock_init(¶ms->config_lock); + + strscpy(vdev->name, rkisp1_params_dev_name, sizeof(vdev->name)); + + video_set_drvdata(vdev, params); + vdev->ioctl_ops = 
&rkisp1_params_ioctl; + vdev->fops = &rkisp1_params_fops; + vdev->release = video_device_release_empty; + /* + * provide a mutex to v4l2 core. it will be used + * to protect all fops and v4l2 ioctls. + */ + vdev->lock = &node->vlock; + vdev->v4l2_dev = v4l2_dev; + vdev->queue = &node->buf_queue; + vdev->device_caps = v4l2_cap_streaming | v4l2_cap_meta_output; + vdev->vfl_dir = vfl_dir_tx; + rkisp1_params_init_vb2_queue(vdev->queue, params); + rkisp1_init_params(params); + video_set_drvdata(vdev, params); + + node->pad.flags = media_pad_fl_source; + ret = media_entity_pads_init(&vdev->entity, 1, &node->pad); + if (ret) + goto err_release_queue; + ret = video_register_device(vdev, vfl_type_grabber, -1); + if (ret) { + dev_err(&vdev->dev, + "failed to register %s, ret=%d ", vdev->name, ret); + goto err_cleanup_media_entity; + } + return 0; +err_cleanup_media_entity: + media_entity_cleanup(&vdev->entity); +err_release_queue: + vb2_queue_release(vdev->queue); + return ret; +} + +void rkisp1_params_unregister(struct rkisp1_params *params) +{ + struct rkisp1_vdev_node *node = ¶ms->vnode; + struct video_device *vdev = &node->vdev; + + video_unregister_device(vdev); + media_entity_cleanup(&vdev->entity); + vb2_queue_release(vdev->queue); +}
|
Drivers in the Staging area
|
bae1155cf5798cc65fedeecfa82c2f48fa3ed18b
|
jacob chen
|
drivers
|
staging
|
media, rkisp1
|
media: staging: rkisp1: add document for rkisp1 meta buffer format
|
this commit add document for rkisp1 meta buffer format
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['rst']
| 2
| 45
| 0
|
--- diff --git a/drivers/staging/media/rkisp1/documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst b/drivers/staging/media/rkisp1/documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst --- /dev/null +++ b/drivers/staging/media/rkisp1/documentation/media/uapi/v4l/pixfmt-meta-rkisp1-params.rst +.. spdx-license-identifier: (gpl-2.0+ or mit) + +.. _v4l2-meta-fmt-rkisp1-params: + +============================ +v4l2_meta_fmt_rk_isp1_params +============================ + +rockchip isp1 parameters data + +description +=========== + +this format describes input parameters for the rockchip isp1. + +it uses c-struct :c:type:'rkisp1_params_cfg', which is defined in +the ''linux/rkisp1-config.h'' header file. + +the parameters consist of multiple modules. +the module won't be updated if the corresponding bit was not set in module_*_update. + +.. kernel-doc:: include/uapi/linux/rkisp1-config.h + :functions: rkisp1_params_cfg diff --git a/drivers/staging/media/rkisp1/documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst b/drivers/staging/media/rkisp1/documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst --- /dev/null +++ b/drivers/staging/media/rkisp1/documentation/media/uapi/v4l/pixfmt-meta-rkisp1-stat.rst +.. spdx-license-identifier: (gpl-2.0+ or mit) + +.. _v4l2-meta-fmt-rkisp1-stat: + +============================= +v4l2_meta_fmt_rk_isp1_stat_3a +============================= + + +rockchip isp1 statistics data + +description +=========== + +this format describes image color statistics information generated by the rockchip +isp1. + +it uses c-struct :c:type:'rkisp1_stat_buffer', which is defined in +the ''linux/rkisp1-config.h'' header file. + +.. kernel-doc:: include/uapi/linux/rkisp1-config.h + :functions: rkisp1_stat_buffer
|
Drivers in the Staging area
|
49f781153e5997c4f2178a8069b1883718bf2411
|
jacob chen
|
drivers
|
staging
|
documentation, media, rkisp1, uapi, v4l
|
media: staging: rkisp1: add todo file for staging
|
add todo file with requirements to move this driver out of staging.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['todo']
| 1
| 23
| 0
|
--- diff --git a/drivers/staging/media/rkisp1/todo b/drivers/staging/media/rkisp1/todo --- /dev/null +++ b/drivers/staging/media/rkisp1/todo +* fix serialization on subdev ops. +* don't use v4l2_async_notifier_parse_fwnode_endpoints_by_port(). +e.g. isp_parse_of_endpoints in drivers/media/platform/omap3isp/isp.c +cio2_parse_firmware in drivers/media/pci/intel/ipu3/ipu3-cio2.c. +* fix pad format size for statistics and parameters entities. +* use threaded interrupt for rkisp1_stats_isr(), remove work queue. +* fix checkpatch errors. +* make sure uapi structs have the same size and layout in 32 and 62 bits, +and that there are no holes in the structures (pahole is a utility that +can be used to test this). +* review and comment every lock +* handle quantization +* document rkisp1-common.h +* streaming paths (mainpath and selfpath) check if the other path is streaming +in several places of the code, review this, specially that it doesn't seem it +supports streaming from both paths at the same time. + +notes: +* all v4l2-compliance test must pass. +* stats and params can be tested with libcamera and chromiumos stack. + +please cc patches to linux media <linux-media@vger.kernel.org> and +helen koike <helen.koike@collabora.com>.
|
Drivers in the Staging area
|
3b7668a1faadaae5ecae46033bec5a42a398b790
|
helen koike
|
drivers
|
staging
|
media, rkisp1
|
media: maintainers: add entry for rockchip isp1 driver
|
add maintainers entry for the rockchip isp1 driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
rockchip isp driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['media']
|
['maintainers']
| 1
| 6
| 0
|
--- diff --git a/maintainers b/maintainers --- a/maintainers +++ b/maintainers +rockchip isp v1 driver +m: helen koike <helen.koike@collabora.com> +l: linux-media@vger.kernel.org +s: maintained +f: drivers/staging/media/rkisp1/ +
|
Drivers in the Staging area
|
2a0a0bc7020ef7e66c9569d8229d79fa72e3d659
|
helen koike
| |||
staging: rtl8188eu: add device id for mercusys mw150us v2
|
this device was added to the stand-alone driver on github. add it to the staging driver as well.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add device id for mercusys mw150us v2
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['rtl8188eu']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c + {usb_device(0x2c4e, 0x0102)}, /* mercusys mw150us v2 */
|
Drivers in the Staging area
|
bb5786b9286c253557a0115bc8d21879e61b7b94
|
michael straube
|
drivers
|
staging
|
os_dep, rtl8188eu
|
staging: android: delete the 'vsoc' driver
|
the 'vsoc' driver was required for an early iteration of the android 'cuttlefish' virtual platform, but this platform has been wholly converted to use virtio drivers instead. delete this old driver.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
delete the 'vsoc' driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['android']
|
['c', 'kconfig', 'makefile', 'h', 'todo']
| 5
| 0
| 1,462
|
--- diff --git a/drivers/staging/android/kconfig b/drivers/staging/android/kconfig --- a/drivers/staging/android/kconfig +++ b/drivers/staging/android/kconfig -config android_vsoc - tristate "android virtual soc support" - depends on pci_msi - help - this option adds support for the virtual soc driver needed to boot - a 'cuttlefish' android image inside qemu. the driver interacts with - a qemu ivshmem device. if built as a module, it will be called vsoc. - diff --git a/drivers/staging/android/makefile b/drivers/staging/android/makefile --- a/drivers/staging/android/makefile +++ b/drivers/staging/android/makefile -obj-$(config_android_vsoc) += vsoc.o diff --git a/drivers/staging/android/todo b/drivers/staging/android/todo --- a/drivers/staging/android/todo +++ b/drivers/staging/android/todo - split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0) - better test framework (integration with vgem was suggested) -vsoc.c, uapi/vsoc_shm.h - - the current driver uses the same wait queue for all of the futexes in a - region. this will cause false wakeups in regions with a large number of - waiting threads. we should eventually use multiple queues and select the - queue based on the region. - - add debugfs support for examining the permissions of regions. - - remove vsoc_wait_for_incoming_interrupt ioctl. this functionality has been - superseded by the futex and is there for legacy reasons. - diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h --- a/drivers/staging/android/uapi/vsoc_shm.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * copyright (c) 2017 google, inc. - * - */ - -#ifndef _uapi_linux_vsoc_shm_h -#define _uapi_linux_vsoc_shm_h - -#include <linux/types.h> - -/** - * a permission is a token that permits a receiver to read and/or write an area - * of memory within a vsoc region. - * - * an fd_scoped permission grants both read and write access, and can be - * attached to a file description (see open(2)). 
- * ownership of the area can then be shared by passing a file descriptor - * among processes. - * - * begin_offset and end_offset define the area of memory that is controlled by - * the permission. owner_offset points to a word, also in shared memory, that - * controls ownership of the area. - * - * ownership of the region expires when the associated file description is - * released. - * - * at most one permission can be attached to each file description. - * - * this is useful when implementing hals like gralloc that scope and pass - * ownership of shared resources via file descriptors. - * - * the caller is responsibe for doing any fencing. - * - * the calling process will normally identify a currently free area of - * memory. it will construct a proposed fd_scoped_permission_arg structure: - * - * begin_offset and end_offset describe the area being claimed - * - * owner_offset points to the location in shared memory that indicates the - * owner of the area. - * - * owned_value is the value that will be stored in owner_offset iff the - * permission can be granted. it must be different than vsoc_region_free. - * - * two fd_scoped_permission structures are compatible if they vary only by - * their owned_value fields. - * - * the driver ensures that, for any group of simultaneous callers proposing - * compatible fd_scoped_permissions, it will accept exactly one of the - * propopsals. the other callers will get a failure with errno of eagain. - * - * a process receiving a file descriptor can identify the region being - * granted using the vsoc_get_fd_scoped_permission ioctl. - */ -struct fd_scoped_permission { - __u32 begin_offset; - __u32 end_offset; - __u32 owner_offset; - __u32 owned_value; -}; - -/* - * this value represents a free area of memory. the driver expects to see this - * value at owner_offset when creating a permission otherwise it will not do it, - * and will write this value back once the permission is no longer needed. 
- */ -#define vsoc_region_free ((__u32)0) - -/** - * ioctl argument for vsoc_create_fd_scope_permission - */ -struct fd_scoped_permission_arg { - struct fd_scoped_permission perm; - __s32 managed_region_fd; -}; - -#define vsoc_node_free ((__u32)0) - -/* - * describes a signal table in shared memory. each non-zero entry in the - * table indicates that the receiver should signal the futex at the given - * offset. offsets are relative to the region, not the shared memory window. - * - * interrupt_signalled_offset is used to reliably signal interrupts across the - * vmm boundary. there are two roles: transmitter and receiver. for example, - * in the host_to_guest_signal_table the host is the transmitter and the - * guest is the receiver. the protocol is as follows: - * - * 1. the transmitter should convert the offset of the futex to an offset - * in the signal table [0, (1 << num_nodes_lg2)) - * the transmitter can choose any appropriate hashing algorithm, including - * hash = futex_offset & ((1 << num_nodes_lg2) - 1) - * - * 3. the transmitter should atomically compare and swap futex_offset with 0 - * at hash. there are 3 possible outcomes - * a. the swap fails because the futex_offset is already in the table. - * the transmitter should stop. - * b. some other offset is in the table. this is a hash collision. the - * transmitter should move to another table slot and try again. one - * possible algorithm: - * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1) - * c. the swap worked. continue below. - * - * 3. the transmitter atomically swaps 1 with the value at the - * interrupt_signalled_offset. there are two outcomes: - * a. the prior value was 1. in this case an interrupt has already been - * posted. the transmitter is done. - * b. the prior value was 0, indicating that the receiver may be sleeping. - * the transmitter will issue an interrupt. - * - * 4. on waking the receiver immediately exchanges a 0 with the - * interrupt_signalled_offset. 
if it receives a 0 then this a spurious - * interrupt. that may occasionally happen in the current protocol, but - * should be rare. - * - * 5. the receiver scans the signal table by atomicaly exchanging 0 at each - * location. if a non-zero offset is returned from the exchange the - * receiver wakes all sleepers at the given offset: - * futex((int*)(region_base + old_value), futex_wake, max_int); - * - * 6. the receiver thread then does a conditional wait, waking immediately - * if the value at interrupt_signalled_offset is non-zero. this catches cases - * here additional signals were posted while the table was being scanned. - * on the guest the wait is handled via the vsoc_wait_for_incoming_interrupt - * ioctl. - */ -struct vsoc_signal_table_layout { - /* log_2(number of signal table entries) */ - __u32 num_nodes_lg2; - /* - * offset to the first signal table entry relative to the start of the - * region - */ - __u32 futex_uaddr_table_offset; - /* - * offset to an atomic_t / atomic uint32_t. a non-zero value indicates - * that one or more offsets are currently posted in the table. - * semi-unique access to an entry in the table - */ - __u32 interrupt_signalled_offset; -}; - -#define vsoc_region_whole ((__s32)0) -#define vsoc_device_name_sz 16 - -/** - * each hal would (usually) talk to a single device region - * mulitple entities care about these regions: - * - the ivshmem_server will populate the regions in shared memory - * - the guest kernel will read the region, create minor device nodes, and - * allow interested parties to register for futex_wake events in the region - * - hals will access via the minor device nodes published by the guest kernel - * - host side processes will access the region via the ivshmem_server: - * 1. pass name to ivshmem_server at a unix socket - * 2. ivshmemserver will reply with 2 fds: - * - host->guest doorbell fd - * - guest->host doorbell fd - * - fd for the shared memory region - * - region offset - * 3. 
start a futex receiver thread on the doorbell fd pointed at the - * signal_nodes - */ -struct vsoc_device_region { - __u16 current_version; - __u16 min_compatible_version; - __u32 region_begin_offset; - __u32 region_end_offset; - __u32 offset_of_region_data; - struct vsoc_signal_table_layout guest_to_host_signal_table; - struct vsoc_signal_table_layout host_to_guest_signal_table; - /* name of the device. must always be terminated with a '', so - * the longest supported device name is 15 characters. - */ - char device_name[vsoc_device_name_sz]; - /* there are two ways that permissions to access regions are handled: - * - when subdivided_by is vsoc_region_whole, any process that can - * open the device node for the region gains complete access to it. - * - when subdivided is set processes that open the region cannot - * access it. access to a sub-region must be established by invoking - * the vsoc_create_fd_scope_permission ioctl on the region - * referenced in subdivided_by, providing a fileinstance - * (represented by a fd) opened on this region. - */ - __u32 managed_by; -}; - -/* - * the vsoc layout descriptor. - * the first 4k should be reserved for the shm header and region descriptors. - * the regions should be page aligned. - */ - -struct vsoc_shm_layout_descriptor { - __u16 major_version; - __u16 minor_version; - - /* size of the shm. this may be redundant but nice to have */ - __u32 size; - - /* number of shared memory regions */ - __u32 region_count; - - /* the offset to the start of region descriptors */ - __u32 vsoc_region_desc_offset; -}; - -/* - * this specifies the current version that should be stored in - * vsoc_shm_layout_descriptor.major_version and - * vsoc_shm_layout_descriptor.minor_version. - * it should be updated only if the vsoc_device_region and - * vsoc_shm_layout_descriptor structures have changed. - * versioning within each region is transferred - * via the min_compatible_version and current_version fields in - * vsoc_device_region. 
the driver does not consult these fields: they are left - * for the hals and host processes and will change independently of the layout - * version. - */ -#define current_vsoc_layout_major_version 2 -#define current_vsoc_layout_minor_version 0 - -#define vsoc_create_fd_scoped_permission \ - _iow(0xf5, 0, struct fd_scoped_permission) -#define vsoc_get_fd_scoped_permission _ior(0xf5, 1, struct fd_scoped_permission) - -/* - * this is used to signal the host to scan the guest_to_host_signal_table - * for new futexes to wake. this sends an interrupt if one is not already - * in flight. - */ -#define vsoc_maybe_send_interrupt_to_host _io(0xf5, 2) - -/* - * when this returns the guest will scan host_to_guest_signal_table to - * check for new futexes to wake. - */ -/* todo(ghartman): consider moving this to the bottom half */ -#define vsoc_wait_for_incoming_interrupt _io(0xf5, 3) - -/* - * guest hals will use this to retrieve the region description after - * opening their device node. - */ -#define vsoc_describe_region _ior(0xf5, 4, struct vsoc_device_region) - -/* - * wake any threads that may be waiting for a host interrupt on this region. - * this is mostly used during shutdown. - */ -#define vsoc_self_interrupt _io(0xf5, 5) - -/* - * this is used to signal the host to scan the guest_to_host_signal_table - * for new futexes to wake. this sends an interrupt unconditionally. - */ -#define vsoc_send_interrupt_to_host _io(0xf5, 6) - -enum wait_types { - vsoc_wait_undefined = 0, - vsoc_wait_if_equal = 1, - vsoc_wait_if_equal_timeout = 2 -}; - -/* - * wait for a condition to be true - * - * note, this is sized and aligned so the 32 bit and 64 bit layouts are - * identical. 
- */ -struct vsoc_cond_wait { - /* input: offset of the 32 bit word to check */ - __u32 offset; - /* input: value that will be compared with the offset */ - __u32 value; - /* monotonic time to wake at in seconds */ - __u64 wake_time_sec; - /* input: monotonic time to wait in nanoseconds */ - __u32 wake_time_nsec; - /* input: type of wait */ - __u32 wait_type; - /* output: number of times the thread woke before returning. */ - __u32 wakes; - /* ensure that we're 8-byte aligned and 8 byte length for 32/64 bit - * compatibility. - */ - __u32 reserved_1; -}; - -#define vsoc_cond_wait _iowr(0xf5, 7, struct vsoc_cond_wait) - -/* wake any local threads waiting at the offset given in arg */ -#define vsoc_cond_wake _io(0xf5, 8) - -#endif /* _uapi_linux_vsoc_shm_h */ diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c --- a/drivers/staging/android/vsoc.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * drivers/android/staging/vsoc.c - * - * android virtual system on a chip (vsoc) driver - * - * copyright (c) 2017 google, inc. - * - * author: ghartman@google.com - * - * based on drivers/char/kvm_ivshmem.c - driver for kvm inter-vm shared memory - * copyright 2009 cam macdonell <cam@cs.ualberta.ca> - * - * based on cirrusfb.c and 8139cp.c: - * copyright 1999-2001 jeff garzik - * copyright 2001-2004 jeff garzik - */ - -#include <linux/dma-mapping.h> -#include <linux/freezer.h> -#include <linux/futex.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/pci.h> -#include <linux/proc_fs.h> -#include <linux/sched.h> -#include <linux/syscalls.h> -#include <linux/uaccess.h> -#include <linux/interrupt.h> -#include <linux/cdev.h> -#include <linux/file.h> -#include "uapi/vsoc_shm.h" - -#define vsoc_dev_name "vsoc" - -/* - * description of the ivshmem-doorbell pci device used by qemu. 
these - * constants follow docs/specs/ivshmem-spec.txt, which can be found in - * the qemu repository. this was last reconciled with the version that - * came out with 2.8 - */ - -/* - * these constants are determined kvm inter-vm shared memory device - * register offsets - */ -enum { - intr_mask = 0x00, /* interrupt mask */ - intr_status = 0x04, /* interrupt status */ - iv_position = 0x08, /* vm id */ - doorbell = 0x0c, /* doorbell */ -}; - -static const int register_bar; /* equal to 0 */ -static const int max_register_bar_len = 0x100; -/* - * the msi-x bar is not used directly. - * - * static const int msi_x_bar = 1; - */ -static const int shared_memory_bar = 2; - -struct vsoc_region_data { - char name[vsoc_device_name_sz + 1]; - wait_queue_head_t interrupt_wait_queue; - /* todo(b/73664181): use multiple futex wait queues */ - wait_queue_head_t futex_wait_queue; - /* flag indicating that an interrupt has been signalled by the host. */ - atomic_t *incoming_signalled; - /* flag indicating the guest has signalled the host. */ - atomic_t *outgoing_signalled; - bool irq_requested; - bool device_created; -}; - -struct vsoc_device { - /* kernel virtual address of register_bar. */ - void __iomem *regs; - /* physical address of shared_memory_bar. */ - phys_addr_t shm_phys_start; - /* kernel virtual address of shared_memory_bar. */ - void __iomem *kernel_mapped_shm; - /* size of the entire shared memory window in bytes. */ - size_t shm_size; - /* - * pointer to the virtual address of the shared memory layout structure. - * this is probably identical to kernel_mapped_shm, but saving this - * here saves a lot of annoying casts. - */ - struct vsoc_shm_layout_descriptor *layout; - /* - * points to a table of region descriptors in the kernel's virtual - * address space. calculated from - * vsoc_shm_layout_descriptor.vsoc_region_desc_offset - */ - struct vsoc_device_region *regions; - /* head of a list of permissions that have been granted. 
*/ - struct list_head permissions; - struct pci_dev *dev; - /* per-region (and therefore per-interrupt) information. */ - struct vsoc_region_data *regions_data; - /* - * table of msi-x entries. this has to be separated from struct - * vsoc_region_data because the kernel deals with them as an array. - */ - struct msix_entry *msix_entries; - /* mutex that protectes the permission list */ - struct mutex mtx; - /* major number assigned by the kernel */ - int major; - /* character device assigned by the kernel */ - struct cdev cdev; - /* device class assigned by the kernel */ - struct class *class; - /* - * flags that indicate what we've initialized. these are used to do an - * orderly cleanup of the device. - */ - bool enabled_device; - bool requested_regions; - bool cdev_added; - bool class_added; - bool msix_enabled; -}; - -static struct vsoc_device vsoc_dev; - -/* - * todo(ghartman): add a /sys filesystem entry that summarizes the permissions. - */ - -struct fd_scoped_permission_node { - struct fd_scoped_permission permission; - struct list_head list; -}; - -struct vsoc_private_data { - struct fd_scoped_permission_node *fd_scoped_permission_node; -}; - -static long vsoc_ioctl(struct file *, unsigned int, unsigned long); -static int vsoc_mmap(struct file *, struct vm_area_struct *); -static int vsoc_open(struct inode *, struct file *); -static int vsoc_release(struct inode *, struct file *); -static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *); -static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *); -static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin); -static int -do_create_fd_scoped_permission(struct vsoc_device_region *region_p, - struct fd_scoped_permission_node *np, - struct fd_scoped_permission_arg __user *arg); -static void -do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p, - struct fd_scoped_permission *perm); -static long do_vsoc_describe_region(struct file *, - struct 
vsoc_device_region __user *); -static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off); - -/** - * validate arguments on entry points to the driver. - */ -inline int vsoc_validate_inode(struct inode *inode) -{ - if (iminor(inode) >= vsoc_dev.layout->region_count) { - dev_err(&vsoc_dev.dev->dev, - "describe_region: invalid region %d ", iminor(inode)); - return -enodev; - } - return 0; -} - -inline int vsoc_validate_filep(struct file *filp) -{ - int ret = vsoc_validate_inode(file_inode(filp)); - - if (ret) - return ret; - if (!filp->private_data) { - dev_err(&vsoc_dev.dev->dev, - "no private data on fd, region %d ", - iminor(file_inode(filp))); - return -ebadfd; - } - return 0; -} - -/* converts from shared memory offset to virtual address */ -static inline void *shm_off_to_virtual_addr(__u32 offset) -{ - return (void __force *)vsoc_dev.kernel_mapped_shm + offset; -} - -/* converts from shared memory offset to physical address */ -static inline phys_addr_t shm_off_to_phys_addr(__u32 offset) -{ - return vsoc_dev.shm_phys_start + offset; -} - -/** - * convenience functions to obtain the region from the inode or file. - * dangerous to call before validating the inode/file. 
- */ -static -inline struct vsoc_device_region *vsoc_region_from_inode(struct inode *inode) -{ - return &vsoc_dev.regions[iminor(inode)]; -} - -static -inline struct vsoc_device_region *vsoc_region_from_filep(struct file *inode) -{ - return vsoc_region_from_inode(file_inode(inode)); -} - -static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r) -{ - return r->region_end_offset - r->region_begin_offset; -} - -static const struct file_operations vsoc_ops = { - .owner = this_module, - .open = vsoc_open, - .mmap = vsoc_mmap, - .read = vsoc_read, - .unlocked_ioctl = vsoc_ioctl, - .compat_ioctl = vsoc_ioctl, - .write = vsoc_write, - .llseek = vsoc_lseek, - .release = vsoc_release, -}; - -static struct pci_device_id vsoc_id_table[] = { - {0x1af4, 0x1110, pci_any_id, pci_any_id, 0, 0, 0}, - {0}, -}; - -module_device_table(pci, vsoc_id_table); - -static void vsoc_remove_device(struct pci_dev *pdev); -static int vsoc_probe_device(struct pci_dev *pdev, - const struct pci_device_id *ent); - -static struct pci_driver vsoc_pci_driver = { - .name = "vsoc", - .id_table = vsoc_id_table, - .probe = vsoc_probe_device, - .remove = vsoc_remove_device, -}; - -static int -do_create_fd_scoped_permission(struct vsoc_device_region *region_p, - struct fd_scoped_permission_node *np, - struct fd_scoped_permission_arg __user *arg) -{ - struct file *managed_filp; - s32 managed_fd; - atomic_t *owner_ptr = null; - struct vsoc_device_region *managed_region_p; - - if (copy_from_user(&np->permission, - &arg->perm, sizeof(np->permission)) || - copy_from_user(&managed_fd, - &arg->managed_region_fd, sizeof(managed_fd))) { - return -efault; - } - managed_filp = fdget(managed_fd).file; - /* check that it's a valid fd, */ - if (!managed_filp || vsoc_validate_filep(managed_filp)) - return -eperm; - /* eexist if the given fd already has a permission. 
*/ - if (((struct vsoc_private_data *)managed_filp->private_data)-> - fd_scoped_permission_node) - return -eexist; - managed_region_p = vsoc_region_from_filep(managed_filp); - /* check that the provided region is managed by this one */ - if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p) - return -eperm; - /* the area must be well formed and have non-zero size */ - if (np->permission.begin_offset >= np->permission.end_offset) - return -einval; - /* the area must fit in the memory window */ - if (np->permission.end_offset > - vsoc_device_region_size(managed_region_p)) - return -erange; - /* the area must be in the region data section */ - if (np->permission.begin_offset < - managed_region_p->offset_of_region_data) - return -erange; - /* the area must be page aligned */ - if (!page_aligned(np->permission.begin_offset) || - !page_aligned(np->permission.end_offset)) - return -einval; - /* owner offset must be naturally aligned in the window */ - if (np->permission.owner_offset & - (sizeof(np->permission.owner_offset) - 1)) - return -einval; - /* the owner flag must reside in the owner memory */ - if (np->permission.owner_offset + sizeof(np->permission.owner_offset) > - vsoc_device_region_size(region_p)) - return -erange; - /* the owner flag must reside in the data section */ - if (np->permission.owner_offset < region_p->offset_of_region_data) - return -einval; - /* the owner value must change to claim the memory */ - if (np->permission.owned_value == vsoc_region_free) - return -einval; - owner_ptr = - (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset + - np->permission.owner_offset); - /* we've already verified that this is in the shared memory window, so - * it should be safe to write to this address. 
- */ - if (atomic_cmpxchg(owner_ptr, - vsoc_region_free, - np->permission.owned_value) != vsoc_region_free) { - return -ebusy; - } - ((struct vsoc_private_data *)managed_filp->private_data)-> - fd_scoped_permission_node = np; - /* the file offset needs to be adjusted if the calling - * process did any read/write operations on the fd - * before creating the permission. - */ - if (managed_filp->f_pos) { - if (managed_filp->f_pos > np->permission.end_offset) { - /* if the offset is beyond the permission end, set it - * to the end. - */ - managed_filp->f_pos = np->permission.end_offset; - } else { - /* if the offset is within the permission interval - * keep it there otherwise reset it to zero. - */ - if (managed_filp->f_pos < np->permission.begin_offset) { - managed_filp->f_pos = 0; - } else { - managed_filp->f_pos -= - np->permission.begin_offset; - } - } - } - return 0; -} - -static void -do_destroy_fd_scoped_permission_node(struct vsoc_device_region *owner_region_p, - struct fd_scoped_permission_node *node) -{ - if (node) { - do_destroy_fd_scoped_permission(owner_region_p, - &node->permission); - mutex_lock(&vsoc_dev.mtx); - list_del(&node->list); - mutex_unlock(&vsoc_dev.mtx); - kfree(node); - } -} - -static void -do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p, - struct fd_scoped_permission *perm) -{ - atomic_t *owner_ptr = null; - int prev = 0; - - if (!perm) - return; - owner_ptr = (atomic_t *)shm_off_to_virtual_addr - (owner_region_p->region_begin_offset + perm->owner_offset); - prev = atomic_xchg(owner_ptr, vsoc_region_free); - if (prev != perm->owned_value) - dev_err(&vsoc_dev.dev->dev, - "%x-%x: owner (%s) %x: expected to be %x was %x", - perm->begin_offset, perm->end_offset, - owner_region_p->device_name, perm->owner_offset, - perm->owned_value, prev); -} - -static long do_vsoc_describe_region(struct file *filp, - struct vsoc_device_region __user *dest) -{ - struct vsoc_device_region *region_p; - int retval = 
vsoc_validate_filep(filp); - - if (retval) - return retval; - region_p = vsoc_region_from_filep(filp); - if (copy_to_user(dest, region_p, sizeof(*region_p))) - return -efault; - return 0; -} - -/** - * implements the inner logic of cond_wait. copies to and from userspace are - * done in the helper function below. - */ -static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) -{ - define_wait(wait); - u32 region_number = iminor(file_inode(filp)); - struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; - struct hrtimer_sleeper timeout, *to = null; - int ret = 0; - struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); - atomic_t *address = null; - ktime_t wake_time; - - /* ensure that the offset is aligned */ - if (arg->offset & (sizeof(uint32_t) - 1)) - return -eaddrnotavail; - /* ensure that the offset is within shared memory */ - if (((uint64_t)arg->offset) + region_p->region_begin_offset + - sizeof(uint32_t) > region_p->region_end_offset) - return -e2big; - address = shm_off_to_virtual_addr(region_p->region_begin_offset + - arg->offset); - - /* ensure that the type of wait is valid */ - switch (arg->wait_type) { - case vsoc_wait_if_equal: - break; - case vsoc_wait_if_equal_timeout: - to = &timeout; - break; - default: - return -einval; - } - - if (to) { - /* copy the user-supplied timesec into the kernel structure. - * we do things this way to flatten differences between 32 bit - * and 64 bit timespecs. - */ - if (arg->wake_time_nsec >= nsec_per_sec) - return -einval; - wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); - - hrtimer_init_sleeper_on_stack(to, clock_monotonic, - hrtimer_mode_abs); - hrtimer_set_expires_range_ns(&to->timer, wake_time, - current->timer_slack_ns); - } - - while (1) { - prepare_to_wait(&data->futex_wait_queue, &wait, - task_interruptible); - /* - * check the sentinel value after prepare_to_wait. 
if the value - * changes after this check the writer will call signal, - * changing the task state from interruptible to running. that - * will ensure that schedule() will eventually schedule this - * task. - */ - if (atomic_read(address) != arg->value) { - ret = 0; - break; - } - if (to) { - hrtimer_sleeper_start_expires(to, hrtimer_mode_abs); - if (likely(to->task)) - freezable_schedule(); - hrtimer_cancel(&to->timer); - if (!to->task) { - ret = -etimedout; - break; - } - } else { - freezable_schedule(); - } - /* count the number of times that we woke up. this is useful - * for unit testing. - */ - ++arg->wakes; - if (signal_pending(current)) { - ret = -eintr; - break; - } - } - finish_wait(&data->futex_wait_queue, &wait); - if (to) - destroy_hrtimer_on_stack(&to->timer); - return ret; -} - -/** - * handles the details of copying from/to userspace to ensure that the copies - * happen on all of the return paths of cond_wait. - */ -static int do_vsoc_cond_wait(struct file *filp, - struct vsoc_cond_wait __user *untrusted_in) -{ - struct vsoc_cond_wait arg; - int rval = 0; - - if (copy_from_user(&arg, untrusted_in, sizeof(arg))) - return -efault; - /* wakes is an out parameter. initialize it to something sensible. */ - arg.wakes = 0; - rval = handle_vsoc_cond_wait(filp, &arg); - if (copy_to_user(untrusted_in, &arg, sizeof(arg))) - return -efault; - return rval; -} - -static int do_vsoc_cond_wake(struct file *filp, uint32_t offset) -{ - struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); - u32 region_number = iminor(file_inode(filp)); - struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; - /* ensure that the offset is aligned */ - if (offset & (sizeof(uint32_t) - 1)) - return -eaddrnotavail; - /* ensure that the offset is within shared memory */ - if (((uint64_t)offset) + region_p->region_begin_offset + - sizeof(uint32_t) > region_p->region_end_offset) - return -e2big; - /* - * todo(b/73664181): use multiple futex wait queues. 
- * we need to wake every sleeper when the condition changes. typically - * only a single thread will be waiting on the condition, but there - * are exceptions. the worst case is about 10 threads. - */ - wake_up_interruptible_all(&data->futex_wait_queue); - return 0; -} - -static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int rv = 0; - struct vsoc_device_region *region_p; - u32 reg_num; - struct vsoc_region_data *reg_data; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - region_p = vsoc_region_from_filep(filp); - reg_num = iminor(file_inode(filp)); - reg_data = vsoc_dev.regions_data + reg_num; - switch (cmd) { - case vsoc_create_fd_scoped_permission: - { - struct fd_scoped_permission_node *node = null; - - node = kzalloc(sizeof(*node), gfp_kernel); - /* we can't allocate memory for the permission */ - if (!node) - return -enomem; - init_list_head(&node->list); - rv = do_create_fd_scoped_permission - (region_p, - node, - (struct fd_scoped_permission_arg __user *)arg); - if (!rv) { - mutex_lock(&vsoc_dev.mtx); - list_add(&node->list, &vsoc_dev.permissions); - mutex_unlock(&vsoc_dev.mtx); - } else { - kfree(node); - return rv; - } - } - break; - - case vsoc_get_fd_scoped_permission: - { - struct fd_scoped_permission_node *node = - ((struct vsoc_private_data *)filp->private_data)-> - fd_scoped_permission_node; - if (!node) - return -enoent; - if (copy_to_user - ((struct fd_scoped_permission __user *)arg, - &node->permission, sizeof(node->permission))) - return -efault; - } - break; - - case vsoc_maybe_send_interrupt_to_host: - if (!atomic_xchg(reg_data->outgoing_signalled, 1)) { - writel(reg_num, vsoc_dev.regs + doorbell); - return 0; - } else { - return -ebusy; - } - break; - - case vsoc_send_interrupt_to_host: - writel(reg_num, vsoc_dev.regs + doorbell); - return 0; - case vsoc_wait_for_incoming_interrupt: - wait_event_interruptible - (reg_data->interrupt_wait_queue, - 
(atomic_read(reg_data->incoming_signalled) != 0)); - break; - - case vsoc_describe_region: - return do_vsoc_describe_region - (filp, - (struct vsoc_device_region __user *)arg); - - case vsoc_self_interrupt: - atomic_set(reg_data->incoming_signalled, 1); - wake_up_interruptible(®_data->interrupt_wait_queue); - break; - - case vsoc_cond_wait: - return do_vsoc_cond_wait(filp, - (struct vsoc_cond_wait __user *)arg); - case vsoc_cond_wake: - return do_vsoc_cond_wake(filp, arg); - - default: - return -einval; - } - return 0; -} - -static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len, - loff_t *poffset) -{ - __u32 area_off; - const void *area_p; - ssize_t area_len; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, &area_off); - area_p = shm_off_to_virtual_addr(area_off); - area_p += *poffset; - area_len -= *poffset; - if (area_len <= 0) - return 0; - if (area_len < len) - len = area_len; - if (copy_to_user(buffer, area_p, len)) - return -efault; - *poffset += len; - return len; -} - -static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin) -{ - ssize_t area_len = 0; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, null); - switch (origin) { - case seek_set: - break; - - case seek_cur: - if (offset > 0 && offset + filp->f_pos < 0) - return -eoverflow; - offset += filp->f_pos; - break; - - case seek_end: - if (offset > 0 && offset + area_len < 0) - return -eoverflow; - offset += area_len; - break; - - case seek_data: - if (offset >= area_len) - return -einval; - if (offset < 0) - offset = 0; - break; - - case seek_hole: - /* next hole is always the end of the region, unless offset is - * beyond that - */ - if (offset < area_len) - offset = area_len; - break; - - default: - return -einval; - } - - if (offset < 0 || offset > area_len) - return -einval; - filp->f_pos = offset; - - return offset; -} - -static ssize_t 
vsoc_write(struct file *filp, const char __user *buffer, - size_t len, loff_t *poffset) -{ - __u32 area_off; - void *area_p; - ssize_t area_len; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, &area_off); - area_p = shm_off_to_virtual_addr(area_off); - area_p += *poffset; - area_len -= *poffset; - if (area_len <= 0) - return 0; - if (area_len < len) - len = area_len; - if (copy_from_user(area_p, buffer, len)) - return -efault; - *poffset += len; - return len; -} - -static irqreturn_t vsoc_interrupt(int irq, void *region_data_v) -{ - struct vsoc_region_data *region_data = - (struct vsoc_region_data *)region_data_v; - int reg_num = region_data - vsoc_dev.regions_data; - - if (unlikely(!region_data)) - return irq_none; - - if (unlikely(reg_num < 0 || - reg_num >= vsoc_dev.layout->region_count)) { - dev_err(&vsoc_dev.dev->dev, - "invalid irq @%p reg_num=0x%04x ", - region_data, reg_num); - return irq_none; - } - if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) { - dev_err(&vsoc_dev.dev->dev, - "irq not aligned @%p reg_num=0x%04x ", - region_data, reg_num); - return irq_none; - } - wake_up_interruptible(®ion_data->interrupt_wait_queue); - return irq_handled; -} - -static int vsoc_probe_device(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int result; - int i; - resource_size_t reg_size; - dev_t devt; - - vsoc_dev.dev = pdev; - result = pci_enable_device(pdev); - if (result) { - dev_err(&pdev->dev, - "pci_enable_device failed %s: error %d ", - pci_name(pdev), result); - return result; - } - vsoc_dev.enabled_device = true; - result = pci_request_regions(pdev, "vsoc"); - if (result < 0) { - dev_err(&pdev->dev, "pci_request_regions failed "); - vsoc_remove_device(pdev); - return -ebusy; - } - vsoc_dev.requested_regions = true; - /* set up the control registers in bar 0 */ - reg_size = pci_resource_len(pdev, register_bar); - if (reg_size > max_register_bar_len) - vsoc_dev.regs = - 
pci_iomap(pdev, register_bar, max_register_bar_len); - else - vsoc_dev.regs = pci_iomap(pdev, register_bar, reg_size); - - if (!vsoc_dev.regs) { - dev_err(&pdev->dev, - "cannot map registers of size %zu ", - (size_t)reg_size); - vsoc_remove_device(pdev); - return -ebusy; - } - - /* map the shared memory in bar 2 */ - vsoc_dev.shm_phys_start = pci_resource_start(pdev, shared_memory_bar); - vsoc_dev.shm_size = pci_resource_len(pdev, shared_memory_bar); - - dev_info(&pdev->dev, "shared memory @ dma %pa size=0x%zx ", - &vsoc_dev.shm_phys_start, vsoc_dev.shm_size); - vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, shared_memory_bar, 0); - if (!vsoc_dev.kernel_mapped_shm) { - dev_err(&vsoc_dev.dev->dev, "cannot iomap region "); - vsoc_remove_device(pdev); - return -ebusy; - } - - vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *) - vsoc_dev.kernel_mapped_shm; - dev_info(&pdev->dev, "major_version: %d ", - vsoc_dev.layout->major_version); - dev_info(&pdev->dev, "minor_version: %d ", - vsoc_dev.layout->minor_version); - dev_info(&pdev->dev, "size: 0x%x ", vsoc_dev.layout->size); - dev_info(&pdev->dev, "regions: %d ", vsoc_dev.layout->region_count); - if (vsoc_dev.layout->major_version != - current_vsoc_layout_major_version) { - dev_err(&vsoc_dev.dev->dev, - "driver supports only major_version %d ", - current_vsoc_layout_major_version); - vsoc_remove_device(pdev); - return -ebusy; - } - result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count, - vsoc_dev_name); - if (result) { - dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed "); - vsoc_remove_device(pdev); - return -ebusy; - } - vsoc_dev.major = major(devt); - cdev_init(&vsoc_dev.cdev, &vsoc_ops); - vsoc_dev.cdev.owner = this_module; - result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count); - if (result) { - dev_err(&vsoc_dev.dev->dev, "cdev_add error "); - vsoc_remove_device(pdev); - return -ebusy; - } - vsoc_dev.cdev_added = true; - vsoc_dev.class = 
class_create(this_module, vsoc_dev_name); - if (is_err(vsoc_dev.class)) { - dev_err(&vsoc_dev.dev->dev, "class_create failed "); - vsoc_remove_device(pdev); - return ptr_err(vsoc_dev.class); - } - vsoc_dev.class_added = true; - vsoc_dev.regions = (struct vsoc_device_region __force *) - ((void *)vsoc_dev.layout + - vsoc_dev.layout->vsoc_region_desc_offset); - vsoc_dev.msix_entries = - kcalloc(vsoc_dev.layout->region_count, - sizeof(vsoc_dev.msix_entries[0]), gfp_kernel); - if (!vsoc_dev.msix_entries) { - dev_err(&vsoc_dev.dev->dev, - "unable to allocate msix_entries "); - vsoc_remove_device(pdev); - return -enospc; - } - vsoc_dev.regions_data = - kcalloc(vsoc_dev.layout->region_count, - sizeof(vsoc_dev.regions_data[0]), gfp_kernel); - if (!vsoc_dev.regions_data) { - dev_err(&vsoc_dev.dev->dev, - "unable to allocate regions' data "); - vsoc_remove_device(pdev); - return -enospc; - } - for (i = 0; i < vsoc_dev.layout->region_count; ++i) - vsoc_dev.msix_entries[i].entry = i; - - result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries, - vsoc_dev.layout->region_count); - if (result) { - dev_info(&pdev->dev, "pci_enable_msix failed: %d ", result); - vsoc_remove_device(pdev); - return -enospc; - } - /* check that all regions are well formed */ - for (i = 0; i < vsoc_dev.layout->region_count; ++i) { - const struct vsoc_device_region *region = vsoc_dev.regions + i; - - if (!page_aligned(region->region_begin_offset) || - !page_aligned(region->region_end_offset)) { - dev_err(&vsoc_dev.dev->dev, - "region %d not aligned (%x:%x)", i, - region->region_begin_offset, - region->region_end_offset); - vsoc_remove_device(pdev); - return -efault; - } - if (region->region_begin_offset >= region->region_end_offset || - region->region_end_offset > vsoc_dev.shm_size) { - dev_err(&vsoc_dev.dev->dev, - "region %d offsets are wrong: %x %x %zx", - i, region->region_begin_offset, - region->region_end_offset, vsoc_dev.shm_size); - vsoc_remove_device(pdev); - return -efault; - } - if 
(region->managed_by >= vsoc_dev.layout->region_count) { - dev_err(&vsoc_dev.dev->dev, - "region %d has invalid owner: %u", - i, region->managed_by); - vsoc_remove_device(pdev); - return -efault; - } - } - vsoc_dev.msix_enabled = true; - for (i = 0; i < vsoc_dev.layout->region_count; ++i) { - const struct vsoc_device_region *region = vsoc_dev.regions + i; - size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1; - const struct vsoc_signal_table_layout *h_to_g_signal_table = - ®ion->host_to_guest_signal_table; - const struct vsoc_signal_table_layout *g_to_h_signal_table = - ®ion->guest_to_host_signal_table; - - vsoc_dev.regions_data[i].name[name_sz] = ''; - memcpy(vsoc_dev.regions_data[i].name, region->device_name, - name_sz); - dev_info(&pdev->dev, "region %d name=%s ", - i, vsoc_dev.regions_data[i].name); - init_waitqueue_head - (&vsoc_dev.regions_data[i].interrupt_wait_queue); - init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue); - vsoc_dev.regions_data[i].incoming_signalled = - shm_off_to_virtual_addr(region->region_begin_offset) + - h_to_g_signal_table->interrupt_signalled_offset; - vsoc_dev.regions_data[i].outgoing_signalled = - shm_off_to_virtual_addr(region->region_begin_offset) + - g_to_h_signal_table->interrupt_signalled_offset; - result = request_irq(vsoc_dev.msix_entries[i].vector, - vsoc_interrupt, 0, - vsoc_dev.regions_data[i].name, - vsoc_dev.regions_data + i); - if (result) { - dev_info(&pdev->dev, - "request_irq failed irq=%d vector=%d ", - i, vsoc_dev.msix_entries[i].vector); - vsoc_remove_device(pdev); - return -enospc; - } - vsoc_dev.regions_data[i].irq_requested = true; - if (!device_create(vsoc_dev.class, null, - mkdev(vsoc_dev.major, i), - null, vsoc_dev.regions_data[i].name)) { - dev_err(&vsoc_dev.dev->dev, "device_create failed "); - vsoc_remove_device(pdev); - return -ebusy; - } - vsoc_dev.regions_data[i].device_created = true; - } - return 0; -} - -/* - * this should undo all of the allocations in the probe function in 
reverse - * order. - * - * notes: - * - * the device may have been partially initialized, so double check - * that the allocations happened. - * - * this function may be called multiple times, so mark resources as freed - * as they are deallocated. - */ -static void vsoc_remove_device(struct pci_dev *pdev) -{ - int i; - /* - * pdev is the first thing to be set on probe and the last thing - * to be cleared here. if it's null then there is no cleanup. - */ - if (!pdev || !vsoc_dev.dev) - return; - dev_info(&pdev->dev, "remove_device "); - if (vsoc_dev.regions_data) { - for (i = 0; i < vsoc_dev.layout->region_count; ++i) { - if (vsoc_dev.regions_data[i].device_created) { - device_destroy(vsoc_dev.class, - mkdev(vsoc_dev.major, i)); - vsoc_dev.regions_data[i].device_created = false; - } - if (vsoc_dev.regions_data[i].irq_requested) - free_irq(vsoc_dev.msix_entries[i].vector, null); - vsoc_dev.regions_data[i].irq_requested = false; - } - kfree(vsoc_dev.regions_data); - vsoc_dev.regions_data = null; - } - if (vsoc_dev.msix_enabled) { - pci_disable_msix(pdev); - vsoc_dev.msix_enabled = false; - } - kfree(vsoc_dev.msix_entries); - vsoc_dev.msix_entries = null; - vsoc_dev.regions = null; - if (vsoc_dev.class_added) { - class_destroy(vsoc_dev.class); - vsoc_dev.class_added = false; - } - if (vsoc_dev.cdev_added) { - cdev_del(&vsoc_dev.cdev); - vsoc_dev.cdev_added = false; - } - if (vsoc_dev.major && vsoc_dev.layout) { - unregister_chrdev_region(mkdev(vsoc_dev.major, 0), - vsoc_dev.layout->region_count); - vsoc_dev.major = 0; - } - vsoc_dev.layout = null; - if (vsoc_dev.kernel_mapped_shm) { - pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm); - vsoc_dev.kernel_mapped_shm = null; - } - if (vsoc_dev.regs) { - pci_iounmap(pdev, vsoc_dev.regs); - vsoc_dev.regs = null; - } - if (vsoc_dev.requested_regions) { - pci_release_regions(pdev); - vsoc_dev.requested_regions = false; - } - if (vsoc_dev.enabled_device) { - pci_disable_device(pdev); - vsoc_dev.enabled_device = false; - } - /* do 
this last: it indicates that the device is not initialized. */ - vsoc_dev.dev = null; -} - -static void __exit vsoc_cleanup_module(void) -{ - vsoc_remove_device(vsoc_dev.dev); - pci_unregister_driver(&vsoc_pci_driver); -} - -static int __init vsoc_init_module(void) -{ - int err = -enomem; - - init_list_head(&vsoc_dev.permissions); - mutex_init(&vsoc_dev.mtx); - - err = pci_register_driver(&vsoc_pci_driver); - if (err < 0) - return err; - return 0; -} - -static int vsoc_open(struct inode *inode, struct file *filp) -{ - /* can't use vsoc_validate_filep because filp is still incomplete */ - int ret = vsoc_validate_inode(inode); - - if (ret) - return ret; - filp->private_data = - kzalloc(sizeof(struct vsoc_private_data), gfp_kernel); - if (!filp->private_data) - return -enomem; - return 0; -} - -static int vsoc_release(struct inode *inode, struct file *filp) -{ - struct vsoc_private_data *private_data = null; - struct fd_scoped_permission_node *node = null; - struct vsoc_device_region *owner_region_p = null; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - private_data = (struct vsoc_private_data *)filp->private_data; - if (!private_data) - return 0; - - node = private_data->fd_scoped_permission_node; - if (node) { - owner_region_p = vsoc_region_from_inode(inode); - if (owner_region_p->managed_by != vsoc_region_whole) { - owner_region_p = - &vsoc_dev.regions[owner_region_p->managed_by]; - } - do_destroy_fd_scoped_permission_node(owner_region_p, node); - private_data->fd_scoped_permission_node = null; - } - kfree(private_data); - filp->private_data = null; - - return 0; -} - -/* - * returns the device relative offset and length of the area specified by the - * fd scoped permission. if there is no fd scoped permission set, a default - * permission covering the entire region is assumed, unless the region is owned - * by another one, in which case the default is a permission with zero size. 
- */ -static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset) -{ - __u32 off = 0; - ssize_t length = 0; - struct vsoc_device_region *region_p; - struct fd_scoped_permission *perm; - - region_p = vsoc_region_from_filep(filp); - off = region_p->region_begin_offset; - perm = &((struct vsoc_private_data *)filp->private_data)-> - fd_scoped_permission_node->permission; - if (perm) { - off += perm->begin_offset; - length = perm->end_offset - perm->begin_offset; - } else if (region_p->managed_by == vsoc_region_whole) { - /* no permission set and the regions is not owned by another, - * default to full region access. - */ - length = vsoc_device_region_size(region_p); - } else { - /* return zero length, access is denied. */ - length = 0; - } - if (area_offset) - *area_offset = off; - return length; -} - -static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long len = vma->vm_end - vma->vm_start; - __u32 area_off; - phys_addr_t mem_off; - ssize_t area_len; - int retval = vsoc_validate_filep(filp); - - if (retval) - return retval; - area_len = vsoc_get_area(filp, &area_off); - /* add the requested offset */ - area_off += (vma->vm_pgoff << page_shift); - area_len -= (vma->vm_pgoff << page_shift); - if (area_len < len) - return -einval; - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - mem_off = shm_off_to_phys_addr(area_off); - if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> page_shift, - len, vma->vm_page_prot)) - return -eagain; - return 0; -} - -module_init(vsoc_init_module); -module_exit(vsoc_cleanup_module); - -module_license("gpl"); -module_author("greg hartman <ghartman@google.com>"); -module_description("vsoc interpretation of qemu's ivshmem device"); -module_version("1.0");
|
Drivers in the Staging area
|
c3709b3285009e0c1448510b9460e96146cd5c9a
|
alistair delva joel fernandes google joel joelfernandes org
|
drivers
|
staging
|
android, uapi
|
staging: octeon-usb: delete the octeon usb host controller driver
|
this driver was merged back in 2013 and shows no progress toward ever being merged into the "correct" part of the kernel. the code doesn't even build for anyone unless you have the specific hardware platform selected, so odds are it doesn't even work anymore.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
delete the octeon usb host controller driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['octeon-usb']
|
['c', 'kconfig', 'makefile', 'h', 'todo']
| 7
| 0
| 5,608
|
--- diff --git a/drivers/staging/kconfig b/drivers/staging/kconfig --- a/drivers/staging/kconfig +++ b/drivers/staging/kconfig -source "drivers/staging/octeon-usb/kconfig" - diff --git a/drivers/staging/makefile b/drivers/staging/makefile --- a/drivers/staging/makefile +++ b/drivers/staging/makefile -obj-$(config_octeon_usb) += octeon-usb/ diff --git a/drivers/staging/octeon-usb/kconfig b/drivers/staging/octeon-usb/kconfig --- a/drivers/staging/octeon-usb/kconfig +++ /dev/null -# spdx-license-identifier: gpl-2.0 -config octeon_usb - tristate "cavium networks octeon usb support" - depends on cavium_octeon_soc && usb - help - this driver supports usb host controller on some cavium - networks' products in the octeon family. - - to compile this driver as a module, choose m here. the module - will be called octeon-hcd. - diff --git a/drivers/staging/octeon-usb/makefile b/drivers/staging/octeon-usb/makefile --- a/drivers/staging/octeon-usb/makefile +++ /dev/null -# spdx-license-identifier: gpl-2.0 -obj-${config_octeon_usb} := octeon-hcd.o diff --git a/drivers/staging/octeon-usb/todo b/drivers/staging/octeon-usb/todo --- a/drivers/staging/octeon-usb/todo +++ /dev/null -this driver is functional and has been tested on edgerouter lite, -d-link dsr-1000n and ebh5600 evaluation board with usb mass storage. - -todo: - - kernel coding style - - checkpatch warnings - -contact: aaro koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c --- a/drivers/staging/octeon-usb/octeon-hcd.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is subject to the terms and conditions of the gnu general public - * license. see the file "copying" in the main directory of this archive - * for more details. - * - * copyright (c) 2008 cavium networks - * - * some parts of the code were originally released under bsd license: - * - * copyright (c) 2003-2010 cavium networks (support@cavium.com). 
all rights - * reserved. - * - * redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * neither the name of cavium networks nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * this software, including technical data, may be subject to u.s. export - * control laws, including the u.s. export administration act and its associated - * regulations, and may be subject to export or import regulations in other - * countries. - * - * to the maximum extent permitted by law, the software is provided "as is" - * and with all faults and cavium networks makes no promises, representations or - * warranties, either express, implied, statutory, or otherwise, with respect to - * the software, including its condition, its conformity to any representation - * or description, or the existence of any latent or patent defects, and cavium - * specifically disclaims all implied (if any) warranties of title, - * merchantability, noninfringement, fitness for a particular purpose, lack of - * viruses, accuracy or completeness, quiet enjoyment, quiet possession or - * correspondence to description. the entire risk arising out of use or - * performance of the software lies with you. 
- */ - -#include <linux/usb.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/usb/hcd.h> -#include <linux/prefetch.h> -#include <linux/dma-mapping.h> -#include <linux/platform_device.h> - -#include <asm/octeon/octeon.h> - -#include "octeon-hcd.h" - -/** - * enum cvmx_usb_speed - the possible usb device speeds - * - * @cvmx_usb_speed_high: device is operation at 480mbps - * @cvmx_usb_speed_full: device is operation at 12mbps - * @cvmx_usb_speed_low: device is operation at 1.5mbps - */ -enum cvmx_usb_speed { - cvmx_usb_speed_high = 0, - cvmx_usb_speed_full = 1, - cvmx_usb_speed_low = 2, -}; - -/** - * enum cvmx_usb_transfer - the possible usb transfer types - * - * @cvmx_usb_transfer_control: usb transfer type control for hub and status - * transfers - * @cvmx_usb_transfer_isochronous: usb transfer type isochronous for low - * priority periodic transfers - * @cvmx_usb_transfer_bulk: usb transfer type bulk for large low priority - * transfers - * @cvmx_usb_transfer_interrupt: usb transfer type interrupt for high priority - * periodic transfers - */ -enum cvmx_usb_transfer { - cvmx_usb_transfer_control = 0, - cvmx_usb_transfer_isochronous = 1, - cvmx_usb_transfer_bulk = 2, - cvmx_usb_transfer_interrupt = 3, -}; - -/** - * enum cvmx_usb_direction - the transfer directions - * - * @cvmx_usb_direction_out: data is transferring from octeon to the device/host - * @cvmx_usb_direction_in: data is transferring from the device/host to octeon - */ -enum cvmx_usb_direction { - cvmx_usb_direction_out, - cvmx_usb_direction_in, -}; - -/** - * enum cvmx_usb_status - possible callback function status codes - * - * @cvmx_usb_status_ok: the transaction / operation finished without - * any errors - * @cvmx_usb_status_short: fixme: this is currently not implemented - * @cvmx_usb_status_cancel: the transaction was canceled while in flight - * by a user call to cvmx_usb_cancel - * @cvmx_usb_status_error: the transaction aborted with an unexpected - * error status - * 
@cvmx_usb_status_stall: the transaction received a usb stall response - * from the device - * @cvmx_usb_status_xacterr: the transaction failed with an error from the - * device even after a number of retries - * @cvmx_usb_status_datatglerr: the transaction failed with a data toggle - * error even after a number of retries - * @cvmx_usb_status_babbleerr: the transaction failed with a babble error - * @cvmx_usb_status_frameerr: the transaction failed with a frame error - * even after a number of retries - */ -enum cvmx_usb_status { - cvmx_usb_status_ok, - cvmx_usb_status_short, - cvmx_usb_status_cancel, - cvmx_usb_status_error, - cvmx_usb_status_stall, - cvmx_usb_status_xacterr, - cvmx_usb_status_datatglerr, - cvmx_usb_status_babbleerr, - cvmx_usb_status_frameerr, -}; - -/** - * struct cvmx_usb_port_status - the usb port status information - * - * @port_enabled: 1 = usb port is enabled, 0 = disabled - * @port_over_current: 1 = over current detected, 0 = over current not - * detected. octeon doesn't support over current detection. - * @port_powered: 1 = port power is being supplied to the device, 0 = - * power is off. octeon doesn't support turning port power - * off. - * @port_speed: current port speed. - * @connected: 1 = a device is connected to the port, 0 = no device is - * connected. - * @connect_change: 1 = device connected state changed since the last set - * status call. - */ -struct cvmx_usb_port_status { - u32 reserved : 25; - u32 port_enabled : 1; - u32 port_over_current : 1; - u32 port_powered : 1; - enum cvmx_usb_speed port_speed : 2; - u32 connected : 1; - u32 connect_change : 1; -}; - -/** - * struct cvmx_usb_iso_packet - descriptor for isochronous packets - * - * @offset: this is the offset in bytes into the main buffer where this data - * is stored. - * @length: this is the length in bytes of the data. - * @status: this is the status of this individual packet transfer. 
- */ -struct cvmx_usb_iso_packet { - int offset; - int length; - enum cvmx_usb_status status; -}; - -/** - * enum cvmx_usb_initialize_flags - flags used by the initialization function - * - * @cvmx_usb_initialize_flags_clock_xo_xi: the usb port uses a 12mhz crystal - * as clock source at usb_xo and - * usb_xi. - * @cvmx_usb_initialize_flags_clock_xo_gnd: the usb port uses 12/24/48mhz 2.5v - * board clock source at usb_xo. - * usb_xi should be tied to gnd. - * @cvmx_usb_initialize_flags_clock_mhz_mask: mask for clock speed field - * @cvmx_usb_initialize_flags_clock_12mhz: speed of reference clock or - * crystal - * @cvmx_usb_initialize_flags_clock_24mhz: speed of reference clock - * @cvmx_usb_initialize_flags_clock_48mhz: speed of reference clock - * @cvmx_usb_initialize_flags_no_dma: disable dma and used polled io for - * data transfer use for the usb - */ -enum cvmx_usb_initialize_flags { - cvmx_usb_initialize_flags_clock_xo_xi = 1 << 0, - cvmx_usb_initialize_flags_clock_xo_gnd = 1 << 1, - cvmx_usb_initialize_flags_clock_mhz_mask = 3 << 3, - cvmx_usb_initialize_flags_clock_12mhz = 1 << 3, - cvmx_usb_initialize_flags_clock_24mhz = 2 << 3, - cvmx_usb_initialize_flags_clock_48mhz = 3 << 3, - /* bits 3-4 used to encode the clock frequency */ - cvmx_usb_initialize_flags_no_dma = 1 << 5, -}; - -/** - * enum cvmx_usb_pipe_flags - internal flags for a pipe. - * - * @cvmx_usb_pipe_flags_scheduled: used internally to determine if a pipe is - * actively using hardware. - * @cvmx_usb_pipe_flags_need_ping: used internally to determine if a high speed - * pipe is in the ping state. 
- */ -enum cvmx_usb_pipe_flags { - cvmx_usb_pipe_flags_scheduled = 1 << 17, - cvmx_usb_pipe_flags_need_ping = 1 << 18, -}; - -/* maximum number of times to retry failed transactions */ -#define max_retries 3 - -/* maximum number of hardware channels supported by the usb block */ -#define max_channels 8 - -/* - * the low level hardware can transfer a maximum of this number of bytes in each - * transfer. the field is 19 bits wide - */ -#define max_transfer_bytes ((1 << 19) - 1) - -/* - * the low level hardware can transfer a maximum of this number of packets in - * each transfer. the field is 10 bits wide - */ -#define max_transfer_packets ((1 << 10) - 1) - -/** - * logical transactions may take numerous low level - * transactions, especially when splits are concerned. this - * enum represents all of the possible stages a transaction can - * be in. note that split completes are always even. this is so - * the nak handler can backup to the previous low level - * transaction with a simple clearing of bit 0. - */ -enum cvmx_usb_stage { - cvmx_usb_stage_non_control, - cvmx_usb_stage_non_control_split_complete, - cvmx_usb_stage_setup, - cvmx_usb_stage_setup_split_complete, - cvmx_usb_stage_data, - cvmx_usb_stage_data_split_complete, - cvmx_usb_stage_status, - cvmx_usb_stage_status_split_complete, -}; - -/** - * struct cvmx_usb_transaction - describes each pending usb transaction - * regardless of type. these are linked together - * to form a list of pending requests for a pipe. - * - * @node: list node for transactions in the pipe. - * @type: type of transaction, duplicated of the pipe. - * @flags: state flags for this transaction. - * @buffer: user's physical buffer address to read/write. - * @buffer_length: size of the user's buffer in bytes. - * @control_header: for control transactions, physical address of the 8 - * byte standard header. - * @iso_start_frame: for iso transactions, the starting frame number. 
- * @iso_number_packets: for iso transactions, the number of packets in the - * request. - * @iso_packets: for iso transactions, the sub packets in the request. - * @actual_bytes: actual bytes transfer for this transaction. - * @stage: for control transactions, the current stage. - * @urb: urb. - */ -struct cvmx_usb_transaction { - struct list_head node; - enum cvmx_usb_transfer type; - u64 buffer; - int buffer_length; - u64 control_header; - int iso_start_frame; - int iso_number_packets; - struct cvmx_usb_iso_packet *iso_packets; - int xfersize; - int pktcnt; - int retries; - int actual_bytes; - enum cvmx_usb_stage stage; - struct urb *urb; -}; - -/** - * struct cvmx_usb_pipe - a pipe represents a virtual connection between octeon - * and some usb device. it contains a list of pending - * request to the device. - * - * @node: list node for pipe list - * @next: pipe after this one in the list - * @transactions: list of pending transactions - * @interval: for periodic pipes, the interval between packets in - * frames - * @next_tx_frame: the next frame this pipe is allowed to transmit on - * @flags: state flags for this pipe - * @device_speed: speed of device connected to this pipe - * @transfer_type: type of transaction supported by this pipe - * @transfer_dir: in or out. 
ignored for control - * @multi_count: max packet in a row for the device - * @max_packet: the device's maximum packet size in bytes - * @device_addr: usb device address at other end of pipe - * @endpoint_num: usb endpoint number at other end of pipe - * @hub_device_addr: hub address this device is connected to - * @hub_port: hub port this device is connected to - * @pid_toggle: this toggles between 0/1 on every packet send to track - * the data pid needed - * @channel: hardware dma channel for this pipe - * @split_sc_frame: the low order bits of the frame number the split - * complete should be sent on - */ -struct cvmx_usb_pipe { - struct list_head node; - struct list_head transactions; - u64 interval; - u64 next_tx_frame; - enum cvmx_usb_pipe_flags flags; - enum cvmx_usb_speed device_speed; - enum cvmx_usb_transfer transfer_type; - enum cvmx_usb_direction transfer_dir; - int multi_count; - u16 max_packet; - u8 device_addr; - u8 endpoint_num; - u8 hub_device_addr; - u8 hub_port; - u8 pid_toggle; - u8 channel; - s8 split_sc_frame; -}; - -struct cvmx_usb_tx_fifo { - struct { - int channel; - int size; - u64 address; - } entry[max_channels + 1]; - int head; - int tail; -}; - -/** - * struct octeon_hcd - the state of the usb block - * - * lock: serialization lock. - * init_flags: flags passed to initialize. - * index: which usb block this is for. - * idle_hardware_channels: bit set for every idle hardware channel. - * usbcx_hprt: stored port status so we don't need to read a csr to - * determine splits. - * pipe_for_channel: map channels to pipes. - * pipe: storage for pipes. - * indent: used by debug output to indent functions. - * port_status: last port status used for change notification. - * idle_pipes: list of open pipes that have no transactions. - * active_pipes: active pipes indexed by transfer type. - * frame_number: increments every sof interrupt for time keeping. - * active_split: points to the current active split, or null. 
- */ -struct octeon_hcd { - spinlock_t lock; /* serialization lock */ - int init_flags; - int index; - int idle_hardware_channels; - union cvmx_usbcx_hprt usbcx_hprt; - struct cvmx_usb_pipe *pipe_for_channel[max_channels]; - int indent; - struct cvmx_usb_port_status port_status; - struct list_head idle_pipes; - struct list_head active_pipes[4]; - u64 frame_number; - struct cvmx_usb_transaction *active_split; - struct cvmx_usb_tx_fifo periodic; - struct cvmx_usb_tx_fifo nonperiodic; -}; - -/* - * this macro logically sets a single field in a csr. it does the sequence - * read, modify, and write - */ -#define usb_set_field32(address, _union, field, value) \ - do { \ - union _union c; \ - \ - c.u32 = cvmx_usb_read_csr32(usb, address); \ - c.s.field = value; \ - cvmx_usb_write_csr32(usb, address, c.u32); \ - } while (0) - -/* returns the io address to push/pop stuff data from the fifos */ -#define usb_fifo_address(channel, usb_index) \ - (cvmx_usbcx_gotgctl(usb_index) + ((channel) + 1) * 0x1000) - -/** - * struct octeon_temp_buffer - a bounce buffer for usb transfers - * @orig_buffer: the original buffer passed by the usb stack - * @data: the newly allocated temporary buffer (excluding meta-data) - * - * both the dma engine and fifo mode will always transfer full 32-bit words. if - * the buffer is too short, we need to allocate a temporary one, and this struct - * represents it. - */ -struct octeon_temp_buffer { - void *orig_buffer; - u8 data[0]; -}; - -static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p) -{ - return container_of((void *)p, struct usb_hcd, hcd_priv); -} - -/** - * octeon_alloc_temp_buffer - allocate a temporary buffer for usb transfer - * (if needed) - * @urb: urb. - * @mem_flags: memory allocation flags. - * - * this function allocates a temporary bounce buffer whenever it's needed - * due to hw limitations. 
- */ -static int octeon_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) -{ - struct octeon_temp_buffer *temp; - - if (urb->num_sgs || urb->sg || - (urb->transfer_flags & urb_no_transfer_dma_map) || - !(urb->transfer_buffer_length % sizeof(u32))) - return 0; - - temp = kmalloc(align(urb->transfer_buffer_length, sizeof(u32)) + - sizeof(*temp), mem_flags); - if (!temp) - return -enomem; - - temp->orig_buffer = urb->transfer_buffer; - if (usb_urb_dir_out(urb)) - memcpy(temp->data, urb->transfer_buffer, - urb->transfer_buffer_length); - urb->transfer_buffer = temp->data; - urb->transfer_flags |= urb_aligned_temp_buffer; - - return 0; -} - -/** - * octeon_free_temp_buffer - free a temporary buffer used by usb transfers. - * @urb: urb. - * - * frees a buffer allocated by octeon_alloc_temp_buffer(). - */ -static void octeon_free_temp_buffer(struct urb *urb) -{ - struct octeon_temp_buffer *temp; - size_t length; - - if (!(urb->transfer_flags & urb_aligned_temp_buffer)) - return; - - temp = container_of(urb->transfer_buffer, struct octeon_temp_buffer, - data); - if (usb_urb_dir_in(urb)) { - if (usb_pipeisoc(urb->pipe)) - length = urb->transfer_buffer_length; - else - length = urb->actual_length; - - memcpy(temp->orig_buffer, urb->transfer_buffer, length); - } - urb->transfer_buffer = temp->orig_buffer; - urb->transfer_flags &= ~urb_aligned_temp_buffer; - kfree(temp); -} - -/** - * octeon_map_urb_for_dma - octeon-specific map_urb_for_dma(). - * @hcd: usb hcd structure. - * @urb: urb. - * @mem_flags: memory allocation flags. - */ -static int octeon_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, - gfp_t mem_flags) -{ - int ret; - - ret = octeon_alloc_temp_buffer(urb, mem_flags); - if (ret) - return ret; - - ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); - if (ret) - octeon_free_temp_buffer(urb); - - return ret; -} - -/** - * octeon_unmap_urb_for_dma - octeon-specific unmap_urb_for_dma() - * @hcd: usb hcd structure. - * @urb: urb. 
- */ -static void octeon_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) -{ - usb_hcd_unmap_urb_for_dma(hcd, urb); - octeon_free_temp_buffer(urb); -} - -/** - * read a usb 32bit csr. it performs the necessary address swizzle - * for 32bit csrs and logs the value in a readable format if - * debugging is on. - * - * @usb: usb block this access is for - * @address: 64bit address to read - * - * returns: result of the read - */ -static inline u32 cvmx_usb_read_csr32(struct octeon_hcd *usb, u64 address) -{ - return cvmx_read64_uint32(address ^ 4); -} - -/** - * write a usb 32bit csr. it performs the necessary address - * swizzle for 32bit csrs and logs the value in a readable format - * if debugging is on. - * - * @usb: usb block this access is for - * @address: 64bit address to write - * @value: value to write - */ -static inline void cvmx_usb_write_csr32(struct octeon_hcd *usb, - u64 address, u32 value) -{ - cvmx_write64_uint32(address ^ 4, value); - cvmx_read64_uint64(cvmx_usbnx_dma0_inb_chn0(usb->index)); -} - -/** - * return non zero if this pipe connects to a non high speed - * device through a high speed hub. 
- * - * @usb: usb block this access is for - * @pipe: pipe to check - * - * returns: non zero if we need to do split transactions - */ -static inline int cvmx_usb_pipe_needs_split(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe) -{ - return pipe->device_speed != cvmx_usb_speed_high && - usb->usbcx_hprt.s.prtspd == cvmx_usb_speed_high; -} - -/** - * trivial utility function to return the correct pid for a pipe - * - * @pipe: pipe to check - * - * returns: pid for pipe - */ -static inline int cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe) -{ - if (pipe->pid_toggle) - return 2; /* data1 */ - return 0; /* data0 */ -} - -/* loops through register until txfflsh or rxfflsh become zero.*/ -static int cvmx_wait_tx_rx(struct octeon_hcd *usb, int fflsh_type) -{ - int result; - u64 address = cvmx_usbcx_grstctl(usb->index); - u64 done = cvmx_get_cycle() + 100 * - (u64)octeon_get_clock_rate / 1000000; - union cvmx_usbcx_grstctl c; - - while (1) { - c.u32 = cvmx_usb_read_csr32(usb, address); - if (fflsh_type == 0 && c.s.txfflsh == 0) { - result = 0; - break; - } else if (fflsh_type == 1 && c.s.rxfflsh == 0) { - result = 0; - break; - } else if (cvmx_get_cycle() > done) { - result = -1; - break; - } - - __delay(100); - } - return result; -} - -static void cvmx_fifo_setup(struct octeon_hcd *usb) -{ - union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3; - union cvmx_usbcx_gnptxfsiz npsiz; - union cvmx_usbcx_hptxfsiz psiz; - - usbcx_ghwcfg3.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_ghwcfg3(usb->index)); - - /* - * program the usbc_grxfsiz register to select the size of the receive - * fifo (25%). - */ - usb_set_field32(cvmx_usbcx_grxfsiz(usb->index), cvmx_usbcx_grxfsiz, - rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4); - - /* - * program the usbc_gnptxfsiz register to select the size and the start - * address of the non-periodic transmit fifo for nonperiodic - * transactions (50%). 
- */ - npsiz.u32 = cvmx_usb_read_csr32(usb, cvmx_usbcx_gnptxfsiz(usb->index)); - npsiz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2; - npsiz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4; - cvmx_usb_write_csr32(usb, cvmx_usbcx_gnptxfsiz(usb->index), npsiz.u32); - - /* - * program the usbc_hptxfsiz register to select the size and start - * address of the periodic transmit fifo for periodic transactions - * (25%). - */ - psiz.u32 = cvmx_usb_read_csr32(usb, cvmx_usbcx_hptxfsiz(usb->index)); - psiz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4; - psiz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4; - cvmx_usb_write_csr32(usb, cvmx_usbcx_hptxfsiz(usb->index), psiz.u32); - - /* flush all fifos */ - usb_set_field32(cvmx_usbcx_grstctl(usb->index), - cvmx_usbcx_grstctl, txfnum, 0x10); - usb_set_field32(cvmx_usbcx_grstctl(usb->index), - cvmx_usbcx_grstctl, txfflsh, 1); - cvmx_wait_tx_rx(usb, 0); - usb_set_field32(cvmx_usbcx_grstctl(usb->index), - cvmx_usbcx_grstctl, rxfflsh, 1); - cvmx_wait_tx_rx(usb, 1); -} - -/** - * shutdown a usb port after a call to cvmx_usb_initialize(). - * the port should be disabled with all pipes closed when this - * function is called. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * - * returns: 0 or a negative error code. 
- */ -static int cvmx_usb_shutdown(struct octeon_hcd *usb) -{ - union cvmx_usbnx_clk_ctl usbn_clk_ctl; - - /* make sure all pipes are closed */ - if (!list_empty(&usb->idle_pipes) || - !list_empty(&usb->active_pipes[cvmx_usb_transfer_isochronous]) || - !list_empty(&usb->active_pipes[cvmx_usb_transfer_interrupt]) || - !list_empty(&usb->active_pipes[cvmx_usb_transfer_control]) || - !list_empty(&usb->active_pipes[cvmx_usb_transfer_bulk])) - return -ebusy; - - /* disable the clocks and put them in power on reset */ - usbn_clk_ctl.u64 = cvmx_read64_uint64(cvmx_usbnx_clk_ctl(usb->index)); - usbn_clk_ctl.s.enable = 1; - usbn_clk_ctl.s.por = 1; - usbn_clk_ctl.s.hclk_rst = 1; - usbn_clk_ctl.s.prst = 0; - usbn_clk_ctl.s.hrst = 0; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - return 0; -} - -/** - * initialize a usb port for use. this must be called before any - * other access to the octeon usb port is made. the port starts - * off in the disabled state. - * - * @dev: pointer to struct device for logging purposes. - * @usb: pointer to struct octeon_hcd. - * - * returns: 0 or a negative error code. - */ -static int cvmx_usb_initialize(struct device *dev, - struct octeon_hcd *usb) -{ - int channel; - int divisor; - int retries = 0; - union cvmx_usbcx_hcfg usbcx_hcfg; - union cvmx_usbnx_clk_ctl usbn_clk_ctl; - union cvmx_usbcx_gintsts usbc_gintsts; - union cvmx_usbcx_gahbcfg usbcx_gahbcfg; - union cvmx_usbcx_gintmsk usbcx_gintmsk; - union cvmx_usbcx_gusbcfg usbcx_gusbcfg; - union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status; - -retry: - /* - * power on reset and phy initialization - * - * 1. wait for dcok to assert (nothing to do) - * - * 2a. write usbn0/1_clk_ctl[por] = 1 and - * usbn0/1_clk_ctl[hrst,prst,hclk_rst] = 0 - */ - usbn_clk_ctl.u64 = cvmx_read64_uint64(cvmx_usbnx_clk_ctl(usb->index)); - usbn_clk_ctl.s.por = 1; - usbn_clk_ctl.s.hrst = 0; - usbn_clk_ctl.s.prst = 0; - usbn_clk_ctl.s.hclk_rst = 0; - usbn_clk_ctl.s.enable = 0; - /* - * 2b. 
select the usb reference clock/crystal parameters by writing - * appropriate values to usbn0/1_clk_ctl[p_c_sel, p_rtype, p_com_on] - */ - if (usb->init_flags & cvmx_usb_initialize_flags_clock_xo_gnd) { - /* - * the usb port uses 12/24/48mhz 2.5v board clock - * source at usb_xo. usb_xi should be tied to gnd. - * most octeon evaluation boards require this setting - */ - if (octeon_is_model(octeon_cn3xxx) || - octeon_is_model(octeon_cn56xx) || - octeon_is_model(octeon_cn50xx)) - /* from cn56xx,cn50xx,cn31xx,cn30xx manuals */ - usbn_clk_ctl.s.p_rtype = 2; /* p_rclk=1 & p_xenbn=0 */ - else - /* from cn52xx manual */ - usbn_clk_ctl.s.p_rtype = 1; - - switch (usb->init_flags & - cvmx_usb_initialize_flags_clock_mhz_mask) { - case cvmx_usb_initialize_flags_clock_12mhz: - usbn_clk_ctl.s.p_c_sel = 0; - break; - case cvmx_usb_initialize_flags_clock_24mhz: - usbn_clk_ctl.s.p_c_sel = 1; - break; - case cvmx_usb_initialize_flags_clock_48mhz: - usbn_clk_ctl.s.p_c_sel = 2; - break; - } - } else { - /* - * the usb port uses a 12mhz crystal as clock source - * at usb_xo and usb_xi - */ - if (octeon_is_model(octeon_cn3xxx)) - /* from cn31xx,cn30xx manual */ - usbn_clk_ctl.s.p_rtype = 3; /* p_rclk=1 & p_xenbn=1 */ - else - /* from cn56xx,cn52xx,cn50xx manuals. */ - usbn_clk_ctl.s.p_rtype = 0; - - usbn_clk_ctl.s.p_c_sel = 0; - } - /* - * 2c. select the hclk via writing usbn0/1_clk_ctl[divide, divide2] and - * setting usbn0/1_clk_ctl[enable] = 1. divide the core clock down - * such that usb is as close as possible to 125mhz - */ - divisor = div_round_up(octeon_get_clock_rate(), 125000000); - /* lower than 4 doesn't seem to work properly */ - if (divisor < 4) - divisor = 4; - usbn_clk_ctl.s.divide = divisor; - usbn_clk_ctl.s.divide2 = 0; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - - /* 2d. write usbn0/1_clk_ctl[hclk_rst] = 1 */ - usbn_clk_ctl.s.hclk_rst = 1; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - /* 2e. 
wait 64 core-clock cycles for hclk to stabilize */ - __delay(64); - /* - * 3. program the power-on reset field in the usbn clock-control - * register: - * usbn_clk_ctl[por] = 0 - */ - usbn_clk_ctl.s.por = 0; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - /* 4. wait 1 ms for phy clock to start */ - mdelay(1); - /* - * 5. program the reset input from automatic test equipment field in the - * usbp control and status register: - * usbn_usbp_ctl_status[ate_reset] = 1 - */ - usbn_usbp_ctl_status.u64 = - cvmx_read64_uint64(cvmx_usbnx_usbp_ctl_status(usb->index)); - usbn_usbp_ctl_status.s.ate_reset = 1; - cvmx_write64_uint64(cvmx_usbnx_usbp_ctl_status(usb->index), - usbn_usbp_ctl_status.u64); - /* 6. wait 10 cycles */ - __delay(10); - /* - * 7. clear ate_reset field in the usbn clock-control register: - * usbn_usbp_ctl_status[ate_reset] = 0 - */ - usbn_usbp_ctl_status.s.ate_reset = 0; - cvmx_write64_uint64(cvmx_usbnx_usbp_ctl_status(usb->index), - usbn_usbp_ctl_status.u64); - /* - * 8. program the phy reset field in the usbn clock-control register: - * usbn_clk_ctl[prst] = 1 - */ - usbn_clk_ctl.s.prst = 1; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - /* - * 9. program the usbp control and status register to select host or - * device mode. usbn_usbp_ctl_status[hst_mode] = 0 for host, = 1 for - * device - */ - usbn_usbp_ctl_status.s.hst_mode = 0; - cvmx_write64_uint64(cvmx_usbnx_usbp_ctl_status(usb->index), - usbn_usbp_ctl_status.u64); - /* 10. wait 1 us */ - udelay(1); - /* - * 11. program the hreset_n field in the usbn clock-control register: - * usbn_clk_ctl[hrst] = 1 - */ - usbn_clk_ctl.s.hrst = 1; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - /* 12. proceed to usb core initialization */ - usbn_clk_ctl.s.enable = 1; - cvmx_write64_uint64(cvmx_usbnx_clk_ctl(usb->index), usbn_clk_ctl.u64); - udelay(1); - - /* - * usb core initialization - * - * 1. 
read usbc_ghwcfg1, usbc_ghwcfg2, usbc_ghwcfg3, usbc_ghwcfg4 to - * determine usb core configuration parameters. - * - * nothing needed - * - * 2. program the following fields in the global ahb configuration - * register (usbc_gahbcfg) - * dma mode, usbc_gahbcfg[dmaen]: 1 = dma mode, 0 = slave mode - * burst length, usbc_gahbcfg[hbstlen] = 0 - * nonperiodic txfifo empty level (slave mode only), - * usbc_gahbcfg[nptxfemplvl] - * periodic txfifo empty level (slave mode only), - * usbc_gahbcfg[ptxfemplvl] - * global interrupt mask, usbc_gahbcfg[glblintrmsk] = 1 - */ - usbcx_gahbcfg.u32 = 0; - usbcx_gahbcfg.s.dmaen = !(usb->init_flags & - cvmx_usb_initialize_flags_no_dma); - usbcx_gahbcfg.s.hbstlen = 0; - usbcx_gahbcfg.s.nptxfemplvl = 1; - usbcx_gahbcfg.s.ptxfemplvl = 1; - usbcx_gahbcfg.s.glblintrmsk = 1; - cvmx_usb_write_csr32(usb, cvmx_usbcx_gahbcfg(usb->index), - usbcx_gahbcfg.u32); - - /* - * 3. program the following fields in usbc_gusbcfg register. - * hs/fs timeout calibration, usbc_gusbcfg[toutcal] = 0 - * ulpi ddr select, usbc_gusbcfg[ddrsel] = 0 - * usb turnaround time, usbc_gusbcfg[usbtrdtim] = 0x5 - * phy low-power clock select, usbc_gusbcfg[phylpwrclksel] = 0 - */ - usbcx_gusbcfg.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_gusbcfg(usb->index)); - usbcx_gusbcfg.s.toutcal = 0; - usbcx_gusbcfg.s.ddrsel = 0; - usbcx_gusbcfg.s.usbtrdtim = 0x5; - usbcx_gusbcfg.s.phylpwrclksel = 0; - cvmx_usb_write_csr32(usb, cvmx_usbcx_gusbcfg(usb->index), - usbcx_gusbcfg.u32); - - /* - * 4. the software must unmask the following bits in the usbc_gintmsk - * register. 
- * otg interrupt mask, usbc_gintmsk[otgintmsk] = 1 - * mode mismatch interrupt mask, usbc_gintmsk[modemismsk] = 1 - */ - usbcx_gintmsk.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_gintmsk(usb->index)); - usbcx_gintmsk.s.otgintmsk = 1; - usbcx_gintmsk.s.modemismsk = 1; - usbcx_gintmsk.s.hchintmsk = 1; - usbcx_gintmsk.s.sofmsk = 0; - /* we need rx fifo interrupts if we don't have dma */ - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) - usbcx_gintmsk.s.rxflvlmsk = 1; - cvmx_usb_write_csr32(usb, cvmx_usbcx_gintmsk(usb->index), - usbcx_gintmsk.u32); - - /* - * disable all channel interrupts. we'll enable them per channel later. - */ - for (channel = 0; channel < 8; channel++) - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hcintmskx(channel, usb->index), - 0); - - /* - * host port initialization - * - * 1. program the host-port interrupt-mask field to unmask, - * usbc_gintmsk[prtint] = 1 - */ - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, prtintmsk, 1); - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, disconnintmsk, 1); - - /* - * 2. program the usbc_hcfg register to select full-speed host - * or high-speed host. - */ - usbcx_hcfg.u32 = cvmx_usb_read_csr32(usb, cvmx_usbcx_hcfg(usb->index)); - usbcx_hcfg.s.fslssupp = 0; - usbcx_hcfg.s.fslspclksel = 0; - cvmx_usb_write_csr32(usb, cvmx_usbcx_hcfg(usb->index), usbcx_hcfg.u32); - - cvmx_fifo_setup(usb); - - /* - * if the controller is getting port events right after the reset, it - * means the initialization failed. try resetting the controller again - * in such case. this is seen to happen after cold boot on dsr-1000n. 
- */ - usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_gintsts(usb->index)); - cvmx_usb_write_csr32(usb, cvmx_usbcx_gintsts(usb->index), - usbc_gintsts.u32); - dev_dbg(dev, "gintsts after reset: 0x%x ", (int)usbc_gintsts.u32); - if (!usbc_gintsts.s.disconnint && !usbc_gintsts.s.prtint) - return 0; - if (retries++ >= 5) - return -eagain; - dev_info(dev, "controller reset failed (gintsts=0x%x) - retrying ", - (int)usbc_gintsts.u32); - msleep(50); - cvmx_usb_shutdown(usb); - msleep(50); - goto retry; -} - -/** - * reset a usb port. after this call succeeds, the usb port is - * online and servicing requests. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - */ -static void cvmx_usb_reset_port(struct octeon_hcd *usb) -{ - usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hprt(usb->index)); - - /* program the port reset bit to start the reset process */ - usb_set_field32(cvmx_usbcx_hprt(usb->index), cvmx_usbcx_hprt, - prtrst, 1); - - /* - * wait at least 50ms (high speed), or 10ms (full speed) for the reset - * process to complete. - */ - mdelay(50); - - /* program the port reset bit to 0, usbc_hprt[prtrst] = 0 */ - usb_set_field32(cvmx_usbcx_hprt(usb->index), cvmx_usbcx_hprt, - prtrst, 0); - - /* - * read the port speed field to get the enumerated speed, - * usbc_hprt[prtspd]. - */ - usb->usbcx_hprt.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hprt(usb->index)); -} - -/** - * disable a usb port. after this call the usb port will not - * generate data transfers and will not generate events. - * transactions in process will fail and call their - * associated callbacks. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * - * returns: 0 or a negative error code. - */ -static int cvmx_usb_disable(struct octeon_hcd *usb) -{ - /* disable the port */ - usb_set_field32(cvmx_usbcx_hprt(usb->index), cvmx_usbcx_hprt, - prtena, 1); - return 0; -} - -/** - * get the current state of the usb port. 
use this call to - * determine if the usb port has anything connected, is enabled, - * or has some sort of error condition. the return value of this - * call has "changed" bits to signal of the value of some fields - * have changed between calls. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * - * returns: port status information - */ -static struct cvmx_usb_port_status cvmx_usb_get_status(struct octeon_hcd *usb) -{ - union cvmx_usbcx_hprt usbc_hprt; - struct cvmx_usb_port_status result; - - memset(&result, 0, sizeof(result)); - - usbc_hprt.u32 = cvmx_usb_read_csr32(usb, cvmx_usbcx_hprt(usb->index)); - result.port_enabled = usbc_hprt.s.prtena; - result.port_over_current = usbc_hprt.s.prtovrcurract; - result.port_powered = usbc_hprt.s.prtpwr; - result.port_speed = usbc_hprt.s.prtspd; - result.connected = usbc_hprt.s.prtconnsts; - result.connect_change = - result.connected != usb->port_status.connected; - - return result; -} - -/** - * open a virtual pipe between the host and a usb device. a pipe - * must be opened before data can be transferred between a device - * and octeon. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @device_addr: - * usb device address to open the pipe to - * (0-127). - * @endpoint_num: - * usb endpoint number to open the pipe to - * (0-15). - * @device_speed: - * the speed of the device the pipe is going - * to. this must match the device's speed, - * which may be different than the port speed. - * @max_packet: the maximum packet length the device can - * transmit/receive (low speed=0-8, full - * speed=0-1023, high speed=0-1024). this value - * comes from the standard endpoint descriptor - * field wmaxpacketsize bits <10:0>. - * @transfer_type: - * the type of transfer this pipe is for. - * @transfer_dir: - * the direction the pipe is in. this is not - * used for control pipes. - * @interval: for isochronous and interrupt transfers, - * this is how often the transfer is scheduled - * for. 
all other transfers should specify - * zero. the units are in frames (8000/sec at - * high speed, 1000/sec for full speed). - * @multi_count: - * for high speed devices, this is the maximum - * allowed number of packet per microframe. - * specify zero for non high speed devices. this - * value comes from the standard endpoint descriptor - * field wmaxpacketsize bits <12:11>. - * @hub_device_addr: - * hub device address this device is connected - * to. devices connected directly to octeon - * use zero. this is only used when the device - * is full/low speed behind a high speed hub. - * the address will be of the high speed hub, - * not and full speed hubs after it. - * @hub_port: which port on the hub the device is - * connected. use zero for devices connected - * directly to octeon. like hub_device_addr, - * this is only used for full/low speed - * devices behind a high speed hub. - * - * returns: a non-null value is a pipe. null means an error. - */ -static struct cvmx_usb_pipe *cvmx_usb_open_pipe(struct octeon_hcd *usb, - int device_addr, - int endpoint_num, - enum cvmx_usb_speed - device_speed, - int max_packet, - enum cvmx_usb_transfer - transfer_type, - enum cvmx_usb_direction - transfer_dir, - int interval, int multi_count, - int hub_device_addr, - int hub_port) -{ - struct cvmx_usb_pipe *pipe; - - pipe = kzalloc(sizeof(*pipe), gfp_atomic); - if (!pipe) - return null; - if ((device_speed == cvmx_usb_speed_high) && - (transfer_dir == cvmx_usb_direction_out) && - (transfer_type == cvmx_usb_transfer_bulk)) - pipe->flags |= cvmx_usb_pipe_flags_need_ping; - pipe->device_addr = device_addr; - pipe->endpoint_num = endpoint_num; - pipe->device_speed = device_speed; - pipe->max_packet = max_packet; - pipe->transfer_type = transfer_type; - pipe->transfer_dir = transfer_dir; - init_list_head(&pipe->transactions); - - /* - * all pipes use interval to rate limit nak processing. 
force an - * interval if one wasn't supplied - */ - if (!interval) - interval = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - pipe->interval = interval * 8; - /* force start splits to be schedule on uframe 0 */ - pipe->next_tx_frame = ((usb->frame_number + 7) & ~7) + - pipe->interval; - } else { - pipe->interval = interval; - pipe->next_tx_frame = usb->frame_number + pipe->interval; - } - pipe->multi_count = multi_count; - pipe->hub_device_addr = hub_device_addr; - pipe->hub_port = hub_port; - pipe->pid_toggle = 0; - pipe->split_sc_frame = -1; - list_add_tail(&pipe->node, &usb->idle_pipes); - - /* - * we don't need to tell the hardware about this pipe yet since - * it doesn't have any submitted requests - */ - - return pipe; -} - -/** - * poll the rx fifos and remove data as needed. this function is only used - * in non dma mode. it is very important that this function be called quickly - * enough to prevent fifo overflow. - * - * @usb: usb device state populated by cvmx_usb_initialize(). 
- */ -static void cvmx_usb_poll_rx_fifo(struct octeon_hcd *usb) -{ - union cvmx_usbcx_grxstsph rx_status; - int channel; - int bytes; - u64 address; - u32 *ptr; - - rx_status.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_grxstsph(usb->index)); - /* only read data if in data is there */ - if (rx_status.s.pktsts != 2) - return; - /* check if no data is available */ - if (!rx_status.s.bcnt) - return; - - channel = rx_status.s.chnum; - bytes = rx_status.s.bcnt; - if (!bytes) - return; - - /* get where the dma engine would have written this data */ - address = cvmx_read64_uint64(cvmx_usbnx_dma0_inb_chn0(usb->index) + - channel * 8); - - ptr = cvmx_phys_to_ptr(address); - cvmx_write64_uint64(cvmx_usbnx_dma0_inb_chn0(usb->index) + channel * 8, - address + bytes); - - /* loop writing the fifo data for this packet into memory */ - while (bytes > 0) { - *ptr++ = cvmx_usb_read_csr32(usb, - usb_fifo_address(channel, usb->index)); - bytes -= 4; - } - cvmx_syncw; -} - -/** - * fill the tx hardware fifo with data out of the software - * fifos - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @fifo: software fifo to use - * @available: amount of space in the hardware fifo - * - * returns: non zero if the hardware fifo was too small and needs - * to be serviced again. 
- */ -static int cvmx_usb_fill_tx_hw(struct octeon_hcd *usb, - struct cvmx_usb_tx_fifo *fifo, int available) -{ - /* - * we're done either when there isn't anymore space or the software fifo - * is empty - */ - while (available && (fifo->head != fifo->tail)) { - int i = fifo->tail; - const u32 *ptr = cvmx_phys_to_ptr(fifo->entry[i].address); - u64 csr_address = usb_fifo_address(fifo->entry[i].channel, - usb->index) ^ 4; - int words = available; - - /* limit the amount of data to what the sw fifo has */ - if (fifo->entry[i].size <= available) { - words = fifo->entry[i].size; - fifo->tail++; - if (fifo->tail > max_channels) - fifo->tail = 0; - } - - /* update the next locations and counts */ - available -= words; - fifo->entry[i].address += words * 4; - fifo->entry[i].size -= words; - - /* - * write the hw fifo data. the read every three writes is due - * to an errata on cn3xxx chips - */ - while (words > 3) { - cvmx_write64_uint32(csr_address, *ptr++); - cvmx_write64_uint32(csr_address, *ptr++); - cvmx_write64_uint32(csr_address, *ptr++); - cvmx_read64_uint64( - cvmx_usbnx_dma0_inb_chn0(usb->index)); - words -= 3; - } - cvmx_write64_uint32(csr_address, *ptr++); - if (--words) { - cvmx_write64_uint32(csr_address, *ptr++); - if (--words) - cvmx_write64_uint32(csr_address, *ptr++); - } - cvmx_read64_uint64(cvmx_usbnx_dma0_inb_chn0(usb->index)); - } - return fifo->head != fifo->tail; -} - -/** - * check the hardware fifos and fill them as needed - * - * @usb: usb device state populated by cvmx_usb_initialize(). 
- */ -static void cvmx_usb_poll_tx_fifo(struct octeon_hcd *usb) -{ - if (usb->periodic.head != usb->periodic.tail) { - union cvmx_usbcx_hptxsts tx_status; - - tx_status.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hptxsts(usb->index)); - if (cvmx_usb_fill_tx_hw(usb, &usb->periodic, - tx_status.s.ptxfspcavail)) - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, ptxfempmsk, 1); - else - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, ptxfempmsk, 0); - } - - if (usb->nonperiodic.head != usb->nonperiodic.tail) { - union cvmx_usbcx_gnptxsts tx_status; - - tx_status.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_gnptxsts(usb->index)); - if (cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, - tx_status.s.nptxfspcavail)) - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, nptxfempmsk, 1); - else - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, nptxfempmsk, 0); - } -} - -/** - * fill the tx fifo with an outgoing packet - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @channel: channel number to get packet from - */ -static void cvmx_usb_fill_tx_fifo(struct octeon_hcd *usb, int channel) -{ - union cvmx_usbcx_hccharx hcchar; - union cvmx_usbcx_hcspltx usbc_hcsplt; - union cvmx_usbcx_hctsizx usbc_hctsiz; - struct cvmx_usb_tx_fifo *fifo; - - /* we only need to fill data on outbound channels */ - hcchar.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hccharx(channel, usb->index)); - if (hcchar.s.epdir != cvmx_usb_direction_out) - return; - - /* out splits only have data on the start and not the complete */ - usbc_hcsplt.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hcspltx(channel, usb->index)); - if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt) - return; - - /* - * find out how many bytes we need to fill and convert it into 32bit - * words. 
- */ - usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hctsizx(channel, usb->index)); - if (!usbc_hctsiz.s.xfersize) - return; - - if ((hcchar.s.eptype == cvmx_usb_transfer_interrupt) || - (hcchar.s.eptype == cvmx_usb_transfer_isochronous)) - fifo = &usb->periodic; - else - fifo = &usb->nonperiodic; - - fifo->entry[fifo->head].channel = channel; - fifo->entry[fifo->head].address = - cvmx_read64_uint64(cvmx_usbnx_dma0_outb_chn0(usb->index) + - channel * 8); - fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize + 3) >> 2; - fifo->head++; - if (fifo->head > max_channels) - fifo->head = 0; - - cvmx_usb_poll_tx_fifo(usb); -} - -/** - * perform channel specific setup for control transactions. all - * the generic stuff will already have been done in cvmx_usb_start_channel(). - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @channel: channel to setup - * @pipe: pipe for control transaction - */ -static void cvmx_usb_start_channel_control(struct octeon_hcd *usb, - int channel, - struct cvmx_usb_pipe *pipe) -{ - struct usb_hcd *hcd = octeon_to_hcd(usb); - struct device *dev = hcd->self.controller; - struct cvmx_usb_transaction *transaction = - list_first_entry(&pipe->transactions, typeof(*transaction), - node); - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - int bytes_to_transfer = transaction->buffer_length - - transaction->actual_bytes; - int packets_to_transfer; - union cvmx_usbcx_hctsizx usbc_hctsiz; - - usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hctsizx(channel, usb->index)); - - switch (transaction->stage) { - case cvmx_usb_stage_non_control: - case cvmx_usb_stage_non_control_split_complete: - dev_err(dev, "%s: error - non control stage ", __func__); - break; - case cvmx_usb_stage_setup: - usbc_hctsiz.s.pid = 3; /* setup */ - bytes_to_transfer = sizeof(*header); - /* all control operations start with a setup going out */ - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - 
cvmx_usbcx_hccharx, epdir, - cvmx_usb_direction_out); - /* - * setup send the control header instead of the buffer data. the - * buffer data will be used in the next stage - */ - cvmx_write64_uint64(cvmx_usbnx_dma0_outb_chn0(usb->index) + - channel * 8, - transaction->control_header); - break; - case cvmx_usb_stage_setup_split_complete: - usbc_hctsiz.s.pid = 3; /* setup */ - bytes_to_transfer = 0; - /* all control operations start with a setup going out */ - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - cvmx_usb_direction_out); - - usb_set_field32(cvmx_usbcx_hcspltx(channel, usb->index), - cvmx_usbcx_hcspltx, compsplt, 1); - break; - case cvmx_usb_stage_data: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (header->brequesttype & usb_dir_in) - bytes_to_transfer = 0; - else if (bytes_to_transfer > pipe->max_packet) - bytes_to_transfer = pipe->max_packet; - } - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->brequesttype & usb_dir_in) ? - cvmx_usb_direction_in : - cvmx_usb_direction_out)); - break; - case cvmx_usb_stage_data_split_complete: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - if (!(header->brequesttype & usb_dir_in)) - bytes_to_transfer = 0; - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->brequesttype & usb_dir_in) ? - cvmx_usb_direction_in : - cvmx_usb_direction_out)); - usb_set_field32(cvmx_usbcx_hcspltx(channel, usb->index), - cvmx_usbcx_hcspltx, compsplt, 1); - break; - case cvmx_usb_stage_status: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - bytes_to_transfer = 0; - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->brequesttype & usb_dir_in) ? 
- cvmx_usb_direction_out : - cvmx_usb_direction_in)); - break; - case cvmx_usb_stage_status_split_complete: - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - bytes_to_transfer = 0; - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - cvmx_usbcx_hccharx, epdir, - ((header->brequesttype & usb_dir_in) ? - cvmx_usb_direction_out : - cvmx_usb_direction_in)); - usb_set_field32(cvmx_usbcx_hcspltx(channel, usb->index), - cvmx_usbcx_hcspltx, compsplt, 1); - break; - } - - /* - * make sure the transfer never exceeds the byte limit of the hardware. - * further bytes will be sent as continued transactions - */ - if (bytes_to_transfer > max_transfer_bytes) { - /* round max_transfer_bytes to a multiple of out packet size */ - bytes_to_transfer = max_transfer_bytes / pipe->max_packet; - bytes_to_transfer *= pipe->max_packet; - } - - /* - * calculate the number of packets to transfer. if the length is zero - * we still need to transfer one packet - */ - packets_to_transfer = div_round_up(bytes_to_transfer, - pipe->max_packet); - if (packets_to_transfer == 0) { - packets_to_transfer = 1; - } else if ((packets_to_transfer > 1) && - (usb->init_flags & cvmx_usb_initialize_flags_no_dma)) { - /* - * limit to one packet when not using dma. 
channels must be - * restarted between every packet for in transactions, so there - * is no reason to do multiple packets in a row - */ - packets_to_transfer = 1; - bytes_to_transfer = packets_to_transfer * pipe->max_packet; - } else if (packets_to_transfer > max_transfer_packets) { - /* - * limit the number of packet and data transferred to what the - * hardware can handle - */ - packets_to_transfer = max_transfer_packets; - bytes_to_transfer = packets_to_transfer * pipe->max_packet; - } - - usbc_hctsiz.s.xfersize = bytes_to_transfer; - usbc_hctsiz.s.pktcnt = packets_to_transfer; - - cvmx_usb_write_csr32(usb, cvmx_usbcx_hctsizx(channel, usb->index), - usbc_hctsiz.u32); -} - -/** - * start a channel to perform the pipe's head transaction - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @channel: channel to setup - * @pipe: pipe to start - */ -static void cvmx_usb_start_channel(struct octeon_hcd *usb, int channel, - struct cvmx_usb_pipe *pipe) -{ - struct cvmx_usb_transaction *transaction = - list_first_entry(&pipe->transactions, typeof(*transaction), - node); - - /* make sure all writes to the dma region get flushed */ - cvmx_syncw; - - /* attach the channel to the pipe */ - usb->pipe_for_channel[channel] = pipe; - pipe->channel = channel; - pipe->flags |= cvmx_usb_pipe_flags_scheduled; - - /* mark this channel as in use */ - usb->idle_hardware_channels &= ~(1 << channel); - - /* enable the channel interrupt bits */ - { - union cvmx_usbcx_hcintx usbc_hcint; - union cvmx_usbcx_hcintmskx usbc_hcintmsk; - union cvmx_usbcx_haintmsk usbc_haintmsk; - - /* clear all channel status bits */ - usbc_hcint.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hcintx(channel, usb->index)); - - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hcintx(channel, usb->index), - usbc_hcint.u32); - - usbc_hcintmsk.u32 = 0; - usbc_hcintmsk.s.chhltdmsk = 1; - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) { - /* - * channels need these extra interrupts when we aren't - * in 
dma mode. - */ - usbc_hcintmsk.s.datatglerrmsk = 1; - usbc_hcintmsk.s.frmovrunmsk = 1; - usbc_hcintmsk.s.bblerrmsk = 1; - usbc_hcintmsk.s.xacterrmsk = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * splits don't generate xfercompl, so we need - * ack and nyet. - */ - usbc_hcintmsk.s.nyetmsk = 1; - usbc_hcintmsk.s.ackmsk = 1; - } - usbc_hcintmsk.s.nakmsk = 1; - usbc_hcintmsk.s.stallmsk = 1; - usbc_hcintmsk.s.xfercomplmsk = 1; - } - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hcintmskx(channel, usb->index), - usbc_hcintmsk.u32); - - /* enable the channel interrupt to propagate */ - usbc_haintmsk.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_haintmsk(usb->index)); - usbc_haintmsk.s.haintmsk |= 1 << channel; - cvmx_usb_write_csr32(usb, cvmx_usbcx_haintmsk(usb->index), - usbc_haintmsk.u32); - } - - /* setup the location the dma engine uses. */ - { - u64 reg; - u64 dma_address = transaction->buffer + - transaction->actual_bytes; - - if (transaction->type == cvmx_usb_transfer_isochronous) - dma_address = transaction->buffer + - transaction->iso_packets[0].offset + - transaction->actual_bytes; - - if (pipe->transfer_dir == cvmx_usb_direction_out) - reg = cvmx_usbnx_dma0_outb_chn0(usb->index); - else - reg = cvmx_usbnx_dma0_inb_chn0(usb->index); - cvmx_write64_uint64(reg + channel * 8, dma_address); - } - - /* setup both the size of the transfer and the split characteristics */ - { - union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0}; - union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0}; - int packets_to_transfer; - int bytes_to_transfer = transaction->buffer_length - - transaction->actual_bytes; - - /* - * isochronous transactions store each individual transfer size - * in the packet structure, not the global buffer_length - */ - if (transaction->type == cvmx_usb_transfer_isochronous) - bytes_to_transfer = - transaction->iso_packets[0].length - - transaction->actual_bytes; - - /* - * we need to do split transactions when we are talking to non - * high speed devices that 
are behind a high speed hub - */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * on the start split phase (stage is even) record the - * frame number we will need to send the split complete. - * we only store the lower two bits since the time ahead - * can only be two frames - */ - if ((transaction->stage & 1) == 0) { - if (transaction->type == cvmx_usb_transfer_bulk) - pipe->split_sc_frame = - (usb->frame_number + 1) & 0x7f; - else - pipe->split_sc_frame = - (usb->frame_number + 2) & 0x7f; - } else { - pipe->split_sc_frame = -1; - } - - usbc_hcsplt.s.spltena = 1; - usbc_hcsplt.s.hubaddr = pipe->hub_device_addr; - usbc_hcsplt.s.prtaddr = pipe->hub_port; - usbc_hcsplt.s.compsplt = (transaction->stage == - cvmx_usb_stage_non_control_split_complete); - - /* - * split transactions can only ever transmit one data - * packet so limit the transfer size to the max packet - * size - */ - if (bytes_to_transfer > pipe->max_packet) - bytes_to_transfer = pipe->max_packet; - - /* - * isochronous out splits are unique in that they limit - * data transfers to 188 byte chunks representing the - * begin/middle/end of the data or all - */ - if (!usbc_hcsplt.s.compsplt && - (pipe->transfer_dir == cvmx_usb_direction_out) && - (pipe->transfer_type == - cvmx_usb_transfer_isochronous)) { - /* - * clear the split complete frame number as - * there isn't going to be a split complete - */ - pipe->split_sc_frame = -1; - /* - * see if we've started this transfer and sent - * data - */ - if (transaction->actual_bytes == 0) { - /* - * nothing sent yet, this is either a - * begin or the entire payload - */ - if (bytes_to_transfer <= 188) - /* entire payload in one go */ - usbc_hcsplt.s.xactpos = 3; - else - /* first part of payload */ - usbc_hcsplt.s.xactpos = 2; - } else { - /* - * continuing the previous data, we must - * either be in the middle or at the end - */ - if (bytes_to_transfer <= 188) - /* end of payload */ - usbc_hcsplt.s.xactpos = 1; - else - /* middle of payload */ - 
usbc_hcsplt.s.xactpos = 0; - } - /* - * again, the transfer size is limited to 188 - * bytes - */ - if (bytes_to_transfer > 188) - bytes_to_transfer = 188; - } - } - - /* - * make sure the transfer never exceeds the byte limit of the - * hardware. further bytes will be sent as continued - * transactions - */ - if (bytes_to_transfer > max_transfer_bytes) { - /* - * round max_transfer_bytes to a multiple of out packet - * size - */ - bytes_to_transfer = max_transfer_bytes / - pipe->max_packet; - bytes_to_transfer *= pipe->max_packet; - } - - /* - * calculate the number of packets to transfer. if the length is - * zero we still need to transfer one packet - */ - packets_to_transfer = - div_round_up(bytes_to_transfer, pipe->max_packet); - if (packets_to_transfer == 0) { - packets_to_transfer = 1; - } else if ((packets_to_transfer > 1) && - (usb->init_flags & - cvmx_usb_initialize_flags_no_dma)) { - /* - * limit to one packet when not using dma. channels must - * be restarted between every packet for in - * transactions, so there is no reason to do multiple - * packets in a row - */ - packets_to_transfer = 1; - bytes_to_transfer = packets_to_transfer * - pipe->max_packet; - } else if (packets_to_transfer > max_transfer_packets) { - /* - * limit the number of packet and data transferred to - * what the hardware can handle - */ - packets_to_transfer = max_transfer_packets; - bytes_to_transfer = packets_to_transfer * - pipe->max_packet; - } - - usbc_hctsiz.s.xfersize = bytes_to_transfer; - usbc_hctsiz.s.pktcnt = packets_to_transfer; - - /* update the data0/data1 toggle */ - usbc_hctsiz.s.pid = cvmx_usb_get_data_pid(pipe); - /* - * high speed pipes may need a hardware ping before they start - */ - if (pipe->flags & cvmx_usb_pipe_flags_need_ping) - usbc_hctsiz.s.dopng = 1; - - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hcspltx(channel, usb->index), - usbc_hcsplt.u32); - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hctsizx(channel, usb->index), - usbc_hctsiz.u32); - } - - /* setup the 
host channel characteristics register */ - { - union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0}; - - /* - * set the startframe odd/even properly. this is only used for - * periodic - */ - usbc_hcchar.s.oddfrm = usb->frame_number & 1; - - /* - * set the number of back to back packets allowed by this - * endpoint. split transactions interpret "ec" as the number of - * immediate retries of failure. these retries happen too - * quickly, so we disable these entirely for splits - */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) - usbc_hcchar.s.ec = 1; - else if (pipe->multi_count < 1) - usbc_hcchar.s.ec = 1; - else if (pipe->multi_count > 3) - usbc_hcchar.s.ec = 3; - else - usbc_hcchar.s.ec = pipe->multi_count; - - /* set the rest of the endpoint specific settings */ - usbc_hcchar.s.devaddr = pipe->device_addr; - usbc_hcchar.s.eptype = transaction->type; - usbc_hcchar.s.lspddev = - (pipe->device_speed == cvmx_usb_speed_low); - usbc_hcchar.s.epdir = pipe->transfer_dir; - usbc_hcchar.s.epnum = pipe->endpoint_num; - usbc_hcchar.s.mps = pipe->max_packet; - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hccharx(channel, usb->index), - usbc_hcchar.u32); - } - - /* do transaction type specific fixups as needed */ - switch (transaction->type) { - case cvmx_usb_transfer_control: - cvmx_usb_start_channel_control(usb, channel, pipe); - break; - case cvmx_usb_transfer_bulk: - case cvmx_usb_transfer_interrupt: - break; - case cvmx_usb_transfer_isochronous: - if (!cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * iso transactions require different pids depending on - * direction and how many packets are needed - */ - if (pipe->transfer_dir == cvmx_usb_direction_out) { - if (pipe->multi_count < 2) /* need data0 */ - usb_set_field32( - cvmx_usbcx_hctsizx(channel, - usb->index), - cvmx_usbcx_hctsizx, pid, 0); - else /* need mdata */ - usb_set_field32( - cvmx_usbcx_hctsizx(channel, - usb->index), - cvmx_usbcx_hctsizx, pid, 3); - } - } - break; - } - { - union cvmx_usbcx_hctsizx usbc_hctsiz = { .u32 
= - cvmx_usb_read_csr32(usb, - cvmx_usbcx_hctsizx(channel, - usb->index)) - }; - transaction->xfersize = usbc_hctsiz.s.xfersize; - transaction->pktcnt = usbc_hctsiz.s.pktcnt; - } - /* remember when we start a split transaction */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) - usb->active_split = transaction; - usb_set_field32(cvmx_usbcx_hccharx(channel, usb->index), - cvmx_usbcx_hccharx, chena, 1); - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) - cvmx_usb_fill_tx_fifo(usb, channel); -} - -/** - * find a pipe that is ready to be scheduled to hardware. - * @usb: usb device state populated by cvmx_usb_initialize(). - * @xfer_type: transfer type - * - * returns: pipe or null if none are ready - */ -static struct cvmx_usb_pipe *cvmx_usb_find_ready_pipe(struct octeon_hcd *usb, - enum cvmx_usb_transfer xfer_type) -{ - struct list_head *list = usb->active_pipes + xfer_type; - u64 current_frame = usb->frame_number; - struct cvmx_usb_pipe *pipe; - - list_for_each_entry(pipe, list, node) { - struct cvmx_usb_transaction *t = - list_first_entry(&pipe->transactions, typeof(*t), - node); - if (!(pipe->flags & cvmx_usb_pipe_flags_scheduled) && t && - (pipe->next_tx_frame <= current_frame) && - ((pipe->split_sc_frame == -1) || - ((((int)current_frame - pipe->split_sc_frame) & 0x7f) < - 0x40)) && - (!usb->active_split || (usb->active_split == t))) { - prefetch(t); - return pipe; - } - } - return null; -} - -static struct cvmx_usb_pipe *cvmx_usb_next_pipe(struct octeon_hcd *usb, - int is_sof) -{ - struct cvmx_usb_pipe *pipe; - - /* find a pipe needing service. */ - if (is_sof) { - /* - * only process periodic pipes on sof interrupts. this way we - * are sure that the periodic data is sent in the beginning of - * the frame. 
- */ - pipe = cvmx_usb_find_ready_pipe(usb, - cvmx_usb_transfer_isochronous); - if (pipe) - return pipe; - pipe = cvmx_usb_find_ready_pipe(usb, - cvmx_usb_transfer_interrupt); - if (pipe) - return pipe; - } - pipe = cvmx_usb_find_ready_pipe(usb, cvmx_usb_transfer_control); - if (pipe) - return pipe; - return cvmx_usb_find_ready_pipe(usb, cvmx_usb_transfer_bulk); -} - -/** - * called whenever a pipe might need to be scheduled to the - * hardware. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @is_sof: true if this schedule was called on a sof interrupt. - */ -static void cvmx_usb_schedule(struct octeon_hcd *usb, int is_sof) -{ - int channel; - struct cvmx_usb_pipe *pipe; - int need_sof; - enum cvmx_usb_transfer ttype; - - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) { - /* - * without dma we need to be careful to not schedule something - * at the end of a frame and cause an overrun. - */ - union cvmx_usbcx_hfnum hfnum = { - .u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hfnum(usb->index)) - }; - - union cvmx_usbcx_hfir hfir = { - .u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hfir(usb->index)) - }; - - if (hfnum.s.frrem < hfir.s.frint / 4) - goto done; - } - - while (usb->idle_hardware_channels) { - /* find an idle channel */ - channel = __fls(usb->idle_hardware_channels); - if (unlikely(channel > 7)) - break; - - pipe = cvmx_usb_next_pipe(usb, is_sof); - if (!pipe) - break; - - cvmx_usb_start_channel(usb, channel, pipe); - } - -done: - /* - * only enable sof interrupts when we have transactions pending in the - * future that might need to be scheduled - */ - need_sof = 0; - for (ttype = cvmx_usb_transfer_control; - ttype <= cvmx_usb_transfer_interrupt; ttype++) { - list_for_each_entry(pipe, &usb->active_pipes[ttype], node) { - if (pipe->next_tx_frame > usb->frame_number) { - need_sof = 1; - break; - } - } - } - usb_set_field32(cvmx_usbcx_gintmsk(usb->index), - cvmx_usbcx_gintmsk, sofmsk, need_sof); -} - -static void 
octeon_usb_urb_complete_callback(struct octeon_hcd *usb, - enum cvmx_usb_status status, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction - *transaction, - int bytes_transferred, - struct urb *urb) -{ - struct usb_hcd *hcd = octeon_to_hcd(usb); - struct device *dev = hcd->self.controller; - - if (likely(status == cvmx_usb_status_ok)) - urb->actual_length = bytes_transferred; - else - urb->actual_length = 0; - - urb->hcpriv = null; - - /* for isochronous transactions we need to update the urb packet status - * list from data in our private copy - */ - if (usb_pipetype(urb->pipe) == pipe_isochronous) { - int i; - /* - * the pointer to the private list is stored in the setup_packet - * field. - */ - struct cvmx_usb_iso_packet *iso_packet = - (struct cvmx_usb_iso_packet *)urb->setup_packet; - /* recalculate the transfer size by adding up each packet */ - urb->actual_length = 0; - for (i = 0; i < urb->number_of_packets; i++) { - if (iso_packet[i].status == cvmx_usb_status_ok) { - urb->iso_frame_desc[i].status = 0; - urb->iso_frame_desc[i].actual_length = - iso_packet[i].length; - urb->actual_length += - urb->iso_frame_desc[i].actual_length; - } else { - dev_dbg(dev, "isochronous packet=%d of %d status=%d pipe=%p transaction=%p size=%d ", - i, urb->number_of_packets, - iso_packet[i].status, pipe, - transaction, iso_packet[i].length); - urb->iso_frame_desc[i].status = -eremoteio; - } - } - /* free the private list now that we don't need it anymore */ - kfree(iso_packet); - urb->setup_packet = null; - } - - switch (status) { - case cvmx_usb_status_ok: - urb->status = 0; - break; - case cvmx_usb_status_cancel: - if (urb->status == 0) - urb->status = -enoent; - break; - case cvmx_usb_status_stall: - dev_dbg(dev, "status=stall pipe=%p transaction=%p size=%d ", - pipe, transaction, bytes_transferred); - urb->status = -epipe; - break; - case cvmx_usb_status_babbleerr: - dev_dbg(dev, "status=babble pipe=%p transaction=%p size=%d ", - pipe, transaction, 
bytes_transferred); - urb->status = -epipe; - break; - case cvmx_usb_status_short: - dev_dbg(dev, "status=short pipe=%p transaction=%p size=%d ", - pipe, transaction, bytes_transferred); - urb->status = -eremoteio; - break; - case cvmx_usb_status_error: - case cvmx_usb_status_xacterr: - case cvmx_usb_status_datatglerr: - case cvmx_usb_status_frameerr: - dev_dbg(dev, "status=%d pipe=%p transaction=%p size=%d ", - status, pipe, transaction, bytes_transferred); - urb->status = -eproto; - break; - } - usb_hcd_unlink_urb_from_ep(octeon_to_hcd(usb), urb); - spin_unlock(&usb->lock); - usb_hcd_giveback_urb(octeon_to_hcd(usb), urb, urb->status); - spin_lock(&usb->lock); -} - -/** - * signal the completion of a transaction and free it. the - * transaction will be removed from the pipe transaction list. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: pipe the transaction is on - * @transaction: - * transaction that completed - * @complete_code: - * completion code - */ -static void cvmx_usb_complete(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - enum cvmx_usb_status complete_code) -{ - /* if this was a split then clear our split in progress marker */ - if (usb->active_split == transaction) - usb->active_split = null; - - /* - * isochronous transactions need extra processing as they might not be - * done after a single data transfer - */ - if (unlikely(transaction->type == cvmx_usb_transfer_isochronous)) { - /* update the number of bytes transferred in this iso packet */ - transaction->iso_packets[0].length = transaction->actual_bytes; - transaction->iso_packets[0].status = complete_code; - - /* - * if there are more isos pending and we succeeded, schedule the - * next one - */ - if ((transaction->iso_number_packets > 1) && - (complete_code == cvmx_usb_status_ok)) { - /* no bytes transferred for this packet as of yet */ - transaction->actual_bytes = 0; - /* one less iso waiting to transfer */ - 
transaction->iso_number_packets--; - /* increment to the next location in our packet array */ - transaction->iso_packets++; - transaction->stage = cvmx_usb_stage_non_control; - return; - } - } - - /* remove the transaction from the pipe list */ - list_del(&transaction->node); - if (list_empty(&pipe->transactions)) - list_move_tail(&pipe->node, &usb->idle_pipes); - octeon_usb_urb_complete_callback(usb, complete_code, pipe, - transaction, - transaction->actual_bytes, - transaction->urb); - kfree(transaction); -} - -/** - * submit a usb transaction to a pipe. called for all types - * of transactions. - * - * @usb: - * @pipe: which pipe to submit to. - * @type: transaction type - * @buffer: user buffer for the transaction - * @buffer_length: - * user buffer's length in bytes - * @control_header: - * for control transactions, the 8 byte standard header - * @iso_start_frame: - * for iso transactions, the start frame - * @iso_number_packets: - * for iso, the number of packet in the transaction. - * @iso_packets: - * a description of each iso packet - * @urb: urb for the callback - * - * returns: transaction or null on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_transaction( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - enum cvmx_usb_transfer type, - u64 buffer, - int buffer_length, - u64 control_header, - int iso_start_frame, - int iso_number_packets, - struct cvmx_usb_iso_packet *iso_packets, - struct urb *urb) -{ - struct cvmx_usb_transaction *transaction; - - if (unlikely(pipe->transfer_type != type)) - return null; - - transaction = kzalloc(sizeof(*transaction), gfp_atomic); - if (unlikely(!transaction)) - return null; - - transaction->type = type; - transaction->buffer = buffer; - transaction->buffer_length = buffer_length; - transaction->control_header = control_header; - /* fixme: this is not used, implement it. 
*/ - transaction->iso_start_frame = iso_start_frame; - transaction->iso_number_packets = iso_number_packets; - transaction->iso_packets = iso_packets; - transaction->urb = urb; - if (transaction->type == cvmx_usb_transfer_control) - transaction->stage = cvmx_usb_stage_setup; - else - transaction->stage = cvmx_usb_stage_non_control; - - if (!list_empty(&pipe->transactions)) { - list_add_tail(&transaction->node, &pipe->transactions); - } else { - list_add_tail(&transaction->node, &pipe->transactions); - list_move_tail(&pipe->node, - &usb->active_pipes[pipe->transfer_type]); - - /* - * we may need to schedule the pipe if this was the head of the - * pipe. - */ - cvmx_usb_schedule(usb, 0); - } - - return transaction; -} - -/** - * call to submit a usb bulk transfer to a pipe. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: handle to the pipe for the transfer. - * @urb: urb. - * - * returns: a submitted transaction or null on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_bulk( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - return cvmx_usb_submit_transaction(usb, pipe, cvmx_usb_transfer_bulk, - urb->transfer_dma, - urb->transfer_buffer_length, - 0, /* control_header */ - 0, /* iso_start_frame */ - 0, /* iso_number_packets */ - null, /* iso_packets */ - urb); -} - -/** - * call to submit a usb interrupt transfer to a pipe. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: handle to the pipe for the transfer. - * @urb: urb returned when the callback is called. - * - * returns: a submitted transaction or null on failure. 
- */ -static struct cvmx_usb_transaction *cvmx_usb_submit_interrupt( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - return cvmx_usb_submit_transaction(usb, pipe, - cvmx_usb_transfer_interrupt, - urb->transfer_dma, - urb->transfer_buffer_length, - 0, /* control_header */ - 0, /* iso_start_frame */ - 0, /* iso_number_packets */ - null, /* iso_packets */ - urb); -} - -/** - * call to submit a usb control transfer to a pipe. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: handle to the pipe for the transfer. - * @urb: urb. - * - * returns: a submitted transaction or null on failure. - */ -static struct cvmx_usb_transaction *cvmx_usb_submit_control( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - int buffer_length = urb->transfer_buffer_length; - u64 control_header = urb->setup_dma; - struct usb_ctrlrequest *header = cvmx_phys_to_ptr(control_header); - - if ((header->brequesttype & usb_dir_in) == 0) - buffer_length = le16_to_cpu(header->wlength); - - return cvmx_usb_submit_transaction(usb, pipe, - cvmx_usb_transfer_control, - urb->transfer_dma, buffer_length, - control_header, - 0, /* iso_start_frame */ - 0, /* iso_number_packets */ - null, /* iso_packets */ - urb); -} - -/** - * call to submit a usb isochronous transfer to a pipe. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: handle to the pipe for the transfer. - * @urb: urb returned when the callback is called. - * - * returns: a submitted transaction or null on failure. 
- */ -static struct cvmx_usb_transaction *cvmx_usb_submit_isochronous( - struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct urb *urb) -{ - struct cvmx_usb_iso_packet *packets; - - packets = (struct cvmx_usb_iso_packet *)urb->setup_packet; - return cvmx_usb_submit_transaction(usb, pipe, - cvmx_usb_transfer_isochronous, - urb->transfer_dma, - urb->transfer_buffer_length, - 0, /* control_header */ - urb->start_frame, - urb->number_of_packets, - packets, urb); -} - -/** - * cancel one outstanding request in a pipe. canceling a request - * can fail if the transaction has already completed before cancel - * is called. even after a successful cancel call, it may take - * a frame or two for the cvmx_usb_poll() function to call the - * associated callback. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: pipe to cancel requests in. - * @transaction: transaction to cancel, returned by the submit function. - * - * returns: 0 or a negative error code. - */ -static int cvmx_usb_cancel(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction) -{ - /* - * if the transaction is the head of the queue and scheduled. we need to - * treat it special - */ - if (list_first_entry(&pipe->transactions, typeof(*transaction), node) == - transaction && (pipe->flags & cvmx_usb_pipe_flags_scheduled)) { - union cvmx_usbcx_hccharx usbc_hcchar; - - usb->pipe_for_channel[pipe->channel] = null; - pipe->flags &= ~cvmx_usb_pipe_flags_scheduled; - - cvmx_syncw; - - usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hccharx(pipe->channel, usb->index)); - /* - * if the channel isn't enabled then the transaction already - * completed. 
- */ - if (usbc_hcchar.s.chena) { - usbc_hcchar.s.chdis = 1; - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hccharx(pipe->channel, - usb->index), - usbc_hcchar.u32); - } - } - cvmx_usb_complete(usb, pipe, transaction, cvmx_usb_status_cancel); - return 0; -} - -/** - * cancel all outstanding requests in a pipe. logically all this - * does is call cvmx_usb_cancel() in a loop. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: pipe to cancel requests in. - * - * returns: 0 or a negative error code. - */ -static int cvmx_usb_cancel_all(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe) -{ - struct cvmx_usb_transaction *transaction, *next; - - /* simply loop through and attempt to cancel each transaction */ - list_for_each_entry_safe(transaction, next, &pipe->transactions, node) { - int result = cvmx_usb_cancel(usb, pipe, transaction); - - if (unlikely(result != 0)) - return result; - } - return 0; -} - -/** - * close a pipe created with cvmx_usb_open_pipe(). - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * @pipe: pipe to close. - * - * returns: 0 or a negative error code. ebusy is returned if the pipe has - * outstanding transfers. - */ -static int cvmx_usb_close_pipe(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe) -{ - /* fail if the pipe has pending transactions */ - if (!list_empty(&pipe->transactions)) - return -ebusy; - - list_del(&pipe->node); - kfree(pipe); - - return 0; -} - -/** - * get the current usb protocol level frame number. the frame - * number is always in the range of 0-0x7ff. - * - * @usb: usb device state populated by cvmx_usb_initialize(). 
- * - * returns: usb frame number - */ -static int cvmx_usb_get_frame_number(struct octeon_hcd *usb) -{ - union cvmx_usbcx_hfnum usbc_hfnum; - - usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, cvmx_usbcx_hfnum(usb->index)); - - return usbc_hfnum.s.frnum; -} - -static void cvmx_usb_transfer_control(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - union cvmx_usbcx_hccharx usbc_hcchar, - int buffer_space_left, - int bytes_in_last_packet) -{ - switch (transaction->stage) { - case cvmx_usb_stage_non_control: - case cvmx_usb_stage_non_control_split_complete: - /* this should be impossible */ - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_error); - break; - case cvmx_usb_stage_setup: - pipe->pid_toggle = 1; - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->stage = - cvmx_usb_stage_setup_split_complete; - } else { - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - if (header->wlength) - transaction->stage = cvmx_usb_stage_data; - else - transaction->stage = cvmx_usb_stage_status; - } - break; - case cvmx_usb_stage_setup_split_complete: - { - struct usb_ctrlrequest *header = - cvmx_phys_to_ptr(transaction->control_header); - if (header->wlength) - transaction->stage = cvmx_usb_stage_data; - else - transaction->stage = cvmx_usb_stage_status; - } - break; - case cvmx_usb_stage_data: - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->stage = cvmx_usb_stage_data_split_complete; - /* - * for setup out data that are splits, - * the hardware doesn't appear to count - * transferred data. 
here we manually - * update the data transferred - */ - if (!usbc_hcchar.s.epdir) { - if (buffer_space_left < pipe->max_packet) - transaction->actual_bytes += - buffer_space_left; - else - transaction->actual_bytes += - pipe->max_packet; - } - } else if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->pid_toggle = 1; - transaction->stage = cvmx_usb_stage_status; - } - break; - case cvmx_usb_stage_data_split_complete: - if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->pid_toggle = 1; - transaction->stage = cvmx_usb_stage_status; - } else { - transaction->stage = cvmx_usb_stage_data; - } - break; - case cvmx_usb_stage_status: - if (cvmx_usb_pipe_needs_split(usb, pipe)) - transaction->stage = - cvmx_usb_stage_status_split_complete; - else - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_ok); - break; - case cvmx_usb_stage_status_split_complete: - cvmx_usb_complete(usb, pipe, transaction, cvmx_usb_status_ok); - break; - } -} - -static void cvmx_usb_transfer_bulk(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - union cvmx_usbcx_hcintx usbc_hcint, - int buffer_space_left, - int bytes_in_last_packet) -{ - /* - * the only time a bulk transfer isn't complete when it finishes with - * an ack is during a split transaction. for splits we need to continue - * the transfer if more data is needed. 
- */ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (transaction->stage == cvmx_usb_stage_non_control) - transaction->stage = - cvmx_usb_stage_non_control_split_complete; - else if (buffer_space_left && - (bytes_in_last_packet == pipe->max_packet)) - transaction->stage = cvmx_usb_stage_non_control; - else - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_ok); - } else { - if ((pipe->device_speed == cvmx_usb_speed_high) && - (pipe->transfer_dir == cvmx_usb_direction_out) && - (usbc_hcint.s.nak)) - pipe->flags |= cvmx_usb_pipe_flags_need_ping; - if (!buffer_space_left || - (bytes_in_last_packet < pipe->max_packet)) - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_ok); - } -} - -static void cvmx_usb_transfer_intr(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - int buffer_space_left, - int bytes_in_last_packet) -{ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - if (transaction->stage == cvmx_usb_stage_non_control) { - transaction->stage = - cvmx_usb_stage_non_control_split_complete; - } else if (buffer_space_left && - (bytes_in_last_packet == pipe->max_packet)) { - transaction->stage = cvmx_usb_stage_non_control; - } else { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_ok); - } - } else if (!buffer_space_left || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, cvmx_usb_status_ok); - } -} - -static void cvmx_usb_transfer_isoc(struct octeon_hcd *usb, - struct cvmx_usb_pipe *pipe, - struct cvmx_usb_transaction *transaction, - int buffer_space_left, - int bytes_in_last_packet, - int bytes_this_transfer) -{ - if (cvmx_usb_pipe_needs_split(usb, pipe)) { - /* - * isochronous out splits don't require a complete split stage. - * instead they use a sequence of begin out splits to transfer - * the data 188 bytes at a time. 
once the transfer is complete, - * the pipe sleeps until the next schedule interval. - */ - if (pipe->transfer_dir == cvmx_usb_direction_out) { - /* - * if no space left or this wasn't a max size packet - * then this transfer is complete. otherwise start it - * again to send the next 188 bytes - */ - if (!buffer_space_left || (bytes_this_transfer < 188)) { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_ok); - } - return; - } - if (transaction->stage == - cvmx_usb_stage_non_control_split_complete) { - /* - * we are in the incoming data phase. keep getting data - * until we run out of space or get a small packet - */ - if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_ok); - } - } else { - transaction->stage = - cvmx_usb_stage_non_control_split_complete; - } - } else { - pipe->next_tx_frame += pipe->interval; - cvmx_usb_complete(usb, pipe, transaction, cvmx_usb_status_ok); - } -} - -/** - * poll a channel for status - * - * @usb: usb device - * @channel: channel to poll - * - * returns: zero on success - */ -static int cvmx_usb_poll_channel(struct octeon_hcd *usb, int channel) -{ - struct usb_hcd *hcd = octeon_to_hcd(usb); - struct device *dev = hcd->self.controller; - union cvmx_usbcx_hcintx usbc_hcint; - union cvmx_usbcx_hctsizx usbc_hctsiz; - union cvmx_usbcx_hccharx usbc_hcchar; - struct cvmx_usb_pipe *pipe; - struct cvmx_usb_transaction *transaction; - int bytes_this_transfer; - int bytes_in_last_packet; - int packets_processed; - int buffer_space_left; - - /* read the interrupt status bits for the channel */ - usbc_hcint.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hcintx(channel, usb->index)); - - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) { - usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hccharx(channel, usb->index)); - - if (usbc_hcchar.s.chena && 
usbc_hcchar.s.chdis) { - /* - * there seems to be a bug in cn31xx which can cause - * interrupt in transfers to get stuck until we do a - * write of hccharx without changing things - */ - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hccharx(channel, - usb->index), - usbc_hcchar.u32); - return 0; - } - - /* - * in non dma mode the channels don't halt themselves. we need - * to manually disable channels that are left running - */ - if (!usbc_hcint.s.chhltd) { - if (usbc_hcchar.s.chena) { - union cvmx_usbcx_hcintmskx hcintmsk; - /* disable all interrupts except chhltd */ - hcintmsk.u32 = 0; - hcintmsk.s.chhltdmsk = 1; - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hcintmskx(channel, usb->index), - hcintmsk.u32); - usbc_hcchar.s.chdis = 1; - cvmx_usb_write_csr32(usb, - cvmx_usbcx_hccharx(channel, usb->index), - usbc_hcchar.u32); - return 0; - } else if (usbc_hcint.s.xfercompl) { - /* - * successful in/out with transfer complete. - * channel halt isn't needed. - */ - } else { - dev_err(dev, "usb%d: channel %d interrupt without halt ", - usb->index, channel); - return 0; - } - } - } else { - /* - * there is are no interrupts that we need to process when the - * channel is still running - */ - if (!usbc_hcint.s.chhltd) - return 0; - } - - /* disable the channel interrupts now that it is done */ - cvmx_usb_write_csr32(usb, cvmx_usbcx_hcintmskx(channel, usb->index), 0); - usb->idle_hardware_channels |= (1 << channel); - - /* make sure this channel is tied to a valid pipe */ - pipe = usb->pipe_for_channel[channel]; - prefetch(pipe); - if (!pipe) - return 0; - transaction = list_first_entry(&pipe->transactions, - typeof(*transaction), - node); - prefetch(transaction); - - /* - * disconnect this pipe from the hw channel. 
later the schedule - * function will figure out which pipe needs to go - */ - usb->pipe_for_channel[channel] = null; - pipe->flags &= ~cvmx_usb_pipe_flags_scheduled; - - /* - * read the channel config info so we can figure out how much data - * transferred - */ - usbc_hcchar.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hccharx(channel, usb->index)); - usbc_hctsiz.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_hctsizx(channel, usb->index)); - - /* - * calculating the number of bytes successfully transferred is dependent - * on the transfer direction - */ - packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt; - if (usbc_hcchar.s.epdir) { - /* - * in transactions are easy. for every byte received the - * hardware decrements xfersize. all we need to do is subtract - * the current value of xfersize from its starting value and we - * know how many bytes were written to the buffer - */ - bytes_this_transfer = transaction->xfersize - - usbc_hctsiz.s.xfersize; - } else { - /* - * out transaction don't decrement xfersize. instead pktcnt is - * decremented on every successful packet send. the hardware - * does this when it receives an ack, or nyet. if it doesn't - * receive one of these responses pktcnt doesn't change - */ - bytes_this_transfer = packets_processed * usbc_hcchar.s.mps; - /* - * the last packet may not be a full transfer if we didn't have - * enough data - */ - if (bytes_this_transfer > transaction->xfersize) - bytes_this_transfer = transaction->xfersize; - } - /* figure out how many bytes were in the last packet of the transfer */ - if (packets_processed) - bytes_in_last_packet = bytes_this_transfer - - (packets_processed - 1) * usbc_hcchar.s.mps; - else - bytes_in_last_packet = bytes_this_transfer; - - /* - * as a special case, setup transactions output the setup header, not - * the user's data. 
for this reason we don't count setup data as bytes - * transferred - */ - if ((transaction->stage == cvmx_usb_stage_setup) || - (transaction->stage == cvmx_usb_stage_setup_split_complete)) - bytes_this_transfer = 0; - - /* - * add the bytes transferred to the running total. it is important that - * bytes_this_transfer doesn't count any data that needs to be - * retransmitted - */ - transaction->actual_bytes += bytes_this_transfer; - if (transaction->type == cvmx_usb_transfer_isochronous) - buffer_space_left = transaction->iso_packets[0].length - - transaction->actual_bytes; - else - buffer_space_left = transaction->buffer_length - - transaction->actual_bytes; - - /* - * we need to remember the pid toggle state for the next transaction. - * the hardware already updated it for the next transaction - */ - pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0); - - /* - * for high speed bulk out, assume the next transaction will need to do - * a ping before proceeding. if this isn't true the ack processing below - * will clear this flag - */ - if ((pipe->device_speed == cvmx_usb_speed_high) && - (pipe->transfer_type == cvmx_usb_transfer_bulk) && - (pipe->transfer_dir == cvmx_usb_direction_out)) - pipe->flags |= cvmx_usb_pipe_flags_need_ping; - - if (warn_on_once(bytes_this_transfer < 0)) { - /* - * in some rare cases the dma engine seems to get stuck and - * keeps substracting same byte count over and over again. in - * such case we just need to fail every transaction. - */ - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_error); - return 0; - } - - if (usbc_hcint.s.stall) { - /* - * stall as a response means this transaction cannot be - * completed because the device can't process transactions. tell - * the user. 
any data that was transferred will be counted on - * the actual bytes transferred - */ - pipe->pid_toggle = 0; - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_stall); - } else if (usbc_hcint.s.xacterr) { - /* - * xacterr as a response means the device signaled - * something wrong with the transfer. for example, pid - * toggle errors cause these. - */ - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_xacterr); - } else if (usbc_hcint.s.bblerr) { - /* babble error (bblerr) */ - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_babbleerr); - } else if (usbc_hcint.s.datatglerr) { - /* data toggle error */ - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_datatglerr); - } else if (usbc_hcint.s.nyet) { - /* - * nyet as a response is only allowed in three cases: as a - * response to a ping, as a response to a split transaction, and - * as a response to a bulk out. the ping case is handled by - * hardware, so we only have splits and bulk out - */ - if (!cvmx_usb_pipe_needs_split(usb, pipe)) { - transaction->retries = 0; - /* - * if there is more data to go then we need to try - * again. otherwise this transaction is complete - */ - if ((buffer_space_left == 0) || - (bytes_in_last_packet < pipe->max_packet)) - cvmx_usb_complete(usb, pipe, - transaction, - cvmx_usb_status_ok); - } else { - /* - * split transactions retry the split complete 4 times - * then rewind to the start split and do the entire - * transactions again - */ - transaction->retries++; - if ((transaction->retries & 0x3) == 0) { - /* - * rewind to the beginning of the transaction by - * anding off the split complete bit - */ - transaction->stage &= ~1; - pipe->split_sc_frame = -1; - } - } - } else if (usbc_hcint.s.ack) { - transaction->retries = 0; - /* - * the ack bit can only be checked after the other error bits. - * this is because a multi packet transfer may succeed in a - * number of packets and then get a different response on the - * last packet. 
in this case both ack and the last response bit - * will be set. if none of the other response bits is set, then - * the last packet must have been an ack - * - * since we got an ack, we know we don't need to do a ping on - * this pipe - */ - pipe->flags &= ~cvmx_usb_pipe_flags_need_ping; - - switch (transaction->type) { - case cvmx_usb_transfer_control: - cvmx_usb_transfer_control(usb, pipe, transaction, - usbc_hcchar, - buffer_space_left, - bytes_in_last_packet); - break; - case cvmx_usb_transfer_bulk: - cvmx_usb_transfer_bulk(usb, pipe, transaction, - usbc_hcint, buffer_space_left, - bytes_in_last_packet); - break; - case cvmx_usb_transfer_interrupt: - cvmx_usb_transfer_intr(usb, pipe, transaction, - buffer_space_left, - bytes_in_last_packet); - break; - case cvmx_usb_transfer_isochronous: - cvmx_usb_transfer_isoc(usb, pipe, transaction, - buffer_space_left, - bytes_in_last_packet, - bytes_this_transfer); - break; - } - } else if (usbc_hcint.s.nak) { - /* - * if this was a split then clear our split in progress marker. - */ - if (usb->active_split == transaction) - usb->active_split = null; - /* - * nak as a response means the device couldn't accept the - * transaction, but it should be retried in the future. rewind - * to the beginning of the transaction by anding off the split - * complete bit. 
retry in the next interval - */ - transaction->retries = 0; - transaction->stage &= ~1; - pipe->next_tx_frame += pipe->interval; - if (pipe->next_tx_frame < usb->frame_number) - pipe->next_tx_frame = usb->frame_number + - pipe->interval - - (usb->frame_number - pipe->next_tx_frame) % - pipe->interval; - } else { - struct cvmx_usb_port_status port; - - port = cvmx_usb_get_status(usb); - if (port.port_enabled) { - /* we'll retry the exact same transaction again */ - transaction->retries++; - } else { - /* - * we get channel halted interrupts with no result bits - * sets when the cable is unplugged - */ - cvmx_usb_complete(usb, pipe, transaction, - cvmx_usb_status_error); - } - } - return 0; -} - -static void octeon_usb_port_callback(struct octeon_hcd *usb) -{ - spin_unlock(&usb->lock); - usb_hcd_poll_rh_status(octeon_to_hcd(usb)); - spin_lock(&usb->lock); -} - -/** - * poll the usb block for status and call all needed callback - * handlers. this function is meant to be called in the interrupt - * handler for the usb controller. it can also be called - * periodically in a loop for non-interrupt based operation. - * - * @usb: usb device state populated by cvmx_usb_initialize(). - * - * returns: 0 or a negative error code. 
- */ -static int cvmx_usb_poll(struct octeon_hcd *usb) -{ - union cvmx_usbcx_hfnum usbc_hfnum; - union cvmx_usbcx_gintsts usbc_gintsts; - - prefetch_range(usb, sizeof(*usb)); - - /* update the frame counter */ - usbc_hfnum.u32 = cvmx_usb_read_csr32(usb, cvmx_usbcx_hfnum(usb->index)); - if ((usb->frame_number & 0x3fff) > usbc_hfnum.s.frnum) - usb->frame_number += 0x4000; - usb->frame_number &= ~0x3fffull; - usb->frame_number |= usbc_hfnum.s.frnum; - - /* read the pending interrupts */ - usbc_gintsts.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_gintsts(usb->index)); - - /* clear the interrupts now that we know about them */ - cvmx_usb_write_csr32(usb, cvmx_usbcx_gintsts(usb->index), - usbc_gintsts.u32); - - if (usbc_gintsts.s.rxflvl) { - /* - * rxfifo non-empty (rxflvl) - * indicates that there is at least one packet pending to be - * read from the rxfifo. - * - * in dma mode this is handled by hardware - */ - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) - cvmx_usb_poll_rx_fifo(usb); - } - if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) { - /* fill the tx fifos when not in dma mode */ - if (usb->init_flags & cvmx_usb_initialize_flags_no_dma) - cvmx_usb_poll_tx_fifo(usb); - } - if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) { - union cvmx_usbcx_hprt usbc_hprt; - /* - * disconnect detected interrupt (disconnint) - * asserted when a device disconnect is detected. - * - * host port interrupt (prtint) - * the core sets this bit to indicate a change in port status of - * one of the o2p usb core ports in host mode. the application - * must read the host port control and status (hprt) register to - * determine the exact event that caused this interrupt. the - * application must clear the appropriate status bit in the host - * port control and status register to clear this bit. 
- * - * call the user's port callback - */ - octeon_usb_port_callback(usb); - /* clear the port change bits */ - usbc_hprt.u32 = - cvmx_usb_read_csr32(usb, cvmx_usbcx_hprt(usb->index)); - usbc_hprt.s.prtena = 0; - cvmx_usb_write_csr32(usb, cvmx_usbcx_hprt(usb->index), - usbc_hprt.u32); - } - if (usbc_gintsts.s.hchint) { - /* - * host channels interrupt (hchint) - * the core sets this bit to indicate that an interrupt is - * pending on one of the channels of the core (in host mode). - * the application must read the host all channels interrupt - * (haint) register to determine the exact number of the channel - * on which the interrupt occurred, and then read the - * corresponding host channel-n interrupt (hcintn) register to - * determine the exact cause of the interrupt. the application - * must clear the appropriate status bit in the hcintn register - * to clear this bit. - */ - union cvmx_usbcx_haint usbc_haint; - - usbc_haint.u32 = cvmx_usb_read_csr32(usb, - cvmx_usbcx_haint(usb->index)); - while (usbc_haint.u32) { - int channel; - - channel = __fls(usbc_haint.u32); - cvmx_usb_poll_channel(usb, channel); - usbc_haint.u32 ^= 1 << channel; - } - } - - cvmx_usb_schedule(usb, usbc_gintsts.s.sof); - - return 0; -} - -/* convert between an hcd pointer and the corresponding struct octeon_hcd */ -static inline struct octeon_hcd *hcd_to_octeon(struct usb_hcd *hcd) -{ - return (struct octeon_hcd *)(hcd->hcd_priv); -} - -static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - unsigned long flags; - - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_poll(usb); - spin_unlock_irqrestore(&usb->lock, flags); - return irq_handled; -} - -static int octeon_usb_start(struct usb_hcd *hcd) -{ - hcd->state = hc_state_running; - return 0; -} - -static void octeon_usb_stop(struct usb_hcd *hcd) -{ - hcd->state = hc_state_halt; -} - -static int octeon_usb_get_frame_number(struct usb_hcd *hcd) -{ - struct octeon_hcd *usb = 
hcd_to_octeon(hcd); - - return cvmx_usb_get_frame_number(usb); -} - -static int octeon_usb_urb_enqueue(struct usb_hcd *hcd, - struct urb *urb, - gfp_t mem_flags) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct device *dev = hcd->self.controller; - struct cvmx_usb_transaction *transaction = null; - struct cvmx_usb_pipe *pipe; - unsigned long flags; - struct cvmx_usb_iso_packet *iso_packet; - struct usb_host_endpoint *ep = urb->ep; - int rc; - - urb->status = 0; - spin_lock_irqsave(&usb->lock, flags); - - rc = usb_hcd_link_urb_to_ep(hcd, urb); - if (rc) { - spin_unlock_irqrestore(&usb->lock, flags); - return rc; - } - - if (!ep->hcpriv) { - enum cvmx_usb_transfer transfer_type; - enum cvmx_usb_speed speed; - int split_device = 0; - int split_port = 0; - - switch (usb_pipetype(urb->pipe)) { - case pipe_isochronous: - transfer_type = cvmx_usb_transfer_isochronous; - break; - case pipe_interrupt: - transfer_type = cvmx_usb_transfer_interrupt; - break; - case pipe_control: - transfer_type = cvmx_usb_transfer_control; - break; - default: - transfer_type = cvmx_usb_transfer_bulk; - break; - } - switch (urb->dev->speed) { - case usb_speed_low: - speed = cvmx_usb_speed_low; - break; - case usb_speed_full: - speed = cvmx_usb_speed_full; - break; - default: - speed = cvmx_usb_speed_high; - break; - } - /* - * for slow devices on high speed ports we need to find the hub - * that does the speed translation so we know where to send the - * split transactions. - */ - if (speed != cvmx_usb_speed_high) { - /* - * start at this device and work our way up the usb - * tree. - */ - struct usb_device *dev = urb->dev; - - while (dev->parent) { - /* - * if our parent is high speed then he'll - * receive the splits. - */ - if (dev->parent->speed == usb_speed_high) { - split_device = dev->parent->devnum; - split_port = dev->portnum; - break; - } - /* - * move up the tree one level. 
if we make it all - * the way up the tree, then the port must not - * be in high speed mode and we don't need a - * split. - */ - dev = dev->parent; - } - } - pipe = cvmx_usb_open_pipe(usb, usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe), speed, - le16_to_cpu(ep->desc.wmaxpacketsize) - & 0x7ff, - transfer_type, - usb_pipein(urb->pipe) ? - cvmx_usb_direction_in : - cvmx_usb_direction_out, - urb->interval, - (le16_to_cpu(ep->desc.wmaxpacketsize) - >> 11) & 0x3, - split_device, split_port); - if (!pipe) { - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&usb->lock, flags); - dev_dbg(dev, "failed to create pipe "); - return -enomem; - } - ep->hcpriv = pipe; - } else { - pipe = ep->hcpriv; - } - - switch (usb_pipetype(urb->pipe)) { - case pipe_isochronous: - dev_dbg(dev, "submit isochronous to %d.%d ", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - /* - * allocate a structure to use for our private list of - * isochronous packets. - */ - iso_packet = kmalloc_array(urb->number_of_packets, - sizeof(struct cvmx_usb_iso_packet), - gfp_atomic); - if (iso_packet) { - int i; - /* fill the list with the data from the urb */ - for (i = 0; i < urb->number_of_packets; i++) { - iso_packet[i].offset = - urb->iso_frame_desc[i].offset; - iso_packet[i].length = - urb->iso_frame_desc[i].length; - iso_packet[i].status = cvmx_usb_status_error; - } - /* - * store a pointer to the list in the urb setup_packet - * field. we know this currently isn't being used and - * this saves us a bunch of logic. - */ - urb->setup_packet = (char *)iso_packet; - transaction = cvmx_usb_submit_isochronous(usb, - pipe, urb); - /* - * if submit failed we need to free our private packet - * list. 
- */ - if (!transaction) { - urb->setup_packet = null; - kfree(iso_packet); - } - } - break; - case pipe_interrupt: - dev_dbg(dev, "submit interrupt to %d.%d ", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_interrupt(usb, pipe, urb); - break; - case pipe_control: - dev_dbg(dev, "submit control to %d.%d ", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_control(usb, pipe, urb); - break; - case pipe_bulk: - dev_dbg(dev, "submit bulk to %d.%d ", - usb_pipedevice(urb->pipe), - usb_pipeendpoint(urb->pipe)); - transaction = cvmx_usb_submit_bulk(usb, pipe, urb); - break; - } - if (!transaction) { - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&usb->lock, flags); - dev_dbg(dev, "failed to submit "); - return -enomem; - } - urb->hcpriv = transaction; - spin_unlock_irqrestore(&usb->lock, flags); - return 0; -} - -static int octeon_usb_urb_dequeue(struct usb_hcd *hcd, - struct urb *urb, - int status) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - unsigned long flags; - int rc; - - if (!urb->dev) - return -einval; - - spin_lock_irqsave(&usb->lock, flags); - - rc = usb_hcd_check_unlink_urb(hcd, urb, status); - if (rc) - goto out; - - urb->status = status; - cvmx_usb_cancel(usb, urb->ep->hcpriv, urb->hcpriv); - -out: - spin_unlock_irqrestore(&usb->lock, flags); - - return rc; -} - -static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, - struct usb_host_endpoint *ep) -{ - struct device *dev = hcd->self.controller; - - if (ep->hcpriv) { - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct cvmx_usb_pipe *pipe = ep->hcpriv; - unsigned long flags; - - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_cancel_all(usb, pipe); - if (cvmx_usb_close_pipe(usb, pipe)) - dev_dbg(dev, "closing pipe %p failed ", pipe); - spin_unlock_irqrestore(&usb->lock, flags); - ep->hcpriv = null; - } -} - -static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf) -{ - 
struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct cvmx_usb_port_status port_status; - unsigned long flags; - - spin_lock_irqsave(&usb->lock, flags); - port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - buf[0] = port_status.connect_change << 1; - - return buf[0] != 0; -} - -static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue, - u16 windex, char *buf, u16 wlength) -{ - struct octeon_hcd *usb = hcd_to_octeon(hcd); - struct device *dev = hcd->self.controller; - struct cvmx_usb_port_status usb_port_status; - int port_status; - struct usb_hub_descriptor *desc; - unsigned long flags; - - switch (typereq) { - case clearhubfeature: - dev_dbg(dev, "clearhubfeature "); - switch (wvalue) { - case c_hub_local_power: - case c_hub_over_current: - /* nothing required here */ - break; - default: - return -einval; - } - break; - case clearportfeature: - dev_dbg(dev, "clearportfeature "); - if (windex != 1) { - dev_dbg(dev, " invalid "); - return -einval; - } - - switch (wvalue) { - case usb_port_feat_enable: - dev_dbg(dev, " enable "); - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_disable(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case usb_port_feat_suspend: - dev_dbg(dev, " suspend "); - /* not supported on octeon */ - break; - case usb_port_feat_power: - dev_dbg(dev, " power "); - /* not supported on octeon */ - break; - case usb_port_feat_indicator: - dev_dbg(dev, " indicator "); - /* port inidicator not supported */ - break; - case usb_port_feat_c_connection: - dev_dbg(dev, " c_connection "); - /* clears drivers internal connect status change flag */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case usb_port_feat_c_reset: - dev_dbg(dev, " c_reset "); - /* - * clears the driver's internal port reset change flag. 
- */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case usb_port_feat_c_enable: - dev_dbg(dev, " c_enable "); - /* - * clears the driver's internal port enable/disable - * change flag. - */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - case usb_port_feat_c_suspend: - dev_dbg(dev, " c_suspend "); - /* - * clears the driver's internal port suspend change - * flag, which is set when resume signaling on the host - * port is complete. - */ - break; - case usb_port_feat_c_over_current: - dev_dbg(dev, " c_over_current "); - /* clears the driver's overcurrent change flag */ - spin_lock_irqsave(&usb->lock, flags); - usb->port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - break; - default: - dev_dbg(dev, " unknown "); - return -einval; - } - break; - case gethubdescriptor: - dev_dbg(dev, "gethubdescriptor "); - desc = (struct usb_hub_descriptor *)buf; - desc->bdesclength = 9; - desc->bdescriptortype = 0x29; - desc->bnbrports = 1; - desc->whubcharacteristics = cpu_to_le16(0x08); - desc->bpwron2pwrgood = 1; - desc->bhubcontrcurrent = 0; - desc->u.hs.deviceremovable[0] = 0; - desc->u.hs.deviceremovable[1] = 0xff; - break; - case gethubstatus: - dev_dbg(dev, "gethubstatus "); - *(__le32 *)buf = 0; - break; - case getportstatus: - dev_dbg(dev, "getportstatus "); - if (windex != 1) { - dev_dbg(dev, " invalid "); - return -einval; - } - - spin_lock_irqsave(&usb->lock, flags); - usb_port_status = cvmx_usb_get_status(usb); - spin_unlock_irqrestore(&usb->lock, flags); - port_status = 0; - - if (usb_port_status.connect_change) { - port_status |= (1 << usb_port_feat_c_connection); - dev_dbg(dev, " c_connection "); - } - - if (usb_port_status.port_enabled) { - port_status |= (1 << usb_port_feat_c_enable); - dev_dbg(dev, " c_enable "); - } - - if 
(usb_port_status.connected) { - port_status |= (1 << usb_port_feat_connection); - dev_dbg(dev, " connection "); - } - - if (usb_port_status.port_enabled) { - port_status |= (1 << usb_port_feat_enable); - dev_dbg(dev, " enable "); - } - - if (usb_port_status.port_over_current) { - port_status |= (1 << usb_port_feat_over_current); - dev_dbg(dev, " over_current "); - } - - if (usb_port_status.port_powered) { - port_status |= (1 << usb_port_feat_power); - dev_dbg(dev, " power "); - } - - if (usb_port_status.port_speed == cvmx_usb_speed_high) { - port_status |= usb_port_stat_high_speed; - dev_dbg(dev, " highspeed "); - } else if (usb_port_status.port_speed == cvmx_usb_speed_low) { - port_status |= (1 << usb_port_feat_lowspeed); - dev_dbg(dev, " lowspeed "); - } - - *((__le32 *)buf) = cpu_to_le32(port_status); - break; - case sethubfeature: - dev_dbg(dev, "sethubfeature "); - /* no hub features supported */ - break; - case setportfeature: - dev_dbg(dev, "setportfeature "); - if (windex != 1) { - dev_dbg(dev, " invalid "); - return -einval; - } - - switch (wvalue) { - case usb_port_feat_suspend: - dev_dbg(dev, " suspend "); - return -einval; - case usb_port_feat_power: - dev_dbg(dev, " power "); - /* - * program the port power bit to drive vbus on the usb. 
- */ - spin_lock_irqsave(&usb->lock, flags); - usb_set_field32(cvmx_usbcx_hprt(usb->index), - cvmx_usbcx_hprt, prtpwr, 1); - spin_unlock_irqrestore(&usb->lock, flags); - return 0; - case usb_port_feat_reset: - dev_dbg(dev, " reset "); - spin_lock_irqsave(&usb->lock, flags); - cvmx_usb_reset_port(usb); - spin_unlock_irqrestore(&usb->lock, flags); - return 0; - case usb_port_feat_indicator: - dev_dbg(dev, " indicator "); - /* not supported */ - break; - default: - dev_dbg(dev, " unknown "); - return -einval; - } - break; - default: - dev_dbg(dev, "unknown root hub request "); - return -einval; - } - return 0; -} - -static const struct hc_driver octeon_hc_driver = { - .description = "octeon usb", - .product_desc = "octeon host controller", - .hcd_priv_size = sizeof(struct octeon_hcd), - .irq = octeon_usb_irq, - .flags = hcd_memory | hcd_dma | hcd_usb2, - .start = octeon_usb_start, - .stop = octeon_usb_stop, - .urb_enqueue = octeon_usb_urb_enqueue, - .urb_dequeue = octeon_usb_urb_dequeue, - .endpoint_disable = octeon_usb_endpoint_disable, - .get_frame_number = octeon_usb_get_frame_number, - .hub_status_data = octeon_usb_hub_status_data, - .hub_control = octeon_usb_hub_control, - .map_urb_for_dma = octeon_map_urb_for_dma, - .unmap_urb_for_dma = octeon_unmap_urb_for_dma, -}; - -static int octeon_usb_probe(struct platform_device *pdev) -{ - int status; - int initialize_flags; - int usb_num; - struct resource *res_mem; - struct device_node *usbn_node; - int irq = platform_get_irq(pdev, 0); - struct device *dev = &pdev->dev; - struct octeon_hcd *usb; - struct usb_hcd *hcd; - u32 clock_rate = 48000000; - bool is_crystal_clock = false; - const char *clock_type; - int i; - - if (!dev->of_node) { - dev_err(dev, "error: empty of_node "); - return -enxio; - } - usbn_node = dev->of_node->parent; - - i = of_property_read_u32(usbn_node, - "clock-frequency", &clock_rate); - if (i) - i = of_property_read_u32(usbn_node, - "refclk-frequency", &clock_rate); - if (i) { - dev_err(dev, "no 
usbn "clock-frequency" "); - return -enxio; - } - switch (clock_rate) { - case 12000000: - initialize_flags = cvmx_usb_initialize_flags_clock_12mhz; - break; - case 24000000: - initialize_flags = cvmx_usb_initialize_flags_clock_24mhz; - break; - case 48000000: - initialize_flags = cvmx_usb_initialize_flags_clock_48mhz; - break; - default: - dev_err(dev, "illegal usbn "clock-frequency" %u ", - clock_rate); - return -enxio; - } - - i = of_property_read_string(usbn_node, - "cavium,refclk-type", &clock_type); - if (i) - i = of_property_read_string(usbn_node, - "refclk-type", &clock_type); - - if (!i && strcmp("crystal", clock_type) == 0) - is_crystal_clock = true; - - if (is_crystal_clock) - initialize_flags |= cvmx_usb_initialize_flags_clock_xo_xi; - else - initialize_flags |= cvmx_usb_initialize_flags_clock_xo_gnd; - - res_mem = platform_get_resource(pdev, ioresource_mem, 0); - if (!res_mem) { - dev_err(dev, "found no memory resource "); - return -enxio; - } - usb_num = (res_mem->start >> 44) & 1; - - if (irq < 0) { - /* defective device tree, but we know how to fix it. */ - irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56; - - irq = irq_create_mapping(null, hwirq); - } - - /* - * set the dma mask to 64bits so we get buffers already translated for - * dma. - */ - i = dma_coerce_mask_and_coherent(dev, dma_bit_mask(64)); - if (i) - return i; - - /* - * only cn52xx and cn56xx have dwc_otg usb hardware and the - * iob priority registers. under heavy network load usb - * hardware can be starved by the iob causing a crash. give - * it a priority boost if it has been waiting more than 400 - * cycles to avoid this situation. - * - * testing indicates that a cnt_val of 8192 is not sufficient, - * but no failures are seen with 4096. we choose a value of - * 400 to give a safety factor of 10. 
- */ - if (octeon_is_model(octeon_cn52xx) || octeon_is_model(octeon_cn56xx)) { - union cvmx_iob_n2c_l2c_pri_cnt pri_cnt; - - pri_cnt.u64 = 0; - pri_cnt.s.cnt_enb = 1; - pri_cnt.s.cnt_val = 400; - cvmx_write_csr(cvmx_iob_n2c_l2c_pri_cnt, pri_cnt.u64); - } - - hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev)); - if (!hcd) { - dev_dbg(dev, "failed to allocate memory for hcd "); - return -1; - } - hcd->uses_new_polling = 1; - usb = (struct octeon_hcd *)hcd->hcd_priv; - - spin_lock_init(&usb->lock); - - usb->init_flags = initialize_flags; - - /* initialize the usb state structure */ - usb->index = usb_num; - init_list_head(&usb->idle_pipes); - for (i = 0; i < array_size(usb->active_pipes); i++) - init_list_head(&usb->active_pipes[i]); - - /* due to an errata, cn31xx doesn't support dma */ - if (octeon_is_model(octeon_cn31xx)) { - usb->init_flags |= cvmx_usb_initialize_flags_no_dma; - /* only use one channel with non dma */ - usb->idle_hardware_channels = 0x1; - } else if (octeon_is_model(octeon_cn5xxx)) { - /* cn5xxx have an errata with channel 3 */ - usb->idle_hardware_channels = 0xf7; - } else { - usb->idle_hardware_channels = 0xff; - } - - status = cvmx_usb_initialize(dev, usb); - if (status) { - dev_dbg(dev, "usb initialization failed with %d ", status); - usb_put_hcd(hcd); - return -1; - } - - status = usb_add_hcd(hcd, irq, 0); - if (status) { - dev_dbg(dev, "usb add hcd failed with %d ", status); - usb_put_hcd(hcd); - return -1; - } - device_wakeup_enable(hcd->self.controller); - - dev_info(dev, "registered hcd for port %d on irq %d ", usb_num, irq); - - return 0; -} - -static int octeon_usb_remove(struct platform_device *pdev) -{ - int status; - struct device *dev = &pdev->dev; - struct usb_hcd *hcd = dev_get_drvdata(dev); - struct octeon_hcd *usb = hcd_to_octeon(hcd); - unsigned long flags; - - usb_remove_hcd(hcd); - spin_lock_irqsave(&usb->lock, flags); - status = cvmx_usb_shutdown(usb); - spin_unlock_irqrestore(&usb->lock, flags); - if (status) - 
dev_dbg(dev, "usb shutdown failed with %d ", status); - - usb_put_hcd(hcd); - - return 0; -} - -static const struct of_device_id octeon_usb_match[] = { - { - .compatible = "cavium,octeon-5750-usbc", - }, - {}, -}; -module_device_table(of, octeon_usb_match); - -static struct platform_driver octeon_usb_driver = { - .driver = { - .name = "octeon-hcd", - .of_match_table = octeon_usb_match, - }, - .probe = octeon_usb_probe, - .remove = octeon_usb_remove, -}; - -static int __init octeon_usb_driver_init(void) -{ - if (usb_disabled()) - return 0; - - return platform_driver_register(&octeon_usb_driver); -} -module_init(octeon_usb_driver_init); - -static void __exit octeon_usb_driver_exit(void) -{ - if (usb_disabled()) - return; - - platform_driver_unregister(&octeon_usb_driver); -} -module_exit(octeon_usb_driver_exit); - -module_license("gpl"); -module_author("cavium, inc. <support@cavium.com>"); -module_description("cavium inc. octeon usb host driver."); diff --git a/drivers/staging/octeon-usb/octeon-hcd.h b/drivers/staging/octeon-usb/octeon-hcd.h --- a/drivers/staging/octeon-usb/octeon-hcd.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * octeon hcd hardware register definitions. - * - * this file is subject to the terms and conditions of the gnu general public - * license. see the file "copying" in the main directory of this archive - * for more details. - * - * some parts of the code were originally released under bsd license: - * - * copyright (c) 2003-2010 cavium networks (support@cavium.com). all rights - * reserved. - * - * redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * - * * redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * neither the name of cavium networks nor the names of - * its contributors may be used to endorse or promote products - * derived from this software without specific prior written - * permission. - * - * this software, including technical data, may be subject to u.s. export - * control laws, including the u.s. export administration act and its associated - * regulations, and may be subject to export or import regulations in other - * countries. - * - * to the maximum extent permitted by law, the software is provided "as is" - * and with all faults and cavium networks makes no promises, representations or - * warranties, either express, implied, statutory, or otherwise, with respect to - * the software, including its condition, its conformity to any representation - * or description, or the existence of any latent or patent defects, and cavium - * specifically disclaims all implied (if any) warranties of title, - * merchantability, noninfringement, fitness for a particular purpose, lack of - * viruses, accuracy or completeness, quiet enjoyment, quiet possession or - * correspondence to description. the entire risk arising out of use or - * performance of the software lies with you. 
- */ - -#ifndef __octeon_hcd_h__ -#define __octeon_hcd_h__ - -#include <asm/bitfield.h> - -#define cvmx_usbcxbase 0x00016f0010000000ull -#define cvmx_usbcxreg1(reg, bid) \ - (cvmx_add_io_seg(cvmx_usbcxbase | reg) + \ - ((bid) & 1) * 0x100000000000ull) -#define cvmx_usbcxreg2(reg, bid, off) \ - (cvmx_add_io_seg(cvmx_usbcxbase | reg) + \ - (((off) & 7) + ((bid) & 1) * 0x8000000000ull) * 32) - -#define cvmx_usbcx_gahbcfg(bid) cvmx_usbcxreg1(0x008, bid) -#define cvmx_usbcx_ghwcfg3(bid) cvmx_usbcxreg1(0x04c, bid) -#define cvmx_usbcx_gintmsk(bid) cvmx_usbcxreg1(0x018, bid) -#define cvmx_usbcx_gintsts(bid) cvmx_usbcxreg1(0x014, bid) -#define cvmx_usbcx_gnptxfsiz(bid) cvmx_usbcxreg1(0x028, bid) -#define cvmx_usbcx_gnptxsts(bid) cvmx_usbcxreg1(0x02c, bid) -#define cvmx_usbcx_gotgctl(bid) cvmx_usbcxreg1(0x000, bid) -#define cvmx_usbcx_grstctl(bid) cvmx_usbcxreg1(0x010, bid) -#define cvmx_usbcx_grxfsiz(bid) cvmx_usbcxreg1(0x024, bid) -#define cvmx_usbcx_grxstsph(bid) cvmx_usbcxreg1(0x020, bid) -#define cvmx_usbcx_gusbcfg(bid) cvmx_usbcxreg1(0x00c, bid) -#define cvmx_usbcx_haint(bid) cvmx_usbcxreg1(0x414, bid) -#define cvmx_usbcx_haintmsk(bid) cvmx_usbcxreg1(0x418, bid) -#define cvmx_usbcx_hccharx(off, bid) cvmx_usbcxreg2(0x500, bid, off) -#define cvmx_usbcx_hcfg(bid) cvmx_usbcxreg1(0x400, bid) -#define cvmx_usbcx_hcintmskx(off, bid) cvmx_usbcxreg2(0x50c, bid, off) -#define cvmx_usbcx_hcintx(off, bid) cvmx_usbcxreg2(0x508, bid, off) -#define cvmx_usbcx_hcspltx(off, bid) cvmx_usbcxreg2(0x504, bid, off) -#define cvmx_usbcx_hctsizx(off, bid) cvmx_usbcxreg2(0x510, bid, off) -#define cvmx_usbcx_hfir(bid) cvmx_usbcxreg1(0x404, bid) -#define cvmx_usbcx_hfnum(bid) cvmx_usbcxreg1(0x408, bid) -#define cvmx_usbcx_hprt(bid) cvmx_usbcxreg1(0x440, bid) -#define cvmx_usbcx_hptxfsiz(bid) cvmx_usbcxreg1(0x100, bid) -#define cvmx_usbcx_hptxsts(bid) cvmx_usbcxreg1(0x410, bid) - -#define cvmx_usbnxbid1(bid) (((bid) & 1) * 0x10000000ull) -#define cvmx_usbnxbid2(bid) (((bid) & 1) * 
0x100000000000ull) - -#define cvmx_usbnxreg1(reg, bid) \ - (cvmx_add_io_seg(0x0001180068000000ull | reg) + cvmx_usbnxbid1(bid)) -#define cvmx_usbnxreg2(reg, bid) \ - (cvmx_add_io_seg(0x00016f0000000000ull | reg) + cvmx_usbnxbid2(bid)) - -#define cvmx_usbnx_clk_ctl(bid) cvmx_usbnxreg1(0x10, bid) -#define cvmx_usbnx_dma0_inb_chn0(bid) cvmx_usbnxreg2(0x818, bid) -#define cvmx_usbnx_dma0_outb_chn0(bid) cvmx_usbnxreg2(0x858, bid) -#define cvmx_usbnx_usbp_ctl_status(bid) cvmx_usbnxreg1(0x18, bid) - -/** - * cvmx_usbc#_gahbcfg - * - * core ahb configuration register (gahbcfg) - * - * this register can be used to configure the core after power-on or a change in - * mode of operation. this register mainly contains ahb system-related - * configuration parameters. the ahb is the processor interface to the o2p usb - * core. in general, software need not know about this interface except to - * program the values as specified. - * - * the application must program this register as part of the o2p usb core - * initialization. do not change this register after the initial programming. - */ -union cvmx_usbcx_gahbcfg { - u32 u32; - /** - * struct cvmx_usbcx_gahbcfg_s - * @ptxfemplvl: periodic txfifo empty level (ptxfemplvl) - * software should set this bit to 0x1. - * indicates when the periodic txfifo empty interrupt bit in the - * core interrupt register (gintsts.ptxfemp) is triggered. this - * bit is used only in slave mode. - * * 1'b0: gintsts.ptxfemp interrupt indicates that the periodic - * txfifo is half empty - * * 1'b1: gintsts.ptxfemp interrupt indicates that the periodic - * txfifo is completely empty - * @nptxfemplvl: non-periodic txfifo empty level (nptxfemplvl) - * software should set this bit to 0x1. - * indicates when the non-periodic txfifo empty interrupt bit in - * the core interrupt register (gintsts.nptxfemp) is triggered. - * this bit is used only in slave mode. 
- * * 1'b0: gintsts.nptxfemp interrupt indicates that the non- - * periodic txfifo is half empty - * * 1'b1: gintsts.nptxfemp interrupt indicates that the non- - * periodic txfifo is completely empty - * @dmaen: dma enable (dmaen) - * * 1'b0: core operates in slave mode - * * 1'b1: core operates in a dma mode - * @hbstlen: burst length/type (hbstlen) - * this field has not effect and should be left as 0x0. - * @glblintrmsk: global interrupt mask (glblintrmsk) - * software should set this field to 0x1. - * the application uses this bit to mask or unmask the interrupt - * line assertion to itself. irrespective of this bit's setting, - * the interrupt status registers are updated by the core. - * * 1'b0: mask the interrupt assertion to the application. - * * 1'b1: unmask the interrupt assertion to the application. - */ - struct cvmx_usbcx_gahbcfg_s { - __bitfield_field(u32 reserved_9_31 : 23, - __bitfield_field(u32 ptxfemplvl : 1, - __bitfield_field(u32 nptxfemplvl : 1, - __bitfield_field(u32 reserved_6_6 : 1, - __bitfield_field(u32 dmaen : 1, - __bitfield_field(u32 hbstlen : 4, - __bitfield_field(u32 glblintrmsk : 1, - ;))))))) - } s; -}; - -/** - * cvmx_usbc#_ghwcfg3 - * - * user hw config3 register (ghwcfg3) - * - * this register contains the configuration options of the o2p usb core. - */ -union cvmx_usbcx_ghwcfg3 { - u32 u32; - /** - * struct cvmx_usbcx_ghwcfg3_s - * @dfifodepth: dfifo depth (dfifodepth) - * this value is in terms of 32-bit words. - * * minimum value is 32 - * * maximum value is 32768 - * @ahbphysync: ahb and phy synchronous (ahbphysync) - * indicates whether ahb and phy clocks are synchronous to - * each other. - * * 1'b0: no - * * 1'b1: yes - * this bit is tied to 1. 
- * @rsttype: reset style for clocked always blocks in rtl (rsttype) - * * 1'b0: asynchronous reset is used in the core - * * 1'b1: synchronous reset is used in the core - * @optfeature: optional features removed (optfeature) - * indicates whether the user id register, gpio interface ports, - * and sof toggle and counter ports were removed for gate count - * optimization. - * @vendor_control_interface_support: vendor control interface support - * * 1'b0: vendor control interface is not available on the core. - * * 1'b1: vendor control interface is available. - * @i2c_selection: i2c selection - * * 1'b0: i2c interface is not available on the core. - * * 1'b1: i2c interface is available on the core. - * @otgen: otg function enabled (otgen) - * the application uses this bit to indicate the o2p usb core's - * otg capabilities. - * * 1'b0: not otg capable - * * 1'b1: otg capable - * @pktsizewidth: width of packet size counters (pktsizewidth) - * * 3'b000: 4 bits - * * 3'b001: 5 bits - * * 3'b010: 6 bits - * * 3'b011: 7 bits - * * 3'b100: 8 bits - * * 3'b101: 9 bits - * * 3'b110: 10 bits - * * others: reserved - * @xfersizewidth: width of transfer size counters (xfersizewidth) - * * 4'b0000: 11 bits - * * 4'b0001: 12 bits - * - ... - * * 4'b1000: 19 bits - * * others: reserved - */ - struct cvmx_usbcx_ghwcfg3_s { - __bitfield_field(u32 dfifodepth : 16, - __bitfield_field(u32 reserved_13_15 : 3, - __bitfield_field(u32 ahbphysync : 1, - __bitfield_field(u32 rsttype : 1, - __bitfield_field(u32 optfeature : 1, - __bitfield_field(u32 vendor_control_interface_support : 1, - __bitfield_field(u32 i2c_selection : 1, - __bitfield_field(u32 otgen : 1, - __bitfield_field(u32 pktsizewidth : 3, - __bitfield_field(u32 xfersizewidth : 4, - ;)))))))))) - } s; -}; - -/** - * cvmx_usbc#_gintmsk - * - * core interrupt mask register (gintmsk) - * - * this register works with the core interrupt register to interrupt the - * application. 
when an interrupt bit is masked, the interrupt associated with - * that bit will not be generated. however, the core interrupt (gintsts) - * register bit corresponding to that interrupt will still be set. - * mask interrupt: 1'b0, unmask interrupt: 1'b1 - */ -union cvmx_usbcx_gintmsk { - u32 u32; - /** - * struct cvmx_usbcx_gintmsk_s - * @wkupintmsk: resume/remote wakeup detected interrupt mask - * (wkupintmsk) - * @sessreqintmsk: session request/new session detected interrupt mask - * (sessreqintmsk) - * @disconnintmsk: disconnect detected interrupt mask (disconnintmsk) - * @conidstschngmsk: connector id status change mask (conidstschngmsk) - * @ptxfempmsk: periodic txfifo empty mask (ptxfempmsk) - * @hchintmsk: host channels interrupt mask (hchintmsk) - * @prtintmsk: host port interrupt mask (prtintmsk) - * @fetsuspmsk: data fetch suspended mask (fetsuspmsk) - * @incomplpmsk: incomplete periodic transfer mask (incomplpmsk) - * incomplete isochronous out transfer mask - * (incompisooutmsk) - * @incompisoinmsk: incomplete isochronous in transfer mask - * (incompisoinmsk) - * @oepintmsk: out endpoints interrupt mask (oepintmsk) - * @inepintmsk: in endpoints interrupt mask (inepintmsk) - * @epmismsk: endpoint mismatch interrupt mask (epmismsk) - * @eopfmsk: end of periodic frame interrupt mask (eopfmsk) - * @isooutdropmsk: isochronous out packet dropped interrupt mask - * (isooutdropmsk) - * @enumdonemsk: enumeration done mask (enumdonemsk) - * @usbrstmsk: usb reset mask (usbrstmsk) - * @usbsuspmsk: usb suspend mask (usbsuspmsk) - * @erlysuspmsk: early suspend mask (erlysuspmsk) - * @i2cint: i2c interrupt mask (i2cint) - * @ulpickintmsk: ulpi carkit interrupt mask (ulpickintmsk) - * i2c carkit interrupt mask (i2cckintmsk) - * @goutnakeffmsk: global out nak effective mask (goutnakeffmsk) - * @ginnakeffmsk: global non-periodic in nak effective mask - * (ginnakeffmsk) - * @nptxfempmsk: non-periodic txfifo empty mask (nptxfempmsk) - * @rxflvlmsk: receive fifo non-empty 
mask (rxflvlmsk) - * @sofmsk: start of (micro)frame mask (sofmsk) - * @otgintmsk: otg interrupt mask (otgintmsk) - * @modemismsk: mode mismatch interrupt mask (modemismsk) - */ - struct cvmx_usbcx_gintmsk_s { - __bitfield_field(u32 wkupintmsk : 1, - __bitfield_field(u32 sessreqintmsk : 1, - __bitfield_field(u32 disconnintmsk : 1, - __bitfield_field(u32 conidstschngmsk : 1, - __bitfield_field(u32 reserved_27_27 : 1, - __bitfield_field(u32 ptxfempmsk : 1, - __bitfield_field(u32 hchintmsk : 1, - __bitfield_field(u32 prtintmsk : 1, - __bitfield_field(u32 reserved_23_23 : 1, - __bitfield_field(u32 fetsuspmsk : 1, - __bitfield_field(u32 incomplpmsk : 1, - __bitfield_field(u32 incompisoinmsk : 1, - __bitfield_field(u32 oepintmsk : 1, - __bitfield_field(u32 inepintmsk : 1, - __bitfield_field(u32 epmismsk : 1, - __bitfield_field(u32 reserved_16_16 : 1, - __bitfield_field(u32 eopfmsk : 1, - __bitfield_field(u32 isooutdropmsk : 1, - __bitfield_field(u32 enumdonemsk : 1, - __bitfield_field(u32 usbrstmsk : 1, - __bitfield_field(u32 usbsuspmsk : 1, - __bitfield_field(u32 erlysuspmsk : 1, - __bitfield_field(u32 i2cint : 1, - __bitfield_field(u32 ulpickintmsk : 1, - __bitfield_field(u32 goutnakeffmsk : 1, - __bitfield_field(u32 ginnakeffmsk : 1, - __bitfield_field(u32 nptxfempmsk : 1, - __bitfield_field(u32 rxflvlmsk : 1, - __bitfield_field(u32 sofmsk : 1, - __bitfield_field(u32 otgintmsk : 1, - __bitfield_field(u32 modemismsk : 1, - __bitfield_field(u32 reserved_0_0 : 1, - ;)))))))))))))))))))))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_gintsts - * - * core interrupt register (gintsts) - * - * this register interrupts the application for system-level events in the - * current mode of operation (device mode or host mode). it is shown in - * interrupt. some of the bits in this register are valid only in host mode, - * while others are valid in device mode only. this register also indicates the - * current mode of operation. 
in order to clear the interrupt status bits of - * type r_ss_wc, the application must write 1'b1 into the bit. the fifo status - * interrupts are read only; once software reads from or writes to the fifo - * while servicing these interrupts, fifo interrupt conditions are cleared - * automatically. - */ -union cvmx_usbcx_gintsts { - u32 u32; - /** - * struct cvmx_usbcx_gintsts_s - * @wkupint: resume/remote wakeup detected interrupt (wkupint) - * in device mode, this interrupt is asserted when a resume is - * detected on the usb. in host mode, this interrupt is asserted - * when a remote wakeup is detected on the usb. - * for more information on how to use this interrupt, see "partial - * power-down and clock gating programming model" on - * page 353. - * @sessreqint: session request/new session detected interrupt - * (sessreqint) - * in host mode, this interrupt is asserted when a session request - * is detected from the device. in device mode, this interrupt is - * asserted when the utmiotg_bvalid signal goes high. - * for more information on how to use this interrupt, see "partial - * power-down and clock gating programming model" on - * page 353. - * @disconnint: disconnect detected interrupt (disconnint) - * asserted when a device disconnect is detected. - * @conidstschng: connector id status change (conidstschng) - * the core sets this bit when there is a change in connector id - * status. - * @ptxfemp: periodic txfifo empty (ptxfemp) - * asserted when the periodic transmit fifo is either half or - * completely empty and there is space for at least one entry to be - * written in the periodic request queue. the half or completely - * empty status is determined by the periodic txfifo empty level - * bit in the core ahb configuration register - * (gahbcfg.ptxfemplvl). - * @hchint: host channels interrupt (hchint) - * the core sets this bit to indicate that an interrupt is pending - * on one of the channels of the core (in host mode). 
the - * application must read the host all channels interrupt (haint) - * register to determine the exact number of the channel on which - * the interrupt occurred, and then read the corresponding host - * channel-n interrupt (hcintn) register to determine the exact - * cause of the interrupt. the application must clear the - * appropriate status bit in the hcintn register to clear this bit. - * @prtint: host port interrupt (prtint) - * the core sets this bit to indicate a change in port status of - * one of the o2p usb core ports in host mode. the application must - * read the host port control and status (hprt) register to - * determine the exact event that caused this interrupt. the - * application must clear the appropriate status bit in the host - * port control and status register to clear this bit. - * @fetsusp: data fetch suspended (fetsusp) - * this interrupt is valid only in dma mode. this interrupt - * indicates that the core has stopped fetching data for in - * endpoints due to the unavailability of txfifo space or request - * queue space. this interrupt is used by the application for an - * endpoint mismatch algorithm. - * @incomplp: incomplete periodic transfer (incomplp) - * in host mode, the core sets this interrupt bit when there are - * incomplete periodic transactions still pending which are - * scheduled for the current microframe. - * incomplete isochronous out transfer (incompisoout) - * the device mode, the core sets this interrupt to indicate that - * there is at least one isochronous out endpoint on which the - * transfer is not completed in the current microframe. this - * interrupt is asserted along with the end of periodic frame - * interrupt (eopf) bit in this register. - * @incompisoin: incomplete isochronous in transfer (incompisoin) - * the core sets this interrupt to indicate that there is at least - * one isochronous in endpoint on which the transfer is not - * completed in the current microframe. 
this interrupt is asserted - * along with the end of periodic frame interrupt (eopf) bit in - * this register. - * @oepint: out endpoints interrupt (oepint) - * the core sets this bit to indicate that an interrupt is pending - * on one of the out endpoints of the core (in device mode). the - * application must read the device all endpoints interrupt - * (daint) register to determine the exact number of the out - * endpoint on which the interrupt occurred, and then read the - * corresponding device out endpoint-n interrupt (doepintn) - * register to determine the exact cause of the interrupt. the - * application must clear the appropriate status bit in the - * corresponding doepintn register to clear this bit. - * @iepint: in endpoints interrupt (iepint) - * the core sets this bit to indicate that an interrupt is pending - * on one of the in endpoints of the core (in device mode). the - * application must read the device all endpoints interrupt - * (daint) register to determine the exact number of the in - * endpoint on which the interrupt occurred, and then read the - * corresponding device in endpoint-n interrupt (diepintn) - * register to determine the exact cause of the interrupt. the - * application must clear the appropriate status bit in the - * corresponding diepintn register to clear this bit. - * @epmis: endpoint mismatch interrupt (epmis) - * indicates that an in token has been received for a non-periodic - * endpoint, but the data for another endpoint is present in the - * top of the non-periodic transmit fifo and the in endpoint - * mismatch count programmed by the application has expired. - * @eopf: end of periodic frame interrupt (eopf) - * indicates that the period specified in the periodic frame - * interval field of the device configuration register - * (dcfg.perfrint) has been reached in the current microframe. 
- * @isooutdrop: isochronous out packet dropped interrupt (isooutdrop) - * the core sets this bit when it fails to write an isochronous out - * packet into the rxfifo because the rxfifo doesn't have - * enough space to accommodate a maximum packet size packet - * for the isochronous out endpoint. - * @enumdone: enumeration done (enumdone) - * the core sets this bit to indicate that speed enumeration is - * complete. the application must read the device status (dsts) - * register to obtain the enumerated speed. - * @usbrst: usb reset (usbrst) - * the core sets this bit to indicate that a reset is detected on - * the usb. - * @usbsusp: usb suspend (usbsusp) - * the core sets this bit to indicate that a suspend was detected - * on the usb. the core enters the suspended state when there - * is no activity on the phy_line_state_i signal for an extended - * period of time. - * @erlysusp: early suspend (erlysusp) - * the core sets this bit to indicate that an idle state has been - * detected on the usb for 3 ms. - * @i2cint: i2c interrupt (i2cint) - * this bit is always 0x0. - * @ulpickint: ulpi carkit interrupt (ulpickint) - * this bit is always 0x0. - * @goutnakeff: global out nak effective (goutnakeff) - * indicates that the set global out nak bit in the device control - * register (dctl.sgoutnak), set by the application, has taken - * effect in the core. this bit can be cleared by writing the clear - * global out nak bit in the device control register - * (dctl.cgoutnak). - * @ginnakeff: global in non-periodic nak effective (ginnakeff) - * indicates that the set global non-periodic in nak bit in the - * device control register (dctl.sgnpinnak), set by the - * application, has taken effect in the core. that is, the core has - * sampled the global in nak bit set by the application. this bit - * can be cleared by clearing the clear global non-periodic in - * nak bit in the device control register (dctl.cgnpinnak). 
- * this interrupt does not necessarily mean that a nak handshake - * is sent out on the usb. the stall bit takes precedence over - * the nak bit. - * @nptxfemp: non-periodic txfifo empty (nptxfemp) - * this interrupt is asserted when the non-periodic txfifo is - * either half or completely empty, and there is space for at least - * one entry to be written to the non-periodic transmit request - * queue. the half or completely empty status is determined by - * the non-periodic txfifo empty level bit in the core ahb - * configuration register (gahbcfg.nptxfemplvl). - * @rxflvl: rxfifo non-empty (rxflvl) - * indicates that there is at least one packet pending to be read - * from the rxfifo. - * @sof: start of (micro)frame (sof) - * in host mode, the core sets this bit to indicate that an sof - * (fs), micro-sof (hs), or keep-alive (ls) is transmitted on the - * usb. the application must write a 1 to this bit to clear the - * interrupt. - * in device mode, in the core sets this bit to indicate that an - * sof token has been received on the usb. the application can read - * the device status register to get the current (micro)frame - * number. this interrupt is seen only when the core is operating - * at either hs or fs. - * @otgint: otg interrupt (otgint) - * the core sets this bit to indicate an otg protocol event. the - * application must read the otg interrupt status (gotgint) - * register to determine the exact event that caused this - * interrupt. the application must clear the appropriate status bit - * in the gotgint register to clear this bit. 
- * @modemis: mode mismatch interrupt (modemis) - * the core sets this bit when the application is trying to access: - * * a host mode register, when the core is operating in device - * mode - * * a device mode register, when the core is operating in host - * mode - * the register access is completed on the ahb with an okay - * response, but is ignored by the core internally and doesn't - * affect the operation of the core. - * @curmod: current mode of operation (curmod) - * indicates the current mode of operation. - * * 1'b0: device mode - * * 1'b1: host mode - */ - struct cvmx_usbcx_gintsts_s { - __bitfield_field(u32 wkupint : 1, - __bitfield_field(u32 sessreqint : 1, - __bitfield_field(u32 disconnint : 1, - __bitfield_field(u32 conidstschng : 1, - __bitfield_field(u32 reserved_27_27 : 1, - __bitfield_field(u32 ptxfemp : 1, - __bitfield_field(u32 hchint : 1, - __bitfield_field(u32 prtint : 1, - __bitfield_field(u32 reserved_23_23 : 1, - __bitfield_field(u32 fetsusp : 1, - __bitfield_field(u32 incomplp : 1, - __bitfield_field(u32 incompisoin : 1, - __bitfield_field(u32 oepint : 1, - __bitfield_field(u32 iepint : 1, - __bitfield_field(u32 epmis : 1, - __bitfield_field(u32 reserved_16_16 : 1, - __bitfield_field(u32 eopf : 1, - __bitfield_field(u32 isooutdrop : 1, - __bitfield_field(u32 enumdone : 1, - __bitfield_field(u32 usbrst : 1, - __bitfield_field(u32 usbsusp : 1, - __bitfield_field(u32 erlysusp : 1, - __bitfield_field(u32 i2cint : 1, - __bitfield_field(u32 ulpickint : 1, - __bitfield_field(u32 goutnakeff : 1, - __bitfield_field(u32 ginnakeff : 1, - __bitfield_field(u32 nptxfemp : 1, - __bitfield_field(u32 rxflvl : 1, - __bitfield_field(u32 sof : 1, - __bitfield_field(u32 otgint : 1, - __bitfield_field(u32 modemis : 1, - __bitfield_field(u32 curmod : 1, - ;)))))))))))))))))))))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_gnptxfsiz - * - * non-periodic transmit fifo size register (gnptxfsiz) - * - * the application can program the ram size and the memory start 
address for the - * non-periodic txfifo. - */ -union cvmx_usbcx_gnptxfsiz { - u32 u32; - /** - * struct cvmx_usbcx_gnptxfsiz_s - * @nptxfdep: non-periodic txfifo depth (nptxfdep) - * this value is in terms of 32-bit words. - * minimum value is 16 - * maximum value is 32768 - * @nptxfstaddr: non-periodic transmit ram start address (nptxfstaddr) - * this field contains the memory start address for non-periodic - * transmit fifo ram. - */ - struct cvmx_usbcx_gnptxfsiz_s { - __bitfield_field(u32 nptxfdep : 16, - __bitfield_field(u32 nptxfstaddr : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_gnptxsts - * - * non-periodic transmit fifo/queue status register (gnptxsts) - * - * this read-only register contains the free space information for the - * non-periodic txfifo and the non-periodic transmit request queue. - */ -union cvmx_usbcx_gnptxsts { - u32 u32; - /** - * struct cvmx_usbcx_gnptxsts_s - * @nptxqtop: top of the non-periodic transmit request queue (nptxqtop) - * entry in the non-periodic tx request queue that is currently - * being processed by the mac. - * * bits [30:27]: channel/endpoint number - * * bits [26:25]: - * - 2'b00: in/out token - * - 2'b01: zero-length transmit packet (device in/host out) - * - 2'b10: ping/csplit token - * - 2'b11: channel halt command - * * bit [24]: terminate (last entry for selected channel/endpoint) - * @nptxqspcavail: non-periodic transmit request queue space available - * (nptxqspcavail) - * indicates the amount of free space available in the non- - * periodic transmit request queue. this queue holds both in - * and out requests in host mode. device mode has only in - * requests. - * * 8'h0: non-periodic transmit request queue is full - * * 8'h1: 1 location available - * * 8'h2: 2 locations available - * * n: n locations available (0..8) - * * others: reserved - * @nptxfspcavail: non-periodic txfifo space avail (nptxfspcavail) - * indicates the amount of free space available in the non- - * periodic txfifo. 
- * values are in terms of 32-bit words. - * * 16'h0: non-periodic txfifo is full - * * 16'h1: 1 word available - * * 16'h2: 2 words available - * * 16'hn: n words available (where 0..32768) - * * 16'h8000: 32768 words available - * * others: reserved - */ - struct cvmx_usbcx_gnptxsts_s { - __bitfield_field(u32 reserved_31_31 : 1, - __bitfield_field(u32 nptxqtop : 7, - __bitfield_field(u32 nptxqspcavail : 8, - __bitfield_field(u32 nptxfspcavail : 16, - ;)))) - } s; -}; - -/** - * cvmx_usbc#_grstctl - * - * core reset register (grstctl) - * - * the application uses this register to reset various hardware features inside - * the core. - */ -union cvmx_usbcx_grstctl { - u32 u32; - /** - * struct cvmx_usbcx_grstctl_s - * @ahbidle: ahb master idle (ahbidle) - * indicates that the ahb master state machine is in the idle - * condition. - * @dmareq: dma request signal (dmareq) - * indicates that the dma request is in progress. used for debug. - * @txfnum: txfifo number (txfnum) - * this is the fifo number that must be flushed using the txfifo - * flush bit. this field must not be changed until the core clears - * the txfifo flush bit. - * * 5'h0: non-periodic txfifo flush - * * 5'h1: periodic txfifo 1 flush in device mode or periodic - * txfifo flush in host mode - * * 5'h2: periodic txfifo 2 flush in device mode - * - ... - * * 5'hf: periodic txfifo 15 flush in device mode - * * 5'h10: flush all the periodic and non-periodic txfifos in the - * core - * @txfflsh: txfifo flush (txfflsh) - * this bit selectively flushes a single or all transmit fifos, but - * cannot do so if the core is in the midst of a transaction. - * the application must only write this bit after checking that the - * core is neither writing to the txfifo nor reading from the - * txfifo. - * the application must wait until the core clears this bit before - * performing any operations. this bit takes 8 clocks (of phy_clk - * or hclk, whichever is slower) to clear. 
- * @rxfflsh: rxfifo flush (rxfflsh) - * the application can flush the entire rxfifo using this bit, but - * must first ensure that the core is not in the middle of a - * transaction. - * the application must only write to this bit after checking that - * the core is neither reading from the rxfifo nor writing to the - * rxfifo. - * the application must wait until the bit is cleared before - * performing any other operations. this bit will take 8 clocks - * (slowest of phy or ahb clock) to clear. - * @intknqflsh: in token sequence learning queue flush (intknqflsh) - * the application writes this bit to flush the in token sequence - * learning queue. - * @frmcntrrst: host frame counter reset (frmcntrrst) - * the application writes this bit to reset the (micro)frame number - * counter inside the core. when the (micro)frame counter is reset, - * the subsequent sof sent out by the core will have a - * (micro)frame number of 0. - * @hsftrst: hclk soft reset (hsftrst) - * the application uses this bit to flush the control logic in the - * ahb clock domain. only ahb clock domain pipelines are reset. - * * fifos are not flushed with this bit. - * * all state machines in the ahb clock domain are reset to the - * idle state after terminating the transactions on the ahb, - * following the protocol. - * * csr control bits used by the ahb clock domain state - * machines are cleared. - * * to clear this interrupt, status mask bits that control the - * interrupt status and are generated by the ahb clock domain - * state machine are cleared. - * * because interrupt status bits are not cleared, the application - * can get the status of any core events that occurred after it set - * this bit. - * this is a self-clearing bit that the core clears after all - * necessary logic is reset in the core. this may take several - * clocks, depending on the core's current state. 
- * @csftrst: core soft reset (csftrst) - * resets the hclk and phy_clock domains as follows: - * * clears the interrupts and all the csr registers except the - * following register bits: - * - pcgcctl.rstpdwnmodule - * - pcgcctl.gatehclk - * - pcgcctl.pwrclmp - * - pcgcctl.stoppphylpwrclkselclk - * - gusbcfg.phylpwrclksel - * - gusbcfg.ddrsel - * - gusbcfg.physel - * - gusbcfg.fsintf - * - gusbcfg.ulpi_utmi_sel - * - gusbcfg.phyif - * - hcfg.fslspclksel - * - dcfg.devspd - * * all module state machines (except the ahb slave unit) are - * reset to the idle state, and all the transmit fifos and the - * receive fifo are flushed. - * * any transactions on the ahb master are terminated as soon - * as possible, after gracefully completing the last data phase of - * an ahb transfer. any transactions on the usb are terminated - * immediately. - * the application can write to this bit any time it wants to reset - * the core. this is a self-clearing bit and the core clears this - * bit after all the necessary logic is reset in the core, which - * may take several clocks, depending on the current state of the - * core. once this bit is cleared software should wait at least 3 - * phy clocks before doing any access to the phy domain - * (synchronization delay). software should also should check that - * bit 31 of this register is 1 (ahb master is idle) before - * starting any operation. - * typically software reset is used during software development - * and also when you dynamically change the phy selection bits - * in the usb configuration registers listed above. when you - * change the phy, the corresponding clock for the phy is - * selected and used in the phy domain. once a new clock is - * selected, the phy domain has to be reset for proper operation. 
- */ - struct cvmx_usbcx_grstctl_s { - __bitfield_field(u32 ahbidle : 1, - __bitfield_field(u32 dmareq : 1, - __bitfield_field(u32 reserved_11_29 : 19, - __bitfield_field(u32 txfnum : 5, - __bitfield_field(u32 txfflsh : 1, - __bitfield_field(u32 rxfflsh : 1, - __bitfield_field(u32 intknqflsh : 1, - __bitfield_field(u32 frmcntrrst : 1, - __bitfield_field(u32 hsftrst : 1, - __bitfield_field(u32 csftrst : 1, - ;)))))))))) - } s; -}; - -/** - * cvmx_usbc#_grxfsiz - * - * receive fifo size register (grxfsiz) - * - * the application can program the ram size that must be allocated to the - * rxfifo. - */ -union cvmx_usbcx_grxfsiz { - u32 u32; - /** - * struct cvmx_usbcx_grxfsiz_s - * @rxfdep: rxfifo depth (rxfdep) - * this value is in terms of 32-bit words. - * * minimum value is 16 - * * maximum value is 32768 - */ - struct cvmx_usbcx_grxfsiz_s { - __bitfield_field(u32 reserved_16_31 : 16, - __bitfield_field(u32 rxfdep : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_grxstsph - * - * receive status read and pop register, host mode (grxstsph) - * - * a read to the receive status read and pop register returns and additionally - * pops the top data entry out of the rxfifo. - * this description is only valid when the core is in host mode. for device mode - * use usbc_grxstspd instead. - * note: grxstsph and grxstspd are physically the same register and share the - * same offset in the o2p usb core. the offset difference shown in this - * document is for software clarity and is actually ignored by the - * hardware. 
- */ -union cvmx_usbcx_grxstsph { - u32 u32; - /** - * struct cvmx_usbcx_grxstsph_s - * @pktsts: packet status (pktsts) - * indicates the status of the received packet - * * 4'b0010: in data packet received - * * 4'b0011: in transfer completed (triggers an interrupt) - * * 4'b0101: data toggle error (triggers an interrupt) - * * 4'b0111: channel halted (triggers an interrupt) - * * others: reserved - * @dpid: data pid (dpid) - * * 2'b00: data0 - * * 2'b10: data1 - * * 2'b01: data2 - * * 2'b11: mdata - * @bcnt: byte count (bcnt) - * indicates the byte count of the received in data packet - * @chnum: channel number (chnum) - * indicates the channel number to which the current received - * packet belongs. - */ - struct cvmx_usbcx_grxstsph_s { - __bitfield_field(u32 reserved_21_31 : 11, - __bitfield_field(u32 pktsts : 4, - __bitfield_field(u32 dpid : 2, - __bitfield_field(u32 bcnt : 11, - __bitfield_field(u32 chnum : 4, - ;))))) - } s; -}; - -/** - * cvmx_usbc#_gusbcfg - * - * core usb configuration register (gusbcfg) - * - * this register can be used to configure the core after power-on or a changing - * to host mode or device mode. it contains usb and usb-phy related - * configuration parameters. the application must program this register before - * starting any transactions on either the ahb or the usb. do not make changes - * to this register after the initial programming. - */ -union cvmx_usbcx_gusbcfg { - u32 u32; - /** - * struct cvmx_usbcx_gusbcfg_s - * @otgi2csel: utmifs or i2c interface select (otgi2csel) - * this bit is always 0x0. - * @phylpwrclksel: phy low-power clock select (phylpwrclksel) - * software should set this bit to 0x0. - * selects either 480-mhz or 48-mhz (low-power) phy mode. in - * fs and ls modes, the phy can usually operate on a 48-mhz - * clock to save power. 
- * * 1'b0: 480-mhz internal pll clock - * * 1'b1: 48-mhz external clock - * in 480 mhz mode, the utmi interface operates at either 60 or - * 30-mhz, depending upon whether 8- or 16-bit data width is - * selected. in 48-mhz mode, the utmi interface operates at 48 - * mhz in fs mode and at either 48 or 6 mhz in ls mode - * (depending on the phy vendor). - * this bit drives the utmi_fsls_low_power core output signal, and - * is valid only for utmi+ phys. - * @usbtrdtim: usb turnaround time (usbtrdtim) - * sets the turnaround time in phy clocks. - * specifies the response time for a mac request to the packet - * fifo controller (pfc) to fetch data from the dfifo (spram). - * this must be programmed to 0x5. - * @hnpcap: hnp-capable (hnpcap) - * this bit is always 0x0. - * @srpcap: srp-capable (srpcap) - * this bit is always 0x0. - * @ddrsel: ulpi ddr select (ddrsel) - * software should set this bit to 0x0. - * @physel: usb 2.0 high-speed phy or usb 1.1 full-speed serial - * software should set this bit to 0x0. - * @fsintf: full-speed serial interface select (fsintf) - * software should set this bit to 0x0. - * @ulpi_utmi_sel: ulpi or utmi+ select (ulpi_utmi_sel) - * this bit is always 0x0. - * @phyif: phy interface (phyif) - * this bit is always 0x1. - * @toutcal: hs/fs timeout calibration (toutcal) - * the number of phy clocks that the application programs in this - * field is added to the high-speed/full-speed interpacket timeout - * duration in the core to account for any additional delays - * introduced by the phy. this may be required, since the delay - * introduced by the phy in generating the linestate condition may - * vary from one phy to another. - * the usb standard timeout value for high-speed operation is - * 736 to 816 (inclusive) bit times. the usb standard timeout - * value for full-speed operation is 16 to 18 (inclusive) bit - * times. the application must program this field based on the - * speed of enumeration. 
the number of bit times added per phy - * clock are: - * high-speed operation: - * * one 30-mhz phy clock = 16 bit times - * * one 60-mhz phy clock = 8 bit times - * full-speed operation: - * * one 30-mhz phy clock = 0.4 bit times - * * one 60-mhz phy clock = 0.2 bit times - * * one 48-mhz phy clock = 0.25 bit times - */ - struct cvmx_usbcx_gusbcfg_s { - __bitfield_field(u32 reserved_17_31 : 15, - __bitfield_field(u32 otgi2csel : 1, - __bitfield_field(u32 phylpwrclksel : 1, - __bitfield_field(u32 reserved_14_14 : 1, - __bitfield_field(u32 usbtrdtim : 4, - __bitfield_field(u32 hnpcap : 1, - __bitfield_field(u32 srpcap : 1, - __bitfield_field(u32 ddrsel : 1, - __bitfield_field(u32 physel : 1, - __bitfield_field(u32 fsintf : 1, - __bitfield_field(u32 ulpi_utmi_sel : 1, - __bitfield_field(u32 phyif : 1, - __bitfield_field(u32 toutcal : 3, - ;))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_haint - * - * host all channels interrupt register (haint) - * - * when a significant event occurs on a channel, the host all channels interrupt - * register interrupts the application using the host channels interrupt bit of - * the core interrupt register (gintsts.hchint). this is shown in interrupt. - * there is one interrupt bit per channel, up to a maximum of 16 bits. bits in - * this register are set and cleared when the application sets and clears bits - * in the corresponding host channel-n interrupt register. 
- */ -union cvmx_usbcx_haint { - u32 u32; - /** - * struct cvmx_usbcx_haint_s - * @haint: channel interrupts (haint) - * one bit per channel: bit 0 for channel 0, bit 15 for channel 15 - */ - struct cvmx_usbcx_haint_s { - __bitfield_field(u32 reserved_16_31 : 16, - __bitfield_field(u32 haint : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_haintmsk - * - * host all channels interrupt mask register (haintmsk) - * - * the host all channel interrupt mask register works with the host all channel - * interrupt register to interrupt the application when an event occurs on a - * channel. there is one interrupt mask bit per channel, up to a maximum of 16 - * bits. - * mask interrupt: 1'b0 unmask interrupt: 1'b1 - */ -union cvmx_usbcx_haintmsk { - u32 u32; - /** - * struct cvmx_usbcx_haintmsk_s - * @haintmsk: channel interrupt mask (haintmsk) - * one bit per channel: bit 0 for channel 0, bit 15 for channel 15 - */ - struct cvmx_usbcx_haintmsk_s { - __bitfield_field(u32 reserved_16_31 : 16, - __bitfield_field(u32 haintmsk : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hcchar# - * - * host channel-n characteristics register (hcchar) - * - */ -union cvmx_usbcx_hccharx { - u32 u32; - /** - * struct cvmx_usbcx_hccharx_s - * @chena: channel enable (chena) - * this field is set by the application and cleared by the otg - * host. - * * 1'b0: channel disabled - * * 1'b1: channel enabled - * @chdis: channel disable (chdis) - * the application sets this bit to stop transmitting/receiving - * data on a channel, even before the transfer for that channel is - * complete. the application must wait for the channel disabled - * interrupt before treating the channel as disabled. - * @oddfrm: odd frame (oddfrm) - * this field is set (reset) by the application to indicate that - * the otg host must perform a transfer in an odd (micro)frame. - * this field is applicable for only periodic (isochronous and - * interrupt) transactions. 
- * * 1'b0: even (micro)frame - * * 1'b1: odd (micro)frame - * @devaddr: device address (devaddr) - * this field selects the specific device serving as the data - * source or sink. - * @ec: multi count (mc) / error count (ec) - * when the split enable bit of the host channel-n split control - * register (hcspltn.spltena) is reset (1'b0), this field indicates - * to the host the number of transactions that should be executed - * per microframe for this endpoint. - * * 2'b00: reserved. this field yields undefined results. - * * 2'b01: 1 transaction - * * 2'b10: 2 transactions to be issued for this endpoint per - * microframe - * * 2'b11: 3 transactions to be issued for this endpoint per - * microframe - * when hcspltn.spltena is set (1'b1), this field indicates the - * number of immediate retries to be performed for a periodic split - * transactions on transaction errors. this field must be set to at - * least 2'b01. - * @eptype: endpoint type (eptype) - * indicates the transfer type selected. - * * 2'b00: control - * * 2'b01: isochronous - * * 2'b10: bulk - * * 2'b11: interrupt - * @lspddev: low-speed device (lspddev) - * this field is set by the application to indicate that this - * channel is communicating to a low-speed device. - * @epdir: endpoint direction (epdir) - * indicates whether the transaction is in or out. - * * 1'b0: out - * * 1'b1: in - * @epnum: endpoint number (epnum) - * indicates the endpoint number on the device serving as the - * data source or sink. - * @mps: maximum packet size (mps) - * indicates the maximum packet size of the associated endpoint. 
- */ - struct cvmx_usbcx_hccharx_s { - __bitfield_field(u32 chena : 1, - __bitfield_field(u32 chdis : 1, - __bitfield_field(u32 oddfrm : 1, - __bitfield_field(u32 devaddr : 7, - __bitfield_field(u32 ec : 2, - __bitfield_field(u32 eptype : 2, - __bitfield_field(u32 lspddev : 1, - __bitfield_field(u32 reserved_16_16 : 1, - __bitfield_field(u32 epdir : 1, - __bitfield_field(u32 epnum : 4, - __bitfield_field(u32 mps : 11, - ;))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hcfg - * - * host configuration register (hcfg) - * - * this register configures the core after power-on. do not make changes to this - * register after initializing the host. - */ -union cvmx_usbcx_hcfg { - u32 u32; - /** - * struct cvmx_usbcx_hcfg_s - * @fslssupp: fs- and ls-only support (fslssupp) - * the application uses this bit to control the core's enumeration - * speed. using this bit, the application can make the core - * enumerate as a fs host, even if the connected device supports - * hs traffic. do not make changes to this field after initial - * programming. - * * 1'b0: hs/fs/ls, based on the maximum speed supported by - * the connected device - * * 1'b1: fs/ls-only, even if the connected device can support hs - * @fslspclksel: fs/ls phy clock select (fslspclksel) - * when the core is in fs host mode - * * 2'b00: phy clock is running at 30/60 mhz - * * 2'b01: phy clock is running at 48 mhz - * * others: reserved - * when the core is in ls host mode - * * 2'b00: phy clock is running at 30/60 mhz. when the - * utmi+/ulpi phy low power mode is not selected, use - * 30/60 mhz. - * * 2'b01: phy clock is running at 48 mhz. when the utmi+ - * phy low power mode is selected, use 48mhz if the phy - * supplies a 48 mhz clock during ls mode. - * * 2'b10: phy clock is running at 6 mhz. in usb 1.1 fs mode, - * use 6 mhz when the utmi+ phy low power mode is - * selected and the phy supplies a 6 mhz clock during ls - * mode. if you select a 6 mhz clock during ls mode, you must - * do a soft reset. 
- * * 2'b11: reserved - */ - struct cvmx_usbcx_hcfg_s { - __bitfield_field(u32 reserved_3_31 : 29, - __bitfield_field(u32 fslssupp : 1, - __bitfield_field(u32 fslspclksel : 2, - ;))) - } s; -}; - -/** - * cvmx_usbc#_hcint# - * - * host channel-n interrupt register (hcint) - * - * this register indicates the status of a channel with respect to usb- and - * ahb-related events. the application must read this register when the host - * channels interrupt bit of the core interrupt register (gintsts.hchint) is - * set. before the application can read this register, it must first read - * the host all channels interrupt (haint) register to get the exact channel - * number for the host channel-n interrupt register. the application must clear - * the appropriate bit in this register to clear the corresponding bits in the - * haint and gintsts registers. - */ -union cvmx_usbcx_hcintx { - u32 u32; - /** - * struct cvmx_usbcx_hcintx_s - * @datatglerr: data toggle error (datatglerr) - * @frmovrun: frame overrun (frmovrun) - * @bblerr: babble error (bblerr) - * @xacterr: transaction error (xacterr) - * @nyet: nyet response received interrupt (nyet) - * @ack: ack response received interrupt (ack) - * @nak: nak response received interrupt (nak) - * @stall: stall response received interrupt (stall) - * @ahberr: this bit is always 0x0. - * @chhltd: channel halted (chhltd) - * indicates the transfer completed abnormally either because of - * any usb transaction error or in response to disable request by - * the application. - * @xfercompl: transfer completed (xfercompl) - * transfer completed normally without any errors. 
- */ - struct cvmx_usbcx_hcintx_s { - __bitfield_field(u32 reserved_11_31 : 21, - __bitfield_field(u32 datatglerr : 1, - __bitfield_field(u32 frmovrun : 1, - __bitfield_field(u32 bblerr : 1, - __bitfield_field(u32 xacterr : 1, - __bitfield_field(u32 nyet : 1, - __bitfield_field(u32 ack : 1, - __bitfield_field(u32 nak : 1, - __bitfield_field(u32 stall : 1, - __bitfield_field(u32 ahberr : 1, - __bitfield_field(u32 chhltd : 1, - __bitfield_field(u32 xfercompl : 1, - ;)))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hcintmsk# - * - * host channel-n interrupt mask register (hcintmskn) - * - * this register reflects the mask for each channel status described in the - * previous section. - * mask interrupt: 1'b0 unmask interrupt: 1'b1 - */ -union cvmx_usbcx_hcintmskx { - u32 u32; - /** - * struct cvmx_usbcx_hcintmskx_s - * @datatglerrmsk: data toggle error mask (datatglerrmsk) - * @frmovrunmsk: frame overrun mask (frmovrunmsk) - * @bblerrmsk: babble error mask (bblerrmsk) - * @xacterrmsk: transaction error mask (xacterrmsk) - * @nyetmsk: nyet response received interrupt mask (nyetmsk) - * @ackmsk: ack response received interrupt mask (ackmsk) - * @nakmsk: nak response received interrupt mask (nakmsk) - * @stallmsk: stall response received interrupt mask (stallmsk) - * @ahberrmsk: ahb error mask (ahberrmsk) - * @chhltdmsk: channel halted mask (chhltdmsk) - * @xfercomplmsk: transfer completed mask (xfercomplmsk) - */ - struct cvmx_usbcx_hcintmskx_s { - __bitfield_field(u32 reserved_11_31 : 21, - __bitfield_field(u32 datatglerrmsk : 1, - __bitfield_field(u32 frmovrunmsk : 1, - __bitfield_field(u32 bblerrmsk : 1, - __bitfield_field(u32 xacterrmsk : 1, - __bitfield_field(u32 nyetmsk : 1, - __bitfield_field(u32 ackmsk : 1, - __bitfield_field(u32 nakmsk : 1, - __bitfield_field(u32 stallmsk : 1, - __bitfield_field(u32 ahberrmsk : 1, - __bitfield_field(u32 chhltdmsk : 1, - __bitfield_field(u32 xfercomplmsk : 1, - ;)))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hcsplt# - * - * host 
channel-n split control register (hcsplt) - * - */ -union cvmx_usbcx_hcspltx { - u32 u32; - /** - * struct cvmx_usbcx_hcspltx_s - * @spltena: split enable (spltena) - * the application sets this field to indicate that this channel is - * enabled to perform split transactions. - * @compsplt: do complete split (compsplt) - * the application sets this field to request the otg host to - * perform a complete split transaction. - * @xactpos: transaction position (xactpos) - * this field is used to determine whether to send all, first, - * middle, or last payloads with each out transaction. - * * 2'b11: all. this is the entire data payload is of this - * transaction (which is less than or equal to 188 bytes). - * * 2'b10: begin. this is the first data payload of this - * transaction (which is larger than 188 bytes). - * * 2'b00: mid. this is the middle payload of this transaction - * (which is larger than 188 bytes). - * * 2'b01: end. this is the last payload of this transaction - * (which is larger than 188 bytes). - * @hubaddr: hub address (hubaddr) - * this field holds the device address of the transaction - * translator's hub. - * @prtaddr: port address (prtaddr) - * this field is the port number of the recipient transaction - * translator. - */ - struct cvmx_usbcx_hcspltx_s { - __bitfield_field(u32 spltena : 1, - __bitfield_field(u32 reserved_17_30 : 14, - __bitfield_field(u32 compsplt : 1, - __bitfield_field(u32 xactpos : 2, - __bitfield_field(u32 hubaddr : 7, - __bitfield_field(u32 prtaddr : 7, - ;)))))) - } s; -}; - -/** - * cvmx_usbc#_hctsiz# - * - * host channel-n transfer size register (hctsiz) - * - */ -union cvmx_usbcx_hctsizx { - u32 u32; - /** - * struct cvmx_usbcx_hctsizx_s - * @dopng: do ping (dopng) - * setting this field to 1 directs the host to do ping protocol. - * @pid: pid (pid) - * the application programs this field with the type of pid to use - * for the initial transaction. the host will maintain this field - * for the rest of the transfer. 
- * * 2'b00: data0 - * * 2'b01: data2 - * * 2'b10: data1 - * * 2'b11: mdata (non-control)/setup (control) - * @pktcnt: packet count (pktcnt) - * this field is programmed by the application with the expected - * number of packets to be transmitted (out) or received (in). - * the host decrements this count on every successful - * transmission or reception of an out/in packet. once this count - * reaches zero, the application is interrupted to indicate normal - * completion. - * @xfersize: transfer size (xfersize) - * for an out, this field is the number of data bytes the host will - * send during the transfer. - * for an in, this field is the buffer size that the application - * has reserved for the transfer. the application is expected to - * program this field as an integer multiple of the maximum packet - * size for in transactions (periodic and non-periodic). - */ - struct cvmx_usbcx_hctsizx_s { - __bitfield_field(u32 dopng : 1, - __bitfield_field(u32 pid : 2, - __bitfield_field(u32 pktcnt : 10, - __bitfield_field(u32 xfersize : 19, - ;)))) - } s; -}; - -/** - * cvmx_usbc#_hfir - * - * host frame interval register (hfir) - * - * this register stores the frame interval information for the current speed to - * which the o2p usb core has enumerated. - */ -union cvmx_usbcx_hfir { - u32 u32; - /** - * struct cvmx_usbcx_hfir_s - * @frint: frame interval (frint) - * the value that the application programs to this field specifies - * the interval between two consecutive sofs (fs) or micro- - * sofs (hs) or keep-alive tokens (hs). this field contains the - * number of phy clocks that constitute the required frame - * interval. the default value set in this field for a fs operation - * when the phy clock frequency is 60 mhz. the application can - * write a value to this register only after the port enable bit of - * the host port control and status register (hprt.prtenaport) - * has been set. 
if no value is programmed, the core calculates - * the value based on the phy clock specified in the fs/ls phy - * clock select field of the host configuration register - * (hcfg.fslspclksel). do not change the value of this field - * after the initial configuration. - * * 125 us (phy clock frequency for hs) - * * 1 ms (phy clock frequency for fs/ls) - */ - struct cvmx_usbcx_hfir_s { - __bitfield_field(u32 reserved_16_31 : 16, - __bitfield_field(u32 frint : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hfnum - * - * host frame number/frame time remaining register (hfnum) - * - * this register indicates the current frame number. - * it also indicates the time remaining (in terms of the number of phy clocks) - * in the current (micro)frame. - */ -union cvmx_usbcx_hfnum { - u32 u32; - /** - * struct cvmx_usbcx_hfnum_s - * @frrem: frame time remaining (frrem) - * indicates the amount of time remaining in the current - * microframe (hs) or frame (fs/ls), in terms of phy clocks. - * this field decrements on each phy clock. when it reaches - * zero, this field is reloaded with the value in the frame - * interval register and a new sof is transmitted on the usb. - * @frnum: frame number (frnum) - * this field increments when a new sof is transmitted on the - * usb, and is reset to 0 when it reaches 16'h3fff. - */ - struct cvmx_usbcx_hfnum_s { - __bitfield_field(u32 frrem : 16, - __bitfield_field(u32 frnum : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hprt - * - * host port control and status register (hprt) - * - * this register is available in both host and device modes. - * currently, the otg host supports only one port. - * a single register holds usb port-related information such as usb reset, - * enable, suspend, resume, connect status, and test mode for each port. the - * r_ss_wc bits in this register can trigger an interrupt to the application - * through the host port interrupt bit of the core interrupt register - * (gintsts.prtint). 
on a port interrupt, the application must read this - * register and clear the bit that caused the interrupt. for the r_ss_wc bits, - * the application must write a 1 to the bit to clear the interrupt. - */ -union cvmx_usbcx_hprt { - u32 u32; - /** - * struct cvmx_usbcx_hprt_s - * @prtspd: port speed (prtspd) - * indicates the speed of the device attached to this port. - * * 2'b00: high speed - * * 2'b01: full speed - * * 2'b10: low speed - * * 2'b11: reserved - * @prttstctl: port test control (prttstctl) - * the application writes a nonzero value to this field to put - * the port into a test mode, and the corresponding pattern is - * signaled on the port. - * * 4'b0000: test mode disabled - * * 4'b0001: test_j mode - * * 4'b0010: test_k mode - * * 4'b0011: test_se0_nak mode - * * 4'b0100: test_packet mode - * * 4'b0101: test_force_enable - * * others: reserved - * prtspd must be zero (i.e. the interface must be in high-speed - * mode) to use the prttstctl test modes. - * @prtpwr: port power (prtpwr) - * the application uses this field to control power to this port, - * and the core clears this bit on an overcurrent condition. - * * 1'b0: power off - * * 1'b1: power on - * @prtlnsts: port line status (prtlnsts) - * indicates the current logic level usb data lines - * * bit [10]: logic level of d- - * * bit [11]: logic level of d+ - * @prtrst: port reset (prtrst) - * when the application sets this bit, a reset sequence is - * started on this port. the application must time the reset - * period and clear this bit after the reset sequence is - * complete. - * * 1'b0: port not in reset - * * 1'b1: port in reset - * the application must leave this bit set for at least a - * minimum duration mentioned below to start a reset on the - * port. the application can leave it set for another 10 ms in - * addition to the required minimum duration, before clearing - * the bit, even though there is no maximum limit set by the - * usb standard. 
- * * high speed: 50 ms - * * full speed/low speed: 10 ms - * @prtsusp: port suspend (prtsusp) - * the application sets this bit to put this port in suspend - * mode. the core only stops sending sofs when this is set. - * to stop the phy clock, the application must set the port - * clock stop bit, which will assert the suspend input pin of - * the phy. - * the read value of this bit reflects the current suspend - * status of the port. this bit is cleared by the core after a - * remote wakeup signal is detected or the application sets - * the port reset bit or port resume bit in this register or the - * resume/remote wakeup detected interrupt bit or - * disconnect detected interrupt bit in the core interrupt - * register (gintsts.wkupint or gintsts.disconnint, - * respectively). - * * 1'b0: port not in suspend mode - * * 1'b1: port in suspend mode - * @prtres: port resume (prtres) - * the application sets this bit to drive resume signaling on - * the port. the core continues to drive the resume signal - * until the application clears this bit. - * if the core detects a usb remote wakeup sequence, as - * indicated by the port resume/remote wakeup detected - * interrupt bit of the core interrupt register - * (gintsts.wkupint), the core starts driving resume - * signaling without application intervention and clears this bit - * when it detects a disconnect condition. the read value of - * this bit indicates whether the core is currently driving - * resume signaling. - * * 1'b0: no resume driven - * * 1'b1: resume driven - * @prtovrcurrchng: port overcurrent change (prtovrcurrchng) - * the core sets this bit when the status of the port - * overcurrent active bit (bit 4) in this register changes. - * @prtovrcurract: port overcurrent active (prtovrcurract) - * indicates the overcurrent condition of the port. 
- * * 1'b0: no overcurrent condition - * * 1'b1: overcurrent condition - * @prtenchng: port enable/disable change (prtenchng) - * the core sets this bit when the status of the port enable bit - * [2] of this register changes. - * @prtena: port enable (prtena) - * a port is enabled only by the core after a reset sequence, - * and is disabled by an overcurrent condition, a disconnect - * condition, or by the application clearing this bit. the - * application cannot set this bit by a register write. it can only - * clear it to disable the port. this bit does not trigger any - * interrupt to the application. - * * 1'b0: port disabled - * * 1'b1: port enabled - * @prtconndet: port connect detected (prtconndet) - * the core sets this bit when a device connection is detected - * to trigger an interrupt to the application using the host port - * interrupt bit of the core interrupt register (gintsts.prtint). - * the application must write a 1 to this bit to clear the - * interrupt. - * @prtconnsts: port connect status (prtconnsts) - * * 0: no device is attached to the port. - * * 1: a device is attached to the port. - */ - struct cvmx_usbcx_hprt_s { - __bitfield_field(u32 reserved_19_31 : 13, - __bitfield_field(u32 prtspd : 2, - __bitfield_field(u32 prttstctl : 4, - __bitfield_field(u32 prtpwr : 1, - __bitfield_field(u32 prtlnsts : 2, - __bitfield_field(u32 reserved_9_9 : 1, - __bitfield_field(u32 prtrst : 1, - __bitfield_field(u32 prtsusp : 1, - __bitfield_field(u32 prtres : 1, - __bitfield_field(u32 prtovrcurrchng : 1, - __bitfield_field(u32 prtovrcurract : 1, - __bitfield_field(u32 prtenchng : 1, - __bitfield_field(u32 prtena : 1, - __bitfield_field(u32 prtconndet : 1, - __bitfield_field(u32 prtconnsts : 1, - ;))))))))))))))) - } s; -}; - -/** - * cvmx_usbc#_hptxfsiz - * - * host periodic transmit fifo size register (hptxfsiz) - * - * this register holds the size and the memory start address of the periodic - * txfifo, as shown in figures 310 and 311. 
- */ -union cvmx_usbcx_hptxfsiz { - u32 u32; - /** - * struct cvmx_usbcx_hptxfsiz_s - * @ptxfsize: host periodic txfifo depth (ptxfsize) - * this value is in terms of 32-bit words. - * * minimum value is 16 - * * maximum value is 32768 - * @ptxfstaddr: host periodic txfifo start address (ptxfstaddr) - */ - struct cvmx_usbcx_hptxfsiz_s { - __bitfield_field(u32 ptxfsize : 16, - __bitfield_field(u32 ptxfstaddr : 16, - ;)) - } s; -}; - -/** - * cvmx_usbc#_hptxsts - * - * host periodic transmit fifo/queue status register (hptxsts) - * - * this read-only register contains the free space information for the periodic - * txfifo and the periodic transmit request queue - */ -union cvmx_usbcx_hptxsts { - u32 u32; - /** - * struct cvmx_usbcx_hptxsts_s - * @ptxqtop: top of the periodic transmit request queue (ptxqtop) - * this indicates the entry in the periodic tx request queue that - * is currently being processes by the mac. - * this register is used for debugging. - * * bit [31]: odd/even (micro)frame - * - 1'b0: send in even (micro)frame - * - 1'b1: send in odd (micro)frame - * * bits [30:27]: channel/endpoint number - * * bits [26:25]: type - * - 2'b00: in/out - * - 2'b01: zero-length packet - * - 2'b10: csplit - * - 2'b11: disable channel command - * * bit [24]: terminate (last entry for the selected - * channel/endpoint) - * @ptxqspcavail: periodic transmit request queue space available - * (ptxqspcavail) - * indicates the number of free locations available to be written - * in the periodic transmit request queue. this queue holds both - * in and out requests. - * * 8'h0: periodic transmit request queue is full - * * 8'h1: 1 location available - * * 8'h2: 2 locations available - * * n: n locations available (0..8) - * * others: reserved - * @ptxfspcavail: periodic transmit data fifo space available - * (ptxfspcavail) - * indicates the number of free locations available to be written - * to in the periodic txfifo. 
- * values are in terms of 32-bit words - * * 16'h0: periodic txfifo is full - * * 16'h1: 1 word available - * * 16'h2: 2 words available - * * 16'hn: n words available (where 0..32768) - * * 16'h8000: 32768 words available - * * others: reserved - */ - struct cvmx_usbcx_hptxsts_s { - __bitfield_field(u32 ptxqtop : 8, - __bitfield_field(u32 ptxqspcavail : 8, - __bitfield_field(u32 ptxfspcavail : 16, - ;))) - } s; -}; - -/** - * cvmx_usbn#_clk_ctl - * - * usbn_clk_ctl = usbn's clock control - * - * this register is used to control the frequency of the hclk and the - * hreset and phy_rst signals. - */ -union cvmx_usbnx_clk_ctl { - u64 u64; - /** - * struct cvmx_usbnx_clk_ctl_s - * @divide2: the 'hclk' used by the usb subsystem is derived - * from the eclk. - * also see the field divide. divide2<1> must currently - * be zero because it is not implemented, so the maximum - * ratio of eclk/hclk is currently 16. - * the actual divide number for hclk is: - * (divide2 + 1) * (divide + 1) - * @hclk_rst: when this field is '0' the hclk-divider used to - * generate the hclk in the usb subsystem is held - * in reset. this bit must be set to '0' before - * changing the value os divide in this register. - * the reset to the hclk_divideris also asserted - * when core reset is asserted. - * @p_x_on: force usb-phy on during suspend. - * '1' usb-phy xo block is powered-down during - * suspend. - * '0' usb-phy xo block is powered-up during - * suspend. - * the value of this field must be set while por is - * active. - * @p_rtype: phy reference clock type - * on cn50xx/cn52xx/cn56xx the values are: - * '0' the usb-phy uses a 12mhz crystal as a clock source - * at the usb_xo and usb_xi pins. - * '1' reserved. - * '2' the usb_phy uses 12/24/48mhz 2.5v board clock at the - * usb_xo pin. usb_xi should be tied to ground in this - * case. - * '3' reserved. - * on cn3xxx bits 14 and 15 are p_xenbn and p_rclk and values are: - * '0' reserved. - * '1' reserved. 
- * '2' the phy pll uses the xo block output as a reference. - * the xo block uses an external clock supplied on the - * xo pin. usb_xi should be tied to ground for this - * usage. - * '3' the xo block uses the clock from a crystal. - * @p_com_on: '0' force usb-phy xo bias, bandgap and pll to - * remain powered in suspend mode. - * '1' the usb-phy xo bias, bandgap and pll are - * powered down in suspend mode. - * the value of this field must be set while por is - * active. - * @p_c_sel: phy clock speed select. - * selects the reference clock / crystal frequency. - * '11': reserved - * '10': 48 mhz (reserved when a crystal is used) - * '01': 24 mhz (reserved when a crystal is used) - * '00': 12 mhz - * the value of this field must be set while por is - * active. - * note: if a crystal is used as a reference clock, - * this field must be set to 12 mhz. - * @cdiv_byp: used to enable the bypass input to the usb_clk_div. - * @sd_mode: scaledown mode for the usbc. control timing events - * in the usbc, for normal operation this must be '0'. - * @s_bist: starts bist on the hclk memories, during the '0' - * to '1' transition. - * @por: power on reset for the phy. - * resets all the phys registers and state machines. - * @enable: when '1' allows the generation of the hclk. when - * '0' the hclk will not be generated. see divide - * field of this register. - * @prst: when this field is '0' the reset associated with - * the phy_clk functionality in the usb subsystem is - * help in reset. this bit should not be set to '1' - * until the time it takes 6 clocks (hclk or phy_clk, - * whichever is slower) has passed. under normal - * operation once this bit is set to '1' it should not - * be set to '0'. - * @hrst: when this field is '0' the reset associated with - * the hclk functioanlity in the usb subsystem is - * held in reset.this bit should not be set to '1' - * until 12ms after phy_clk is stable. 
under normal - * operation, once this bit is set to '1' it should - * not be set to '0'. - * @divide: the frequency of 'hclk' used by the usb subsystem - * is the eclk frequency divided by the value of - * (divide2 + 1) * (divide + 1), also see the field - * divide2 of this register. - * the hclk frequency should be less than 125mhz. - * after writing a value to this field the sw should - * read the field for the value written. - * the enable field of this register should not be set - * until after this field is set and then read. - */ - struct cvmx_usbnx_clk_ctl_s { - __bitfield_field(u64 reserved_20_63 : 44, - __bitfield_field(u64 divide2 : 2, - __bitfield_field(u64 hclk_rst : 1, - __bitfield_field(u64 p_x_on : 1, - __bitfield_field(u64 p_rtype : 2, - __bitfield_field(u64 p_com_on : 1, - __bitfield_field(u64 p_c_sel : 2, - __bitfield_field(u64 cdiv_byp : 1, - __bitfield_field(u64 sd_mode : 2, - __bitfield_field(u64 s_bist : 1, - __bitfield_field(u64 por : 1, - __bitfield_field(u64 enable : 1, - __bitfield_field(u64 prst : 1, - __bitfield_field(u64 hrst : 1, - __bitfield_field(u64 divide : 3, - ;))))))))))))))) - } s; -}; - -/** - * cvmx_usbn#_usbp_ctl_status - * - * usbn_usbp_ctl_status = usbp control and status register - * - * contains general control and status information for the usbn block. - */ -union cvmx_usbnx_usbp_ctl_status { - u64 u64; - /** - * struct cvmx_usbnx_usbp_ctl_status_s - * @txrisetune: hs transmitter rise/fall time adjustment - * @txvreftune: hs dc voltage level adjustment - * @txfslstune: fs/ls source impedance adjustment - * @txhsxvtune: transmitter high-speed crossover adjustment - * @sqrxtune: squelch threshold adjustment - * @compdistune: disconnect threshold adjustment - * @otgtune: vbus valid threshold adjustment - * @otgdisable: otg block disable - * @portreset: per_port reset - * @drvvbus: drive vbus - * @lsbist: low-speed bist enable. - * @fsbist: full-speed bist enable. - * @hsbist: high-speed bist enable. 
- * @bist_done: phy bist done. - * asserted at the end of the phy bist sequence. - * @bist_err: phy bist error. - * indicates an internal error was detected during - * the bist sequence. - * @tdata_out: phy test data out. - * presents either internally generated signals or - * test register contents, based upon the value of - * test_data_out_sel. - * @siddq: drives the usbp (usb-phy) siddq input. - * normally should be set to zero. - * when customers have no intent to use usb phy - * interface, they should: - * - still provide 3.3v to usb_vdd33, and - * - tie usb_rext to 3.3v supply, and - * - set usbn*_usbp_ctl_status[siddq]=1 - * @txpreemphasistune: hs transmitter pre-emphasis enable - * @dma_bmode: when set to 1 the l2c dma address will be updated - * with byte-counts between packets. when set to 0 - * the l2c dma address is incremented to the next - * 4-byte aligned address after adding byte-count. - * @usbc_end: bigendian input to the usb core. this should be - * set to '0' for operation. - * @usbp_bist: phy, this is cleared '0' to run bist on the usbp. - * @tclk: phy test clock, used to load tdata_in to the usbp. - * @dp_pulld: phy dp_pulldown input to the usb-phy. - * this signal enables the pull-down resistance on - * the d+ line. '1' pull down-resistance is connected - * to d+/ '0' pull down resistance is not connected - * to d+. when an a/b device is acting as a host - * (downstream-facing port), dp_pulldown and - * dm_pulldown are enabled. this must not toggle - * during normal operation. - * @dm_pulld: phy dm_pulldown input to the usb-phy. - * this signal enables the pull-down resistance on - * the d- line. '1' pull down-resistance is connected - * to d-. '0' pull down resistance is not connected - * to d-. when an a/b device is acting as a host - * (downstream-facing port), dp_pulldown and - * dm_pulldown are enabled. this must not toggle - * during normal operation. 
- * @hst_mode: when '0' the usb is acting as host, when '1' - * usb is acting as device. this field needs to be - * set while the usb is in reset. - * @tuning: transmitter tuning for high-speed operation. - * tunes the current supply and rise/fall output - * times for high-speed operation. - * [20:19] == 11: current supply increased - * approximately 9% - * [20:19] == 10: current supply increased - * approximately 4.5% - * [20:19] == 01: design default. - * [20:19] == 00: current supply decreased - * approximately 4.5% - * [22:21] == 11: rise and fall times are increased. - * [22:21] == 10: design default. - * [22:21] == 01: rise and fall times are decreased. - * [22:21] == 00: rise and fall times are decreased - * further as compared to the 01 setting. - * @tx_bs_enh: transmit bit stuffing on [15:8]. - * enables or disables bit stuffing on data[15:8] - * when bit-stuffing is enabled. - * @tx_bs_en: transmit bit stuffing on [7:0]. - * enables or disables bit stuffing on data[7:0] - * when bit-stuffing is enabled. - * @loop_enb: phy loopback test enable. - * '1': during data transmission the receive is - * enabled. - * '0': during data transmission the receive is - * disabled. - * must be '0' for normal operation. - * @vtest_enb: analog test pin enable. - * '1' the phy's analog_test pin is enabled for the - * input and output of applicable analog test signals. - * '0' the analog_test pin is disabled. - * @bist_enb: built-in self test enable. - * used to activate bist in the phy. - * @tdata_sel: test data out select. - * '1' test_data_out[3:0] (phy) register contents - * are output. '0' internally generated signals are - * output. - * @taddr_in: mode address for test interface. - * specifies the register address for writing to or - * reading from the phy test interface register. - * @tdata_in: internal testing register input data and select - * this is a test bus. data is present on [3:0], - * and its corresponding select (enable) is present - * on bits [7:4]. 
- * @ate_reset: reset input from automatic test equipment. - * this is a test signal. when the usb core is - * powered up (not in susned mode), an automatic - * tester can use this to disable phy_clock and - * free_clk, then re-enable them with an aligned - * phase. - * '1': the phy_clk and free_clk outputs are - * disabled. "0": the phy_clock and free_clk outputs - * are available within a specific period after the - * de-assertion. - */ - struct cvmx_usbnx_usbp_ctl_status_s { - __bitfield_field(u64 txrisetune : 1, - __bitfield_field(u64 txvreftune : 4, - __bitfield_field(u64 txfslstune : 4, - __bitfield_field(u64 txhsxvtune : 2, - __bitfield_field(u64 sqrxtune : 3, - __bitfield_field(u64 compdistune : 3, - __bitfield_field(u64 otgtune : 3, - __bitfield_field(u64 otgdisable : 1, - __bitfield_field(u64 portreset : 1, - __bitfield_field(u64 drvvbus : 1, - __bitfield_field(u64 lsbist : 1, - __bitfield_field(u64 fsbist : 1, - __bitfield_field(u64 hsbist : 1, - __bitfield_field(u64 bist_done : 1, - __bitfield_field(u64 bist_err : 1, - __bitfield_field(u64 tdata_out : 4, - __bitfield_field(u64 siddq : 1, - __bitfield_field(u64 txpreemphasistune : 1, - __bitfield_field(u64 dma_bmode : 1, - __bitfield_field(u64 usbc_end : 1, - __bitfield_field(u64 usbp_bist : 1, - __bitfield_field(u64 tclk : 1, - __bitfield_field(u64 dp_pulld : 1, - __bitfield_field(u64 dm_pulld : 1, - __bitfield_field(u64 hst_mode : 1, - __bitfield_field(u64 tuning : 4, - __bitfield_field(u64 tx_bs_enh : 1, - __bitfield_field(u64 tx_bs_en : 1, - __bitfield_field(u64 loop_enb : 1, - __bitfield_field(u64 vtest_enb : 1, - __bitfield_field(u64 bist_enb : 1, - __bitfield_field(u64 tdata_sel : 1, - __bitfield_field(u64 taddr_in : 4, - __bitfield_field(u64 tdata_in : 8, - __bitfield_field(u64 ate_reset : 1, - ;))))))))))))))))))))))))))))))))))) - } s; -}; - -#endif /* __octeon_hcd_h__ */
|
Drivers in the Staging area
|
95ace52e4036482da1895b6e19f15141802cc3dd
|
greg kroah hartman
|
drivers
|
staging
|
octeon-usb
|
staging: remove isdn capi drivers
|
as described in drivers/staging/isdn/todo, the drivers are all assumed to be unmaintained and unused now, with gigaset being the last one to stop being maintained after paul bolle lost access to an isdn network.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
remove isdn capi drivers
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
[]
|
['c', 'kconfig', 'maintainers', 'rst', 'makefile', 'h', 'todo']
| 54
| 1
| 23,764
|
--- diff --git a/documentation/isdn/avmb1.rst b/documentation/isdn/avmb1.rst --- a/documentation/isdn/avmb1.rst +++ /dev/null -================================ -driver for active avm controller -================================ - -the driver provides a kernel capi2.0 interface (kernelcapi) and -on top of this a user-level-capi2.0-interface (capi) -and a driver to connect isdn4linux with capi2.0 (capidrv). -the lowlevel interface can be used to implement a capi2.0 -also for passive cards since july 1999. - -the author can be reached at calle@calle.in-berlin.de. -the command avmcapictrl is part of the isdn4k-utils. -t4-files can be found at ftp://ftp.avm.de/cardware/b1/linux/firmware - -currently supported cards: - - - b1 isa (all versions) - - b1 pci - - t1/t1b (hema card) - - m1 - - m2 - - b1 pcmcia - -installing ----------- - -you need at least /dev/capi20 to load the firmware. - -:: - - mknod /dev/capi20 c 68 0 - mknod /dev/capi20.00 c 68 1 - mknod /dev/capi20.01 c 68 2 - . - . - . - mknod /dev/capi20.19 c 68 20 - -running -------- - -to use the card you need the t4-files to download the firmware. -avm gmbh provides several t4-files for the different d-channel -protocols (b1.t4 for euro-isdn). install these file in /lib/isdn. 
- -if you configure as modules load the modules this way:: - - insmod /lib/modules/current/misc/capiutil.o - insmod /lib/modules/current/misc/b1.o - insmod /lib/modules/current/misc/kernelcapi.o - insmod /lib/modules/current/misc/capidrv.o - insmod /lib/modules/current/misc/capi.o - -if you have an b1-pci card load the module b1pci.o:: - - insmod /lib/modules/current/misc/b1pci.o - -and load the firmware with:: - - avmcapictrl load /lib/isdn/b1.t4 1 - -if you have an b1-isa card load the module b1isa.o -and add the card by calling:: - - avmcapictrl add 0x150 15 - -and load the firmware by calling:: - - avmcapictrl load /lib/isdn/b1.t4 1 - -if you have an t1-isa card load the module t1isa.o -and add the card by calling:: - - avmcapictrl add 0x450 15 t1 0 - -and load the firmware by calling:: - - avmcapictrl load /lib/isdn/t1.t4 1 - -if you have an pcmcia card (b1/m1/m2) load the module b1pcmcia.o -before you insert the card. - -leased lines with b1 --------------------- - -init card and load firmware. - -for an d64s use "fv: 1" as phone number - -for an d64s2 use "fv: 1" and "fv: 2" for multilink -or "fv: 1,2" to use capi channel bundling. - -/proc-interface ------------------ - -/proc/capi:: - - dr-xr-xr-x 2 root root 0 jul 1 14:03 . - dr-xr-xr-x 82 root root 0 jun 30 19:08 .. 
- -r--r--r-- 1 root root 0 jul 1 14:03 applications - -r--r--r-- 1 root root 0 jul 1 14:03 applstats - -r--r--r-- 1 root root 0 jul 1 14:03 capi20 - -r--r--r-- 1 root root 0 jul 1 14:03 capidrv - -r--r--r-- 1 root root 0 jul 1 14:03 controller - -r--r--r-- 1 root root 0 jul 1 14:03 contrstats - -r--r--r-- 1 root root 0 jul 1 14:03 driver - -r--r--r-- 1 root root 0 jul 1 14:03 ncci - -r--r--r-- 1 root root 0 jul 1 14:03 users - -/proc/capi/applications: - applid level3cnt datablkcnt datablklen ncci-cnt recvqueuelen - level3cnt: - capi_register parameter - datablkcnt: - capi_register parameter - ncci-cnt: - current number of nccis (connections) - recvqueuelen: - number of messages on receive queue - - for example:: - - 1 -2 16 2048 1 0 - 2 2 7 2048 1 0 - -/proc/capi/applstats: - applid recvctlmsg nrecvdatamsg nsentctlmsg nsentdatamsg - recvctlmsg: - capi messages received without data_b3_ind - recvdatamsg: - capi data_b3_ind received - sentctlmsg: - capi messages sent without data_b3_req - sentdatamsg: - capi data_b3_req sent - - for example:: - - 1 2057 1699 1721 1699 - -/proc/capi/capi20: statistics of capi.o (/dev/capi20) - minor nopen nrecvdropmsg nrecvctlmsg nrecvdatamsg sentctlmsg sentdatamsg - minor: - minor device number of capi device - nopen: - number of calls to devices open - nrecvdropmsg: - capi messages dropped (messages in recvqueue in close) - nrecvctlmsg: - capi messages received without data_b3_ind - nrecvdatamsg: - capi data_b3_ind received - nsentctlmsg: - capi messages sent without data_b3_req - nsentdatamsg: - capi data_b3_req sent - - for example:: - - 1 2 18 0 16 2 - -/proc/capi/capidrv: statistics of capidrv.o (capi messages) - nrecvctlmsg nrecvdatamsg sentctlmsg sentdatamsg - nrecvctlmsg: - capi messages received without data_b3_ind - nrecvdatamsg: - capi data_b3_ind received - nsentctlmsg: - capi messages sent without data_b3_req - nsentdatamsg: - capi data_b3_req sent - - for example: - 2780 2226 2256 2226 - -/proc/capi/controller: - 
controller drivername state cardname controllerinfo - - for example:: - - 1 b1pci running b1pci-e000 b1 3.07-01 0xe000 19 - 2 t1isa running t1isa-450 b1 3.07-01 0x450 11 0 - 3 b1pcmcia running m2-150 b1 3.07-01 0x150 5 - -/proc/capi/contrstats: - controller nrecvctlmsg nrecvdatamsg sentctlmsg sentdatamsg - nrecvctlmsg: - capi messages received without data_b3_ind - nrecvdatamsg: - capi data_b3_ind received - nsentctlmsg: - capi messages sent without data_b3_req - nsentdatamsg: - capi data_b3_req sent - - for example:: - - 1 2845 2272 2310 2274 - 2 2 0 2 0 - 3 2 0 2 0 - -/proc/capi/driver: - drivername ncontroller - - for example:: - - b1pci 1 - t1isa 1 - b1pcmcia 1 - b1isa 0 - -/proc/capi/ncci: - apllid ncci winsize sendwindow - - for example:: - - 1 0x10101 8 0 - -/proc/capi/users: kernelmodules that use the kernelcapi. - name - - for example:: - - capidrv - capi20 - -questions ---------- - -check out the faq (ftp.isdn4linux.de) or subscribe to the -linux-avmb1@calle.in-berlin.de mailing list by sending -a mail to majordomo@calle.in-berlin.de with -subscribe linux-avmb1 -in the body. - -german documentation and several scripts can be found at -ftp://ftp.avm.de/cardware/b1/linux/ - -bugs ----- - -if you find any please let me know. - -enjoy, - -carsten paeth (calle@calle.in-berlin.de) diff --git a/documentation/isdn/gigaset.rst b/documentation/isdn/gigaset.rst --- a/documentation/isdn/gigaset.rst +++ /dev/null -========================== -gigaset 307x device driver -========================== - -1. requirements -================= - -1.1. hardware -------------- - - this driver supports the connection of the gigaset 307x/417x family of - isdn dect bases via gigaset m101 data, gigaset m105 data or direct usb - connection. 
the following devices are reported to be compatible: - - bases: - - siemens gigaset 3070/3075 isdn - - siemens gigaset 4170/4175 isdn - - siemens gigaset sx205/255 - - siemens gigaset sx353 - - t-com sinus 45 [ab] isdn - - t-com sinus 721x[a] [se] - - vox chicago 390 isdn (kpn telecom) - - rs232 data boxes: - - siemens gigaset m101 data - - t-com sinus 45 data 1 - - usb data boxes: - - siemens gigaset m105 data - - siemens gigaset usb adapter dect - - t-com sinus 45 data 2 - - t-com sinus 721 data - - chicago 390 usb (kpn) - - see also http://www.erbze.info/sinus_gigaset.htm - (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm ) and - http://gigaset307x.sourceforge.net/ - - we had also reports from users of gigaset m105 who could use the drivers - with sx 100 and cx 100 isdn bases (only in unimodem mode, see section 2.5.) - if you have another device that works with our driver, please let us know. - - chances of getting an usb device to work are good if the output of:: - - lsusb - - at the command line contains one of the following:: - - id 0681:0001 - id 0681:0002 - id 0681:0009 - id 0681:0021 - id 0681:0022 - -1.2. software -------------- - - the driver works with the kernel capi subsystem and can be used with any - software which is able to use capi 2.0 for isdn connections (voice or data). - - there are some user space tools available at - https://sourceforge.net/projects/gigaset307x/ - which provide access to additional device specific functions like sms, - phonebook or call journal. - - -2. how to use the driver -========================== - -2.1. modules ------------- - - for the devices to work, the proper kernel modules have to be loaded. - this normally happens automatically when the system detects the usb - device (base, m105) or when the line discipline is attached (m101). it - can also be triggered manually using the modprobe(8) command, for example - for troubleshooting or to pass module parameters. 
- - the module ser_gigaset provides a serial line discipline n_gigaset_m101 - which uses the regular serial port driver to access the device, and must - therefore be attached to the serial device to which the m101 is connected. - the ldattach(8) command (included in util-linux-ng release 2.14 or later) - can be used for that purpose, for example:: - - ldattach gigaset_m101 /dev/ttys1 - - this will open the device file, attach the line discipline to it, and - then sleep in the background, keeping the device open so that the line - discipline remains active. to deactivate it, kill the daemon, for example - with:: - - killall ldattach - - before disconnecting the device. to have this happen automatically at - system startup/shutdown on an lsb compatible system, create and activate - an appropriate lsb startup script /etc/init.d/gigaset. (the init name - 'gigaset' is officially assigned to this project by lanana.) - alternatively, just add the 'ldattach' command line to /etc/rc.local. - - the modules accept the following parameters: - - =============== ========== ========================================== - module parameter meaning - - gigaset debug debug level (see section 3.2.) - - startmode initial operation mode (see section 2.5.): - bas_gigaset ) 1=capi (default), 0=unimodem - ser_gigaset ) - usb_gigaset ) cidmode initial call-id mode setting (see section - 2.5.): 1=on (default), 0=off - - =============== ========== ========================================== - - depending on your distribution you may want to create a separate module - configuration file like /etc/modprobe.d/gigaset.conf for these. - -2.2. device nodes for user space programs ------------------------------------------ - - the device can be accessed from user space (eg. by the user space tools - mentioned in 1.2.) 
through the device nodes: - - - /dev/ttygs0 for m101 (rs232 data boxes) - - /dev/ttygu0 for m105 (usb data boxes) - - /dev/ttygb0 for the base driver (direct usb connection) - - if you connect more than one device of a type, they will get consecutive - device nodes, eg. /dev/ttygu1 for a second m105. - - you can also set a "default device" for the user space tools to use when - no device node is given as parameter, by creating a symlink /dev/ttyg to - one of them, eg.:: - - ln -s /dev/ttygb0 /dev/ttyg - - the devices accept the following device specific ioctl calls - (defined in gigaset_dev.h): - - ''ioctl(int fd, gigaset_redir, int *cmd);'' - - if cmd==1, the device is set to be controlled exclusively through the - character device node; access from the isdn subsystem is blocked. - - if cmd==0, the device is set to be used from the isdn subsystem and does - not communicate through the character device node. - - ''ioctl(int fd, gigaset_config, int *cmd);'' - - (ser_gigaset and usb_gigaset only) - - if cmd==1, the device is set to adapter configuration mode where commands - are interpreted by the m10x dect adapter itself instead of being - forwarded to the base station. in this mode, the device accepts the - commands described in siemens document "at-kommando alignment m10x data" - for setting the operation mode, associating with a base station and - querying parameters like field strengh and signal quality. - - note that there is no ioctl command for leaving adapter configuration - mode and returning to regular operation. in order to leave adapter - configuration mode, write the command ato to the device. - - ''ioctl(int fd, gigaset_brkchars, unsigned char brkchars[6]);'' - - (usb_gigaset only) - - set the break characters on an m105's internal serial adapter to the six - bytes stored in brkchars[]. unused bytes should be set to zero. - - ioctl(int fd, gigaset_version, unsigned version[4]); - retrieve version information from the driver. 
version[0] must be set to - one of: - - - gigver_driver: retrieve driver version - - gigver_compat: retrieve interface compatibility version - - gigver_fwbase: retrieve the firmware version of the base - - upon return, version[] is filled with the requested version information. - -2.3. capi ---------- - - the devices will show up as capi controllers as soon as the - corresponding driver module is loaded, and can then be used with - capi 2.0 kernel and user space applications. for user space access, - the module capi.ko must be loaded. - - most distributions handle loading and unloading of the various capi - modules automatically via the command capiinit(1) from the capi4k-utils - package or a similar mechanism. note that capiinit(1) cannot unload the - gigaset drivers because it doesn't support more than one module per - driver. - -2.5. unimodem mode ------------------- - - in this mode the device works like a modem connected to a serial port - (the /dev/ttygu0, ... mentioned above) which understands the commands:: - - atz init, reset - => ok or error - atd - atdt dial - => ok, connect, - busy, - no dial tone, - no carrier, - no answer - <pause>+++<pause> change to command mode when connected - ath hangup - - you can use some configuration tool of your distribution to configure this - "modem" or configure pppd/wvdial manually. there are some example ppp - configuration files and chat scripts in the gigaset-version/ppp directory - in the driver packages from https://sourceforge.net/projects/gigaset307x/. - please note that the usb drivers are not able to change the state of the - control lines. this means you must use "stupid mode" if you are using - wvdial or you should use the nocrtscts option of pppd. - you must also assure that the ppp_async module is loaded with the parameter - flag_time=0. you can do this e.g. by adding a line like:: - - options ppp_async flag_time=0 - - to an appropriate module configuration file, like:: - - /etc/modprobe.d/gigaset.conf. 
- - unimodem mode is needed for making some devices [e.g. sx100] work which - do not support the regular gigaset command set. if debug output (see - section 3.2.) shows something like this when dialing:: - - cmd received: error - available params: 0 - connection state: 0, response: -1 - gigaset_process_response: resp_code -1 in constate 0 ! - timeout occurred - - then switching to unimodem mode may help. - - if you have installed the command line tool gigacontr, you can enter - unimodem mode using:: - - gigacontr --mode unimodem - - you can switch back using:: - - gigacontr --mode isdn - - you can also put the driver directly into unimodem mode when it's loaded, - by passing the module parameter startmode=0 to the hardware specific - module, e.g.:: - - modprobe usb_gigaset startmode=0 - - or by adding a line like:: - - options usb_gigaset startmode=0 - - to an appropriate module configuration file, like:: - - /etc/modprobe.d/gigaset.conf - -2.6. call-id (cid) mode ------------------------ - - call-ids are numbers used to tag commands to, and responses from, the - gigaset base in order to support the simultaneous handling of multiple - isdn calls. their use can be enabled ("cid mode") or disabled ("unimodem - mode"). without call-ids (in unimodem mode), only a very limited set of - functions is available. it allows outgoing data connections only, but - does not signal incoming calls or other base events. - - dect cordless data devices (m10x) permanently occupy the cordless - connection to the base while call-ids are activated. as the gigaset - bases only support one dect data connection at a time, this prevents - other dect cordless data devices from accessing the base. - - during active operation, the driver switches to the necessary mode - automatically. however, for the reasons above, the mode chosen when - the device is not in use (idle) can be selected by the user. - - - if you want to receive incoming calls, you can use the default - settings (cid mode). 
- - if you have several dect data devices (m10x) which you want to use - in turn, select unimodem mode by passing the parameter "cidmode=0" to - the appropriate driver module (ser_gigaset or usb_gigaset). - - if you want both of these at once, you are out of luck. - - you can also use the tty class parameter "cidmode" of the device to - change its cid mode while the driver is loaded, eg.:: - - echo 0 > /sys/class/tty/ttygu0/cidmode - -2.7. dialing numbers --------------------- -provided by an application for dialing out must - be a public network number according to the local dialing plan, without - any dial prefix for getting an outside line. - - internal calls can be made by providing an internal extension number - prefixed with ''**'' (two asterisks) as the called party number. so to dial - eg. the first registered dect handset, give ''**11'' as the called party - number. dialing ''***'' (three asterisks) calls all extensions - simultaneously (global call). - - unimodem mode does not support internal calls. - -2.8. unregistered wireless devices (m101/m105) ----------------------------------------------- - - the main purpose of the ser_gigaset and usb_gigaset drivers is to allow - the m101 and m105 wireless devices to be used as isdn devices for isdn - connections through a gigaset base. therefore they assume that the device - is registered to a dect base. - - if the m101/m105 device is not registered to a base, initialization of - the device fails, and a corresponding error message is logged by the - driver. in that situation, a restricted set of functions is available - which includes, in particular, those necessary for registering the device - to a base or for switching it between fixed part and portable part - modes. see the gigacontr(8) manpage for details. - -3. troubleshooting -==================== - -3.1. 
solutions to frequently reported problems ----------------------------------------------- - - problem: - you have a slow provider and isdn4linux gives up dialing too early. - solution: - load the isdn module using the dialtimeout option. you can do this e.g. - by adding a line like:: - - options isdn dialtimeout=15 - - to /etc/modprobe.d/gigaset.conf or a similar file. - - problem: - the isdnlog program emits error messages or just doesn't work. - solution: - isdnlog supports only the hisax driver. do not attempt to use it with - other drivers such as gigaset. - - problem: - you have two or more dect data adapters (m101/m105) and only the - first one you turn on works. - solution: - select unimodem mode for all dect data adapters. (see section 2.5.) - - problem: - messages like this:: - - usb_gigaset 3-2:1.0: could not initialize the device. - - appear in your syslog. - solution: - check whether your m10x wireless device is correctly registered to the - gigaset base. (see section 2.7.) - -3.2. telling the driver to provide more information ---------------------------------------------------- - building the driver with the "gigaset debugging" kernel configuration - option (config_gigaset_debug) gives it the ability to produce additional - information useful for debugging. - - you can control the amount of debugging information the driver produces by - writing an appropriate value to /sys/module/gigaset/parameters/debug, - e.g.:: - - echo 0 > /sys/module/gigaset/parameters/debug - - switches off debugging output completely, - - :: - - echo 0x302020 > /sys/module/gigaset/parameters/debug - - enables a reasonable set of debugging output messages. these values are - bit patterns where every bit controls a certain type of debugging output. - see the constants debug_* in the source file gigaset.h for details. - - the initial value can be set using the debug parameter when loading the - module "gigaset", e.g. 
by adding a line:: - - options gigaset debug=0 - - to your module configuration file, eg. /etc/modprobe.d/gigaset.conf - - generated debugging information can be found - - as output of the command:: - - dmesg - - - in system log files written by your syslog daemon, usually - in /var/log/, e.g. /var/log/messages. - -3.3. reporting problems and bugs --------------------------------- - if you can't solve problems with the driver on your own, feel free to - use one of the forums, bug trackers, or mailing lists on - - https://sourceforge.net/projects/gigaset307x - - or write an electronic mail to the maintainers. - - try to provide as much information as possible, such as - - - distribution - - kernel version (uname -r) - - gcc version (gcc --version) - - hardware architecture (uname -m, ...) - - type and firmware version of your device (base and wireless module, - if any) - - output of "lsusb -v" (if using an usb device) - - error messages - - relevant system log messages (it would help if you activate debug - output as described in 3.2.) - - for help with general configuration problems not specific to our driver, - such as isdn4linux and network configuration issues, please refer to the - appropriate forums and newsgroups. - -3.4. reporting problem solutions --------------------------------- - if you solved a problem with our drivers, wrote startup scripts for your - distribution, ... feel free to contact us (using one of the places - mentioned in 3.3.). we'd like to add scripts, hints, documentation - to the driver and/or the project web page. - - -4. links, other software -========================== - - - sourceforge project developing this driver and associated tools - https://sourceforge.net/projects/gigaset307x - - yahoo! 
group on the siemens gigaset family of devices - https://de.groups.yahoo.com/group/siemens-gigaset - - siemens gigaset/t-sinus compatibility table - http://www.erbze.info/sinus_gigaset.htm - (archived at https://web.archive.org/web/20100717020421/http://www.erbze.info:80/sinus_gigaset.htm ) - - -5. credits -============ - - thanks to - - karsten keil - for his help with isdn4linux - deti fliegl - for his base driver code - dennis dietrich - for his kernel 2.6 patches - andreas rummel - for his work and logs to get unimodem mode working - andreas degert - for his logs and patches to get cx 100 working - dietrich feist - for his generous donation of one m105 and two m101 cordless adapters - christoph schweers - for his generous donation of a m34 device - - and all the other people who sent logs and other information. diff --git a/documentation/isdn/hysdn.rst b/documentation/isdn/hysdn.rst --- a/documentation/isdn/hysdn.rst +++ /dev/null -============ -hysdn driver -============ - -the hysdn driver has been written by -werner cornelius (werner@isdn4linux.de or werner@titro.de) -for hypercope gmbh aachen germany. hypercope agreed to publish this driver -under the gnu general public license. - -the capi 2.0-support was added by ulrich albrecht (ualbrecht@hypercope.de) -for hypercope gmbh aachen, germany. - - - this program is free software; you can redistribute it and/or modify - it under the terms of the gnu general public license as published by - the free software foundation; either version 2 of the license, or - (at your option) any later version. - - this program is distributed in the hope that it will be useful, - but without any warranty; without even the implied warranty of - merchantability or fitness for a particular purpose. see the - gnu general public license for more details. 
- - you should have received a copy of the gnu general public license - along with this program; if not, write to the free software - foundation, inc., 675 mass ave, cambridge, ma 02139, usa. - -.. table of contents - - 1. about the driver - - 2. loading/unloading the driver - - 3. entries in the /proc filesystem - - 4. the /proc/net/hysdn/cardconfx file - - 5. the /proc/net/hysdn/cardlogx file - - 6. where to get additional info and help - - -1. about the driver -=================== - - the drivers/isdn/hysdn subdir contains a driver for hypercopes active - pci isdn cards champ, ergo and metro. to enable support for this cards - enable isdn support in the kernel config and support for hysdn cards in - the active cards submenu. the driver may only be compiled and used if - support for loadable modules and the process filesystem have been enabled. - - these cards provide two different interfaces to the kernel. without the - optional capi 2.0 support, they register as ethernet card. ip-routing - to a isdn-destination is performed on the card itself. all necessary - handlers for various protocols like ppp and others as well as config info - and firmware may be fetched from hypercopes www-site www.hypercope.de. - - with capi 2.0 support enabled, the card can also be used as a capi 2.0 - compliant devices with either capi 2.0 applications - (check isdn4k-utils) or -using the capidrv module- as a regular - isdn4linux device. this is done via the same mechanism as with the - active avm cards and in fact uses the same module. - - -2. loading/unloading the driver -=============================== - - the module has no command line parameters and auto detects up to 10 cards - in the id-range 0-9. - if a loaded driver shall be unloaded all open files in the /proc/net/hysdn - subdir need to be closed and all ethernet interfaces allocated by this - driver must be shut down. otherwise the module counter will avoid a module - unload. 
- - if you are using the capi 2.0-interface, make sure to load/modprobe the - kernelcapi-module first. - - if you plan to use the capidrv-link to isdn4linux, make sure to load - capidrv.o after all modules using this driver (i.e. after hysdn and - any avm-specific modules). - -3. entries in the /proc filesystem -================================== - - when the module has been loaded it adds the directory hysdn in the - /proc/net tree. this directory contains exactly 2 file entries for each - card. one is called cardconfx and the other cardlogx, where x is the - card id number from 0 to 9. - the cards are numbered in the order found in the pci config data. - -4. the /proc/net/hysdn/cardconfx file -===================================== - - this file may be read to get by everyone to get info about the cards type, - actual state, available features and used resources. - the first 3 entries (id, bus and slot) are pci info fields, the following - type field gives the information about the cards type: - - - 4 -> ergo card (server card with 2 b-chans) - - 5 -> metro card (server card with 4 or 8 b-chans) - - 6 -> champ card (client card with 2 b-chans) - - the following 3 fields show the hardware assignments for irq, iobase and the - dual ported memory (dp-mem). - - the fields b-chans and fax-chans announce the available card resources of - this types for the user. - - the state variable indicates the actual drivers state for this card with the - following assignments. - - - 0 -> card has not been booted since driver load - - 1 -> card booting is actually in progess - - 2 -> card is in an error state due to a previous boot failure - - 3 -> card is booted and active - - and the last field (device) shows the name of the ethernet device assigned - to this card. up to the first successful boot this field only shows a - - to tell that no net device has been allocated up to now. 
once a net device - has been allocated it remains assigned to this card, even if a card is - rebooted and an boot error occurs. - - writing to the cardconfx file boots the card or transfers config lines to - the cards firmware. the type of data is automatically detected when the - first data is written. only root has write access to this file. - the firmware boot files are normally called hyclient.pof for client cards - and hyserver.pof for server cards. - after successfully writing the boot file, complete config files or single - config lines may be copied to this file. - if an error occurs the return value given to the writing process has the - following additional codes (decimal): - - ==== ============================================ - 1000 another process is currently bootng the card - 1001 invalid firmware header - 1002 boards dual-port ram test failed - 1003 internal firmware handler error - 1004 boot image size invalid - 1005 first boot stage (bootstrap loader) failed - 1006 second boot stage failure - 1007 timeout waiting for card ready during boot - 1008 operation only allowed in booted state - 1009 config line too long - 1010 invalid channel number - 1011 timeout sending config data - ==== ============================================ - - additional info about error reasons may be fetched from the log output. - -5. the /proc/net/hysdn/cardlogx file -==================================== - - the cardlogx file entry may be opened multiple for reading by everyone to - get the cards and drivers log data. card messages always start with the - keyword log. all other lines are output from the driver. - the driver log data may be redirected to the syslog by selecting the - appropriate bitmask. the cards log messages will always be send to this - interface but never to the syslog. - - a root user may write a decimal or hex (with 0x) value t this file to select - desired output options. 
as mentioned above the cards log dat is always - written to the cardlog file independent of the following options only used - to check and debug the driver itself: - - for example:: - - echo "0x34560078" > /proc/net/hysdn/cardlog0 - - to output the hex log mask 34560078 for card 0. - - the written value is regarded as an unsigned 32-bit value, bit ored for - desired output. the following bits are already assigned: - - ========== ============================================================ - 0x80000000 all driver log data is alternatively via syslog - 0x00000001 log memory allocation errors - 0x00000010 firmware load start and close are logged - 0x00000020 log firmware record parser - 0x00000040 log every firmware write actions - 0x00000080 log all card related boot messages - 0x00000100 output all config data sent for debugging purposes - 0x00000200 only non comment config lines are shown wth channel - 0x00000400 additional conf log output - 0x00001000 log the asynchronous scheduler actions (config and log) - 0x00100000 log all open and close actions to /proc/net/hysdn/card files - 0x00200000 log all actions from /proc file entries - 0x00010000 log network interface init and deinit - ========== ============================================================ - -6. where to get additional info and help -======================================== - - if you have any problems concerning the driver or configuration contact - the hypercope support team (support@hypercope.de) and or the authors - werner cornelius (werner@isdn4linux or cornelius@titro.de) or - ulrich albrecht (ualbrecht@hypercope.de). 
diff --git a/documentation/isdn/index.rst b/documentation/isdn/index.rst --- a/documentation/isdn/index.rst +++ b/documentation/isdn/index.rst - avmb1 - gigaset - hysdn diff --git a/documentation/userspace-api/ioctl/ioctl-number.rst b/documentation/userspace-api/ioctl/ioctl-number.rst --- a/documentation/userspace-api/ioctl/ioctl-number.rst +++ b/documentation/userspace-api/ioctl/ioctl-number.rst -'g' 00-0f linux/gigaset_dev.h conflict! diff --git a/maintainers b/maintainers --- a/maintainers +++ b/maintainers -isdn/capi subsystem +isdn/cmtp over bluetooth -f: drivers/staging/isdn/ diff --git a/drivers/staging/kconfig b/drivers/staging/kconfig --- a/drivers/staging/kconfig +++ b/drivers/staging/kconfig -source "drivers/staging/isdn/kconfig" - diff --git a/drivers/staging/makefile b/drivers/staging/makefile --- a/drivers/staging/makefile +++ b/drivers/staging/makefile -obj-$(config_isdn_capi) += isdn/ diff --git a/drivers/staging/isdn/kconfig b/drivers/staging/isdn/kconfig --- a/drivers/staging/isdn/kconfig +++ /dev/null -# spdx-license-identifier: gpl-2.0-only -menu "isdn capi drivers" - depends on isdn_capi - -source "drivers/staging/isdn/avm/kconfig" - -source "drivers/staging/isdn/gigaset/kconfig" - -source "drivers/staging/isdn/hysdn/kconfig" - -endmenu - diff --git a/drivers/staging/isdn/makefile b/drivers/staging/isdn/makefile --- a/drivers/staging/isdn/makefile +++ /dev/null -# spdx-license-identifier: gpl-2.0 -# makefile for the kernel isdn subsystem and device drivers. - -# object files in subdirectories - -obj-$(config_capi_avm) += avm/ -obj-$(config_hysdn) += hysdn/ -obj-$(config_isdn_drv_gigaset) += gigaset/ diff --git a/drivers/staging/isdn/todo b/drivers/staging/isdn/todo --- a/drivers/staging/isdn/todo +++ /dev/null -todo: remove in late 2019 unless there are users - - -i tried to find any indication of whether the capi drivers are -still in use, and have not found anything from a long time ago. 
- -with public isdn networks almost completely shut down over the past 12 -months, there is very little you can actually do with this hardware. the -main remaining use case would be to connect isdn voice phones to an -in-house installation with asterisk or lcr, but anyone trying this in -turn seems to be using either the misdn driver stack, or out-of-tree -drivers from the hardware vendors. - -i may of course have missed something, so i would suggest moving -these into drivers/staging/ just in case someone still uses one -of the three remaining in-kernel drivers (avm, hysdn, gigaset). - -if nobody complains, we can remove them entirely in six months, -or otherwise move the core code and any drivers that are still -needed back into drivers/isdn. - - arnd bergmann <arnd@arndb.de> diff --git a/drivers/staging/isdn/avm/kconfig b/drivers/staging/isdn/avm/kconfig --- a/drivers/staging/isdn/avm/kconfig +++ /dev/null -# spdx-license-identifier: gpl-2.0-only -# -# isdn avm drivers -# - -menuconfig capi_avm - bool "active avm cards" - help - enable support for avm active isdn cards. - -if capi_avm - -config isdn_drv_avmb1_b1isa - tristate "avm b1 isa support" - depends on isa - help - enable support for the isa version of the avm b1 card. - -config isdn_drv_avmb1_b1pci - tristate "avm b1 pci support" - depends on pci - help - enable support for the pci version of the avm b1 card. - -config isdn_drv_avmb1_b1pciv4 - bool "avm b1 pci v4 support" - depends on isdn_drv_avmb1_b1pci - help - enable support for the v4 version of avm b1 pci card. - -config isdn_drv_avmb1_t1isa - tristate "avm t1/t1-b isa support" - depends on isa - help - enable support for the avm t1 t1b card. - note: this is a pri card and handle 30 b-channels. - -config isdn_drv_avmb1_b1pcmcia - tristate "avm b1/m1/m2 pcmcia support" - depends on pcmcia - help - enable support for the pcmcia version of the avm b1 card. 
- -config isdn_drv_avmb1_avm_cs - tristate "avm b1/m1/m2 pcmcia cs module" - depends on isdn_drv_avmb1_b1pcmcia - help - enable the pcmcia client driver for the avm b1/m1/m2 - pcmcia cards. - -config isdn_drv_avmb1_t1pci - tristate "avm t1/t1-b pci support" - depends on pci - help - enable support for the avm t1 t1b card. - note: this is a pri card and handle 30 b-channels. - -config isdn_drv_avmb1_c4 - tristate "avm c4/c2 support" - depends on pci - help - enable support for the avm c4/c2 pci cards. - these cards handle 4/2 bri isdn lines (8/4 channels). - -endif # capi_avm diff --git a/drivers/staging/isdn/avm/makefile b/drivers/staging/isdn/avm/makefile --- a/drivers/staging/isdn/avm/makefile +++ /dev/null -# spdx-license-identifier: gpl-2.0 -# makefile for the avm isdn device drivers - -# each configuration option enables a list of files. - -obj-$(config_isdn_drv_avmb1_b1isa) += b1isa.o b1.o -obj-$(config_isdn_drv_avmb1_b1pci) += b1pci.o b1.o b1dma.o -obj-$(config_isdn_drv_avmb1_b1pcmcia) += b1pcmcia.o b1.o -obj-$(config_isdn_drv_avmb1_avm_cs) += avm_cs.o -obj-$(config_isdn_drv_avmb1_t1isa) += t1isa.o b1.o -obj-$(config_isdn_drv_avmb1_t1pci) += t1pci.o b1.o b1dma.o -obj-$(config_isdn_drv_avmb1_c4) += c4.o b1.o diff --git a/drivers/staging/isdn/avm/avm_cs.c b/drivers/staging/isdn/avm/avm_cs.c --- a/drivers/staging/isdn/avm/avm_cs.c +++ /dev/null -/* $id: avm_cs.c,v 1.4.6.3 2001/09/23 22:24:33 kai exp $ - * - * a pcmcia client driver for avm b1/m1/m2 - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. 
- * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/ptrace.h> -#include <linux/string.h> -#include <linux/tty.h> -#include <linux/serial.h> -#include <linux/major.h> -#include <asm/io.h> - -#include <pcmcia/cistpl.h> -#include <pcmcia/ciscode.h> -#include <pcmcia/ds.h> -#include <pcmcia/cisreg.h> - -#include <linux/skbuff.h> -#include <linux/capi.h> -#include <linux/b1lli.h> -#include <linux/b1pcmcia.h> - -/*====================================================================*/ - -module_description("capi4linux: pcmcia client driver for avm b1/m1/m2"); -module_author("carsten paeth"); -module_license("gpl"); - -/*====================================================================*/ - -static int avmcs_config(struct pcmcia_device *link); -static void avmcs_release(struct pcmcia_device *link); -static void avmcs_detach(struct pcmcia_device *p_dev); - -static int avmcs_probe(struct pcmcia_device *p_dev) -{ - /* general socket configuration */ - p_dev->config_flags |= conf_enable_irq | conf_auto_set_io; - p_dev->config_index = 1; - p_dev->config_regs = present_option; - - return avmcs_config(p_dev); -} /* avmcs_attach */ - - -static void avmcs_detach(struct pcmcia_device *link) -{ - avmcs_release(link); -} /* avmcs_detach */ - -static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data) -{ - p_dev->resource[0]->end = 16; - p_dev->resource[0]->flags &= ~io_data_path_width; - p_dev->resource[0]->flags |= io_data_path_width_8; - - return pcmcia_request_io(p_dev); -} - -static int avmcs_config(struct pcmcia_device *link) -{ - int i = -1; - char devname[128]; - int cardtype; - int (*addcard)(unsigned int port, unsigned irq); - - devname[0] = 0; - if (link->prod_id[1]) - strlcpy(devname, link->prod_id[1], sizeof(devname)); - - /* - * find io port - */ - if (pcmcia_loop_config(link, avmcs_configcheck, null)) - return -enodev; - - do { - if (!link->irq) { - /* undo */ - pcmcia_disable_device(link); - break; 
- } - - /* - * configure the pcmcia socket - */ - i = pcmcia_enable_device(link); - if (i != 0) { - pcmcia_disable_device(link); - break; - } - - } while (0); - - if (devname[0]) { - char *s = strrchr(devname, ' '); - if (!s) - s = devname; - else s++; - if (strcmp("m1", s) == 0) { - cardtype = avm_cardtype_m1; - } else if (strcmp("m2", s) == 0) { - cardtype = avm_cardtype_m2; - } else { - cardtype = avm_cardtype_b1; - } - } else - cardtype = avm_cardtype_b1; - - /* if any step failed, release any partially configured state */ - if (i != 0) { - avmcs_release(link); - return -enodev; - } - - - switch (cardtype) { - case avm_cardtype_m1: addcard = b1pcmcia_addcard_m1; break; - case avm_cardtype_m2: addcard = b1pcmcia_addcard_m2; break; - default: - case avm_cardtype_b1: addcard = b1pcmcia_addcard_b1; break; - } - if ((i = (*addcard)(link->resource[0]->start, link->irq)) < 0) { - dev_err(&link->dev, - "avm_cs: failed to add avm-controller at i/o %#x, irq %d ", - (unsigned int) link->resource[0]->start, link->irq); - avmcs_release(link); - return -enodev; - } - return 0; - -} /* avmcs_config */ - - -static void avmcs_release(struct pcmcia_device *link) -{ - b1pcmcia_delcard(link->resource[0]->start, link->irq); - pcmcia_disable_device(link); -} /* avmcs_release */ - - -static const struct pcmcia_device_id avmcs_ids[] = { - pcmcia_device_prod_id12("avm", "isdn-controller b1", 0x95d42008, 0x845dc335), - pcmcia_device_prod_id12("avm", "mobile isdn-controller m1", 0x95d42008, 0x81e10430), - pcmcia_device_prod_id12("avm", "mobile isdn-controller m2", 0x95d42008, 0x18e8558a), - pcmcia_device_null -}; -module_device_table(pcmcia, avmcs_ids); - -static struct pcmcia_driver avmcs_driver = { - .owner = this_module, - .name = "avm_cs", - .probe = avmcs_probe, - .remove = avmcs_detach, - .id_table = avmcs_ids, -}; -module_pcmcia_driver(avmcs_driver); diff --git a/drivers/staging/isdn/avm/avmcard.h b/drivers/staging/isdn/avm/avmcard.h --- a/drivers/staging/isdn/avm/avmcard.h +++ 
/dev/null -/* $id: avmcard.h,v 1.1.4.1.2.1 2001/12/21 15:00:17 kai exp $ - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#ifndef _avmcard_h_ -#define _avmcard_h_ - -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/interrupt.h> - -#define avmb1_portlen 0x1f -#define avm_maxversion 8 -#define avm_ncci_per_channel 4 - -/* - * versions - */ - -#define ver_driver 0 -#define ver_cardtype 1 -#define ver_hwid 2 -#define ver_serial 3 -#define ver_option 4 -#define ver_proto 5 -#define ver_profile 6 -#define ver_capi 7 - -enum avmcardtype { - avm_b1isa, - avm_b1pci, - avm_b1pcmcia, - avm_m1, - avm_m2, - avm_t1isa, - avm_t1pci, - avm_c4, - avm_c2 -}; - -typedef struct avmcard_dmabuf { - long size; - u8 *dmabuf; - dma_addr_t dmaaddr; -} avmcard_dmabuf; - -typedef struct avmcard_dmainfo { - u32 recvlen; - avmcard_dmabuf recvbuf; - - avmcard_dmabuf sendbuf; - struct sk_buff_head send_queue; - - struct pci_dev *pcidev; -} avmcard_dmainfo; - -typedef struct avmctrl_info { - char cardname[32]; - - int versionlen; - char versionbuf[1024]; - char *version[avm_maxversion]; - - char infobuf[128]; /* for function procinfo */ - - struct avmcard *card; - struct capi_ctr capi_ctrl; - - struct list_head ncci_head; -} avmctrl_info; - -typedef struct avmcard { - char name[32]; - - spinlock_t lock; - unsigned int port; - unsigned irq; - unsigned long membase; - enum avmcardtype cardtype; - unsigned char revision; - unsigned char class; - int cardnr; /* for t1isa */ - - char msgbuf[128]; /* capimsg msg part */ - char databuf[2048]; /* capimsg data part */ - - void __iomem *mbase; - volatile u32 csr; - avmcard_dmainfo *dma; - - struct avmctrl_info *ctrlinfo; - - u_int nr_controllers; - u_int nlogcontr; - struct list_head list; -} avmcard; - -extern int b1_irq_table[16]; - -/* - * lli messages to the 
isdn-controllerisdn controller - */ - -#define send_poll 0x72 /* - * after load <- receive_poll - */ -#define send_init 0x11 /* - * first message <- receive_init - * int32 numapplications int32 - * numnccis int32 boardnumber - */ -#define send_register 0x12 /* - * register an application int32 - * applidid int32 nummessages - * int32 numb3connections int32 - * numb3blocks int32 b3size - * - * anzb3connection != 0 && - * anzb3blocks >= 1 && b3size >= 1 - */ -#define send_release 0x14 /* - * deregister an application int32 - * applid - */ -#define send_message 0x15 /* - * send capi-message int32 length - * capi-data ... - */ -#define send_data_b3_req 0x13 /* - * send capi-data-message int32 - * msglength capi-data ... int32 - * b3length data .... - */ - -#define send_config 0x21 /* - */ - -#define send_pollack 0x73 /* t1 watchdog */ - -/* - * lli messages from the isdn-controllerisdn controller - */ - -#define receive_poll 0x32 /* - * <- after send_poll - */ -#define receive_init 0x27 /* - * <- after send_init int32 length - * byte total length b1struct board - * driver revision b1struct card - * type b1struct reserved b1struct - * serial number b1struct driver - * capability b1struct d-channel - * protocol b1struct capi-2.0 - * profile b1struct capi version - */ -#define receive_message 0x21 /* - * <- after send_message int32 - * appllid int32 length capi-data - * .... - */ -#define receive_data_b3_ind 0x22 /* - * received data int32 appllid - * int32 length capi-data ... - * int32 b3length data ... - */ -#define receive_start 0x23 /* - * handshake - */ -#define receive_stop 0x24 /* - * handshake - */ -#define receive_new_ncci 0x25 /* - * int32 appllid int32 ncci int32 - * windowsize - */ -#define receive_free_ncci 0x26 /* - * int32 appllid int32 ncci - */ -#define receive_release 0x26 /* - * int32 appllid int32 0xffffffff - */ -#define receive_task_ready 0x31 /* - * int32 tasknr - * int32 length taskname ... 
- */ -#define receive_debugmsg 0x71 /* - * int32 length message - * - */ -#define receive_polldword 0x75 /* t1pci in dword mode */ - -#define write_register 0x00 -#define read_register 0x01 - -/* - * port offsets - */ - -#define b1_read 0x00 -#define b1_write 0x01 -#define b1_instat 0x02 -#define b1_outstat 0x03 -#define b1_analyse 0x04 -#define b1_revision 0x05 -#define b1_reset 0x10 - - -#define b1_stat0(cardtype) ((cardtype) == avm_m1 ? 0x81200000l : 0x80a00000l) -#define b1_stat1(cardtype) (0x80e00000l) - -/* ---------------------------------------------------------------- */ - -static inline unsigned char b1outp(unsigned int base, - unsigned short offset, - unsigned char value) -{ - outb(value, base + offset); - return inb(base + b1_analyse); -} - - -static inline int b1_rx_full(unsigned int base) -{ - return inb(base + b1_instat) & 0x1; -} - -static inline unsigned char b1_get_byte(unsigned int base) -{ - unsigned long stop = jiffies + 1 * hz; /* maximum wait time 1 sec */ - while (!b1_rx_full(base) && time_before(jiffies, stop)); - if (b1_rx_full(base)) - return inb(base + b1_read); - printk(kern_crit "b1lli(0x%x): rx not full after 1 second ", base); - return 0; -} - -static inline unsigned int b1_get_word(unsigned int base) -{ - unsigned int val = 0; - val |= b1_get_byte(base); - val |= (b1_get_byte(base) << 8); - val |= (b1_get_byte(base) << 16); - val |= (b1_get_byte(base) << 24); - return val; -} - -static inline int b1_tx_empty(unsigned int base) -{ - return inb(base + b1_outstat) & 0x1; -} - -static inline void b1_put_byte(unsigned int base, unsigned char val) -{ - while (!b1_tx_empty(base)); - b1outp(base, b1_write, val); -} - -static inline int b1_save_put_byte(unsigned int base, unsigned char val) -{ - unsigned long stop = jiffies + 2 * hz; - while (!b1_tx_empty(base) && time_before(jiffies, stop)); - if (!b1_tx_empty(base)) return -1; - b1outp(base, b1_write, val); - return 0; -} - -static inline void b1_put_word(unsigned int base, unsigned int 
val) -{ - b1_put_byte(base, val & 0xff); - b1_put_byte(base, (val >> 8) & 0xff); - b1_put_byte(base, (val >> 16) & 0xff); - b1_put_byte(base, (val >> 24) & 0xff); -} - -static inline unsigned int b1_get_slice(unsigned int base, - unsigned char *dp) -{ - unsigned int len, i; - - len = i = b1_get_word(base); - while (i-- > 0) *dp++ = b1_get_byte(base); - return len; -} - -static inline void b1_put_slice(unsigned int base, - unsigned char *dp, unsigned int len) -{ - unsigned i = len; - b1_put_word(base, i); - while (i-- > 0) - b1_put_byte(base, *dp++); -} - -static void b1_wr_reg(unsigned int base, - unsigned int reg, - unsigned int value) -{ - b1_put_byte(base, write_register); - b1_put_word(base, reg); - b1_put_word(base, value); -} - -static inline unsigned int b1_rd_reg(unsigned int base, - unsigned int reg) -{ - b1_put_byte(base, read_register); - b1_put_word(base, reg); - return b1_get_word(base); - -} - -static inline void b1_reset(unsigned int base) -{ - b1outp(base, b1_reset, 0); - mdelay(55 * 2); /* 2 tic's */ - - b1outp(base, b1_reset, 1); - mdelay(55 * 2); /* 2 tic's */ - - b1outp(base, b1_reset, 0); - mdelay(55 * 2); /* 2 tic's */ -} - -static inline unsigned char b1_disable_irq(unsigned int base) -{ - return b1outp(base, b1_instat, 0x00); -} - -/* ---------------------------------------------------------------- */ - -static inline void b1_set_test_bit(unsigned int base, - enum avmcardtype cardtype, - int onoff) -{ - b1_wr_reg(base, b1_stat0(cardtype), onoff ? 
0x21 : 0x20); -} - -static inline int b1_get_test_bit(unsigned int base, - enum avmcardtype cardtype) -{ - return (b1_rd_reg(base, b1_stat0(cardtype)) & 0x01) != 0; -} - -/* ---------------------------------------------------------------- */ - -#define t1_fastlink 0x00 -#define t1_slowlink 0x08 - -#define t1_read b1_read -#define t1_write b1_write -#define t1_instat b1_instat -#define t1_outstat b1_outstat -#define t1_irqenable 0x05 -#define t1_fifostat 0x06 -#define t1_resetlink 0x10 -#define t1_analyse 0x11 -#define t1_irqmaster 0x12 -#define t1_ident 0x17 -#define t1_resetboard 0x1f - -#define t1f_iready 0x01 -#define t1f_ihalf 0x02 -#define t1f_ifull 0x04 -#define t1f_iempty 0x08 -#define t1f_iflags 0xf0 - -#define t1f_oready 0x10 -#define t1f_ohalf 0x20 -#define t1f_oempty 0x40 -#define t1f_ofull 0x80 -#define t1f_oflags 0xf0 - -/* there are hema cards with 1k and 4k fifo out */ -#define fifo_outbsize 256 -#define fifo_inpbsize 512 - -#define hema_version_id 0 -#define hema_pal_id 0 - -static inline void t1outp(unsigned int base, - unsigned short offset, - unsigned char value) -{ - outb(value, base + offset); -} - -static inline unsigned char t1inp(unsigned int base, - unsigned short offset) -{ - return inb(base + offset); -} - -static inline int t1_isfastlink(unsigned int base) -{ - return (inb(base + t1_ident) & ~0x82) == 1; -} - -static inline unsigned char t1_fifostatus(unsigned int base) -{ - return inb(base + t1_fifostat); -} - -static inline unsigned int t1_get_slice(unsigned int base, - unsigned char *dp) -{ - unsigned int len, i; -#ifdef fastlink_debug - unsigned wcnt = 0, bcnt = 0; -#endif - - len = i = b1_get_word(base); - if (t1_isfastlink(base)) { - int status; - while (i > 0) { - status = t1_fifostatus(base) & (t1f_iready | t1f_ihalf); - if (i >= fifo_inpbsize) status |= t1f_ifull; - - switch (status) { - case t1f_iready | t1f_ihalf | t1f_ifull: - insb(base + b1_read, dp, fifo_inpbsize); - dp += fifo_inpbsize; - i -= fifo_inpbsize; -#ifdef 
fastlink_debug - wcnt += fifo_inpbsize; -#endif - break; - case t1f_iready | t1f_ihalf: - insb(base + b1_read, dp, i); -#ifdef fastlink_debug - wcnt += i; -#endif - dp += i; - i = 0; - break; - default: - *dp++ = b1_get_byte(base); - i--; -#ifdef fastlink_debug - bcnt++; -#endif - break; - } - } -#ifdef fastlink_debug - if (wcnt) - printk(kern_debug "b1lli(0x%x): get_slice l=%d w=%d b=%d ", - base, len, wcnt, bcnt); -#endif - } else { - while (i-- > 0) - *dp++ = b1_get_byte(base); - } - return len; -} - -static inline void t1_put_slice(unsigned int base, - unsigned char *dp, unsigned int len) -{ - unsigned i = len; - b1_put_word(base, i); - if (t1_isfastlink(base)) { - int status; - while (i > 0) { - status = t1_fifostatus(base) & (t1f_oready | t1f_ohalf); - if (i >= fifo_outbsize) status |= t1f_oempty; - switch (status) { - case t1f_oready | t1f_ohalf | t1f_oempty: - outsb(base + b1_write, dp, fifo_outbsize); - dp += fifo_outbsize; - i -= fifo_outbsize; - break; - case t1f_oready | t1f_ohalf: - outsb(base + b1_write, dp, i); - dp += i; - i = 0; - break; - default: - b1_put_byte(base, *dp++); - i--; - break; - } - } - } else { - while (i-- > 0) - b1_put_byte(base, *dp++); - } -} - -static inline void t1_disable_irq(unsigned int base) -{ - t1outp(base, t1_irqmaster, 0x00); -} - -static inline void t1_reset(unsigned int base) -{ - /* reset t1 controller */ - b1_reset(base); - /* disable irq on hema */ - t1outp(base, b1_instat, 0x00); - t1outp(base, b1_outstat, 0x00); - t1outp(base, t1_irqmaster, 0x00); - /* reset hema board configuration */ - t1outp(base, t1_resetboard, 0xf); -} - -static inline void b1_setinterrupt(unsigned int base, unsigned irq, - enum avmcardtype cardtype) -{ - switch (cardtype) { - case avm_t1isa: - t1outp(base, b1_instat, 0x00); - t1outp(base, b1_instat, 0x02); - t1outp(base, t1_irqmaster, 0x08); - break; - case avm_b1isa: - b1outp(base, b1_instat, 0x00); - b1outp(base, b1_reset, b1_irq_table[irq]); - b1outp(base, b1_instat, 0x02); - break; - 
default: - case avm_m1: - case avm_m2: - case avm_b1pci: - b1outp(base, b1_instat, 0x00); - b1outp(base, b1_reset, 0xf0); - b1outp(base, b1_instat, 0x02); - break; - case avm_c4: - case avm_t1pci: - b1outp(base, b1_reset, 0xf0); - break; - } -} - -/* b1.c */ -avmcard *b1_alloc_card(int nr_controllers); -void b1_free_card(avmcard *card); -int b1_detect(unsigned int base, enum avmcardtype cardtype); -void b1_getrevision(avmcard *card); -int b1_load_t4file(avmcard *card, capiloaddatapart *t4file); -int b1_load_config(avmcard *card, capiloaddatapart *config); -int b1_loaded(avmcard *card); - -int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data); -void b1_reset_ctr(struct capi_ctr *ctrl); -void b1_register_appl(struct capi_ctr *ctrl, u16 appl, - capi_register_params *rp); -void b1_release_appl(struct capi_ctr *ctrl, u16 appl); -u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); -void b1_parse_version(avmctrl_info *card); -irqreturn_t b1_interrupt(int interrupt, void *devptr); - -int b1_proc_show(struct seq_file *m, void *v); - -avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *, - long rsize, long ssize); -void avmcard_dma_free(avmcard_dmainfo *); - -/* b1dma.c */ -int b1pciv4_detect(avmcard *card); -int t1pci_detect(avmcard *card); -void b1dma_reset(avmcard *card); -irqreturn_t b1dma_interrupt(int interrupt, void *devptr); - -int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data); -void b1dma_reset_ctr(struct capi_ctr *ctrl); -void b1dma_remove_ctr(struct capi_ctr *ctrl); -void b1dma_register_appl(struct capi_ctr *ctrl, - u16 appl, - capi_register_params *rp); -void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl); -u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); -int b1dma_proc_show(struct seq_file *m, void *v); - -#endif /* _avmcard_h_ */ diff --git a/drivers/staging/isdn/avm/b1.c b/drivers/staging/isdn/avm/b1.c --- a/drivers/staging/isdn/avm/b1.c +++ /dev/null -/* $id: b1.c,v 1.1.2.2 2004/01/16 
21:09:27 keil exp $ - * - * common module for avm b1 cards. - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/capi.h> -#include <linux/kernelcapi.h> -#include <linux/slab.h> -#include <asm/io.h> -#include <linux/init.h> -#include <linux/uaccess.h> -#include <linux/netdevice.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> - -static char *revision = "$revision: 1.1.2.2 $"; - -/* ------------------------------------------------------------- */ - -module_description("capi4linux: common support for active avm cards"); -module_author("carsten paeth"); -module_license("gpl"); - -/* ------------------------------------------------------------- */ - -int b1_irq_table[16] = -{0, - 0, - 0, - 192, /* irq 3 */ - 32, /* irq 4 */ - 160, /* irq 5 */ - 96, /* irq 6 */ - 224, /* irq 7 */ - 0, - 64, /* irq 9 */ - 80, /* irq 10 */ - 208, /* irq 11 */ - 48, /* irq 12 */ - 0, - 0, - 112, /* irq 15 */ -}; - -/* ------------------------------------------------------------- */ - -avmcard *b1_alloc_card(int nr_controllers) -{ - avmcard *card; - avmctrl_info *cinfo; - int i; - - card = kzalloc(sizeof(*card), gfp_kernel); - if (!card) - return null; - - cinfo = kcalloc(nr_controllers, sizeof(*cinfo), gfp_kernel); - if (!cinfo) { - kfree(card); - return null; - } - - card->ctrlinfo = cinfo; - for (i = 0; i < nr_controllers; i++) { - init_list_head(&cinfo[i].ncci_head); - cinfo[i].card = card; - } - spin_lock_init(&card->lock); - card->nr_controllers = nr_controllers; - - return 
card; -} - -/* ------------------------------------------------------------- */ - -void b1_free_card(avmcard *card) -{ - kfree(card->ctrlinfo); - kfree(card); -} - -/* ------------------------------------------------------------- */ - -int b1_detect(unsigned int base, enum avmcardtype cardtype) -{ - int onoff, i; - - /* - * statusregister 0000 00xx - */ - if ((inb(base + b1_instat) & 0xfc) - || (inb(base + b1_outstat) & 0xfc)) - return 1; - /* - * statusregister 0000 001x - */ - b1outp(base, b1_instat, 0x2); /* enable irq */ - /* b1outp(base, b1_outstat, 0x2); */ - if ((inb(base + b1_instat) & 0xfe) != 0x2 - /* || (inb(base + b1_outstat) & 0xfe) != 0x2 */) - return 2; - /* - * statusregister 0000 000x - */ - b1outp(base, b1_instat, 0x0); /* disable irq */ - b1outp(base, b1_outstat, 0x0); - if ((inb(base + b1_instat) & 0xfe) - || (inb(base + b1_outstat) & 0xfe)) - return 3; - - for (onoff = !0, i = 0; i < 10; i++) { - b1_set_test_bit(base, cardtype, onoff); - if (b1_get_test_bit(base, cardtype) != onoff) - return 4; - onoff = !onoff; - } - - if (cardtype == avm_m1) - return 0; - - if ((b1_rd_reg(base, b1_stat1(cardtype)) & 0x0f) != 0x01) - return 5; - - return 0; -} - -void b1_getrevision(avmcard *card) -{ - card->class = inb(card->port + b1_analyse); - card->revision = inb(card->port + b1_revision); -} - -#define fwbuf_size 256 -int b1_load_t4file(avmcard *card, capiloaddatapart *t4file) -{ - unsigned char buf[fwbuf_size]; - unsigned char *dp; - int i, left; - unsigned int base = card->port; - - dp = t4file->data; - left = t4file->len; - while (left > fwbuf_size) { - if (t4file->user) { - if (copy_from_user(buf, dp, fwbuf_size)) - return -efault; - } else { - memcpy(buf, dp, fwbuf_size); - } - for (i = 0; i < fwbuf_size; i++) - if (b1_save_put_byte(base, buf[i]) < 0) { - printk(kern_err "%s: corrupted firmware file ? 
", - card->name); - return -eio; - } - left -= fwbuf_size; - dp += fwbuf_size; - } - if (left) { - if (t4file->user) { - if (copy_from_user(buf, dp, left)) - return -efault; - } else { - memcpy(buf, dp, left); - } - for (i = 0; i < left; i++) - if (b1_save_put_byte(base, buf[i]) < 0) { - printk(kern_err "%s: corrupted firmware file ? ", - card->name); - return -eio; - } - } - return 0; -} - -int b1_load_config(avmcard *card, capiloaddatapart *config) -{ - unsigned char buf[fwbuf_size]; - unsigned char *dp; - unsigned int base = card->port; - int i, j, left; - - dp = config->data; - left = config->len; - if (left) { - b1_put_byte(base, send_config); - b1_put_word(base, 1); - b1_put_byte(base, send_config); - b1_put_word(base, left); - } - while (left > fwbuf_size) { - if (config->user) { - if (copy_from_user(buf, dp, fwbuf_size)) - return -efault; - } else { - memcpy(buf, dp, fwbuf_size); - } - for (i = 0; i < fwbuf_size; ) { - b1_put_byte(base, send_config); - for (j = 0; j < 4; j++) { - b1_put_byte(base, buf[i++]); - } - } - left -= fwbuf_size; - dp += fwbuf_size; - } - if (left) { - if (config->user) { - if (copy_from_user(buf, dp, left)) - return -efault; - } else { - memcpy(buf, dp, left); - } - for (i = 0; i < left; ) { - b1_put_byte(base, send_config); - for (j = 0; j < 4; j++) { - if (i < left) - b1_put_byte(base, buf[i++]); - else - b1_put_byte(base, 0); - } - } - } - return 0; -} - -int b1_loaded(avmcard *card) -{ - unsigned int base = card->port; - unsigned long stop; - unsigned char ans; - unsigned long tout = 2; - - for (stop = jiffies + tout * hz; time_before(jiffies, stop);) { - if (b1_tx_empty(base)) - break; - } - if (!b1_tx_empty(base)) { - printk(kern_err "%s: b1_loaded: tx err, corrupted t4 file ? 
", - card->name); - return 0; - } - b1_put_byte(base, send_poll); - for (stop = jiffies + tout * hz; time_before(jiffies, stop);) { - if (b1_rx_full(base)) { - ans = b1_get_byte(base); - if (ans == receive_poll) - return 1; - - printk(kern_err "%s: b1_loaded: got 0x%x, firmware not running ", - card->name, ans); - return 0; - } - } - printk(kern_err "%s: b1_loaded: firmware not running ", card->name); - return 0; -} - -/* ------------------------------------------------------------- */ - -int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - int retval; - - b1_reset(port); - retval = b1_load_t4file(card, &data->firmware); - - if (retval) { - b1_reset(port); - printk(kern_err "%s: failed to load t4file!! ", - card->name); - return retval; - } - - b1_disable_irq(port); - - if (data->configuration.len > 0 && data->configuration.data) { - retval = b1_load_config(card, &data->configuration); - if (retval) { - b1_reset(port); - printk(kern_err "%s: failed to load config!! ", - card->name); - return retval; - } - } - - if (!b1_loaded(card)) { - printk(kern_err "%s: failed to load t4file. 
", card->name); - return -eio; - } - - spin_lock_irqsave(&card->lock, flags); - b1_setinterrupt(port, card->irq, card->cardtype); - b1_put_byte(port, send_init); - b1_put_word(port, capi_maxappl); - b1_put_word(port, avm_ncci_per_channel * 2); - b1_put_word(port, ctrl->cnr - 1); - spin_unlock_irqrestore(&card->lock, flags); - - return 0; -} - -void b1_reset_ctr(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - - b1_reset(port); - b1_reset(port); - - memset(cinfo->version, 0, sizeof(cinfo->version)); - spin_lock_irqsave(&card->lock, flags); - capilib_release(&cinfo->ncci_head); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_down(ctrl); -} - -void b1_register_appl(struct capi_ctr *ctrl, - u16 appl, - capi_register_params *rp) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - int nconn, want = rp->level3cnt; - - if (want > 0) nconn = want; - else nconn = ctrl->profile.nbchannel * -want; - if (nconn == 0) nconn = ctrl->profile.nbchannel; - - spin_lock_irqsave(&card->lock, flags); - b1_put_byte(port, send_register); - b1_put_word(port, appl); - b1_put_word(port, 1024 * (nconn + 1)); - b1_put_word(port, nconn); - b1_put_word(port, rp->datablkcnt); - b1_put_word(port, rp->datablklen); - spin_unlock_irqrestore(&card->lock, flags); -} - -void b1_release_appl(struct capi_ctr *ctrl, u16 appl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - capilib_release_appl(&cinfo->ncci_head, appl); - b1_put_byte(port, send_release); - b1_put_word(port, appl); - spin_unlock_irqrestore(&card->lock, flags); -} - -u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) -{ - avmctrl_info *cinfo = 
(avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - u16 len = capimsg_len(skb->data); - u8 cmd = capimsg_command(skb->data); - u8 subcmd = capimsg_subcommand(skb->data); - u16 dlen, retval; - - spin_lock_irqsave(&card->lock, flags); - if (capicmd(cmd, subcmd) == capi_data_b3_req) { - retval = capilib_data_b3_req(&cinfo->ncci_head, - capimsg_appid(skb->data), - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - if (retval != capi_noerror) { - spin_unlock_irqrestore(&card->lock, flags); - return retval; - } - - dlen = capimsg_datalen(skb->data); - - b1_put_byte(port, send_data_b3_req); - b1_put_slice(port, skb->data, len); - b1_put_slice(port, skb->data + len, dlen); - } else { - b1_put_byte(port, send_message); - b1_put_slice(port, skb->data, len); - } - spin_unlock_irqrestore(&card->lock, flags); - - dev_kfree_skb_any(skb); - return capi_noerror; -} - -/* ------------------------------------------------------------- */ - -void b1_parse_version(avmctrl_info *cinfo) -{ - struct capi_ctr *ctrl = &cinfo->capi_ctrl; - avmcard *card = cinfo->card; - capi_profile *profp; - u8 *dversion; - u8 flag; - int i, j; - - for (j = 0; j < avm_maxversion; j++) - cinfo->version[j] = ""; - for (i = 0, j = 0; - j < avm_maxversion && i < cinfo->versionlen; - j++, i += cinfo->versionbuf[i] + 1) - cinfo->version[j] = &cinfo->versionbuf[i + 1]; - - strlcpy(ctrl->serial, cinfo->version[ver_serial], sizeof(ctrl->serial)); - memcpy(&ctrl->profile, cinfo->version[ver_profile], sizeof(capi_profile)); - strlcpy(ctrl->manu, "avm gmbh", sizeof(ctrl->manu)); - dversion = cinfo->version[ver_driver]; - ctrl->version.majorversion = 2; - ctrl->version.minorversion = 0; - ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4); - ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf); - ctrl->version.minormanuversion = (dversion[3] - '0') << 4; - ctrl->version.minormanuversion |= - (dversion[5] - '0') * 10 
+ ((dversion[6] - '0') & 0xf); - - profp = &ctrl->profile; - - flag = ((u8 *)(profp->manu))[1]; - switch (flag) { - case 0: if (cinfo->version[ver_cardtype]) - strcpy(cinfo->cardname, cinfo->version[ver_cardtype]); - else strcpy(cinfo->cardname, "b1"); - break; - case 3: strcpy(cinfo->cardname, "pcmcia b"); break; - case 4: strcpy(cinfo->cardname, "pcmcia m1"); break; - case 5: strcpy(cinfo->cardname, "pcmcia m2"); break; - case 6: strcpy(cinfo->cardname, "b1 v3.0"); break; - case 7: strcpy(cinfo->cardname, "b1 pci"); break; - default: sprintf(cinfo->cardname, "avm?%u", (unsigned int)flag); break; - } - printk(kern_notice "%s: card %d "%s" ready. ", - card->name, ctrl->cnr, cinfo->cardname); - - flag = ((u8 *)(profp->manu))[3]; - if (flag) - printk(kern_notice "%s: card %d protocol:%s%s%s%s%s%s%s ", - card->name, - ctrl->cnr, - (flag & 0x01) ? " dss1" : "", - (flag & 0x02) ? " ct1" : "", - (flag & 0x04) ? " vn3" : "", - (flag & 0x08) ? " ni1" : "", - (flag & 0x10) ? " austel" : "", - (flag & 0x20) ? " ess" : "", - (flag & 0x40) ? " 1tr6" : "" - ); - - flag = ((u8 *)(profp->manu))[5]; - if (flag) - printk(kern_notice "%s: card %d linetype:%s%s%s%s ", - card->name, - ctrl->cnr, - (flag & 0x01) ? " point to point" : "", - (flag & 0x02) ? " point to multipoint" : "", - (flag & 0x08) ? " leased line without d-channel" : "", - (flag & 0x04) ? 
" leased line with d-channel" : "" - ); -} - -/* ------------------------------------------------------------- */ - -irqreturn_t b1_interrupt(int interrupt, void *devptr) -{ - avmcard *card = devptr; - avmctrl_info *cinfo = &card->ctrlinfo[0]; - struct capi_ctr *ctrl = &cinfo->capi_ctrl; - unsigned char b1cmd; - struct sk_buff *skb; - - unsigned applid; - unsigned msglen; - unsigned datab3len; - unsigned ncci; - unsigned windowsize; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - - if (!b1_rx_full(card->port)) { - spin_unlock_irqrestore(&card->lock, flags); - return irq_none; - } - - b1cmd = b1_get_byte(card->port); - - switch (b1cmd) { - - case receive_data_b3_ind: - - applid = (unsigned) b1_get_word(card->port); - msglen = b1_get_slice(card->port, card->msgbuf); - datab3len = b1_get_slice(card->port, card->databuf); - spin_unlock_irqrestore(&card->lock, flags); - - if (msglen < 30) { /* not capi 64bit */ - memset(card->msgbuf + msglen, 0, 30-msglen); - msglen = 30; - capimsg_setlen(card->msgbuf, 30); - } - - skb = alloc_skb(datab3len + msglen, gfp_atomic); - if (!skb) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - skb_put_data(skb, card->databuf, datab3len); - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_message: - - applid = (unsigned) b1_get_word(card->port); - msglen = b1_get_slice(card->port, card->msgbuf); - skb = alloc_skb(msglen, gfp_atomic); - - if (!skb) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - spin_unlock_irqrestore(&card->lock, flags); - } else { - skb_put_data(skb, card->msgbuf, msglen); - if (capimsg_cmd(skb->data) == capi_data_b3_conf) - capilib_data_b3_conf(&cinfo->ncci_head, applid, - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_new_ncci: - - applid = 
b1_get_word(card->port); - ncci = b1_get_word(card->port); - windowsize = b1_get_word(card->port); - capilib_new_ncci(&cinfo->ncci_head, applid, ncci, windowsize); - spin_unlock_irqrestore(&card->lock, flags); - break; - - case receive_free_ncci: - - applid = b1_get_word(card->port); - ncci = b1_get_word(card->port); - if (ncci != 0xffffffff) - capilib_free_ncci(&cinfo->ncci_head, applid, ncci); - spin_unlock_irqrestore(&card->lock, flags); - break; - - case receive_start: - /* b1_put_byte(card->port, send_pollack); */ - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_resume_output(ctrl); - break; - - case receive_stop: - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_suspend_output(ctrl); - break; - - case receive_init: - - cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf); - spin_unlock_irqrestore(&card->lock, flags); - b1_parse_version(cinfo); - printk(kern_info "%s: %s-card (%s) now active ", - card->name, - cinfo->version[ver_cardtype], - cinfo->version[ver_driver]); - capi_ctr_ready(ctrl); - break; - - case receive_task_ready: - applid = (unsigned) b1_get_word(card->port); - msglen = b1_get_slice(card->port, card->msgbuf); - spin_unlock_irqrestore(&card->lock, flags); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: task %d "%s" ready. ", - card->name, applid, card->msgbuf); - break; - - case receive_debugmsg: - msglen = b1_get_slice(card->port, card->msgbuf); - spin_unlock_irqrestore(&card->lock, flags); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: debug: %s ", card->name, card->msgbuf); - break; - - case 0xff: - spin_unlock_irqrestore(&card->lock, flags); - printk(kern_err "%s: card removed ? 
", card->name); - return irq_none; - default: - spin_unlock_irqrestore(&card->lock, flags); - printk(kern_err "%s: b1_interrupt: 0x%x ??? ", - card->name, b1cmd); - return irq_handled; - } - return irq_handled; -} - -/* ------------------------------------------------------------- */ -int b1_proc_show(struct seq_file *m, void *v) -{ - struct capi_ctr *ctrl = m->private; - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - u8 flag; - char *s; - - seq_printf(m, "%-16s %s ", "name", card->name); - seq_printf(m, "%-16s 0x%x ", "io", card->port); - seq_printf(m, "%-16s %d ", "irq", card->irq); - switch (card->cardtype) { - case avm_b1isa: s = "b1 isa"; break; - case avm_b1pci: s = "b1 pci"; break; - case avm_b1pcmcia: s = "b1 pcmcia"; break; - case avm_m1: s = "m1"; break; - case avm_m2: s = "m2"; break; - case avm_t1isa: s = "t1 isa (hema)"; break; - case avm_t1pci: s = "t1 pci"; break; - case avm_c4: s = "c4"; break; - case avm_c2: s = "c2"; break; - default: s = "???"; break; - } - seq_printf(m, "%-16s %s ", "type", s); - if (card->cardtype == avm_t1isa) - seq_printf(m, "%-16s %d ", "cardnr", card->cardnr); - - s = cinfo->version[ver_driver]; - if (s) - seq_printf(m, "%-16s %s ", "ver_driver", s); - - s = cinfo->version[ver_cardtype]; - if (s) - seq_printf(m, "%-16s %s ", "ver_cardtype", s); - - s = cinfo->version[ver_serial]; - if (s) - seq_printf(m, "%-16s %s ", "ver_serial", s); - - if (card->cardtype != avm_m1) { - flag = ((u8 *)(ctrl->profile.manu))[3]; - if (flag) - seq_printf(m, "%-16s%s%s%s%s%s%s%s ", - "protocol", - (flag & 0x01) ? " dss1" : "", - (flag & 0x02) ? " ct1" : "", - (flag & 0x04) ? " vn3" : "", - (flag & 0x08) ? " ni1" : "", - (flag & 0x10) ? " austel" : "", - (flag & 0x20) ? " ess" : "", - (flag & 0x40) ? " 1tr6" : "" - ); - } - if (card->cardtype != avm_m1) { - flag = ((u8 *)(ctrl->profile.manu))[5]; - if (flag) - seq_printf(m, "%-16s%s%s%s%s ", - "linetype", - (flag & 0x01) ? 
" point to point" : "", - (flag & 0x02) ? " point to multipoint" : "", - (flag & 0x08) ? " leased line without d-channel" : "", - (flag & 0x04) ? " leased line with d-channel" : "" - ); - } - seq_printf(m, "%-16s %s ", "cardname", cinfo->cardname); - - return 0; -} -export_symbol(b1_proc_show); - -/* ------------------------------------------------------------- */ - -#ifdef config_pci - -avmcard_dmainfo * -avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize) -{ - avmcard_dmainfo *p; - void *buf; - - p = kzalloc(sizeof(avmcard_dmainfo), gfp_kernel); - if (!p) { - printk(kern_warning "%s: no memory. ", name); - goto err; - } - - p->recvbuf.size = rsize; - buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr); - if (!buf) { - printk(kern_warning "%s: allocation of receive dma buffer failed. ", name); - goto err_kfree; - } - p->recvbuf.dmabuf = buf; - - p->sendbuf.size = ssize; - buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr); - if (!buf) { - printk(kern_warning "%s: allocation of send dma buffer failed. 
", name); - goto err_free_consistent; - } - - p->sendbuf.dmabuf = buf; - skb_queue_head_init(&p->send_queue); - - return p; - -err_free_consistent: - pci_free_consistent(p->pcidev, p->recvbuf.size, - p->recvbuf.dmabuf, p->recvbuf.dmaaddr); -err_kfree: - kfree(p); -err: - return null; -} - -void avmcard_dma_free(avmcard_dmainfo *p) -{ - pci_free_consistent(p->pcidev, p->recvbuf.size, - p->recvbuf.dmabuf, p->recvbuf.dmaaddr); - pci_free_consistent(p->pcidev, p->sendbuf.size, - p->sendbuf.dmabuf, p->sendbuf.dmaaddr); - skb_queue_purge(&p->send_queue); - kfree(p); -} - -export_symbol(avmcard_dma_alloc); -export_symbol(avmcard_dma_free); - -#endif - -export_symbol(b1_irq_table); - -export_symbol(b1_alloc_card); -export_symbol(b1_free_card); -export_symbol(b1_detect); -export_symbol(b1_getrevision); -export_symbol(b1_load_t4file); -export_symbol(b1_load_config); -export_symbol(b1_loaded); -export_symbol(b1_load_firmware); -export_symbol(b1_reset_ctr); -export_symbol(b1_register_appl); -export_symbol(b1_release_appl); -export_symbol(b1_send_message); - -export_symbol(b1_parse_version); -export_symbol(b1_interrupt); - -static int __init b1_init(void) -{ - char *p; - char rev[32]; - - p = strchr(revision, ':'); - if (p && p[1]) { - strlcpy(rev, p + 2, 32); - p = strchr(rev, '$'); - if (p && p > rev) - *(p - 1) = 0; - } else { - strcpy(rev, "1.0"); - } - printk(kern_info "b1: revision %s ", rev); - - return 0; -} - -static void __exit b1_exit(void) -{ -} - -module_init(b1_init); -module_exit(b1_exit); diff --git a/drivers/staging/isdn/avm/b1dma.c b/drivers/staging/isdn/avm/b1dma.c --- a/drivers/staging/isdn/avm/b1dma.c +++ /dev/null -/* $id: b1dma.c,v 1.1.2.3 2004/02/10 01:07:12 keil exp $ - * - * common module for avm b1 cards that support dma with amcc - * - * copyright 2000 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. 
- * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/capi.h> -#include <linux/kernelcapi.h> -#include <linux/gfp.h> -#include <asm/io.h> -#include <linux/init.h> -#include <linux/uaccess.h> -#include <linux/netdevice.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> - -static char *revision = "$revision: 1.1.2.3 $"; - -#undef avm_b1dma_debug - -/* ------------------------------------------------------------- */ - -module_description("capi4linux: dma support for active avm cards"); -module_author("carsten paeth"); -module_license("gpl"); - -static bool suppress_pollack = 0; -module_param(suppress_pollack, bool, 0); - -/* ------------------------------------------------------------- */ - -static void b1dma_dispatch_tx(avmcard *card); - -/* ------------------------------------------------------------- */ - -/* s5933 */ - -#define amcc_rxptr 0x24 -#define amcc_rxlen 0x28 -#define amcc_txptr 0x2c -#define amcc_txlen 0x30 - -#define amcc_intcsr 0x38 -# define en_read_tc_int 0x00008000l -# define en_write_tc_int 0x00004000l -# define en_tx_tc_int en_read_tc_int -# define en_rx_tc_int en_write_tc_int -# define avm_flag 0x30000000l - -# define any_s5933_int 0x00800000l -# define read_tc_int 0x00080000l -# define write_tc_int 0x00040000l -# define tx_tc_int read_tc_int -# define rx_tc_int write_tc_int -# define master_abort_int 0x00100000l -# define target_abort_int 0x00200000l -# define bus_master_int 0x00200000l -# define all_int 0x000c0000l - -#define amcc_mcsr 0x3c -# define a2p_hi_priority 0x00000100l -# define en_a2p_transfers 0x00000400l -# define p2a_hi_priority 0x00001000l -# define en_p2a_transfers 0x00004000l -# define reset_a2p_flags 0x04000000l -# define reset_p2a_flags 
0x02000000l - -/* ------------------------------------------------------------- */ - -static inline void b1dma_writel(avmcard *card, u32 value, int off) -{ - writel(value, card->mbase + off); -} - -static inline u32 b1dma_readl(avmcard *card, int off) -{ - return readl(card->mbase + off); -} - -/* ------------------------------------------------------------- */ - -static inline int b1dma_tx_empty(unsigned int port) -{ - return inb(port + 0x03) & 0x1; -} - -static inline int b1dma_rx_full(unsigned int port) -{ - return inb(port + 0x02) & 0x1; -} - -static int b1dma_tolink(avmcard *card, void *buf, unsigned int len) -{ - unsigned long stop = jiffies + 1 * hz; /* maximum wait time 1 sec */ - unsigned char *s = (unsigned char *)buf; - while (len--) { - while (!b1dma_tx_empty(card->port) - && time_before(jiffies, stop)); - if (!b1dma_tx_empty(card->port)) - return -1; - t1outp(card->port, 0x01, *s++); - } - return 0; -} - -static int b1dma_fromlink(avmcard *card, void *buf, unsigned int len) -{ - unsigned long stop = jiffies + 1 * hz; /* maximum wait time 1 sec */ - unsigned char *s = (unsigned char *)buf; - while (len--) { - while (!b1dma_rx_full(card->port) - && time_before(jiffies, stop)); - if (!b1dma_rx_full(card->port)) - return -1; - *s++ = t1inp(card->port, 0x00); - } - return 0; -} - -static int writereg(avmcard *card, u32 reg, u8 val) -{ - u8 cmd = 0x00; - if (b1dma_tolink(card, &cmd, 1) == 0 - && b1dma_tolink(card, ®, 4) == 0) { - u32 tmp = val; - return b1dma_tolink(card, &tmp, 4); - } - return -1; -} - -static u8 readreg(avmcard *card, u32 reg) -{ - u8 cmd = 0x01; - if (b1dma_tolink(card, &cmd, 1) == 0 - && b1dma_tolink(card, ®, 4) == 0) { - u32 tmp; - if (b1dma_fromlink(card, &tmp, 4) == 0) - return (u8)tmp; - } - return 0xff; -} - -/* ------------------------------------------------------------- */ - -static inline void _put_byte(void **pp, u8 val) -{ - u8 *s = *pp; - *s++ = val; - *pp = s; -} - -static inline void _put_word(void **pp, u32 val) -{ - u8 *s 
= *pp; - *s++ = val & 0xff; - *s++ = (val >> 8) & 0xff; - *s++ = (val >> 16) & 0xff; - *s++ = (val >> 24) & 0xff; - *pp = s; -} - -static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len) -{ - unsigned i = len; - _put_word(pp, i); - while (i-- > 0) - _put_byte(pp, *dp++); -} - -static inline u8 _get_byte(void **pp) -{ - u8 *s = *pp; - u8 val; - val = *s++; - *pp = s; - return val; -} - -static inline u32 _get_word(void **pp) -{ - u8 *s = *pp; - u32 val; - val = *s++; - val |= (*s++ << 8); - val |= (*s++ << 16); - val |= (*s++ << 24); - *pp = s; - return val; -} - -static inline u32 _get_slice(void **pp, unsigned char *dp) -{ - unsigned int len, i; - - len = i = _get_word(pp); - while (i-- > 0) *dp++ = _get_byte(pp); - return len; -} - -/* ------------------------------------------------------------- */ - -void b1dma_reset(avmcard *card) -{ - card->csr = 0x0; - b1dma_writel(card, card->csr, amcc_intcsr); - b1dma_writel(card, 0, amcc_mcsr); - b1dma_writel(card, 0, amcc_rxlen); - b1dma_writel(card, 0, amcc_txlen); - - t1outp(card->port, 0x10, 0x00); - t1outp(card->port, 0x07, 0x00); - - b1dma_writel(card, 0, amcc_mcsr); - mdelay(10); - b1dma_writel(card, 0x0f000000, amcc_mcsr); /* reset all */ - mdelay(10); - b1dma_writel(card, 0, amcc_mcsr); - if (card->cardtype == avm_t1pci) - mdelay(42); - else - mdelay(10); -} - -/* ------------------------------------------------------------- */ - -static int b1dma_detect(avmcard *card) -{ - b1dma_writel(card, 0, amcc_mcsr); - mdelay(10); - b1dma_writel(card, 0x0f000000, amcc_mcsr); /* reset all */ - mdelay(10); - b1dma_writel(card, 0, amcc_mcsr); - mdelay(42); - - b1dma_writel(card, 0, amcc_rxlen); - b1dma_writel(card, 0, amcc_txlen); - card->csr = 0x0; - b1dma_writel(card, card->csr, amcc_intcsr); - - if (b1dma_readl(card, amcc_mcsr) != 0x000000e6) - return 1; - - b1dma_writel(card, 0xffffffff, amcc_rxptr); - b1dma_writel(card, 0xffffffff, amcc_txptr); - if (b1dma_readl(card, amcc_rxptr) != 0xfffffffc - || 
b1dma_readl(card, amcc_txptr) != 0xfffffffc) - return 2; - - b1dma_writel(card, 0x0, amcc_rxptr); - b1dma_writel(card, 0x0, amcc_txptr); - if (b1dma_readl(card, amcc_rxptr) != 0x0 - || b1dma_readl(card, amcc_txptr) != 0x0) - return 3; - - t1outp(card->port, 0x10, 0x00); - t1outp(card->port, 0x07, 0x00); - - t1outp(card->port, 0x02, 0x02); - t1outp(card->port, 0x03, 0x02); - - if ((t1inp(card->port, 0x02) & 0xfe) != 0x02 - || t1inp(card->port, 0x3) != 0x03) - return 4; - - t1outp(card->port, 0x02, 0x00); - t1outp(card->port, 0x03, 0x00); - - if ((t1inp(card->port, 0x02) & 0xfe) != 0x00 - || t1inp(card->port, 0x3) != 0x01) - return 5; - - return 0; -} - -int t1pci_detect(avmcard *card) -{ - int ret; - - if ((ret = b1dma_detect(card)) != 0) - return ret; - - /* transputer test */ - - if (writereg(card, 0x80001000, 0x11) != 0 - || writereg(card, 0x80101000, 0x22) != 0 - || writereg(card, 0x80201000, 0x33) != 0 - || writereg(card, 0x80301000, 0x44) != 0) - return 6; - - if (readreg(card, 0x80001000) != 0x11 - || readreg(card, 0x80101000) != 0x22 - || readreg(card, 0x80201000) != 0x33 - || readreg(card, 0x80301000) != 0x44) - return 7; - - if (writereg(card, 0x80001000, 0x55) != 0 - || writereg(card, 0x80101000, 0x66) != 0 - || writereg(card, 0x80201000, 0x77) != 0 - || writereg(card, 0x80301000, 0x88) != 0) - return 8; - - if (readreg(card, 0x80001000) != 0x55 - || readreg(card, 0x80101000) != 0x66 - || readreg(card, 0x80201000) != 0x77 - || readreg(card, 0x80301000) != 0x88) - return 9; - - return 0; -} - -int b1pciv4_detect(avmcard *card) -{ - int ret, i; - - if ((ret = b1dma_detect(card)) != 0) - return ret; - - for (i = 0; i < 5; i++) { - if (writereg(card, 0x80a00000, 0x21) != 0) - return 6; - if ((readreg(card, 0x80a00000) & 0x01) != 0x01) - return 7; - } - for (i = 0; i < 5; i++) { - if (writereg(card, 0x80a00000, 0x20) != 0) - return 8; - if ((readreg(card, 0x80a00000) & 0x01) != 0x00) - return 9; - } - - return 0; -} - -static void b1dma_queue_tx(avmcard *card, 
struct sk_buff *skb) -{ - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - - skb_queue_tail(&card->dma->send_queue, skb); - - if (!(card->csr & en_tx_tc_int)) { - b1dma_dispatch_tx(card); - b1dma_writel(card, card->csr, amcc_intcsr); - } - - spin_unlock_irqrestore(&card->lock, flags); -} - -/* ------------------------------------------------------------- */ - -static void b1dma_dispatch_tx(avmcard *card) -{ - avmcard_dmainfo *dma = card->dma; - struct sk_buff *skb; - u8 cmd, subcmd; - u16 len; - u32 txlen; - void *p; - - skb = skb_dequeue(&dma->send_queue); - - len = capimsg_len(skb->data); - - if (len) { - cmd = capimsg_command(skb->data); - subcmd = capimsg_subcommand(skb->data); - - p = dma->sendbuf.dmabuf; - - if (capicmd(cmd, subcmd) == capi_data_b3_req) { - u16 dlen = capimsg_datalen(skb->data); - _put_byte(&p, send_data_b3_req); - _put_slice(&p, skb->data, len); - _put_slice(&p, skb->data + len, dlen); - } else { - _put_byte(&p, send_message); - _put_slice(&p, skb->data, len); - } - txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf; -#ifdef avm_b1dma_debug - printk(kern_debug "tx: put msg len=%d ", txlen); -#endif - } else { - txlen = skb->len - 2; -#ifdef avm_b1dma_polldebug - if (skb->data[2] == send_pollack) - printk(kern_info "%s: send ack ", card->name); -#endif -#ifdef avm_b1dma_debug - printk(kern_debug "tx: put 0x%x len=%d ", - skb->data[2], txlen); -#endif - skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, - skb->len - 2); - } - txlen = (txlen + 3) & ~3; - - b1dma_writel(card, dma->sendbuf.dmaaddr, amcc_txptr); - b1dma_writel(card, txlen, amcc_txlen); - - card->csr |= en_tx_tc_int; - - dev_kfree_skb_any(skb); -} - -/* ------------------------------------------------------------- */ - -static void queue_pollack(avmcard *card) -{ - struct sk_buff *skb; - void *p; - - skb = alloc_skb(3, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost poll ack ", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); 
- _put_byte(&p, 0); - _put_byte(&p, send_pollack); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - b1dma_queue_tx(card, skb); -} - -/* ------------------------------------------------------------- */ - -static void b1dma_handle_rx(avmcard *card) -{ - avmctrl_info *cinfo = &card->ctrlinfo[0]; - avmcard_dmainfo *dma = card->dma; - struct capi_ctr *ctrl = &cinfo->capi_ctrl; - struct sk_buff *skb; - void *p = dma->recvbuf.dmabuf + 4; - u32 applid, msglen, datab3len, ncci, windowsize; - u8 b1cmd = _get_byte(&p); - -#ifdef avm_b1dma_debug - printk(kern_debug "rx: 0x%x %lu ", b1cmd, (unsigned long)dma->recvlen); -#endif - - switch (b1cmd) { - case receive_data_b3_ind: - - applid = (unsigned) _get_word(&p); - msglen = _get_slice(&p, card->msgbuf); - datab3len = _get_slice(&p, card->databuf); - - if (msglen < 30) { /* not capi 64bit */ - memset(card->msgbuf + msglen, 0, 30 - msglen); - msglen = 30; - capimsg_setlen(card->msgbuf, 30); - } - if (!(skb = alloc_skb(datab3len + msglen, gfp_atomic))) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - skb_put_data(skb, card->databuf, datab3len); - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_message: - - applid = (unsigned) _get_word(&p); - msglen = _get_slice(&p, card->msgbuf); - if (!(skb = alloc_skb(msglen, gfp_atomic))) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - if (capimsg_cmd(skb->data) == capi_data_b3_conf) { - spin_lock(&card->lock); - capilib_data_b3_conf(&cinfo->ncci_head, applid, - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - spin_unlock(&card->lock); - } - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_new_ncci: - - applid = _get_word(&p); - ncci = _get_word(&p); - windowsize = _get_word(&p); - spin_lock(&card->lock); - capilib_new_ncci(&cinfo->ncci_head, applid, ncci, windowsize); - 
spin_unlock(&card->lock); - break; - - case receive_free_ncci: - - applid = _get_word(&p); - ncci = _get_word(&p); - - if (ncci != 0xffffffff) { - spin_lock(&card->lock); - capilib_free_ncci(&cinfo->ncci_head, applid, ncci); - spin_unlock(&card->lock); - } - break; - - case receive_start: -#ifdef avm_b1dma_polldebug - printk(kern_info "%s: receive poll ", card->name); -#endif - if (!suppress_pollack) - queue_pollack(card); - capi_ctr_resume_output(ctrl); - break; - - case receive_stop: - capi_ctr_suspend_output(ctrl); - break; - - case receive_init: - - cinfo->versionlen = _get_slice(&p, cinfo->versionbuf); - b1_parse_version(cinfo); - printk(kern_info "%s: %s-card (%s) now active ", - card->name, - cinfo->version[ver_cardtype], - cinfo->version[ver_driver]); - capi_ctr_ready(ctrl); - break; - - case receive_task_ready: - applid = (unsigned) _get_word(&p); - msglen = _get_slice(&p, card->msgbuf); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: task %d "%s" ready. ", - card->name, applid, card->msgbuf); - break; - - case receive_debugmsg: - msglen = _get_slice(&p, card->msgbuf); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: debug: %s ", card->name, card->msgbuf); - break; - - default: - printk(kern_err "%s: b1dma_interrupt: 0x%x ??? 
", - card->name, b1cmd); - return; - } -} - -/* ------------------------------------------------------------- */ - -static void b1dma_handle_interrupt(avmcard *card) -{ - u32 status; - u32 newcsr; - - spin_lock(&card->lock); - - status = b1dma_readl(card, amcc_intcsr); - if ((status & any_s5933_int) == 0) { - spin_unlock(&card->lock); - return; - } - - newcsr = card->csr | (status & all_int); - if (status & tx_tc_int) newcsr &= ~en_tx_tc_int; - if (status & rx_tc_int) newcsr &= ~en_rx_tc_int; - b1dma_writel(card, newcsr, amcc_intcsr); - - if ((status & rx_tc_int) != 0) { - struct avmcard_dmainfo *dma = card->dma; - u32 rxlen; - if (card->dma->recvlen == 0) { - rxlen = b1dma_readl(card, amcc_rxlen); - if (rxlen == 0) { - dma->recvlen = *((u32 *)dma->recvbuf.dmabuf); - rxlen = (dma->recvlen + 3) & ~3; - b1dma_writel(card, dma->recvbuf.dmaaddr + 4, amcc_rxptr); - b1dma_writel(card, rxlen, amcc_rxlen); -#ifdef avm_b1dma_debug - } else { - printk(kern_err "%s: rx not complete (%d). ", - card->name, rxlen); -#endif - } - } else { - spin_unlock(&card->lock); - b1dma_handle_rx(card); - dma->recvlen = 0; - spin_lock(&card->lock); - b1dma_writel(card, dma->recvbuf.dmaaddr, amcc_rxptr); - b1dma_writel(card, 4, amcc_rxlen); - } - } - - if ((status & tx_tc_int) != 0) { - if (skb_queue_empty(&card->dma->send_queue)) - card->csr &= ~en_tx_tc_int; - else - b1dma_dispatch_tx(card); - } - b1dma_writel(card, card->csr, amcc_intcsr); - - spin_unlock(&card->lock); -} - -irqreturn_t b1dma_interrupt(int interrupt, void *devptr) -{ - avmcard *card = devptr; - - b1dma_handle_interrupt(card); - return irq_handled; -} - -/* ------------------------------------------------------------- */ - -static int b1dma_loaded(avmcard *card) -{ - unsigned long stop; - unsigned char ans; - unsigned long tout = 2; - unsigned int base = card->port; - - for (stop = jiffies + tout * hz; time_before(jiffies, stop);) { - if (b1_tx_empty(base)) - break; - } - if (!b1_tx_empty(base)) { - printk(kern_err "%s: 
b1dma_loaded: tx err, corrupted t4 file ? ", - card->name); - return 0; - } - b1_put_byte(base, send_pollack); - for (stop = jiffies + tout * hz; time_before(jiffies, stop);) { - if (b1_rx_full(base)) { - if ((ans = b1_get_byte(base)) == receive_polldword) { - return 1; - } - printk(kern_err "%s: b1dma_loaded: got 0x%x, firmware not running in dword mode ", card->name, ans); - return 0; - } - } - printk(kern_err "%s: b1dma_loaded: firmware not running ", card->name); - return 0; -} - -/* ------------------------------------------------------------- */ - -static void b1dma_send_init(avmcard *card) -{ - struct sk_buff *skb; - void *p; - - skb = alloc_skb(15, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost register appl. ", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_init); - _put_word(&p, capi_maxappl); - _put_word(&p, avm_ncci_per_channel * 30); - _put_word(&p, card->cardnr - 1); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - b1dma_queue_tx(card, skb); -} - -int b1dma_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - int retval; - - b1dma_reset(card); - - if ((retval = b1_load_t4file(card, &data->firmware))) { - b1dma_reset(card); - printk(kern_err "%s: failed to load t4file!! ", - card->name); - return retval; - } - - if (data->configuration.len > 0 && data->configuration.data) { - if ((retval = b1_load_config(card, &data->configuration))) { - b1dma_reset(card); - printk(kern_err "%s: failed to load config!! ", - card->name); - return retval; - } - } - - if (!b1dma_loaded(card)) { - b1dma_reset(card); - printk(kern_err "%s: failed to load t4file. 
", card->name); - return -eio; - } - - card->csr = avm_flag; - b1dma_writel(card, card->csr, amcc_intcsr); - b1dma_writel(card, en_a2p_transfers | en_p2a_transfers | a2p_hi_priority | - p2a_hi_priority | reset_a2p_flags | reset_p2a_flags, - amcc_mcsr); - t1outp(card->port, 0x07, 0x30); - t1outp(card->port, 0x10, 0xf0); - - card->dma->recvlen = 0; - b1dma_writel(card, card->dma->recvbuf.dmaaddr, amcc_rxptr); - b1dma_writel(card, 4, amcc_rxlen); - card->csr |= en_rx_tc_int; - b1dma_writel(card, card->csr, amcc_intcsr); - - b1dma_send_init(card); - - return 0; -} - -void b1dma_reset_ctr(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - b1dma_reset(card); - - memset(cinfo->version, 0, sizeof(cinfo->version)); - capilib_release(&cinfo->ncci_head); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_down(ctrl); -} - -/* ------------------------------------------------------------- */ - -void b1dma_register_appl(struct capi_ctr *ctrl, - u16 appl, - capi_register_params *rp) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - struct sk_buff *skb; - int want = rp->level3cnt; - int nconn; - void *p; - - if (want > 0) nconn = want; - else nconn = ctrl->profile.nbchannel * -want; - if (nconn == 0) nconn = ctrl->profile.nbchannel; - - skb = alloc_skb(23, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost register appl. 
", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_register); - _put_word(&p, appl); - _put_word(&p, 1024 * (nconn + 1)); - _put_word(&p, nconn); - _put_word(&p, rp->datablkcnt); - _put_word(&p, rp->datablklen); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - b1dma_queue_tx(card, skb); -} - -/* ------------------------------------------------------------- */ - -void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - struct sk_buff *skb; - void *p; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - capilib_release_appl(&cinfo->ncci_head, appl); - spin_unlock_irqrestore(&card->lock, flags); - - skb = alloc_skb(7, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost release appl. ", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_release); - _put_word(&p, appl); - - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - b1dma_queue_tx(card, skb); -} - -/* ------------------------------------------------------------- */ - -u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - u16 retval = capi_noerror; - - if (capimsg_cmd(skb->data) == capi_data_b3_req) { - unsigned long flags; - spin_lock_irqsave(&card->lock, flags); - retval = capilib_data_b3_req(&cinfo->ncci_head, - capimsg_appid(skb->data), - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - spin_unlock_irqrestore(&card->lock, flags); - } - if (retval == capi_noerror) - b1dma_queue_tx(card, skb); - - return retval; -} - -/* ------------------------------------------------------------- */ - -int b1dma_proc_show(struct seq_file *m, void *v) -{ - struct capi_ctr *ctrl = m->private; - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - 
u8 flag; - char *s; - u32 txoff, txlen, rxoff, rxlen, csr; - unsigned long flags; - - seq_printf(m, "%-16s %s ", "name", card->name); - seq_printf(m, "%-16s 0x%x ", "io", card->port); - seq_printf(m, "%-16s %d ", "irq", card->irq); - seq_printf(m, "%-16s 0x%lx ", "membase", card->membase); - switch (card->cardtype) { - case avm_b1isa: s = "b1 isa"; break; - case avm_b1pci: s = "b1 pci"; break; - case avm_b1pcmcia: s = "b1 pcmcia"; break; - case avm_m1: s = "m1"; break; - case avm_m2: s = "m2"; break; - case avm_t1isa: s = "t1 isa (hema)"; break; - case avm_t1pci: s = "t1 pci"; break; - case avm_c4: s = "c4"; break; - case avm_c2: s = "c2"; break; - default: s = "???"; break; - } - seq_printf(m, "%-16s %s ", "type", s); - if ((s = cinfo->version[ver_driver]) != null) - seq_printf(m, "%-16s %s ", "ver_driver", s); - if ((s = cinfo->version[ver_cardtype]) != null) - seq_printf(m, "%-16s %s ", "ver_cardtype", s); - if ((s = cinfo->version[ver_serial]) != null) - seq_printf(m, "%-16s %s ", "ver_serial", s); - - if (card->cardtype != avm_m1) { - flag = ((u8 *)(ctrl->profile.manu))[3]; - if (flag) - seq_printf(m, "%-16s%s%s%s%s%s%s%s ", - "protocol", - (flag & 0x01) ? " dss1" : "", - (flag & 0x02) ? " ct1" : "", - (flag & 0x04) ? " vn3" : "", - (flag & 0x08) ? " ni1" : "", - (flag & 0x10) ? " austel" : "", - (flag & 0x20) ? " ess" : "", - (flag & 0x40) ? " 1tr6" : "" - ); - } - if (card->cardtype != avm_m1) { - flag = ((u8 *)(ctrl->profile.manu))[5]; - if (flag) - seq_printf(m, "%-16s%s%s%s%s ", - "linetype", - (flag & 0x01) ? " point to point" : "", - (flag & 0x02) ? " point to multipoint" : "", - (flag & 0x08) ? " leased line without d-channel" : "", - (flag & 0x04) ? 
" leased line with d-channel" : "" - ); - } - seq_printf(m, "%-16s %s ", "cardname", cinfo->cardname); - - - spin_lock_irqsave(&card->lock, flags); - - txoff = (dma_addr_t)b1dma_readl(card, amcc_txptr)-card->dma->sendbuf.dmaaddr; - txlen = b1dma_readl(card, amcc_txlen); - - rxoff = (dma_addr_t)b1dma_readl(card, amcc_rxptr)-card->dma->recvbuf.dmaaddr; - rxlen = b1dma_readl(card, amcc_rxlen); - - csr = b1dma_readl(card, amcc_intcsr); - - spin_unlock_irqrestore(&card->lock, flags); - - seq_printf(m, "%-16s 0x%lx ", "csr (cached)", (unsigned long)card->csr); - seq_printf(m, "%-16s 0x%lx ", "csr", (unsigned long)csr); - seq_printf(m, "%-16s %lu ", "txoff", (unsigned long)txoff); - seq_printf(m, "%-16s %lu ", "txlen", (unsigned long)txlen); - seq_printf(m, "%-16s %lu ", "rxoff", (unsigned long)rxoff); - seq_printf(m, "%-16s %lu ", "rxlen", (unsigned long)rxlen); - - return 0; -} -export_symbol(b1dma_proc_show); - -/* ------------------------------------------------------------- */ - -export_symbol(b1dma_reset); -export_symbol(t1pci_detect); -export_symbol(b1pciv4_detect); -export_symbol(b1dma_interrupt); - -export_symbol(b1dma_load_firmware); -export_symbol(b1dma_reset_ctr); -export_symbol(b1dma_register_appl); -export_symbol(b1dma_release_appl); -export_symbol(b1dma_send_message); - -static int __init b1dma_init(void) -{ - char *p; - char rev[32]; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, sizeof(rev)); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - printk(kern_info "b1dma: revision %s ", rev); - - return 0; -} - -static void __exit b1dma_exit(void) -{ -} - -module_init(b1dma_init); -module_exit(b1dma_exit); diff --git a/drivers/staging/isdn/avm/b1isa.c b/drivers/staging/isdn/avm/b1isa.c --- a/drivers/staging/isdn/avm/b1isa.c +++ /dev/null -/* $id: b1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil exp $ - * - * module for avm b1 isa-card. 
- * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/capi.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <asm/io.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" - -/* ------------------------------------------------------------- */ - -static char *revision = "$revision: 1.1.2.3 $"; - -/* ------------------------------------------------------------- */ - -module_description("capi4linux: driver for avm b1 isa card"); -module_author("carsten paeth"); -module_license("gpl"); - -/* ------------------------------------------------------------- */ - -static void b1isa_remove(struct pci_dev *pdev) -{ - avmctrl_info *cinfo = pci_get_drvdata(pdev); - avmcard *card; - - if (!cinfo) - return; - - card = cinfo->card; - - b1_reset(card->port); - b1_reset(card->port); - - detach_capi_ctr(&cinfo->capi_ctrl); - free_irq(card->irq, card); - release_region(card->port, avmb1_portlen); - b1_free_card(card); -} - -/* ------------------------------------------------------------- */ - -static char *b1isa_procinfo(struct capi_ctr *ctrl); - -static int b1isa_probe(struct pci_dev *pdev) -{ - avmctrl_info *cinfo; - avmcard *card; - int retval; - - card = b1_alloc_card(1); - if (!card) { - printk(kern_warning "b1isa: no memory. 
"); - retval = -enomem; - goto err; - } - - cinfo = card->ctrlinfo; - - card->port = pci_resource_start(pdev, 0); - card->irq = pdev->irq; - card->cardtype = avm_b1isa; - sprintf(card->name, "b1isa-%x", card->port); - - if (card->port != 0x150 && card->port != 0x250 - && card->port != 0x300 && card->port != 0x340) { - printk(kern_warning "b1isa: invalid port 0x%x. ", card->port); - retval = -einval; - goto err_free; - } - if (b1_irq_table[card->irq & 0xf] == 0) { - printk(kern_warning "b1isa: irq %d not valid. ", card->irq); - retval = -einval; - goto err_free; - } - if (!request_region(card->port, avmb1_portlen, card->name)) { - printk(kern_warning "b1isa: ports 0x%03x-0x%03x in use. ", - card->port, card->port + avmb1_portlen); - retval = -ebusy; - goto err_free; - } - retval = request_irq(card->irq, b1_interrupt, 0, card->name, card); - if (retval) { - printk(kern_err "b1isa: unable to get irq %d. ", card->irq); - goto err_release_region; - } - b1_reset(card->port); - if ((retval = b1_detect(card->port, card->cardtype)) != 0) { - printk(kern_notice "b1isa: no card at 0x%x (%d) ", - card->port, retval); - retval = -enodev; - goto err_free_irq; - } - b1_reset(card->port); - b1_getrevision(card); - - cinfo->capi_ctrl.owner = this_module; - cinfo->capi_ctrl.driver_name = "b1isa"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = b1_register_appl; - cinfo->capi_ctrl.release_appl = b1_release_appl; - cinfo->capi_ctrl.send_message = b1_send_message; - cinfo->capi_ctrl.load_firmware = b1_load_firmware; - cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; - cinfo->capi_ctrl.procinfo = b1isa_procinfo; - cinfo->capi_ctrl.proc_show = b1_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_err "b1isa: attach controller failed. 
"); - goto err_free_irq; - } - - printk(kern_info "b1isa: avm b1 isa at i/o %#x, irq %d, revision %d ", - card->port, card->irq, card->revision); - - pci_set_drvdata(pdev, cinfo); - return 0; - -err_free_irq: - free_irq(card->irq, card); -err_release_region: - release_region(card->port, avmb1_portlen); -err_free: - b1_free_card(card); -err: - return retval; -} - -static char *b1isa_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? cinfo->card->revision : 0 - ); - return cinfo->infobuf; -} - -/* ------------------------------------------------------------- */ - -#define max_cards 4 -static struct pci_dev isa_dev[max_cards]; -static int io[max_cards]; -static int irq[max_cards]; - -module_param_hw_array(io, int, ioport, null, 0); -module_param_hw_array(irq, int, irq, null, 0); -module_parm_desc(io, "i/o base address(es)"); -module_parm_desc(irq, "irq number(s) (assigned)"); - -static int b1isa_add_card(struct capi_driver *driver, capicardparams *data) -{ - int i; - - for (i = 0; i < max_cards; i++) { - if (isa_dev[i].resource[0].start) - continue; - - isa_dev[i].resource[0].start = data->port; - isa_dev[i].irq = data->irq; - - if (b1isa_probe(&isa_dev[i]) == 0) - return 0; - } - return -enodev; -} - -static struct capi_driver capi_driver_b1isa = { - .name = "b1isa", - .revision = "1.0", - .add_card = b1isa_add_card, -}; - -static int __init b1isa_init(void) -{ - char *p; - char rev[32]; - int i; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, 32); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - for (i = 0; i < max_cards; i++) { - if (!io[i]) - break; - - 
isa_dev[i].resource[0].start = io[i]; - isa_dev[i].irq = irq[i]; - - if (b1isa_probe(&isa_dev[i]) != 0) - return -enodev; - } - - strlcpy(capi_driver_b1isa.revision, rev, 32); - register_capi_driver(&capi_driver_b1isa); - printk(kern_info "b1isa: revision %s ", rev); - - return 0; -} - -static void __exit b1isa_exit(void) -{ - int i; - - for (i = 0; i < max_cards; i++) { - if (isa_dev[i].resource[0].start) - b1isa_remove(&isa_dev[i]); - } - unregister_capi_driver(&capi_driver_b1isa); -} - -module_init(b1isa_init); -module_exit(b1isa_exit); diff --git a/drivers/staging/isdn/avm/b1pci.c b/drivers/staging/isdn/avm/b1pci.c --- a/drivers/staging/isdn/avm/b1pci.c +++ /dev/null -/* $id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil exp $ - * - * module for avm b1 pci-card. - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/pci.h> -#include <linux/capi.h> -#include <asm/io.h> -#include <linux/init.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" - -/* ------------------------------------------------------------- */ - -static char *revision = "$revision: 1.1.2.2 $"; - -/* ------------------------------------------------------------- */ - -static struct pci_device_id b1pci_pci_tbl[] = { - { pci_vendor_id_avm, pci_device_id_avm_b1, pci_any_id, pci_any_id }, - { } /* terminating entry */ -}; - -module_device_table(pci, b1pci_pci_tbl); -module_description("capi4linux: driver for avm b1 pci card"); -module_author("carsten paeth"); -module_license("gpl"); - -/* ------------------------------------------------------------- */ - -static char 
*b1pci_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? cinfo->card->revision : 0 - ); - return cinfo->infobuf; -} - -/* ------------------------------------------------------------- */ - -static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev) -{ - avmcard *card; - avmctrl_info *cinfo; - int retval; - - card = b1_alloc_card(1); - if (!card) { - printk(kern_warning "b1pci: no memory. "); - retval = -enomem; - goto err; - } - - cinfo = card->ctrlinfo; - sprintf(card->name, "b1pci-%x", p->port); - card->port = p->port; - card->irq = p->irq; - card->cardtype = avm_b1pci; - - if (!request_region(card->port, avmb1_portlen, card->name)) { - printk(kern_warning "b1pci: ports 0x%03x-0x%03x in use. ", - card->port, card->port + avmb1_portlen); - retval = -ebusy; - goto err_free; - } - b1_reset(card->port); - retval = b1_detect(card->port, card->cardtype); - if (retval) { - printk(kern_notice "b1pci: no card at 0x%x (%d) ", - card->port, retval); - retval = -enodev; - goto err_release_region; - } - b1_reset(card->port); - b1_getrevision(card); - - retval = request_irq(card->irq, b1_interrupt, irqf_shared, card->name, card); - if (retval) { - printk(kern_err "b1pci: unable to get irq %d. 
", card->irq); - retval = -ebusy; - goto err_release_region; - } - - cinfo->capi_ctrl.driver_name = "b1pci"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = b1_register_appl; - cinfo->capi_ctrl.release_appl = b1_release_appl; - cinfo->capi_ctrl.send_message = b1_send_message; - cinfo->capi_ctrl.load_firmware = b1_load_firmware; - cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; - cinfo->capi_ctrl.procinfo = b1pci_procinfo; - cinfo->capi_ctrl.proc_show = b1_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - cinfo->capi_ctrl.owner = this_module; - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_err "b1pci: attach controller failed. "); - goto err_free_irq; - } - - if (card->revision >= 4) { - printk(kern_info "b1pci: avm b1 pci v4 at i/o %#x, irq %d, revision %d (no dma) ", - card->port, card->irq, card->revision); - } else { - printk(kern_info "b1pci: avm b1 pci at i/o %#x, irq %d, revision %d ", - card->port, card->irq, card->revision); - } - - pci_set_drvdata(pdev, card); - return 0; - -err_free_irq: - free_irq(card->irq, card); -err_release_region: - release_region(card->port, avmb1_portlen); -err_free: - b1_free_card(card); -err: - return retval; -} - -static void b1pci_remove(struct pci_dev *pdev) -{ - avmcard *card = pci_get_drvdata(pdev); - avmctrl_info *cinfo = card->ctrlinfo; - unsigned int port = card->port; - - b1_reset(port); - b1_reset(port); - - detach_capi_ctr(&cinfo->capi_ctrl); - free_irq(card->irq, card); - release_region(card->port, avmb1_portlen); - b1_free_card(card); -} - -#ifdef config_isdn_drv_avmb1_b1pciv4 -/* ------------------------------------------------------------- */ - -static char *b1pciv4_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? 
cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? cinfo->card->membase : 0, - cinfo->card ? cinfo->card->revision : 0 - ); - return cinfo->infobuf; -} - -/* ------------------------------------------------------------- */ - -static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev) -{ - avmcard *card; - avmctrl_info *cinfo; - int retval; - - card = b1_alloc_card(1); - if (!card) { - printk(kern_warning "b1pci: no memory. "); - retval = -enomem; - goto err; - } - - card->dma = avmcard_dma_alloc("b1pci", pdev, 2048 + 128, 2048 + 128); - if (!card->dma) { - printk(kern_warning "b1pci: dma alloc. "); - retval = -enomem; - goto err_free; - } - - cinfo = card->ctrlinfo; - sprintf(card->name, "b1pciv4-%x", p->port); - card->port = p->port; - card->irq = p->irq; - card->membase = p->membase; - card->cardtype = avm_b1pci; - - if (!request_region(card->port, avmb1_portlen, card->name)) { - printk(kern_warning "b1pci: ports 0x%03x-0x%03x in use. ", - card->port, card->port + avmb1_portlen); - retval = -ebusy; - goto err_free_dma; - } - - card->mbase = ioremap(card->membase, 64); - if (!card->mbase) { - printk(kern_notice "b1pci: can't remap memory at 0x%lx ", - card->membase); - retval = -enomem; - goto err_release_region; - } - - b1dma_reset(card); - - retval = b1pciv4_detect(card); - if (retval) { - printk(kern_notice "b1pci: no card at 0x%x (%d) ", - card->port, retval); - retval = -enodev; - goto err_unmap; - } - b1dma_reset(card); - b1_getrevision(card); - - retval = request_irq(card->irq, b1dma_interrupt, irqf_shared, card->name, card); - if (retval) { - printk(kern_err "b1pci: unable to get irq %d. 
", - card->irq); - retval = -ebusy; - goto err_unmap; - } - - cinfo->capi_ctrl.owner = this_module; - cinfo->capi_ctrl.driver_name = "b1pciv4"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = b1dma_register_appl; - cinfo->capi_ctrl.release_appl = b1dma_release_appl; - cinfo->capi_ctrl.send_message = b1dma_send_message; - cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; - cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; - cinfo->capi_ctrl.procinfo = b1pciv4_procinfo; - cinfo->capi_ctrl.proc_show = b1dma_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_err "b1pci: attach controller failed. "); - goto err_free_irq; - } - card->cardnr = cinfo->capi_ctrl.cnr; - - printk(kern_info "b1pci: avm b1 pci v4 at i/o %#x, irq %d, mem %#lx, revision %d (dma) ", - card->port, card->irq, card->membase, card->revision); - - pci_set_drvdata(pdev, card); - return 0; - -err_free_irq: - free_irq(card->irq, card); -err_unmap: - iounmap(card->mbase); -err_release_region: - release_region(card->port, avmb1_portlen); -err_free_dma: - avmcard_dma_free(card->dma); -err_free: - b1_free_card(card); -err: - return retval; - -} - -static void b1pciv4_remove(struct pci_dev *pdev) -{ - avmcard *card = pci_get_drvdata(pdev); - avmctrl_info *cinfo = card->ctrlinfo; - - b1dma_reset(card); - - detach_capi_ctr(&cinfo->capi_ctrl); - free_irq(card->irq, card); - iounmap(card->mbase); - release_region(card->port, avmb1_portlen); - avmcard_dma_free(card->dma); - b1_free_card(card); -} - -#endif /* config_isdn_drv_avmb1_b1pciv4 */ - -static int b1pci_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct capicardparams param; - int retval; - - if (pci_enable_device(pdev) < 0) { - printk(kern_err "b1pci: failed to enable avm-b1 "); - return -enodev; - } - param.irq = pdev->irq; - - if (pci_resource_start(pdev, 2)) { /* b1 pci v4 */ -#ifdef config_isdn_drv_avmb1_b1pciv4 - 
pci_set_master(pdev); -#endif - param.membase = pci_resource_start(pdev, 0); - param.port = pci_resource_start(pdev, 2); - - printk(kern_info "b1pci: pci bios reports avm-b1 v4 at i/o %#x, irq %d, mem %#x ", - param.port, param.irq, param.membase); -#ifdef config_isdn_drv_avmb1_b1pciv4 - retval = b1pciv4_probe(¶m, pdev); -#else - retval = b1pci_probe(¶m, pdev); -#endif - if (retval != 0) { - printk(kern_err "b1pci: no avm-b1 v4 at i/o %#x, irq %d, mem %#x detected ", - param.port, param.irq, param.membase); - } - } else { - param.membase = 0; - param.port = pci_resource_start(pdev, 1); - - printk(kern_info "b1pci: pci bios reports avm-b1 at i/o %#x, irq %d ", - param.port, param.irq); - retval = b1pci_probe(¶m, pdev); - if (retval != 0) { - printk(kern_err "b1pci: no avm-b1 at i/o %#x, irq %d detected ", - param.port, param.irq); - } - } - return retval; -} - -static void b1pci_pci_remove(struct pci_dev *pdev) -{ -#ifdef config_isdn_drv_avmb1_b1pciv4 - avmcard *card = pci_get_drvdata(pdev); - - if (card->dma) - b1pciv4_remove(pdev); - else - b1pci_remove(pdev); -#else - b1pci_remove(pdev); -#endif -} - -static struct pci_driver b1pci_pci_driver = { - .name = "b1pci", - .id_table = b1pci_pci_tbl, - .probe = b1pci_pci_probe, - .remove = b1pci_pci_remove, -}; - -static struct capi_driver capi_driver_b1pci = { - .name = "b1pci", - .revision = "1.0", -}; -#ifdef config_isdn_drv_avmb1_b1pciv4 -static struct capi_driver capi_driver_b1pciv4 = { - .name = "b1pciv4", - .revision = "1.0", -}; -#endif - -static int __init b1pci_init(void) -{ - char *p; - char rev[32]; - int err; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, 32); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - - err = pci_register_driver(&b1pci_pci_driver); - if (!err) { - strlcpy(capi_driver_b1pci.revision, rev, 32); - register_capi_driver(&capi_driver_b1pci); -#ifdef config_isdn_drv_avmb1_b1pciv4 - 
strlcpy(capi_driver_b1pciv4.revision, rev, 32); - register_capi_driver(&capi_driver_b1pciv4); -#endif - printk(kern_info "b1pci: revision %s ", rev); - } - return err; -} - -static void __exit b1pci_exit(void) -{ - unregister_capi_driver(&capi_driver_b1pci); -#ifdef config_isdn_drv_avmb1_b1pciv4 - unregister_capi_driver(&capi_driver_b1pciv4); -#endif - pci_unregister_driver(&b1pci_pci_driver); -} - -module_init(b1pci_init); -module_exit(b1pci_exit); diff --git a/drivers/staging/isdn/avm/b1pcmcia.c b/drivers/staging/isdn/avm/b1pcmcia.c --- a/drivers/staging/isdn/avm/b1pcmcia.c +++ /dev/null -/* $id: b1pcmcia.c,v 1.1.2.2 2004/01/16 21:09:27 keil exp $ - * - * module for avm b1/m1/m2 pcmcia-card. - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <asm/io.h> -#include <linux/capi.h> -#include <linux/b1pcmcia.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" - -/* ------------------------------------------------------------- */ - -static char *revision = "$revision: 1.1.2.2 $"; - -/* ------------------------------------------------------------- */ - -module_description("capi4linux: driver for avm pcmcia cards"); -module_author("carsten paeth"); -module_license("gpl"); - -/* ------------------------------------------------------------- */ - -static void b1pcmcia_remove_ctr(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - - b1_reset(port); - b1_reset(port); - - detach_capi_ctr(ctrl); - free_irq(card->irq, card); - 
b1_free_card(card); -} - -/* ------------------------------------------------------------- */ - -static list_head(cards); - -static char *b1pcmcia_procinfo(struct capi_ctr *ctrl); - -static int b1pcmcia_add_card(unsigned int port, unsigned irq, - enum avmcardtype cardtype) -{ - avmctrl_info *cinfo; - avmcard *card; - char *cardname; - int retval; - - card = b1_alloc_card(1); - if (!card) { - printk(kern_warning "b1pcmcia: no memory. "); - retval = -enomem; - goto err; - } - cinfo = card->ctrlinfo; - - switch (cardtype) { - case avm_m1: sprintf(card->name, "m1-%x", port); break; - case avm_m2: sprintf(card->name, "m2-%x", port); break; - default: sprintf(card->name, "b1pcmcia-%x", port); break; - } - card->port = port; - card->irq = irq; - card->cardtype = cardtype; - - retval = request_irq(card->irq, b1_interrupt, irqf_shared, card->name, card); - if (retval) { - printk(kern_err "b1pcmcia: unable to get irq %d. ", - card->irq); - retval = -ebusy; - goto err_free; - } - b1_reset(card->port); - if ((retval = b1_detect(card->port, card->cardtype)) != 0) { - printk(kern_notice "b1pcmcia: no card at 0x%x (%d) ", - card->port, retval); - retval = -enodev; - goto err_free_irq; - } - b1_reset(card->port); - b1_getrevision(card); - - cinfo->capi_ctrl.owner = this_module; - cinfo->capi_ctrl.driver_name = "b1pcmcia"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = b1_register_appl; - cinfo->capi_ctrl.release_appl = b1_release_appl; - cinfo->capi_ctrl.send_message = b1_send_message; - cinfo->capi_ctrl.load_firmware = b1_load_firmware; - cinfo->capi_ctrl.reset_ctr = b1_reset_ctr; - cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo; - cinfo->capi_ctrl.proc_show = b1_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_err "b1pcmcia: attach controller failed. 
"); - goto err_free_irq; - } - switch (cardtype) { - case avm_m1: cardname = "m1"; break; - case avm_m2: cardname = "m2"; break; - default: cardname = "b1 pcmcia"; break; - } - - printk(kern_info "b1pcmcia: avm %s at i/o %#x, irq %d, revision %d ", - cardname, card->port, card->irq, card->revision); - - list_add(&card->list, &cards); - return cinfo->capi_ctrl.cnr; - -err_free_irq: - free_irq(card->irq, card); -err_free: - b1_free_card(card); -err: - return retval; -} - -/* ------------------------------------------------------------- */ - -static char *b1pcmcia_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? cinfo->card->revision : 0 - ); - return cinfo->infobuf; -} - -/* ------------------------------------------------------------- */ - -int b1pcmcia_addcard_b1(unsigned int port, unsigned irq) -{ - return b1pcmcia_add_card(port, irq, avm_b1pcmcia); -} - -int b1pcmcia_addcard_m1(unsigned int port, unsigned irq) -{ - return b1pcmcia_add_card(port, irq, avm_m1); -} - -int b1pcmcia_addcard_m2(unsigned int port, unsigned irq) -{ - return b1pcmcia_add_card(port, irq, avm_m2); -} - -int b1pcmcia_delcard(unsigned int port, unsigned irq) -{ - struct list_head *l; - avmcard *card; - - list_for_each(l, &cards) { - card = list_entry(l, avmcard, list); - if (card->port == port && card->irq == irq) { - b1pcmcia_remove_ctr(&card->ctrlinfo[0].capi_ctrl); - return 0; - } - } - return -esrch; -} - -export_symbol(b1pcmcia_addcard_b1); -export_symbol(b1pcmcia_addcard_m1); -export_symbol(b1pcmcia_addcard_m2); -export_symbol(b1pcmcia_delcard); - -static struct capi_driver capi_driver_b1pcmcia = { - .name = "b1pcmcia", - .revision = "1.0", -}; - -static int __init 
b1pcmcia_init(void) -{ - char *p; - char rev[32]; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, 32); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - strlcpy(capi_driver_b1pcmcia.revision, rev, 32); - register_capi_driver(&capi_driver_b1pcmcia); - printk(kern_info "b1pci: revision %s ", rev); - - return 0; -} - -static void __exit b1pcmcia_exit(void) -{ - unregister_capi_driver(&capi_driver_b1pcmcia); -} - -module_init(b1pcmcia_init); -module_exit(b1pcmcia_exit); diff --git a/drivers/staging/isdn/avm/c4.c b/drivers/staging/isdn/avm/c4.c --- a/drivers/staging/isdn/avm/c4.c +++ /dev/null -/* $id: c4.c,v 1.1.2.2 2004/01/16 21:09:27 keil exp $ - * - * module for avm c4 & c2 card. - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/pci.h> -#include <linux/capi.h> -#include <linux/kernelcapi.h> -#include <linux/init.h> -#include <linux/gfp.h> -#include <asm/io.h> -#include <linux/uaccess.h> -#include <linux/netdevice.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" - -#undef avm_c4_debug -#undef avm_c4_polldebug - -/* ------------------------------------------------------------- */ - -static char *revision = "$revision: 1.1.2.2 $"; - -/* ------------------------------------------------------------- */ - -static bool suppress_pollack; - -static const struct pci_device_id c4_pci_tbl[] = { - { pci_vendor_id_dec, pci_device_id_dec_21285, pci_vendor_id_avm, pci_device_id_avm_c4, 0, 0, (unsigned long)4 }, - 
{ pci_vendor_id_dec, pci_device_id_dec_21285, pci_vendor_id_avm, pci_device_id_avm_c2, 0, 0, (unsigned long)2 }, - { } /* terminating entry */ -}; - -module_device_table(pci, c4_pci_tbl); -module_description("capi4linux: driver for avm c2/c4 cards"); -module_author("carsten paeth"); -module_license("gpl"); -module_param(suppress_pollack, bool, 0); - -/* ------------------------------------------------------------- */ - -static void c4_dispatch_tx(avmcard *card); - -/* ------------------------------------------------------------- */ - -#define dc21285_dram_a0mr 0x40000000 -#define dc21285_dram_a1mr 0x40004000 -#define dc21285_dram_a2mr 0x40008000 -#define dc21285_dram_a3mr 0x4000c000 - -#define cas_offset 0x88 - -#define dc21285_armcsr_base 0x42000000 - -#define pci_out_int_status 0x30 -#define pci_out_int_mask 0x34 -#define mailbox_0 0x50 -#define mailbox_1 0x54 -#define mailbox_2 0x58 -#define mailbox_3 0x5c -#define doorbell 0x60 -#define doorbell_setup 0x64 - -#define chan_1_control 0x90 -#define chan_2_control 0xb0 -#define dram_timing 0x10c -#define dram_addr_size_0 0x110 -#define dram_addr_size_1 0x114 -#define dram_addr_size_2 0x118 -#define dram_addr_size_3 0x11c -#define sa_control 0x13c -#define xbus_cycle 0x148 -#define xbus_strobe 0x14c -#define dbell_pci_mask 0x150 -#define dbell_sa_mask 0x154 - -#define sdram_size 0x1000000 - -/* ------------------------------------------------------------- */ - -#define mbox_peek_poke mailbox_0 - -#define dbell_addr 0x01 -#define dbell_data 0x02 -#define dbell_rnwr 0x40 -#define dbell_init 0x80 - -/* ------------------------------------------------------------- */ - -#define mbox_up_addr mailbox_0 -#define mbox_up_len mailbox_1 -#define mbox_down_addr mailbox_2 -#define mbox_down_len mailbox_3 - -#define dbell_up_host 0x00000100 -#define dbell_up_arm 0x00000200 -#define dbell_down_host 0x00000400 -#define dbell_down_arm 0x00000800 -#define dbell_reset_host 0x40000000 -#define dbell_reset_arm 0x80000000 - -/* 
------------------------------------------------------------- */ - -#define dram_timing_def 0x001a01a5 -#define dram_ad_sz_def0 0x00000045 -#define dram_ad_sz_null 0x00000000 - -#define sa_ctl_allright 0x64aa0271 - -#define init_xbus_cycle 0x100016db -#define init_xbus_strobe 0xf1f1f1f1 - -/* ------------------------------------------------------------- */ - -#define reset_timeout (15 * hz) /* 15 sec */ -#define peek_poke_timeout (hz / 10) /* 0.1 sec */ - -/* ------------------------------------------------------------- */ - -#define c4outmeml(addr, value) writel(value, addr) -#define c4inmeml(addr) readl(addr) -#define c4outmemw(addr, value) writew(value, addr) -#define c4inmemw(addr) readw(addr) -#define c4outmemb(addr, value) writeb(value, addr) -#define c4inmemb(addr) readb(addr) - -/* ------------------------------------------------------------- */ - -static inline int wait_for_doorbell(avmcard *card, unsigned long t) -{ - unsigned long stop; - - stop = jiffies + t; - while (c4inmeml(card->mbase + doorbell) != 0xffffffff) { - if (!time_before(jiffies, stop)) - return -1; - mb(); - } - return 0; -} - -static int c4_poke(avmcard *card, unsigned long off, unsigned long value) -{ - - if (wait_for_doorbell(card, hz / 10) < 0) - return -1; - - c4outmeml(card->mbase + mbox_peek_poke, off); - c4outmeml(card->mbase + doorbell, dbell_addr); - - if (wait_for_doorbell(card, hz / 10) < 0) - return -1; - - c4outmeml(card->mbase + mbox_peek_poke, value); - c4outmeml(card->mbase + doorbell, dbell_data | dbell_addr); - - return 0; -} - -static int c4_peek(avmcard *card, unsigned long off, unsigned long *valuep) -{ - if (wait_for_doorbell(card, hz / 10) < 0) - return -1; - - c4outmeml(card->mbase + mbox_peek_poke, off); - c4outmeml(card->mbase + doorbell, dbell_rnwr | dbell_addr); - - if (wait_for_doorbell(card, hz / 10) < 0) - return -1; - - *valuep = c4inmeml(card->mbase + mbox_peek_poke); - - return 0; -} - -/* ------------------------------------------------------------- */ 
- -static int c4_load_t4file(avmcard *card, capiloaddatapart *t4file) -{ - u32 val; - unsigned char *dp; - u_int left; - u32 loadoff = 0; - - dp = t4file->data; - left = t4file->len; - while (left >= sizeof(u32)) { - if (t4file->user) { - if (copy_from_user(&val, dp, sizeof(val))) - return -efault; - } else { - memcpy(&val, dp, sizeof(val)); - } - if (c4_poke(card, loadoff, val)) { - printk(kern_err "%s: corrupted firmware file ? ", - card->name); - return -eio; - } - left -= sizeof(u32); - dp += sizeof(u32); - loadoff += sizeof(u32); - } - if (left) { - val = 0; - if (t4file->user) { - if (copy_from_user(&val, dp, left)) - return -efault; - } else { - memcpy(&val, dp, left); - } - if (c4_poke(card, loadoff, val)) { - printk(kern_err "%s: corrupted firmware file ? ", - card->name); - return -eio; - } - } - return 0; -} - -/* ------------------------------------------------------------- */ - -static inline void _put_byte(void **pp, u8 val) -{ - u8 *s = *pp; - *s++ = val; - *pp = s; -} - -static inline void _put_word(void **pp, u32 val) -{ - u8 *s = *pp; - *s++ = val & 0xff; - *s++ = (val >> 8) & 0xff; - *s++ = (val >> 16) & 0xff; - *s++ = (val >> 24) & 0xff; - *pp = s; -} - -static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len) -{ - unsigned i = len; - _put_word(pp, i); - while (i-- > 0) - _put_byte(pp, *dp++); -} - -static inline u8 _get_byte(void **pp) -{ - u8 *s = *pp; - u8 val; - val = *s++; - *pp = s; - return val; -} - -static inline u32 _get_word(void **pp) -{ - u8 *s = *pp; - u32 val; - val = *s++; - val |= (*s++ << 8); - val |= (*s++ << 16); - val |= (*s++ << 24); - *pp = s; - return val; -} - -static inline u32 _get_slice(void **pp, unsigned char *dp) -{ - unsigned int len, i; - - len = i = _get_word(pp); - while (i-- > 0) *dp++ = _get_byte(pp); - return len; -} - -/* ------------------------------------------------------------- */ - -static void c4_reset(avmcard *card) -{ - unsigned long stop; - - c4outmeml(card->mbase + doorbell, 
dbell_reset_arm); - - stop = jiffies + hz * 10; - while (c4inmeml(card->mbase + doorbell) != 0xffffffff) { - if (!time_before(jiffies, stop)) - return; - c4outmeml(card->mbase + doorbell, dbell_addr); - mb(); - } - - c4_poke(card, dc21285_armcsr_base + chan_1_control, 0); - c4_poke(card, dc21285_armcsr_base + chan_2_control, 0); -} - -/* ------------------------------------------------------------- */ - -static int c4_detect(avmcard *card) -{ - unsigned long stop, dummy; - - c4outmeml(card->mbase + pci_out_int_mask, 0x0c); - if (c4inmeml(card->mbase + pci_out_int_mask) != 0x0c) - return 1; - - c4outmeml(card->mbase + doorbell, dbell_reset_arm); - - stop = jiffies + hz * 10; - while (c4inmeml(card->mbase + doorbell) != 0xffffffff) { - if (!time_before(jiffies, stop)) - return 2; - c4outmeml(card->mbase + doorbell, dbell_addr); - mb(); - } - - c4_poke(card, dc21285_armcsr_base + chan_1_control, 0); - c4_poke(card, dc21285_armcsr_base + chan_2_control, 0); - - c4outmeml(card->mbase + mailbox_0, 0x55aa55aa); - if (c4inmeml(card->mbase + mailbox_0) != 0x55aa55aa) return 3; - - c4outmeml(card->mbase + mailbox_0, 0xaa55aa55); - if (c4inmeml(card->mbase + mailbox_0) != 0xaa55aa55) return 4; - - if (c4_poke(card, dc21285_armcsr_base + dbell_sa_mask, 0)) return 5; - if (c4_poke(card, dc21285_armcsr_base + dbell_pci_mask, 0)) return 6; - if (c4_poke(card, dc21285_armcsr_base + sa_control, sa_ctl_allright)) - return 7; - if (c4_poke(card, dc21285_armcsr_base + xbus_cycle, init_xbus_cycle)) - return 8; - if (c4_poke(card, dc21285_armcsr_base + xbus_strobe, init_xbus_strobe)) - return 8; - if (c4_poke(card, dc21285_armcsr_base + dram_timing, 0)) return 9; - - mdelay(1); - - if (c4_peek(card, dc21285_dram_a0mr, &dummy)) return 10; - if (c4_peek(card, dc21285_dram_a1mr, &dummy)) return 11; - if (c4_peek(card, dc21285_dram_a2mr, &dummy)) return 12; - if (c4_peek(card, dc21285_dram_a3mr, &dummy)) return 13; - - if (c4_poke(card, dc21285_dram_a0mr + cas_offset, 0)) return 14; - if 
(c4_poke(card, dc21285_dram_a1mr + cas_offset, 0)) return 15; - if (c4_poke(card, dc21285_dram_a2mr + cas_offset, 0)) return 16; - if (c4_poke(card, dc21285_dram_a3mr + cas_offset, 0)) return 17; - - mdelay(1); - - if (c4_poke(card, dc21285_armcsr_base + dram_timing, dram_timing_def)) - return 18; - - if (c4_poke(card, dc21285_armcsr_base + dram_addr_size_0, dram_ad_sz_def0)) - return 19; - if (c4_poke(card, dc21285_armcsr_base + dram_addr_size_1, dram_ad_sz_null)) - return 20; - if (c4_poke(card, dc21285_armcsr_base + dram_addr_size_2, dram_ad_sz_null)) - return 21; - if (c4_poke(card, dc21285_armcsr_base + dram_addr_size_3, dram_ad_sz_null)) - return 22; - - /* transputer test */ - - if (c4_poke(card, 0x000000, 0x11111111) - || c4_poke(card, 0x400000, 0x22222222) - || c4_poke(card, 0x800000, 0x33333333) - || c4_poke(card, 0xc00000, 0x44444444)) - return 23; - - if (c4_peek(card, 0x000000, &dummy) || dummy != 0x11111111 - || c4_peek(card, 0x400000, &dummy) || dummy != 0x22222222 - || c4_peek(card, 0x800000, &dummy) || dummy != 0x33333333 - || c4_peek(card, 0xc00000, &dummy) || dummy != 0x44444444) - return 24; - - if (c4_poke(card, 0x000000, 0x55555555) - || c4_poke(card, 0x400000, 0x66666666) - || c4_poke(card, 0x800000, 0x77777777) - || c4_poke(card, 0xc00000, 0x88888888)) - return 25; - - if (c4_peek(card, 0x000000, &dummy) || dummy != 0x55555555 - || c4_peek(card, 0x400000, &dummy) || dummy != 0x66666666 - || c4_peek(card, 0x800000, &dummy) || dummy != 0x77777777 - || c4_peek(card, 0xc00000, &dummy) || dummy != 0x88888888) - return 26; - - return 0; -} - -/* ------------------------------------------------------------- */ - -static void c4_dispatch_tx(avmcard *card) -{ - avmcard_dmainfo *dma = card->dma; - struct sk_buff *skb; - u8 cmd, subcmd; - u16 len; - u32 txlen; - void *p; - - - if (card->csr & dbell_down_arm) { /* tx busy */ - return; - } - - skb = skb_dequeue(&dma->send_queue); - if (!skb) { -#ifdef avm_c4_debug - printk(kern_debug "%s: tx underrun ", 
card->name); -#endif - return; - } - - len = capimsg_len(skb->data); - - if (len) { - cmd = capimsg_command(skb->data); - subcmd = capimsg_subcommand(skb->data); - - p = dma->sendbuf.dmabuf; - - if (capicmd(cmd, subcmd) == capi_data_b3_req) { - u16 dlen = capimsg_datalen(skb->data); - _put_byte(&p, send_data_b3_req); - _put_slice(&p, skb->data, len); - _put_slice(&p, skb->data + len, dlen); - } else { - _put_byte(&p, send_message); - _put_slice(&p, skb->data, len); - } - txlen = (u8 *)p - (u8 *)dma->sendbuf.dmabuf; -#ifdef avm_c4_debug - printk(kern_debug "%s: tx put msg len=%d ", card->name, txlen); -#endif - } else { - txlen = skb->len - 2; -#ifdef avm_c4_polldebug - if (skb->data[2] == send_pollack) - printk(kern_info "%s: ack to c4 ", card->name); -#endif -#ifdef avm_c4_debug - printk(kern_debug "%s: tx put 0x%x len=%d ", - card->name, skb->data[2], txlen); -#endif - skb_copy_from_linear_data_offset(skb, 2, dma->sendbuf.dmabuf, - skb->len - 2); - } - txlen = (txlen + 3) & ~3; - - c4outmeml(card->mbase + mbox_down_addr, dma->sendbuf.dmaaddr); - c4outmeml(card->mbase + mbox_down_len, txlen); - - card->csr |= dbell_down_arm; - - c4outmeml(card->mbase + doorbell, dbell_down_arm); - - dev_kfree_skb_any(skb); -} - -/* ------------------------------------------------------------- */ - -static void queue_pollack(avmcard *card) -{ - struct sk_buff *skb; - void *p; - - skb = alloc_skb(3, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost poll ack ", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_pollack); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - skb_queue_tail(&card->dma->send_queue, skb); - c4_dispatch_tx(card); -} - -/* ------------------------------------------------------------- */ - -static void c4_handle_rx(avmcard *card) -{ - avmcard_dmainfo *dma = card->dma; - struct capi_ctr *ctrl; - avmctrl_info *cinfo; - struct sk_buff *skb; - void *p = dma->recvbuf.dmabuf; - u32 applid, msglen, 
datab3len, ncci, windowsize; - u8 b1cmd = _get_byte(&p); - u32 cidx; - - -#ifdef avm_c4_debug - printk(kern_debug "%s: rx 0x%x len=%lu ", card->name, - b1cmd, (unsigned long)dma->recvlen); -#endif - - switch (b1cmd) { - case receive_data_b3_ind: - - applid = (unsigned) _get_word(&p); - msglen = _get_slice(&p, card->msgbuf); - datab3len = _get_slice(&p, card->databuf); - cidx = capimsg_controller(card->msgbuf)-card->cardnr; - if (cidx >= card->nlogcontr) cidx = 0; - ctrl = &card->ctrlinfo[cidx].capi_ctrl; - - if (msglen < 30) { /* not capi 64bit */ - memset(card->msgbuf + msglen, 0, 30 - msglen); - msglen = 30; - capimsg_setlen(card->msgbuf, 30); - } - if (!(skb = alloc_skb(datab3len + msglen, gfp_atomic))) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - skb_put_data(skb, card->databuf, datab3len); - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_message: - - applid = (unsigned) _get_word(&p); - msglen = _get_slice(&p, card->msgbuf); - cidx = capimsg_controller(card->msgbuf)-card->cardnr; - if (cidx >= card->nlogcontr) cidx = 0; - cinfo = &card->ctrlinfo[cidx]; - ctrl = &card->ctrlinfo[cidx].capi_ctrl; - - if (!(skb = alloc_skb(msglen, gfp_atomic))) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - if (capimsg_cmd(skb->data) == capi_data_b3_conf) - capilib_data_b3_conf(&cinfo->ncci_head, applid, - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_new_ncci: - - applid = _get_word(&p); - ncci = _get_word(&p); - windowsize = _get_word(&p); - cidx = (ncci & 0x7f) - card->cardnr; - if (cidx >= card->nlogcontr) cidx = 0; - - capilib_new_ncci(&card->ctrlinfo[cidx].ncci_head, applid, ncci, windowsize); - - break; - - case receive_free_ncci: - - applid = _get_word(&p); - ncci = _get_word(&p); - - if (ncci != 0xffffffff) { 
- cidx = (ncci & 0x7f) - card->cardnr; - if (cidx >= card->nlogcontr) cidx = 0; - capilib_free_ncci(&card->ctrlinfo[cidx].ncci_head, applid, ncci); - } - break; - - case receive_start: -#ifdef avm_c4_polldebug - printk(kern_info "%s: poll from c4 ", card->name); -#endif - if (!suppress_pollack) - queue_pollack(card); - for (cidx = 0; cidx < card->nr_controllers; cidx++) { - ctrl = &card->ctrlinfo[cidx].capi_ctrl; - capi_ctr_resume_output(ctrl); - } - break; - - case receive_stop: - for (cidx = 0; cidx < card->nr_controllers; cidx++) { - ctrl = &card->ctrlinfo[cidx].capi_ctrl; - capi_ctr_suspend_output(ctrl); - } - break; - - case receive_init: - - cidx = card->nlogcontr; - if (cidx >= card->nr_controllers) { - printk(kern_err "%s: card with %d controllers ?? ", - card->name, cidx + 1); - break; - } - card->nlogcontr++; - cinfo = &card->ctrlinfo[cidx]; - ctrl = &cinfo->capi_ctrl; - cinfo->versionlen = _get_slice(&p, cinfo->versionbuf); - b1_parse_version(cinfo); - printk(kern_info "%s: %s-card (%s) now active ", - card->name, - cinfo->version[ver_cardtype], - cinfo->version[ver_driver]); - capi_ctr_ready(&cinfo->capi_ctrl); - break; - - case receive_task_ready: - applid = (unsigned) _get_word(&p); - msglen = _get_slice(&p, card->msgbuf); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: task %d "%s" ready. ", - card->name, applid, card->msgbuf); - break; - - case receive_debugmsg: - msglen = _get_slice(&p, card->msgbuf); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: debug: %s ", card->name, card->msgbuf); - break; - - default: - printk(kern_err "%s: c4_interrupt: 0x%x ??? 
", - card->name, b1cmd); - return; - } -} - -/* ------------------------------------------------------------- */ - -static irqreturn_t c4_handle_interrupt(avmcard *card) -{ - unsigned long flags; - u32 status; - - spin_lock_irqsave(&card->lock, flags); - status = c4inmeml(card->mbase + doorbell); - - if (status & dbell_reset_host) { - u_int i; - c4outmeml(card->mbase + pci_out_int_mask, 0x0c); - spin_unlock_irqrestore(&card->lock, flags); - if (card->nlogcontr == 0) - return irq_handled; - printk(kern_err "%s: unexpected reset ", card->name); - for (i = 0; i < card->nr_controllers; i++) { - avmctrl_info *cinfo = &card->ctrlinfo[i]; - memset(cinfo->version, 0, sizeof(cinfo->version)); - spin_lock_irqsave(&card->lock, flags); - capilib_release(&cinfo->ncci_head); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_down(&cinfo->capi_ctrl); - } - card->nlogcontr = 0; - return irq_handled; - } - - status &= (dbell_up_host | dbell_down_host); - if (!status) { - spin_unlock_irqrestore(&card->lock, flags); - return irq_handled; - } - c4outmeml(card->mbase + doorbell, status); - - if ((status & dbell_up_host) != 0) { - card->dma->recvlen = c4inmeml(card->mbase + mbox_up_len); - c4outmeml(card->mbase + mbox_up_len, 0); - c4_handle_rx(card); - card->dma->recvlen = 0; - c4outmeml(card->mbase + mbox_up_len, card->dma->recvbuf.size); - c4outmeml(card->mbase + doorbell, dbell_up_arm); - } - - if ((status & dbell_down_host) != 0) { - card->csr &= ~dbell_down_arm; - c4_dispatch_tx(card); - } else if (card->csr & dbell_down_host) { - if (c4inmeml(card->mbase + mbox_down_len) == 0) { - card->csr &= ~dbell_down_arm; - c4_dispatch_tx(card); - } - } - spin_unlock_irqrestore(&card->lock, flags); - return irq_handled; -} - -static irqreturn_t c4_interrupt(int interrupt, void *devptr) -{ - avmcard *card = devptr; - - return c4_handle_interrupt(card); -} - -/* ------------------------------------------------------------- */ - -static void c4_send_init(avmcard *card) -{ - struct sk_buff 
*skb; - void *p; - unsigned long flags; - - skb = alloc_skb(15, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost register appl. ", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_init); - _put_word(&p, capi_maxappl); - _put_word(&p, avm_ncci_per_channel * 30); - _put_word(&p, card->cardnr - 1); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - skb_queue_tail(&card->dma->send_queue, skb); - spin_lock_irqsave(&card->lock, flags); - c4_dispatch_tx(card); - spin_unlock_irqrestore(&card->lock, flags); -} - -static int queue_sendconfigword(avmcard *card, u32 val) -{ - struct sk_buff *skb; - unsigned long flags; - void *p; - - skb = alloc_skb(3 + 4, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, send config ", - card->name); - return -enomem; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_config); - _put_word(&p, val); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - skb_queue_tail(&card->dma->send_queue, skb); - spin_lock_irqsave(&card->lock, flags); - c4_dispatch_tx(card); - spin_unlock_irqrestore(&card->lock, flags); - return 0; -} - -static int queue_sendconfig(avmcard *card, char cval[4]) -{ - struct sk_buff *skb; - unsigned long flags; - void *p; - - skb = alloc_skb(3 + 4, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, send config ", - card->name); - return -enomem; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_config); - _put_byte(&p, cval[0]); - _put_byte(&p, cval[1]); - _put_byte(&p, cval[2]); - _put_byte(&p, cval[3]); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - skb_queue_tail(&card->dma->send_queue, skb); - - spin_lock_irqsave(&card->lock, flags); - c4_dispatch_tx(card); - spin_unlock_irqrestore(&card->lock, flags); - return 0; -} - -static int c4_send_config(avmcard *card, capiloaddatapart *config) -{ - u8 val[4]; - unsigned char *dp; - u_int left; - int retval; - - if ((retval = 
queue_sendconfigword(card, 1)) != 0) - return retval; - if ((retval = queue_sendconfigword(card, config->len)) != 0) - return retval; - - dp = config->data; - left = config->len; - while (left >= sizeof(u32)) { - if (config->user) { - if (copy_from_user(val, dp, sizeof(val))) - return -efault; - } else { - memcpy(val, dp, sizeof(val)); - } - if ((retval = queue_sendconfig(card, val)) != 0) - return retval; - left -= sizeof(val); - dp += sizeof(val); - } - if (left) { - memset(val, 0, sizeof(val)); - if (config->user) { - if (copy_from_user(&val, dp, left)) - return -efault; - } else { - memcpy(&val, dp, left); - } - if ((retval = queue_sendconfig(card, val)) != 0) - return retval; - } - - return 0; -} - -static int c4_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - int retval; - - if ((retval = c4_load_t4file(card, &data->firmware))) { - printk(kern_err "%s: failed to load t4file!! ", - card->name); - c4_reset(card); - return retval; - } - - card->csr = 0; - c4outmeml(card->mbase + mbox_up_len, 0); - c4outmeml(card->mbase + mbox_down_len, 0); - c4outmeml(card->mbase + doorbell, dbell_init); - mdelay(1); - c4outmeml(card->mbase + doorbell, - dbell_up_host | dbell_down_host | dbell_reset_host); - - c4outmeml(card->mbase + pci_out_int_mask, 0x08); - - card->dma->recvlen = 0; - c4outmeml(card->mbase + mbox_up_addr, card->dma->recvbuf.dmaaddr); - c4outmeml(card->mbase + mbox_up_len, card->dma->recvbuf.size); - c4outmeml(card->mbase + doorbell, dbell_up_arm); - - if (data->configuration.len > 0 && data->configuration.data) { - retval = c4_send_config(card, &data->configuration); - if (retval) { - printk(kern_err "%s: failed to set config!! 
", - card->name); - c4_reset(card); - return retval; - } - } - - c4_send_init(card); - - return 0; -} - - -static void c4_reset_ctr(struct capi_ctr *ctrl) -{ - avmcard *card = ((avmctrl_info *)(ctrl->driverdata))->card; - avmctrl_info *cinfo; - u_int i; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - - c4_reset(card); - - spin_unlock_irqrestore(&card->lock, flags); - - for (i = 0; i < card->nr_controllers; i++) { - cinfo = &card->ctrlinfo[i]; - memset(cinfo->version, 0, sizeof(cinfo->version)); - capi_ctr_down(&cinfo->capi_ctrl); - } - card->nlogcontr = 0; -} - -static void c4_remove(struct pci_dev *pdev) -{ - avmcard *card = pci_get_drvdata(pdev); - avmctrl_info *cinfo; - u_int i; - - if (!card) - return; - - c4_reset(card); - - for (i = 0; i < card->nr_controllers; i++) { - cinfo = &card->ctrlinfo[i]; - detach_capi_ctr(&cinfo->capi_ctrl); - } - - free_irq(card->irq, card); - iounmap(card->mbase); - release_region(card->port, avmb1_portlen); - avmcard_dma_free(card->dma); - pci_set_drvdata(pdev, null); - b1_free_card(card); -} - -/* ------------------------------------------------------------- */ - - -static void c4_register_appl(struct capi_ctr *ctrl, - u16 appl, - capi_register_params *rp) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - struct sk_buff *skb; - int want = rp->level3cnt; - unsigned long flags; - int nconn; - void *p; - - if (ctrl->cnr == card->cardnr) { - - if (want > 0) nconn = want; - else nconn = ctrl->profile.nbchannel * 4 * -want; - if (nconn == 0) nconn = ctrl->profile.nbchannel * 4; - - skb = alloc_skb(23, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost register appl. 
", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_register); - _put_word(&p, appl); - _put_word(&p, 1024 * (nconn + 1)); - _put_word(&p, nconn); - _put_word(&p, rp->datablkcnt); - _put_word(&p, rp->datablklen); - skb_put(skb, (u8 *)p - (u8 *)skb->data); - - skb_queue_tail(&card->dma->send_queue, skb); - - spin_lock_irqsave(&card->lock, flags); - c4_dispatch_tx(card); - spin_unlock_irqrestore(&card->lock, flags); - } -} - -/* ------------------------------------------------------------- */ - -static void c4_release_appl(struct capi_ctr *ctrl, u16 appl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned long flags; - struct sk_buff *skb; - void *p; - - spin_lock_irqsave(&card->lock, flags); - capilib_release_appl(&cinfo->ncci_head, appl); - spin_unlock_irqrestore(&card->lock, flags); - - if (ctrl->cnr == card->cardnr) { - skb = alloc_skb(7, gfp_atomic); - if (!skb) { - printk(kern_crit "%s: no memory, lost release appl. 
", - card->name); - return; - } - p = skb->data; - _put_byte(&p, 0); - _put_byte(&p, 0); - _put_byte(&p, send_release); - _put_word(&p, appl); - - skb_put(skb, (u8 *)p - (u8 *)skb->data); - skb_queue_tail(&card->dma->send_queue, skb); - spin_lock_irqsave(&card->lock, flags); - c4_dispatch_tx(card); - spin_unlock_irqrestore(&card->lock, flags); - } -} - -/* ------------------------------------------------------------- */ - - -static u16 c4_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - u16 retval = capi_noerror; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - if (capimsg_cmd(skb->data) == capi_data_b3_req) { - retval = capilib_data_b3_req(&cinfo->ncci_head, - capimsg_appid(skb->data), - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - } - if (retval == capi_noerror) { - skb_queue_tail(&card->dma->send_queue, skb); - c4_dispatch_tx(card); - } - spin_unlock_irqrestore(&card->lock, flags); - return retval; -} - -/* ------------------------------------------------------------- */ - -static char *c4_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? 
cinfo->card->membase : 0 - ); - return cinfo->infobuf; -} - -static int c4_proc_show(struct seq_file *m, void *v) -{ - struct capi_ctr *ctrl = m->private; - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - u8 flag; - char *s; - - seq_printf(m, "%-16s %s ", "name", card->name); - seq_printf(m, "%-16s 0x%x ", "io", card->port); - seq_printf(m, "%-16s %d ", "irq", card->irq); - seq_printf(m, "%-16s 0x%lx ", "membase", card->membase); - switch (card->cardtype) { - case avm_b1isa: s = "b1 isa"; break; - case avm_b1pci: s = "b1 pci"; break; - case avm_b1pcmcia: s = "b1 pcmcia"; break; - case avm_m1: s = "m1"; break; - case avm_m2: s = "m2"; break; - case avm_t1isa: s = "t1 isa (hema)"; break; - case avm_t1pci: s = "t1 pci"; break; - case avm_c4: s = "c4"; break; - case avm_c2: s = "c2"; break; - default: s = "???"; break; - } - seq_printf(m, "%-16s %s ", "type", s); - if ((s = cinfo->version[ver_driver]) != null) - seq_printf(m, "%-16s %s ", "ver_driver", s); - if ((s = cinfo->version[ver_cardtype]) != null) - seq_printf(m, "%-16s %s ", "ver_cardtype", s); - if ((s = cinfo->version[ver_serial]) != null) - seq_printf(m, "%-16s %s ", "ver_serial", s); - - if (card->cardtype != avm_m1) { - flag = ((u8 *)(ctrl->profile.manu))[3]; - if (flag) - seq_printf(m, "%-16s%s%s%s%s%s%s%s ", - "protocol", - (flag & 0x01) ? " dss1" : "", - (flag & 0x02) ? " ct1" : "", - (flag & 0x04) ? " vn3" : "", - (flag & 0x08) ? " ni1" : "", - (flag & 0x10) ? " austel" : "", - (flag & 0x20) ? " ess" : "", - (flag & 0x40) ? " 1tr6" : "" - ); - } - if (card->cardtype != avm_m1) { - flag = ((u8 *)(ctrl->profile.manu))[5]; - if (flag) - seq_printf(m, "%-16s%s%s%s%s ", - "linetype", - (flag & 0x01) ? " point to point" : "", - (flag & 0x02) ? " point to multipoint" : "", - (flag & 0x08) ? " leased line without d-channel" : "", - (flag & 0x04) ? 
" leased line with d-channel" : "" - ); - } - seq_printf(m, "%-16s %s ", "cardname", cinfo->cardname); - - return 0; -} - -/* ------------------------------------------------------------- */ - -static int c4_add_card(struct capicardparams *p, struct pci_dev *dev, - int nr_controllers) -{ - avmcard *card; - avmctrl_info *cinfo; - int retval; - int i; - - card = b1_alloc_card(nr_controllers); - if (!card) { - printk(kern_warning "c4: no memory. "); - retval = -enomem; - goto err; - } - card->dma = avmcard_dma_alloc("c4", dev, 2048 + 128, 2048 + 128); - if (!card->dma) { - printk(kern_warning "c4: no memory. "); - retval = -enomem; - goto err_free; - } - - sprintf(card->name, "c%d-%x", nr_controllers, p->port); - card->port = p->port; - card->irq = p->irq; - card->membase = p->membase; - card->cardtype = (nr_controllers == 4) ? avm_c4 : avm_c2; - - if (!request_region(card->port, avmb1_portlen, card->name)) { - printk(kern_warning "c4: ports 0x%03x-0x%03x in use. ", - card->port, card->port + avmb1_portlen); - retval = -ebusy; - goto err_free_dma; - } - - card->mbase = ioremap(card->membase, 128); - if (card->mbase == null) { - printk(kern_notice "c4: can't remap memory at 0x%lx ", - card->membase); - retval = -eio; - goto err_release_region; - } - - retval = c4_detect(card); - if (retval != 0) { - printk(kern_notice "c4: no card at 0x%x error(%d) ", - card->port, retval); - retval = -eio; - goto err_unmap; - } - c4_reset(card); - - retval = request_irq(card->irq, c4_interrupt, irqf_shared, card->name, card); - if (retval) { - printk(kern_err "c4: unable to get irq %d. 
", card->irq); - retval = -ebusy; - goto err_unmap; - } - - for (i = 0; i < nr_controllers; i++) { - cinfo = &card->ctrlinfo[i]; - cinfo->capi_ctrl.owner = this_module; - cinfo->capi_ctrl.driver_name = "c4"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = c4_register_appl; - cinfo->capi_ctrl.release_appl = c4_release_appl; - cinfo->capi_ctrl.send_message = c4_send_message; - cinfo->capi_ctrl.load_firmware = c4_load_firmware; - cinfo->capi_ctrl.reset_ctr = c4_reset_ctr; - cinfo->capi_ctrl.procinfo = c4_procinfo; - cinfo->capi_ctrl.proc_show = c4_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_err "c4: attach controller failed (%d). ", i); - for (i--; i >= 0; i--) { - cinfo = &card->ctrlinfo[i]; - detach_capi_ctr(&cinfo->capi_ctrl); - } - goto err_free_irq; - } - if (i == 0) - card->cardnr = cinfo->capi_ctrl.cnr; - } - - printk(kern_info "c4: avm c%d at i/o %#x, irq %d, mem %#lx ", - nr_controllers, card->port, card->irq, - card->membase); - pci_set_drvdata(dev, card); - return 0; - -err_free_irq: - free_irq(card->irq, card); -err_unmap: - iounmap(card->mbase); -err_release_region: - release_region(card->port, avmb1_portlen); -err_free_dma: - avmcard_dma_free(card->dma); -err_free: - b1_free_card(card); -err: - return retval; -} - -/* ------------------------------------------------------------- */ - -static int c4_probe(struct pci_dev *dev, const struct pci_device_id *ent) -{ - int nr = ent->driver_data; - int retval = 0; - struct capicardparams param; - - if (pci_enable_device(dev) < 0) { - printk(kern_err "c4: failed to enable avm-c%d ", nr); - return -enodev; - } - pci_set_master(dev); - - param.port = pci_resource_start(dev, 1); - param.irq = dev->irq; - param.membase = pci_resource_start(dev, 0); - - printk(kern_info "c4: pci bios reports avm-c%d at i/o %#x, irq %d, mem %#x ", - nr, param.port, param.irq, param.membase); - - retval = c4_add_card(¶m, 
dev, nr); - if (retval != 0) { - printk(kern_err "c4: no avm-c%d at i/o %#x, irq %d detected, mem %#x ", - nr, param.port, param.irq, param.membase); - pci_disable_device(dev); - return -enodev; - } - return 0; -} - -static struct pci_driver c4_pci_driver = { - .name = "c4", - .id_table = c4_pci_tbl, - .probe = c4_probe, - .remove = c4_remove, -}; - -static struct capi_driver capi_driver_c2 = { - .name = "c2", - .revision = "1.0", -}; - -static struct capi_driver capi_driver_c4 = { - .name = "c4", - .revision = "1.0", -}; - -static int __init c4_init(void) -{ - char *p; - char rev[32]; - int err; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, 32); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - err = pci_register_driver(&c4_pci_driver); - if (!err) { - strlcpy(capi_driver_c2.revision, rev, 32); - register_capi_driver(&capi_driver_c2); - strlcpy(capi_driver_c4.revision, rev, 32); - register_capi_driver(&capi_driver_c4); - printk(kern_info "c4: revision %s ", rev); - } - return err; -} - -static void __exit c4_exit(void) -{ - unregister_capi_driver(&capi_driver_c2); - unregister_capi_driver(&capi_driver_c4); - pci_unregister_driver(&c4_pci_driver); -} - -module_init(c4_init); -module_exit(c4_exit); diff --git a/drivers/staging/isdn/avm/t1isa.c b/drivers/staging/isdn/avm/t1isa.c --- a/drivers/staging/isdn/avm/t1isa.c +++ /dev/null -/* $id: t1isa.c,v 1.1.2.3 2004/02/10 01:07:12 keil exp $ - * - * module for avm t1 hema-card. - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. 
- * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/capi.h> -#include <linux/netdevice.h> -#include <linux/kernelcapi.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/gfp.h> -#include <asm/io.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" - -/* ------------------------------------------------------------- */ - -static char *revision = "$revision: 1.1.2.3 $"; - -/* ------------------------------------------------------------- */ - -module_description("capi4linux: driver for avm t1 hema isa card"); -module_author("carsten paeth"); -module_license("gpl"); - -/* ------------------------------------------------------------- */ - -static int hema_irq_table[16] = -{0, - 0, - 0, - 0x80, /* irq 3 */ - 0, - 0x90, /* irq 5 */ - 0, - 0xa0, /* irq 7 */ - 0, - 0xb0, /* irq 9 */ - 0xc0, /* irq 10 */ - 0xd0, /* irq 11 */ - 0xe0, /* irq 12 */ - 0, - 0, - 0xf0, /* irq 15 */ -}; - -static int t1_detectandinit(unsigned int base, unsigned irq, int cardnr) -{ - unsigned char cregs[8]; - unsigned char reverse_cardnr; - unsigned char dummy; - int i; - - reverse_cardnr = ((cardnr & 0x01) << 3) | ((cardnr & 0x02) << 1) - | ((cardnr & 0x04) >> 1) | ((cardnr & 0x08) >> 3); - cregs[0] = (hema_version_id << 4) | (reverse_cardnr & 0xf); - cregs[1] = 0x00; /* fast & slow link connected to con1 */ - cregs[2] = 0x05; /* fast link 20mbit, slow link 20 mbit */ - cregs[3] = 0; - cregs[4] = 0x11; /* zero wait state */ - cregs[5] = hema_irq_table[irq & 0xf]; - cregs[6] = 0; - cregs[7] = 0; - - /* - * no one else should use the isa bus in this moment, - * but no function there to prevent this :-( - * save_flags(flags); cli(); - */ - - /* board reset */ - t1outp(base, t1_resetboard, 0xf); - mdelay(100); - dummy = t1inp(base, t1_fastlink + 
t1_outstat); /* first read */ - - /* write config */ - dummy = (base >> 4) & 0xff; - for (i = 1; i <= 0xf; i++) t1outp(base, i, dummy); - t1outp(base, hema_pal_id & 0xf, dummy); - t1outp(base, hema_pal_id >> 4, cregs[0]); - for (i = 1; i < 7; i++) t1outp(base, 0, cregs[i]); - t1outp(base, ((base >> 4)) & 0x3, cregs[7]); - /* restore_flags(flags); */ - - mdelay(100); - t1outp(base, t1_fastlink + t1_resetlink, 0); - t1outp(base, t1_slowlink + t1_resetlink, 0); - mdelay(10); - t1outp(base, t1_fastlink + t1_resetlink, 1); - t1outp(base, t1_slowlink + t1_resetlink, 1); - mdelay(100); - t1outp(base, t1_fastlink + t1_resetlink, 0); - t1outp(base, t1_slowlink + t1_resetlink, 0); - mdelay(10); - t1outp(base, t1_fastlink + t1_analyse, 0); - mdelay(5); - t1outp(base, t1_slowlink + t1_analyse, 0); - - if (t1inp(base, t1_fastlink + t1_outstat) != 0x1) /* tx empty */ - return 1; - if (t1inp(base, t1_fastlink + t1_instat) != 0x0) /* rx empty */ - return 2; - if (t1inp(base, t1_fastlink + t1_irqenable) != 0x0) - return 3; - if ((t1inp(base, t1_fastlink + t1_fifostat) & 0xf0) != 0x70) - return 4; - if ((t1inp(base, t1_fastlink + t1_irqmaster) & 0x0e) != 0) - return 5; - if ((t1inp(base, t1_fastlink + t1_ident) & 0x7d) != 1) - return 6; - if (t1inp(base, t1_slowlink + t1_outstat) != 0x1) /* tx empty */ - return 7; - if ((t1inp(base, t1_slowlink + t1_irqmaster) & 0x0e) != 0) - return 8; - if ((t1inp(base, t1_slowlink + t1_ident) & 0x7d) != 0) - return 9; - return 0; -} - -static irqreturn_t t1isa_interrupt(int interrupt, void *devptr) -{ - avmcard *card = devptr; - avmctrl_info *cinfo = &card->ctrlinfo[0]; - struct capi_ctr *ctrl = &cinfo->capi_ctrl; - unsigned char b1cmd; - struct sk_buff *skb; - - unsigned applid; - unsigned msglen; - unsigned datab3len; - unsigned ncci; - unsigned windowsize; - unsigned long flags; - - spin_lock_irqsave(&card->lock, flags); - - while (b1_rx_full(card->port)) { - - b1cmd = b1_get_byte(card->port); - - switch (b1cmd) { - - case receive_data_b3_ind: 
- - applid = (unsigned) b1_get_word(card->port); - msglen = t1_get_slice(card->port, card->msgbuf); - datab3len = t1_get_slice(card->port, card->databuf); - spin_unlock_irqrestore(&card->lock, flags); - - if (msglen < 30) { /* not capi 64bit */ - memset(card->msgbuf + msglen, 0, 30 - msglen); - msglen = 30; - capimsg_setlen(card->msgbuf, 30); - } - if (!(skb = alloc_skb(datab3len + msglen, gfp_atomic))) { - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - skb_put_data(skb, card->databuf, datab3len); - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_message: - - applid = (unsigned) b1_get_word(card->port); - msglen = t1_get_slice(card->port, card->msgbuf); - if (!(skb = alloc_skb(msglen, gfp_atomic))) { - spin_unlock_irqrestore(&card->lock, flags); - printk(kern_err "%s: incoming packet dropped ", - card->name); - } else { - skb_put_data(skb, card->msgbuf, msglen); - if (capimsg_cmd(skb->data) == capi_data_b3) - capilib_data_b3_conf(&cinfo->ncci_head, applid, - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_handle_message(ctrl, applid, skb); - } - break; - - case receive_new_ncci: - - applid = b1_get_word(card->port); - ncci = b1_get_word(card->port); - windowsize = b1_get_word(card->port); - capilib_new_ncci(&cinfo->ncci_head, applid, ncci, windowsize); - spin_unlock_irqrestore(&card->lock, flags); - break; - - case receive_free_ncci: - - applid = b1_get_word(card->port); - ncci = b1_get_word(card->port); - if (ncci != 0xffffffff) - capilib_free_ncci(&cinfo->ncci_head, applid, ncci); - spin_unlock_irqrestore(&card->lock, flags); - break; - - case receive_start: - b1_put_byte(card->port, send_pollack); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_resume_output(ctrl); - break; - - case receive_stop: - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_suspend_output(ctrl); - break; - - case 
receive_init: - - cinfo->versionlen = t1_get_slice(card->port, cinfo->versionbuf); - spin_unlock_irqrestore(&card->lock, flags); - b1_parse_version(cinfo); - printk(kern_info "%s: %s-card (%s) now active ", - card->name, - cinfo->version[ver_cardtype], - cinfo->version[ver_driver]); - capi_ctr_ready(ctrl); - break; - - case receive_task_ready: - applid = (unsigned) b1_get_word(card->port); - msglen = t1_get_slice(card->port, card->msgbuf); - spin_unlock_irqrestore(&card->lock, flags); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: task %d "%s" ready. ", - card->name, applid, card->msgbuf); - break; - - case receive_debugmsg: - msglen = t1_get_slice(card->port, card->msgbuf); - spin_unlock_irqrestore(&card->lock, flags); - card->msgbuf[msglen] = 0; - while (msglen > 0 - && (card->msgbuf[msglen - 1] == ' ' - || card->msgbuf[msglen - 1] == ' ')) { - card->msgbuf[msglen - 1] = 0; - msglen--; - } - printk(kern_info "%s: debug: %s ", card->name, card->msgbuf); - break; - - - case 0xff: - spin_unlock_irqrestore(&card->lock, flags); - printk(kern_err "%s: card reseted ? ", card->name); - return irq_handled; - default: - spin_unlock_irqrestore(&card->lock, flags); - printk(kern_err "%s: b1_interrupt: 0x%x ??? ", - card->name, b1cmd); - return irq_none; - } - } - return irq_handled; -} - -/* ------------------------------------------------------------- */ - -static int t1isa_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - int retval; - - t1_disable_irq(port); - b1_reset(port); - - if ((retval = b1_load_t4file(card, &data->firmware))) { - b1_reset(port); - printk(kern_err "%s: failed to load t4file!! 
", - card->name); - return retval; - } - - if (data->configuration.len > 0 && data->configuration.data) { - if ((retval = b1_load_config(card, &data->configuration))) { - b1_reset(port); - printk(kern_err "%s: failed to load config!! ", - card->name); - return retval; - } - } - - if (!b1_loaded(card)) { - printk(kern_err "%s: failed to load t4file. ", card->name); - return -eio; - } - - spin_lock_irqsave(&card->lock, flags); - b1_setinterrupt(port, card->irq, card->cardtype); - b1_put_byte(port, send_init); - b1_put_word(port, capi_maxappl); - b1_put_word(port, avm_ncci_per_channel * 30); - b1_put_word(port, ctrl->cnr - 1); - spin_unlock_irqrestore(&card->lock, flags); - - return 0; -} - -static void t1isa_reset_ctr(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - - t1_disable_irq(port); - b1_reset(port); - b1_reset(port); - - memset(cinfo->version, 0, sizeof(cinfo->version)); - spin_lock_irqsave(&card->lock, flags); - capilib_release(&cinfo->ncci_head); - spin_unlock_irqrestore(&card->lock, flags); - capi_ctr_down(ctrl); -} - -static void t1isa_remove(struct pci_dev *pdev) -{ - avmctrl_info *cinfo = pci_get_drvdata(pdev); - avmcard *card; - - if (!cinfo) - return; - - card = cinfo->card; - - t1_disable_irq(card->port); - b1_reset(card->port); - b1_reset(card->port); - t1_reset(card->port); - - detach_capi_ctr(&cinfo->capi_ctrl); - free_irq(card->irq, card); - release_region(card->port, avmb1_portlen); - b1_free_card(card); -} - -/* ------------------------------------------------------------- */ - -static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); -static char *t1isa_procinfo(struct capi_ctr *ctrl); - -static int t1isa_probe(struct pci_dev *pdev, int cardnr) -{ - avmctrl_info *cinfo; - avmcard *card; - int retval; - - card = b1_alloc_card(1); - if (!card) { - printk(kern_warning "t1isa: no memory. 
"); - retval = -enomem; - goto err; - } - - cinfo = card->ctrlinfo; - card->port = pci_resource_start(pdev, 0); - card->irq = pdev->irq; - card->cardtype = avm_t1isa; - card->cardnr = cardnr; - sprintf(card->name, "t1isa-%x", card->port); - - if (!(((card->port & 0x7) == 0) && ((card->port & 0x30) != 0x30))) { - printk(kern_warning "t1isa: invalid port 0x%x. ", card->port); - retval = -einval; - goto err_free; - } - if (hema_irq_table[card->irq & 0xf] == 0) { - printk(kern_warning "t1isa: irq %d not valid. ", card->irq); - retval = -einval; - goto err_free; - } - if (!request_region(card->port, avmb1_portlen, card->name)) { - printk(kern_info "t1isa: ports 0x%03x-0x%03x in use. ", - card->port, card->port + avmb1_portlen); - retval = -ebusy; - goto err_free; - } - retval = request_irq(card->irq, t1isa_interrupt, 0, card->name, card); - if (retval) { - printk(kern_info "t1isa: unable to get irq %d. ", card->irq); - retval = -ebusy; - goto err_release_region; - } - - if ((retval = t1_detectandinit(card->port, card->irq, card->cardnr)) != 0) { - printk(kern_info "t1isa: no card at 0x%x (%d) ", - card->port, retval); - retval = -enodev; - goto err_free_irq; - } - t1_disable_irq(card->port); - b1_reset(card->port); - - cinfo->capi_ctrl.owner = this_module; - cinfo->capi_ctrl.driver_name = "t1isa"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = b1_register_appl; - cinfo->capi_ctrl.release_appl = b1_release_appl; - cinfo->capi_ctrl.send_message = t1isa_send_message; - cinfo->capi_ctrl.load_firmware = t1isa_load_firmware; - cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr; - cinfo->capi_ctrl.procinfo = t1isa_procinfo; - cinfo->capi_ctrl.proc_show = b1_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_info "t1isa: attach controller failed. 
"); - goto err_free_irq; - } - - printk(kern_info "t1isa: avm t1 isa at i/o %#x, irq %d, card %d ", - card->port, card->irq, card->cardnr); - - pci_set_drvdata(pdev, cinfo); - return 0; - -err_free_irq: - free_irq(card->irq, card); -err_release_region: - release_region(card->port, avmb1_portlen); -err_free: - b1_free_card(card); -err: - return retval; -} - -static u16 t1isa_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - avmcard *card = cinfo->card; - unsigned int port = card->port; - unsigned long flags; - u16 len = capimsg_len(skb->data); - u8 cmd = capimsg_command(skb->data); - u8 subcmd = capimsg_subcommand(skb->data); - u16 dlen, retval; - - spin_lock_irqsave(&card->lock, flags); - if (capicmd(cmd, subcmd) == capi_data_b3_req) { - retval = capilib_data_b3_req(&cinfo->ncci_head, - capimsg_appid(skb->data), - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - if (retval != capi_noerror) { - spin_unlock_irqrestore(&card->lock, flags); - return retval; - } - dlen = capimsg_datalen(skb->data); - - b1_put_byte(port, send_data_b3_req); - t1_put_slice(port, skb->data, len); - t1_put_slice(port, skb->data + len, dlen); - } else { - b1_put_byte(port, send_message); - t1_put_slice(port, skb->data, len); - } - spin_unlock_irqrestore(&card->lock, flags); - dev_kfree_skb_any(skb); - return capi_noerror; -} -/* ------------------------------------------------------------- */ - -static char *t1isa_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d %d", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? 
cinfo->card->cardnr : 0 - ); - return cinfo->infobuf; -} - - -/* ------------------------------------------------------------- */ - -#define max_cards 4 -static struct pci_dev isa_dev[max_cards]; -static int io[max_cards]; -static int irq[max_cards]; -static int cardnr[max_cards]; - -module_param_hw_array(io, int, ioport, null, 0); -module_param_hw_array(irq, int, irq, null, 0); -module_param_array(cardnr, int, null, 0); -module_parm_desc(io, "i/o base address(es)"); -module_parm_desc(irq, "irq number(s) (assigned)"); -module_parm_desc(cardnr, "card number(s) (as jumpered)"); - -static int t1isa_add_card(struct capi_driver *driver, capicardparams *data) -{ - int i; - - for (i = 0; i < max_cards; i++) { - if (isa_dev[i].resource[0].start) - continue; - - isa_dev[i].resource[0].start = data->port; - isa_dev[i].irq = data->irq; - - if (t1isa_probe(&isa_dev[i], data->cardnr) == 0) - return 0; - } - return -enodev; -} - -static struct capi_driver capi_driver_t1isa = { - .name = "t1isa", - .revision = "1.0", - .add_card = t1isa_add_card, -}; - -static int __init t1isa_init(void) -{ - char rev[32]; - char *p; - int i; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, 32); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - for (i = 0; i < max_cards; i++) { - if (!io[i]) - break; - - isa_dev[i].resource[0].start = io[i]; - isa_dev[i].irq = irq[i]; - - if (t1isa_probe(&isa_dev[i], cardnr[i]) != 0) - return -enodev; - } - - strlcpy(capi_driver_t1isa.revision, rev, 32); - register_capi_driver(&capi_driver_t1isa); - printk(kern_info "t1isa: revision %s ", rev); - - return 0; -} - -static void __exit t1isa_exit(void) -{ - int i; - - unregister_capi_driver(&capi_driver_t1isa); - for (i = 0; i < max_cards; i++) { - if (!io[i]) - break; - - t1isa_remove(&isa_dev[i]); - } -} - -module_init(t1isa_init); -module_exit(t1isa_exit); diff --git a/drivers/staging/isdn/avm/t1pci.c b/drivers/staging/isdn/avm/t1pci.c 
--- a/drivers/staging/isdn/avm/t1pci.c +++ /dev/null -/* $id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil exp $ - * - * module for avm t1 pci-card. - * - * copyright 1999 by carsten paeth <calle@calle.de> - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/delay.h> -#include <linux/mm.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/pci.h> -#include <linux/capi.h> -#include <linux/init.h> -#include <asm/io.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> -#include "avmcard.h" - -#undef config_t1pci_debug -#undef config_t1pci_polldebug - -/* ------------------------------------------------------------- */ -static char *revision = "$revision: 1.1.2.2 $"; -/* ------------------------------------------------------------- */ - -static struct pci_device_id t1pci_pci_tbl[] = { - { pci_vendor_id_avm, pci_device_id_avm_t1, pci_any_id, pci_any_id }, - { } /* terminating entry */ -}; - -module_device_table(pci, t1pci_pci_tbl); -module_description("capi4linux: driver for avm t1 pci card"); -module_author("carsten paeth"); -module_license("gpl"); - -/* ------------------------------------------------------------- */ - -static char *t1pci_procinfo(struct capi_ctr *ctrl); - -static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev) -{ - avmcard *card; - avmctrl_info *cinfo; - int retval; - - card = b1_alloc_card(1); - if (!card) { - printk(kern_warning "t1pci: no memory. "); - retval = -enomem; - goto err; - } - - card->dma = avmcard_dma_alloc("t1pci", pdev, 2048 + 128, 2048 + 128); - if (!card->dma) { - printk(kern_warning "t1pci: no memory. 
"); - retval = -enomem; - goto err_free; - } - - cinfo = card->ctrlinfo; - sprintf(card->name, "t1pci-%x", p->port); - card->port = p->port; - card->irq = p->irq; - card->membase = p->membase; - card->cardtype = avm_t1pci; - - if (!request_region(card->port, avmb1_portlen, card->name)) { - printk(kern_warning "t1pci: ports 0x%03x-0x%03x in use. ", - card->port, card->port + avmb1_portlen); - retval = -ebusy; - goto err_free_dma; - } - - card->mbase = ioremap(card->membase, 64); - if (!card->mbase) { - printk(kern_notice "t1pci: can't remap memory at 0x%lx ", - card->membase); - retval = -eio; - goto err_release_region; - } - - b1dma_reset(card); - - retval = t1pci_detect(card); - if (retval != 0) { - if (retval < 6) - printk(kern_notice "t1pci: no card at 0x%x (%d) ", - card->port, retval); - else - printk(kern_notice "t1pci: card at 0x%x, but cable not connected or t1 has no power (%d) ", - card->port, retval); - retval = -eio; - goto err_unmap; - } - b1dma_reset(card); - - retval = request_irq(card->irq, b1dma_interrupt, irqf_shared, card->name, card); - if (retval) { - printk(kern_err "t1pci: unable to get irq %d. ", card->irq); - retval = -ebusy; - goto err_unmap; - } - - cinfo->capi_ctrl.owner = this_module; - cinfo->capi_ctrl.driver_name = "t1pci"; - cinfo->capi_ctrl.driverdata = cinfo; - cinfo->capi_ctrl.register_appl = b1dma_register_appl; - cinfo->capi_ctrl.release_appl = b1dma_release_appl; - cinfo->capi_ctrl.send_message = b1dma_send_message; - cinfo->capi_ctrl.load_firmware = b1dma_load_firmware; - cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr; - cinfo->capi_ctrl.procinfo = t1pci_procinfo; - cinfo->capi_ctrl.proc_show = b1dma_proc_show; - strcpy(cinfo->capi_ctrl.name, card->name); - - retval = attach_capi_ctr(&cinfo->capi_ctrl); - if (retval) { - printk(kern_err "t1pci: attach controller failed. 
"); - retval = -ebusy; - goto err_free_irq; - } - card->cardnr = cinfo->capi_ctrl.cnr; - - printk(kern_info "t1pci: avm t1 pci at i/o %#x, irq %d, mem %#lx ", - card->port, card->irq, card->membase); - - pci_set_drvdata(pdev, card); - return 0; - -err_free_irq: - free_irq(card->irq, card); -err_unmap: - iounmap(card->mbase); -err_release_region: - release_region(card->port, avmb1_portlen); -err_free_dma: - avmcard_dma_free(card->dma); -err_free: - b1_free_card(card); -err: - return retval; -} - -/* ------------------------------------------------------------- */ - -static void t1pci_remove(struct pci_dev *pdev) -{ - avmcard *card = pci_get_drvdata(pdev); - avmctrl_info *cinfo = card->ctrlinfo; - - b1dma_reset(card); - - detach_capi_ctr(&cinfo->capi_ctrl); - free_irq(card->irq, card); - iounmap(card->mbase); - release_region(card->port, avmb1_portlen); - avmcard_dma_free(card->dma); - b1_free_card(card); -} - -/* ------------------------------------------------------------- */ - -static char *t1pci_procinfo(struct capi_ctr *ctrl) -{ - avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata); - - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->port : 0x0, - cinfo->card ? cinfo->card->irq : 0, - cinfo->card ? 
cinfo->card->membase : 0 - ); - return cinfo->infobuf; -} - -/* ------------------------------------------------------------- */ - -static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) -{ - struct capicardparams param; - int retval; - - if (pci_enable_device(dev) < 0) { - printk(kern_err "t1pci: failed to enable avm-t1-pci "); - return -enodev; - } - pci_set_master(dev); - - param.port = pci_resource_start(dev, 1); - param.irq = dev->irq; - param.membase = pci_resource_start(dev, 0); - - printk(kern_info "t1pci: pci bios reports avm-t1-pci at i/o %#x, irq %d, mem %#x ", - param.port, param.irq, param.membase); - - retval = t1pci_add_card(¶m, dev); - if (retval != 0) { - printk(kern_err "t1pci: no avm-t1-pci at i/o %#x, irq %d detected, mem %#x ", - param.port, param.irq, param.membase); - pci_disable_device(dev); - return -enodev; - } - return 0; -} - -static struct pci_driver t1pci_pci_driver = { - .name = "t1pci", - .id_table = t1pci_pci_tbl, - .probe = t1pci_probe, - .remove = t1pci_remove, -}; - -static struct capi_driver capi_driver_t1pci = { - .name = "t1pci", - .revision = "1.0", -}; - -static int __init t1pci_init(void) -{ - char *p; - char rev[32]; - int err; - - if ((p = strchr(revision, ':')) != null && p[1]) { - strlcpy(rev, p + 2, 32); - if ((p = strchr(rev, '$')) != null && p > rev) - *(p - 1) = 0; - } else - strcpy(rev, "1.0"); - - err = pci_register_driver(&t1pci_pci_driver); - if (!err) { - strlcpy(capi_driver_t1pci.revision, rev, 32); - register_capi_driver(&capi_driver_t1pci); - printk(kern_info "t1pci: revision %s ", rev); - } - return err; -} - -static void __exit t1pci_exit(void) -{ - unregister_capi_driver(&capi_driver_t1pci); - pci_unregister_driver(&t1pci_pci_driver); -} - -module_init(t1pci_init); -module_exit(t1pci_exit); diff --git a/drivers/staging/isdn/gigaset/kconfig b/drivers/staging/isdn/gigaset/kconfig --- a/drivers/staging/isdn/gigaset/kconfig +++ /dev/null -# spdx-license-identifier: gpl-2.0-only 
-menuconfig isdn_drv_gigaset - tristate "siemens gigaset support" - depends on tty - select crc_ccitt - select bitreverse - help - this driver supports the siemens gigaset sx205/255 family of - isdn dect bases, including the predecessors gigaset 3070/3075 - and 4170/4175 and their t-com versions sinus 45isdn and sinus - 721x. - if you have one of these devices, say m here and for at least - one of the connection specific parts that follow. - this will build a module called "gigaset". - note: if you build your isdn subsystem (isdn_capi or isdn_i4l) - as a module, you have to build this driver as a module too, - otherwise the gigaset device won't show up as an isdn device. - -if isdn_drv_gigaset - -config gigaset_capi - bool "gigaset capi support" - depends on isdn_capi='y'||(isdn_capi='m'&&isdn_drv_gigaset='m') - default 'y' - help - build the gigaset driver as a capi 2.0 driver interfacing with - the kernel capi subsystem. to use it with the old isdn4linux - subsystem you'll have to enable the capidrv glue driver. - (select isdn_capi_capidrv.) - say n to build the old native isdn4linux variant. - if unsure, say y. - -config gigaset_base - tristate "gigaset base station support" - depends on usb - help - say m here if you want to use the usb interface of the gigaset - base for connection to your system. - this will build a module called "bas_gigaset". - -config gigaset_m105 - tristate "gigaset m105 support" - depends on usb - help - say m here if you want to connect to the gigaset base via dect - using a gigaset m105 (sinus 45 data 2) usb dect device. - this will build a module called "usb_gigaset". - -config gigaset_m101 - tristate "gigaset m101 support" - help - say m here if you want to connect to the gigaset base via dect - using a gigaset m101 (sinus 45 data 1) rs232 dect device. - this will build a module called "ser_gigaset". - -config gigaset_debug - bool "gigaset debugging" - help - this enables debugging code in the gigaset drivers. - if in doubt, say yes. 
- -endif # isdn_drv_gigaset diff --git a/drivers/staging/isdn/gigaset/makefile b/drivers/staging/isdn/gigaset/makefile --- a/drivers/staging/isdn/gigaset/makefile +++ /dev/null -# spdx-license-identifier: gpl-2.0 -gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o - -ifdef config_gigaset_capi -gigaset-y += capi.o -else -gigaset-y += dummyll.o -endif - -usb_gigaset-y := usb-gigaset.o -ser_gigaset-y := ser-gigaset.o -bas_gigaset-y := bas-gigaset.o isocdata.o - -obj-$(config_isdn_drv_gigaset) += gigaset.o -obj-$(config_gigaset_m105) += usb_gigaset.o -obj-$(config_gigaset_base) += bas_gigaset.o -obj-$(config_gigaset_m101) += ser_gigaset.o diff --git a/drivers/staging/isdn/gigaset/asyncdata.c b/drivers/staging/isdn/gigaset/asyncdata.c --- a/drivers/staging/isdn/gigaset/asyncdata.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * common data handling layer for ser_gigaset and usb_gigaset - * - * copyright (c) 2005 by tilman schmidt <tilman@imap.cc>, - * hansjoerg lipp <hjlipp@web.de>, - * stefan eilers. - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/crc-ccitt.h> -#include <linux/bitrev.h> -#include <linux/export.h> - -/* check if byte must be stuffed/escaped - * i'm not sure which data should be encoded. - * therefore i will go the hard way and encode every value - * less than 0x20, the flag sequence and the control escape char. 
- */ -static inline int muststuff(unsigned char c) -{ - if (c < ppp_trans) return 1; - if (c == ppp_flag) return 1; - if (c == ppp_escape) return 1; - /* other possible candidates: */ - /* 0x91: xon with parity set */ - /* 0x93: xoff with parity set */ - return 0; -} - -/* == data input =========================================================== */ - -/* process a block of received bytes in command mode - * (mstate != ms_locked && (inputstate & ins_command)) - * append received bytes to the command response buffer and forward them - * line by line to the response handler. exit whenever a mode/state change - * might have occurred. - * note: received lines may be terminated by cr, lf, or cr lf, which will be - * removed before passing the line to the response handler. - * return value: - * number of processed bytes - */ -static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf) -{ - unsigned char *src = inbuf->data + inbuf->head; - struct cardstate *cs = inbuf->cs; - unsigned cbytes = cs->cbytes; - unsigned procbytes = 0; - unsigned char c; - - while (procbytes < numbytes) { - c = *src++; - procbytes++; - - switch (c) { - case ' ': - if (cbytes == 0 && cs->respdata[0] == ' ') { - /* collapse lf with preceding cr */ - cs->respdata[0] = 0; - break; - } - /* fall through */ - case ' ': - /* end of message line, pass to response handler */ - if (cbytes >= max_resp_size) { - dev_warn(cs->dev, "response too large (%d) ", - cbytes); - cbytes = max_resp_size; - } - cs->cbytes = cbytes; - gigaset_dbg_buffer(debug_transcmd, "received response", - cbytes, cs->respdata); - gigaset_handle_modem_response(cs); - cbytes = 0; - - /* store eol byte for crlf collapsing */ - cs->respdata[0] = c; - - /* cs->dle may have changed */ - if (cs->dle && !(inbuf->inputstate & ins_dle_command)) - inbuf->inputstate &= ~ins_command; - - /* return for reevaluating state */ - goto exit; - - case dle_flag: - if (inbuf->inputstate & ins_dle_char) { - /* quoted dle: clear quote flag */ - 
inbuf->inputstate &= ~ins_dle_char; - } else if (cs->dle || - (inbuf->inputstate & ins_dle_command)) { - /* dle escape, pass up for handling */ - inbuf->inputstate |= ins_dle_char; - goto exit; - } - /* quoted or not in dle mode: treat as regular data */ - /* fall through */ - default: - /* append to line buffer if possible */ - if (cbytes < max_resp_size) - cs->respdata[cbytes] = c; - cbytes++; - } - } -exit: - cs->cbytes = cbytes; - return procbytes; -} - -/* process a block of received bytes in lock mode - * all received bytes are passed unmodified to the tty i/f. - * return value: - * number of processed bytes - */ -static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf) -{ - unsigned char *src = inbuf->data + inbuf->head; - - gigaset_dbg_buffer(debug_lockcmd, "received response", numbytes, src); - gigaset_if_receive(inbuf->cs, src, numbytes); - return numbytes; -} - -/* process a block of received bytes in hdlc data mode - * (mstate != ms_locked && !(inputstate & ins_command) && proto2 == l2_hdlc) - * collect hdlc frames, undoing byte stuffing and watching for dle escapes. - * when a frame is complete, check the fcs and pass valid frames to the ll. - * if dle is encountered, return immediately to let the caller handle it. 
- * return value: - * number of processed bytes - */ -static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf) -{ - struct cardstate *cs = inbuf->cs; - struct bc_state *bcs = cs->bcs; - int inputstate = bcs->inputstate; - __u16 fcs = bcs->rx_fcs; - struct sk_buff *skb = bcs->rx_skb; - unsigned char *src = inbuf->data + inbuf->head; - unsigned procbytes = 0; - unsigned char c; - - if (inputstate & ins_byte_stuff) { - if (!numbytes) - return 0; - inputstate &= ~ins_byte_stuff; - goto byte_stuff; - } - - while (procbytes < numbytes) { - c = *src++; - procbytes++; - if (c == dle_flag) { - if (inputstate & ins_dle_char) { - /* quoted dle: clear quote flag */ - inputstate &= ~ins_dle_char; - } else if (cs->dle || (inputstate & ins_dle_command)) { - /* dle escape, pass up for handling */ - inputstate |= ins_dle_char; - break; - } - } - - if (c == ppp_escape) { - /* byte stuffing indicator: pull in next byte */ - if (procbytes >= numbytes) { - /* end of buffer, save for later processing */ - inputstate |= ins_byte_stuff; - break; - } -byte_stuff: - c = *src++; - procbytes++; - if (c == dle_flag) { - if (inputstate & ins_dle_char) { - /* quoted dle: clear quote flag */ - inputstate &= ~ins_dle_char; - } else if (cs->dle || - (inputstate & ins_dle_command)) { - /* dle escape, pass up for handling */ - inputstate |= - ins_dle_char | ins_byte_stuff; - break; - } - } - c ^= ppp_trans; -#ifdef config_gigaset_debug - if (!muststuff(c)) - gig_dbg(debug_hdlc, "byte stuffed: 0x%02x", c); -#endif - } else if (c == ppp_flag) { - /* end of frame: process content if any */ - if (inputstate & ins_have_data) { - gig_dbg(debug_hdlc, - "7e----------------------------"); - - /* check and pass received frame */ - if (!skb) { - /* skipped frame */ - gigaset_isdn_rcv_err(bcs); - } else if (skb->len < 2) { - /* frame too short for fcs */ - dev_warn(cs->dev, - "short frame (%d) ", - skb->len); - gigaset_isdn_rcv_err(bcs); - dev_kfree_skb_any(skb); - } else if (fcs != ppp_goodfcs) { - 
/* frame check error */ - dev_err(cs->dev, - "checksum failed, %u bytes corrupted! ", - skb->len); - gigaset_isdn_rcv_err(bcs); - dev_kfree_skb_any(skb); - } else { - /* good frame */ - __skb_trim(skb, skb->len - 2); - gigaset_skb_rcvd(bcs, skb); - } - - /* prepare reception of next frame */ - inputstate &= ~ins_have_data; - skb = gigaset_new_rx_skb(bcs); - } else { - /* empty frame (7e 7e) */ -#ifdef config_gigaset_debug - ++bcs->emptycount; -#endif - if (!skb) { - /* skipped (?) */ - gigaset_isdn_rcv_err(bcs); - skb = gigaset_new_rx_skb(bcs); - } - } - - fcs = ppp_initfcs; - continue; -#ifdef config_gigaset_debug - } else if (muststuff(c)) { - /* should not happen. possible after zdle=1<cr><lf>. */ - gig_dbg(debug_hdlc, "not byte stuffed: 0x%02x", c); -#endif - } - - /* regular data byte, append to skb */ -#ifdef config_gigaset_debug - if (!(inputstate & ins_have_data)) { - gig_dbg(debug_hdlc, "7e (%d x) ================", - bcs->emptycount); - bcs->emptycount = 0; - } -#endif - inputstate |= ins_have_data; - if (skb) { - if (skb->len >= bcs->rx_bufsize) { - dev_warn(cs->dev, "received packet too long "); - dev_kfree_skb_any(skb); - /* skip remainder of packet */ - bcs->rx_skb = skb = null; - } else { - __skb_put_u8(skb, c); - fcs = crc_ccitt_byte(fcs, c); - } - } - } - - bcs->inputstate = inputstate; - bcs->rx_fcs = fcs; - return procbytes; -} - -/* process a block of received bytes in transparent data mode - * (mstate != ms_locked && !(inputstate & ins_command) && proto2 != l2_hdlc) - * invert bytes, undoing byte stuffing and watching for dle escapes. - * if dle is encountered, return immediately to let the caller handle it. 
- * return value: - * number of processed bytes - */ -static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf) -{ - struct cardstate *cs = inbuf->cs; - struct bc_state *bcs = cs->bcs; - int inputstate = bcs->inputstate; - struct sk_buff *skb = bcs->rx_skb; - unsigned char *src = inbuf->data + inbuf->head; - unsigned procbytes = 0; - unsigned char c; - - if (!skb) { - /* skip this block */ - gigaset_new_rx_skb(bcs); - return numbytes; - } - - while (procbytes < numbytes && skb->len < bcs->rx_bufsize) { - c = *src++; - procbytes++; - - if (c == dle_flag) { - if (inputstate & ins_dle_char) { - /* quoted dle: clear quote flag */ - inputstate &= ~ins_dle_char; - } else if (cs->dle || (inputstate & ins_dle_command)) { - /* dle escape, pass up for handling */ - inputstate |= ins_dle_char; - break; - } - } - - /* regular data byte: append to current skb */ - inputstate |= ins_have_data; - __skb_put_u8(skb, bitrev8(c)); - } - - /* pass data up */ - if (inputstate & ins_have_data) { - gigaset_skb_rcvd(bcs, skb); - inputstate &= ~ins_have_data; - gigaset_new_rx_skb(bcs); - } - - bcs->inputstate = inputstate; - return procbytes; -} - -/* process dle escapes - * called whenever a dle sequence might be encountered in the input stream. - * either processes the entire dle sequence or, if that isn't possible, - * notes the fact that an initial dle has been received in the ins_dle_char - * inputstate flag and resumes processing of the sequence on the next call. 
- */ -static void handle_dle(struct inbuf_t *inbuf) -{ - struct cardstate *cs = inbuf->cs; - - if (cs->mstate == ms_locked) - return; /* no dle processing in lock mode */ - - if (!(inbuf->inputstate & ins_dle_char)) { - /* no dle pending */ - if (inbuf->data[inbuf->head] == dle_flag && - (cs->dle || inbuf->inputstate & ins_dle_command)) { - /* start of dle sequence */ - inbuf->head++; - if (inbuf->head == inbuf->tail || - inbuf->head == rbufsize) { - /* end of buffer, save for later processing */ - inbuf->inputstate |= ins_dle_char; - return; - } - } else { - /* regular data byte */ - return; - } - } - - /* consume pending dle */ - inbuf->inputstate &= ~ins_dle_char; - - switch (inbuf->data[inbuf->head]) { - case 'x': /* begin of event message */ - if (inbuf->inputstate & ins_command) - dev_notice(cs->dev, - "received <dle>x in command mode "); - inbuf->inputstate |= ins_command | ins_dle_command; - inbuf->head++; /* byte consumed */ - break; - case '.': /* end of event message */ - if (!(inbuf->inputstate & ins_dle_command)) - dev_notice(cs->dev, - "received <dle>. without <dle>x "); - inbuf->inputstate &= ~ins_dle_command; - /* return to data mode if in dle mode */ - if (cs->dle) - inbuf->inputstate &= ~ins_command; - inbuf->head++; /* byte consumed */ - break; - case dle_flag: /* dle in data stream */ - /* mark as quoted */ - inbuf->inputstate |= ins_dle_char; - if (!(cs->dle || inbuf->inputstate & ins_dle_command)) - dev_notice(cs->dev, - "received <dle><dle> not in dle mode "); - break; /* quoted byte left in buffer */ - default: - dev_notice(cs->dev, "received <dle><%02x> ", - inbuf->data[inbuf->head]); - /* quoted byte left in buffer */ - } -} - -/** - * gigaset_m10x_input() - process a block of data received from the device - * @inbuf: received data and device descriptor structure. - * - * called by hardware module {ser,usb}_gigaset with a block of received - * bytes. 
separates the bytes received over the serial data channel into - * user data and command replies (locked/unlocked) according to the - * current state of the interface. - */ -void gigaset_m10x_input(struct inbuf_t *inbuf) -{ - struct cardstate *cs = inbuf->cs; - unsigned numbytes, procbytes; - - gig_dbg(debug_intr, "buffer state: %u -> %u", inbuf->head, inbuf->tail); - - while (inbuf->head != inbuf->tail) { - /* check for dle escape */ - handle_dle(inbuf); - - /* process a contiguous block of bytes */ - numbytes = (inbuf->head > inbuf->tail ? - rbufsize : inbuf->tail) - inbuf->head; - gig_dbg(debug_intr, "processing %u bytes", numbytes); - /* - * numbytes may be 0 if handle_dle() ate the last byte. - * this does no harm, *_loop() will just return 0 immediately. - */ - - if (cs->mstate == ms_locked) - procbytes = lock_loop(numbytes, inbuf); - else if (inbuf->inputstate & ins_command) - procbytes = cmd_loop(numbytes, inbuf); - else if (cs->bcs->proto2 == l2_hdlc) - procbytes = hdlc_loop(numbytes, inbuf); - else - procbytes = iraw_loop(numbytes, inbuf); - inbuf->head += procbytes; - - /* check for buffer wraparound */ - if (inbuf->head >= rbufsize) - inbuf->head = 0; - - gig_dbg(debug_intr, "head set to %u", inbuf->head); - } -} -export_symbol_gpl(gigaset_m10x_input); - - -/* == data output ========================================================== */ - -/* - * encode a data packet into an octet stuffed hdlc frame with fcs, - * opening and closing flags, preserving headroom data. 
- * parameters: - * skb skb containing original packet (freed upon return) - * return value: - * pointer to newly allocated skb containing the result frame - * and the original link layer header, null on error - */ -static struct sk_buff *hdlc_encode(struct sk_buff *skb) -{ - struct sk_buff *hdlc_skb; - __u16 fcs; - unsigned char c; - unsigned char *cp; - int len; - unsigned int stuf_cnt; - - stuf_cnt = 0; - fcs = ppp_initfcs; - cp = skb->data; - len = skb->len; - while (len--) { - if (muststuff(*cp)) - stuf_cnt++; - fcs = crc_ccitt_byte(fcs, *cp++); - } - fcs ^= 0xffff; /* complement */ - - /* size of new buffer: original size + number of stuffing bytes - * + 2 bytes fcs + 2 stuffing bytes for fcs (if needed) + 2 flag bytes - * + room for link layer header - */ - hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len); - if (!hdlc_skb) { - dev_kfree_skb_any(skb); - return null; - } - - /* copy link layer header into new skb */ - skb_reset_mac_header(hdlc_skb); - skb_reserve(hdlc_skb, skb->mac_len); - memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len); - hdlc_skb->mac_len = skb->mac_len; - - /* add flag sequence in front of everything.. */ - skb_put_u8(hdlc_skb, ppp_flag); - - /* perform byte stuffing while copying data. 
*/ - while (skb->len--) { - if (muststuff(*skb->data)) { - skb_put_u8(hdlc_skb, ppp_escape); - skb_put_u8(hdlc_skb, (*skb->data++) ^ ppp_trans); - } else - skb_put_u8(hdlc_skb, *skb->data++); - } - - /* finally add fcs (byte stuffed) and flag sequence */ - c = (fcs & 0x00ff); /* least significant byte first */ - if (muststuff(c)) { - skb_put_u8(hdlc_skb, ppp_escape); - c ^= ppp_trans; - } - skb_put_u8(hdlc_skb, c); - - c = ((fcs >> 8) & 0x00ff); - if (muststuff(c)) { - skb_put_u8(hdlc_skb, ppp_escape); - c ^= ppp_trans; - } - skb_put_u8(hdlc_skb, c); - - skb_put_u8(hdlc_skb, ppp_flag); - - dev_kfree_skb_any(skb); - return hdlc_skb; -} - -/* - * encode a data packet into an octet stuffed raw bit inverted frame, - * preserving headroom data. - * parameters: - * skb skb containing original packet (freed upon return) - * return value: - * pointer to newly allocated skb containing the result frame - * and the original link layer header, null on error - */ -static struct sk_buff *iraw_encode(struct sk_buff *skb) -{ - struct sk_buff *iraw_skb; - unsigned char c; - unsigned char *cp; - int len; - - /* size of new buffer (worst case = every byte must be stuffed): - * 2 * original size + room for link layer header - */ - iraw_skb = dev_alloc_skb(2 * skb->len + skb->mac_len); - if (!iraw_skb) { - dev_kfree_skb_any(skb); - return null; - } - - /* copy link layer header into new skb */ - skb_reset_mac_header(iraw_skb); - skb_reserve(iraw_skb, skb->mac_len); - memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len); - iraw_skb->mac_len = skb->mac_len; - - /* copy and stuff data */ - cp = skb->data; - len = skb->len; - while (len--) { - c = bitrev8(*cp++); - if (c == dle_flag) - skb_put_u8(iraw_skb, c); - skb_put_u8(iraw_skb, c); - } - dev_kfree_skb_any(skb); - return iraw_skb; -} - -/** - * gigaset_m10x_send_skb() - queue an skb for sending - * @bcs: b channel descriptor structure. - * @skb: data to send. 
- * - * called by ll to encode and queue an skb for sending, and start - * transmission if necessary. - * once the payload data has been transmitted completely, gigaset_skb_sent() - * will be called with the skb's link layer header preserved. - * - * return value: - * number of bytes accepted for sending (skb->len) if ok, - * error code < 0 (eg. -enomem) on error - */ -int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) -{ - struct cardstate *cs = bcs->cs; - unsigned len = skb->len; - unsigned long flags; - - if (bcs->proto2 == l2_hdlc) - skb = hdlc_encode(skb); - else - skb = iraw_encode(skb); - if (!skb) { - dev_err(cs->dev, - "unable to allocate memory for encoding! "); - return -enomem; - } - - skb_queue_tail(&bcs->squeue, skb); - spin_lock_irqsave(&cs->lock, flags); - if (cs->connected) - tasklet_schedule(&cs->write_tasklet); - spin_unlock_irqrestore(&cs->lock, flags); - - return len; /* ok so far */ -} -export_symbol_gpl(gigaset_m10x_send_skb); diff --git a/drivers/staging/isdn/gigaset/bas-gigaset.c b/drivers/staging/isdn/gigaset/bas-gigaset.c --- a/drivers/staging/isdn/gigaset/bas-gigaset.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * usb driver for gigaset 307x base via direct usb connection. - * - * copyright (c) 2001 by hansjoerg lipp <hjlipp@web.de>, - * tilman schmidt <tilman@imap.cc>, - * stefan eilers. 
- * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/usb.h> -#include <linux/module.h> -#include <linux/moduleparam.h> - -/* version information */ -#define driver_author "tilman schmidt <tilman@imap.cc>, hansjoerg lipp <hjlipp@web.de>, stefan eilers" -#define driver_desc "usb driver for gigaset 307x" - - -/* module parameters */ - -static int startmode = sm_isdn; -static int cidmode = 1; - -module_param(startmode, int, s_irugo); -module_param(cidmode, int, s_irugo); -module_parm_desc(startmode, "start in isdn4linux mode"); -module_parm_desc(cidmode, "call-id mode"); - -#define gigaset_minors 1 -#define gigaset_minor 16 -#define gigaset_modulename "bas_gigaset" -#define gigaset_devname "ttygb" - -/* length limit according to siemens 3070usb-protokoll.doc ch. 2.1 */ -#define if_writebuf 264 - -/* interrupt pipe message size according to ibid. ch. 
2.2 */ -#define ip_msgsize 3 - -/* values for the gigaset 307x */ -#define usb_giga_vendor_id 0x0681 -#define usb_3070_product_id 0x0001 -#define usb_3075_product_id 0x0002 -#define usb_sx303_product_id 0x0021 -#define usb_sx353_product_id 0x0022 - -/* table of devices that work with this driver */ -static const struct usb_device_id gigaset_table[] = { - { usb_device(usb_giga_vendor_id, usb_3070_product_id) }, - { usb_device(usb_giga_vendor_id, usb_3075_product_id) }, - { usb_device(usb_giga_vendor_id, usb_sx303_product_id) }, - { usb_device(usb_giga_vendor_id, usb_sx353_product_id) }, - { } /* terminating entry */ -}; - -module_device_table(usb, gigaset_table); - -/*======================= local function prototypes ==========================*/ - -/* function called if a new device belonging to this driver is connected */ -static int gigaset_probe(struct usb_interface *interface, - const struct usb_device_id *id); - -/* function will be called if the device is unplugged */ -static void gigaset_disconnect(struct usb_interface *interface); - -/* functions called before/after suspend */ -static int gigaset_suspend(struct usb_interface *intf, pm_message_t message); -static int gigaset_resume(struct usb_interface *intf); - -/* functions called before/after device reset */ -static int gigaset_pre_reset(struct usb_interface *intf); -static int gigaset_post_reset(struct usb_interface *intf); - -static int atread_submit(struct cardstate *, int); -static void stopurbs(struct bas_bc_state *); -static int req_submit(struct bc_state *, int, int, int); -static int atwrite_submit(struct cardstate *, unsigned char *, int); -static int start_cbsend(struct cardstate *); - -/*============================================================================*/ - -struct bas_cardstate { - struct usb_device *udev; /* usb device pointer */ - struct cardstate *cs; - struct usb_interface *interface; /* interface for this device */ - unsigned char minor; /* starting minor number */ - - struct urb 
*urb_ctrl; /* control pipe default urb */ - struct usb_ctrlrequest dr_ctrl; - struct timer_list timer_ctrl; /* control request timeout */ - int retry_ctrl; - - struct timer_list timer_atrdy; /* at command ready timeout */ - struct urb *urb_cmd_out; /* for sending at commands */ - struct usb_ctrlrequest dr_cmd_out; - int retry_cmd_out; - - struct urb *urb_cmd_in; /* for receiving at replies */ - struct usb_ctrlrequest dr_cmd_in; - struct timer_list timer_cmd_in; /* receive request timeout */ - unsigned char *rcvbuf; /* at reply receive buffer */ - - struct urb *urb_int_in; /* urb for interrupt pipe */ - unsigned char *int_in_buf; - struct work_struct int_in_wq; /* for usb_clear_halt() */ - struct timer_list timer_int_in; /* int read retry delay */ - int retry_int_in; - - spinlock_t lock; /* locks all following */ - int basstate; /* bitmap (bs_*) */ - int pending; /* uncompleted base request */ - wait_queue_head_t waitqueue; - int rcvbuf_size; /* size of at receive buffer */ - /* 0: no receive in progress */ - int retry_cmd_in; /* receive req retry count */ -}; - -/* status of direct usb connection to 307x base (bits in basstate) */ -#define bs_atopen 0x001 /* at channel open */ -#define bs_b1open 0x002 /* b channel 1 open */ -#define bs_b2open 0x004 /* b channel 2 open */ -#define bs_atready 0x008 /* base ready for at command */ -#define bs_init 0x010 /* base has signalled init_ok */ -#define bs_attimer 0x020 /* waiting for hd_ready_send_atdata */ -#define bs_atrdpend 0x040 /* urb_cmd_in in use */ -#define bs_atwrpend 0x080 /* urb_cmd_out in use */ -#define bs_suspend 0x100 /* usb port suspended */ -#define bs_resetting 0x200 /* waiting for hd_reset_interrupt_pipe_ack */ - - -static struct gigaset_driver *driver; - -/* usb specific object needed to register this driver with the usb subsystem */ -static struct usb_driver gigaset_usb_driver = { - .name = gigaset_modulename, - .probe = gigaset_probe, - .disconnect = gigaset_disconnect, - .id_table = gigaset_table, - 
.suspend = gigaset_suspend, - .resume = gigaset_resume, - .reset_resume = gigaset_post_reset, - .pre_reset = gigaset_pre_reset, - .post_reset = gigaset_post_reset, - .disable_hub_initiated_lpm = 1, -}; - -/* get message text for usb_submit_urb return code - */ -static char *get_usb_rcmsg(int rc) -{ - static char unkmsg[28]; - - switch (rc) { - case 0: - return "success"; - case -enomem: - return "out of memory"; - case -enodev: - return "device not present"; - case -enoent: - return "endpoint not present"; - case -enxio: - return "urb type not supported"; - case -einval: - return "invalid argument"; - case -eagain: - return "start frame too early or too much scheduled"; - case -efbig: - return "too many isoc frames requested"; - case -epipe: - return "endpoint stalled"; - case -emsgsize: - return "invalid packet size"; - case -enospc: - return "would overcommit usb bandwidth"; - case -eshutdown: - return "device shut down"; - case -eperm: - return "reject flag set"; - case -ehostunreach: - return "device suspended"; - default: - snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", rc); - return unkmsg; - } -} - -/* get message text for usb status code - */ -static char *get_usb_statmsg(int status) -{ - static char unkmsg[28]; - - switch (status) { - case 0: - return "success"; - case -enoent: - return "unlinked (sync)"; - case -einprogress: - return "urb still pending"; - case -eproto: - return "bitstuff error, timeout, or unknown usb error"; - case -eilseq: - return "crc mismatch, timeout, or unknown usb error"; - case -etime: - return "usb response timeout"; - case -epipe: - return "endpoint stalled"; - case -ecomm: - return "in buffer overrun"; - case -enosr: - return "out buffer underrun"; - case -eoverflow: - return "endpoint babble"; - case -eremoteio: - return "short packet"; - case -enodev: - return "device removed"; - case -exdev: - return "partial isoc transfer"; - case -einval: - return "iso madness"; - case -econnreset: - return "unlinked (async)"; - 
case -eshutdown: - return "device shut down"; - default: - snprintf(unkmsg, sizeof(unkmsg), "unknown status %d", status); - return unkmsg; - } -} - -/* usb_pipetype_str - * retrieve string representation of usb pipe type - */ -static inline char *usb_pipetype_str(int pipe) -{ - if (usb_pipeisoc(pipe)) - return "isoc"; - if (usb_pipeint(pipe)) - return "int"; - if (usb_pipecontrol(pipe)) - return "ctrl"; - if (usb_pipebulk(pipe)) - return "bulk"; - return "?"; -} - -/* dump_urb - * write content of urb to syslog for debugging - */ -static inline void dump_urb(enum debuglevel level, const char *tag, - struct urb *urb) -{ -#ifdef config_gigaset_debug - int i; - gig_dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb); - if (urb) { - gig_dbg(level, - " dev=0x%08lx, pipe=%s:ep%d/dv%d:%s, " - "hcpriv=0x%08lx, transfer_flags=0x%x,", - (unsigned long) urb->dev, - usb_pipetype_str(urb->pipe), - usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out", - (unsigned long) urb->hcpriv, - urb->transfer_flags); - gig_dbg(level, - " transfer_buffer=0x%08lx[%d], actual_length=%d, " - "setup_packet=0x%08lx,", - (unsigned long) urb->transfer_buffer, - urb->transfer_buffer_length, urb->actual_length, - (unsigned long) urb->setup_packet); - gig_dbg(level, - " start_frame=%d, number_of_packets=%d, interval=%d, " - "error_count=%d,", - urb->start_frame, urb->number_of_packets, urb->interval, - urb->error_count); - gig_dbg(level, - " context=0x%08lx, complete=0x%08lx, " - "iso_frame_desc[]={", - (unsigned long) urb->context, - (unsigned long) urb->complete); - for (i = 0; i < urb->number_of_packets; i++) { - struct usb_iso_packet_descriptor *pifd - = &urb->iso_frame_desc[i]; - gig_dbg(level, - " {offset=%u, length=%u, actual_length=%u, " - "status=%u}", - pifd->offset, pifd->length, pifd->actual_length, - pifd->status); - } - } - gig_dbg(level, "}}"); -#endif -} - -/* read/set modem control bits etc. 
(m10x only) */ -static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, - unsigned new_state) -{ - return -einval; -} - -static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) -{ - return -einval; -} - -static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) -{ - return -einval; -} - -/* set/clear bits in base connection state, return previous state - */ -static inline int update_basstate(struct bas_cardstate *ucs, - int set, int clear) -{ - unsigned long flags; - int state; - - spin_lock_irqsave(&ucs->lock, flags); - state = ucs->basstate; - ucs->basstate = (state & ~clear) | set; - spin_unlock_irqrestore(&ucs->lock, flags); - return state; -} - -/* error_hangup - * hang up any existing connection because of an unrecoverable error - * this function may be called from any context and takes care of scheduling - * the necessary actions for execution outside of interrupt context. - * cs->lock must not be held. - * argument: - * b channel control structure - */ -static inline void error_hangup(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - - gigaset_add_event(cs, &bcs->at_state, ev_hup, null, 0, null); - gigaset_schedule_event(cs); -} - -/* error_reset - * reset gigaset device because of an unrecoverable error - * this function may be called from any context, and takes care of - * scheduling the necessary actions for execution outside of interrupt context. - * cs->hw.bas->lock must not be held. 
- * argument: - * controller state structure - */ -static inline void error_reset(struct cardstate *cs) -{ - /* reset interrupt pipe to recover (ignore errors) */ - update_basstate(cs->hw.bas, bs_resetting, 0); - if (req_submit(cs->bcs, hd_reset_interrupt_pipe, 0, bas_timeout)) - /* submission failed, escalate to usb port reset */ - usb_queue_reset_device(cs->hw.bas->interface); -} - -/* check_pending - * check for completion of pending control request - * parameter: - * ucs hardware specific controller state structure - */ -static void check_pending(struct bas_cardstate *ucs) -{ - unsigned long flags; - - spin_lock_irqsave(&ucs->lock, flags); - switch (ucs->pending) { - case 0: - break; - case hd_open_atchannel: - if (ucs->basstate & bs_atopen) - ucs->pending = 0; - break; - case hd_open_b1channel: - if (ucs->basstate & bs_b1open) - ucs->pending = 0; - break; - case hd_open_b2channel: - if (ucs->basstate & bs_b2open) - ucs->pending = 0; - break; - case hd_close_atchannel: - if (!(ucs->basstate & bs_atopen)) - ucs->pending = 0; - break; - case hd_close_b1channel: - if (!(ucs->basstate & bs_b1open)) - ucs->pending = 0; - break; - case hd_close_b2channel: - if (!(ucs->basstate & bs_b2open)) - ucs->pending = 0; - break; - case hd_device_init_ack: /* no reply expected */ - ucs->pending = 0; - break; - case hd_reset_interrupt_pipe: - if (!(ucs->basstate & bs_resetting)) - ucs->pending = 0; - break; - /* - * hd_read_atmessage and hd_write_atmessage are handled separately - * and should never end up here - */ - default: - dev_warn(&ucs->interface->dev, - "unknown pending request 0x%02x cleared ", - ucs->pending); - ucs->pending = 0; - } - - if (!ucs->pending) - del_timer(&ucs->timer_ctrl); - - spin_unlock_irqrestore(&ucs->lock, flags); -} - -/* cmd_in_timeout - * timeout routine for command input request - * argument: - * controller state structure - */ -static void cmd_in_timeout(struct timer_list *t) -{ - struct bas_cardstate *ucs = from_timer(ucs, t, timer_cmd_in); - 
struct cardstate *cs = ucs->cs; - int rc; - - if (!ucs->rcvbuf_size) { - gig_dbg(debug_usbreq, "%s: no receive in progress", __func__); - return; - } - - if (ucs->retry_cmd_in++ >= bas_retry) { - dev_err(cs->dev, - "control read: timeout, giving up after %d tries ", - ucs->retry_cmd_in); - kfree(ucs->rcvbuf); - ucs->rcvbuf = null; - ucs->rcvbuf_size = 0; - error_reset(cs); - return; - } - - gig_dbg(debug_usbreq, "%s: timeout, retry %d", - __func__, ucs->retry_cmd_in); - rc = atread_submit(cs, bas_timeout); - if (rc < 0) { - kfree(ucs->rcvbuf); - ucs->rcvbuf = null; - ucs->rcvbuf_size = 0; - if (rc != -enodev) - error_reset(cs); - } -} - -/* read_ctrl_callback - * usb completion handler for control pipe input - * called by the usb subsystem in interrupt context - * parameter: - * urb usb request block - * urb->context = inbuf structure for controller state - */ -static void read_ctrl_callback(struct urb *urb) -{ - struct inbuf_t *inbuf = urb->context; - struct cardstate *cs = inbuf->cs; - struct bas_cardstate *ucs = cs->hw.bas; - int status = urb->status; - unsigned numbytes; - int rc; - - update_basstate(ucs, 0, bs_atrdpend); - wake_up(&ucs->waitqueue); - del_timer(&ucs->timer_cmd_in); - - switch (status) { - case 0: /* normal completion */ - numbytes = urb->actual_length; - if (unlikely(numbytes != ucs->rcvbuf_size)) { - dev_warn(cs->dev, - "control read: received %d chars, expected %d ", - numbytes, ucs->rcvbuf_size); - if (numbytes > ucs->rcvbuf_size) - numbytes = ucs->rcvbuf_size; - } - - /* copy received bytes to inbuf, notify event layer */ - if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) { - gig_dbg(debug_intr, "%s-->bh", __func__); - gigaset_schedule_event(cs); - } - break; - - case -enoent: /* cancelled */ - case -econnreset: /* cancelled (async) */ - case -einprogress: /* pending */ - case -enodev: /* device removed */ - case -eshutdown: /* device shut down */ - /* no further action necessary */ - gig_dbg(debug_usbreq, "%s: %s", - __func__, 
get_usb_statmsg(status)); - break; - - default: /* other errors: retry */ - if (ucs->retry_cmd_in++ < bas_retry) { - gig_dbg(debug_usbreq, "%s: %s, retry %d", __func__, - get_usb_statmsg(status), ucs->retry_cmd_in); - rc = atread_submit(cs, bas_timeout); - if (rc >= 0) - /* successfully resubmitted, skip freeing */ - return; - if (rc == -enodev) - /* disconnect, no further action necessary */ - break; - } - dev_err(cs->dev, "control read: %s, giving up after %d tries ", - get_usb_statmsg(status), ucs->retry_cmd_in); - error_reset(cs); - } - - /* read finished, free buffer */ - kfree(ucs->rcvbuf); - ucs->rcvbuf = null; - ucs->rcvbuf_size = 0; -} - -/* atread_submit - * submit an hd_read_atmessage command urb and optionally start a timeout - * parameters: - * cs controller state structure - * timeout timeout in 1/10 sec., 0: none - * return value: - * 0 on success - * -ebusy if another request is pending - * any urb submission error code - */ -static int atread_submit(struct cardstate *cs, int timeout) -{ - struct bas_cardstate *ucs = cs->hw.bas; - int basstate; - int ret; - - gig_dbg(debug_usbreq, "-------> hd_read_atmessage (%d)", - ucs->rcvbuf_size); - - basstate = update_basstate(ucs, bs_atrdpend, 0); - if (basstate & bs_atrdpend) { - dev_err(cs->dev, - "could not submit hd_read_atmessage: urb busy "); - return -ebusy; - } - - if (basstate & bs_suspend) { - dev_notice(cs->dev, - "hd_read_atmessage not submitted, " - "suspend in progress "); - update_basstate(ucs, 0, bs_atrdpend); - /* treat like disconnect */ - return -enodev; - } - - ucs->dr_cmd_in.brequesttype = in_vendor_req; - ucs->dr_cmd_in.brequest = hd_read_atmessage; - ucs->dr_cmd_in.wvalue = 0; - ucs->dr_cmd_in.windex = 0; - ucs->dr_cmd_in.wlength = cpu_to_le16(ucs->rcvbuf_size); - usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev, - usb_rcvctrlpipe(ucs->udev, 0), - (unsigned char *) &ucs->dr_cmd_in, - ucs->rcvbuf, ucs->rcvbuf_size, - read_ctrl_callback, cs->inbuf); - - ret = 
usb_submit_urb(ucs->urb_cmd_in, gfp_atomic); - if (ret != 0) { - update_basstate(ucs, 0, bs_atrdpend); - dev_err(cs->dev, "could not submit hd_read_atmessage: %s ", - get_usb_rcmsg(ret)); - return ret; - } - - if (timeout > 0) { - gig_dbg(debug_usbreq, "setting timeout of %d/10 secs", timeout); - mod_timer(&ucs->timer_cmd_in, jiffies + timeout * hz / 10); - } - return 0; -} - -/* int_in_work - * workqueue routine to clear halt on interrupt in endpoint - */ - -static void int_in_work(struct work_struct *work) -{ - struct bas_cardstate *ucs = - container_of(work, struct bas_cardstate, int_in_wq); - struct urb *urb = ucs->urb_int_in; - struct cardstate *cs = urb->context; - int rc; - - /* clear halt condition */ - rc = usb_clear_halt(ucs->udev, urb->pipe); - gig_dbg(debug_usbreq, "clear_halt: %s", get_usb_rcmsg(rc)); - if (rc == 0) - /* success, resubmit interrupt read urb */ - rc = usb_submit_urb(urb, gfp_atomic); - - switch (rc) { - case 0: /* success */ - case -enodev: /* device gone */ - case -einval: /* urb already resubmitted, or terminal badness */ - break; - default: /* failure: try to recover by resetting the device */ - dev_err(cs->dev, "clear halt failed: %s ", get_usb_rcmsg(rc)); - rc = usb_lock_device_for_reset(ucs->udev, ucs->interface); - if (rc == 0) { - rc = usb_reset_device(ucs->udev); - usb_unlock_device(ucs->udev); - } - } - ucs->retry_int_in = 0; -} - -/* int_in_resubmit - * timer routine for interrupt read delayed resubmit - * argument: - * controller state structure - */ -static void int_in_resubmit(struct timer_list *t) -{ - struct bas_cardstate *ucs = from_timer(ucs, t, timer_int_in); - struct cardstate *cs = ucs->cs; - int rc; - - if (ucs->retry_int_in++ >= bas_retry) { - dev_err(cs->dev, "interrupt read: giving up after %d tries ", - ucs->retry_int_in); - usb_queue_reset_device(ucs->interface); - return; - } - - gig_dbg(debug_usbreq, "%s: retry %d", __func__, ucs->retry_int_in); - rc = usb_submit_urb(ucs->urb_int_in, gfp_atomic); - if (rc != 
0 && rc != -enodev) { - dev_err(cs->dev, "could not resubmit interrupt urb: %s ", - get_usb_rcmsg(rc)); - usb_queue_reset_device(ucs->interface); - } -} - -/* read_int_callback - * usb completion handler for interrupt pipe input - * called by the usb subsystem in interrupt context - * parameter: - * urb usb request block - * urb->context = controller state structure - */ -static void read_int_callback(struct urb *urb) -{ - struct cardstate *cs = urb->context; - struct bas_cardstate *ucs = cs->hw.bas; - struct bc_state *bcs; - int status = urb->status; - unsigned long flags; - int rc; - unsigned l; - int channel; - - switch (status) { - case 0: /* success */ - ucs->retry_int_in = 0; - break; - case -epipe: /* endpoint stalled */ - schedule_work(&ucs->int_in_wq); - /* fall through */ - case -enoent: /* cancelled */ - case -econnreset: /* cancelled (async) */ - case -einprogress: /* pending */ - case -enodev: /* device removed */ - case -eshutdown: /* device shut down */ - /* no further action necessary */ - gig_dbg(debug_usbreq, "%s: %s", - __func__, get_usb_statmsg(status)); - return; - case -eproto: /* protocol error or unplug */ - case -eilseq: - case -etime: - /* resubmit after delay */ - gig_dbg(debug_usbreq, "%s: %s", - __func__, get_usb_statmsg(status)); - mod_timer(&ucs->timer_int_in, jiffies + hz / 10); - return; - default: /* other errors: just resubmit */ - dev_warn(cs->dev, "interrupt read: %s ", - get_usb_statmsg(status)); - goto resubmit; - } - - /* drop incomplete packets even if the missing bytes wouldn't matter */ - if (unlikely(urb->actual_length < ip_msgsize)) { - dev_warn(cs->dev, "incomplete interrupt packet (%d bytes) ", - urb->actual_length); - goto resubmit; - } - - l = (unsigned) ucs->int_in_buf[1] + - (((unsigned) ucs->int_in_buf[2]) << 8); - - gig_dbg(debug_usbreq, "<-------%d: 0x%02x (%u [0x%02x 0x%02x])", - urb->actual_length, (int)ucs->int_in_buf[0], l, - (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]); - - channel = 0; - - switch 
(ucs->int_in_buf[0]) { - case hd_device_init_ok: - update_basstate(ucs, bs_init, 0); - break; - - case hd_ready_send_atdata: - del_timer(&ucs->timer_atrdy); - update_basstate(ucs, bs_atready, bs_attimer); - start_cbsend(cs); - break; - - case hd_open_b2channel_ack: - ++channel; - /* fall through */ - case hd_open_b1channel_ack: - bcs = cs->bcs + channel; - update_basstate(ucs, bs_b1open << channel, 0); - gigaset_bchannel_up(bcs); - break; - - case hd_open_atchannel_ack: - update_basstate(ucs, bs_atopen, 0); - start_cbsend(cs); - break; - - case hd_close_b2channel_ack: - ++channel; - /* fall through */ - case hd_close_b1channel_ack: - bcs = cs->bcs + channel; - update_basstate(ucs, 0, bs_b1open << channel); - stopurbs(bcs->hw.bas); - gigaset_bchannel_down(bcs); - break; - - case hd_close_atchannel_ack: - update_basstate(ucs, 0, bs_atopen); - break; - - case hd_b2_flow_control: - ++channel; - /* fall through */ - case hd_b1_flow_control: - bcs = cs->bcs + channel; - atomic_add((l - bas_normframe) * bas_corrframes, - &bcs->hw.bas->corrbytes); - gig_dbg(debug_iso, - "flow control (channel %d, sub %d): 0x%02x => %d", - channel, bcs->hw.bas->numsub, l, - atomic_read(&bcs->hw.bas->corrbytes)); - break; - - case hd_receiveatdata_ack: /* at response ready to be received */ - if (!l) { - dev_warn(cs->dev, - "hd_receiveatdata_ack with length 0 ignored "); - break; - } - spin_lock_irqsave(&cs->lock, flags); - if (ucs->basstate & bs_atrdpend) { - spin_unlock_irqrestore(&cs->lock, flags); - dev_warn(cs->dev, - "hd_receiveatdata_ack(%d) during hd_read_atmessage(%d) ignored ", - l, ucs->rcvbuf_size); - break; - } - if (ucs->rcvbuf_size) { - /* throw away previous buffer - we have no queue */ - dev_err(cs->dev, - "receive at data overrun, %d bytes lost ", - ucs->rcvbuf_size); - kfree(ucs->rcvbuf); - ucs->rcvbuf_size = 0; - } - ucs->rcvbuf = kmalloc(l, gfp_atomic); - if (ucs->rcvbuf == null) { - spin_unlock_irqrestore(&cs->lock, flags); - dev_err(cs->dev, "out of memory receiving at 
data "); - break; - } - ucs->rcvbuf_size = l; - ucs->retry_cmd_in = 0; - rc = atread_submit(cs, bas_timeout); - if (rc < 0) { - kfree(ucs->rcvbuf); - ucs->rcvbuf = null; - ucs->rcvbuf_size = 0; - } - spin_unlock_irqrestore(&cs->lock, flags); - if (rc < 0 && rc != -enodev) - error_reset(cs); - break; - - case hd_reset_interrupt_pipe_ack: - update_basstate(ucs, 0, bs_resetting); - dev_notice(cs->dev, "interrupt pipe reset "); - break; - - case hd_suspend_end: - gig_dbg(debug_usbreq, "hd_suspend_end"); - break; - - default: - dev_warn(cs->dev, - "unknown gigaset signal 0x%02x (%u) ignored ", - (int) ucs->int_in_buf[0], l); - } - - check_pending(ucs); - wake_up(&ucs->waitqueue); - -resubmit: - rc = usb_submit_urb(urb, gfp_atomic); - if (unlikely(rc != 0 && rc != -enodev)) { - dev_err(cs->dev, "could not resubmit interrupt urb: %s ", - get_usb_rcmsg(rc)); - error_reset(cs); - } -} - -/* read_iso_callback - * usb completion handler for b channel isochronous input - * called by the usb subsystem in interrupt context - * parameter: - * urb usb request block of completed request - * urb->context = bc_state structure - */ -static void read_iso_callback(struct urb *urb) -{ - struct bc_state *bcs; - struct bas_bc_state *ubc; - int status = urb->status; - unsigned long flags; - int i, rc; - - /* status codes not worth bothering the tasklet with */ - if (unlikely(status == -enoent || - status == -econnreset || - status == -einprogress || - status == -enodev || - status == -eshutdown)) { - gig_dbg(debug_iso, "%s: %s", - __func__, get_usb_statmsg(status)); - return; - } - - bcs = urb->context; - ubc = bcs->hw.bas; - - spin_lock_irqsave(&ubc->isoinlock, flags); - if (likely(ubc->isoindone == null)) { - /* pass urb to tasklet */ - ubc->isoindone = urb; - ubc->isoinstatus = status; - tasklet_hi_schedule(&ubc->rcvd_tasklet); - } else { - /* tasklet still busy, drop data and resubmit urb */ - gig_dbg(debug_iso, "%s: overrun", __func__); - ubc->loststatus = status; - for (i = 0; i < 
bas_numframes; i++) { - ubc->isoinlost += urb->iso_frame_desc[i].actual_length; - if (unlikely(urb->iso_frame_desc[i].status != 0 && - urb->iso_frame_desc[i].status != -einprogress)) - ubc->loststatus = urb->iso_frame_desc[i].status; - urb->iso_frame_desc[i].status = 0; - urb->iso_frame_desc[i].actual_length = 0; - } - if (likely(ubc->running)) { - /* urb->dev is clobbered by usb subsystem */ - urb->dev = bcs->cs->hw.bas->udev; - urb->transfer_flags = urb_iso_asap; - urb->number_of_packets = bas_numframes; - rc = usb_submit_urb(urb, gfp_atomic); - if (unlikely(rc != 0 && rc != -enodev)) { - dev_err(bcs->cs->dev, - "could not resubmit isoc read urb: %s ", - get_usb_rcmsg(rc)); - dump_urb(debug_iso, "isoc read", urb); - error_hangup(bcs); - } - } - } - spin_unlock_irqrestore(&ubc->isoinlock, flags); -} - -/* write_iso_callback - * usb completion handler for b channel isochronous output - * called by the usb subsystem in interrupt context - * parameter: - * urb usb request block of completed request - * urb->context = isow_urbctx_t structure - */ -static void write_iso_callback(struct urb *urb) -{ - struct isow_urbctx_t *ucx; - struct bas_bc_state *ubc; - int status = urb->status; - unsigned long flags; - - /* status codes not worth bothering the tasklet with */ - if (unlikely(status == -enoent || - status == -econnreset || - status == -einprogress || - status == -enodev || - status == -eshutdown)) { - gig_dbg(debug_iso, "%s: %s", - __func__, get_usb_statmsg(status)); - return; - } - - /* pass urb context to tasklet */ - ucx = urb->context; - ubc = ucx->bcs->hw.bas; - ucx->status = status; - - spin_lock_irqsave(&ubc->isooutlock, flags); - ubc->isooutovfl = ubc->isooutdone; - ubc->isooutdone = ucx; - spin_unlock_irqrestore(&ubc->isooutlock, flags); - tasklet_hi_schedule(&ubc->sent_tasklet); -} - -/* starturbs - * prepare and submit usb request blocks for isochronous input and output - * argument: - * b channel control structure - * return value: - * 0 on success - * < 
0 on error (no urbs submitted) - */ -static int starturbs(struct bc_state *bcs) -{ - struct usb_device *udev = bcs->cs->hw.bas->udev; - struct bas_bc_state *ubc = bcs->hw.bas; - struct urb *urb; - int j, k; - int rc; - - /* initialize l2 reception */ - if (bcs->proto2 == l2_hdlc) - bcs->inputstate |= ins_flag_hunt; - - /* submit all isochronous input urbs */ - ubc->running = 1; - for (k = 0; k < bas_inurbs; k++) { - urb = ubc->isoinurbs[k]; - if (!urb) { - rc = -efault; - goto error; - } - usb_fill_int_urb(urb, udev, - usb_rcvisocpipe(udev, 3 + 2 * bcs->channel), - ubc->isoinbuf + k * bas_inbufsize, - bas_inbufsize, read_iso_callback, bcs, - bas_frametime); - - urb->transfer_flags = urb_iso_asap; - urb->number_of_packets = bas_numframes; - for (j = 0; j < bas_numframes; j++) { - urb->iso_frame_desc[j].offset = j * bas_maxframe; - urb->iso_frame_desc[j].length = bas_maxframe; - urb->iso_frame_desc[j].status = 0; - urb->iso_frame_desc[j].actual_length = 0; - } - - dump_urb(debug_iso, "initial isoc read", urb); - rc = usb_submit_urb(urb, gfp_atomic); - if (rc != 0) - goto error; - } - - /* initialize l2 transmission */ - gigaset_isowbuf_init(ubc->isooutbuf, ppp_flag); - - /* set up isochronous output urbs for flag idling */ - for (k = 0; k < bas_outurbs; ++k) { - urb = ubc->isoouturbs[k].urb; - if (!urb) { - rc = -efault; - goto error; - } - usb_fill_int_urb(urb, udev, - usb_sndisocpipe(udev, 4 + 2 * bcs->channel), - ubc->isooutbuf->data, - sizeof(ubc->isooutbuf->data), - write_iso_callback, &ubc->isoouturbs[k], - bas_frametime); - - urb->transfer_flags = urb_iso_asap; - urb->number_of_packets = bas_numframes; - for (j = 0; j < bas_numframes; ++j) { - urb->iso_frame_desc[j].offset = bas_outbufsize; - urb->iso_frame_desc[j].length = bas_normframe; - urb->iso_frame_desc[j].status = 0; - urb->iso_frame_desc[j].actual_length = 0; - } - ubc->isoouturbs[k].limit = -1; - } - - /* keep one urb free, submit the others */ - for (k = 0; k < bas_outurbs - 1; ++k) { - 
dump_urb(debug_iso, "initial isoc write", urb); - rc = usb_submit_urb(ubc->isoouturbs[k].urb, gfp_atomic); - if (rc != 0) - goto error; - } - dump_urb(debug_iso, "initial isoc write (free)", urb); - ubc->isooutfree = &ubc->isoouturbs[bas_outurbs - 1]; - ubc->isooutdone = ubc->isooutovfl = null; - return 0; -error: - stopurbs(ubc); - return rc; -} - -/* stopurbs - * cancel the usb request blocks for isochronous input and output - * errors are silently ignored - * argument: - * b channel control structure - */ -static void stopurbs(struct bas_bc_state *ubc) -{ - int k, rc; - - ubc->running = 0; - - for (k = 0; k < bas_inurbs; ++k) { - rc = usb_unlink_urb(ubc->isoinurbs[k]); - gig_dbg(debug_iso, - "%s: isoc input urb %d unlinked, result = %s", - __func__, k, get_usb_rcmsg(rc)); - } - - for (k = 0; k < bas_outurbs; ++k) { - rc = usb_unlink_urb(ubc->isoouturbs[k].urb); - gig_dbg(debug_iso, - "%s: isoc output urb %d unlinked, result = %s", - __func__, k, get_usb_rcmsg(rc)); - } -} - -/* isochronous write - bottom half */ -/* =============================== */ - -/* submit_iso_write_urb - * fill and submit the next isochronous write urb - * parameters: - * ucx context structure containing urb - * return value: - * number of frames submitted in urb - * 0 if urb not submitted because no data available (isooutbuf busy) - * error code < 0 on error - */ -static int submit_iso_write_urb(struct isow_urbctx_t *ucx) -{ - struct urb *urb = ucx->urb; - struct bas_bc_state *ubc = ucx->bcs->hw.bas; - struct usb_iso_packet_descriptor *ifd; - int corrbytes, nframe, rc; - - /* urb->dev is clobbered by usb subsystem */ - urb->dev = ucx->bcs->cs->hw.bas->udev; - urb->transfer_flags = urb_iso_asap; - urb->transfer_buffer = ubc->isooutbuf->data; - urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); - - for (nframe = 0; nframe < bas_numframes; nframe++) { - ifd = &urb->iso_frame_desc[nframe]; - - /* compute frame length according to flow control */ - ifd->length = bas_normframe; - 
corrbytes = atomic_read(&ubc->corrbytes); - if (corrbytes != 0) { - gig_dbg(debug_iso, "%s: corrbytes=%d", - __func__, corrbytes); - if (corrbytes > bas_highframe - bas_normframe) - corrbytes = bas_highframe - bas_normframe; - else if (corrbytes < bas_lowframe - bas_normframe) - corrbytes = bas_lowframe - bas_normframe; - ifd->length += corrbytes; - atomic_add(-corrbytes, &ubc->corrbytes); - } - - /* retrieve block of data to send */ - rc = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length); - if (rc < 0) { - if (rc == -ebusy) { - gig_dbg(debug_iso, - "%s: buffer busy at frame %d", - __func__, nframe); - /* tasklet will be restarted from - gigaset_isoc_send_skb() */ - } else { - dev_err(ucx->bcs->cs->dev, - "%s: buffer error %d at frame %d ", - __func__, rc, nframe); - return rc; - } - break; - } - ifd->offset = rc; - ucx->limit = ubc->isooutbuf->nextread; - ifd->status = 0; - ifd->actual_length = 0; - } - if (unlikely(nframe == 0)) - return 0; /* no data to send */ - urb->number_of_packets = nframe; - - rc = usb_submit_urb(urb, gfp_atomic); - if (unlikely(rc)) { - if (rc == -enodev) - /* device removed - give up silently */ - gig_dbg(debug_iso, "%s: disconnected", __func__); - else - dev_err(ucx->bcs->cs->dev, - "could not submit isoc write urb: %s ", - get_usb_rcmsg(rc)); - return rc; - } - ++ubc->numsub; - return nframe; -} - -/* write_iso_tasklet - * tasklet scheduled when an isochronous output urb from the gigaset device - * has completed - * parameter: - * data b channel state structure - */ -static void write_iso_tasklet(unsigned long data) -{ - struct bc_state *bcs = (struct bc_state *) data; - struct bas_bc_state *ubc = bcs->hw.bas; - struct cardstate *cs = bcs->cs; - struct isow_urbctx_t *done, *next, *ovfl; - struct urb *urb; - int status; - struct usb_iso_packet_descriptor *ifd; - unsigned long flags; - int i; - struct sk_buff *skb; - int len; - int rc; - - /* loop while completed urbs arrive in time */ - for (;;) { - if (unlikely(!(ubc->running))) { 
- gig_dbg(debug_iso, "%s: not running", __func__); - return; - } - - /* retrieve completed urbs */ - spin_lock_irqsave(&ubc->isooutlock, flags); - done = ubc->isooutdone; - ubc->isooutdone = null; - ovfl = ubc->isooutovfl; - ubc->isooutovfl = null; - spin_unlock_irqrestore(&ubc->isooutlock, flags); - if (ovfl) { - dev_err(cs->dev, "isoc write underrun "); - error_hangup(bcs); - break; - } - if (!done) - break; - - /* submit free urb if available */ - spin_lock_irqsave(&ubc->isooutlock, flags); - next = ubc->isooutfree; - ubc->isooutfree = null; - spin_unlock_irqrestore(&ubc->isooutlock, flags); - if (next) { - rc = submit_iso_write_urb(next); - if (unlikely(rc <= 0 && rc != -enodev)) { - /* could not submit urb, put it back */ - spin_lock_irqsave(&ubc->isooutlock, flags); - if (ubc->isooutfree == null) { - ubc->isooutfree = next; - next = null; - } - spin_unlock_irqrestore(&ubc->isooutlock, flags); - if (next) { - /* couldn't put it back */ - dev_err(cs->dev, - "losing isoc write urb "); - error_hangup(bcs); - } - } - } - - /* process completed urb */ - urb = done->urb; - status = done->status; - switch (status) { - case -exdev: /* partial completion */ - gig_dbg(debug_iso, "%s: urb partially completed", - __func__); - /* fall through - what's the difference anyway? 
*/ - case 0: /* normal completion */ - /* inspect individual frames - * assumptions (for lack of documentation): - * - actual_length bytes of first frame in error are - * successfully sent - * - all following frames are not sent at all - */ - for (i = 0; i < bas_numframes; i++) { - ifd = &urb->iso_frame_desc[i]; - if (ifd->status || - ifd->actual_length != ifd->length) { - dev_warn(cs->dev, - "isoc write: frame %d[%d/%d]: %s ", - i, ifd->actual_length, - ifd->length, - get_usb_statmsg(ifd->status)); - break; - } - } - break; - case -epipe: /* stall - probably underrun */ - dev_err(cs->dev, "isoc write: stalled "); - error_hangup(bcs); - break; - default: /* other errors */ - dev_warn(cs->dev, "isoc write: %s ", - get_usb_statmsg(status)); - } - - /* mark the write buffer area covered by this urb as free */ - if (done->limit >= 0) - ubc->isooutbuf->read = done->limit; - - /* mark urb as free */ - spin_lock_irqsave(&ubc->isooutlock, flags); - next = ubc->isooutfree; - ubc->isooutfree = done; - spin_unlock_irqrestore(&ubc->isooutlock, flags); - if (next) { - /* only one urb still active - resubmit one */ - rc = submit_iso_write_urb(next); - if (unlikely(rc <= 0 && rc != -enodev)) { - /* couldn't submit */ - error_hangup(bcs); - } - } - } - - /* process queued skbs */ - while ((skb = skb_dequeue(&bcs->squeue))) { - /* copy to output buffer, doing l2 encapsulation */ - len = skb->len; - if (gigaset_isoc_buildframe(bcs, skb->data, len) == -eagain) { - /* insufficient buffer space, push back onto queue */ - skb_queue_head(&bcs->squeue, skb); - gig_dbg(debug_iso, "%s: skb requeued, qlen=%d", - __func__, skb_queue_len(&bcs->squeue)); - break; - } - skb_pull(skb, len); - gigaset_skb_sent(bcs, skb); - dev_kfree_skb_any(skb); - } -} - -/* isochronous read - bottom half */ -/* ============================== */ - -/* read_iso_tasklet - * tasklet scheduled when an isochronous input urb from the gigaset device - * has completed - * parameter: - * data b channel state structure - 
*/ -static void read_iso_tasklet(unsigned long data) -{ - struct bc_state *bcs = (struct bc_state *) data; - struct bas_bc_state *ubc = bcs->hw.bas; - struct cardstate *cs = bcs->cs; - struct urb *urb; - int status; - struct usb_iso_packet_descriptor *ifd; - char *rcvbuf; - unsigned long flags; - int totleft, numbytes, offset, frame, rc; - - /* loop while more completed urbs arrive in the meantime */ - for (;;) { - /* retrieve urb */ - spin_lock_irqsave(&ubc->isoinlock, flags); - urb = ubc->isoindone; - if (!urb) { - spin_unlock_irqrestore(&ubc->isoinlock, flags); - return; - } - status = ubc->isoinstatus; - ubc->isoindone = null; - if (unlikely(ubc->loststatus != -einprogress)) { - dev_warn(cs->dev, - "isoc read overrun, urb dropped (status: %s, %d bytes) ", - get_usb_statmsg(ubc->loststatus), - ubc->isoinlost); - ubc->loststatus = -einprogress; - } - spin_unlock_irqrestore(&ubc->isoinlock, flags); - - if (unlikely(!(ubc->running))) { - gig_dbg(debug_iso, - "%s: channel not running, " - "dropped urb with status: %s", - __func__, get_usb_statmsg(status)); - return; - } - - switch (status) { - case 0: /* normal completion */ - break; - case -exdev: /* inspect individual frames - (we do that anyway) */ - gig_dbg(debug_iso, "%s: urb partially completed", - __func__); - break; - case -enoent: - case -econnreset: - case -einprogress: - gig_dbg(debug_iso, "%s: %s", - __func__, get_usb_statmsg(status)); - continue; /* -> skip */ - case -epipe: - dev_err(cs->dev, "isoc read: stalled "); - error_hangup(bcs); - continue; /* -> skip */ - default: /* other error */ - dev_warn(cs->dev, "isoc read: %s ", - get_usb_statmsg(status)); - goto error; - } - - rcvbuf = urb->transfer_buffer; - totleft = urb->actual_length; - for (frame = 0; totleft > 0 && frame < bas_numframes; frame++) { - ifd = &urb->iso_frame_desc[frame]; - numbytes = ifd->actual_length; - switch (ifd->status) { - case 0: /* success */ - break; - case -eproto: /* protocol error or unplug */ - case -eilseq: - case 
-etime: - /* probably just disconnected, ignore */ - gig_dbg(debug_iso, - "isoc read: frame %d[%d]: %s ", - frame, numbytes, - get_usb_statmsg(ifd->status)); - break; - default: /* other error */ - /* report, assume transferred bytes are ok */ - dev_warn(cs->dev, - "isoc read: frame %d[%d]: %s ", - frame, numbytes, - get_usb_statmsg(ifd->status)); - } - if (unlikely(numbytes > bas_maxframe)) - dev_warn(cs->dev, - "isoc read: frame %d[%d]: %s ", - frame, numbytes, - "exceeds max frame size"); - if (unlikely(numbytes > totleft)) { - dev_warn(cs->dev, - "isoc read: frame %d[%d]: %s ", - frame, numbytes, - "exceeds total transfer length"); - numbytes = totleft; - } - offset = ifd->offset; - if (unlikely(offset + numbytes > bas_inbufsize)) { - dev_warn(cs->dev, - "isoc read: frame %d[%d]: %s ", - frame, numbytes, - "exceeds end of buffer"); - numbytes = bas_inbufsize - offset; - } - gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); - totleft -= numbytes; - } - if (unlikely(totleft > 0)) - dev_warn(cs->dev, "isoc read: %d data bytes missing ", - totleft); - -error: - /* urb processed, resubmit */ - for (frame = 0; frame < bas_numframes; frame++) { - urb->iso_frame_desc[frame].status = 0; - urb->iso_frame_desc[frame].actual_length = 0; - } - /* urb->dev is clobbered by usb subsystem */ - urb->dev = bcs->cs->hw.bas->udev; - urb->transfer_flags = urb_iso_asap; - urb->number_of_packets = bas_numframes; - rc = usb_submit_urb(urb, gfp_atomic); - if (unlikely(rc != 0 && rc != -enodev)) { - dev_err(cs->dev, - "could not resubmit isoc read urb: %s ", - get_usb_rcmsg(rc)); - dump_urb(debug_iso, "resubmit isoc read", urb); - error_hangup(bcs); - } - } -} - -/* channel operations */ -/* ================== */ - -/* req_timeout - * timeout routine for control output request - * argument: - * controller state structure - */ -static void req_timeout(struct timer_list *t) -{ - struct bas_cardstate *ucs = from_timer(ucs, t, timer_ctrl); - struct cardstate *cs = ucs->cs; - int pending; 
- unsigned long flags; - - check_pending(ucs); - - spin_lock_irqsave(&ucs->lock, flags); - pending = ucs->pending; - ucs->pending = 0; - spin_unlock_irqrestore(&ucs->lock, flags); - - switch (pending) { - case 0: /* no pending request */ - gig_dbg(debug_usbreq, "%s: no request pending", __func__); - break; - - case hd_open_atchannel: - dev_err(cs->dev, "timeout opening at channel "); - error_reset(cs); - break; - - case hd_open_b1channel: - dev_err(cs->dev, "timeout opening channel 1 "); - error_hangup(&cs->bcs[0]); - break; - - case hd_open_b2channel: - dev_err(cs->dev, "timeout opening channel 2 "); - error_hangup(&cs->bcs[1]); - break; - - case hd_close_atchannel: - dev_err(cs->dev, "timeout closing at channel "); - error_reset(cs); - break; - - case hd_close_b1channel: - dev_err(cs->dev, "timeout closing channel 1 "); - error_reset(cs); - break; - - case hd_close_b2channel: - dev_err(cs->dev, "timeout closing channel 2 "); - error_reset(cs); - break; - - case hd_reset_interrupt_pipe: - /* error recovery escalation */ - dev_err(cs->dev, - "reset interrupt pipe timeout, attempting usb reset "); - usb_queue_reset_device(ucs->interface); - break; - - default: - dev_warn(cs->dev, "request 0x%02x timed out, clearing ", - pending); - } - - wake_up(&ucs->waitqueue); -} - -/* write_ctrl_callback - * usb completion handler for control pipe output - * called by the usb subsystem in interrupt context - * parameter: - * urb usb request block of completed request - * urb->context = hardware specific controller state structure - */ -static void write_ctrl_callback(struct urb *urb) -{ - struct bas_cardstate *ucs = urb->context; - int status = urb->status; - int rc; - unsigned long flags; - - /* check status */ - switch (status) { - case 0: /* normal completion */ - spin_lock_irqsave(&ucs->lock, flags); - switch (ucs->pending) { - case hd_device_init_ack: /* no reply expected */ - del_timer(&ucs->timer_ctrl); - ucs->pending = 0; - break; - } - spin_unlock_irqrestore(&ucs->lock, 
flags); - return; - - case -enoent: /* cancelled */ - case -econnreset: /* cancelled (async) */ - case -einprogress: /* pending */ - case -enodev: /* device removed */ - case -eshutdown: /* device shut down */ - /* ignore silently */ - gig_dbg(debug_usbreq, "%s: %s", - __func__, get_usb_statmsg(status)); - break; - - default: /* any failure */ - /* don't retry if suspend requested */ - if (++ucs->retry_ctrl > bas_retry || - (ucs->basstate & bs_suspend)) { - dev_err(&ucs->interface->dev, - "control request 0x%02x failed: %s ", - ucs->dr_ctrl.brequest, - get_usb_statmsg(status)); - break; /* give up */ - } - dev_notice(&ucs->interface->dev, - "control request 0x%02x: %s, retry %d ", - ucs->dr_ctrl.brequest, get_usb_statmsg(status), - ucs->retry_ctrl); - /* urb->dev is clobbered by usb subsystem */ - urb->dev = ucs->udev; - rc = usb_submit_urb(urb, gfp_atomic); - if (unlikely(rc)) { - dev_err(&ucs->interface->dev, - "could not resubmit request 0x%02x: %s ", - ucs->dr_ctrl.brequest, get_usb_rcmsg(rc)); - break; - } - /* resubmitted */ - return; - } - - /* failed, clear pending request */ - spin_lock_irqsave(&ucs->lock, flags); - del_timer(&ucs->timer_ctrl); - ucs->pending = 0; - spin_unlock_irqrestore(&ucs->lock, flags); - wake_up(&ucs->waitqueue); -} - -/* req_submit - * submit a control output request without message buffer to the gigaset base - * and optionally start a timeout - * parameters: - * bcs b channel control structure - * req control request code (hd_*) - * val control request parameter value (set to 0 if unused) - * timeout timeout in seconds (0: no timeout) - * return value: - * 0 on success - * -ebusy if another request is pending - * any urb submission error code - */ -static int req_submit(struct bc_state *bcs, int req, int val, int timeout) -{ - struct bas_cardstate *ucs = bcs->cs->hw.bas; - int ret; - unsigned long flags; - - gig_dbg(debug_usbreq, "-------> 0x%02x (%d)", req, val); - - spin_lock_irqsave(&ucs->lock, flags); - if (ucs->pending) { - 
spin_unlock_irqrestore(&ucs->lock, flags); - dev_err(bcs->cs->dev, - "submission of request 0x%02x failed: " - "request 0x%02x still pending ", - req, ucs->pending); - return -ebusy; - } - - ucs->dr_ctrl.brequesttype = out_vendor_req; - ucs->dr_ctrl.brequest = req; - ucs->dr_ctrl.wvalue = cpu_to_le16(val); - ucs->dr_ctrl.windex = 0; - ucs->dr_ctrl.wlength = 0; - usb_fill_control_urb(ucs->urb_ctrl, ucs->udev, - usb_sndctrlpipe(ucs->udev, 0), - (unsigned char *) &ucs->dr_ctrl, null, 0, - write_ctrl_callback, ucs); - ucs->retry_ctrl = 0; - ret = usb_submit_urb(ucs->urb_ctrl, gfp_atomic); - if (unlikely(ret)) { - dev_err(bcs->cs->dev, "could not submit request 0x%02x: %s ", - req, get_usb_rcmsg(ret)); - spin_unlock_irqrestore(&ucs->lock, flags); - return ret; - } - ucs->pending = req; - - if (timeout > 0) { - gig_dbg(debug_usbreq, "setting timeout of %d/10 secs", timeout); - mod_timer(&ucs->timer_ctrl, jiffies + timeout * hz / 10); - } - - spin_unlock_irqrestore(&ucs->lock, flags); - return 0; -} - -/* gigaset_init_bchannel - * called by common.c to connect a b channel - * initialize isochronous i/o and tell the gigaset base to open the channel - * argument: - * b channel control structure - * return value: - * 0 on success, error code < 0 on error - */ -static int gigaset_init_bchannel(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - int req, ret; - unsigned long flags; - - spin_lock_irqsave(&cs->lock, flags); - if (unlikely(!cs->connected)) { - gig_dbg(debug_usbreq, "%s: not connected", __func__); - spin_unlock_irqrestore(&cs->lock, flags); - return -enodev; - } - - if (cs->hw.bas->basstate & bs_suspend) { - dev_notice(cs->dev, - "not starting isoc i/o, suspend in progress "); - spin_unlock_irqrestore(&cs->lock, flags); - return -ehostunreach; - } - - ret = starturbs(bcs); - if (ret < 0) { - spin_unlock_irqrestore(&cs->lock, flags); - dev_err(cs->dev, - "could not start isoc i/o for channel b%d: %s ", - bcs->channel + 1, - ret == -efault ? 
"null urb" : get_usb_rcmsg(ret)); - if (ret != -enodev) - error_hangup(bcs); - return ret; - } - - req = bcs->channel ? hd_open_b2channel : hd_open_b1channel; - ret = req_submit(bcs, req, 0, bas_timeout); - if (ret < 0) { - dev_err(cs->dev, "could not open channel b%d ", - bcs->channel + 1); - stopurbs(bcs->hw.bas); - } - - spin_unlock_irqrestore(&cs->lock, flags); - if (ret < 0 && ret != -enodev) - error_hangup(bcs); - return ret; -} - -/* gigaset_close_bchannel - * called by common.c to disconnect a b channel - * tell the gigaset base to close the channel - * stopping isochronous i/o and ll notification will be done when the - * acknowledgement for the close arrives - * argument: - * b channel control structure - * return value: - * 0 on success, error code < 0 on error - */ -static int gigaset_close_bchannel(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - int req, ret; - unsigned long flags; - - spin_lock_irqsave(&cs->lock, flags); - if (unlikely(!cs->connected)) { - spin_unlock_irqrestore(&cs->lock, flags); - gig_dbg(debug_usbreq, "%s: not connected", __func__); - return -enodev; - } - - if (!(cs->hw.bas->basstate & (bcs->channel ? bs_b2open : bs_b1open))) { - /* channel not running: just signal common.c */ - spin_unlock_irqrestore(&cs->lock, flags); - gigaset_bchannel_down(bcs); - return 0; - } - - /* channel running: tell device to close it */ - req = bcs->channel ? 
hd_close_b2channel : hd_close_b1channel; - ret = req_submit(bcs, req, 0, bas_timeout); - if (ret < 0) - dev_err(cs->dev, "closing channel b%d failed ", - bcs->channel + 1); - - spin_unlock_irqrestore(&cs->lock, flags); - return ret; -} - -/* device operations */ -/* ================= */ - -/* complete_cb - * unqueue first command buffer from queue, waking any sleepers - * must be called with cs->cmdlock held - * parameter: - * cs controller state structure - */ -static void complete_cb(struct cardstate *cs) -{ - struct cmdbuf_t *cb = cs->cmdbuf; - - /* unqueue completed buffer */ - cs->cmdbytes -= cs->curlen; - gig_dbg(debug_output, "write_command: sent %u bytes, %u left", - cs->curlen, cs->cmdbytes); - if (cb->next != null) { - cs->cmdbuf = cb->next; - cs->cmdbuf->prev = null; - cs->curlen = cs->cmdbuf->len; - } else { - cs->cmdbuf = null; - cs->lastcmdbuf = null; - cs->curlen = 0; - } - - if (cb->wake_tasklet) - tasklet_schedule(cb->wake_tasklet); - - kfree(cb); -} - -/* write_command_callback - * usb completion handler for at command transmission - * called by the usb subsystem in interrupt context - * parameter: - * urb usb request block of completed request - * urb->context = controller state structure - */ -static void write_command_callback(struct urb *urb) -{ - struct cardstate *cs = urb->context; - struct bas_cardstate *ucs = cs->hw.bas; - int status = urb->status; - unsigned long flags; - - update_basstate(ucs, 0, bs_atwrpend); - wake_up(&ucs->waitqueue); - - /* check status */ - switch (status) { - case 0: /* normal completion */ - break; - case -enoent: /* cancelled */ - case -econnreset: /* cancelled (async) */ - case -einprogress: /* pending */ - case -enodev: /* device removed */ - case -eshutdown: /* device shut down */ - /* ignore silently */ - gig_dbg(debug_usbreq, "%s: %s", - __func__, get_usb_statmsg(status)); - return; - default: /* any failure */ - if (++ucs->retry_cmd_out > bas_retry) { - dev_warn(cs->dev, - "command write: %s, " - "giving up 
after %d retries ", - get_usb_statmsg(status), - ucs->retry_cmd_out); - break; - } - if (ucs->basstate & bs_suspend) { - dev_warn(cs->dev, - "command write: %s, " - "won't retry - suspend requested ", - get_usb_statmsg(status)); - break; - } - if (cs->cmdbuf == null) { - dev_warn(cs->dev, - "command write: %s, " - "cannot retry - cmdbuf gone ", - get_usb_statmsg(status)); - break; - } - dev_notice(cs->dev, "command write: %s, retry %d ", - get_usb_statmsg(status), ucs->retry_cmd_out); - if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0) - /* resubmitted - bypass regular exit block */ - return; - /* command send failed, assume base still waiting */ - update_basstate(ucs, bs_atready, 0); - } - - spin_lock_irqsave(&cs->cmdlock, flags); - if (cs->cmdbuf != null) - complete_cb(cs); - spin_unlock_irqrestore(&cs->cmdlock, flags); -} - -/* atrdy_timeout - * timeout routine for at command transmission - * argument: - * controller state structure - */ -static void atrdy_timeout(struct timer_list *t) -{ - struct bas_cardstate *ucs = from_timer(ucs, t, timer_atrdy); - struct cardstate *cs = ucs->cs; - - dev_warn(cs->dev, "timeout waiting for hd_ready_send_atdata "); - - /* fake the missing signal - what else can i do? 
*/ - update_basstate(ucs, bs_atready, bs_attimer); - start_cbsend(cs); -} - -/* atwrite_submit - * submit an hd_write_atmessage command urb - * parameters: - * cs controller state structure - * buf buffer containing command to send - * len length of command to send - * return value: - * 0 on success - * -ebusy if another request is pending - * any urb submission error code - */ -static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len) -{ - struct bas_cardstate *ucs = cs->hw.bas; - int rc; - - gig_dbg(debug_usbreq, "-------> hd_write_atmessage (%d)", len); - - if (update_basstate(ucs, bs_atwrpend, 0) & bs_atwrpend) { - dev_err(cs->dev, - "could not submit hd_write_atmessage: urb busy "); - return -ebusy; - } - - ucs->dr_cmd_out.brequesttype = out_vendor_req; - ucs->dr_cmd_out.brequest = hd_write_atmessage; - ucs->dr_cmd_out.wvalue = 0; - ucs->dr_cmd_out.windex = 0; - ucs->dr_cmd_out.wlength = cpu_to_le16(len); - usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev, - usb_sndctrlpipe(ucs->udev, 0), - (unsigned char *) &ucs->dr_cmd_out, buf, len, - write_command_callback, cs); - rc = usb_submit_urb(ucs->urb_cmd_out, gfp_atomic); - if (unlikely(rc)) { - update_basstate(ucs, 0, bs_atwrpend); - dev_err(cs->dev, "could not submit hd_write_atmessage: %s ", - get_usb_rcmsg(rc)); - return rc; - } - - /* submitted successfully, start timeout if necessary */ - if (!(update_basstate(ucs, bs_attimer, bs_atready) & bs_attimer)) { - gig_dbg(debug_output, "setting atready timeout of %d/10 secs", - atrdy_timeout); - mod_timer(&ucs->timer_atrdy, jiffies + atrdy_timeout * hz / 10); - } - return 0; -} - -/* start_cbsend - * start transmission of at command queue if necessary - * parameter: - * cs controller state structure - * return value: - * 0 on success - * error code < 0 on error - */ -static int start_cbsend(struct cardstate *cs) -{ - struct cmdbuf_t *cb; - struct bas_cardstate *ucs = cs->hw.bas; - unsigned long flags; - int rc; - int retval = 0; - - /* check if 
suspend requested */ - if (ucs->basstate & bs_suspend) { - gig_dbg(debug_output, "suspending"); - return -ehostunreach; - } - - /* check if at channel is open */ - if (!(ucs->basstate & bs_atopen)) { - gig_dbg(debug_output, "at channel not open"); - rc = req_submit(cs->bcs, hd_open_atchannel, 0, bas_timeout); - if (rc < 0) { - /* flush command queue */ - spin_lock_irqsave(&cs->cmdlock, flags); - while (cs->cmdbuf != null) - complete_cb(cs); - spin_unlock_irqrestore(&cs->cmdlock, flags); - } - return rc; - } - - /* try to send first command in queue */ - spin_lock_irqsave(&cs->cmdlock, flags); - - while ((cb = cs->cmdbuf) != null && (ucs->basstate & bs_atready)) { - ucs->retry_cmd_out = 0; - rc = atwrite_submit(cs, cb->buf, cb->len); - if (unlikely(rc)) { - retval = rc; - complete_cb(cs); - } - } - - spin_unlock_irqrestore(&cs->cmdlock, flags); - return retval; -} - -/* gigaset_write_cmd - * this function is called by the device independent part of the driver - * to transmit an at command string to the gigaset device. - * it encapsulates the device specific method for transmission over the - * direct usb connection to the base. - * the command string is added to the queue of commands to send, and - * usb transmission is started if necessary. - * parameters: - * cs controller state structure - * cb command buffer structure - * return value: - * number of bytes queued on success - * error code < 0 on error - */ -static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) -{ - unsigned long flags; - int rc; - - gigaset_dbg_buffer(cs->mstate != ms_locked ? - debug_transcmd : debug_lockcmd, - "cmd transmit", cb->len, cb->buf); - - /* translate "+++" escape sequence sent as a single separate command - * into "close at channel" command for error recovery - * the next command will reopen the at channel automatically. 
- */ - if (cb->len == 3 && !memcmp(cb->buf, "+++", 3)) { - /* if an hd_receiveatdata_ack message remains unhandled - * because of an error, the base never sends another one. - * the response channel is thus effectively blocked. - * closing and reopening the at channel does *not* clear - * this condition. - * as a stopgap measure, submit a zero-length at read - * before closing the at channel. this has the undocumented - * effect of triggering a new hd_receiveatdata_ack message - * from the base if necessary. - * the subsequent at channel close then discards any pending - * messages. - */ - spin_lock_irqsave(&cs->lock, flags); - if (!(cs->hw.bas->basstate & bs_atrdpend)) { - kfree(cs->hw.bas->rcvbuf); - cs->hw.bas->rcvbuf = null; - cs->hw.bas->rcvbuf_size = 0; - cs->hw.bas->retry_cmd_in = 0; - atread_submit(cs, 0); - } - spin_unlock_irqrestore(&cs->lock, flags); - - rc = req_submit(cs->bcs, hd_close_atchannel, 0, bas_timeout); - if (cb->wake_tasklet) - tasklet_schedule(cb->wake_tasklet); - if (!rc) - rc = cb->len; - kfree(cb); - return rc; - } - - spin_lock_irqsave(&cs->cmdlock, flags); - cb->prev = cs->lastcmdbuf; - if (cs->lastcmdbuf) - cs->lastcmdbuf->next = cb; - else { - cs->cmdbuf = cb; - cs->curlen = cb->len; - } - cs->cmdbytes += cb->len; - cs->lastcmdbuf = cb; - spin_unlock_irqrestore(&cs->cmdlock, flags); - - spin_lock_irqsave(&cs->lock, flags); - if (unlikely(!cs->connected)) { - spin_unlock_irqrestore(&cs->lock, flags); - gig_dbg(debug_usbreq, "%s: not connected", __func__); - /* flush command queue */ - spin_lock_irqsave(&cs->cmdlock, flags); - while (cs->cmdbuf != null) - complete_cb(cs); - spin_unlock_irqrestore(&cs->cmdlock, flags); - return -enodev; - } - rc = start_cbsend(cs); - spin_unlock_irqrestore(&cs->lock, flags); - return rc < 0 ? 
rc : cb->len; -} - -/* gigaset_write_room - * tty_driver.write_room interface routine - * return number of characters the driver will accept to be written via - * gigaset_write_cmd - * parameter: - * controller state structure - * return value: - * number of characters - */ -static int gigaset_write_room(struct cardstate *cs) -{ - return if_writebuf; -} - -/* gigaset_chars_in_buffer - * tty_driver.chars_in_buffer interface routine - * return number of characters waiting to be sent - * parameter: - * controller state structure - * return value: - * number of characters - */ -static int gigaset_chars_in_buffer(struct cardstate *cs) -{ - return cs->cmdbytes; -} - -/* gigaset_brkchars - * implementation of ioctl(gigaset_brkchars) - * parameter: - * controller state structure - * return value: - * -einval (unimplemented function) - */ -static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) -{ - return -einval; -} - - -/* device initialization/shutdown */ -/* ============================== */ - -/* free hardware dependent part of the b channel structure - * parameter: - * bcs b channel structure - */ -static void gigaset_freebcshw(struct bc_state *bcs) -{ - struct bas_bc_state *ubc = bcs->hw.bas; - int i; - - if (!ubc) - return; - - /* kill urbs and tasklets before freeing - better safe than sorry */ - ubc->running = 0; - gig_dbg(debug_init, "%s: killing isoc urbs", __func__); - for (i = 0; i < bas_outurbs; ++i) { - usb_kill_urb(ubc->isoouturbs[i].urb); - usb_free_urb(ubc->isoouturbs[i].urb); - } - for (i = 0; i < bas_inurbs; ++i) { - usb_kill_urb(ubc->isoinurbs[i]); - usb_free_urb(ubc->isoinurbs[i]); - } - tasklet_kill(&ubc->sent_tasklet); - tasklet_kill(&ubc->rcvd_tasklet); - kfree(ubc->isooutbuf); - kfree(ubc); - bcs->hw.bas = null; -} - -/* initialize hardware dependent part of the b channel structure - * parameter: - * bcs b channel structure - * return value: - * 0 on success, error code < 0 on failure - */ -static int 
gigaset_initbcshw(struct bc_state *bcs) -{ - int i; - struct bas_bc_state *ubc; - - bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), gfp_kernel); - if (!ubc) { - pr_err("out of memory "); - return -enomem; - } - - ubc->running = 0; - atomic_set(&ubc->corrbytes, 0); - spin_lock_init(&ubc->isooutlock); - for (i = 0; i < bas_outurbs; ++i) { - ubc->isoouturbs[i].urb = null; - ubc->isoouturbs[i].bcs = bcs; - } - ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = null; - ubc->numsub = 0; - ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), gfp_kernel); - if (!ubc->isooutbuf) { - pr_err("out of memory "); - kfree(ubc); - bcs->hw.bas = null; - return -enomem; - } - tasklet_init(&ubc->sent_tasklet, - write_iso_tasklet, (unsigned long) bcs); - - spin_lock_init(&ubc->isoinlock); - for (i = 0; i < bas_inurbs; ++i) - ubc->isoinurbs[i] = null; - ubc->isoindone = null; - ubc->loststatus = -einprogress; - ubc->isoinlost = 0; - ubc->seqlen = 0; - ubc->inbyte = 0; - ubc->inbits = 0; - ubc->goodbytes = 0; - ubc->alignerrs = 0; - ubc->fcserrs = 0; - ubc->frameerrs = 0; - ubc->giants = 0; - ubc->runts = 0; - ubc->aborts = 0; - ubc->shared0s = 0; - ubc->stolen0s = 0; - tasklet_init(&ubc->rcvd_tasklet, - read_iso_tasklet, (unsigned long) bcs); - return 0; -} - -static void gigaset_reinitbcshw(struct bc_state *bcs) -{ - struct bas_bc_state *ubc = bcs->hw.bas; - - bcs->hw.bas->running = 0; - atomic_set(&bcs->hw.bas->corrbytes, 0); - bcs->hw.bas->numsub = 0; - spin_lock_init(&ubc->isooutlock); - spin_lock_init(&ubc->isoinlock); - ubc->loststatus = -einprogress; -} - -static void gigaset_freecshw(struct cardstate *cs) -{ - /* timers, urbs and rcvbuf are disposed of in disconnect */ - kfree(cs->hw.bas->int_in_buf); - kfree(cs->hw.bas); - cs->hw.bas = null; -} - -/* initialize hardware dependent part of the cardstate structure - * parameter: - * cs cardstate structure - * return value: - * 0 on success, error code < 0 on failure - */ -static int gigaset_initcshw(struct cardstate 
*cs) -{ - struct bas_cardstate *ucs; - - cs->hw.bas = ucs = kzalloc(sizeof(*ucs), gfp_kernel); - if (!ucs) { - pr_err("out of memory "); - return -enomem; - } - ucs->int_in_buf = kmalloc(ip_msgsize, gfp_kernel); - if (!ucs->int_in_buf) { - kfree(ucs); - pr_err("out of memory "); - return -enomem; - } - - spin_lock_init(&ucs->lock); - ucs->cs = cs; - timer_setup(&ucs->timer_ctrl, req_timeout, 0); - timer_setup(&ucs->timer_atrdy, atrdy_timeout, 0); - timer_setup(&ucs->timer_cmd_in, cmd_in_timeout, 0); - timer_setup(&ucs->timer_int_in, int_in_resubmit, 0); - init_waitqueue_head(&ucs->waitqueue); - init_work(&ucs->int_in_wq, int_in_work); - - return 0; -} - -/* freeurbs - * unlink and deallocate all urbs unconditionally - * caller must make sure that no commands are still in progress - * parameter: - * cs controller state structure - */ -static void freeurbs(struct cardstate *cs) -{ - struct bas_cardstate *ucs = cs->hw.bas; - struct bas_bc_state *ubc; - int i, j; - - gig_dbg(debug_init, "%s: killing urbs", __func__); - for (j = 0; j < bas_channels; ++j) { - ubc = cs->bcs[j].hw.bas; - for (i = 0; i < bas_outurbs; ++i) { - usb_kill_urb(ubc->isoouturbs[i].urb); - usb_free_urb(ubc->isoouturbs[i].urb); - ubc->isoouturbs[i].urb = null; - } - for (i = 0; i < bas_inurbs; ++i) { - usb_kill_urb(ubc->isoinurbs[i]); - usb_free_urb(ubc->isoinurbs[i]); - ubc->isoinurbs[i] = null; - } - } - usb_kill_urb(ucs->urb_int_in); - usb_free_urb(ucs->urb_int_in); - ucs->urb_int_in = null; - usb_kill_urb(ucs->urb_cmd_out); - usb_free_urb(ucs->urb_cmd_out); - ucs->urb_cmd_out = null; - usb_kill_urb(ucs->urb_cmd_in); - usb_free_urb(ucs->urb_cmd_in); - ucs->urb_cmd_in = null; - usb_kill_urb(ucs->urb_ctrl); - usb_free_urb(ucs->urb_ctrl); - ucs->urb_ctrl = null; -} - -/* gigaset_probe - * this function is called when a new usb device is connected. - * it checks whether the new device is handled by this driver. 
- */ -static int gigaset_probe(struct usb_interface *interface, - const struct usb_device_id *id) -{ - struct usb_host_interface *hostif; - struct usb_device *udev = interface_to_usbdev(interface); - struct cardstate *cs = null; - struct bas_cardstate *ucs = null; - struct bas_bc_state *ubc; - struct usb_endpoint_descriptor *endpoint; - int i, j; - int rc; - - gig_dbg(debug_init, - "%s: check if device matches .. (vendor: 0x%x, product: 0x%x)", - __func__, le16_to_cpu(udev->descriptor.idvendor), - le16_to_cpu(udev->descriptor.idproduct)); - - /* set required alternate setting */ - hostif = interface->cur_altsetting; - if (hostif->desc.balternatesetting != 3) { - gig_dbg(debug_init, - "%s: wrong alternate setting %d - trying to switch", - __func__, hostif->desc.balternatesetting); - if (usb_set_interface(udev, hostif->desc.binterfacenumber, 3) - < 0) { - dev_warn(&udev->dev, "usb_set_interface failed, " - "device %d interface %d altsetting %d ", - udev->devnum, hostif->desc.binterfacenumber, - hostif->desc.balternatesetting); - return -enodev; - } - hostif = interface->cur_altsetting; - } - - /* reject application specific interfaces - */ - if (hostif->desc.binterfaceclass != 255) { - dev_warn(&udev->dev, "%s: binterfaceclass == %d ", - __func__, hostif->desc.binterfaceclass); - return -enodev; - } - - if (hostif->desc.bnumendpoints < 1) - return -enodev; - - dev_info(&udev->dev, - "%s: device matched (vendor: 0x%x, product: 0x%x) ", - __func__, le16_to_cpu(udev->descriptor.idvendor), - le16_to_cpu(udev->descriptor.idproduct)); - - /* allocate memory for our device state and initialize it */ - cs = gigaset_initcs(driver, bas_channels, 0, 0, cidmode, - gigaset_modulename); - if (!cs) - return -enodev; - ucs = cs->hw.bas; - - /* save off device structure ptrs for later use */ - usb_get_dev(udev); - ucs->udev = udev; - ucs->interface = interface; - cs->dev = &interface->dev; - - /* allocate urbs: - * - one for the interrupt pipe - * - three for the different uses of 
the default control pipe - * - three for each isochronous pipe - */ - if (!(ucs->urb_int_in = usb_alloc_urb(0, gfp_kernel)) || - !(ucs->urb_cmd_in = usb_alloc_urb(0, gfp_kernel)) || - !(ucs->urb_cmd_out = usb_alloc_urb(0, gfp_kernel)) || - !(ucs->urb_ctrl = usb_alloc_urb(0, gfp_kernel))) - goto allocerr; - - for (j = 0; j < bas_channels; ++j) { - ubc = cs->bcs[j].hw.bas; - for (i = 0; i < bas_outurbs; ++i) - if (!(ubc->isoouturbs[i].urb = - usb_alloc_urb(bas_numframes, gfp_kernel))) - goto allocerr; - for (i = 0; i < bas_inurbs; ++i) - if (!(ubc->isoinurbs[i] = - usb_alloc_urb(bas_numframes, gfp_kernel))) - goto allocerr; - } - - ucs->rcvbuf = null; - ucs->rcvbuf_size = 0; - - /* fill the interrupt urb and send it to the core */ - endpoint = &hostif->endpoint[0].desc; - usb_fill_int_urb(ucs->urb_int_in, udev, - usb_rcvintpipe(udev, - usb_endpoint_num(endpoint)), - ucs->int_in_buf, ip_msgsize, read_int_callback, cs, - endpoint->binterval); - rc = usb_submit_urb(ucs->urb_int_in, gfp_kernel); - if (rc != 0) { - dev_err(cs->dev, "could not submit interrupt urb: %s ", - get_usb_rcmsg(rc)); - goto error; - } - ucs->retry_int_in = 0; - - /* tell the device that the driver is ready */ - rc = req_submit(cs->bcs, hd_device_init_ack, 0, 0); - if (rc != 0) - goto error; - - /* tell common part that the device is ready */ - if (startmode == sm_locked) - cs->mstate = ms_locked; - - /* save address of controller structure */ - usb_set_intfdata(interface, cs); - - rc = gigaset_start(cs); - if (rc < 0) - goto error; - - return 0; - -allocerr: - dev_err(cs->dev, "could not allocate urbs "); - rc = -enomem; -error: - freeurbs(cs); - usb_set_intfdata(interface, null); - usb_put_dev(udev); - gigaset_freecs(cs); - return rc; -} - -/* gigaset_disconnect - * this function is called when the gigaset base is unplugged. 
- */ -static void gigaset_disconnect(struct usb_interface *interface) -{ - struct cardstate *cs; - struct bas_cardstate *ucs; - int j; - - cs = usb_get_intfdata(interface); - - ucs = cs->hw.bas; - - dev_info(cs->dev, "disconnecting gigaset base "); - - /* mark base as not ready, all channels disconnected */ - ucs->basstate = 0; - - /* tell ll all channels are down */ - for (j = 0; j < bas_channels; ++j) - gigaset_bchannel_down(cs->bcs + j); - - /* stop driver (common part) */ - gigaset_stop(cs); - - /* stop delayed work and urbs, free ressources */ - del_timer_sync(&ucs->timer_ctrl); - del_timer_sync(&ucs->timer_atrdy); - del_timer_sync(&ucs->timer_cmd_in); - del_timer_sync(&ucs->timer_int_in); - cancel_work_sync(&ucs->int_in_wq); - freeurbs(cs); - usb_set_intfdata(interface, null); - kfree(ucs->rcvbuf); - ucs->rcvbuf = null; - ucs->rcvbuf_size = 0; - usb_put_dev(ucs->udev); - ucs->interface = null; - ucs->udev = null; - cs->dev = null; - gigaset_freecs(cs); -} - -/* gigaset_suspend - * this function is called before the usb connection is suspended - * or before the usb device is reset. - * in the latter case, message == pmsg_on. 
- */ -static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) -{ - struct cardstate *cs = usb_get_intfdata(intf); - struct bas_cardstate *ucs = cs->hw.bas; - int rc; - - /* set suspend flag; this stops at command/response traffic */ - if (update_basstate(ucs, bs_suspend, 0) & bs_suspend) { - gig_dbg(debug_suspend, "already suspended"); - return 0; - } - - /* wait a bit for blocking conditions to go away */ - rc = wait_event_timeout(ucs->waitqueue, - !(ucs->basstate & - (bs_b1open | bs_b2open | bs_atrdpend | bs_atwrpend)), - bas_timeout * hz / 10); - gig_dbg(debug_suspend, "wait_event_timeout() -> %d", rc); - - /* check for conditions preventing suspend */ - if (ucs->basstate & (bs_b1open | bs_b2open | bs_atrdpend | bs_atwrpend)) { - dev_warn(cs->dev, "cannot suspend: "); - if (ucs->basstate & bs_b1open) - dev_warn(cs->dev, " b channel 1 open "); - if (ucs->basstate & bs_b2open) - dev_warn(cs->dev, " b channel 2 open "); - if (ucs->basstate & bs_atrdpend) - dev_warn(cs->dev, " receiving at reply "); - if (ucs->basstate & bs_atwrpend) - dev_warn(cs->dev, " sending at command "); - update_basstate(ucs, 0, bs_suspend); - return -ebusy; - } - - /* close at channel if open */ - if (ucs->basstate & bs_atopen) { - gig_dbg(debug_suspend, "closing at channel"); - rc = req_submit(cs->bcs, hd_close_atchannel, 0, 0); - if (rc) { - update_basstate(ucs, 0, bs_suspend); - return rc; - } - wait_event_timeout(ucs->waitqueue, !ucs->pending, - bas_timeout * hz / 10); - /* in case of timeout, proceed anyway */ - } - - /* kill all urbs and delayed work that might still be pending */ - usb_kill_urb(ucs->urb_ctrl); - usb_kill_urb(ucs->urb_int_in); - del_timer_sync(&ucs->timer_ctrl); - del_timer_sync(&ucs->timer_atrdy); - del_timer_sync(&ucs->timer_cmd_in); - del_timer_sync(&ucs->timer_int_in); - - /* don't try to cancel int_in_wq from within reset as it - * might be the one requesting the reset - */ - if (message.event != pm_event_on) - 
cancel_work_sync(&ucs->int_in_wq); - - gig_dbg(debug_suspend, "suspend complete"); - return 0; -} - -/* gigaset_resume - * this function is called after the usb connection has been resumed. - */ -static int gigaset_resume(struct usb_interface *intf) -{ - struct cardstate *cs = usb_get_intfdata(intf); - struct bas_cardstate *ucs = cs->hw.bas; - int rc; - - /* resubmit interrupt urb for spontaneous messages from base */ - rc = usb_submit_urb(ucs->urb_int_in, gfp_kernel); - if (rc) { - dev_err(cs->dev, "could not resubmit interrupt urb: %s ", - get_usb_rcmsg(rc)); - return rc; - } - ucs->retry_int_in = 0; - - /* clear suspend flag to reallow activity */ - update_basstate(ucs, 0, bs_suspend); - - gig_dbg(debug_suspend, "resume complete"); - return 0; -} - -/* gigaset_pre_reset - * this function is called before the usb connection is reset. - */ -static int gigaset_pre_reset(struct usb_interface *intf) -{ - /* handle just like suspend */ - return gigaset_suspend(intf, pmsg_on); -} - -/* gigaset_post_reset - * this function is called after the usb connection has been reset. - */ -static int gigaset_post_reset(struct usb_interface *intf) -{ - /* fixme: send hd_device_init_ack? */ - - /* resume operations */ - return gigaset_resume(intf); -} - - -static const struct gigaset_ops gigops = { - .write_cmd = gigaset_write_cmd, - .write_room = gigaset_write_room, - .chars_in_buffer = gigaset_chars_in_buffer, - .brkchars = gigaset_brkchars, - .init_bchannel = gigaset_init_bchannel, - .close_bchannel = gigaset_close_bchannel, - .initbcshw = gigaset_initbcshw, - .freebcshw = gigaset_freebcshw, - .reinitbcshw = gigaset_reinitbcshw, - .initcshw = gigaset_initcshw, - .freecshw = gigaset_freecshw, - .set_modem_ctrl = gigaset_set_modem_ctrl, - .baud_rate = gigaset_baud_rate, - .set_line_ctrl = gigaset_set_line_ctrl, - .send_skb = gigaset_isoc_send_skb, - .handle_input = gigaset_isoc_input, -}; - -/* bas_gigaset_init - * this function is called after the kernel module is loaded. 
- */ -static int __init bas_gigaset_init(void) -{ - int result; - - /* allocate memory for our driver state and initialize it */ - driver = gigaset_initdriver(gigaset_minor, gigaset_minors, - gigaset_modulename, gigaset_devname, - &gigops, this_module); - if (driver == null) - goto error; - - /* register this driver with the usb subsystem */ - result = usb_register(&gigaset_usb_driver); - if (result < 0) { - pr_err("error %d registering usb driver ", -result); - goto error; - } - - pr_info(driver_desc " "); - return 0; - -error: - if (driver) - gigaset_freedriver(driver); - driver = null; - return -1; -} - -/* bas_gigaset_exit - * this function is called before the kernel module is unloaded. - */ -static void __exit bas_gigaset_exit(void) -{ - struct bas_cardstate *ucs; - int i; - - gigaset_blockdriver(driver); /* => probe will fail - * => no gigaset_start any more - */ - - /* stop all connected devices */ - for (i = 0; i < driver->minors; i++) { - if (gigaset_shutdown(driver->cs + i) < 0) - continue; /* no device */ - /* from now on, no isdn callback should be possible */ - - /* close all still open channels */ - ucs = driver->cs[i].hw.bas; - if (ucs->basstate & bs_b1open) { - gig_dbg(debug_init, "closing b1 channel"); - usb_control_msg(ucs->udev, - usb_sndctrlpipe(ucs->udev, 0), - hd_close_b1channel, out_vendor_req, - 0, 0, null, 0, bas_timeout); - } - if (ucs->basstate & bs_b2open) { - gig_dbg(debug_init, "closing b2 channel"); - usb_control_msg(ucs->udev, - usb_sndctrlpipe(ucs->udev, 0), - hd_close_b2channel, out_vendor_req, - 0, 0, null, 0, bas_timeout); - } - if (ucs->basstate & bs_atopen) { - gig_dbg(debug_init, "closing at channel"); - usb_control_msg(ucs->udev, - usb_sndctrlpipe(ucs->udev, 0), - hd_close_atchannel, out_vendor_req, - 0, 0, null, 0, bas_timeout); - } - ucs->basstate = 0; - } - - /* deregister this driver with the usb subsystem */ - usb_deregister(&gigaset_usb_driver); - /* this will call the disconnect-callback */ - /* from now on, no 
disconnect/probe callback should be running */ - - gigaset_freedriver(driver); - driver = null; -} - - -module_init(bas_gigaset_init); -module_exit(bas_gigaset_exit); - -module_author(driver_author); -module_description(driver_desc); -module_license("gpl"); diff --git a/drivers/staging/isdn/gigaset/capi.c b/drivers/staging/isdn/gigaset/capi.c --- a/drivers/staging/isdn/gigaset/capi.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * kernel capi interface for the gigaset driver - * - * copyright (c) 2009 by tilman schmidt <tilman@imap.cc>. - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/ratelimit.h> -#include <linux/isdn/capilli.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/export.h> - -/* missing from kernelcapi.h */ -#define capincpinotsupportedbyprotocol 0x0001 -#define capiflagsnotsupportedbyprotocol 0x0002 -#define capialertalreadysent 0x0003 -#define capifacilityspecificfunctionnotsupported 0x3011 - -/* missing from capicmd.h */ -#define capi_connect_ind_baselen (capi_msg_baselen + 4 + 2 + 8 * 1) -#define capi_connect_active_ind_baselen (capi_msg_baselen + 4 + 3 * 1) -#define capi_connect_b3_ind_baselen (capi_msg_baselen + 4 + 1) -#define capi_connect_b3_active_ind_baselen (capi_msg_baselen + 4 + 1) -#define capi_data_b3_req_len64 (capi_msg_baselen + 4 + 4 + 2 + 2 + 2 + 8) -#define capi_data_b3_conf_len (capi_msg_baselen + 4 + 2 + 2) -#define capi_disconnect_ind_len (capi_msg_baselen + 4 + 2) -#define capi_disconnect_b3_ind_baselen (capi_msg_baselen + 4 + 2 + 1) -#define capi_facility_conf_baselen (capi_msg_baselen + 4 + 2 + 2 + 1) -/* most _conf messages contain only controller/plci/ncci and info parameters */ -#define capi_stdconf_len (capi_msg_baselen + 4 + 2) - -#define capi_facility_handset 
0x0000 -#define capi_facility_dtmf 0x0001 -#define capi_facility_v42bis 0x0002 -#define capi_facility_suppsvc 0x0003 -#define capi_facility_wakeup 0x0004 -#define capi_facility_li 0x0005 - -#define capi_suppsvc_getsupported 0x0000 -#define capi_suppsvc_listen 0x0001 - -/* missing from capiutil.h */ -#define capimsg_plci_part(m) capimsg_u8(m, 9) -#define capimsg_ncci_part(m) capimsg_u16(m, 10) -#define capimsg_handle_req(m) capimsg_u16(m, 18) /* data_b3_req/_ind only! */ -#define capimsg_flags(m) capimsg_u16(m, 20) -#define capimsg_setcontroller(m, contr) capimsg_setu8(m, 8, contr) -#define capimsg_setplci_part(m, plci) capimsg_setu8(m, 9, plci) -#define capimsg_setncci_part(m, ncci) capimsg_setu16(m, 10, ncci) -#define capimsg_setflags(m, flags) capimsg_setu16(m, 20, flags) - -/* parameters with differing location in data_b3_conf/_resp: */ -#define capimsg_sethandle_conf(m, handle) capimsg_setu16(m, 12, handle) -#define capimsg_setinfo_conf(m, info) capimsg_setu16(m, 14, info) - -/* flags (data_b3_req/_ind) */ -#define capi_flags_delivery_confirmation 0x04 -#define capi_flags_reserved (~0x1f) - -/* buffer sizes */ -#define max_bc_octets 11 -#define max_hlc_octets 3 -#define max_number_digits 20 -#define max_fmt_ie_len 20 - -/* values for bcs->apconnstate */ -#define apconn_none 0 /* inactive/listening */ -#define apconn_setup 1 /* connecting */ -#define apconn_active 2 /* b channel up */ - -/* registered application data structure */ -struct gigaset_capi_appl { - struct list_head ctrlist; - struct gigaset_capi_appl *bcnext; - u16 id; - struct capi_register_params rp; - u16 nextmessagenumber; - u32 listeninfomask; - u32 listencipmask; -}; - -/* capi specific controller data structure */ -struct gigaset_capi_ctr { - struct capi_ctr ctr; - struct list_head appls; - struct sk_buff_head sendqueue; - atomic_t sendqlen; - /* two _cmsg structures possibly used concurrently: */ - _cmsg hcmsg; /* for message composition triggered from hardware */ - _cmsg acmsg; /* for 
dissection of messages sent from application */ - u8 bc_buf[max_bc_octets + 1]; - u8 hlc_buf[max_hlc_octets + 1]; - u8 cgpty_buf[max_number_digits + 3]; - u8 cdpty_buf[max_number_digits + 2]; -}; - -/* cip value table (from capi 2.0 standard, ch. 6.1) */ -static struct { - u8 *bc; - u8 *hlc; -} cip2bchlc[] = { - [1] = { "8090a3", null }, /* speech (a-law) */ - [2] = { "8890", null }, /* unrestricted digital information */ - [3] = { "8990", null }, /* restricted digital information */ - [4] = { "9090a3", null }, /* 3,1 khz audio (a-law) */ - [5] = { "9190", null }, /* 7 khz audio */ - [6] = { "9890", null }, /* video */ - [7] = { "88c0c6e6", null }, /* packet mode */ - [8] = { "8890218f", null }, /* 56 kbit/s rate adaptation */ - [9] = { "9190a5", null }, /* unrestricted digital information - * with tones/announcements */ - [16] = { "8090a3", "9181" }, /* telephony */ - [17] = { "9090a3", "9184" }, /* group 2/3 facsimile */ - [18] = { "8890", "91a1" }, /* group 4 facsimile class 1 */ - [19] = { "8890", "91a4" }, /* teletex service basic and mixed mode - * and group 4 facsimile service - * classes ii and iii */ - [20] = { "8890", "91a8" }, /* teletex service basic and - * processable mode */ - [21] = { "8890", "91b1" }, /* teletex service basic mode */ - [22] = { "8890", "91b2" }, /* international interworking for - * videotex */ - [23] = { "8890", "91b5" }, /* telex */ - [24] = { "8890", "91b8" }, /* message handling systems - * in accordance with x.400 */ - [25] = { "8890", "91c1" }, /* osi application - * in accordance with x.200 */ - [26] = { "9190a5", "9181" }, /* 7 khz telephony */ - [27] = { "9190a5", "916001" }, /* video telephony, first connection */ - [28] = { "8890", "916002" }, /* video telephony, second connection */ -}; - -/* - * helper functions - * ================ - */ - -/* - * emit unsupported parameter warning - */ -static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param, - char *msgname, char *paramname) -{ - if (param && 
*param) - dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s ", - msgname, paramname); -} - -/* - * convert an ie from gigaset hex string to etsi binary representation - * including length byte - * return value: result length, -1 on error - */ -static int encode_ie(char *in, u8 *out, int maxlen) -{ - int l = 0; - while (*in) { - if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen) - return -1; - out[++l] = (hex_to_bin(in[0]) << 4) + hex_to_bin(in[1]); - in += 2; - } - out[0] = l; - return l; -} - -/* - * convert an ie from etsi binary representation including length byte - * to gigaset hex string - */ -static void decode_ie(u8 *in, char *out) -{ - int i = *in; - while (i-- > 0) { - /* todo: conversion to upper case necessary? */ - *out++ = toupper(hex_asc_hi(*++in)); - *out++ = toupper(hex_asc_lo(*in)); - } -} - -/* - * retrieve application data structure for an application id - */ -static inline struct gigaset_capi_appl * -get_appl(struct gigaset_capi_ctr *iif, u16 appl) -{ - struct gigaset_capi_appl *ap; - - list_for_each_entry(ap, &iif->appls, ctrlist) - if (ap->id == appl) - return ap; - return null; -} - -/* - * dump capi message to kernel messages for debugging - */ -static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p) -{ -#ifdef config_gigaset_debug - /* dump at most 20 messages in 20 secs */ - static define_ratelimit_state(msg_dump_ratelimit, 20 * hz, 20); - _cdebbuf *cdb; - - if (!(gigaset_debuglevel & level)) - return; - if (!___ratelimit(&msg_dump_ratelimit, tag)) - return; - - cdb = capi_cmsg2str(p); - if (cdb) { - gig_dbg(level, "%s: [%d] %s", tag, p->applid, cdb->buf); - cdebbuf_free(cdb); - } else { - gig_dbg(level, "%s: [%d] %s", tag, p->applid, - capi_cmd2str(p->command, p->subcommand)); - } -#endif -} - -static inline void dump_rawmsg(enum debuglevel level, const char *tag, - unsigned char *data) -{ -#ifdef config_gigaset_debug - char *dbgline; - int i, l; - - if (!(gigaset_debuglevel & level)) - return; - - l 
= capimsg_len(data); - if (l < 12) { - gig_dbg(level, "%s: ??? len=%04d", tag, l); - return; - } - gig_dbg(level, "%s: 0x%02x:0x%02x: id=%03d #0x%04x len=%04d ncci=0x%x", - tag, capimsg_command(data), capimsg_subcommand(data), - capimsg_appid(data), capimsg_msgid(data), l, - capimsg_control(data)); - l -= 12; - if (l <= 0) - return; - if (l > 64) - l = 64; /* arbitrary limit */ - dbgline = kmalloc_array(3, l, gfp_atomic); - if (!dbgline) - return; - for (i = 0; i < l; i++) { - dbgline[3 * i] = hex_asc_hi(data[12 + i]); - dbgline[3 * i + 1] = hex_asc_lo(data[12 + i]); - dbgline[3 * i + 2] = ' '; - } - dbgline[3 * l - 1] = ''; - gig_dbg(level, " %s", dbgline); - kfree(dbgline); - if (capimsg_command(data) == capi_data_b3 && - (capimsg_subcommand(data) == capi_req || - capimsg_subcommand(data) == capi_ind)) { - l = capimsg_datalen(data); - gig_dbg(level, " datalength=%d", l); - if (l <= 0 || !(gigaset_debuglevel & debug_lldata)) - return; - if (l > 64) - l = 64; /* arbitrary limit */ - dbgline = kmalloc_array(3, l, gfp_atomic); - if (!dbgline) - return; - data += capimsg_len(data); - for (i = 0; i < l; i++) { - dbgline[3 * i] = hex_asc_hi(data[i]); - dbgline[3 * i + 1] = hex_asc_lo(data[i]); - dbgline[3 * i + 2] = ' '; - } - dbgline[3 * l - 1] = ''; - gig_dbg(level, " %s", dbgline); - kfree(dbgline); - } -#endif -} - -/* - * format capi ie as string - */ - -#ifdef config_gigaset_debug -static const char *format_ie(const char *ie) -{ - static char result[3 * max_fmt_ie_len]; - int len, count; - char *pout = result; - - if (!ie) - return "null"; - - count = len = ie[0]; - if (count > max_fmt_ie_len) - count = max_fmt_ie_len - 1; - while (count--) { - *pout++ = hex_asc_hi(*++ie); - *pout++ = hex_asc_lo(*ie); - *pout++ = ' '; - } - if (len > max_fmt_ie_len) { - *pout++ = '.'; - *pout++ = '.'; - *pout++ = '.'; - } - *--pout = 0; - return result; -} -#endif - -/* - * emit data_b3_conf message - */ -static void send_data_b3_conf(struct cardstate *cs, struct capi_ctr *ctr, - 
u16 appl, u16 msgid, int channel, - u16 handle, u16 info) -{ - struct sk_buff *cskb; - u8 *msg; - - cskb = alloc_skb(capi_data_b3_conf_len, gfp_atomic); - if (!cskb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - /* frequent message, avoid _cmsg overhead */ - msg = __skb_put(cskb, capi_data_b3_conf_len); - capimsg_setlen(msg, capi_data_b3_conf_len); - capimsg_setappid(msg, appl); - capimsg_setcommand(msg, capi_data_b3); - capimsg_setsubcommand(msg, capi_conf); - capimsg_setmsgid(msg, msgid); - capimsg_setcontroller(msg, ctr->cnr); - capimsg_setplci_part(msg, channel); - capimsg_setncci_part(msg, 1); - capimsg_sethandle_conf(msg, handle); - capimsg_setinfo_conf(msg, info); - - /* emit message */ - dump_rawmsg(debug_mcmd, __func__, msg); - capi_ctr_handle_message(ctr, appl, cskb); -} - - -/* - * driver interface functions - * ========================== - */ - -/** - * gigaset_skb_sent() - acknowledge transmission of outgoing skb - * @bcs: b channel descriptor structure. - * @skb: sent data. - * - * called by hardware module {bas,ser,usb}_gigaset when the data in a - * skb has been successfully sent, for signalling completion to the ll. 
- */ -void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_ctr *iif = cs->iif; - struct gigaset_capi_appl *ap = bcs->ap; - unsigned char *req = skb_mac_header(dskb); - u16 flags; - - /* update statistics */ - ++bcs->trans_up; - - if (!ap) { - gig_dbg(debug_mcmd, "%s: application gone", __func__); - return; - } - - /* don't send further b3 messages if disconnected */ - if (bcs->apconnstate < apconn_active) { - gig_dbg(debug_mcmd, "%s: disconnected", __func__); - return; - } - - /* - * send data_b3_conf if "delivery confirmation" bit was set in request; - * otherwise it has already been sent by do_data_b3_req() - */ - flags = capimsg_flags(req); - if (flags & capi_flags_delivery_confirmation) - send_data_b3_conf(cs, &iif->ctr, ap->id, capimsg_msgid(req), - bcs->channel + 1, capimsg_handle_req(req), - (flags & ~capi_flags_delivery_confirmation) ? - capiflagsnotsupportedbyprotocol : - capi_noerror); -} -export_symbol_gpl(gigaset_skb_sent); - -/** - * gigaset_skb_rcvd() - pass received skb to ll - * @bcs: b channel descriptor structure. - * @skb: received data. - * - * called by hardware module {bas,ser,usb}_gigaset when user data has - * been successfully received, for passing to the ll. - * warning: skb must not be accessed anymore! 
- */ -void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_ctr *iif = cs->iif; - struct gigaset_capi_appl *ap = bcs->ap; - int len = skb->len; - - /* update statistics */ - bcs->trans_down++; - - if (!ap) { - gig_dbg(debug_mcmd, "%s: application gone", __func__); - dev_kfree_skb_any(skb); - return; - } - - /* don't send further b3 messages if disconnected */ - if (bcs->apconnstate < apconn_active) { - gig_dbg(debug_mcmd, "%s: disconnected", __func__); - dev_kfree_skb_any(skb); - return; - } - - /* - * prepend data_b3_ind message to payload - * parameters: ncci = 1, all others 0/unused - * frequent message, avoid _cmsg overhead - */ - skb_push(skb, capi_data_b3_req_len); - capimsg_setlen(skb->data, capi_data_b3_req_len); - capimsg_setappid(skb->data, ap->id); - capimsg_setcommand(skb->data, capi_data_b3); - capimsg_setsubcommand(skb->data, capi_ind); - capimsg_setmsgid(skb->data, ap->nextmessagenumber++); - capimsg_setcontroller(skb->data, iif->ctr.cnr); - capimsg_setplci_part(skb->data, bcs->channel + 1); - capimsg_setncci_part(skb->data, 1); - /* data parameter not used */ - capimsg_setdatalen(skb->data, len); - /* data handle parameter not used */ - capimsg_setflags(skb->data, 0); - /* data64 parameter not present */ - - /* emit message */ - dump_rawmsg(debug_mcmd, __func__, skb->data); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} -export_symbol_gpl(gigaset_skb_rcvd); - -/** - * gigaset_isdn_rcv_err() - signal receive error - * @bcs: b channel descriptor structure. - * - * called by hardware module {bas,ser,usb}_gigaset when a receive error - * has occurred, for signalling to the ll. 
- */ -void gigaset_isdn_rcv_err(struct bc_state *bcs) -{ - /* if currently ignoring packets, just count down */ - if (bcs->ignore) { - bcs->ignore--; - return; - } - - /* update statistics */ - bcs->corrupted++; - - /* todo: signal error -> ll */ -} -export_symbol_gpl(gigaset_isdn_rcv_err); - -/** - * gigaset_isdn_icall() - signal incoming call - * @at_state: connection state structure. - * - * called by main module at tasklet level to notify the ll that an incoming - * call has been received. @at_state contains the parameters of the call. - * - * return value: call disposition (icall_*) - */ -int gigaset_isdn_icall(struct at_state_t *at_state) -{ - struct cardstate *cs = at_state->cs; - struct bc_state *bcs = at_state->bcs; - struct gigaset_capi_ctr *iif = cs->iif; - struct gigaset_capi_appl *ap; - u32 actcipmask; - struct sk_buff *skb; - unsigned int msgsize; - unsigned long flags; - int i; - - /* - * todo: signal calls without a free b channel, too - * (requires a u8 handle for the at_state structure that can - * be stored in the plci and used in the connect_resp message - * handler to retrieve it) - */ - if (!bcs) - return icall_ignore; - - /* prepare connect_ind message, using b channel number as plci */ - capi_cmsg_header(&iif->hcmsg, 0, capi_connect, capi_ind, 0, - iif->ctr.cnr | ((bcs->channel + 1) << 8)); - - /* minimum size, all structs empty */ - msgsize = capi_connect_ind_baselen; - - /* bearer capability (mandatory) */ - if (at_state->str_var[str_zbc]) { - /* pass on bc from gigaset */ - if (encode_ie(at_state->str_var[str_zbc], iif->bc_buf, - max_bc_octets) < 0) { - dev_warn(cs->dev, "ring ignored - bad bc %s ", - at_state->str_var[str_zbc]); - return icall_ignore; - } - - /* look up corresponding cip value */ - iif->hcmsg.cipvalue = 0; /* default if nothing found */ - for (i = 0; i < array_size(cip2bchlc); i++) - if (cip2bchlc[i].bc != null && - cip2bchlc[i].hlc == null && - !strcmp(cip2bchlc[i].bc, - at_state->str_var[str_zbc])) { - 
iif->hcmsg.cipvalue = i; - break; - } - } else { - /* no bc (internal call): assume cip 1 (speech, a-law) */ - iif->hcmsg.cipvalue = 1; - encode_ie(cip2bchlc[1].bc, iif->bc_buf, max_bc_octets); - } - iif->hcmsg.bc = iif->bc_buf; - msgsize += iif->hcmsg.bc[0]; - - /* high layer compatibility (optional) */ - if (at_state->str_var[str_zhlc]) { - /* pass on hlc from gigaset */ - if (encode_ie(at_state->str_var[str_zhlc], iif->hlc_buf, - max_hlc_octets) < 0) { - dev_warn(cs->dev, "ring ignored - bad hlc %s ", - at_state->str_var[str_zhlc]); - return icall_ignore; - } - iif->hcmsg.hlc = iif->hlc_buf; - msgsize += iif->hcmsg.hlc[0]; - - /* look up corresponding cip value */ - /* keep bc based cip value if none found */ - if (at_state->str_var[str_zbc]) - for (i = 0; i < array_size(cip2bchlc); i++) - if (cip2bchlc[i].hlc != null && - !strcmp(cip2bchlc[i].hlc, - at_state->str_var[str_zhlc]) && - !strcmp(cip2bchlc[i].bc, - at_state->str_var[str_zbc])) { - iif->hcmsg.cipvalue = i; - break; - } - } - - /* called party number (optional) */ - if (at_state->str_var[str_zcpn]) { - i = strlen(at_state->str_var[str_zcpn]); - if (i > max_number_digits) { - dev_warn(cs->dev, "ring ignored - bad number %s ", - at_state->str_var[str_zbc]); - return icall_ignore; - } - iif->cdpty_buf[0] = i + 1; - iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */ - memcpy(iif->cdpty_buf + 2, at_state->str_var[str_zcpn], i); - iif->hcmsg.calledpartynumber = iif->cdpty_buf; - msgsize += iif->hcmsg.calledpartynumber[0]; - } - - /* calling party number (optional) */ - if (at_state->str_var[str_nmbr]) { - i = strlen(at_state->str_var[str_nmbr]); - if (i > max_number_digits) { - dev_warn(cs->dev, "ring ignored - bad number %s ", - at_state->str_var[str_zbc]); - return icall_ignore; - } - iif->cgpty_buf[0] = i + 2; - iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */ - iif->cgpty_buf[2] = 0x80; /* pres. 
allowed, not screened */ - memcpy(iif->cgpty_buf + 3, at_state->str_var[str_nmbr], i); - iif->hcmsg.callingpartynumber = iif->cgpty_buf; - msgsize += iif->hcmsg.callingpartynumber[0]; - } - - /* remaining parameters (not supported, always left null): - * - calledpartysubaddress - * - callingpartysubaddress - * - additionalinfo - * - bchannelinformation - * - keypadfacility - * - useruserdata - * - facilitydataarray - */ - - gig_dbg(debug_cmd, "icall: plci %x cip %d bc %s", - iif->hcmsg.adr.adrplci, iif->hcmsg.cipvalue, - format_ie(iif->hcmsg.bc)); - gig_dbg(debug_cmd, "icall: hlc %s", - format_ie(iif->hcmsg.hlc)); - gig_dbg(debug_cmd, "icall: cgpty %s", - format_ie(iif->hcmsg.callingpartynumber)); - gig_dbg(debug_cmd, "icall: cdpty %s", - format_ie(iif->hcmsg.calledpartynumber)); - - /* scan application list for matching listeners */ - spin_lock_irqsave(&bcs->aplock, flags); - if (bcs->ap != null || bcs->apconnstate != apconn_none) { - dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d) ", - __func__, bcs->ap, bcs->apconnstate); - bcs->ap = null; - bcs->apconnstate = apconn_none; - } - spin_unlock_irqrestore(&bcs->aplock, flags); - actcipmask = 1 | (1 << iif->hcmsg.cipvalue); - list_for_each_entry(ap, &iif->appls, ctrlist) - if (actcipmask & ap->listencipmask) { - /* build connect_ind message for this application */ - iif->hcmsg.applid = ap->id; - iif->hcmsg.messagenumber = ap->nextmessagenumber++; - - skb = alloc_skb(msgsize, gfp_atomic); - if (!skb) { - dev_err(cs->dev, "%s: out of memory ", - __func__); - break; - } - if (capi_cmsg2message(&iif->hcmsg, - __skb_put(skb, msgsize))) { - dev_err(cs->dev, "%s: message parser failure ", - __func__); - dev_kfree_skb_any(skb); - break; - } - dump_cmsg(debug_cmd, __func__, &iif->hcmsg); - - /* add to listeners on this b channel, update state */ - spin_lock_irqsave(&bcs->aplock, flags); - ap->bcnext = bcs->ap; - bcs->ap = ap; - bcs->chstate |= chs_notify_ll; - bcs->apconnstate = apconn_setup; - 
spin_unlock_irqrestore(&bcs->aplock, flags); - - /* emit message */ - capi_ctr_handle_message(&iif->ctr, ap->id, skb); - } - - /* - * return "accept" if any listeners. - * gigaset will send alerting. - * there doesn't seem to be a way to avoid this. - */ - return bcs->ap ? icall_accept : icall_ignore; -} - -/* - * send a disconnect_ind message to an application - * does not sleep, clobbers the controller's hcmsg structure - */ -static void send_disconnect_ind(struct bc_state *bcs, - struct gigaset_capi_appl *ap, u16 reason) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_ctr *iif = cs->iif; - struct sk_buff *skb; - - if (bcs->apconnstate == apconn_none) - return; - - capi_cmsg_header(&iif->hcmsg, ap->id, capi_disconnect, capi_ind, - ap->nextmessagenumber++, - iif->ctr.cnr | ((bcs->channel + 1) << 8)); - iif->hcmsg.reason = reason; - skb = alloc_skb(capi_disconnect_ind_len, gfp_atomic); - if (!skb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - if (capi_cmsg2message(&iif->hcmsg, - __skb_put(skb, capi_disconnect_ind_len))) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->hcmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} - -/* - * send a disconnect_b3_ind message to an application - * parameters: ncci = 1, ncpi empty, reason_b3 = 0 - * does not sleep, clobbers the controller's hcmsg structure - */ -static void send_disconnect_b3_ind(struct bc_state *bcs, - struct gigaset_capi_appl *ap) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_ctr *iif = cs->iif; - struct sk_buff *skb; - - /* nothing to do if no logical connection active */ - if (bcs->apconnstate < apconn_active) - return; - bcs->apconnstate = apconn_setup; - - capi_cmsg_header(&iif->hcmsg, ap->id, capi_disconnect_b3, capi_ind, - ap->nextmessagenumber++, - iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16)); - skb = alloc_skb(capi_disconnect_b3_ind_baselen, 
gfp_atomic); - if (!skb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - if (capi_cmsg2message(&iif->hcmsg, - __skb_put(skb, capi_disconnect_b3_ind_baselen))) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->hcmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} - -/** - * gigaset_isdn_connd() - signal d channel connect - * @bcs: b channel descriptor structure. - * - * called by main module at tasklet level to notify the ll that the d channel - * connection has been established. - */ -void gigaset_isdn_connd(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_ctr *iif = cs->iif; - struct gigaset_capi_appl *ap; - struct sk_buff *skb; - unsigned int msgsize; - unsigned long flags; - - spin_lock_irqsave(&bcs->aplock, flags); - ap = bcs->ap; - if (!ap) { - spin_unlock_irqrestore(&bcs->aplock, flags); - gig_dbg(debug_cmd, "%s: application gone", __func__); - return; - } - if (bcs->apconnstate == apconn_none) { - spin_unlock_irqrestore(&bcs->aplock, flags); - dev_warn(cs->dev, "%s: application %u not connected ", - __func__, ap->id); - return; - } - spin_unlock_irqrestore(&bcs->aplock, flags); - while (ap->bcnext) { - /* this should never happen */ - dev_warn(cs->dev, "%s: dropping extra application %u ", - __func__, ap->bcnext->id); - send_disconnect_ind(bcs, ap->bcnext, - capicallgiventootherapplication); - ap->bcnext = ap->bcnext->bcnext; - } - - /* prepare connect_active_ind message - * note: llc not supported by device - */ - capi_cmsg_header(&iif->hcmsg, ap->id, capi_connect_active, capi_ind, - ap->nextmessagenumber++, - iif->ctr.cnr | ((bcs->channel + 1) << 8)); - - /* minimum size, all structs empty */ - msgsize = capi_connect_active_ind_baselen; - - /* todo: set parameter: connected number - * (requires ev-layer state machine extension to collect - * zcon device reply) - */ - - /* build and emit connect_active_ind 
message */ - skb = alloc_skb(msgsize, gfp_atomic); - if (!skb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->hcmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} - -/** - * gigaset_isdn_hupd() - signal d channel hangup - * @bcs: b channel descriptor structure. - * - * called by main module at tasklet level to notify the ll that the d channel - * connection has been shut down. - */ -void gigaset_isdn_hupd(struct bc_state *bcs) -{ - struct gigaset_capi_appl *ap; - unsigned long flags; - - /* - * todo: pass on reason code reported by device - * (requires ev-layer state machine extension to collect - * zcau device reply) - */ - spin_lock_irqsave(&bcs->aplock, flags); - while (bcs->ap != null) { - ap = bcs->ap; - bcs->ap = ap->bcnext; - spin_unlock_irqrestore(&bcs->aplock, flags); - send_disconnect_b3_ind(bcs, ap); - send_disconnect_ind(bcs, ap, 0); - spin_lock_irqsave(&bcs->aplock, flags); - } - bcs->apconnstate = apconn_none; - spin_unlock_irqrestore(&bcs->aplock, flags); -} - -/** - * gigaset_isdn_connb() - signal b channel connect - * @bcs: b channel descriptor structure. - * - * called by main module at tasklet level to notify the ll that the b channel - * connection has been established. 
- */ -void gigaset_isdn_connb(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_ctr *iif = cs->iif; - struct gigaset_capi_appl *ap; - struct sk_buff *skb; - unsigned long flags; - unsigned int msgsize; - u8 command; - - spin_lock_irqsave(&bcs->aplock, flags); - ap = bcs->ap; - if (!ap) { - spin_unlock_irqrestore(&bcs->aplock, flags); - gig_dbg(debug_cmd, "%s: application gone", __func__); - return; - } - if (!bcs->apconnstate) { - spin_unlock_irqrestore(&bcs->aplock, flags); - dev_warn(cs->dev, "%s: application %u not connected ", - __func__, ap->id); - return; - } - - /* - * emit connect_b3_active_ind if we already got connect_b3_req; - * otherwise we have to emit connect_b3_ind first, and follow up with - * connect_b3_active_ind in reply to connect_b3_resp - * parameters in both cases always: ncci = 1, ncpi empty - */ - if (bcs->apconnstate >= apconn_active) { - command = capi_connect_b3_active; - msgsize = capi_connect_b3_active_ind_baselen; - } else { - command = capi_connect_b3; - msgsize = capi_connect_b3_ind_baselen; - } - bcs->apconnstate = apconn_active; - - spin_unlock_irqrestore(&bcs->aplock, flags); - - while (ap->bcnext) { - /* this should never happen */ - dev_warn(cs->dev, "%s: dropping extra application %u ", - __func__, ap->bcnext->id); - send_disconnect_ind(bcs, ap->bcnext, - capicallgiventootherapplication); - ap->bcnext = ap->bcnext->bcnext; - } - - capi_cmsg_header(&iif->hcmsg, ap->id, command, capi_ind, - ap->nextmessagenumber++, - iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16)); - skb = alloc_skb(msgsize, gfp_atomic); - if (!skb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - if (capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize))) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->hcmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} - -/** - * gigaset_isdn_hupb() - signal b 
channel hangup - * @bcs: b channel descriptor structure. - * - * called by main module to notify the ll that the b channel connection has - * been shut down. - */ -void gigaset_isdn_hupb(struct bc_state *bcs) -{ - struct gigaset_capi_appl *ap = bcs->ap; - - /* todo: assure order of disconnect_b3_ind and disconnect_ind ? */ - - if (!ap) { - gig_dbg(debug_cmd, "%s: application gone", __func__); - return; - } - - send_disconnect_b3_ind(bcs, ap); -} - -/** - * gigaset_isdn_start() - signal device availability - * @cs: device descriptor structure. - * - * called by main module to notify the ll that the device is available for - * use. - */ -void gigaset_isdn_start(struct cardstate *cs) -{ - struct gigaset_capi_ctr *iif = cs->iif; - - /* fill profile data: manufacturer name */ - strcpy(iif->ctr.manu, "siemens"); - /* capi and device version */ - iif->ctr.version.majorversion = 2; /* capi 2.0 */ - iif->ctr.version.minorversion = 0; - /* todo: check/assert cs->gotfwver? */ - iif->ctr.version.majormanuversion = cs->fwver[0]; - iif->ctr.version.minormanuversion = cs->fwver[1]; - /* number of b channels supported */ - iif->ctr.profile.nbchannel = cs->channels; - /* global options: internal controller, supplementary services */ - iif->ctr.profile.goptions = 0x11; - /* b1 protocols: 64 kbit/s hdlc or transparent */ - iif->ctr.profile.support1 = 0x03; - /* b2 protocols: transparent only */ - /* todo: x.75 slp ? */ - iif->ctr.profile.support2 = 0x02; - /* b3 protocols: transparent only */ - iif->ctr.profile.support3 = 0x01; - /* no serial number */ - strcpy(iif->ctr.serial, "0"); - capi_ctr_ready(&iif->ctr); -} - -/** - * gigaset_isdn_stop() - signal device unavailability - * @cs: device descriptor structure. - * - * called by main module to notify the ll that the device is no longer - * available for use. 
- */ -void gigaset_isdn_stop(struct cardstate *cs) -{ - struct gigaset_capi_ctr *iif = cs->iif; - capi_ctr_down(&iif->ctr); -} - -/* - * kernel capi callback methods - * ============================ - */ - -/* - * register capi application - */ -static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl, - capi_register_params *rp) -{ - struct gigaset_capi_ctr *iif - = container_of(ctr, struct gigaset_capi_ctr, ctr); - struct cardstate *cs = ctr->driverdata; - struct gigaset_capi_appl *ap; - - gig_dbg(debug_cmd, "%s [%u] l3cnt=%u blkcnt=%u blklen=%u", - __func__, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen); - - list_for_each_entry(ap, &iif->appls, ctrlist) - if (ap->id == appl) { - dev_notice(cs->dev, - "application %u already registered ", appl); - return; - } - - ap = kzalloc(sizeof(*ap), gfp_kernel); - if (!ap) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - ap->id = appl; - ap->rp = *rp; - - list_add(&ap->ctrlist, &iif->appls); - dev_info(cs->dev, "application %u registered ", ap->id); -} - -/* - * remove capi application from channel - * helper function to keep indentation levels down and stay in 80 columns - */ - -static inline void remove_appl_from_channel(struct bc_state *bcs, - struct gigaset_capi_appl *ap) -{ - struct cardstate *cs = bcs->cs; - struct gigaset_capi_appl *bcap; - unsigned long flags; - int prevconnstate; - - spin_lock_irqsave(&bcs->aplock, flags); - bcap = bcs->ap; - if (bcap == null) { - spin_unlock_irqrestore(&bcs->aplock, flags); - return; - } - - /* check first application on channel */ - if (bcap == ap) { - bcs->ap = ap->bcnext; - if (bcs->ap != null) { - spin_unlock_irqrestore(&bcs->aplock, flags); - return; - } - - /* none left, clear channel state */ - prevconnstate = bcs->apconnstate; - bcs->apconnstate = apconn_none; - spin_unlock_irqrestore(&bcs->aplock, flags); - - if (prevconnstate == apconn_active) { - dev_notice(cs->dev, "%s: hanging up channel %u ", - __func__, bcs->channel); - 
gigaset_add_event(cs, &bcs->at_state, - ev_hup, null, 0, null); - gigaset_schedule_event(cs); - } - return; - } - - /* check remaining list */ - do { - if (bcap->bcnext == ap) { - bcap->bcnext = bcap->bcnext->bcnext; - spin_unlock_irqrestore(&bcs->aplock, flags); - return; - } - bcap = bcap->bcnext; - } while (bcap != null); - spin_unlock_irqrestore(&bcs->aplock, flags); -} - -/* - * release capi application - */ -static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl) -{ - struct gigaset_capi_ctr *iif - = container_of(ctr, struct gigaset_capi_ctr, ctr); - struct cardstate *cs = iif->ctr.driverdata; - struct gigaset_capi_appl *ap, *tmp; - unsigned ch; - - gig_dbg(debug_cmd, "%s [%u]", __func__, appl); - - list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist) - if (ap->id == appl) { - /* remove from any channels */ - for (ch = 0; ch < cs->channels; ch++) - remove_appl_from_channel(&cs->bcs[ch], ap); - - /* remove from registration list */ - list_del(&ap->ctrlist); - kfree(ap); - dev_info(cs->dev, "application %u released ", appl); - } -} - -/* - * ===================================================================== - * outgoing capi message handler - * ===================================================================== - */ - -/* - * helper function: emit reply message with given info value - */ -static void send_conf(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb, - u16 info) -{ - struct cardstate *cs = iif->ctr.driverdata; - - /* - * _conf replies always only have ncci and info parameters - * so they'll fit into the _req message skb - */ - capi_cmsg_answer(&iif->acmsg); - iif->acmsg.info = info; - if (capi_cmsg2message(&iif->acmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - __skb_trim(skb, capi_stdconf_len); - dump_cmsg(debug_cmd, __func__, &iif->acmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} - -/* - * process facility_req 
message - */ -static void do_facility_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct sk_buff *cskb; - u8 *pparam; - unsigned int msgsize = capi_facility_conf_baselen; - u16 function, info; - static u8 confparam[10]; /* max. 9 octets + length byte */ - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - - /* - * facility request parameter is not decoded by capi_message2cmsg() - * encoding depends on facility selector - */ - switch (cmsg->facilityselector) { - case capi_facility_dtmf: /* todo */ - info = capifacilitynotsupported; - confparam[0] = 2; /* length */ - /* dtmf information: unknown dtmf request */ - capimsg_setu16(confparam, 1, 2); - break; - - case capi_facility_v42bis: /* not supported */ - info = capifacilitynotsupported; - confparam[0] = 2; /* length */ - /* v.42 bis information: not available */ - capimsg_setu16(confparam, 1, 1); - break; - - case capi_facility_suppsvc: - /* decode function parameter */ - pparam = cmsg->facilityrequestparameter; - if (pparam == null || pparam[0] < 2) { - dev_notice(cs->dev, "%s: %s missing ", "facility_req", - "facility request parameter"); - send_conf(iif, ap, skb, capiillmessageparmcoding); - return; - } - function = capimsg_u16(pparam, 1); - switch (function) { - case capi_suppsvc_getsupported: - info = capisuccess; - /* supplementary service specific parameter */ - confparam[3] = 6; /* length */ - /* supplementary services info: success */ - capimsg_setu16(confparam, 4, capisuccess); - /* supported services: none */ - capimsg_setu32(confparam, 6, 0); - break; - case capi_suppsvc_listen: - if (pparam[0] < 7 || pparam[3] < 4) { - dev_notice(cs->dev, "%s: %s missing ", - "facility_req", "notification mask"); - send_conf(iif, ap, skb, - 
capiillmessageparmcoding); - return; - } - if (capimsg_u32(pparam, 4) != 0) { - dev_notice(cs->dev, - "%s: unsupported supplementary service notification mask 0x%x ", - "facility_req", capimsg_u32(pparam, 4)); - info = capifacilityspecificfunctionnotsupported; - confparam[3] = 2; /* length */ - capimsg_setu16(confparam, 4, - capisupplementaryservicenotsupported); - break; - } - info = capisuccess; - confparam[3] = 2; /* length */ - capimsg_setu16(confparam, 4, capisuccess); - break; - - /* todo: add supported services */ - - default: - dev_notice(cs->dev, - "%s: unsupported supplementary service function 0x%04x ", - "facility_req", function); - info = capifacilityspecificfunctionnotsupported; - /* supplementary service specific parameter */ - confparam[3] = 2; /* length */ - /* supplementary services info: not supported */ - capimsg_setu16(confparam, 4, - capisupplementaryservicenotsupported); - } - - /* facility confirmation parameter */ - confparam[0] = confparam[3] + 3; /* total length */ - /* function: copy from _req message */ - capimsg_setu16(confparam, 1, function); - /* supplementary service specific parameter already set above */ - break; - - case capi_facility_wakeup: /* todo */ - info = capifacilitynotsupported; - confparam[0] = 2; /* length */ - /* number of accepted awake request parameters: 0 */ - capimsg_setu16(confparam, 1, 0); - break; - - default: - info = capifacilitynotsupported; - confparam[0] = 0; /* empty struct */ - } - - /* send facility_conf with given info and confirmation parameter */ - dev_kfree_skb_any(skb); - capi_cmsg_answer(cmsg); - cmsg->info = info; - cmsg->facilityconfirmationparameter = confparam; - msgsize += confparam[0]; /* length */ - cskb = alloc_skb(msgsize, gfp_atomic); - if (!cskb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - if (capi_cmsg2message(cmsg, __skb_put(cskb, msgsize))) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(cskb); - return; - } - 
dump_cmsg(debug_cmd, __func__, cmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, cskb); -} - - -/* - * process listen_req message - * just store the masks in the application data structure - */ -static void do_listen_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - - /* decode message */ - if (capi_message2cmsg(&iif->acmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->acmsg); - - /* store listening parameters */ - ap->listeninfomask = iif->acmsg.infomask; - ap->listencipmask = iif->acmsg.cipmask; - send_conf(iif, ap, skb, capisuccess); -} - -/* - * process alert_req message - * nothing to do, gigaset always alerts anyway - */ -static void do_alert_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - - /* decode message */ - if (capi_message2cmsg(&iif->acmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->acmsg); - send_conf(iif, ap, skb, capialertalreadysent); -} - -/* - * process connect_req message - * allocate a b channel, prepare dial commands, queue a dial event, - * emit connect_conf reply - */ -static void do_connect_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct bc_state *bcs; - char **commands; - char *s; - u8 *pp; - unsigned long flags; - int i, l, lbc, lhlc; - u16 info; - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - - /* get free b channel & construct plci */ - bcs = 
gigaset_get_free_channel(cs); - if (!bcs) { - dev_notice(cs->dev, "%s: no b channel available ", - "connect_req"); - send_conf(iif, ap, skb, capinoplciavailable); - return; - } - spin_lock_irqsave(&bcs->aplock, flags); - if (bcs->ap != null || bcs->apconnstate != apconn_none) - dev_warn(cs->dev, "%s: channel not properly cleared (%p/%d) ", - __func__, bcs->ap, bcs->apconnstate); - ap->bcnext = null; - bcs->ap = ap; - bcs->apconnstate = apconn_setup; - spin_unlock_irqrestore(&bcs->aplock, flags); - - bcs->rx_bufsize = ap->rp.datablklen; - dev_kfree_skb(bcs->rx_skb); - gigaset_new_rx_skb(bcs); - cmsg->adr.adrplci |= (bcs->channel + 1) << 8; - - /* build command table */ - commands = kcalloc(at_num, sizeof(*commands), gfp_kernel); - if (!commands) - goto oom; - - /* encode parameter: called party number */ - pp = cmsg->calledpartynumber; - if (pp == null || *pp == 0) { - dev_notice(cs->dev, "%s: %s missing ", - "connect_req", "called party number"); - info = capiillmessageparmcoding; - goto error; - } - l = *pp++; - /* check type of number/numbering plan byte */ - switch (*pp) { - case 0x80: /* unknown type / unknown numbering plan */ - case 0x81: /* unknown type / isdn/telephony numbering plan */ - break; - default: /* others: warn about potential misinterpretation */ - dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported ", - "connect_req", "called party number", *pp); - } - pp++; - l--; - /* translate "**" internal call prefix to ctp value */ - if (l >= 2 && pp[0] == '*' && pp[1] == '*') { - s = "^sctp=0 "; - pp += 2; - l -= 2; - } else { - s = "^sctp=1 "; - } - commands[at_type] = kstrdup(s, gfp_kernel); - if (!commands[at_type]) - goto oom; - commands[at_dial] = kmalloc(l + 3, gfp_kernel); - if (!commands[at_dial]) - goto oom; - snprintf(commands[at_dial], l + 3, "d%.*s ", l, pp); - - /* encode parameter: calling party number */ - pp = cmsg->callingpartynumber; - if (pp != null && *pp > 0) { - l = *pp++; - - /* check type of number/numbering plan byte */ - /* 
todo: allow for/handle ext=1? */ - switch (*pp) { - case 0x00: /* unknown type / unknown numbering plan */ - case 0x01: /* unknown type / isdn/telephony num. plan */ - break; - default: - dev_notice(cs->dev, - "%s: %s type/plan 0x%02x unsupported ", - "connect_req", "calling party number", *pp); - } - pp++; - l--; - - /* check presentation indicator */ - if (!l) { - dev_notice(cs->dev, "%s: %s ie truncated ", - "connect_req", "calling party number"); - info = capiillmessageparmcoding; - goto error; - } - switch (*pp & 0xfc) { /* ignore screening indicator */ - case 0x80: /* presentation allowed */ - s = "^sclip=1 "; - break; - case 0xa0: /* presentation restricted */ - s = "^sclip=0 "; - break; - default: - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "connect_req", - "presentation/screening indicator", - *pp); - s = "^sclip=1 "; - } - commands[at_clip] = kstrdup(s, gfp_kernel); - if (!commands[at_clip]) - goto oom; - pp++; - l--; - - if (l) { - /* number */ - commands[at_msn] = kmalloc(l + 8, gfp_kernel); - if (!commands[at_msn]) - goto oom; - snprintf(commands[at_msn], l + 8, "^smsn=%*s ", l, pp); - } - } - - /* check parameter: cip value */ - if (cmsg->cipvalue >= array_size(cip2bchlc) || - (cmsg->cipvalue > 0 && cip2bchlc[cmsg->cipvalue].bc == null)) { - dev_notice(cs->dev, "%s: unknown cip value %d ", - "connect_req", cmsg->cipvalue); - info = capicipvalueunknown; - goto error; - } - - /* - * check/encode parameters: bc & hlc - * must be encoded together as device doesn't accept hlc separately - * explicit parameters override values derived from cip - */ - - /* determine lengths */ - if (cmsg->bc && cmsg->bc[0]) /* bc specified explicitly */ - lbc = 2 * cmsg->bc[0]; - else if (cip2bchlc[cmsg->cipvalue].bc) /* bc derived from cip */ - lbc = strlen(cip2bchlc[cmsg->cipvalue].bc); - else /* no bc */ - lbc = 0; - if (cmsg->hlc && cmsg->hlc[0]) /* hlc specified explicitly */ - lhlc = 2 * cmsg->hlc[0]; - else if (cip2bchlc[cmsg->cipvalue].hlc) /* hlc derived from 
cip */ - lhlc = strlen(cip2bchlc[cmsg->cipvalue].hlc); - else /* no hlc */ - lhlc = 0; - - if (lbc) { - /* have bc: allocate and assemble command string */ - l = lbc + 7; /* "^sbc=" + value + " " + null byte */ - if (lhlc) - l += lhlc + 7; /* ";^shlc=" + value */ - commands[at_bc] = kmalloc(l, gfp_kernel); - if (!commands[at_bc]) - goto oom; - strcpy(commands[at_bc], "^sbc="); - if (cmsg->bc && cmsg->bc[0]) /* bc specified explicitly */ - decode_ie(cmsg->bc, commands[at_bc] + 5); - else /* bc derived from cip */ - strcpy(commands[at_bc] + 5, - cip2bchlc[cmsg->cipvalue].bc); - if (lhlc) { - strcpy(commands[at_bc] + lbc + 5, ";^shlc="); - if (cmsg->hlc && cmsg->hlc[0]) - /* hlc specified explicitly */ - decode_ie(cmsg->hlc, - commands[at_bc] + lbc + 12); - else /* hlc derived from cip */ - strcpy(commands[at_bc] + lbc + 12, - cip2bchlc[cmsg->cipvalue].hlc); - } - strcpy(commands[at_bc] + l - 2, " "); - } else { - /* no bc */ - if (lhlc) { - dev_notice(cs->dev, "%s: cannot set hlc without bc ", - "connect_req"); - info = capiillmessageparmcoding; /* ? 
*/ - goto error; - } - } - - /* check/encode parameter: b protocol */ - if (cmsg->bprotocol == capi_default) { - bcs->proto2 = l2_hdlc; - dev_warn(cs->dev, - "b2 protocol x.75 slp unsupported, using transparent "); - } else { - switch (cmsg->b1protocol) { - case 0: - bcs->proto2 = l2_hdlc; - break; - case 1: - bcs->proto2 = l2_voice; - break; - default: - dev_warn(cs->dev, - "b1 protocol %u unsupported, using transparent ", - cmsg->b1protocol); - bcs->proto2 = l2_voice; - } - if (cmsg->b2protocol != 1) - dev_warn(cs->dev, - "b2 protocol %u unsupported, using transparent ", - cmsg->b2protocol); - if (cmsg->b3protocol != 0) - dev_warn(cs->dev, - "b3 protocol %u unsupported, using transparent ", - cmsg->b3protocol); - ignore_cstruct_param(cs, cmsg->b1configuration, - "connect_req", "b1 configuration"); - ignore_cstruct_param(cs, cmsg->b2configuration, - "connect_req", "b2 configuration"); - ignore_cstruct_param(cs, cmsg->b3configuration, - "connect_req", "b3 configuration"); - } - commands[at_proto] = kmalloc(9, gfp_kernel); - if (!commands[at_proto]) - goto oom; - snprintf(commands[at_proto], 9, "^sbpr=%u ", bcs->proto2); - - /* todo: check/encode remaining parameters */ - ignore_cstruct_param(cs, cmsg->calledpartysubaddress, - "connect_req", "called pty subaddr"); - ignore_cstruct_param(cs, cmsg->callingpartysubaddress, - "connect_req", "calling pty subaddr"); - ignore_cstruct_param(cs, cmsg->llc, - "connect_req", "llc"); - if (cmsg->additionalinfo != capi_default) { - ignore_cstruct_param(cs, cmsg->bchannelinformation, - "connect_req", "b channel information"); - ignore_cstruct_param(cs, cmsg->keypadfacility, - "connect_req", "keypad facility"); - ignore_cstruct_param(cs, cmsg->useruserdata, - "connect_req", "user-user data"); - ignore_cstruct_param(cs, cmsg->facilitydataarray, - "connect_req", "facility data array"); - } - - /* encode parameter: b channel to use */ - commands[at_iso] = kmalloc(9, gfp_kernel); - if (!commands[at_iso]) - goto oom; - 
snprintf(commands[at_iso], 9, "^siso=%u ", - (unsigned) bcs->channel + 1); - - /* queue & schedule ev_dial event */ - if (!gigaset_add_event(cs, &bcs->at_state, ev_dial, commands, - bcs->at_state.seq_index, null)) { - info = capi_msgosresourceerr; - goto error; - } - gigaset_schedule_event(cs); - send_conf(iif, ap, skb, capisuccess); - return; - -oom: - dev_err(cs->dev, "%s: out of memory ", __func__); - info = capi_msgosresourceerr; -error: - if (commands) - for (i = 0; i < at_num; i++) - kfree(commands[i]); - kfree(commands); - gigaset_free_channel(bcs); - send_conf(iif, ap, skb, info); -} - -/* - * process connect_resp message - * checks protocol parameters and queues an accept or hup event - */ -static void do_connect_resp(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct bc_state *bcs; - struct gigaset_capi_appl *oap; - unsigned long flags; - int channel; - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - dev_kfree_skb_any(skb); - - /* extract and check channel number from plci */ - channel = (cmsg->adr.adrplci >> 8) & 0xff; - if (!channel || channel > cs->channels) { - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "connect_resp", "plci", cmsg->adr.adrplci); - return; - } - bcs = cs->bcs + channel - 1; - - switch (cmsg->reject) { - case 0: /* accept */ - /* drop all competing applications, keep only this one */ - spin_lock_irqsave(&bcs->aplock, flags); - while (bcs->ap != null) { - oap = bcs->ap; - bcs->ap = oap->bcnext; - if (oap != ap) { - spin_unlock_irqrestore(&bcs->aplock, flags); - send_disconnect_ind(bcs, oap, - capicallgiventootherapplication); - spin_lock_irqsave(&bcs->aplock, flags); - } - } - ap->bcnext = null; - bcs->ap = ap; - spin_unlock_irqrestore(&bcs->aplock, 
flags); - - bcs->rx_bufsize = ap->rp.datablklen; - dev_kfree_skb(bcs->rx_skb); - gigaset_new_rx_skb(bcs); - bcs->chstate |= chs_notify_ll; - - /* check/encode b channel protocol */ - if (cmsg->bprotocol == capi_default) { - bcs->proto2 = l2_hdlc; - dev_warn(cs->dev, - "b2 protocol x.75 slp unsupported, using transparent "); - } else { - switch (cmsg->b1protocol) { - case 0: - bcs->proto2 = l2_hdlc; - break; - case 1: - bcs->proto2 = l2_voice; - break; - default: - dev_warn(cs->dev, - "b1 protocol %u unsupported, using transparent ", - cmsg->b1protocol); - bcs->proto2 = l2_voice; - } - if (cmsg->b2protocol != 1) - dev_warn(cs->dev, - "b2 protocol %u unsupported, using transparent ", - cmsg->b2protocol); - if (cmsg->b3protocol != 0) - dev_warn(cs->dev, - "b3 protocol %u unsupported, using transparent ", - cmsg->b3protocol); - ignore_cstruct_param(cs, cmsg->b1configuration, - "connect_resp", "b1 configuration"); - ignore_cstruct_param(cs, cmsg->b2configuration, - "connect_resp", "b2 configuration"); - ignore_cstruct_param(cs, cmsg->b3configuration, - "connect_resp", "b3 configuration"); - } - - /* todo: check/encode remaining parameters */ - ignore_cstruct_param(cs, cmsg->connectednumber, - "connect_resp", "connected number"); - ignore_cstruct_param(cs, cmsg->connectedsubaddress, - "connect_resp", "connected subaddress"); - ignore_cstruct_param(cs, cmsg->llc, - "connect_resp", "llc"); - if (cmsg->additionalinfo != capi_default) { - ignore_cstruct_param(cs, cmsg->bchannelinformation, - "connect_resp", "bchannel information"); - ignore_cstruct_param(cs, cmsg->keypadfacility, - "connect_resp", "keypad facility"); - ignore_cstruct_param(cs, cmsg->useruserdata, - "connect_resp", "user-user data"); - ignore_cstruct_param(cs, cmsg->facilitydataarray, - "connect_resp", "facility data array"); - } - - /* accept call */ - if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state, - ev_accept, null, 0, null)) - return; - gigaset_schedule_event(cs); - return; - - case 1: /* 
ignore */ - /* send disconnect_ind to this application */ - send_disconnect_ind(bcs, ap, 0); - - /* remove it from the list of listening apps */ - spin_lock_irqsave(&bcs->aplock, flags); - if (bcs->ap == ap) { - bcs->ap = ap->bcnext; - if (bcs->ap == null) { - /* last one: stop ev-layer hupd notifications */ - bcs->apconnstate = apconn_none; - bcs->chstate &= ~chs_notify_ll; - } - spin_unlock_irqrestore(&bcs->aplock, flags); - return; - } - for (oap = bcs->ap; oap != null; oap = oap->bcnext) { - if (oap->bcnext == ap) { - oap->bcnext = oap->bcnext->bcnext; - spin_unlock_irqrestore(&bcs->aplock, flags); - return; - } - } - spin_unlock_irqrestore(&bcs->aplock, flags); - dev_err(cs->dev, "%s: application %u not found ", - __func__, ap->id); - return; - - default: /* reject */ - /* drop all competing applications, keep only this one */ - spin_lock_irqsave(&bcs->aplock, flags); - while (bcs->ap != null) { - oap = bcs->ap; - bcs->ap = oap->bcnext; - if (oap != ap) { - spin_unlock_irqrestore(&bcs->aplock, flags); - send_disconnect_ind(bcs, oap, - capicallgiventootherapplication); - spin_lock_irqsave(&bcs->aplock, flags); - } - } - ap->bcnext = null; - bcs->ap = ap; - spin_unlock_irqrestore(&bcs->aplock, flags); - - /* reject call - will trigger disconnect_ind for this app */ - dev_info(cs->dev, "%s: reject=%x ", - "connect_resp", cmsg->reject); - if (!gigaset_add_event(cs, &cs->bcs[channel - 1].at_state, - ev_hup, null, 0, null)) - return; - gigaset_schedule_event(cs); - return; - } -} - -/* - * process connect_b3_req message - * build ncci and emit connect_b3_conf reply - */ -static void do_connect_b3_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct bc_state *bcs; - int channel; - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } 
- dump_cmsg(debug_cmd, __func__, cmsg); - - /* extract and check channel number from plci */ - channel = (cmsg->adr.adrplci >> 8) & 0xff; - if (!channel || channel > cs->channels) { - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "connect_b3_req", "plci", cmsg->adr.adrplci); - send_conf(iif, ap, skb, capiillcontrplcincci); - return; - } - bcs = &cs->bcs[channel - 1]; - - /* mark logical connection active */ - bcs->apconnstate = apconn_active; - - /* build ncci: always 1 (one b3 connection only) */ - cmsg->adr.adrncci |= 1 << 16; - - /* ncpi parameter: not applicable for b3 transparent */ - ignore_cstruct_param(cs, cmsg->ncpi, "connect_b3_req", "ncpi"); - send_conf(iif, ap, skb, - (cmsg->ncpi && cmsg->ncpi[0]) ? - capincpinotsupportedbyprotocol : capisuccess); -} - -/* - * process connect_b3_resp message - * depending on the reject parameter, either emit connect_b3_active_ind - * or queue ev_hup and emit disconnect_b3_ind. - * the emitted message is always shorter than the received one, - * allowing to reuse the skb. 
- */ -static void do_connect_b3_resp(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct bc_state *bcs; - int channel; - unsigned int msgsize; - u8 command; - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - - /* extract and check channel number and ncci */ - channel = (cmsg->adr.adrncci >> 8) & 0xff; - if (!channel || channel > cs->channels || - ((cmsg->adr.adrncci >> 16) & 0xffff) != 1) { - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "connect_b3_resp", "ncci", cmsg->adr.adrncci); - dev_kfree_skb_any(skb); - return; - } - bcs = &cs->bcs[channel - 1]; - - if (cmsg->reject) { - /* reject: clear b3 connect received flag */ - bcs->apconnstate = apconn_setup; - - /* trigger hangup, causing eventual disconnect_ind */ - if (!gigaset_add_event(cs, &bcs->at_state, - ev_hup, null, 0, null)) { - dev_kfree_skb_any(skb); - return; - } - gigaset_schedule_event(cs); - - /* emit disconnect_b3_ind */ - command = capi_disconnect_b3; - msgsize = capi_disconnect_b3_ind_baselen; - } else { - /* - * accept: emit connect_b3_active_ind immediately, as - * we only send connect_b3_ind if the b channel is up - */ - command = capi_connect_b3_active; - msgsize = capi_connect_b3_active_ind_baselen; - } - capi_cmsg_header(cmsg, ap->id, command, capi_ind, - ap->nextmessagenumber++, cmsg->adr.adrncci); - __skb_trim(skb, msgsize); - if (capi_cmsg2message(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, skb); -} - -/* - * process disconnect_req message - * schedule ev_hup and emit disconnect_b3_ind if necessary, - * emit disconnect_conf reply - */ -static void 
do_disconnect_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct bc_state *bcs; - _cmsg *b3cmsg; - struct sk_buff *b3skb; - int channel; - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - - /* extract and check channel number from plci */ - channel = (cmsg->adr.adrplci >> 8) & 0xff; - if (!channel || channel > cs->channels) { - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "disconnect_req", "plci", cmsg->adr.adrplci); - send_conf(iif, ap, skb, capiillcontrplcincci); - return; - } - bcs = cs->bcs + channel - 1; - - /* todo: process parameter: additional info */ - if (cmsg->additionalinfo != capi_default) { - ignore_cstruct_param(cs, cmsg->bchannelinformation, - "disconnect_req", "b channel information"); - ignore_cstruct_param(cs, cmsg->keypadfacility, - "disconnect_req", "keypad facility"); - ignore_cstruct_param(cs, cmsg->useruserdata, - "disconnect_req", "user-user data"); - ignore_cstruct_param(cs, cmsg->facilitydataarray, - "disconnect_req", "facility data array"); - } - - /* skip if disconnect_ind already sent */ - if (!bcs->apconnstate) - return; - - /* check for active logical connection */ - if (bcs->apconnstate >= apconn_active) { - /* clear it */ - bcs->apconnstate = apconn_setup; - - /* - * emit disconnect_b3_ind with cause 0x3301 - * use separate cmsg structure, as the content of iif->acmsg - * is still needed for creating the _conf message - */ - b3cmsg = kmalloc(sizeof(*b3cmsg), gfp_kernel); - if (!b3cmsg) { - dev_err(cs->dev, "%s: out of memory ", __func__); - send_conf(iif, ap, skb, capi_msgosresourceerr); - return; - } - capi_cmsg_header(b3cmsg, ap->id, capi_disconnect_b3, capi_ind, - ap->nextmessagenumber++, - cmsg->adr.adrplci | (1 << 16)); - b3cmsg->reason_b3 = 
capiprotocolerrorlayer1; - b3skb = alloc_skb(capi_disconnect_b3_ind_baselen, gfp_kernel); - if (b3skb == null) { - dev_err(cs->dev, "%s: out of memory ", __func__); - send_conf(iif, ap, skb, capi_msgosresourceerr); - kfree(b3cmsg); - return; - } - if (capi_cmsg2message(b3cmsg, - __skb_put(b3skb, capi_disconnect_b3_ind_baselen))) { - dev_err(cs->dev, "%s: message parser failure ", - __func__); - kfree(b3cmsg); - dev_kfree_skb_any(b3skb); - return; - } - dump_cmsg(debug_cmd, __func__, b3cmsg); - kfree(b3cmsg); - capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); - } - - /* trigger hangup, causing eventual disconnect_ind */ - if (!gigaset_add_event(cs, &bcs->at_state, ev_hup, null, 0, null)) { - send_conf(iif, ap, skb, capi_msgosresourceerr); - return; - } - gigaset_schedule_event(cs); - - /* emit reply */ - send_conf(iif, ap, skb, capisuccess); -} - -/* - * process disconnect_b3_req message - * schedule ev_hup and emit disconnect_b3_conf reply - */ -static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - _cmsg *cmsg = &iif->acmsg; - struct bc_state *bcs; - int channel; - - /* decode message */ - if (capi_message2cmsg(cmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, cmsg); - - /* extract and check channel number and ncci */ - channel = (cmsg->adr.adrncci >> 8) & 0xff; - if (!channel || channel > cs->channels || - ((cmsg->adr.adrncci >> 16) & 0xffff) != 1) { - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "disconnect_b3_req", "ncci", cmsg->adr.adrncci); - send_conf(iif, ap, skb, capiillcontrplcincci); - return; - } - bcs = &cs->bcs[channel - 1]; - - /* reject if logical connection not active */ - if (bcs->apconnstate < apconn_active) { - send_conf(iif, ap, skb, - capimessagenotsupportedincurrentstate); - return; - } - - /* trigger hangup, causing 
eventual disconnect_b3_ind */ - if (!gigaset_add_event(cs, &bcs->at_state, ev_hup, null, 0, null)) { - send_conf(iif, ap, skb, capi_msgosresourceerr); - return; - } - gigaset_schedule_event(cs); - - /* ncpi parameter: not applicable for b3 transparent */ - ignore_cstruct_param(cs, cmsg->ncpi, - "disconnect_b3_req", "ncpi"); - send_conf(iif, ap, skb, - (cmsg->ncpi && cmsg->ncpi[0]) ? - capincpinotsupportedbyprotocol : capisuccess); -} - -/* - * process data_b3_req message - */ -static void do_data_b3_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - struct bc_state *bcs; - int channel = capimsg_plci_part(skb->data); - u16 ncci = capimsg_ncci_part(skb->data); - u16 msglen = capimsg_len(skb->data); - u16 datalen = capimsg_datalen(skb->data); - u16 flags = capimsg_flags(skb->data); - u16 msgid = capimsg_msgid(skb->data); - u16 handle = capimsg_handle_req(skb->data); - - /* frequent message, avoid _cmsg overhead */ - dump_rawmsg(debug_mcmd, __func__, skb->data); - - /* check parameters */ - if (channel == 0 || channel > cs->channels || ncci != 1) { - dev_notice(cs->dev, "%s: invalid %s 0x%02x ", - "data_b3_req", "ncci", capimsg_ncci(skb->data)); - send_conf(iif, ap, skb, capiillcontrplcincci); - return; - } - bcs = &cs->bcs[channel - 1]; - if (msglen != capi_data_b3_req_len && msglen != capi_data_b3_req_len64) - dev_notice(cs->dev, "%s: unexpected length %d ", - "data_b3_req", msglen); - if (msglen + datalen != skb->len) - dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d) ", - "data_b3_req", msglen, datalen, skb->len); - if (msglen + datalen > skb->len) { - /* message too short for announced data length */ - send_conf(iif, ap, skb, capiillmessageparmcoding); /* ? 
*/ - return; - } - if (flags & capi_flags_reserved) { - dev_notice(cs->dev, "%s: reserved flags set (%x) ", - "data_b3_req", flags); - send_conf(iif, ap, skb, capiillmessageparmcoding); - return; - } - - /* reject if logical connection not active */ - if (bcs->apconnstate < apconn_active) { - send_conf(iif, ap, skb, capimessagenotsupportedincurrentstate); - return; - } - - /* pull capi message into link layer header */ - skb_reset_mac_header(skb); - skb->mac_len = msglen; - skb_pull(skb, msglen); - - /* pass to device-specific module */ - if (cs->ops->send_skb(bcs, skb) < 0) { - send_conf(iif, ap, skb, capi_msgosresourceerr); - return; - } - - /* - * data_b3_conf will be sent by gigaset_skb_sent() only if "delivery - * confirmation" bit is set; otherwise we have to send it now - */ - if (!(flags & capi_flags_delivery_confirmation)) - send_data_b3_conf(cs, &iif->ctr, ap->id, msgid, channel, handle, - flags ? capiflagsnotsupportedbyprotocol - : capi_noerror); -} - -/* - * process reset_b3_req message - * just always reply "not supported by current protocol" - */ -static void do_reset_b3_req(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - - /* decode message */ - if (capi_message2cmsg(&iif->acmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->acmsg); - send_conf(iif, ap, skb, - capiresetprocedurenotsupportedbycurrentprotocol); -} - -/* - * unsupported capi message handler - */ -static void do_unsupported(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - - /* decode message */ - if (capi_message2cmsg(&iif->acmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->acmsg); - send_conf(iif, ap, 
skb, capimessagenotsupportedincurrentstate); -} - -/* - * capi message handler: no-op - */ -static void do_nothing(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - struct cardstate *cs = iif->ctr.driverdata; - - /* decode message */ - if (capi_message2cmsg(&iif->acmsg, skb->data)) { - dev_err(cs->dev, "%s: message parser failure ", __func__); - dev_kfree_skb_any(skb); - return; - } - dump_cmsg(debug_cmd, __func__, &iif->acmsg); - dev_kfree_skb_any(skb); -} - -static void do_data_b3_resp(struct gigaset_capi_ctr *iif, - struct gigaset_capi_appl *ap, - struct sk_buff *skb) -{ - dump_rawmsg(debug_mcmd, __func__, skb->data); - dev_kfree_skb_any(skb); -} - -/* table of outgoing capi message handlers with lookup function */ -typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *, - struct gigaset_capi_appl *, - struct sk_buff *); - -static struct { - u16 cmd; - capi_send_handler_t handler; -} capi_send_handler_table[] = { - /* most frequent messages first for faster lookup */ - { capi_data_b3_req, do_data_b3_req }, - { capi_data_b3_resp, do_data_b3_resp }, - - { capi_alert_req, do_alert_req }, - { capi_connect_active_resp, do_nothing }, - { capi_connect_b3_active_resp, do_nothing }, - { capi_connect_b3_req, do_connect_b3_req }, - { capi_connect_b3_resp, do_connect_b3_resp }, - { capi_connect_b3_t90_active_resp, do_nothing }, - { capi_connect_req, do_connect_req }, - { capi_connect_resp, do_connect_resp }, - { capi_disconnect_b3_req, do_disconnect_b3_req }, - { capi_disconnect_b3_resp, do_nothing }, - { capi_disconnect_req, do_disconnect_req }, - { capi_disconnect_resp, do_nothing }, - { capi_facility_req, do_facility_req }, - { capi_facility_resp, do_nothing }, - { capi_listen_req, do_listen_req }, - { capi_select_b_protocol_req, do_unsupported }, - { capi_reset_b3_req, do_reset_b3_req }, - { capi_reset_b3_resp, do_nothing }, - - /* - * todo: support overlap sending (requires ev-layer state - * machine extension to generate 
additional atd commands) - */ - { capi_info_req, do_unsupported }, - { capi_info_resp, do_nothing }, - - /* - * todo: what's the proper response for these? - */ - { capi_manufacturer_req, do_nothing }, - { capi_manufacturer_resp, do_nothing }, -}; - -/* look up handler */ -static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd) -{ - size_t i; - - for (i = 0; i < array_size(capi_send_handler_table); i++) - if (capi_send_handler_table[i].cmd == cmd) - return capi_send_handler_table[i].handler; - return null; -} - - -/** - * gigaset_send_message() - accept a capi message from an application - * @ctr: controller descriptor structure. - * @skb: capi message. - * - * return value: capi error code - * note: capidrv (and probably others, too) only uses the return value to - * decide whether it has to free the skb (only if result != capi_noerror (0)) - */ -static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb) -{ - struct gigaset_capi_ctr *iif - = container_of(ctr, struct gigaset_capi_ctr, ctr); - struct cardstate *cs = ctr->driverdata; - struct gigaset_capi_appl *ap; - capi_send_handler_t handler; - - /* can only handle linear sk_buffs */ - if (skb_linearize(skb) < 0) { - dev_warn(cs->dev, "%s: skb_linearize failed ", __func__); - return capi_msgosresourceerr; - } - - /* retrieve application data structure */ - ap = get_appl(iif, capimsg_appid(skb->data)); - if (!ap) { - dev_notice(cs->dev, "%s: application %u not registered ", - __func__, capimsg_appid(skb->data)); - return capi_illappnr; - } - - /* look up command */ - handler = lookup_capi_send_handler(capimsg_cmd(skb->data)); - if (!handler) { - /* unknown/unsupported message type */ - if (printk_ratelimit()) - dev_notice(cs->dev, "%s: unsupported message %u ", - __func__, capimsg_cmd(skb->data)); - return capi_illcmdorsubcmdormsgtosmall; - } - - /* serialize */ - if (atomic_add_return(1, &iif->sendqlen) > 1) { - /* queue behind other messages */ - skb_queue_tail(&iif->sendqueue, 
skb); - return capi_noerror; - } - - /* process message */ - handler(iif, ap, skb); - - /* process other messages arrived in the meantime */ - while (atomic_sub_return(1, &iif->sendqlen) > 0) { - skb = skb_dequeue(&iif->sendqueue); - if (!skb) { - /* should never happen */ - dev_err(cs->dev, "%s: send queue empty ", __func__); - continue; - } - ap = get_appl(iif, capimsg_appid(skb->data)); - if (!ap) { - /* could that happen? */ - dev_warn(cs->dev, "%s: application %u vanished ", - __func__, capimsg_appid(skb->data)); - continue; - } - handler = lookup_capi_send_handler(capimsg_cmd(skb->data)); - if (!handler) { - /* should never happen */ - dev_err(cs->dev, "%s: handler %x vanished ", - __func__, capimsg_cmd(skb->data)); - continue; - } - handler(iif, ap, skb); - } - - return capi_noerror; -} - -/** - * gigaset_procinfo() - build single line description for controller - * @ctr: controller descriptor structure. - * - * return value: pointer to generated string (null terminated) - */ -static char *gigaset_procinfo(struct capi_ctr *ctr) -{ - return ctr->name; /* todo: more? */ -} - -static int gigaset_proc_show(struct seq_file *m, void *v) -{ - struct capi_ctr *ctr = m->private; - struct cardstate *cs = ctr->driverdata; - char *s; - int i; - - seq_printf(m, "%-16s %s ", "name", ctr->name); - seq_printf(m, "%-16s %s %s ", "dev", - dev_driver_string(cs->dev), dev_name(cs->dev)); - seq_printf(m, "%-16s %d ", "id", cs->myid); - if (cs->gotfwver) - seq_printf(m, "%-16s %d.%d.%d.%d ", "firmware", - cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]); - seq_printf(m, "%-16s %d ", "channels", cs->channels); - seq_printf(m, "%-16s %s ", "onechannel", cs->onechannel ? 
"yes" : "no"); - - switch (cs->mode) { - case m_unknown: - s = "unknown"; - break; - case m_config: - s = "config"; - break; - case m_unimodem: - s = "unimodem"; - break; - case m_cid: - s = "cid"; - break; - default: - s = "??"; - } - seq_printf(m, "%-16s %s ", "mode", s); - - switch (cs->mstate) { - case ms_uninitialized: - s = "uninitialized"; - break; - case ms_init: - s = "init"; - break; - case ms_locked: - s = "locked"; - break; - case ms_shutdown: - s = "shutdown"; - break; - case ms_recover: - s = "recover"; - break; - case ms_ready: - s = "ready"; - break; - default: - s = "??"; - } - seq_printf(m, "%-16s %s ", "mstate", s); - - seq_printf(m, "%-16s %s ", "running", cs->running ? "yes" : "no"); - seq_printf(m, "%-16s %s ", "connected", cs->connected ? "yes" : "no"); - seq_printf(m, "%-16s %s ", "isdn_up", cs->isdn_up ? "yes" : "no"); - seq_printf(m, "%-16s %s ", "cidmode", cs->cidmode ? "yes" : "no"); - - for (i = 0; i < cs->channels; i++) { - seq_printf(m, "[%d]%-13s %d ", i, "corrupted", - cs->bcs[i].corrupted); - seq_printf(m, "[%d]%-13s %d ", i, "trans_down", - cs->bcs[i].trans_down); - seq_printf(m, "[%d]%-13s %d ", i, "trans_up", - cs->bcs[i].trans_up); - seq_printf(m, "[%d]%-13s %d ", i, "chstate", - cs->bcs[i].chstate); - switch (cs->bcs[i].proto2) { - case l2_bitsync: - s = "bitsync"; - break; - case l2_hdlc: - s = "hdlc"; - break; - case l2_voice: - s = "voice"; - break; - default: - s = "??"; - } - seq_printf(m, "[%d]%-13s %s ", i, "proto2", s); - } - return 0; -} - -/** - * gigaset_isdn_regdev() - register device to ll - * @cs: device descriptor structure. - * @isdnid: device name. 
- * - * return value: 0 on success, error code < 0 on failure - */ -int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) -{ - struct gigaset_capi_ctr *iif; - int rc; - - iif = kzalloc(sizeof(*iif), gfp_kernel); - if (!iif) { - pr_err("%s: out of memory ", __func__); - return -enomem; - } - - /* prepare controller structure */ - iif->ctr.owner = this_module; - iif->ctr.driverdata = cs; - strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name) - 1); - iif->ctr.driver_name = "gigaset"; - iif->ctr.load_firmware = null; - iif->ctr.reset_ctr = null; - iif->ctr.register_appl = gigaset_register_appl; - iif->ctr.release_appl = gigaset_release_appl; - iif->ctr.send_message = gigaset_send_message; - iif->ctr.procinfo = gigaset_procinfo; - iif->ctr.proc_show = gigaset_proc_show, - init_list_head(&iif->appls); - skb_queue_head_init(&iif->sendqueue); - atomic_set(&iif->sendqlen, 0); - - /* register controller with capi */ - rc = attach_capi_ctr(&iif->ctr); - if (rc) { - pr_err("attach_capi_ctr failed (%d) ", rc); - kfree(iif); - return rc; - } - - cs->iif = iif; - cs->hw_hdr_len = capi_data_b3_req_len; - return 0; -} - -/** - * gigaset_isdn_unregdev() - unregister device from ll - * @cs: device descriptor structure. 
- */ -void gigaset_isdn_unregdev(struct cardstate *cs) -{ - struct gigaset_capi_ctr *iif = cs->iif; - - detach_capi_ctr(&iif->ctr); - kfree(iif); - cs->iif = null; -} - -static struct capi_driver capi_driver_gigaset = { - .name = "gigaset", - .revision = "1.0", -}; - -/** - * gigaset_isdn_regdrv() - register driver to ll - */ -void gigaset_isdn_regdrv(void) -{ - pr_info("kernel capi interface "); - register_capi_driver(&capi_driver_gigaset); -} - -/** - * gigaset_isdn_unregdrv() - unregister driver from ll - */ -void gigaset_isdn_unregdrv(void) -{ - unregister_capi_driver(&capi_driver_gigaset); -} diff --git a/drivers/staging/isdn/gigaset/common.c b/drivers/staging/isdn/gigaset/common.c --- a/drivers/staging/isdn/gigaset/common.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * stuff used by all variants of the driver - * - * copyright (c) 2001 by stefan eilers, - * hansjoerg lipp <hjlipp@web.de>, - * tilman schmidt <tilman@imap.cc>. - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/module.h> -#include <linux/moduleparam.h> - -/* version information */ -#define driver_author "hansjoerg lipp <hjlipp@web.de>, tilman schmidt <tilman@imap.cc>, stefan eilers" -#define driver_desc "driver for gigaset 307x" - -#ifdef config_gigaset_debug -#define driver_desc_debug " (debug build)" -#else -#define driver_desc_debug "" -#endif - -/* module parameters */ -int gigaset_debuglevel; -export_symbol_gpl(gigaset_debuglevel); -module_param_named(debug, gigaset_debuglevel, int, s_irugo | s_iwusr); -module_parm_desc(debug, "debug level"); - -/* driver state flags */ -#define valid_minor 0x01 -#define valid_id 0x02 - -/** - * gigaset_dbg_buffer() - dump data in ascii and hex for debugging - * @level: debugging level. - * @msg: message prefix. - * @len: number of bytes to dump. - * @buf: data to dump. 
- * - * if the current debugging level includes one of the bits set in @level, - * @len bytes starting at @buf are logged to dmesg at kern_debug prio, - * prefixed by the text @msg. - */ -void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, - size_t len, const unsigned char *buf) -{ - unsigned char outbuf[80]; - unsigned char c; - size_t space = sizeof outbuf - 1; - unsigned char *out = outbuf; - size_t numin = len; - - while (numin--) { - c = *buf++; - if (c == '~' || c == '^' || c == '\') { - if (!space--) - break; - *out++ = '\'; - } - if (c & 0x80) { - if (!space--) - break; - *out++ = '~'; - c ^= 0x80; - } - if (c < 0x20 || c == 0x7f) { - if (!space--) - break; - *out++ = '^'; - c ^= 0x40; - } - if (!space--) - break; - *out++ = c; - } - *out = 0; - - gig_dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf); -} -export_symbol_gpl(gigaset_dbg_buffer); - -static int setflags(struct cardstate *cs, unsigned flags, unsigned delay) -{ - int r; - - r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags); - cs->control_state = flags; - if (r < 0) - return r; - - if (delay) { - set_current_state(task_interruptible); - schedule_timeout(delay * hz / 1000); - } - - return 0; -} - -int gigaset_enterconfigmode(struct cardstate *cs) -{ - int i, r; - - cs->control_state = tiocm_rts; - - r = setflags(cs, tiocm_dtr, 200); - if (r < 0) - goto error; - r = setflags(cs, 0, 200); - if (r < 0) - goto error; - for (i = 0; i < 5; ++i) { - r = setflags(cs, tiocm_rts, 100); - if (r < 0) - goto error; - r = setflags(cs, 0, 100); - if (r < 0) - goto error; - } - r = setflags(cs, tiocm_rts | tiocm_dtr, 800); - if (r < 0) - goto error; - - return 0; - -error: - dev_err(cs->dev, "error %d on setuartbits ", -r); - cs->control_state = tiocm_rts | tiocm_dtr; - cs->ops->set_modem_ctrl(cs, 0, tiocm_rts | tiocm_dtr); - - return -1; -} - -static int test_timeout(struct at_state_t *at_state) -{ - if (!at_state->timer_expires) - return 0; - - if 
(--at_state->timer_expires) { - gig_dbg(debug_mcmd, "decreased timer of %p to %lu", - at_state, at_state->timer_expires); - return 0; - } - - gigaset_add_event(at_state->cs, at_state, ev_timeout, null, - at_state->timer_index, null); - return 1; -} - -static void timer_tick(struct timer_list *t) -{ - struct cardstate *cs = from_timer(cs, t, timer); - unsigned long flags; - unsigned channel; - struct at_state_t *at_state; - int timeout = 0; - - spin_lock_irqsave(&cs->lock, flags); - - for (channel = 0; channel < cs->channels; ++channel) - if (test_timeout(&cs->bcs[channel].at_state)) - timeout = 1; - - if (test_timeout(&cs->at_state)) - timeout = 1; - - list_for_each_entry(at_state, &cs->temp_at_states, list) - if (test_timeout(at_state)) - timeout = 1; - - if (cs->running) { - mod_timer(&cs->timer, jiffies + msecs_to_jiffies(gig_tick)); - if (timeout) { - gig_dbg(debug_event, "scheduling timeout"); - tasklet_schedule(&cs->event_tasklet); - } - } - - spin_unlock_irqrestore(&cs->lock, flags); -} - -int gigaset_get_channel(struct bc_state *bcs) -{ - unsigned long flags; - - spin_lock_irqsave(&bcs->cs->lock, flags); - if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) { - gig_dbg(debug_channel, "could not allocate channel %d", - bcs->channel); - spin_unlock_irqrestore(&bcs->cs->lock, flags); - return -ebusy; - } - ++bcs->use_count; - bcs->busy = 1; - gig_dbg(debug_channel, "allocated channel %d", bcs->channel); - spin_unlock_irqrestore(&bcs->cs->lock, flags); - return 0; -} - -struct bc_state *gigaset_get_free_channel(struct cardstate *cs) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&cs->lock, flags); - if (!try_module_get(cs->driver->owner)) { - gig_dbg(debug_channel, - "could not get module for allocating channel"); - spin_unlock_irqrestore(&cs->lock, flags); - return null; - } - for (i = 0; i < cs->channels; ++i) - if (!cs->bcs[i].use_count) { - ++cs->bcs[i].use_count; - cs->bcs[i].busy = 1; - spin_unlock_irqrestore(&cs->lock, flags); - 
gig_dbg(debug_channel, "allocated channel %d", i); - return cs->bcs + i; - } - module_put(cs->driver->owner); - spin_unlock_irqrestore(&cs->lock, flags); - gig_dbg(debug_channel, "no free channel"); - return null; -} - -void gigaset_free_channel(struct bc_state *bcs) -{ - unsigned long flags; - - spin_lock_irqsave(&bcs->cs->lock, flags); - if (!bcs->busy) { - gig_dbg(debug_channel, "could not free channel %d", - bcs->channel); - spin_unlock_irqrestore(&bcs->cs->lock, flags); - return; - } - --bcs->use_count; - bcs->busy = 0; - module_put(bcs->cs->driver->owner); - gig_dbg(debug_channel, "freed channel %d", bcs->channel); - spin_unlock_irqrestore(&bcs->cs->lock, flags); -} - -int gigaset_get_channels(struct cardstate *cs) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&cs->lock, flags); - for (i = 0; i < cs->channels; ++i) - if (cs->bcs[i].use_count) { - spin_unlock_irqrestore(&cs->lock, flags); - gig_dbg(debug_channel, - "could not allocate all channels"); - return -ebusy; - } - for (i = 0; i < cs->channels; ++i) - ++cs->bcs[i].use_count; - spin_unlock_irqrestore(&cs->lock, flags); - - gig_dbg(debug_channel, "allocated all channels"); - - return 0; -} - -void gigaset_free_channels(struct cardstate *cs) -{ - unsigned long flags; - int i; - - gig_dbg(debug_channel, "unblocking all channels"); - spin_lock_irqsave(&cs->lock, flags); - for (i = 0; i < cs->channels; ++i) - --cs->bcs[i].use_count; - spin_unlock_irqrestore(&cs->lock, flags); -} - -void gigaset_block_channels(struct cardstate *cs) -{ - unsigned long flags; - int i; - - gig_dbg(debug_channel, "blocking all channels"); - spin_lock_irqsave(&cs->lock, flags); - for (i = 0; i < cs->channels; ++i) - ++cs->bcs[i].use_count; - spin_unlock_irqrestore(&cs->lock, flags); -} - -static void clear_events(struct cardstate *cs) -{ - struct event_t *ev; - unsigned head, tail; - unsigned long flags; - - spin_lock_irqsave(&cs->ev_lock, flags); - - head = cs->ev_head; - tail = cs->ev_tail; - - while (tail != head) { 
- ev = cs->events + head; - kfree(ev->ptr); - head = (head + 1) % max_events; - } - - cs->ev_head = tail; - - spin_unlock_irqrestore(&cs->ev_lock, flags); -} - -/** - * gigaset_add_event() - add event to device event queue - * @cs: device descriptor structure. - * @at_state: connection state structure. - * @type: event type. - * @ptr: pointer parameter for event. - * @parameter: integer parameter for event. - * @arg: pointer parameter for event. - * - * allocate an event queue entry from the device's event queue, and set it up - * with the parameters given. - * - * return value: added event - */ -struct event_t *gigaset_add_event(struct cardstate *cs, - struct at_state_t *at_state, int type, - void *ptr, int parameter, void *arg) -{ - unsigned long flags; - unsigned next, tail; - struct event_t *event = null; - - gig_dbg(debug_event, "queueing event %d", type); - - spin_lock_irqsave(&cs->ev_lock, flags); - - tail = cs->ev_tail; - next = (tail + 1) % max_events; - if (unlikely(next == cs->ev_head)) - dev_err(cs->dev, "event queue full "); - else { - event = cs->events + tail; - event->type = type; - event->at_state = at_state; - event->cid = -1; - event->ptr = ptr; - event->arg = arg; - event->parameter = parameter; - cs->ev_tail = next; - } - - spin_unlock_irqrestore(&cs->ev_lock, flags); - - return event; -} -export_symbol_gpl(gigaset_add_event); - -static void clear_at_state(struct at_state_t *at_state) -{ - int i; - - for (i = 0; i < str_num; ++i) { - kfree(at_state->str_var[i]); - at_state->str_var[i] = null; - } -} - -static void dealloc_temp_at_states(struct cardstate *cs) -{ - struct at_state_t *cur, *next; - - list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { - list_del(&cur->list); - clear_at_state(cur); - kfree(cur); - } -} - -static void gigaset_freebcs(struct bc_state *bcs) -{ - int i; - - gig_dbg(debug_init, "freeing bcs[%d]->hw", bcs->channel); - bcs->cs->ops->freebcshw(bcs); - - gig_dbg(debug_init, "clearing bcs[%d]->at_state", 
bcs->channel); - clear_at_state(&bcs->at_state); - gig_dbg(debug_init, "freeing bcs[%d]->skb", bcs->channel); - dev_kfree_skb(bcs->rx_skb); - bcs->rx_skb = null; - - for (i = 0; i < at_num; ++i) { - kfree(bcs->commands[i]); - bcs->commands[i] = null; - } -} - -static struct cardstate *alloc_cs(struct gigaset_driver *drv) -{ - unsigned long flags; - unsigned i; - struct cardstate *cs; - struct cardstate *ret = null; - - spin_lock_irqsave(&drv->lock, flags); - if (drv->blocked) - goto exit; - for (i = 0; i < drv->minors; ++i) { - cs = drv->cs + i; - if (!(cs->flags & valid_minor)) { - cs->flags = valid_minor; - ret = cs; - break; - } - } -exit: - spin_unlock_irqrestore(&drv->lock, flags); - return ret; -} - -static void free_cs(struct cardstate *cs) -{ - cs->flags = 0; -} - -static void make_valid(struct cardstate *cs, unsigned mask) -{ - unsigned long flags; - struct gigaset_driver *drv = cs->driver; - spin_lock_irqsave(&drv->lock, flags); - cs->flags |= mask; - spin_unlock_irqrestore(&drv->lock, flags); -} - -static void make_invalid(struct cardstate *cs, unsigned mask) -{ - unsigned long flags; - struct gigaset_driver *drv = cs->driver; - spin_lock_irqsave(&drv->lock, flags); - cs->flags &= ~mask; - spin_unlock_irqrestore(&drv->lock, flags); -} - -/** - * gigaset_freecs() - free all associated ressources of a device - * @cs: device descriptor structure. - * - * stops all tasklets and timers, unregisters the device from all - * subsystems it was registered to, deallocates the device structure - * @cs and all structures referenced from it. - * operations on the device should be stopped before calling this. 
- */ -void gigaset_freecs(struct cardstate *cs) -{ - int i; - unsigned long flags; - - if (!cs) - return; - - mutex_lock(&cs->mutex); - - spin_lock_irqsave(&cs->lock, flags); - cs->running = 0; - spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are - not rescheduled below */ - - tasklet_kill(&cs->event_tasklet); - del_timer_sync(&cs->timer); - - switch (cs->cs_init) { - default: - /* clear b channel structures */ - for (i = 0; i < cs->channels; ++i) { - gig_dbg(debug_init, "clearing bcs[%d]", i); - gigaset_freebcs(cs->bcs + i); - } - - /* clear device sysfs */ - gigaset_free_dev_sysfs(cs); - - gigaset_if_free(cs); - - gig_dbg(debug_init, "clearing hw"); - cs->ops->freecshw(cs); - - /* fall through */ - case 2: /* error in initcshw */ - /* deregister from ll */ - make_invalid(cs, valid_id); - gigaset_isdn_unregdev(cs); - - /* fall through */ - case 1: /* error when registering to ll */ - gig_dbg(debug_init, "clearing at_state"); - clear_at_state(&cs->at_state); - dealloc_temp_at_states(cs); - clear_events(cs); - tty_port_destroy(&cs->port); - - /* fall through */ - case 0: /* error in basic setup */ - gig_dbg(debug_init, "freeing inbuf"); - kfree(cs->inbuf); - kfree(cs->bcs); - } - - mutex_unlock(&cs->mutex); - free_cs(cs); -} -export_symbol_gpl(gigaset_freecs); - -void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, - struct cardstate *cs, int cid) -{ - int i; - - init_list_head(&at_state->list); - at_state->waiting = 0; - at_state->getstring = 0; - at_state->pending_commands = 0; - at_state->timer_expires = 0; - at_state->timer_active = 0; - at_state->timer_index = 0; - at_state->seq_index = 0; - at_state->constate = 0; - for (i = 0; i < str_num; ++i) - at_state->str_var[i] = null; - at_state->int_var[var_zdle] = 0; - at_state->int_var[var_zctp] = -1; - at_state->int_var[var_zsau] = zsau_null; - at_state->cs = cs; - at_state->bcs = bcs; - at_state->cid = cid; - if (!cid) - at_state->replystruct = cs->tabnocid; - else - 
at_state->replystruct = cs->tabcid; -} - - -static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs) -/* inbuf->read must be allocated before! */ -{ - inbuf->head = 0; - inbuf->tail = 0; - inbuf->cs = cs; - inbuf->inputstate = ins_command; -} - -/** - * gigaset_fill_inbuf() - append received data to input buffer - * @inbuf: buffer structure. - * @src: received data. - * @numbytes: number of bytes received. - * - * return value: !=0 if some data was appended - */ -int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, - unsigned numbytes) -{ - unsigned n, head, tail, bytesleft; - - gig_dbg(debug_intr, "received %u bytes", numbytes); - - if (!numbytes) - return 0; - - bytesleft = numbytes; - tail = inbuf->tail; - head = inbuf->head; - gig_dbg(debug_intr, "buffer state: %u -> %u", head, tail); - - while (bytesleft) { - if (head > tail) - n = head - 1 - tail; - else if (head == 0) - n = (rbufsize - 1) - tail; - else - n = rbufsize - tail; - if (!n) { - dev_err(inbuf->cs->dev, - "buffer overflow (%u bytes lost) ", - bytesleft); - break; - } - if (n > bytesleft) - n = bytesleft; - memcpy(inbuf->data + tail, src, n); - bytesleft -= n; - tail = (tail + n) % rbufsize; - src += n; - } - gig_dbg(debug_intr, "setting tail to %u", tail); - inbuf->tail = tail; - return numbytes != bytesleft; -} -export_symbol_gpl(gigaset_fill_inbuf); - -/* initialize the b-channel structure */ -static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs, - int channel) -{ - int i; - - bcs->tx_skb = null; - - skb_queue_head_init(&bcs->squeue); - - bcs->corrupted = 0; - bcs->trans_down = 0; - bcs->trans_up = 0; - - gig_dbg(debug_init, "setting up bcs[%d]->at_state", channel); - gigaset_at_init(&bcs->at_state, bcs, cs, -1); - -#ifdef config_gigaset_debug - bcs->emptycount = 0; -#endif - - bcs->rx_bufsize = 0; - bcs->rx_skb = null; - bcs->rx_fcs = ppp_initfcs; - bcs->inputstate = 0; - bcs->channel = channel; - bcs->cs = cs; - - bcs->chstate = 0; - 
bcs->use_count = 1; - bcs->busy = 0; - bcs->ignore = cs->ignoreframes; - - for (i = 0; i < at_num; ++i) - bcs->commands[i] = null; - - spin_lock_init(&bcs->aplock); - bcs->ap = null; - bcs->apconnstate = 0; - - gig_dbg(debug_init, " setting up bcs[%d]->hw", channel); - return cs->ops->initbcshw(bcs); -} - -/** - * gigaset_initcs() - initialize device structure - * @drv: hardware driver the device belongs to - * @channels: number of b channels supported by device - * @onechannel: !=0 if b channel data and at commands share one - * communication channel (m10x), - * ==0 if b channels have separate communication channels (base) - * @ignoreframes: number of frames to ignore after setting up b channel - * @cidmode: !=0: start in callid mode - * @modulename: name of driver module for ll registration - * - * allocate and initialize cardstate structure for gigaset driver - * calls hardware dependent gigaset_initcshw() function - * calls b channel initialization function gigaset_initbcs() for each b channel - * - * return value: - * pointer to cardstate structure - */ -struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, - int onechannel, int ignoreframes, - int cidmode, const char *modulename) -{ - struct cardstate *cs; - unsigned long flags; - int i; - - gig_dbg(debug_init, "allocating cs"); - cs = alloc_cs(drv); - if (!cs) { - pr_err("maximum number of devices exceeded "); - return null; - } - - cs->cs_init = 0; - cs->channels = channels; - cs->onechannel = onechannel; - cs->ignoreframes = ignoreframes; - init_list_head(&cs->temp_at_states); - cs->running = 0; - timer_setup(&cs->timer, timer_tick, 0); - spin_lock_init(&cs->ev_lock); - cs->ev_tail = 0; - cs->ev_head = 0; - - tasklet_init(&cs->event_tasklet, gigaset_handle_event, - (unsigned long) cs); - tty_port_init(&cs->port); - cs->commands_pending = 0; - cs->cur_at_seq = 0; - cs->gotfwver = -1; - cs->dev = null; - cs->tty_dev = null; - cs->cidmode = cidmode != 0; - cs->tabnocid = 
gigaset_tab_nocid; - cs->tabcid = gigaset_tab_cid; - - init_waitqueue_head(&cs->waitqueue); - cs->waiting = 0; - - cs->mode = m_unknown; - cs->mstate = ms_uninitialized; - - cs->bcs = kmalloc_array(channels, sizeof(struct bc_state), gfp_kernel); - cs->inbuf = kmalloc(sizeof(struct inbuf_t), gfp_kernel); - if (!cs->bcs || !cs->inbuf) { - pr_err("out of memory "); - goto error; - } - ++cs->cs_init; - - gig_dbg(debug_init, "setting up at_state"); - spin_lock_init(&cs->lock); - gigaset_at_init(&cs->at_state, null, cs, 0); - cs->dle = 0; - cs->cbytes = 0; - - gig_dbg(debug_init, "setting up inbuf"); - gigaset_inbuf_init(cs->inbuf, cs); - - cs->connected = 0; - cs->isdn_up = 0; - - gig_dbg(debug_init, "setting up cmdbuf"); - cs->cmdbuf = cs->lastcmdbuf = null; - spin_lock_init(&cs->cmdlock); - cs->curlen = 0; - cs->cmdbytes = 0; - - gig_dbg(debug_init, "setting up iif"); - if (gigaset_isdn_regdev(cs, modulename) < 0) { - pr_err("error registering isdn device "); - goto error; - } - - make_valid(cs, valid_id); - ++cs->cs_init; - gig_dbg(debug_init, "setting up hw"); - if (cs->ops->initcshw(cs) < 0) - goto error; - - ++cs->cs_init; - - /* set up character device */ - gigaset_if_init(cs); - - /* set up device sysfs */ - gigaset_init_dev_sysfs(cs); - - /* set up channel data structures */ - for (i = 0; i < channels; ++i) { - gig_dbg(debug_init, "setting up bcs[%d]", i); - if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) { - pr_err("could not allocate channel %d data ", i); - goto error; - } - } - - spin_lock_irqsave(&cs->lock, flags); - cs->running = 1; - spin_unlock_irqrestore(&cs->lock, flags); - cs->timer.expires = jiffies + msecs_to_jiffies(gig_tick); - add_timer(&cs->timer); - - gig_dbg(debug_init, "cs initialized"); - return cs; - -error: - gig_dbg(debug_init, "failed"); - gigaset_freecs(cs); - return null; -} -export_symbol_gpl(gigaset_initcs); - -/* reinitialize the b-channel structure on hangup */ -void gigaset_bcs_reinit(struct bc_state *bcs) -{ - struct sk_buff *skb; 
- struct cardstate *cs = bcs->cs; - unsigned long flags; - - while ((skb = skb_dequeue(&bcs->squeue)) != null) - dev_kfree_skb(skb); - - spin_lock_irqsave(&cs->lock, flags); - clear_at_state(&bcs->at_state); - bcs->at_state.constate = 0; - bcs->at_state.timer_active = 0; - bcs->at_state.timer_expires = 0; - bcs->at_state.cid = -1; /* no cid defined */ - spin_unlock_irqrestore(&cs->lock, flags); - - bcs->inputstate = 0; - -#ifdef config_gigaset_debug - bcs->emptycount = 0; -#endif - - bcs->rx_fcs = ppp_initfcs; - bcs->chstate = 0; - - bcs->ignore = cs->ignoreframes; - dev_kfree_skb(bcs->rx_skb); - bcs->rx_skb = null; - - cs->ops->reinitbcshw(bcs); -} - -static void cleanup_cs(struct cardstate *cs) -{ - struct cmdbuf_t *cb, *tcb; - int i; - unsigned long flags; - - spin_lock_irqsave(&cs->lock, flags); - - cs->mode = m_unknown; - cs->mstate = ms_uninitialized; - - clear_at_state(&cs->at_state); - dealloc_temp_at_states(cs); - gigaset_at_init(&cs->at_state, null, cs, 0); - - cs->inbuf->inputstate = ins_command; - cs->inbuf->head = 0; - cs->inbuf->tail = 0; - - cb = cs->cmdbuf; - while (cb) { - tcb = cb; - cb = cb->next; - kfree(tcb); - } - cs->cmdbuf = cs->lastcmdbuf = null; - cs->curlen = 0; - cs->cmdbytes = 0; - cs->gotfwver = -1; - cs->dle = 0; - cs->cur_at_seq = 0; - cs->commands_pending = 0; - cs->cbytes = 0; - - spin_unlock_irqrestore(&cs->lock, flags); - - for (i = 0; i < cs->channels; ++i) { - gigaset_freebcs(cs->bcs + i); - if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) - pr_err("could not allocate channel %d data ", i); - } - - if (cs->waiting) { - cs->cmd_result = -enodev; - cs->waiting = 0; - wake_up_interruptible(&cs->waitqueue); - } -} - - -/** - * gigaset_start() - start device operations - * @cs: device descriptor structure. - * - * prepares the device for use by setting up communication parameters, - * scheduling an ev_start event to initiate device initialization, and - * waiting for completion of the initialization. 
- * - * return value: - * 0 on success, error code < 0 on failure - */ -int gigaset_start(struct cardstate *cs) -{ - unsigned long flags; - - if (mutex_lock_interruptible(&cs->mutex)) - return -ebusy; - - spin_lock_irqsave(&cs->lock, flags); - cs->connected = 1; - spin_unlock_irqrestore(&cs->lock, flags); - - if (cs->mstate != ms_locked) { - cs->ops->set_modem_ctrl(cs, 0, tiocm_dtr | tiocm_rts); - cs->ops->baud_rate(cs, b115200); - cs->ops->set_line_ctrl(cs, cs8); - cs->control_state = tiocm_dtr | tiocm_rts; - } - - cs->waiting = 1; - - if (!gigaset_add_event(cs, &cs->at_state, ev_start, null, 0, null)) { - cs->waiting = 0; - goto error; - } - gigaset_schedule_event(cs); - - wait_event(cs->waitqueue, !cs->waiting); - - mutex_unlock(&cs->mutex); - return 0; - -error: - mutex_unlock(&cs->mutex); - return -enomem; -} -export_symbol_gpl(gigaset_start); - -/** - * gigaset_shutdown() - shut down device operations - * @cs: device descriptor structure. - * - * deactivates the device by scheduling an ev_shutdown event and - * waiting for completion of the shutdown. - * - * return value: - * 0 - success, -enodev - error (no device associated) - */ -int gigaset_shutdown(struct cardstate *cs) -{ - mutex_lock(&cs->mutex); - - if (!(cs->flags & valid_minor)) { - mutex_unlock(&cs->mutex); - return -enodev; - } - - cs->waiting = 1; - - if (!gigaset_add_event(cs, &cs->at_state, ev_shutdown, null, 0, null)) - goto exit; - gigaset_schedule_event(cs); - - wait_event(cs->waitqueue, !cs->waiting); - - cleanup_cs(cs); - -exit: - mutex_unlock(&cs->mutex); - return 0; -} -export_symbol_gpl(gigaset_shutdown); - -/** - * gigaset_stop() - stop device operations - * @cs: device descriptor structure. - * - * stops operations on the device by scheduling an ev_stop event and - * waiting for completion of the shutdown. 
- */ -void gigaset_stop(struct cardstate *cs) -{ - mutex_lock(&cs->mutex); - - cs->waiting = 1; - - if (!gigaset_add_event(cs, &cs->at_state, ev_stop, null, 0, null)) - goto exit; - gigaset_schedule_event(cs); - - wait_event(cs->waitqueue, !cs->waiting); - - cleanup_cs(cs); - -exit: - mutex_unlock(&cs->mutex); -} -export_symbol_gpl(gigaset_stop); - -static list_head(drivers); -static define_spinlock(driver_lock); - -struct cardstate *gigaset_get_cs_by_id(int id) -{ - unsigned long flags; - struct cardstate *ret = null; - struct cardstate *cs; - struct gigaset_driver *drv; - unsigned i; - - spin_lock_irqsave(&driver_lock, flags); - list_for_each_entry(drv, &drivers, list) { - spin_lock(&drv->lock); - for (i = 0; i < drv->minors; ++i) { - cs = drv->cs + i; - if ((cs->flags & valid_id) && cs->myid == id) { - ret = cs; - break; - } - } - spin_unlock(&drv->lock); - if (ret) - break; - } - spin_unlock_irqrestore(&driver_lock, flags); - return ret; -} - -static struct cardstate *gigaset_get_cs_by_minor(unsigned minor) -{ - unsigned long flags; - struct cardstate *ret = null; - struct gigaset_driver *drv; - unsigned index; - - spin_lock_irqsave(&driver_lock, flags); - list_for_each_entry(drv, &drivers, list) { - if (minor < drv->minor || minor >= drv->minor + drv->minors) - continue; - index = minor - drv->minor; - spin_lock(&drv->lock); - if (drv->cs[index].flags & valid_minor) - ret = drv->cs + index; - spin_unlock(&drv->lock); - if (ret) - break; - } - spin_unlock_irqrestore(&driver_lock, flags); - return ret; -} - -struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) -{ - return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); -} - -/** - * gigaset_freedriver() - free all associated ressources of a driver - * @drv: driver descriptor structure. - * - * unregisters the driver from the system and deallocates the driver - * structure @drv and all structures referenced from it. - * all devices should be shut down before calling this. 
- */ -void gigaset_freedriver(struct gigaset_driver *drv) -{ - unsigned long flags; - - spin_lock_irqsave(&driver_lock, flags); - list_del(&drv->list); - spin_unlock_irqrestore(&driver_lock, flags); - - gigaset_if_freedriver(drv); - - kfree(drv->cs); - kfree(drv); -} -export_symbol_gpl(gigaset_freedriver); - -/** - * gigaset_initdriver() - initialize driver structure - * @minor: first minor number - * @minors: number of minors this driver can handle - * @procname: name of the driver - * @devname: name of the device files (prefix without minor number) - * - * allocate and initialize gigaset_driver structure. initialize interface. - * - * return value: - * pointer to the gigaset_driver structure on success, null on failure. - */ -struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, - const char *procname, - const char *devname, - const struct gigaset_ops *ops, - struct module *owner) -{ - struct gigaset_driver *drv; - unsigned long flags; - unsigned i; - - drv = kmalloc(sizeof *drv, gfp_kernel); - if (!drv) - return null; - - drv->have_tty = 0; - drv->minor = minor; - drv->minors = minors; - spin_lock_init(&drv->lock); - drv->blocked = 0; - drv->ops = ops; - drv->owner = owner; - init_list_head(&drv->list); - - drv->cs = kmalloc_array(minors, sizeof(*drv->cs), gfp_kernel); - if (!drv->cs) - goto error; - - for (i = 0; i < minors; ++i) { - drv->cs[i].flags = 0; - drv->cs[i].driver = drv; - drv->cs[i].ops = drv->ops; - drv->cs[i].minor_index = i; - mutex_init(&drv->cs[i].mutex); - } - - gigaset_if_initdriver(drv, procname, devname); - - spin_lock_irqsave(&driver_lock, flags); - list_add(&drv->list, &drivers); - spin_unlock_irqrestore(&driver_lock, flags); - - return drv; - -error: - kfree(drv); - return null; -} -export_symbol_gpl(gigaset_initdriver); - -/** - * gigaset_blockdriver() - block driver - * @drv: driver descriptor structure. - * - * prevents the driver from attaching new devices, in preparation for - * deregistration. 
- */ -void gigaset_blockdriver(struct gigaset_driver *drv) -{ - drv->blocked = 1; -} -export_symbol_gpl(gigaset_blockdriver); - -static int __init gigaset_init_module(void) -{ - /* in accordance with the principle of least astonishment, - * setting the 'debug' parameter to 1 activates a sensible - * set of default debug levels - */ - if (gigaset_debuglevel == 1) - gigaset_debuglevel = debug_default; - - pr_info(driver_desc driver_desc_debug " "); - gigaset_isdn_regdrv(); - return 0; -} - -static void __exit gigaset_exit_module(void) -{ - gigaset_isdn_unregdrv(); -} - -module_init(gigaset_init_module); -module_exit(gigaset_exit_module); - -module_author(driver_author); -module_description(driver_desc); - -module_license("gpl"); diff --git a/drivers/staging/isdn/gigaset/dummyll.c b/drivers/staging/isdn/gigaset/dummyll.c --- a/drivers/staging/isdn/gigaset/dummyll.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * dummy ll interface for the gigaset driver - * - * copyright (c) 2009 by tilman schmidt <tilman@imap.cc>. 
- * - * ===================================================================== - * ===================================================================== - */ - -#include <linux/export.h> -#include "gigaset.h" - -void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) -{ -} -export_symbol_gpl(gigaset_skb_sent); - -void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) -{ -} -export_symbol_gpl(gigaset_skb_rcvd); - -void gigaset_isdn_rcv_err(struct bc_state *bcs) -{ -} -export_symbol_gpl(gigaset_isdn_rcv_err); - -int gigaset_isdn_icall(struct at_state_t *at_state) -{ - return icall_ignore; -} - -void gigaset_isdn_connd(struct bc_state *bcs) -{ -} - -void gigaset_isdn_hupd(struct bc_state *bcs) -{ -} - -void gigaset_isdn_connb(struct bc_state *bcs) -{ -} - -void gigaset_isdn_hupb(struct bc_state *bcs) -{ -} - -void gigaset_isdn_start(struct cardstate *cs) -{ -} - -void gigaset_isdn_stop(struct cardstate *cs) -{ -} - -int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) -{ - return 0; -} - -void gigaset_isdn_unregdev(struct cardstate *cs) -{ -} - -void gigaset_isdn_regdrv(void) -{ - pr_info("no isdn subsystem interface "); -} - -void gigaset_isdn_unregdrv(void) -{ -} diff --git a/drivers/staging/isdn/gigaset/ev-layer.c b/drivers/staging/isdn/gigaset/ev-layer.c --- a/drivers/staging/isdn/gigaset/ev-layer.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * stuff used by all variants of the driver - * - * copyright (c) 2001 by stefan eilers, - * hansjoerg lipp <hjlipp@web.de>, - * tilman schmidt <tilman@imap.cc>. 
- * - * ===================================================================== - * ===================================================================== - */ - -#include <linux/export.h> -#include "gigaset.h" - -/* ========================================================== */ -/* bit masks for pending commands */ -#define pc_dial 0x001 -#define pc_hup 0x002 -#define pc_init 0x004 -#define pc_dle0 0x008 -#define pc_dle1 0x010 -#define pc_shutdown 0x020 -#define pc_accept 0x040 -#define pc_cid 0x080 -#define pc_nocid 0x100 -#define pc_cidmode 0x200 -#define pc_ummode 0x400 - -/* types of modem responses */ -#define rt_nothing 0 -#define rt_zsau 1 -#define rt_ring 2 -#define rt_number 3 -#define rt_string 4 -#define rt_zcau 6 - -/* possible ascii responses */ -#define rsp_ok 0 -#define rsp_error 1 -#define rsp_zgci 3 -#define rsp_ring 4 -#define rsp_zvls 5 -#define rsp_zcau 6 - -/* responses with values to store in at_state */ -/* - numeric */ -#define rsp_var 100 -#define rsp_zsau (rsp_var + var_zsau) -#define rsp_zdle (rsp_var + var_zdle) -#define rsp_zctp (rsp_var + var_zctp) -/* - string */ -#define rsp_str (rsp_var + var_num) -#define rsp_nmbr (rsp_str + str_nmbr) -#define rsp_zcpn (rsp_str + str_zcpn) -#define rsp_zcon (rsp_str + str_zcon) -#define rsp_zbc (rsp_str + str_zbc) -#define rsp_zhlc (rsp_str + str_zhlc) - -#define rsp_wrong_cid -2 /* unknown cid in cmd */ -#define rsp_inval -6 /* invalid response */ -#define rsp_nodev -9 /* device not connected */ - -#define rsp_none -19 -#define rsp_string -20 -#define rsp_null -21 -#define rsp_init -27 -#define rsp_any -26 -#define rsp_last -28 - -/* actions for process_response */ -#define act_nothing 0 -#define act_setdle1 1 -#define act_setdle0 2 -#define act_failinit 3 -#define act_hupmodem 4 -#define act_configmode 5 -#define act_init 6 -#define act_dle0 7 -#define act_dle1 8 -#define act_faildle0 9 -#define act_faildle1 10 -#define act_ring 11 -#define act_cid 12 -#define act_failcid 13 -#define act_sdown 14 
-#define act_failsdown 15 -#define act_debug 16 -#define act_warn 17 -#define act_dialing 18 -#define act_abortdial 19 -#define act_disconnect 20 -#define act_connect 21 -#define act_remotereject 22 -#define act_conntimeout 23 -#define act_remotehup 24 -#define act_aborthup 25 -#define act_icall 26 -#define act_accepted 27 -#define act_abortaccept 28 -#define act_timeout 29 -#define act_getstring 30 -#define act_setver 31 -#define act_failver 32 -#define act_gotver 33 -#define act_test 34 -#define act_error 35 -#define act_abortcid 36 -#define act_zcau 37 -#define act_notify_bc_down 38 -#define act_notify_bc_up 39 -#define act_dial 40 -#define act_accept 41 -#define act_hup 43 -#define act_if_lock 44 -#define act_start 45 -#define act_stop 46 -#define act_fakedle0 47 -#define act_fakehup 48 -#define act_fakesdown 49 -#define act_shutdown 50 -#define act_proc_cidmode 51 -#define act_umodeset 52 -#define act_failumode 53 -#define act_cmodeset 54 -#define act_failcmode 55 -#define act_if_ver 56 -#define act_cmd 100 - -/* at command sequences */ -#define seq_none 0 -#define seq_init 100 -#define seq_dle0 200 -#define seq_dle1 250 -#define seq_cid 300 -#define seq_nocid 350 -#define seq_hup 400 -#define seq_dial 600 -#define seq_accept 720 -#define seq_shutdown 500 -#define seq_cidmode 10 -#define seq_ummode 11 - - -/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), - * 400: hup, 500: reset, 600: dial, 700: ring */ -struct reply_t gigaset_tab_nocid[] = -{ -/* resp_code, min_constate, max_constate, parameter, new_constate, timeout, - * action, command */ - -/* initialize device, set cid mode if possible */ - {rsp_init, -1, -1, seq_init, 100, 1, {act_timeout} }, - - {ev_timeout, 100, 100, -1, 101, 3, {0}, "z "}, - {rsp_ok, 101, 103, -1, 120, 5, {act_getstring}, - "+gmr "}, - - {ev_timeout, 101, 101, -1, 102, 5, {0}, "z "}, - {rsp_error, 101, 101, -1, 102, 5, {0}, "z "}, - - {ev_timeout, 102, 102, -1, 108, 5, {act_setdle1}, - "^sdle=0 "}, - 
{rsp_ok, 108, 108, -1, 104, -1}, - {rsp_zdle, 104, 104, 0, 103, 5, {0}, "z "}, - {ev_timeout, 104, 104, -1, 0, 0, {act_failinit} }, - {rsp_error, 108, 108, -1, 0, 0, {act_failinit} }, - - {ev_timeout, 108, 108, -1, 105, 2, {act_setdle0, - act_hupmodem, - act_timeout} }, - {ev_timeout, 105, 105, -1, 103, 5, {0}, "z "}, - - {rsp_error, 102, 102, -1, 107, 5, {0}, "^getpre "}, - {rsp_ok, 107, 107, -1, 0, 0, {act_configmode} }, - {rsp_error, 107, 107, -1, 0, 0, {act_failinit} }, - {ev_timeout, 107, 107, -1, 0, 0, {act_failinit} }, - - {rsp_error, 103, 103, -1, 0, 0, {act_failinit} }, - {ev_timeout, 103, 103, -1, 0, 0, {act_failinit} }, - - {rsp_string, 120, 120, -1, 121, -1, {act_setver} }, - - {ev_timeout, 120, 121, -1, 0, 0, {act_failver, - act_init} }, - {rsp_error, 120, 121, -1, 0, 0, {act_failver, - act_init} }, - {rsp_ok, 121, 121, -1, 0, 0, {act_gotver, - act_init} }, - {rsp_none, 121, 121, -1, 120, 0, {act_getstring} }, - -/* leave dle mode */ - {rsp_init, 0, 0, seq_dle0, 201, 5, {0}, "^sdle=0 "}, - {rsp_ok, 201, 201, -1, 202, -1}, - {rsp_zdle, 202, 202, 0, 0, 0, {act_dle0} }, - {rsp_nodev, 200, 249, -1, 0, 0, {act_fakedle0} }, - {rsp_error, 200, 249, -1, 0, 0, {act_faildle0} }, - {ev_timeout, 200, 249, -1, 0, 0, {act_faildle0} }, - -/* enter dle mode */ - {rsp_init, 0, 0, seq_dle1, 251, 5, {0}, "^sdle=1 "}, - {rsp_ok, 251, 251, -1, 252, -1}, - {rsp_zdle, 252, 252, 1, 0, 0, {act_dle1} }, - {rsp_error, 250, 299, -1, 0, 0, {act_faildle1} }, - {ev_timeout, 250, 299, -1, 0, 0, {act_faildle1} }, - -/* incoming call */ - {rsp_ring, -1, -1, -1, -1, -1, {act_ring} }, - -/* get cid */ - {rsp_init, 0, 0, seq_cid, 301, 5, {0}, "^sgci? 
"}, - {rsp_ok, 301, 301, -1, 302, -1}, - {rsp_zgci, 302, 302, -1, 0, 0, {act_cid} }, - {rsp_error, 301, 349, -1, 0, 0, {act_failcid} }, - {ev_timeout, 301, 349, -1, 0, 0, {act_failcid} }, - -/* enter cid mode */ - {rsp_init, 0, 0, seq_cidmode, 150, 5, {0}, "^sgci=1 "}, - {rsp_ok, 150, 150, -1, 0, 0, {act_cmodeset} }, - {rsp_error, 150, 150, -1, 0, 0, {act_failcmode} }, - {ev_timeout, 150, 150, -1, 0, 0, {act_failcmode} }, - -/* leave cid mode */ - {rsp_init, 0, 0, seq_ummode, 160, 5, {0}, "z "}, - {rsp_ok, 160, 160, -1, 0, 0, {act_umodeset} }, - {rsp_error, 160, 160, -1, 0, 0, {act_failumode} }, - {ev_timeout, 160, 160, -1, 0, 0, {act_failumode} }, - -/* abort getting cid */ - {rsp_init, 0, 0, seq_nocid, 0, 0, {act_abortcid} }, - -/* reset */ - {rsp_init, 0, 0, seq_shutdown, 504, 5, {0}, "z "}, - {rsp_ok, 504, 504, -1, 0, 0, {act_sdown} }, - {rsp_error, 501, 599, -1, 0, 0, {act_failsdown} }, - {ev_timeout, 501, 599, -1, 0, 0, {act_failsdown} }, - {rsp_nodev, 501, 599, -1, 0, 0, {act_fakesdown} }, - - {ev_proc_cidmode, -1, -1, -1, -1, -1, {act_proc_cidmode} }, - {ev_if_lock, -1, -1, -1, -1, -1, {act_if_lock} }, - {ev_if_ver, -1, -1, -1, -1, -1, {act_if_ver} }, - {ev_start, -1, -1, -1, -1, -1, {act_start} }, - {ev_stop, -1, -1, -1, -1, -1, {act_stop} }, - {ev_shutdown, -1, -1, -1, -1, -1, {act_shutdown} }, - -/* misc. 
*/ - {rsp_error, -1, -1, -1, -1, -1, {act_error} }, - {rsp_zcau, -1, -1, -1, -1, -1, {act_zcau} }, - {rsp_none, -1, -1, -1, -1, -1, {act_debug} }, - {rsp_any, -1, -1, -1, -1, -1, {act_warn} }, - {rsp_last} -}; - -/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, - * 400: hup, 750: accepted icall */ -struct reply_t gigaset_tab_cid[] = -{ -/* resp_code, min_constate, max_constate, parameter, new_constate, timeout, - * action, command */ - -/* dial */ - {ev_dial, -1, -1, -1, -1, -1, {act_dial} }, - {rsp_init, 0, 0, seq_dial, 601, 5, {act_cmd + at_bc} }, - {rsp_ok, 601, 601, -1, 603, 5, {act_cmd + at_proto} }, - {rsp_ok, 603, 603, -1, 604, 5, {act_cmd + at_type} }, - {rsp_ok, 604, 604, -1, 605, 5, {act_cmd + at_msn} }, - {rsp_null, 605, 605, -1, 606, 5, {act_cmd + at_clip} }, - {rsp_ok, 605, 605, -1, 606, 5, {act_cmd + at_clip} }, - {rsp_null, 606, 606, -1, 607, 5, {act_cmd + at_iso} }, - {rsp_ok, 606, 606, -1, 607, 5, {act_cmd + at_iso} }, - {rsp_ok, 607, 607, -1, 608, 5, {0}, "+vls=17 "}, - {rsp_ok, 608, 608, -1, 609, -1}, - {rsp_zsau, 609, 609, zsau_proceeding, 610, 5, {act_cmd + at_dial} }, - {rsp_ok, 610, 610, -1, 650, 0, {act_dialing} }, - - {rsp_error, 601, 610, -1, 0, 0, {act_abortdial} }, - {ev_timeout, 601, 610, -1, 0, 0, {act_abortdial} }, - -/* optional dialing responses */ - {ev_bc_open, 650, 650, -1, 651, -1}, - {rsp_zvls, 609, 651, 17, -1, -1, {act_debug} }, - {rsp_zctp, 610, 651, -1, -1, -1, {act_debug} }, - {rsp_zcpn, 610, 651, -1, -1, -1, {act_debug} }, - {rsp_zsau, 650, 651, zsau_call_delivered, -1, -1, {act_debug} }, - -/* connect */ - {rsp_zsau, 650, 650, zsau_active, 800, -1, {act_connect} }, - {rsp_zsau, 651, 651, zsau_active, 800, -1, {act_connect, - act_notify_bc_up} }, - {rsp_zsau, 750, 750, zsau_active, 800, -1, {act_connect} }, - {rsp_zsau, 751, 751, zsau_active, 800, -1, {act_connect, - act_notify_bc_up} }, - {ev_bc_open, 800, 800, -1, 800, -1, {act_notify_bc_up} }, - -/* remote hangup */ - {rsp_zsau, 650, 651, 
zsau_disconnect_ind, 0, 0, {act_remotereject} }, - {rsp_zsau, 750, 751, zsau_disconnect_ind, 0, 0, {act_remotehup} }, - {rsp_zsau, 800, 800, zsau_disconnect_ind, 0, 0, {act_remotehup} }, - -/* hangup */ - {ev_hup, -1, -1, -1, -1, -1, {act_hup} }, - {rsp_init, -1, -1, seq_hup, 401, 5, {0}, "+vls=0 "}, - {rsp_ok, 401, 401, -1, 402, 5}, - {rsp_zvls, 402, 402, 0, 403, 5}, - {rsp_zsau, 403, 403, zsau_disconnect_req, -1, -1, {act_debug} }, - {rsp_zsau, 403, 403, zsau_null, 0, 0, {act_disconnect} }, - {rsp_nodev, 401, 403, -1, 0, 0, {act_fakehup} }, - {rsp_error, 401, 401, -1, 0, 0, {act_aborthup} }, - {ev_timeout, 401, 403, -1, 0, 0, {act_aborthup} }, - - {ev_bc_closed, 0, 0, -1, 0, -1, {act_notify_bc_down} }, - -/* ring */ - {rsp_zbc, 700, 700, -1, -1, -1, {0} }, - {rsp_zhlc, 700, 700, -1, -1, -1, {0} }, - {rsp_nmbr, 700, 700, -1, -1, -1, {0} }, - {rsp_zcpn, 700, 700, -1, -1, -1, {0} }, - {rsp_zctp, 700, 700, -1, -1, -1, {0} }, - {ev_timeout, 700, 700, -1, 720, 720, {act_icall} }, - {ev_bc_closed, 720, 720, -1, 0, -1, {act_notify_bc_down} }, - -/*accept icall*/ - {ev_accept, -1, -1, -1, -1, -1, {act_accept} }, - {rsp_init, 720, 720, seq_accept, 721, 5, {act_cmd + at_proto} }, - {rsp_ok, 721, 721, -1, 722, 5, {act_cmd + at_iso} }, - {rsp_ok, 722, 722, -1, 723, 5, {0}, "+vls=17 "}, - {rsp_ok, 723, 723, -1, 724, 5, {0} }, - {rsp_zvls, 724, 724, 17, 750, 50, {act_accepted} }, - {rsp_error, 721, 729, -1, 0, 0, {act_abortaccept} }, - {ev_timeout, 721, 729, -1, 0, 0, {act_abortaccept} }, - {rsp_zsau, 700, 729, zsau_null, 0, 0, {act_abortaccept} }, - {rsp_zsau, 700, 729, zsau_active, 0, 0, {act_abortaccept} }, - {rsp_zsau, 700, 729, zsau_disconnect_ind, 0, 0, {act_abortaccept} }, - - {ev_bc_open, 750, 750, -1, 751, -1}, - {ev_timeout, 750, 751, -1, 0, 0, {act_conntimeout} }, - -/* b channel closed (general case) */ - {ev_bc_closed, -1, -1, -1, -1, -1, {act_notify_bc_down} }, - -/* misc. 
*/ - {rsp_zcon, -1, -1, -1, -1, -1, {act_debug} }, - {rsp_zcau, -1, -1, -1, -1, -1, {act_zcau} }, - {rsp_none, -1, -1, -1, -1, -1, {act_debug} }, - {rsp_any, -1, -1, -1, -1, -1, {act_warn} }, - {rsp_last} -}; - - -static const struct resp_type_t { - char *response; - int resp_code; - int type; -} -resp_type[] = -{ - {"ok", rsp_ok, rt_nothing}, - {"error", rsp_error, rt_nothing}, - {"zsau", rsp_zsau, rt_zsau}, - {"zcau", rsp_zcau, rt_zcau}, - {"ring", rsp_ring, rt_ring}, - {"zgci", rsp_zgci, rt_number}, - {"zvls", rsp_zvls, rt_number}, - {"zctp", rsp_zctp, rt_number}, - {"zdle", rsp_zdle, rt_number}, - {"zhlc", rsp_zhlc, rt_string}, - {"zbc", rsp_zbc, rt_string}, - {"nmbr", rsp_nmbr, rt_string}, - {"zcpn", rsp_zcpn, rt_string}, - {"zcon", rsp_zcon, rt_string}, - {null, 0, 0} -}; - -static const struct zsau_resp_t { - char *str; - int code; -} -zsau_resp[] = -{ - {"outgoing_call_proceeding", zsau_proceeding}, - {"call_delivered", zsau_call_delivered}, - {"active", zsau_active}, - {"disconnect_ind", zsau_disconnect_ind}, - {"null", zsau_null}, - {"disconnect_req", zsau_disconnect_req}, - {null, zsau_unknown} -}; - -/* check for and remove fixed string prefix - * if s starts with prefix terminated by a non-alphanumeric character, - * return pointer to the first character after that, otherwise return null. 
- */ -static char *skip_prefix(char *s, const char *prefix) -{ - while (*prefix) - if (*s++ != *prefix++) - return null; - if (isalnum(*s)) - return null; - return s; -} - -/* queue event with cid */ -static void add_cid_event(struct cardstate *cs, int cid, int type, - void *ptr, int parameter) -{ - unsigned long flags; - unsigned next, tail; - struct event_t *event; - - gig_dbg(debug_event, "queueing event %d for cid %d", type, cid); - - spin_lock_irqsave(&cs->ev_lock, flags); - - tail = cs->ev_tail; - next = (tail + 1) % max_events; - if (unlikely(next == cs->ev_head)) { - dev_err(cs->dev, "event queue full "); - kfree(ptr); - } else { - event = cs->events + tail; - event->type = type; - event->cid = cid; - event->ptr = ptr; - event->arg = null; - event->parameter = parameter; - event->at_state = null; - cs->ev_tail = next; - } - - spin_unlock_irqrestore(&cs->ev_lock, flags); -} - -/** - * gigaset_handle_modem_response() - process received modem response - * @cs: device descriptor structure. - * - * called by asyncdata/isocdata if a block of data received from the - * device must be processed as a modem command response. the data is - * already in the cs structure. - */ -void gigaset_handle_modem_response(struct cardstate *cs) -{ - char *eoc, *psep, *ptr; - const struct resp_type_t *rt; - const struct zsau_resp_t *zr; - int cid, parameter; - u8 type, value; - - if (!cs->cbytes) { - /* ignore additional lfs/crs (m10x config mode or cx100) */ - gig_dbg(debug_mcmd, "skipped eol [%02x]", cs->respdata[0]); - return; - } - cs->respdata[cs->cbytes] = 0; - - if (cs->at_state.getstring) { - /* state machine wants next line verbatim */ - cs->at_state.getstring = 0; - ptr = kstrdup(cs->respdata, gfp_atomic); - gig_dbg(debug_event, "string==%s", ptr ? 
ptr : "null"); - add_cid_event(cs, 0, rsp_string, ptr, 0); - return; - } - - /* look up response type */ - for (rt = resp_type; rt->response; ++rt) { - eoc = skip_prefix(cs->respdata, rt->response); - if (eoc) - break; - } - if (!rt->response) { - add_cid_event(cs, 0, rsp_none, null, 0); - gig_dbg(debug_event, "unknown modem response: '%s' ", - cs->respdata); - return; - } - - /* check for cid */ - psep = strrchr(cs->respdata, ';'); - if (psep && - !kstrtoint(psep + 1, 10, &cid) && - cid >= 1 && cid <= 65535) { - /* valid cid: chop it off */ - *psep = 0; - } else { - /* no valid cid: leave unchanged */ - cid = 0; - } - - gig_dbg(debug_event, "cmd received: %s", cs->respdata); - if (cid) - gig_dbg(debug_event, "cid: %d", cid); - - switch (rt->type) { - case rt_nothing: - /* check parameter separator */ - if (*eoc) - goto bad_param; /* extra parameter */ - - add_cid_event(cs, cid, rt->resp_code, null, 0); - break; - - case rt_ring: - /* check parameter separator */ - if (!*eoc) - eoc = null; /* no parameter */ - else if (*eoc++ != ',') - goto bad_param; - - add_cid_event(cs, 0, rt->resp_code, null, cid); - - /* process parameters as individual responses */ - while (eoc) { - /* look up parameter type */ - psep = null; - for (rt = resp_type; rt->response; ++rt) { - psep = skip_prefix(eoc, rt->response); - if (psep) - break; - } - - /* all legal parameters are of type rt_string */ - if (!psep || rt->type != rt_string) { - dev_warn(cs->dev, - "illegal ring parameter: '%s' ", - eoc); - return; - } - - /* skip parameter value separator */ - if (*psep++ != '=') - goto bad_param; - - /* look up end of parameter */ - eoc = strchr(psep, ','); - if (eoc) - *eoc++ = 0; - - /* retrieve parameter value */ - ptr = kstrdup(psep, gfp_atomic); - - /* queue event */ - add_cid_event(cs, cid, rt->resp_code, ptr, 0); - } - break; - - case rt_zsau: - /* check parameter separator */ - if (!*eoc) { - /* no parameter */ - add_cid_event(cs, cid, rt->resp_code, null, zsau_none); - break; - } - 
if (*eoc++ != '=') - goto bad_param; - - /* look up parameter value */ - for (zr = zsau_resp; zr->str; ++zr) - if (!strcmp(eoc, zr->str)) - break; - if (!zr->str) - goto bad_param; - - add_cid_event(cs, cid, rt->resp_code, null, zr->code); - break; - - case rt_string: - /* check parameter separator */ - if (*eoc++ != '=') - goto bad_param; - - /* retrieve parameter value */ - ptr = kstrdup(eoc, gfp_atomic); - - /* queue event */ - add_cid_event(cs, cid, rt->resp_code, ptr, 0); - break; - - case rt_zcau: - /* check parameter separators */ - if (*eoc++ != '=') - goto bad_param; - psep = strchr(eoc, ','); - if (!psep) - goto bad_param; - *psep++ = 0; - - /* decode parameter values */ - if (kstrtou8(eoc, 16, &type) || kstrtou8(psep, 16, &value)) { - *--psep = ','; - goto bad_param; - } - parameter = (type << 8) | value; - - add_cid_event(cs, cid, rt->resp_code, null, parameter); - break; - - case rt_number: - /* check parameter separator */ - if (*eoc++ != '=') - goto bad_param; - - /* decode parameter value */ - if (kstrtoint(eoc, 10, ¶meter)) - goto bad_param; - - /* special case zdle: set flag before queueing event */ - if (rt->resp_code == rsp_zdle) - cs->dle = parameter; - - add_cid_event(cs, cid, rt->resp_code, null, parameter); - break; - -bad_param: - /* parameter unexpected, incomplete or malformed */ - dev_warn(cs->dev, "bad parameter in response '%s' ", - cs->respdata); - add_cid_event(cs, cid, rt->resp_code, null, -1); - break; - - default: - dev_err(cs->dev, "%s: internal error on '%s' ", - __func__, cs->respdata); - } -} -export_symbol_gpl(gigaset_handle_modem_response); - -/* disconnect_nobc - * process closing of connection associated with given at state structure - * without b channel - */ -static void disconnect_nobc(struct at_state_t **at_state_p, - struct cardstate *cs) -{ - unsigned long flags; - - spin_lock_irqsave(&cs->lock, flags); - ++(*at_state_p)->seq_index; - - /* revert to selected idle mode */ - if (!cs->cidmode) { - 
cs->at_state.pending_commands |= pc_ummode; - gig_dbg(debug_event, "scheduling pc_ummode"); - cs->commands_pending = 1; - } - - /* check for and deallocate temporary at state */ - if (!list_empty(&(*at_state_p)->list)) { - list_del(&(*at_state_p)->list); - kfree(*at_state_p); - *at_state_p = null; - } - - spin_unlock_irqrestore(&cs->lock, flags); -} - -/* disconnect_bc - * process closing of connection associated with given at state structure - * and b channel - */ -static void disconnect_bc(struct at_state_t *at_state, - struct cardstate *cs, struct bc_state *bcs) -{ - unsigned long flags; - - spin_lock_irqsave(&cs->lock, flags); - ++at_state->seq_index; - - /* revert to selected idle mode */ - if (!cs->cidmode) { - cs->at_state.pending_commands |= pc_ummode; - gig_dbg(debug_event, "scheduling pc_ummode"); - cs->commands_pending = 1; - } - spin_unlock_irqrestore(&cs->lock, flags); - - /* invoke hardware specific handler */ - cs->ops->close_bchannel(bcs); - - /* notify ll */ - if (bcs->chstate & (chs_d_up | chs_notify_ll)) { - bcs->chstate &= ~(chs_d_up | chs_notify_ll); - gigaset_isdn_hupd(bcs); - } -} - -/* get_free_channel - * get a free at state structure: either one of those associated with the - * b channels of the gigaset device, or if none of those is available, - * a newly allocated one with bcs=null - * the structure should be freed by calling disconnect_nobc() after use. 
- */ -static inline struct at_state_t *get_free_channel(struct cardstate *cs, - int cid) -/* cids: >0: siemens-cid - * 0: without cid - * -1: no cid assigned yet - */ -{ - unsigned long flags; - int i; - struct at_state_t *ret; - - for (i = 0; i < cs->channels; ++i) - if (gigaset_get_channel(cs->bcs + i) >= 0) { - ret = &cs->bcs[i].at_state; - ret->cid = cid; - return ret; - } - - spin_lock_irqsave(&cs->lock, flags); - ret = kmalloc(sizeof(struct at_state_t), gfp_atomic); - if (ret) { - gigaset_at_init(ret, null, cs, cid); - list_add(&ret->list, &cs->temp_at_states); - } - spin_unlock_irqrestore(&cs->lock, flags); - return ret; -} - -static void init_failed(struct cardstate *cs, int mode) -{ - int i; - struct at_state_t *at_state; - - cs->at_state.pending_commands &= ~pc_init; - cs->mode = mode; - cs->mstate = ms_uninitialized; - gigaset_free_channels(cs); - for (i = 0; i < cs->channels; ++i) { - at_state = &cs->bcs[i].at_state; - if (at_state->pending_commands & pc_cid) { - at_state->pending_commands &= ~pc_cid; - at_state->pending_commands |= pc_nocid; - cs->commands_pending = 1; - } - } -} - -static void schedule_init(struct cardstate *cs, int state) -{ - if (cs->at_state.pending_commands & pc_init) { - gig_dbg(debug_event, "not scheduling pc_init again"); - return; - } - cs->mstate = state; - cs->mode = m_unknown; - gigaset_block_channels(cs); - cs->at_state.pending_commands |= pc_init; - gig_dbg(debug_event, "scheduling pc_init"); - cs->commands_pending = 1; -} - -/* send an at command - * adding the "at" prefix, cid and dle encapsulation as appropriate - */ -static void send_command(struct cardstate *cs, const char *cmd, - struct at_state_t *at_state) -{ - int cid = at_state->cid; - struct cmdbuf_t *cb; - size_t buflen; - - buflen = strlen(cmd) + 12; /* dle ( a t 1 2 3 4 5 <cmd> dle ) */ - cb = kmalloc(sizeof(struct cmdbuf_t) + buflen, gfp_atomic); - if (!cb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - return; - } - if (cid > 0 && cid <= 65535) - 
cb->len = snprintf(cb->buf, buflen, - cs->dle ? "(at%d%s)" : "at%d%s", - cid, cmd); - else - cb->len = snprintf(cb->buf, buflen, - cs->dle ? "(at%s)" : "at%s", - cmd); - cb->offset = 0; - cb->next = null; - cb->wake_tasklet = null; - cs->ops->write_cmd(cs, cb); -} - -static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid) -{ - struct at_state_t *at_state; - int i; - unsigned long flags; - - if (cid == 0) - return &cs->at_state; - - for (i = 0; i < cs->channels; ++i) - if (cid == cs->bcs[i].at_state.cid) - return &cs->bcs[i].at_state; - - spin_lock_irqsave(&cs->lock, flags); - - list_for_each_entry(at_state, &cs->temp_at_states, list) - if (cid == at_state->cid) { - spin_unlock_irqrestore(&cs->lock, flags); - return at_state; - } - - spin_unlock_irqrestore(&cs->lock, flags); - - return null; -} - -static void bchannel_down(struct bc_state *bcs) -{ - if (bcs->chstate & chs_b_up) { - bcs->chstate &= ~chs_b_up; - gigaset_isdn_hupb(bcs); - } - - if (bcs->chstate & (chs_d_up | chs_notify_ll)) { - bcs->chstate &= ~(chs_d_up | chs_notify_ll); - gigaset_isdn_hupd(bcs); - } - - gigaset_free_channel(bcs); - - gigaset_bcs_reinit(bcs); -} - -static void bchannel_up(struct bc_state *bcs) -{ - if (bcs->chstate & chs_b_up) { - dev_notice(bcs->cs->dev, "%s: b channel already up ", - __func__); - return; - } - - bcs->chstate |= chs_b_up; - gigaset_isdn_connb(bcs); -} - -static void start_dial(struct at_state_t *at_state, void *data, - unsigned seq_index) -{ - struct bc_state *bcs = at_state->bcs; - struct cardstate *cs = at_state->cs; - char **commands = data; - unsigned long flags; - int i; - - bcs->chstate |= chs_notify_ll; - - spin_lock_irqsave(&cs->lock, flags); - if (at_state->seq_index != seq_index) { - spin_unlock_irqrestore(&cs->lock, flags); - goto error; - } - spin_unlock_irqrestore(&cs->lock, flags); - - for (i = 0; i < at_num; ++i) { - kfree(bcs->commands[i]); - bcs->commands[i] = commands[i]; - } - - at_state->pending_commands |= pc_cid; - 
gig_dbg(debug_event, "scheduling pc_cid"); - cs->commands_pending = 1; - return; - -error: - for (i = 0; i < at_num; ++i) { - kfree(commands[i]); - commands[i] = null; - } - at_state->pending_commands |= pc_nocid; - gig_dbg(debug_event, "scheduling pc_nocid"); - cs->commands_pending = 1; - return; -} - -static void start_accept(struct at_state_t *at_state) -{ - struct cardstate *cs = at_state->cs; - struct bc_state *bcs = at_state->bcs; - int i; - - for (i = 0; i < at_num; ++i) { - kfree(bcs->commands[i]); - bcs->commands[i] = null; - } - - bcs->commands[at_proto] = kmalloc(9, gfp_atomic); - bcs->commands[at_iso] = kmalloc(9, gfp_atomic); - if (!bcs->commands[at_proto] || !bcs->commands[at_iso]) { - dev_err(at_state->cs->dev, "out of memory "); - /* error reset */ - at_state->pending_commands |= pc_hup; - gig_dbg(debug_event, "scheduling pc_hup"); - cs->commands_pending = 1; - return; - } - - snprintf(bcs->commands[at_proto], 9, "^sbpr=%u ", bcs->proto2); - snprintf(bcs->commands[at_iso], 9, "^siso=%u ", bcs->channel + 1); - - at_state->pending_commands |= pc_accept; - gig_dbg(debug_event, "scheduling pc_accept"); - cs->commands_pending = 1; -} - -static void do_start(struct cardstate *cs) -{ - gigaset_free_channels(cs); - - if (cs->mstate != ms_locked) - schedule_init(cs, ms_init); - - cs->isdn_up = 1; - gigaset_isdn_start(cs); - - cs->waiting = 0; - wake_up(&cs->waitqueue); -} - -static void finish_shutdown(struct cardstate *cs) -{ - if (cs->mstate != ms_locked) { - cs->mstate = ms_uninitialized; - cs->mode = m_unknown; - } - - /* tell the ll that the device is not available .. */ - if (cs->isdn_up) { - cs->isdn_up = 0; - gigaset_isdn_stop(cs); - } - - /* the rest is done by cleanup_cs() in process context. 
*/ - - cs->cmd_result = -enodev; - cs->waiting = 0; - wake_up(&cs->waitqueue); -} - -static void do_shutdown(struct cardstate *cs) -{ - gigaset_block_channels(cs); - - if (cs->mstate == ms_ready) { - cs->mstate = ms_shutdown; - cs->at_state.pending_commands |= pc_shutdown; - gig_dbg(debug_event, "scheduling pc_shutdown"); - cs->commands_pending = 1; - } else - finish_shutdown(cs); -} - -static void do_stop(struct cardstate *cs) -{ - unsigned long flags; - - spin_lock_irqsave(&cs->lock, flags); - cs->connected = 0; - spin_unlock_irqrestore(&cs->lock, flags); - - do_shutdown(cs); -} - -/* entering cid mode or getting a cid failed: - * try to initialize the device and try again. - * - * channel >= 0: getting cid for the channel failed - * channel < 0: entering cid mode failed - * - * returns 0 on success, <0 on failure - */ -static int reinit_and_retry(struct cardstate *cs, int channel) -{ - int i; - - if (--cs->retry_count <= 0) - return -efault; - - for (i = 0; i < cs->channels; ++i) - if (cs->bcs[i].at_state.cid > 0) - return -ebusy; - - if (channel < 0) - dev_warn(cs->dev, - "could not enter cid mode. reinit device and try again. "); - else { - dev_warn(cs->dev, - "could not get a call id. reinit device and try again. 
"); - cs->bcs[channel].at_state.pending_commands |= pc_cid; - } - schedule_init(cs, ms_init); - return 0; -} - -static int at_state_invalid(struct cardstate *cs, - struct at_state_t *test_ptr) -{ - unsigned long flags; - unsigned channel; - struct at_state_t *at_state; - int retval = 0; - - spin_lock_irqsave(&cs->lock, flags); - - if (test_ptr == &cs->at_state) - goto exit; - - list_for_each_entry(at_state, &cs->temp_at_states, list) - if (at_state == test_ptr) - goto exit; - - for (channel = 0; channel < cs->channels; ++channel) - if (&cs->bcs[channel].at_state == test_ptr) - goto exit; - - retval = 1; -exit: - spin_unlock_irqrestore(&cs->lock, flags); - return retval; -} - -static void handle_icall(struct cardstate *cs, struct bc_state *bcs, - struct at_state_t *at_state) -{ - int retval; - - retval = gigaset_isdn_icall(at_state); - switch (retval) { - case icall_accept: - break; - default: - dev_err(cs->dev, "internal error: disposition=%d ", retval); - /* fall through */ - case icall_ignore: - case icall_reject: - /* hang up actively - * device doc says that would reject the call. - * in fact it doesn't. 
- */ - at_state->pending_commands |= pc_hup; - cs->commands_pending = 1; - break; - } -} - -static int do_lock(struct cardstate *cs) -{ - int mode; - int i; - - switch (cs->mstate) { - case ms_uninitialized: - case ms_ready: - if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) || - cs->at_state.pending_commands) - return -ebusy; - - for (i = 0; i < cs->channels; ++i) - if (cs->bcs[i].at_state.pending_commands) - return -ebusy; - - if (gigaset_get_channels(cs) < 0) - return -ebusy; - - break; - case ms_locked: - break; - default: - return -ebusy; - } - - mode = cs->mode; - cs->mstate = ms_locked; - cs->mode = m_unknown; - - return mode; -} - -static int do_unlock(struct cardstate *cs) -{ - if (cs->mstate != ms_locked) - return -einval; - - cs->mstate = ms_uninitialized; - cs->mode = m_unknown; - gigaset_free_channels(cs); - if (cs->connected) - schedule_init(cs, ms_init); - - return 0; -} - -static void do_action(int action, struct cardstate *cs, - struct bc_state *bcs, - struct at_state_t **p_at_state, char **pp_command, - int *p_genresp, int *p_resp_code, - struct event_t *ev) -{ - struct at_state_t *at_state = *p_at_state; - struct bc_state *bcs2; - unsigned long flags; - - int channel; - - unsigned char *s, *e; - int i; - unsigned long val; - - switch (action) { - case act_nothing: - break; - case act_timeout: - at_state->waiting = 1; - break; - case act_init: - cs->at_state.pending_commands &= ~pc_init; - cs->cur_at_seq = seq_none; - cs->mode = m_unimodem; - spin_lock_irqsave(&cs->lock, flags); - if (!cs->cidmode) { - spin_unlock_irqrestore(&cs->lock, flags); - gigaset_free_channels(cs); - cs->mstate = ms_ready; - break; - } - spin_unlock_irqrestore(&cs->lock, flags); - cs->at_state.pending_commands |= pc_cidmode; - gig_dbg(debug_event, "scheduling pc_cidmode"); - cs->commands_pending = 1; - break; - case act_failinit: - dev_warn(cs->dev, "could not initialize the device. 
"); - cs->dle = 0; - init_failed(cs, m_unknown); - cs->cur_at_seq = seq_none; - break; - case act_configmode: - init_failed(cs, m_config); - cs->cur_at_seq = seq_none; - break; - case act_setdle1: - cs->dle = 1; - /* cs->inbuf[0].inputstate |= ins_command | ins_dle_command; */ - cs->inbuf[0].inputstate &= - ~(ins_command | ins_dle_command); - break; - case act_setdle0: - cs->dle = 0; - cs->inbuf[0].inputstate = - (cs->inbuf[0].inputstate & ~ins_dle_command) - | ins_command; - break; - case act_cmodeset: - if (cs->mstate == ms_init || cs->mstate == ms_recover) { - gigaset_free_channels(cs); - cs->mstate = ms_ready; - } - cs->mode = m_cid; - cs->cur_at_seq = seq_none; - break; - case act_umodeset: - cs->mode = m_unimodem; - cs->cur_at_seq = seq_none; - break; - case act_failcmode: - cs->cur_at_seq = seq_none; - if (cs->mstate == ms_init || cs->mstate == ms_recover) { - init_failed(cs, m_unknown); - break; - } - if (reinit_and_retry(cs, -1) < 0) - schedule_init(cs, ms_recover); - break; - case act_failumode: - cs->cur_at_seq = seq_none; - schedule_init(cs, ms_recover); - break; - case act_hupmodem: - /* send "+++" (hangup in unimodem mode) */ - if (cs->connected) { - struct cmdbuf_t *cb; - - cb = kmalloc(sizeof(struct cmdbuf_t) + 3, gfp_atomic); - if (!cb) { - dev_err(cs->dev, "%s: out of memory ", - __func__); - return; - } - memcpy(cb->buf, "+++", 3); - cb->len = 3; - cb->offset = 0; - cb->next = null; - cb->wake_tasklet = null; - cs->ops->write_cmd(cs, cb); - } - break; - case act_ring: - /* get fresh at state structure for new cid */ - at_state = get_free_channel(cs, ev->parameter); - if (!at_state) { - dev_warn(cs->dev, - "ring ignored: could not allocate channel structure "); - break; - } - - /* initialize at state structure - * note that bcs may be null if no b channel is free - */ - at_state->constate = 700; - for (i = 0; i < str_num; ++i) { - kfree(at_state->str_var[i]); - at_state->str_var[i] = null; - } - at_state->int_var[var_zctp] = -1; - - 
spin_lock_irqsave(&cs->lock, flags); - at_state->timer_expires = ring_timeout; - at_state->timer_active = 1; - spin_unlock_irqrestore(&cs->lock, flags); - break; - case act_icall: - handle_icall(cs, bcs, at_state); - break; - case act_failsdown: - dev_warn(cs->dev, "could not shut down the device. "); - /* fall through */ - case act_fakesdown: - case act_sdown: - cs->cur_at_seq = seq_none; - finish_shutdown(cs); - break; - case act_connect: - if (cs->onechannel) { - at_state->pending_commands |= pc_dle1; - cs->commands_pending = 1; - break; - } - bcs->chstate |= chs_d_up; - gigaset_isdn_connd(bcs); - cs->ops->init_bchannel(bcs); - break; - case act_dle1: - cs->cur_at_seq = seq_none; - bcs = cs->bcs + cs->curchannel; - - bcs->chstate |= chs_d_up; - gigaset_isdn_connd(bcs); - cs->ops->init_bchannel(bcs); - break; - case act_fakehup: - at_state->int_var[var_zsau] = zsau_null; - /* fall through */ - case act_disconnect: - cs->cur_at_seq = seq_none; - at_state->cid = -1; - if (!bcs) { - disconnect_nobc(p_at_state, cs); - } else if (cs->onechannel && cs->dle) { - /* check for other open channels not needed: - * dle only used for m10x with one b channel. - */ - at_state->pending_commands |= pc_dle0; - cs->commands_pending = 1; - } else { - disconnect_bc(at_state, cs, bcs); - } - break; - case act_fakedle0: - at_state->int_var[var_zdle] = 0; - cs->dle = 0; - /* fall through */ - case act_dle0: - cs->cur_at_seq = seq_none; - bcs2 = cs->bcs + cs->curchannel; - disconnect_bc(&bcs2->at_state, cs, bcs2); - break; - case act_aborthup: - cs->cur_at_seq = seq_none; - dev_warn(cs->dev, "could not hang up. "); - at_state->cid = -1; - if (!bcs) - disconnect_nobc(p_at_state, cs); - else if (cs->onechannel) - at_state->pending_commands |= pc_dle0; - else - disconnect_bc(at_state, cs, bcs); - schedule_init(cs, ms_recover); - break; - case act_faildle0: - cs->cur_at_seq = seq_none; - dev_warn(cs->dev, "error leaving dle mode. 
"); - cs->dle = 0; - bcs2 = cs->bcs + cs->curchannel; - disconnect_bc(&bcs2->at_state, cs, bcs2); - schedule_init(cs, ms_recover); - break; - case act_faildle1: - cs->cur_at_seq = seq_none; - dev_warn(cs->dev, - "could not enter dle mode. trying to hang up. "); - channel = cs->curchannel; - cs->bcs[channel].at_state.pending_commands |= pc_hup; - cs->commands_pending = 1; - break; - - case act_cid: /* got cid; start dialing */ - cs->cur_at_seq = seq_none; - channel = cs->curchannel; - if (ev->parameter > 0 && ev->parameter <= 65535) { - cs->bcs[channel].at_state.cid = ev->parameter; - cs->bcs[channel].at_state.pending_commands |= - pc_dial; - cs->commands_pending = 1; - break; - } - /* fall through - bad cid */ - case act_failcid: - cs->cur_at_seq = seq_none; - channel = cs->curchannel; - if (reinit_and_retry(cs, channel) < 0) { - dev_warn(cs->dev, - "could not get a call id. cannot dial. "); - bcs2 = cs->bcs + channel; - disconnect_bc(&bcs2->at_state, cs, bcs2); - } - break; - case act_abortcid: - cs->cur_at_seq = seq_none; - bcs2 = cs->bcs + cs->curchannel; - disconnect_bc(&bcs2->at_state, cs, bcs2); - break; - - case act_dialing: - case act_accepted: - cs->cur_at_seq = seq_none; - break; - - case act_abortaccept: /* hangup/error/timeout during icall procssng */ - if (bcs) - disconnect_bc(at_state, cs, bcs); - else - disconnect_nobc(p_at_state, cs); - break; - - case act_abortdial: /* error/timeout during dial preparation */ - cs->cur_at_seq = seq_none; - at_state->pending_commands |= pc_hup; - cs->commands_pending = 1; - break; - - case act_remotereject: /* disconnect_ind after dialling */ - case act_conntimeout: /* timeout waiting for zsau=active */ - case act_remotehup: /* disconnect_ind with established connection */ - at_state->pending_commands |= pc_hup; - cs->commands_pending = 1; - break; - case act_getstring: /* warning: ring, zdle, ... 
- are not handled properly anymore */ - at_state->getstring = 1; - break; - case act_setver: - if (!ev->ptr) { - *p_genresp = 1; - *p_resp_code = rsp_error; - break; - } - s = ev->ptr; - - if (!strcmp(s, "ok")) { - /* ok without version string: assume old response */ - *p_genresp = 1; - *p_resp_code = rsp_none; - break; - } - - for (i = 0; i < 4; ++i) { - val = simple_strtoul(s, (char **) &e, 10); - if (val > int_max || e == s) - break; - if (i == 3) { - if (*e) - break; - } else if (*e != '.') - break; - else - s = e + 1; - cs->fwver[i] = val; - } - if (i != 4) { - *p_genresp = 1; - *p_resp_code = rsp_error; - break; - } - cs->gotfwver = 0; - break; - case act_gotver: - if (cs->gotfwver == 0) { - cs->gotfwver = 1; - gig_dbg(debug_event, - "firmware version %02d.%03d.%02d.%02d", - cs->fwver[0], cs->fwver[1], - cs->fwver[2], cs->fwver[3]); - break; - } - /* fall through */ - case act_failver: - cs->gotfwver = -1; - dev_err(cs->dev, "could not read firmware version. "); - break; - case act_error: - gig_dbg(debug_any, "%s: error response in constate %d", - __func__, at_state->constate); - cs->cur_at_seq = seq_none; - break; - case act_debug: - gig_dbg(debug_any, "%s: resp_code %d in constate %d", - __func__, ev->type, at_state->constate); - break; - case act_warn: - dev_warn(cs->dev, "%s: resp_code %d in constate %d! ", - __func__, ev->type, at_state->constate); - break; - case act_zcau: - dev_warn(cs->dev, "cause code %04x in connection state %d. 
", - ev->parameter, at_state->constate); - break; - - /* events from the ll */ - - case act_dial: - if (!ev->ptr) { - *p_genresp = 1; - *p_resp_code = rsp_error; - break; - } - start_dial(at_state, ev->ptr, ev->parameter); - break; - case act_accept: - start_accept(at_state); - break; - case act_hup: - at_state->pending_commands |= pc_hup; - gig_dbg(debug_event, "scheduling pc_hup"); - cs->commands_pending = 1; - break; - - /* hotplug events */ - - case act_stop: - do_stop(cs); - break; - case act_start: - do_start(cs); - break; - - /* events from the interface */ - - case act_if_lock: - cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); - cs->waiting = 0; - wake_up(&cs->waitqueue); - break; - case act_if_ver: - if (ev->parameter != 0) - cs->cmd_result = -einval; - else if (cs->gotfwver != 1) { - cs->cmd_result = -enoent; - } else { - memcpy(ev->arg, cs->fwver, sizeof cs->fwver); - cs->cmd_result = 0; - } - cs->waiting = 0; - wake_up(&cs->waitqueue); - break; - - /* events from the proc file system */ - - case act_proc_cidmode: - spin_lock_irqsave(&cs->lock, flags); - if (ev->parameter != cs->cidmode) { - cs->cidmode = ev->parameter; - if (ev->parameter) { - cs->at_state.pending_commands |= pc_cidmode; - gig_dbg(debug_event, "scheduling pc_cidmode"); - } else { - cs->at_state.pending_commands |= pc_ummode; - gig_dbg(debug_event, "scheduling pc_ummode"); - } - cs->commands_pending = 1; - } - spin_unlock_irqrestore(&cs->lock, flags); - cs->waiting = 0; - wake_up(&cs->waitqueue); - break; - - /* events from the hardware drivers */ - - case act_notify_bc_down: - bchannel_down(bcs); - break; - case act_notify_bc_up: - bchannel_up(bcs); - break; - case act_shutdown: - do_shutdown(cs); - break; - - - default: - if (action >= act_cmd && action < act_cmd + at_num) { - *pp_command = at_state->bcs->commands[action - act_cmd]; - if (!*pp_command) { - *p_genresp = 1; - *p_resp_code = rsp_null; - } - } else - dev_err(cs->dev, "%s: action==%d! 
", __func__, action); - } -} - -/* state machine to do the calling and hangup procedure */ -static void process_event(struct cardstate *cs, struct event_t *ev) -{ - struct bc_state *bcs; - char *p_command = null; - struct reply_t *rep; - int rcode; - int genresp = 0; - int resp_code = rsp_error; - struct at_state_t *at_state; - int index; - int curact; - unsigned long flags; - - if (ev->cid >= 0) { - at_state = at_state_from_cid(cs, ev->cid); - if (!at_state) { - gig_dbg(debug_event, "event %d for invalid cid %d", - ev->type, ev->cid); - gigaset_add_event(cs, &cs->at_state, rsp_wrong_cid, - null, 0, null); - return; - } - } else { - at_state = ev->at_state; - if (at_state_invalid(cs, at_state)) { - gig_dbg(debug_event, "event for invalid at_state %p", - at_state); - return; - } - } - - gig_dbg(debug_event, "connection state %d, event %d", - at_state->constate, ev->type); - - bcs = at_state->bcs; - - /* setting the pointer to the dial array */ - rep = at_state->replystruct; - - spin_lock_irqsave(&cs->lock, flags); - if (ev->type == ev_timeout) { - if (ev->parameter != at_state->timer_index - || !at_state->timer_active) { - ev->type = rsp_none; /* old timeout */ - gig_dbg(debug_event, "old timeout"); - } else { - if (at_state->waiting) - gig_dbg(debug_event, "stopped waiting"); - else - gig_dbg(debug_event, "timeout occurred"); - } - } - spin_unlock_irqrestore(&cs->lock, flags); - - /* if the response belongs to a variable in at_state->int_var[var_xxxx] - or at_state->str_var[str_xxxx], set it */ - if (ev->type >= rsp_var && ev->type < rsp_var + var_num) { - index = ev->type - rsp_var; - at_state->int_var[index] = ev->parameter; - } else if (ev->type >= rsp_str && ev->type < rsp_str + str_num) { - index = ev->type - rsp_str; - kfree(at_state->str_var[index]); - at_state->str_var[index] = ev->ptr; - ev->ptr = null; /* prevent process_events() from - deallocating ptr */ - } - - if (ev->type == ev_timeout || ev->type == rsp_string) - at_state->getstring = 0; - - /* 
search row in dial array which matches modem response and current - constate */ - for (;; rep++) { - rcode = rep->resp_code; - if (rcode == rsp_last) { - /* found nothing...*/ - dev_warn(cs->dev, "%s: rcode=rsp_last: " - "resp_code %d in constate %d! ", - __func__, ev->type, at_state->constate); - return; - } - if ((rcode == rsp_any || rcode == ev->type) - && ((int) at_state->constate >= rep->min_constate) - && (rep->max_constate < 0 - || (int) at_state->constate <= rep->max_constate) - && (rep->parameter < 0 || rep->parameter == ev->parameter)) - break; - } - - p_command = rep->command; - - at_state->waiting = 0; - for (curact = 0; curact < maxact; ++curact) { - /* the row tells us what we should do .. - */ - do_action(rep->action[curact], cs, bcs, &at_state, &p_command, - &genresp, &resp_code, ev); - if (!at_state) - /* at_state destroyed by disconnect */ - return; - } - - /* jump to the next con-state regarding the array */ - if (rep->new_constate >= 0) - at_state->constate = rep->new_constate; - - if (genresp) { - spin_lock_irqsave(&cs->lock, flags); - at_state->timer_expires = 0; - at_state->timer_active = 0; - spin_unlock_irqrestore(&cs->lock, flags); - gigaset_add_event(cs, at_state, resp_code, null, 0, null); - } else { - /* send command to modem if not null... 
*/ - if (p_command) { - if (cs->connected) - send_command(cs, p_command, at_state); - else - gigaset_add_event(cs, at_state, rsp_nodev, - null, 0, null); - } - - spin_lock_irqsave(&cs->lock, flags); - if (!rep->timeout) { - at_state->timer_expires = 0; - at_state->timer_active = 0; - } else if (rep->timeout > 0) { /* new timeout */ - at_state->timer_expires = rep->timeout * 10; - at_state->timer_active = 1; - ++at_state->timer_index; - } - spin_unlock_irqrestore(&cs->lock, flags); - } -} - -static void schedule_sequence(struct cardstate *cs, - struct at_state_t *at_state, int sequence) -{ - cs->cur_at_seq = sequence; - gigaset_add_event(cs, at_state, rsp_init, null, sequence, null); -} - -static void process_command_flags(struct cardstate *cs) -{ - struct at_state_t *at_state = null; - struct bc_state *bcs; - int i; - int sequence; - unsigned long flags; - - cs->commands_pending = 0; - - if (cs->cur_at_seq) { - gig_dbg(debug_event, "not searching scheduled commands: busy"); - return; - } - - gig_dbg(debug_event, "searching scheduled commands"); - - sequence = seq_none; - - /* clear pending_commands and hangup channels on shutdown */ - if (cs->at_state.pending_commands & pc_shutdown) { - cs->at_state.pending_commands &= ~pc_cidmode; - for (i = 0; i < cs->channels; ++i) { - bcs = cs->bcs + i; - at_state = &bcs->at_state; - at_state->pending_commands &= - ~(pc_dle1 | pc_accept | pc_dial); - if (at_state->cid > 0) - at_state->pending_commands |= pc_hup; - if (at_state->pending_commands & pc_cid) { - at_state->pending_commands |= pc_nocid; - at_state->pending_commands &= ~pc_cid; - } - } - } - - /* clear pending_commands and hangup channels on reset */ - if (cs->at_state.pending_commands & pc_init) { - cs->at_state.pending_commands &= ~pc_cidmode; - for (i = 0; i < cs->channels; ++i) { - bcs = cs->bcs + i; - at_state = &bcs->at_state; - at_state->pending_commands &= - ~(pc_dle1 | pc_accept | pc_dial); - if (at_state->cid > 0) - at_state->pending_commands |= pc_hup; - if 
(cs->mstate == ms_recover) { - if (at_state->pending_commands & pc_cid) { - at_state->pending_commands |= pc_nocid; - at_state->pending_commands &= ~pc_cid; - } - } - } - } - - /* only switch back to unimodem mode if no commands are pending and - * no channels are up */ - spin_lock_irqsave(&cs->lock, flags); - if (cs->at_state.pending_commands == pc_ummode - && !cs->cidmode - && list_empty(&cs->temp_at_states) - && cs->mode == m_cid) { - sequence = seq_ummode; - at_state = &cs->at_state; - for (i = 0; i < cs->channels; ++i) { - bcs = cs->bcs + i; - if (bcs->at_state.pending_commands || - bcs->at_state.cid > 0) { - sequence = seq_none; - break; - } - } - } - spin_unlock_irqrestore(&cs->lock, flags); - cs->at_state.pending_commands &= ~pc_ummode; - if (sequence != seq_none) { - schedule_sequence(cs, at_state, sequence); - return; - } - - for (i = 0; i < cs->channels; ++i) { - bcs = cs->bcs + i; - if (bcs->at_state.pending_commands & pc_hup) { - if (cs->dle) { - cs->curchannel = bcs->channel; - schedule_sequence(cs, &cs->at_state, seq_dle0); - return; - } - bcs->at_state.pending_commands &= ~pc_hup; - if (bcs->at_state.pending_commands & pc_cid) { - /* not yet dialing: pc_nocid is sufficient */ - bcs->at_state.pending_commands |= pc_nocid; - bcs->at_state.pending_commands &= ~pc_cid; - } else { - schedule_sequence(cs, &bcs->at_state, seq_hup); - return; - } - } - if (bcs->at_state.pending_commands & pc_nocid) { - bcs->at_state.pending_commands &= ~pc_nocid; - cs->curchannel = bcs->channel; - schedule_sequence(cs, &cs->at_state, seq_nocid); - return; - } else if (bcs->at_state.pending_commands & pc_dle0) { - bcs->at_state.pending_commands &= ~pc_dle0; - cs->curchannel = bcs->channel; - schedule_sequence(cs, &cs->at_state, seq_dle0); - return; - } - } - - list_for_each_entry(at_state, &cs->temp_at_states, list) - if (at_state->pending_commands & pc_hup) { - at_state->pending_commands &= ~pc_hup; - schedule_sequence(cs, at_state, seq_hup); - return; - } - - if 
(cs->at_state.pending_commands & pc_init) { - cs->at_state.pending_commands &= ~pc_init; - cs->dle = 0; - cs->inbuf->inputstate = ins_command; - schedule_sequence(cs, &cs->at_state, seq_init); - return; - } - if (cs->at_state.pending_commands & pc_shutdown) { - cs->at_state.pending_commands &= ~pc_shutdown; - schedule_sequence(cs, &cs->at_state, seq_shutdown); - return; - } - if (cs->at_state.pending_commands & pc_cidmode) { - cs->at_state.pending_commands &= ~pc_cidmode; - if (cs->mode == m_unimodem) { - cs->retry_count = 1; - schedule_sequence(cs, &cs->at_state, seq_cidmode); - return; - } - } - - for (i = 0; i < cs->channels; ++i) { - bcs = cs->bcs + i; - if (bcs->at_state.pending_commands & pc_dle1) { - bcs->at_state.pending_commands &= ~pc_dle1; - cs->curchannel = bcs->channel; - schedule_sequence(cs, &cs->at_state, seq_dle1); - return; - } - if (bcs->at_state.pending_commands & pc_accept) { - bcs->at_state.pending_commands &= ~pc_accept; - schedule_sequence(cs, &bcs->at_state, seq_accept); - return; - } - if (bcs->at_state.pending_commands & pc_dial) { - bcs->at_state.pending_commands &= ~pc_dial; - schedule_sequence(cs, &bcs->at_state, seq_dial); - return; - } - if (bcs->at_state.pending_commands & pc_cid) { - switch (cs->mode) { - case m_unimodem: - cs->at_state.pending_commands |= pc_cidmode; - gig_dbg(debug_event, "scheduling pc_cidmode"); - cs->commands_pending = 1; - return; - case m_unknown: - schedule_init(cs, ms_init); - return; - } - bcs->at_state.pending_commands &= ~pc_cid; - cs->curchannel = bcs->channel; - cs->retry_count = 2; - schedule_sequence(cs, &cs->at_state, seq_cid); - return; - } - } -} - -static void process_events(struct cardstate *cs) -{ - struct event_t *ev; - unsigned head, tail; - int i; - int check_flags = 0; - int was_busy; - unsigned long flags; - - spin_lock_irqsave(&cs->ev_lock, flags); - head = cs->ev_head; - - for (i = 0; i < 2 * max_events; ++i) { - tail = cs->ev_tail; - if (tail == head) { - if (!check_flags && 
!cs->commands_pending) - break; - check_flags = 0; - spin_unlock_irqrestore(&cs->ev_lock, flags); - process_command_flags(cs); - spin_lock_irqsave(&cs->ev_lock, flags); - tail = cs->ev_tail; - if (tail == head) { - if (!cs->commands_pending) - break; - continue; - } - } - - ev = cs->events + head; - was_busy = cs->cur_at_seq != seq_none; - spin_unlock_irqrestore(&cs->ev_lock, flags); - process_event(cs, ev); - spin_lock_irqsave(&cs->ev_lock, flags); - kfree(ev->ptr); - ev->ptr = null; - if (was_busy && cs->cur_at_seq == seq_none) - check_flags = 1; - - head = (head + 1) % max_events; - cs->ev_head = head; - } - - spin_unlock_irqrestore(&cs->ev_lock, flags); - - if (i == 2 * max_events) { - dev_err(cs->dev, - "infinite loop in process_events; aborting. "); - } -} - -/* tasklet scheduled on any event received from the gigaset device - * parameter: - * data isdn controller state structure - */ -void gigaset_handle_event(unsigned long data) -{ - struct cardstate *cs = (struct cardstate *) data; - - /* handle incoming data on control/common channel */ - if (cs->inbuf->head != cs->inbuf->tail) { - gig_dbg(debug_intr, "processing new data"); - cs->ops->handle_input(cs->inbuf); - } - - process_events(cs); -} diff --git a/drivers/staging/isdn/gigaset/gigaset.h b/drivers/staging/isdn/gigaset/gigaset.h --- a/drivers/staging/isdn/gigaset/gigaset.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0-or-later */ -/* - * siemens gigaset 307x driver - * common header file for all connection variants - * - * written by stefan eilers - * and hansjoerg lipp <hjlipp@web.de> - * - * ===================================================================== - * ===================================================================== - */ - -#ifndef gigaset_h -#define gigaset_h - -/* define global prefix for pr_ macros in linux/kernel.h */ -#define pr_fmt(fmt) kbuild_modname ": " fmt - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/compiler.h> -#include <linux/types.h> 
-#include <linux/ctype.h> -#include <linux/slab.h> -#include <linux/spinlock.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/ppp_defs.h> -#include <linux/timer.h> -#include <linux/interrupt.h> -#include <linux/tty.h> -#include <linux/tty_driver.h> -#include <linux/list.h> -#include <linux/atomic.h> - -#define gig_version {0, 5, 0, 0} -#define gig_compat {0, 4, 0, 0} - -#define max_rec_params 10 /* max. number of params in response string */ -#define max_resp_size 511 /* max. size of a response string */ - -#define max_events 64 /* size of event queue */ - -#define rbufsize 8192 - -#define gig_tick 100 /* in milliseconds */ - -/* timeout values (unit: 1 sec) */ -#define init_timeout 1 - -/* timeout values (unit: 0.1 sec) */ -#define ring_timeout 3 /* for additional parameters to ring */ -#define bas_timeout 20 /* for response to base usb ops */ -#define atrdy_timeout 3 /* for hd_ready_send_atdata */ - -#define bas_retry 3 /* max. retries for base usb ops */ - -#define maxact 3 - -extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */ - -/* debug flags, combine by adding/bitwise or */ -enum debuglevel { - debug_intr = 0x00008, /* interrupt processing */ - debug_cmd = 0x00020, /* sent/received ll commands */ - debug_stream = 0x00040, /* application data stream i/o events */ - debug_stream_dump = 0x00080, /* application data stream content */ - debug_lldata = 0x00100, /* sent/received ll data */ - debug_event = 0x00200, /* event processing */ - debug_hdlc = 0x00800, /* m10x hdlc processing */ - debug_channel = 0x01000, /* channel allocation/deallocation */ - debug_transcmd = 0x02000, /* at-commands+responses */ - debug_mcmd = 0x04000, /* commands that are sent very often */ - debug_init = 0x08000, /* (de)allocation+initialization of data - structures */ - debug_suspend = 0x10000, /* suspend/resume processing */ - debug_output = 0x20000, /* output to device */ - debug_iso = 0x40000, /* isochronous transfers */ - debug_if = 
0x80000, /* character device operations */ - debug_usbreq = 0x100000, /* usb communication (except payload - data) */ - debug_lockcmd = 0x200000, /* at commands and responses when - ms_locked */ - - debug_any = 0x3fffff, /* print message if any of the others is - activated */ -}; - -#ifdef config_gigaset_debug - -#define gig_dbg(level, format, arg...) \ - do { \ - if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \ - printk(kern_debug kbuild_modname ": " format " ", \ - ## arg); \ - } while (0) -#define debug_default (debug_transcmd | debug_cmd | debug_usbreq) - -#else - -#define gig_dbg(level, format, arg...) do {} while (0) -#define debug_default 0 - -#endif - -void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, - size_t len, const unsigned char *buf); - -/* connection state */ -#define zsau_none 0 -#define zsau_proceeding 1 -#define zsau_call_delivered 2 -#define zsau_active 3 -#define zsau_disconnect_ind 4 -#define zsau_null 5 -#define zsau_disconnect_req 6 -#define zsau_unknown -1 - -/* usb control transfer requests */ -#define out_vendor_req (usb_dir_out | usb_type_vendor | usb_recip_endpoint) -#define in_vendor_req (usb_dir_in | usb_type_vendor | usb_recip_endpoint) - -/* interrupt pipe messages */ -#define hd_b1_flow_control 0x80 -#define hd_b2_flow_control 0x81 -#define hd_receiveatdata_ack (0x35) /* 3070 */ -#define hd_ready_send_atdata (0x36) /* 3070 */ -#define hd_open_atchannel_ack (0x37) /* 3070 */ -#define hd_close_atchannel_ack (0x38) /* 3070 */ -#define hd_device_init_ok (0x11) /* isurf usb + 3070 */ -#define hd_open_b1channel_ack (0x51) /* isurf usb + 3070 */ -#define hd_open_b2channel_ack (0x52) /* isurf usb + 3070 */ -#define hd_close_b1channel_ack (0x53) /* isurf usb + 3070 */ -#define hd_close_b2channel_ack (0x54) /* isurf usb + 3070 */ -#define hd_suspend_end (0x61) /* isurf usb */ -#define hd_reset_interrupt_pipe_ack (0xff) /* isurf usb + 3070 */ - -/* control requests */ -#define hd_open_b1channel (0x23) /* 
isurf usb + 3070 */ -#define hd_close_b1channel (0x24) /* isurf usb + 3070 */ -#define hd_open_b2channel (0x25) /* isurf usb + 3070 */ -#define hd_close_b2channel (0x26) /* isurf usb + 3070 */ -#define hd_reset_interrupt_pipe (0x27) /* isurf usb + 3070 */ -#define hd_device_init_ack (0x34) /* isurf usb + 3070 */ -#define hd_write_atmessage (0x12) /* 3070 */ -#define hd_read_atmessage (0x13) /* 3070 */ -#define hd_open_atchannel (0x28) /* 3070 */ -#define hd_close_atchannel (0x29) /* 3070 */ - -/* number of b channels supported by base driver */ -#define bas_channels 2 - -/* usb frames for isochronous transfer */ -#define bas_frametime 1 /* number of milliseconds between frames */ -#define bas_numframes 8 /* number of frames per urb */ -#define bas_maxframe 16 /* allocated bytes per frame */ -#define bas_normframe 8 /* send size without flow control */ -#define bas_highframe 10 /* " " with positive flow control */ -#define bas_lowframe 5 /* " " with negative flow control */ -#define bas_corrframes 4 /* flow control multiplicator */ - -#define bas_inbufsize (bas_maxframe * bas_numframes) /* size of isoc in buf - * per urb */ -#define bas_outbufsize 4096 /* size of common isoc out buffer */ -#define bas_outbufpad bas_maxframe /* size of pad area for isoc out buf */ - -#define bas_inurbs 3 -#define bas_outurbs 3 - -/* variable commands in struct bc_state */ -#define at_iso 0 -#define at_dial 1 -#define at_msn 2 -#define at_bc 3 -#define at_proto 4 -#define at_type 5 -#define at_clip 6 -/* total number */ -#define at_num 7 - -/* variables in struct at_state_t */ -/* - numeric */ -#define var_zsau 0 -#define var_zdle 1 -#define var_zctp 2 -/* total number */ -#define var_num 3 -/* - string */ -#define str_nmbr 0 -#define str_zcpn 1 -#define str_zcon 2 -#define str_zbc 3 -#define str_zhlc 4 -/* total number */ -#define str_num 5 - -/* event types */ -#define ev_timeout -105 -#define ev_if_ver -106 -#define ev_proc_cidmode -107 -#define ev_shutdown -108 -#define ev_start 
-110 -#define ev_stop -111 -#define ev_if_lock -112 -#define ev_accept -114 -#define ev_dial -115 -#define ev_hup -116 -#define ev_bc_open -117 -#define ev_bc_closed -118 - -/* input state */ -#define ins_command 0x0001 /* receiving messages (not payload data) */ -#define ins_dle_char 0x0002 /* dle flag received (in dle mode) */ -#define ins_byte_stuff 0x0004 -#define ins_have_data 0x0008 -#define ins_dle_command 0x0020 /* dle message start (<dle> x) received */ -#define ins_flag_hunt 0x0040 - -/* channel state */ -#define chs_d_up 0x01 -#define chs_b_up 0x02 -#define chs_notify_ll 0x04 - -#define icall_reject 0 -#define icall_accept 1 -#define icall_ignore 2 - -/* device state */ -#define ms_uninitialized 0 -#define ms_init 1 -#define ms_locked 2 -#define ms_shutdown 3 -#define ms_recover 4 -#define ms_ready 5 - -/* mode */ -#define m_unknown 0 -#define m_config 1 -#define m_unimodem 2 -#define m_cid 3 - -/* start mode */ -#define sm_locked 0 -#define sm_isdn 1 /* default */ - -/* layer 2 protocols (at^sbpr=...) */ -#define l2_bitsync 0 -#define l2_hdlc 1 -#define l2_voice 2 - -struct gigaset_ops; -struct gigaset_driver; - -struct usb_cardstate; -struct ser_cardstate; -struct bas_cardstate; - -struct bc_state; -struct usb_bc_state; -struct ser_bc_state; -struct bas_bc_state; - -struct reply_t { - int resp_code; /* rsp_xxxx */ - int min_constate; /* <0 => ignore */ - int max_constate; /* <0 => ignore */ - int parameter; /* e.g. 
zsau_xxxx <0: ignore*/ - int new_constate; /* <0 => ignore */ - int timeout; /* >0 => *hz; <=0 => tout_xxxx*/ - int action[maxact]; /* act_xxxx */ - char *command; /* null==none */ -}; - -extern struct reply_t gigaset_tab_cid[]; -extern struct reply_t gigaset_tab_nocid[]; - -struct inbuf_t { - struct cardstate *cs; - int inputstate; - int head, tail; - unsigned char data[rbufsize]; -}; - -/* isochronous write buffer structure - * circular buffer with pad area for extraction of complete usb frames - * - data[read..nextread-1] is valid data already submitted to the usb subsystem - * - data[nextread..write-1] is valid data yet to be sent - * - data[write] is the next byte to write to - * - in byte-oriented l2 procotols, it is completely free - * - in bit-oriented l2 procotols, it may contain a partial byte of valid data - * - data[write+1..read-1] is free - * - wbits is the number of valid data bits in data[write], starting at the lsb - * - writesem is the semaphore for writing to the buffer: - * if writesem <= 0, data[write..read-1] is currently being written to - * - idle contains the byte value to repeat when the end of valid data is - * reached; if nextread==write (buffer contains no data to send), either the - * bas_outbufpad bytes immediately before data[write] (if - * write>=bas_outbufpad) or those of the pad area (if write<bas_outbufpad) - * are also filled with that value - */ -struct isowbuf_t { - int read; - int nextread; - int write; - atomic_t writesem; - int wbits; - unsigned char data[bas_outbufsize + bas_outbufpad]; - unsigned char idle; -}; - -/* isochronous write urb context structure - * data to be stored along with the urb and retrieved when it is returned - * as completed by the usb subsystem - * - urb: pointer to the urb itself - * - bcs: pointer to the b channel control structure - * - limit: end of write buffer area covered by this urb - * - status: urb completion status - */ -struct isow_urbctx_t { - struct urb *urb; - struct bc_state *bcs; - 
int limit; - int status; -}; - -/* at state structure - * data associated with the state of an isdn connection, whether or not - * it is currently assigned a b channel - */ -struct at_state_t { - struct list_head list; - int waiting; - int getstring; - unsigned timer_index; - unsigned long timer_expires; - int timer_active; - unsigned int constate; /* state of connection */ - struct reply_t *replystruct; - int cid; - int int_var[var_num]; /* see var_xxxx */ - char *str_var[str_num]; /* see str_xxxx */ - unsigned pending_commands; /* see pc_xxxx */ - unsigned seq_index; - - struct cardstate *cs; - struct bc_state *bcs; -}; - -struct event_t { - int type; - void *ptr, *arg; - int parameter; - int cid; - struct at_state_t *at_state; -}; - -/* this buffer holds all information about the used b-channel */ -struct bc_state { - struct sk_buff *tx_skb; /* current transfer buffer to modem */ - struct sk_buff_head squeue; /* b-channel send queue */ - - /* variables for debugging .. */ - int corrupted; /* counter for corrupted packages */ - int trans_down; /* counter of packages (downstream) */ - int trans_up; /* counter of packages (upstream) */ - - struct at_state_t at_state; - - /* receive buffer */ - unsigned rx_bufsize; /* max size accepted by application */ - struct sk_buff *rx_skb; - __u16 rx_fcs; - int inputstate; /* see ins_xxxx */ - - int channel; - - struct cardstate *cs; - - unsigned chstate; /* bitmap (chs_*) */ - int ignore; - unsigned proto2; /* layer 2 protocol (l2_*) */ - char *commands[at_num]; /* see at_xxxx */ - -#ifdef config_gigaset_debug - int emptycount; -#endif - int busy; - int use_count; - - /* private data of hardware drivers */ - union { - struct ser_bc_state *ser; /* serial hardware driver */ - struct usb_bc_state *usb; /* usb hardware driver (m105) */ - struct bas_bc_state *bas; /* usb hardware driver (base) */ - } hw; - - void *ap; /* associated ll application */ - int apconnstate; /* ll application connection state */ - spinlock_t aplock; -}; 
- -struct cardstate { - struct gigaset_driver *driver; - unsigned minor_index; - struct device *dev; - struct device *tty_dev; - unsigned flags; - - const struct gigaset_ops *ops; - - /* stuff to handle communication */ - wait_queue_head_t waitqueue; - int waiting; - int mode; /* see m_xxxx */ - int mstate; /* modem state: see ms_xxxx */ - /* only changed by the event layer */ - int cmd_result; - - int channels; - struct bc_state *bcs; /* array of struct bc_state */ - - int onechannel; /* data and commands transmitted in one - stream (m10x) */ - - spinlock_t lock; - struct at_state_t at_state; /* at_state_t for cid == 0 */ - struct list_head temp_at_states;/* list of temporary "struct - at_state_t"s without b channel */ - - struct inbuf_t *inbuf; - - struct cmdbuf_t *cmdbuf, *lastcmdbuf; - spinlock_t cmdlock; - unsigned curlen, cmdbytes; - - struct tty_port port; - struct tasklet_struct if_wake_tasklet; - unsigned control_state; - - unsigned fwver[4]; - int gotfwver; - - unsigned running; /* !=0 if events are handled */ - unsigned connected; /* !=0 if hardware is connected */ - unsigned isdn_up; /* !=0 after gigaset_isdn_start() */ - - unsigned cidmode; - - int myid; /* id for communication with ll */ - void *iif; /* ll interface structure */ - unsigned short hw_hdr_len; /* headroom needed in data skbs */ - - struct reply_t *tabnocid; - struct reply_t *tabcid; - int cs_init; - int ignoreframes; /* frames to ignore after setting up the - b channel */ - struct mutex mutex; /* locks this structure: - * connected is not changed, - * hardware_up is not changed, - * mstate is not changed to or from - * ms_locked */ - - struct timer_list timer; - int retry_count; - int dle; /* !=0 if dle mode is active - (zdle=1 received -- m10x only) */ - int cur_at_seq; /* sequence of at commands being - processed */ - int curchannel; /* channel those commands are meant - for */ - int commands_pending; /* flag(s) in xxx.commands_pending have - been set */ - struct tasklet_struct - 
event_tasklet; /* tasklet for serializing at commands. - * scheduled - * -> for modem reponses (and - * incoming data for m10x) - * -> on timeout - * -> after setting bits in - * xxx.at_state.pending_command - * (e.g. command from ll) */ - struct tasklet_struct - write_tasklet; /* tasklet for serial output - * (not used in base driver) */ - - /* event queue */ - struct event_t events[max_events]; - unsigned ev_tail, ev_head; - spinlock_t ev_lock; - - /* current modem response */ - unsigned char respdata[max_resp_size + 1]; - unsigned cbytes; - - /* private data of hardware drivers */ - union { - struct usb_cardstate *usb; /* usb hardware driver (m105) */ - struct ser_cardstate *ser; /* serial hardware driver */ - struct bas_cardstate *bas; /* usb hardware driver (base) */ - } hw; -}; - -struct gigaset_driver { - struct list_head list; - spinlock_t lock; /* locks minor tables and blocked */ - struct tty_driver *tty; - unsigned have_tty; - unsigned minor; - unsigned minors; - struct cardstate *cs; - int blocked; - - const struct gigaset_ops *ops; - struct module *owner; -}; - -struct cmdbuf_t { - struct cmdbuf_t *next, *prev; - int len, offset; - struct tasklet_struct *wake_tasklet; - unsigned char buf[0]; -}; - -struct bas_bc_state { - /* isochronous output state */ - int running; - atomic_t corrbytes; - spinlock_t isooutlock; - struct isow_urbctx_t isoouturbs[bas_outurbs]; - struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl; - struct isowbuf_t *isooutbuf; - unsigned numsub; /* submitted urb counter - (for diagnostic messages only) */ - struct tasklet_struct sent_tasklet; - - /* isochronous input state */ - spinlock_t isoinlock; - struct urb *isoinurbs[bas_inurbs]; - unsigned char isoinbuf[bas_inbufsize * bas_inurbs]; - struct urb *isoindone; /* completed isoc read urb */ - int isoinstatus; /* status of completed urb */ - int loststatus; /* status of dropped urb */ - unsigned isoinlost; /* number of bytes lost */ - /* state of bit unstuffing algorithm - (in 
addition to bc_state.inputstate) */ - unsigned seqlen; /* number of '1' bits not yet - unstuffed */ - unsigned inbyte, inbits; /* collected bits for next byte */ - /* statistics */ - unsigned goodbytes; /* bytes correctly received */ - unsigned alignerrs; /* frames with incomplete byte at end */ - unsigned fcserrs; /* fcs errors */ - unsigned frameerrs; /* framing errors */ - unsigned giants; /* long frames */ - unsigned runts; /* short frames */ - unsigned aborts; /* hdlc aborts */ - unsigned shared0s; /* '0' bits shared between flags */ - unsigned stolen0s; /* '0' stuff bits also serving as - leading flag bits */ - struct tasklet_struct rcvd_tasklet; -}; - -struct gigaset_ops { - /* called from ev-layer.c/interface.c for sending at commands to the - device */ - int (*write_cmd)(struct cardstate *cs, struct cmdbuf_t *cb); - - /* called from interface.c for additional device control */ - int (*write_room)(struct cardstate *cs); - int (*chars_in_buffer)(struct cardstate *cs); - int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]); - - /* called from ev-layer.c after setting up connection - * should call gigaset_bchannel_up(), when finished. */ - int (*init_bchannel)(struct bc_state *bcs); - - /* called from ev-layer.c after hanging up - * should call gigaset_bchannel_down(), when finished. 
*/ - int (*close_bchannel)(struct bc_state *bcs); - - /* called by gigaset_initcs() for setting up bcs->hw.xxx */ - int (*initbcshw)(struct bc_state *bcs); - - /* called by gigaset_freecs() for freeing bcs->hw.xxx */ - void (*freebcshw)(struct bc_state *bcs); - - /* called by gigaset_bchannel_down() for resetting bcs->hw.xxx */ - void (*reinitbcshw)(struct bc_state *bcs); - - /* called by gigaset_initcs() for setting up cs->hw.xxx */ - int (*initcshw)(struct cardstate *cs); - - /* called by gigaset_freecs() for freeing cs->hw.xxx */ - void (*freecshw)(struct cardstate *cs); - - /* called from common.c/interface.c for additional serial port - control */ - int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state, - unsigned new_state); - int (*baud_rate)(struct cardstate *cs, unsigned cflag); - int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag); - - /* called from ll interface to put an skb into the send-queue. - * after sending is completed, gigaset_skb_sent() must be called - * with the skb's link layer header preserved. */ - int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb); - - /* called from ev-layer.c to process a block of data - * received through the common/control channel. */ - void (*handle_input)(struct inbuf_t *inbuf); - -}; - -/* = common structures and definitions ======================================= - */ - -/* parser states for dle-event: - * <dle-event>: <dle_flag> "x" <event> <dle_flag> "." - * <dle_flag>: 0x10 - * <event>: ((a-z)* | (a-z)* | (0-10)*)+ - */ -#define dle_flag 0x10 - -/* =========================================================================== - * functions implemented in asyncdata.c - */ - -/* called from ll interface to put an skb into the send queue. */ -int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb); - -/* called from ev-layer.c to process a block of data - * received through the common/control channel. 
*/ -void gigaset_m10x_input(struct inbuf_t *inbuf); - -/* =========================================================================== - * functions implemented in isocdata.c - */ - -/* called from ll interface to put an skb into the send queue. */ -int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb); - -/* called from ev-layer.c to process a block of data - * received through the common/control channel. */ -void gigaset_isoc_input(struct inbuf_t *inbuf); - -/* called from bas-gigaset.c to process a block of data - * received through the isochronous channel */ -void gigaset_isoc_receive(unsigned char *src, unsigned count, - struct bc_state *bcs); - -/* called from bas-gigaset.c to put a block of data - * into the isochronous output buffer */ -int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len); - -/* called from bas-gigaset.c to initialize the isochronous output buffer */ -void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle); - -/* called from bas-gigaset.c to retrieve a block of bytes for sending */ -int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size); - -/* =========================================================================== - * functions implemented in ll interface - */ - -/* called from common.c for setting up/shutting down with the isdn subsystem */ -void gigaset_isdn_regdrv(void); -void gigaset_isdn_unregdrv(void); -int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid); -void gigaset_isdn_unregdev(struct cardstate *cs); - -/* called from hardware module to indicate completion of an skb */ -void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); -void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb); -void gigaset_isdn_rcv_err(struct bc_state *bcs); - -/* called from common.c/ev-layer.c to indicate events relevant to the ll */ -void gigaset_isdn_start(struct cardstate *cs); -void gigaset_isdn_stop(struct cardstate *cs); -int gigaset_isdn_icall(struct 
at_state_t *at_state); -void gigaset_isdn_connd(struct bc_state *bcs); -void gigaset_isdn_hupd(struct bc_state *bcs); -void gigaset_isdn_connb(struct bc_state *bcs); -void gigaset_isdn_hupb(struct bc_state *bcs); - -/* =========================================================================== - * functions implemented in ev-layer.c - */ - -/* tasklet called from common.c to process queued events */ -void gigaset_handle_event(unsigned long data); - -/* called from isocdata.c / asyncdata.c - * when a complete modem response line has been received */ -void gigaset_handle_modem_response(struct cardstate *cs); - -/* =========================================================================== - * functions implemented in proc.c - */ - -/* initialize sysfs for device */ -void gigaset_init_dev_sysfs(struct cardstate *cs); -void gigaset_free_dev_sysfs(struct cardstate *cs); - -/* =========================================================================== - * functions implemented in common.c/gigaset.h - */ - -void gigaset_bcs_reinit(struct bc_state *bcs); -void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, - struct cardstate *cs, int cid); -int gigaset_get_channel(struct bc_state *bcs); -struct bc_state *gigaset_get_free_channel(struct cardstate *cs); -void gigaset_free_channel(struct bc_state *bcs); -int gigaset_get_channels(struct cardstate *cs); -void gigaset_free_channels(struct cardstate *cs); -void gigaset_block_channels(struct cardstate *cs); - -/* allocate and initialize driver structure. */ -struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, - const char *procname, - const char *devname, - const struct gigaset_ops *ops, - struct module *owner); - -/* deallocate driver structure. 
*/ -void gigaset_freedriver(struct gigaset_driver *drv); - -struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty); -struct cardstate *gigaset_get_cs_by_id(int id); -void gigaset_blockdriver(struct gigaset_driver *drv); - -/* allocate and initialize card state. calls hardware dependent - gigaset_init[b]cs(). */ -struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, - int onechannel, int ignoreframes, - int cidmode, const char *modulename); - -/* free card state. calls hardware dependent gigaset_free[b]cs(). */ -void gigaset_freecs(struct cardstate *cs); - -/* tell common.c that hardware and driver are ready. */ -int gigaset_start(struct cardstate *cs); - -/* tell common.c that the device is not present any more. */ -void gigaset_stop(struct cardstate *cs); - -/* tell common.c that the driver is being unloaded. */ -int gigaset_shutdown(struct cardstate *cs); - -/* append event to the queue. - * returns null on failure or a pointer to the event on success. - * ptr must be kmalloc()ed (and not be freed by the caller). - */ -struct event_t *gigaset_add_event(struct cardstate *cs, - struct at_state_t *at_state, int type, - void *ptr, int parameter, void *arg); - -/* called on config1 command from frontend. */ -int gigaset_enterconfigmode(struct cardstate *cs); - -/* cs->lock must not be locked */ -static inline void gigaset_schedule_event(struct cardstate *cs) -{ - unsigned long flags; - spin_lock_irqsave(&cs->lock, flags); - if (cs->running) - tasklet_schedule(&cs->event_tasklet); - spin_unlock_irqrestore(&cs->lock, flags); -} - -/* tell common.c that b channel has been closed. */ -/* cs->lock must not be locked */ -static inline void gigaset_bchannel_down(struct bc_state *bcs) -{ - gigaset_add_event(bcs->cs, &bcs->at_state, ev_bc_closed, null, 0, null); - gigaset_schedule_event(bcs->cs); -} - -/* tell common.c that b channel has been opened. 
*/ -/* cs->lock must not be locked */ -static inline void gigaset_bchannel_up(struct bc_state *bcs) -{ - gigaset_add_event(bcs->cs, &bcs->at_state, ev_bc_open, null, 0, null); - gigaset_schedule_event(bcs->cs); -} - -/* set up next receive skb for data mode */ -static inline struct sk_buff *gigaset_new_rx_skb(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - unsigned short hw_hdr_len = cs->hw_hdr_len; - - if (bcs->ignore) { - bcs->rx_skb = null; - } else { - bcs->rx_skb = dev_alloc_skb(bcs->rx_bufsize + hw_hdr_len); - if (bcs->rx_skb == null) - dev_warn(cs->dev, "could not allocate skb "); - else - skb_reserve(bcs->rx_skb, hw_hdr_len); - } - return bcs->rx_skb; -} - -/* append received bytes to inbuf */ -int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, - unsigned numbytes); - -/* =========================================================================== - * functions implemented in interface.c - */ - -/* initialize interface */ -void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, - const char *devname); -/* release interface */ -void gigaset_if_freedriver(struct gigaset_driver *drv); -/* add minor */ -void gigaset_if_init(struct cardstate *cs); -/* remove minor */ -void gigaset_if_free(struct cardstate *cs); -/* device received data */ -void gigaset_if_receive(struct cardstate *cs, - unsigned char *buffer, size_t len); - -#endif diff --git a/drivers/staging/isdn/gigaset/interface.c b/drivers/staging/isdn/gigaset/interface.c --- a/drivers/staging/isdn/gigaset/interface.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * interface to user space for the gigaset driver - * - * copyright (c) 2004 by hansjoerg lipp <hjlipp@web.de> - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/gigaset_dev.h> -#include <linux/tty_flip.h> -#include <linux/module.h> 
- -/*** our ioctls ***/ - -static int if_lock(struct cardstate *cs, int *arg) -{ - int cmd = *arg; - - gig_dbg(debug_if, "%u: if_lock (%d)", cs->minor_index, cmd); - - if (cmd > 1) - return -einval; - - if (cmd < 0) { - *arg = cs->mstate == ms_locked; - return 0; - } - - if (!cmd && cs->mstate == ms_locked && cs->connected) { - cs->ops->set_modem_ctrl(cs, 0, tiocm_dtr | tiocm_rts); - cs->ops->baud_rate(cs, b115200); - cs->ops->set_line_ctrl(cs, cs8); - cs->control_state = tiocm_dtr | tiocm_rts; - } - - cs->waiting = 1; - if (!gigaset_add_event(cs, &cs->at_state, ev_if_lock, - null, cmd, null)) { - cs->waiting = 0; - return -enomem; - } - gigaset_schedule_event(cs); - - wait_event(cs->waitqueue, !cs->waiting); - - if (cs->cmd_result >= 0) { - *arg = cs->cmd_result; - return 0; - } - - return cs->cmd_result; -} - -static int if_version(struct cardstate *cs, unsigned arg[4]) -{ - static const unsigned version[4] = gig_version; - static const unsigned compat[4] = gig_compat; - unsigned cmd = arg[0]; - - gig_dbg(debug_if, "%u: if_version (%d)", cs->minor_index, cmd); - - switch (cmd) { - case gigver_driver: - memcpy(arg, version, sizeof version); - return 0; - case gigver_compat: - memcpy(arg, compat, sizeof compat); - return 0; - case gigver_fwbase: - cs->waiting = 1; - if (!gigaset_add_event(cs, &cs->at_state, ev_if_ver, - null, 0, arg)) { - cs->waiting = 0; - return -enomem; - } - gigaset_schedule_event(cs); - - wait_event(cs->waitqueue, !cs->waiting); - - if (cs->cmd_result >= 0) - return 0; - - return cs->cmd_result; - default: - return -einval; - } -} - -static int if_config(struct cardstate *cs, int *arg) -{ - gig_dbg(debug_if, "%u: if_config (%d)", cs->minor_index, *arg); - - if (*arg != 1) - return -einval; - - if (cs->mstate != ms_locked) - return -ebusy; - - if (!cs->connected) { - pr_err("%s: not connected ", __func__); - return -enodev; - } - - *arg = 0; - return gigaset_enterconfigmode(cs); -} - -/*** the terminal driver ***/ - -static int if_open(struct 
tty_struct *tty, struct file *filp) -{ - struct cardstate *cs; - - gig_dbg(debug_if, "%d+%d: %s()", - tty->driver->minor_start, tty->index, __func__); - - cs = gigaset_get_cs_by_tty(tty); - if (!cs || !try_module_get(cs->driver->owner)) - return -enodev; - - if (mutex_lock_interruptible(&cs->mutex)) { - module_put(cs->driver->owner); - return -erestartsys; - } - tty->driver_data = cs; - - ++cs->port.count; - - if (cs->port.count == 1) { - tty_port_tty_set(&cs->port, tty); - cs->port.low_latency = 1; - } - - mutex_unlock(&cs->mutex); - return 0; -} - -static void if_close(struct tty_struct *tty, struct file *filp) -{ - struct cardstate *cs = tty->driver_data; - - if (!cs) { /* happens if we didn't find cs in open */ - gig_dbg(debug_if, "%s: no cardstate", __func__); - return; - } - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - mutex_lock(&cs->mutex); - - if (!cs->connected) - gig_dbg(debug_if, "not connected"); /* nothing to do */ - else if (!cs->port.count) - dev_warn(cs->dev, "%s: device not opened ", __func__); - else if (!--cs->port.count) - tty_port_tty_set(&cs->port, null); - - mutex_unlock(&cs->mutex); - - module_put(cs->driver->owner); -} - -static int if_ioctl(struct tty_struct *tty, - unsigned int cmd, unsigned long arg) -{ - struct cardstate *cs = tty->driver_data; - int retval = -enodev; - int int_arg; - unsigned char buf[6]; - unsigned version[4]; - - gig_dbg(debug_if, "%u: %s(0x%x)", cs->minor_index, __func__, cmd); - - if (mutex_lock_interruptible(&cs->mutex)) - return -erestartsys; - - if (!cs->connected) { - gig_dbg(debug_if, "not connected"); - retval = -enodev; - } else { - retval = 0; - switch (cmd) { - case gigaset_redir: - retval = get_user(int_arg, (int __user *) arg); - if (retval >= 0) - retval = if_lock(cs, &int_arg); - if (retval >= 0) - retval = put_user(int_arg, (int __user *) arg); - break; - case gigaset_config: - retval = get_user(int_arg, (int __user *) arg); - if (retval >= 0) - retval = if_config(cs, &int_arg); - 
if (retval >= 0) - retval = put_user(int_arg, (int __user *) arg); - break; - case gigaset_brkchars: - retval = copy_from_user(&buf, - (const unsigned char __user *) arg, 6) - ? -efault : 0; - if (retval >= 0) { - gigaset_dbg_buffer(debug_if, "gigaset_brkchars", - 6, buf); - retval = cs->ops->brkchars(cs, buf); - } - break; - case gigaset_version: - retval = copy_from_user(version, - (unsigned __user *) arg, sizeof version) - ? -efault : 0; - if (retval >= 0) - retval = if_version(cs, version); - if (retval >= 0) - retval = copy_to_user((unsigned __user *) arg, - version, sizeof version) - ? -efault : 0; - break; - default: - gig_dbg(debug_if, "%s: arg not supported - 0x%04x", - __func__, cmd); - retval = -enoioctlcmd; - } - } - - mutex_unlock(&cs->mutex); - - return retval; -} - -#ifdef config_compat -static long if_compat_ioctl(struct tty_struct *tty, - unsigned int cmd, unsigned long arg) -{ - return if_ioctl(tty, cmd, (unsigned long)compat_ptr(arg)); -} -#endif - -static int if_tiocmget(struct tty_struct *tty) -{ - struct cardstate *cs = tty->driver_data; - int retval; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - if (mutex_lock_interruptible(&cs->mutex)) - return -erestartsys; - - retval = cs->control_state & (tiocm_rts | tiocm_dtr); - - mutex_unlock(&cs->mutex); - - return retval; -} - -static int if_tiocmset(struct tty_struct *tty, - unsigned int set, unsigned int clear) -{ - struct cardstate *cs = tty->driver_data; - int retval; - unsigned mc; - - gig_dbg(debug_if, "%u: %s(0x%x, 0x%x)", - cs->minor_index, __func__, set, clear); - - if (mutex_lock_interruptible(&cs->mutex)) - return -erestartsys; - - if (!cs->connected) { - gig_dbg(debug_if, "not connected"); - retval = -enodev; - } else { - mc = (cs->control_state | set) & ~clear & (tiocm_rts | tiocm_dtr); - retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc); - cs->control_state = mc; - } - - mutex_unlock(&cs->mutex); - - return retval; -} - -static int if_write(struct 
tty_struct *tty, const unsigned char *buf, int count) -{ - struct cardstate *cs = tty->driver_data; - struct cmdbuf_t *cb; - int retval; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - if (mutex_lock_interruptible(&cs->mutex)) - return -erestartsys; - - if (!cs->connected) { - gig_dbg(debug_if, "not connected"); - retval = -enodev; - goto done; - } - if (cs->mstate != ms_locked) { - dev_warn(cs->dev, "can't write to unlocked device "); - retval = -ebusy; - goto done; - } - if (count <= 0) { - /* nothing to do */ - retval = 0; - goto done; - } - - cb = kmalloc(sizeof(struct cmdbuf_t) + count, gfp_kernel); - if (!cb) { - dev_err(cs->dev, "%s: out of memory ", __func__); - retval = -enomem; - goto done; - } - - memcpy(cb->buf, buf, count); - cb->len = count; - cb->offset = 0; - cb->next = null; - cb->wake_tasklet = &cs->if_wake_tasklet; - retval = cs->ops->write_cmd(cs, cb); -done: - mutex_unlock(&cs->mutex); - return retval; -} - -static int if_write_room(struct tty_struct *tty) -{ - struct cardstate *cs = tty->driver_data; - int retval; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - if (mutex_lock_interruptible(&cs->mutex)) - return -erestartsys; - - if (!cs->connected) { - gig_dbg(debug_if, "not connected"); - retval = -enodev; - } else if (cs->mstate != ms_locked) { - dev_warn(cs->dev, "can't write to unlocked device "); - retval = -ebusy; - } else - retval = cs->ops->write_room(cs); - - mutex_unlock(&cs->mutex); - - return retval; -} - -static int if_chars_in_buffer(struct tty_struct *tty) -{ - struct cardstate *cs = tty->driver_data; - int retval = 0; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - mutex_lock(&cs->mutex); - - if (!cs->connected) - gig_dbg(debug_if, "not connected"); - else if (cs->mstate != ms_locked) - dev_warn(cs->dev, "can't write to unlocked device "); - else - retval = cs->ops->chars_in_buffer(cs); - - mutex_unlock(&cs->mutex); - - return retval; -} - -static void if_throttle(struct 
tty_struct *tty) -{ - struct cardstate *cs = tty->driver_data; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - mutex_lock(&cs->mutex); - - if (!cs->connected) - gig_dbg(debug_if, "not connected"); /* nothing to do */ - else - gig_dbg(debug_if, "%s: not implemented ", __func__); - - mutex_unlock(&cs->mutex); -} - -static void if_unthrottle(struct tty_struct *tty) -{ - struct cardstate *cs = tty->driver_data; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - mutex_lock(&cs->mutex); - - if (!cs->connected) - gig_dbg(debug_if, "not connected"); /* nothing to do */ - else - gig_dbg(debug_if, "%s: not implemented ", __func__); - - mutex_unlock(&cs->mutex); -} - -static void if_set_termios(struct tty_struct *tty, struct ktermios *old) -{ - struct cardstate *cs = tty->driver_data; - unsigned int iflag; - unsigned int cflag; - unsigned int old_cflag; - unsigned int control_state, new_state; - - gig_dbg(debug_if, "%u: %s()", cs->minor_index, __func__); - - mutex_lock(&cs->mutex); - - if (!cs->connected) { - gig_dbg(debug_if, "not connected"); - goto out; - } - - iflag = tty->termios.c_iflag; - cflag = tty->termios.c_cflag; - old_cflag = old ? old->c_cflag : cflag; - gig_dbg(debug_if, "%u: iflag %x cflag %x old %x", - cs->minor_index, iflag, cflag, old_cflag); - - /* get a local copy of the current port settings */ - control_state = cs->control_state; - - /* - * update baud rate. - * do not attempt to cache old rates and skip settings, - * disconnects screw such tricks up completely. - * premature optimization is the root of all evil. - */ - - /* reassert dtr and (maybe) rts on transition from b0 */ - if ((old_cflag & cbaud) == b0) { - new_state = control_state | tiocm_dtr; - /* don't set rts if using hardware flow control */ - if (!(old_cflag & crtscts)) - new_state |= tiocm_rts; - gig_dbg(debug_if, "%u: from b0 - set dtr%s", - cs->minor_index, - (new_state & tiocm_rts) ? 
" only" : "/rts"); - cs->ops->set_modem_ctrl(cs, control_state, new_state); - control_state = new_state; - } - - cs->ops->baud_rate(cs, cflag & cbaud); - - if ((cflag & cbaud) == b0) { - /* drop rts and dtr */ - gig_dbg(debug_if, "%u: to b0 - drop dtr/rts", cs->minor_index); - new_state = control_state & ~(tiocm_dtr | tiocm_rts); - cs->ops->set_modem_ctrl(cs, control_state, new_state); - control_state = new_state; - } - - /* - * update line control register (lcr) - */ - - cs->ops->set_line_ctrl(cs, cflag); - - /* save off the modified port settings */ - cs->control_state = control_state; - -out: - mutex_unlock(&cs->mutex); -} - -static const struct tty_operations if_ops = { - .open = if_open, - .close = if_close, - .ioctl = if_ioctl, -#ifdef config_compat - .compat_ioctl = if_compat_ioctl, -#endif - .write = if_write, - .write_room = if_write_room, - .chars_in_buffer = if_chars_in_buffer, - .set_termios = if_set_termios, - .throttle = if_throttle, - .unthrottle = if_unthrottle, - .tiocmget = if_tiocmget, - .tiocmset = if_tiocmset, -}; - - -/* wakeup tasklet for the write operation */ -static void if_wake(unsigned long data) -{ - struct cardstate *cs = (struct cardstate *)data; - - tty_port_tty_wakeup(&cs->port); -} - -/*** interface to common ***/ - -void gigaset_if_init(struct cardstate *cs) -{ - struct gigaset_driver *drv; - - drv = cs->driver; - if (!drv->have_tty) - return; - - tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs); - - mutex_lock(&cs->mutex); - cs->tty_dev = tty_port_register_device(&cs->port, drv->tty, - cs->minor_index, null); - - if (!is_err(cs->tty_dev)) - dev_set_drvdata(cs->tty_dev, cs); - else { - pr_warn("could not register device to the tty subsystem "); - cs->tty_dev = null; - } - mutex_unlock(&cs->mutex); -} - -void gigaset_if_free(struct cardstate *cs) -{ - struct gigaset_driver *drv; - - drv = cs->driver; - if (!drv->have_tty) - return; - - tasklet_disable(&cs->if_wake_tasklet); - tasklet_kill(&cs->if_wake_tasklet); - 
cs->tty_dev = null; - tty_unregister_device(drv->tty, cs->minor_index); -} - -/** - * gigaset_if_receive() - pass a received block of data to the tty device - * @cs: device descriptor structure. - * @buffer: received data. - * @len: number of bytes received. - * - * called by asyncdata/isocdata if a block of data received from the - * device must be sent to userspace through the ttyg* device. - */ -void gigaset_if_receive(struct cardstate *cs, - unsigned char *buffer, size_t len) -{ - tty_insert_flip_string(&cs->port, buffer, len); - tty_flip_buffer_push(&cs->port); -} -export_symbol_gpl(gigaset_if_receive); - -/* gigaset_if_initdriver - * initialize tty interface. - * parameters: - * drv driver - * procname name of the driver (e.g. for /proc/tty/drivers) - * devname name of the device files (prefix without minor number) - */ -void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, - const char *devname) -{ - int ret; - struct tty_driver *tty; - - drv->have_tty = 0; - - drv->tty = tty = alloc_tty_driver(drv->minors); - if (tty == null) - goto enomem; - - tty->type = tty_driver_type_serial; - tty->subtype = serial_type_normal; - tty->flags = tty_driver_real_raw | tty_driver_dynamic_dev; - - tty->driver_name = procname; - tty->name = devname; - tty->minor_start = drv->minor; - - tty->init_termios = tty_std_termios; - tty->init_termios.c_cflag = b9600 | cs8 | cread | hupcl | clocal; - tty_set_operations(tty, &if_ops); - - ret = tty_register_driver(tty); - if (ret < 0) { - pr_err("error %d registering tty driver ", ret); - goto error; - } - gig_dbg(debug_if, "tty driver initialized"); - drv->have_tty = 1; - return; - -enomem: - pr_err("out of memory "); -error: - if (drv->tty) - put_tty_driver(drv->tty); -} - -void gigaset_if_freedriver(struct gigaset_driver *drv) -{ - if (!drv->have_tty) - return; - - drv->have_tty = 0; - tty_unregister_driver(drv->tty); - put_tty_driver(drv->tty); -} diff --git a/drivers/staging/isdn/gigaset/isocdata.c 
b/drivers/staging/isdn/gigaset/isocdata.c --- a/drivers/staging/isdn/gigaset/isocdata.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * common data handling layer for bas_gigaset - * - * copyright (c) 2005 by tilman schmidt <tilman@imap.cc>, - * hansjoerg lipp <hjlipp@web.de>. - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/crc-ccitt.h> -#include <linux/bitrev.h> - -/* access methods for isowbuf_t */ -/* ============================ */ - -/* initialize buffer structure - */ -void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle) -{ - iwb->read = 0; - iwb->nextread = 0; - iwb->write = 0; - atomic_set(&iwb->writesem, 1); - iwb->wbits = 0; - iwb->idle = idle; - memset(iwb->data + bas_outbufsize, idle, bas_outbufpad); -} - -/* compute number of bytes which can be appended to buffer - * so that there is still room to append a maximum frame of flags - */ -static inline int isowbuf_freebytes(struct isowbuf_t *iwb) -{ - int read, write, freebytes; - - read = iwb->read; - write = iwb->write; - freebytes = read - write; - if (freebytes > 0) { - /* no wraparound: need padding space within regular area */ - return freebytes - bas_outbufpad; - } else if (read < bas_outbufpad) { - /* wraparound: can use space up to end of regular area */ - return bas_outbufsize - write; - } else { - /* following the wraparound yields more space */ - return freebytes + bas_outbufsize - bas_outbufpad; - } -} - -/* start writing - * acquire the write semaphore - * return 0 if acquired, <0 if busy - */ -static inline int isowbuf_startwrite(struct isowbuf_t *iwb) -{ - if (!atomic_dec_and_test(&iwb->writesem)) { - atomic_inc(&iwb->writesem); - gig_dbg(debug_iso, "%s: couldn't acquire iso write semaphore", - __func__); - return -ebusy; - } - gig_dbg(debug_iso, - "%s: acquired iso write semaphore, data[write]=%02x, 
nbits=%d", - __func__, iwb->data[iwb->write], iwb->wbits); - return 0; -} - -/* finish writing - * release the write semaphore - * returns the current write position - */ -static inline int isowbuf_donewrite(struct isowbuf_t *iwb) -{ - int write = iwb->write; - atomic_inc(&iwb->writesem); - return write; -} - -/* append bits to buffer without any checks - * - data contains bits to append, starting at lsb - * - nbits is number of bits to append (0..24) - * must be called with the write semaphore held - * if more than nbits bits are set in data, the extraneous bits are set in the - * buffer too, but the write position is only advanced by nbits. - */ -static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits) -{ - int write = iwb->write; - data <<= iwb->wbits; - data |= iwb->data[write]; - nbits += iwb->wbits; - while (nbits >= 8) { - iwb->data[write++] = data & 0xff; - write %= bas_outbufsize; - data >>= 8; - nbits -= 8; - } - iwb->wbits = nbits; - iwb->data[write] = data & 0xff; - iwb->write = write; -} - -/* put final flag on hdlc bitstream - * also sets the idle fill byte to the correspondingly shifted flag pattern - * must be called with the write semaphore held - */ -static inline void isowbuf_putflag(struct isowbuf_t *iwb) -{ - int write; - - /* add two flags, thus reliably covering one byte */ - isowbuf_putbits(iwb, 0x7e7e, 8); - /* recover the idle flag byte */ - write = iwb->write; - iwb->idle = iwb->data[write]; - gig_dbg(debug_iso, "idle fill byte %02x", iwb->idle); - /* mask extraneous bits in buffer */ - iwb->data[write] &= (1 << iwb->wbits) - 1; -} - -/* retrieve a block of bytes for sending - * the requested number of bytes is provided as a contiguous block. - * if necessary, the frame is filled to the requested number of bytes - * with the idle value. 
- * returns offset to frame, < 0 on busy or error - */ -int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size) -{ - int read, write, limit, src, dst; - unsigned char pbyte; - - read = iwb->nextread; - write = iwb->write; - if (likely(read == write)) { - /* return idle frame */ - return read < bas_outbufpad ? - bas_outbufsize : read - bas_outbufpad; - } - - limit = read + size; - gig_dbg(debug_stream, "%s: read=%d write=%d limit=%d", - __func__, read, write, limit); -#ifdef config_gigaset_debug - if (unlikely(size < 0 || size > bas_outbufpad)) { - pr_err("invalid size %d ", size); - return -einval; - } -#endif - - if (read < write) { - /* no wraparound in valid data */ - if (limit >= write) { - /* append idle frame */ - if (isowbuf_startwrite(iwb) < 0) - return -ebusy; - /* write position could have changed */ - write = iwb->write; - if (limit >= write) { - pbyte = iwb->data[write]; /* save - partial byte */ - limit = write + bas_outbufpad; - gig_dbg(debug_stream, - "%s: filling %d->%d with %02x", - __func__, write, limit, iwb->idle); - if (write + bas_outbufpad < bas_outbufsize) - memset(iwb->data + write, iwb->idle, - bas_outbufpad); - else { - /* wraparound, fill entire pad area */ - memset(iwb->data + write, iwb->idle, - bas_outbufsize + bas_outbufpad - - write); - limit = 0; - } - gig_dbg(debug_stream, - "%s: restoring %02x at %d", - __func__, pbyte, limit); - iwb->data[limit] = pbyte; /* restore - partial byte */ - iwb->write = limit; - } - isowbuf_donewrite(iwb); - } - } else { - /* valid data wraparound */ - if (limit >= bas_outbufsize) { - /* copy wrapped part into pad area */ - src = 0; - dst = bas_outbufsize; - while (dst < limit && src < write) - iwb->data[dst++] = iwb->data[src++]; - if (dst <= limit) { - /* fill pad area with idle byte */ - memset(iwb->data + dst, iwb->idle, - bas_outbufsize + bas_outbufpad - dst); - } - limit = src; - } - } - iwb->nextread = limit; - return read; -} - -/* dump_bytes - * write hex bytes to syslog for debugging - 
*/ -static inline void dump_bytes(enum debuglevel level, const char *tag, - unsigned char *bytes, int count) -{ -#ifdef config_gigaset_debug - unsigned char c; - static char dbgline[3 * 32 + 1]; - int i = 0; - - if (!(gigaset_debuglevel & level)) - return; - - while (count-- > 0) { - if (i > sizeof(dbgline) - 4) { - dbgline[i] = ''; - gig_dbg(level, "%s:%s", tag, dbgline); - i = 0; - } - c = *bytes++; - dbgline[i] = (i && !(i % 12)) ? '-' : ' '; - i++; - dbgline[i++] = hex_asc_hi(c); - dbgline[i++] = hex_asc_lo(c); - } - dbgline[i] = ''; - gig_dbg(level, "%s:%s", tag, dbgline); -#endif -} - -/*============================================================================*/ - -/* bytewise hdlc bitstuffing via table lookup - * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits - * index: 256*(number of preceding '1' bits) + (next byte to stuff) - * value: bit 9.. 0 = result bits - * bit 12..10 = number of trailing '1' bits in result - * bit 14..13 = number of bits added by stuffing - */ -static const u16 stufftab[5 * 256] = { -/* previous 1s = 0: */ - 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, - 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f, - 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, - 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f, - 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, - 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f, - 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, - 0x0070, 
0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df, - 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f, - 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f, - 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af, - 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f, - 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf, - 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f, - 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef, - 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf, - -/* previous 1s = 1: */ - 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f, - 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f, - 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f, - 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f, - 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f, - 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af, - 0x0060, 0x0061, 0x0062, 0x0063, 
0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf, - 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef, - 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f, - 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f, - 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f, - 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f, - 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f, - 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af, - 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf, - 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef, - -/* previous 1s = 2: */ - 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017, - 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037, - 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057, - 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077, - 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097, - 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 
0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7, - 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7, - 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7, - 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517, - 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537, - 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557, - 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577, - 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997, - 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7, - 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7, - 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7, - -/* previous 1s = 3: */ - 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b, - 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b, - 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b, - 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b, - 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 
0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b, - 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb, - 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db, - 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb, - 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b, - 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b, - 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b, - 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b, - 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b, - 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb, - 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb, - 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb, - -/* previous 1s = 4: */ - 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d, - 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d, - 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d, - 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 
0x2079, 0x203e, 0x407d, - 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d, - 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd, - 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd, - 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d, - 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d, - 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d, - 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d, - 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d, - 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d, - 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd, - 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd, - 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d -}; - -/* hdlc_bitstuff_byte - * perform hdlc bitstuffing for one input byte (8 bits, lsb first) - * parameters: - * cin input byte - * ones number of trailing '1' bits in result before this step - * iwb pointer to output buffer structure - * (write semaphore must be held) - * return value: - * number of trailing '1' bits in result after this step - */ - -static inline int hdlc_bitstuff_byte(struct isowbuf_t 
*iwb, unsigned char cin, - int ones) -{ - u16 stuff; - int shiftinc, newones; - - /* get stuffing information for input byte - * value: bit 9.. 0 = result bits - * bit 12..10 = number of trailing '1' bits in result - * bit 14..13 = number of bits added by stuffing - */ - stuff = stufftab[256 * ones + cin]; - shiftinc = (stuff >> 13) & 3; - newones = (stuff >> 10) & 7; - stuff &= 0x3ff; - - /* append stuffed byte to output stream */ - isowbuf_putbits(iwb, stuff, 8 + shiftinc); - return newones; -} - -/* hdlc_buildframe - * perform hdlc framing with bitstuffing on a byte buffer - * the input buffer is regarded as a sequence of bits, starting with the least - * significant bit of the first byte and ending with the most significant bit - * of the last byte. a 16 bit fcs is appended as defined by rfc 1662. - * whenever five consecutive '1' bits appear in the resulting bit sequence, a - * '0' bit is inserted after them. - * the resulting bit string and a closing flag pattern (ppp_flag, '01111110') - * are appended to the output buffer starting at the given bit position, which - * is assumed to already contain a leading flag. - * the output buffer must have sufficient length; count + count/5 + 6 bytes - * starting at *out are safe and are verified to be present. 
- * parameters: - * in input buffer - * count number of bytes in input buffer - * iwb pointer to output buffer structure - * (write semaphore must be held) - * return value: - * position of end of packet in output buffer on success, - * -eagain if write semaphore busy or buffer full - */ - -static inline int hdlc_buildframe(struct isowbuf_t *iwb, - unsigned char *in, int count) -{ - int ones; - u16 fcs; - int end; - unsigned char c; - - if (isowbuf_freebytes(iwb) < count + count / 5 + 6 || - isowbuf_startwrite(iwb) < 0) { - gig_dbg(debug_iso, "%s: %d bytes free -> -eagain", - __func__, isowbuf_freebytes(iwb)); - return -eagain; - } - - dump_bytes(debug_stream_dump, "snd data", in, count); - - /* bitstuff and checksum input data */ - fcs = ppp_initfcs; - ones = 0; - while (count-- > 0) { - c = *in++; - ones = hdlc_bitstuff_byte(iwb, c, ones); - fcs = crc_ccitt_byte(fcs, c); - } - - /* bitstuff and append fcs - * (complemented, least significant byte first) */ - fcs ^= 0xffff; - ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones); - ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones); - - /* put closing flag and repeat byte for flag idle */ - isowbuf_putflag(iwb); - end = isowbuf_donewrite(iwb); - return end; -} - -/* trans_buildframe - * append a block of 'transparent' data to the output buffer, - * inverting the bytes. - * the output buffer must have sufficient length; count bytes - * starting at *out are safe and are verified to be present. 
- * parameters: - * in input buffer - * count number of bytes in input buffer - * iwb pointer to output buffer structure - * (write semaphore must be held) - * return value: - * position of end of packet in output buffer on success, - * -eagain if write semaphore busy or buffer full - */ - -static inline int trans_buildframe(struct isowbuf_t *iwb, - unsigned char *in, int count) -{ - int write; - unsigned char c; - - if (unlikely(count <= 0)) - return iwb->write; - - if (isowbuf_freebytes(iwb) < count || - isowbuf_startwrite(iwb) < 0) { - gig_dbg(debug_iso, "can't put %d bytes", count); - return -eagain; - } - - gig_dbg(debug_stream, "put %d bytes", count); - dump_bytes(debug_stream_dump, "snd data", in, count); - - write = iwb->write; - do { - c = bitrev8(*in++); - iwb->data[write++] = c; - write %= bas_outbufsize; - } while (--count > 0); - iwb->write = write; - iwb->idle = c; - - return isowbuf_donewrite(iwb); -} - -int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len) -{ - int result; - - switch (bcs->proto2) { - case l2_hdlc: - result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len); - gig_dbg(debug_iso, "%s: %d bytes hdlc -> %d", - __func__, len, result); - break; - default: /* assume transparent */ - result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len); - gig_dbg(debug_iso, "%s: %d bytes trans -> %d", - __func__, len, result); - } - return result; -} - -/* hdlc_putbyte - * append byte c to current skb of b channel structure *bcs, updating fcs - */ -static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) -{ - bcs->rx_fcs = crc_ccitt_byte(bcs->rx_fcs, c); - if (bcs->rx_skb == null) - /* skipping */ - return; - if (bcs->rx_skb->len >= bcs->rx_bufsize) { - dev_warn(bcs->cs->dev, "received oversized packet discarded "); - bcs->hw.bas->giants++; - dev_kfree_skb_any(bcs->rx_skb); - bcs->rx_skb = null; - return; - } - __skb_put_u8(bcs->rx_skb, c); -} - -/* hdlc_flush - * drop partial hdlc data packet - */ -static inline 
void hdlc_flush(struct bc_state *bcs) -{ - /* clear skb or allocate new if not skipping */ - if (bcs->rx_skb != null) - skb_trim(bcs->rx_skb, 0); - else - gigaset_new_rx_skb(bcs); - - /* reset packet state */ - bcs->rx_fcs = ppp_initfcs; -} - -/* hdlc_done - * process completed hdlc data packet - */ -static inline void hdlc_done(struct bc_state *bcs) -{ - struct cardstate *cs = bcs->cs; - struct sk_buff *procskb; - unsigned int len; - - if (unlikely(bcs->ignore)) { - bcs->ignore--; - hdlc_flush(bcs); - return; - } - procskb = bcs->rx_skb; - if (procskb == null) { - /* previous error */ - gig_dbg(debug_iso, "%s: skb=null", __func__); - gigaset_isdn_rcv_err(bcs); - } else if (procskb->len < 2) { - dev_notice(cs->dev, "received short frame (%d octets) ", - procskb->len); - bcs->hw.bas->runts++; - dev_kfree_skb_any(procskb); - gigaset_isdn_rcv_err(bcs); - } else if (bcs->rx_fcs != ppp_goodfcs) { - dev_notice(cs->dev, "frame check error "); - bcs->hw.bas->fcserrs++; - dev_kfree_skb_any(procskb); - gigaset_isdn_rcv_err(bcs); - } else { - len = procskb->len; - __skb_trim(procskb, len -= 2); /* subtract fcs */ - gig_dbg(debug_iso, "%s: good frame (%d octets)", __func__, len); - dump_bytes(debug_stream_dump, - "rcv data", procskb->data, len); - bcs->hw.bas->goodbytes += len; - gigaset_skb_rcvd(bcs, procskb); - } - gigaset_new_rx_skb(bcs); - bcs->rx_fcs = ppp_initfcs; -} - -/* hdlc_frag - * drop hdlc data packet with non-integral last byte - */ -static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits) -{ - if (unlikely(bcs->ignore)) { - bcs->ignore--; - hdlc_flush(bcs); - return; - } - - dev_notice(bcs->cs->dev, "received partial byte (%d bits) ", inbits); - bcs->hw.bas->alignerrs++; - gigaset_isdn_rcv_err(bcs); - __skb_trim(bcs->rx_skb, 0); - bcs->rx_fcs = ppp_initfcs; -} - -/* bit counts lookup table for hdlc bit unstuffing - * index: input byte - * value: bit 0..3 = number of consecutive '1' bits starting from lsb - * bit 4..6 = number of consecutive '1' bits 
starting from msb - * (replacing 8 by 7 to make it fit; the algorithm won't care) - * bit 7 set if there are 5 or more "interior" consecutive '1' bits - */ -static const unsigned char bitcounts[256] = { - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, - 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07, - 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14, - 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15, - 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14, - 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16, - 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24, - 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25, - 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34, - 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78 -}; - -/* hdlc_unpack - * perform hdlc frame processing (bit unstuffing, flag detection, fcs - * calculation) on a sequence of received data bytes (8 bits each, lsb first) - * pass on successfully received, complete frames as 
skbs via gigaset_skb_rcvd - * notify of errors via gigaset_isdn_rcv_err - * tally frames, errors etc. in bc structure counters - * parameters: - * src received data - * count number of received bytes - * bcs receiving b channel structure - */ -static inline void hdlc_unpack(unsigned char *src, unsigned count, - struct bc_state *bcs) -{ - struct bas_bc_state *ubc = bcs->hw.bas; - int inputstate; - unsigned seqlen, inbyte, inbits; - - /* load previous state: - * inputstate = set of flag bits: - * - ins_flag_hunt: no complete opening flag received since connection - * setup or last abort - * - ins_have_data: at least one complete data byte received since last - * flag - * seqlen = number of consecutive '1' bits in last 7 input stream bits - * (0..7) - * inbyte = accumulated partial data byte (if !ins_flag_hunt) - * inbits = number of valid bits in inbyte, starting at lsb (0..6) - */ - inputstate = bcs->inputstate; - seqlen = ubc->seqlen; - inbyte = ubc->inbyte; - inbits = ubc->inbits; - - /* bit unstuffing a byte a time - * take your time to understand this; it's straightforward but tedious. - * the "bitcounts" lookup table is used to speed up the counting of - * leading and trailing '1' bits. 
- */ - while (count--) { - unsigned char c = *src++; - unsigned char tabentry = bitcounts[c]; - unsigned lead1 = tabentry & 0x0f; - unsigned trail1 = (tabentry >> 4) & 0x0f; - - seqlen += lead1; - - if (unlikely(inputstate & ins_flag_hunt)) { - if (c == ppp_flag) { - /* flag-in-one */ - inputstate &= ~(ins_flag_hunt | ins_have_data); - inbyte = 0; - inbits = 0; - } else if (seqlen == 6 && trail1 != 7) { - /* flag completed & not followed by abort */ - inputstate &= ~(ins_flag_hunt | ins_have_data); - inbyte = c >> (lead1 + 1); - inbits = 7 - lead1; - if (trail1 >= 8) { - /* interior stuffing: - * omitting the msb handles most cases, - * correct the incorrectly handled - * cases individually */ - inbits--; - switch (c) { - case 0xbe: - inbyte = 0x3f; - break; - } - } - } - /* else: continue flag-hunting */ - } else if (likely(seqlen < 5 && trail1 < 7)) { - /* streamlined case: 8 data bits, no stuffing */ - inbyte |= c << inbits; - hdlc_putbyte(inbyte & 0xff, bcs); - inputstate |= ins_have_data; - inbyte >>= 8; - /* inbits unchanged */ - } else if (likely(seqlen == 6 && inbits == 7 - lead1 && - trail1 + 1 == inbits && - !(inputstate & ins_have_data))) { - /* streamlined case: flag idle - state unchanged */ - } else if (unlikely(seqlen > 6)) { - /* abort sequence */ - ubc->aborts++; - hdlc_flush(bcs); - inputstate |= ins_flag_hunt; - } else if (seqlen == 6) { - /* closing flag, including (6 - lead1) '1's - * and one '0' from inbits */ - if (inbits > 7 - lead1) { - hdlc_frag(bcs, inbits + lead1 - 7); - inputstate &= ~ins_have_data; - } else { - if (inbits < 7 - lead1) - ubc->stolen0s++; - if (inputstate & ins_have_data) { - hdlc_done(bcs); - inputstate &= ~ins_have_data; - } - } - - if (c == ppp_flag) { - /* complete flag, lsb overlaps preceding flag */ - ubc->shared0s++; - inbits = 0; - inbyte = 0; - } else if (trail1 != 7) { - /* remaining bits */ - inbyte = c >> (lead1 + 1); - inbits = 7 - lead1; - if (trail1 >= 8) { - /* interior stuffing: - * omitting the msb 
handles most cases, - * correct the incorrectly handled - * cases individually */ - inbits--; - switch (c) { - case 0xbe: - inbyte = 0x3f; - break; - } - } - } else { - /* abort sequence follows, - * skb already empty anyway */ - ubc->aborts++; - inputstate |= ins_flag_hunt; - } - } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */ - - if (c == ppp_flag) { - /* complete flag */ - if (seqlen == 5) - ubc->stolen0s++; - if (inbits) { - hdlc_frag(bcs, inbits); - inbits = 0; - inbyte = 0; - } else if (inputstate & ins_have_data) - hdlc_done(bcs); - inputstate &= ~ins_have_data; - } else if (trail1 == 7) { - /* abort sequence */ - ubc->aborts++; - hdlc_flush(bcs); - inputstate |= ins_flag_hunt; - } else { - /* stuffed data */ - if (trail1 < 7) { /* => seqlen == 5 */ - /* stuff bit at position lead1, - * no interior stuffing */ - unsigned char mask = (1 << lead1) - 1; - c = (c & mask) | ((c & ~mask) >> 1); - inbyte |= c << inbits; - inbits += 7; - } else if (seqlen < 5) { /* trail1 >= 8 */ - /* interior stuffing: - * omitting the msb handles most cases, - * correct the incorrectly handled - * cases individually */ - switch (c) { - case 0xbe: - c = 0x7e; - break; - } - inbyte |= c << inbits; - inbits += 7; - } else { /* seqlen == 5 && trail1 >= 8 */ - - /* stuff bit at lead1 *and* interior - * stuffing -- unstuff individually */ - switch (c) { - case 0x7d: - c = 0x3f; - break; - case 0xbe: - c = 0x3f; - break; - case 0x3e: - c = 0x1f; - break; - case 0x7c: - c = 0x3e; - break; - } - inbyte |= c << inbits; - inbits += 6; - } - if (inbits >= 8) { - inbits -= 8; - hdlc_putbyte(inbyte & 0xff, bcs); - inputstate |= ins_have_data; - inbyte >>= 8; - } - } - } - seqlen = trail1 & 7; - } - - /* save new state */ - bcs->inputstate = inputstate; - ubc->seqlen = seqlen; - ubc->inbyte = inbyte; - ubc->inbits = inbits; -} - -/* trans_receive - * pass on received usb frame transparently as skb via gigaset_skb_rcvd - * invert bytes - * tally frames, errors etc. 
in bc structure counters - * parameters: - * src received data - * count number of received bytes - * bcs receiving b channel structure - */ -static inline void trans_receive(unsigned char *src, unsigned count, - struct bc_state *bcs) -{ - struct sk_buff *skb; - int dobytes; - unsigned char *dst; - - if (unlikely(bcs->ignore)) { - bcs->ignore--; - return; - } - skb = bcs->rx_skb; - if (skb == null) { - skb = gigaset_new_rx_skb(bcs); - if (skb == null) - return; - } - dobytes = bcs->rx_bufsize - skb->len; - while (count > 0) { - dst = skb_put(skb, count < dobytes ? count : dobytes); - while (count > 0 && dobytes > 0) { - *dst++ = bitrev8(*src++); - count--; - dobytes--; - } - if (dobytes == 0) { - dump_bytes(debug_stream_dump, - "rcv data", skb->data, skb->len); - bcs->hw.bas->goodbytes += skb->len; - gigaset_skb_rcvd(bcs, skb); - skb = gigaset_new_rx_skb(bcs); - if (skb == null) - return; - dobytes = bcs->rx_bufsize; - } - } -} - -void gigaset_isoc_receive(unsigned char *src, unsigned count, - struct bc_state *bcs) -{ - switch (bcs->proto2) { - case l2_hdlc: - hdlc_unpack(src, count, bcs); - break; - default: /* assume transparent */ - trans_receive(src, count, bcs); - } -} - -/* == data input =========================================================== */ - -/* process a block of received bytes in command mode (mstate != ms_locked) - * append received bytes to the command response buffer and forward them - * line by line to the response handler. - * note: received lines may be terminated by cr, lf, or cr lf, which will be - * removed before passing the line to the response handler. 
- */ -static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf) -{ - struct cardstate *cs = inbuf->cs; - unsigned cbytes = cs->cbytes; - unsigned char c; - - while (numbytes--) { - c = *src++; - switch (c) { - case ' ': - if (cbytes == 0 && cs->respdata[0] == ' ') { - /* collapse lf with preceding cr */ - cs->respdata[0] = 0; - break; - } - /* fall through */ - case ' ': - /* end of message line, pass to response handler */ - if (cbytes >= max_resp_size) { - dev_warn(cs->dev, "response too large (%d) ", - cbytes); - cbytes = max_resp_size; - } - cs->cbytes = cbytes; - gigaset_dbg_buffer(debug_transcmd, "received response", - cbytes, cs->respdata); - gigaset_handle_modem_response(cs); - cbytes = 0; - - /* store eol byte for crlf collapsing */ - cs->respdata[0] = c; - break; - default: - /* append to line buffer if possible */ - if (cbytes < max_resp_size) - cs->respdata[cbytes] = c; - cbytes++; - } - } - - /* save state */ - cs->cbytes = cbytes; -} - - -/* process a block of data received through the control channel - */ -void gigaset_isoc_input(struct inbuf_t *inbuf) -{ - struct cardstate *cs = inbuf->cs; - unsigned tail, head, numbytes; - unsigned char *src; - - head = inbuf->head; - while (head != (tail = inbuf->tail)) { - gig_dbg(debug_intr, "buffer state: %u -> %u", head, tail); - if (head > tail) - tail = rbufsize; - src = inbuf->data + head; - numbytes = tail - head; - gig_dbg(debug_intr, "processing %u bytes", numbytes); - - if (cs->mstate == ms_locked) { - gigaset_dbg_buffer(debug_lockcmd, "received response", - numbytes, src); - gigaset_if_receive(inbuf->cs, src, numbytes); - } else { - cmd_loop(src, numbytes, inbuf); - } - - head += numbytes; - if (head == rbufsize) - head = 0; - gig_dbg(debug_intr, "setting head to %u", head); - inbuf->head = head; - } -} - - -/* == data output ========================================================== */ - -/** - * gigaset_isoc_send_skb() - queue an skb for sending - * @bcs: b channel descriptor 
structure. - * @skb: data to send. - * - * called by ll to queue an skb for sending, and start transmission if - * necessary. - * once the payload data has been transmitted completely, gigaset_skb_sent() - * will be called with the skb's link layer header preserved. - * - * return value: - * number of bytes accepted for sending (skb->len) if ok, - * error code < 0 (eg. -enodev) on error - */ -int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) -{ - int len = skb->len; - unsigned long flags; - - spin_lock_irqsave(&bcs->cs->lock, flags); - if (!bcs->cs->connected) { - spin_unlock_irqrestore(&bcs->cs->lock, flags); - return -enodev; - } - - skb_queue_tail(&bcs->squeue, skb); - gig_dbg(debug_iso, "%s: skb queued, qlen=%d", - __func__, skb_queue_len(&bcs->squeue)); - - /* tasklet submits urb if necessary */ - tasklet_schedule(&bcs->hw.bas->sent_tasklet); - spin_unlock_irqrestore(&bcs->cs->lock, flags); - - return len; /* ok so far */ -} diff --git a/drivers/staging/isdn/gigaset/proc.c b/drivers/staging/isdn/gigaset/proc.c --- a/drivers/staging/isdn/gigaset/proc.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* - * stuff used by all variants of the driver - * - * copyright (c) 2001 by stefan eilers, - * hansjoerg lipp <hjlipp@web.de>, - * tilman schmidt <tilman@imap.cc>. 
- * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" - -static ssize_t show_cidmode(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cardstate *cs = dev_get_drvdata(dev); - - return sprintf(buf, "%u ", cs->cidmode); -} - -static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cardstate *cs = dev_get_drvdata(dev); - long int value; - char *end; - - value = simple_strtol(buf, &end, 0); - while (*end) - if (!isspace(*end++)) - return -einval; - if (value < 0 || value > 1) - return -einval; - - if (mutex_lock_interruptible(&cs->mutex)) - return -erestartsys; - - cs->waiting = 1; - if (!gigaset_add_event(cs, &cs->at_state, ev_proc_cidmode, - null, value, null)) { - cs->waiting = 0; - mutex_unlock(&cs->mutex); - return -enomem; - } - gigaset_schedule_event(cs); - - wait_event(cs->waitqueue, !cs->waiting); - - mutex_unlock(&cs->mutex); - - return count; -} - -static device_attr(cidmode, s_irugo | s_iwusr, show_cidmode, set_cidmode); - -/* free sysfs for device */ -void gigaset_free_dev_sysfs(struct cardstate *cs) -{ - if (!cs->tty_dev) - return; - - gig_dbg(debug_init, "removing sysfs entries"); - device_remove_file(cs->tty_dev, &dev_attr_cidmode); -} - -/* initialize sysfs for device */ -void gigaset_init_dev_sysfs(struct cardstate *cs) -{ - if (!cs->tty_dev) - return; - - gig_dbg(debug_init, "setting up sysfs"); - if (device_create_file(cs->tty_dev, &dev_attr_cidmode)) - pr_err("could not create sysfs attribute "); -} diff --git a/drivers/staging/isdn/gigaset/ser-gigaset.c b/drivers/staging/isdn/gigaset/ser-gigaset.c --- a/drivers/staging/isdn/gigaset/ser-gigaset.c +++ /dev/null -// spdx-license-identifier: gpl-2.0-or-later -/* this is the serial hardware link layer (hll) for the gigaset 307x isdn - * dect base (aka sinus 45 isdn) using the rs232 dect 
data module m101, - * written as a line discipline. - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/platform_device.h> -#include <linux/completion.h> - -/* version information */ -#define driver_author "tilman schmidt" -#define driver_desc "serial driver for gigaset 307x using siemens m101" - -#define gigaset_minors 1 -#define gigaset_minor 0 -#define gigaset_modulename "ser_gigaset" -#define gigaset_devname "ttygs" - -/* length limit according to siemens 3070usb-protokoll.doc ch. 2.1 */ -#define if_writebuf 264 - -module_author(driver_author); -module_description(driver_desc); -module_license("gpl"); -module_alias_ldisc(n_gigaset_m101); - -static int startmode = sm_isdn; -module_param(startmode, int, s_irugo); -module_parm_desc(startmode, "initial operation mode"); -static int cidmode = 1; -module_param(cidmode, int, s_irugo); -module_parm_desc(cidmode, "stay in cid mode when idle"); - -static struct gigaset_driver *driver; - -struct ser_cardstate { - struct platform_device dev; - struct tty_struct *tty; - atomic_t refcnt; - struct completion dead_cmp; -}; - -static struct platform_driver device_driver = { - .driver = { - .name = gigaset_modulename, - }, -}; - -static void flush_send_queue(struct cardstate *); - -/* transmit data from current open skb - * result: number of bytes sent or error code < 0 - */ -static int write_modem(struct cardstate *cs) -{ - struct tty_struct *tty = cs->hw.ser->tty; - struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ - struct sk_buff *skb = bcs->tx_skb; - int sent = -eopnotsupp; - - warn_on(!tty || !tty->ops || !skb); - - if (!skb->len) { - dev_kfree_skb_any(skb); - bcs->tx_skb = null; - return -einval; - } - - set_bit(tty_do_write_wakeup, &tty->flags); - if (tty->ops->write) - sent = tty->ops->write(tty, skb->data, 
skb->len); - gig_dbg(debug_output, "write_modem: sent %d", sent); - if (sent < 0) { - /* error */ - flush_send_queue(cs); - return sent; - } - skb_pull(skb, sent); - if (!skb->len) { - /* skb sent completely */ - gigaset_skb_sent(bcs, skb); - - gig_dbg(debug_intr, "kfree skb (adr: %lx)!", - (unsigned long) skb); - dev_kfree_skb_any(skb); - bcs->tx_skb = null; - } - return sent; -} - -/* - * transmit first queued command buffer - * result: number of bytes sent or error code < 0 - */ -static int send_cb(struct cardstate *cs) -{ - struct tty_struct *tty = cs->hw.ser->tty; - struct cmdbuf_t *cb, *tcb; - unsigned long flags; - int sent = 0; - - warn_on(!tty || !tty->ops); - - cb = cs->cmdbuf; - if (!cb) - return 0; /* nothing to do */ - - if (cb->len) { - set_bit(tty_do_write_wakeup, &tty->flags); - sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len); - if (sent < 0) { - /* error */ - gig_dbg(debug_output, "send_cb: write error %d", sent); - flush_send_queue(cs); - return sent; - } - cb->offset += sent; - cb->len -= sent; - gig_dbg(debug_output, "send_cb: sent %d, left %u, queued %u", - sent, cb->len, cs->cmdbytes); - } - - while (cb && !cb->len) { - spin_lock_irqsave(&cs->cmdlock, flags); - cs->cmdbytes -= cs->curlen; - tcb = cb; - cs->cmdbuf = cb = cb->next; - if (cb) { - cb->prev = null; - cs->curlen = cb->len; - } else { - cs->lastcmdbuf = null; - cs->curlen = 0; - } - spin_unlock_irqrestore(&cs->cmdlock, flags); - - if (tcb->wake_tasklet) - tasklet_schedule(tcb->wake_tasklet); - kfree(tcb); - } - return sent; -} - -/* - * send queue tasklet - * if there is already a skb opened, put data to the transfer buffer - * by calling "write_modem". - * otherwise take a new skb out of the queue. 
- */ -static void gigaset_modem_fill(unsigned long data) -{ - struct cardstate *cs = (struct cardstate *) data; - struct bc_state *bcs; - struct sk_buff *nextskb; - int sent = 0; - - if (!cs) { - gig_dbg(debug_output, "%s: no cardstate", __func__); - return; - } - bcs = cs->bcs; - if (!bcs) { - gig_dbg(debug_output, "%s: no cardstate", __func__); - return; - } - if (!bcs->tx_skb) { - /* no skb is being sent; send command if any */ - sent = send_cb(cs); - gig_dbg(debug_output, "%s: send_cb -> %d", __func__, sent); - if (sent) - /* something sent or error */ - return; - - /* no command to send; get skb */ - nextskb = skb_dequeue(&bcs->squeue); - if (!nextskb) - /* no skb either, nothing to do */ - return; - bcs->tx_skb = nextskb; - - gig_dbg(debug_intr, "dequeued skb (adr: %lx)", - (unsigned long) bcs->tx_skb); - } - - /* send skb */ - gig_dbg(debug_output, "%s: tx_skb", __func__); - if (write_modem(cs) < 0) - gig_dbg(debug_output, "%s: write_modem failed", __func__); -} - -/* - * throw away all data queued for sending - */ -static void flush_send_queue(struct cardstate *cs) -{ - struct sk_buff *skb; - struct cmdbuf_t *cb; - unsigned long flags; - - /* command queue */ - spin_lock_irqsave(&cs->cmdlock, flags); - while ((cb = cs->cmdbuf) != null) { - cs->cmdbuf = cb->next; - if (cb->wake_tasklet) - tasklet_schedule(cb->wake_tasklet); - kfree(cb); - } - cs->cmdbuf = cs->lastcmdbuf = null; - cs->cmdbytes = cs->curlen = 0; - spin_unlock_irqrestore(&cs->cmdlock, flags); - - /* data queue */ - if (cs->bcs->tx_skb) - dev_kfree_skb_any(cs->bcs->tx_skb); - while ((skb = skb_dequeue(&cs->bcs->squeue)) != null) - dev_kfree_skb_any(skb); -} - - -/* gigaset driver interface */ -/* ======================== */ - -/* - * queue an at command string for transmission to the gigaset device - * parameters: - * cs controller state structure - * buf buffer containing the string to send - * len number of characters to send - * wake_tasklet tasklet to run when transmission is complete, or 
null - * return value: - * number of bytes queued, or error code < 0 - */ -static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) -{ - unsigned long flags; - - gigaset_dbg_buffer(cs->mstate != ms_locked ? - debug_transcmd : debug_lockcmd, - "cmd transmit", cb->len, cb->buf); - - spin_lock_irqsave(&cs->cmdlock, flags); - cb->prev = cs->lastcmdbuf; - if (cs->lastcmdbuf) - cs->lastcmdbuf->next = cb; - else { - cs->cmdbuf = cb; - cs->curlen = cb->len; - } - cs->cmdbytes += cb->len; - cs->lastcmdbuf = cb; - spin_unlock_irqrestore(&cs->cmdlock, flags); - - spin_lock_irqsave(&cs->lock, flags); - if (cs->connected) - tasklet_schedule(&cs->write_tasklet); - spin_unlock_irqrestore(&cs->lock, flags); - return cb->len; -} - -/* - * tty_driver.write_room interface routine - * return number of characters the driver will accept to be written - * parameter: - * controller state structure - * return value: - * number of characters - */ -static int gigaset_write_room(struct cardstate *cs) -{ - unsigned bytes; - - bytes = cs->cmdbytes; - return bytes < if_writebuf ? 
if_writebuf - bytes : 0; -} - -/* - * tty_driver.chars_in_buffer interface routine - * return number of characters waiting to be sent - * parameter: - * controller state structure - * return value: - * number of characters - */ -static int gigaset_chars_in_buffer(struct cardstate *cs) -{ - return cs->cmdbytes; -} - -/* - * implementation of ioctl(gigaset_brkchars) - * parameter: - * controller state structure - * return value: - * -einval (unimplemented function) - */ -static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) -{ - /* not implemented */ - return -einval; -} - -/* - * open b channel - * called by "do_action" in ev-layer.c - */ -static int gigaset_init_bchannel(struct bc_state *bcs) -{ - /* nothing to do for m10x */ - gigaset_bchannel_up(bcs); - return 0; -} - -/* - * close b channel - * called by "do_action" in ev-layer.c - */ -static int gigaset_close_bchannel(struct bc_state *bcs) -{ - /* nothing to do for m10x */ - gigaset_bchannel_down(bcs); - return 0; -} - -/* - * set up b channel structure - * this is called by "gigaset_initcs" in common.c - */ -static int gigaset_initbcshw(struct bc_state *bcs) -{ - /* unused */ - bcs->hw.ser = null; - return 0; -} - -/* - * free b channel structure - * called by "gigaset_freebcs" in common.c - */ -static void gigaset_freebcshw(struct bc_state *bcs) -{ - /* unused */ -} - -/* - * reinitialize b channel structure - * this is called by "bcs_reinit" in common.c - */ -static void gigaset_reinitbcshw(struct bc_state *bcs) -{ - /* nothing to do for m10x */ -} - -/* - * free hardware specific device data - * this will be called by "gigaset_freecs" in common.c - */ -static void gigaset_freecshw(struct cardstate *cs) -{ - tasklet_kill(&cs->write_tasklet); - if (!cs->hw.ser) - return; - platform_device_unregister(&cs->hw.ser->dev); -} - -static void gigaset_device_release(struct device *dev) -{ - kfree(container_of(dev, struct ser_cardstate, dev.dev)); -} - -/* - * set up hardware specific device 
data - * this is called by "gigaset_initcs" in common.c - */ -static int gigaset_initcshw(struct cardstate *cs) -{ - int rc; - struct ser_cardstate *scs; - - scs = kzalloc(sizeof(struct ser_cardstate), gfp_kernel); - if (!scs) { - pr_err("out of memory "); - return -enomem; - } - cs->hw.ser = scs; - - cs->hw.ser->dev.name = gigaset_modulename; - cs->hw.ser->dev.id = cs->minor_index; - cs->hw.ser->dev.dev.release = gigaset_device_release; - rc = platform_device_register(&cs->hw.ser->dev); - if (rc != 0) { - pr_err("error %d registering platform device ", rc); - kfree(cs->hw.ser); - cs->hw.ser = null; - return rc; - } - - tasklet_init(&cs->write_tasklet, - gigaset_modem_fill, (unsigned long) cs); - return 0; -} - -/* - * set modem control lines - * parameters: - * card state structure - * modem control line state ([tiocm_dtr]|[tiocm_rts]) - * called by "gigaset_start" and "gigaset_enterconfigmode" in common.c - * and by "if_lock" and "if_termios" in interface.c - */ -static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, - unsigned new_state) -{ - struct tty_struct *tty = cs->hw.ser->tty; - unsigned int set, clear; - - warn_on(!tty || !tty->ops); - /* tiocmset is an optional tty driver method */ - if (!tty->ops->tiocmset) - return -einval; - set = new_state & ~old_state; - clear = old_state & ~new_state; - if (!set && !clear) - return 0; - gig_dbg(debug_if, "tiocmset set %x clear %x", set, clear); - return tty->ops->tiocmset(tty, set, clear); -} - -static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) -{ - return -einval; -} - -static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) -{ - return -einval; -} - -static const struct gigaset_ops ops = { - .write_cmd = gigaset_write_cmd, - .write_room = gigaset_write_room, - .chars_in_buffer = gigaset_chars_in_buffer, - .brkchars = gigaset_brkchars, - .init_bchannel = gigaset_init_bchannel, - .close_bchannel = gigaset_close_bchannel, - .initbcshw = gigaset_initbcshw, - 
.freebcshw = gigaset_freebcshw, - .reinitbcshw = gigaset_reinitbcshw, - .initcshw = gigaset_initcshw, - .freecshw = gigaset_freecshw, - .set_modem_ctrl = gigaset_set_modem_ctrl, - .baud_rate = gigaset_baud_rate, - .set_line_ctrl = gigaset_set_line_ctrl, - .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */ - .handle_input = gigaset_m10x_input, /* asyncdata.c */ -}; - - -/* line discipline interface */ -/* ========================= */ - -/* helper functions for cardstate refcounting */ -static struct cardstate *cs_get(struct tty_struct *tty) -{ - struct cardstate *cs = tty->disc_data; - - if (!cs || !cs->hw.ser) { - gig_dbg(debug_any, "%s: no cardstate", __func__); - return null; - } - atomic_inc(&cs->hw.ser->refcnt); - return cs; -} - -static void cs_put(struct cardstate *cs) -{ - if (atomic_dec_and_test(&cs->hw.ser->refcnt)) - complete(&cs->hw.ser->dead_cmp); -} - -/* - * called by the tty driver when the line discipline is pushed onto the tty. - * called in process context. - */ -static int -gigaset_tty_open(struct tty_struct *tty) -{ - struct cardstate *cs; - int rc; - - gig_dbg(debug_init, "starting hll for gigaset m101"); - - pr_info(driver_desc " "); - - if (!driver) { - pr_err("%s: no driver structure ", __func__); - return -enodev; - } - - /* allocate memory for our device state and initialize it */ - cs = gigaset_initcs(driver, 1, 1, 0, cidmode, gigaset_modulename); - if (!cs) { - rc = -enodev; - goto error; - } - - cs->dev = &cs->hw.ser->dev.dev; - cs->hw.ser->tty = tty; - atomic_set(&cs->hw.ser->refcnt, 1); - init_completion(&cs->hw.ser->dead_cmp); - tty->disc_data = cs; - - /* set the amount of data we're willing to receive per call - * from the hardware driver to half of the input buffer size - * to leave some reserve. - * note: we don't do flow control towards the hardware driver. - * if more data is received than will fit into the input buffer, - * it will be dropped and an error will be logged. 
this should - * never happen as the device is slow and the buffer size ample. - */ - tty->receive_room = rbufsize/2; - - /* ok.. initialization of the datastructures and the hw is done.. now - * startup system and notify the ll that we are ready to run - */ - if (startmode == sm_locked) - cs->mstate = ms_locked; - rc = gigaset_start(cs); - if (rc < 0) { - tasklet_kill(&cs->write_tasklet); - goto error; - } - - gig_dbg(debug_init, "startup of hll done"); - return 0; - -error: - gig_dbg(debug_init, "startup of hll failed"); - tty->disc_data = null; - gigaset_freecs(cs); - return rc; -} - -/* - * called by the tty driver when the line discipline is removed. - * called from process context. - */ -static void -gigaset_tty_close(struct tty_struct *tty) -{ - struct cardstate *cs = tty->disc_data; - - gig_dbg(debug_init, "stopping hll for gigaset m101"); - - if (!cs) { - gig_dbg(debug_init, "%s: no cardstate", __func__); - return; - } - - /* prevent other callers from entering ldisc methods */ - tty->disc_data = null; - - if (!cs->hw.ser) - pr_err("%s: no hw cardstate ", __func__); - else { - /* wait for running methods to finish */ - if (!atomic_dec_and_test(&cs->hw.ser->refcnt)) - wait_for_completion(&cs->hw.ser->dead_cmp); - } - - /* stop operations */ - gigaset_stop(cs); - tasklet_kill(&cs->write_tasklet); - flush_send_queue(cs); - cs->dev = null; - gigaset_freecs(cs); - - gig_dbg(debug_init, "shutdown of hll done"); -} - -/* - * called by the tty driver when the tty line is hung up. - * wait for i/o to driver to complete and unregister isdn device. - * this is already done by the close routine, so just call that. - * called from process context. - */ -static int gigaset_tty_hangup(struct tty_struct *tty) -{ - gigaset_tty_close(tty); - return 0; -} - -/* - * ioctl on the tty. - * called in process context only. - * may be re-entered by multiple ioctl calling threads. 
- */ -static int -gigaset_tty_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg) -{ - struct cardstate *cs = cs_get(tty); - int rc, val; - int __user *p = (int __user *)arg; - - if (!cs) - return -enxio; - - switch (cmd) { - - case fionread: - /* unused, always return zero */ - val = 0; - rc = put_user(val, p); - break; - - case tcflsh: - /* flush our buffers and the serial port's buffer */ - switch (arg) { - case tciflush: - /* no own input buffer to flush */ - break; - case tcioflush: - case tcoflush: - flush_send_queue(cs); - break; - } - /* fall through */ - - default: - /* pass through to underlying serial device */ - rc = n_tty_ioctl_helper(tty, file, cmd, arg); - break; - } - cs_put(cs); - return rc; -} - -/* - * called by the tty driver when a block of data has been received. - * will not be re-entered while running but other ldisc functions - * may be called in parallel. - * can be called from hard interrupt level as well as soft interrupt - * level or mainline. 
- * parameters: - * tty tty structure - * buf buffer containing received characters - * cflags buffer containing error flags for received characters (ignored) - * count number of received characters - */ -static void -gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf, - char *cflags, int count) -{ - struct cardstate *cs = cs_get(tty); - unsigned tail, head, n; - struct inbuf_t *inbuf; - - if (!cs) - return; - inbuf = cs->inbuf; - if (!inbuf) { - dev_err(cs->dev, "%s: no inbuf ", __func__); - cs_put(cs); - return; - } - - tail = inbuf->tail; - head = inbuf->head; - gig_dbg(debug_intr, "buffer state: %u -> %u, receive %u bytes", - head, tail, count); - - if (head <= tail) { - /* possible buffer wraparound */ - n = min_t(unsigned, count, rbufsize - tail); - memcpy(inbuf->data + tail, buf, n); - tail = (tail + n) % rbufsize; - buf += n; - count -= n; - } - - if (count > 0) { - /* tail < head and some data left */ - n = head - tail - 1; - if (count > n) { - dev_err(cs->dev, - "inbuf overflow, discarding %d bytes ", - count - n); - count = n; - } - memcpy(inbuf->data + tail, buf, count); - tail += count; - } - - gig_dbg(debug_intr, "setting tail to %u", tail); - inbuf->tail = tail; - - /* everything was received .. push data into handler */ - gig_dbg(debug_intr, "%s-->bh", __func__); - gigaset_schedule_event(cs); - cs_put(cs); -} - -/* - * called by the tty driver when there's room for more data to send. 
- */ -static void -gigaset_tty_wakeup(struct tty_struct *tty) -{ - struct cardstate *cs = cs_get(tty); - - clear_bit(tty_do_write_wakeup, &tty->flags); - if (!cs) - return; - tasklet_schedule(&cs->write_tasklet); - cs_put(cs); -} - -static struct tty_ldisc_ops gigaset_ldisc = { - .owner = this_module, - .magic = tty_ldisc_magic, - .name = "ser_gigaset", - .open = gigaset_tty_open, - .close = gigaset_tty_close, - .hangup = gigaset_tty_hangup, - .ioctl = gigaset_tty_ioctl, - .receive_buf = gigaset_tty_receive, - .write_wakeup = gigaset_tty_wakeup, -}; - - -/* initialization / shutdown */ -/* ========================= */ - -static int __init ser_gigaset_init(void) -{ - int rc; - - gig_dbg(debug_init, "%s", __func__); - rc = platform_driver_register(&device_driver); - if (rc != 0) { - pr_err("error %d registering platform driver ", rc); - return rc; - } - - /* allocate memory for our driver state and initialize it */ - driver = gigaset_initdriver(gigaset_minor, gigaset_minors, - gigaset_modulename, gigaset_devname, - &ops, this_module); - if (!driver) { - rc = -enomem; - goto error; - } - - rc = tty_register_ldisc(n_gigaset_m101, &gigaset_ldisc); - if (rc != 0) { - pr_err("error %d registering line discipline ", rc); - goto error; - } - - return 0; - -error: - if (driver) { - gigaset_freedriver(driver); - driver = null; - } - platform_driver_unregister(&device_driver); - return rc; -} - -static void __exit ser_gigaset_exit(void) -{ - int rc; - - gig_dbg(debug_init, "%s", __func__); - - if (driver) { - gigaset_freedriver(driver); - driver = null; - } - - rc = tty_unregister_ldisc(n_gigaset_m101); - if (rc != 0) - pr_err("error %d unregistering line discipline ", rc); - - platform_driver_unregister(&device_driver); -} - -module_init(ser_gigaset_init); -module_exit(ser_gigaset_exit); diff --git a/drivers/staging/isdn/gigaset/usb-gigaset.c b/drivers/staging/isdn/gigaset/usb-gigaset.c --- a/drivers/staging/isdn/gigaset/usb-gigaset.c +++ /dev/null -// 
spdx-license-identifier: gpl-2.0-or-later -/* - * usb driver for gigaset 307x directly or using m105 data. - * - * copyright (c) 2001 by stefan eilers - * and hansjoerg lipp <hjlipp@web.de>. - * - * this driver was derived from the usb skeleton driver by - * greg kroah-hartman <greg@kroah.com> - * - * ===================================================================== - * ===================================================================== - */ - -#include "gigaset.h" -#include <linux/usb.h> -#include <linux/module.h> -#include <linux/moduleparam.h> - -/* version information */ -#define driver_author "hansjoerg lipp <hjlipp@web.de>, stefan eilers" -#define driver_desc "usb driver for gigaset 307x using m105" - -/* module parameters */ - -static int startmode = sm_isdn; -static int cidmode = 1; - -module_param(startmode, int, s_irugo); -module_param(cidmode, int, s_irugo); -module_parm_desc(startmode, "start in isdn4linux mode"); -module_parm_desc(cidmode, "call-id mode"); - -#define gigaset_minors 1 -#define gigaset_minor 8 -#define gigaset_modulename "usb_gigaset" -#define gigaset_devname "ttygu" - -/* length limit according to siemens 3070usb-protokoll.doc ch. 2.1 */ -#define if_writebuf 264 - -/* values for the gigaset m105 data */ -#define usb_m105_vendor_id 0x0681 -#define usb_m105_product_id 0x0009 - -/* table of devices that work with this driver */ -static const struct usb_device_id gigaset_table[] = { - { usb_device(usb_m105_vendor_id, usb_m105_product_id) }, - { } /* terminating entry */ -}; - -module_device_table(usb, gigaset_table); - -/* - * control requests (empty fields: 00) - * - * rt|rq|value|index|len |data - * in: - * c1 08 01 - * get flags (1 byte). bits: 0=dtr,1=rts,3-7:? - * c1 0f ll ll - * get device information/status (llll: 0x200 and 0x40 seen). - * real size: i only saw min(llll,0x64). - * contents: seems to be always the same... 
- * offset 0x00: length of this structure (0x64) (len: 1,2,3 bytes) - * offset 0x3c: string (16 bit chars): "mcci usb serial v2.0" - * rest: ? - * out: - * 41 11 - * initialize/reset device ? - * 41 00 xx 00 - * ? (xx=00 or 01; 01 on start, 00 on close) - * 41 07 vv mm - * set/clear flags vv=value, mm=mask (see rq 08) - * 41 12 xx - * used before the following configuration requests are issued - * (with xx=0x0f). i've seen other values<0xf, though. - * 41 01 xx xx - * set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1. - * 41 03 ps bb - * set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity - * [ 0x30: m, 0x40: s ] - * [s: 0: 1 stop bit; 1: 1.5; 2: 2] - * bb: bits/byte (seen 7 and 8) - * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00 - * ?? - * initialization: 01, 40, 00, 00 - * open device: 00 40, 00, 00 - * yy and zz seem to be equal, either 0x00 or 0x0a - * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80) - * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 - * used after every "configuration sequence" (rq 12, rqs 01/03/13). - * xx is usually 0x00 but was 0x7e before starting data transfer - * in unimodem mode. so, this might be an array of characters that - * need special treatment ("commit all bufferd data"?), 11=^q, 13=^s. - * - * unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two - * flags per packet. 
- */ - -/* functions called if a device of this driver is connected/disconnected */ -static int gigaset_probe(struct usb_interface *interface, - const struct usb_device_id *id); -static void gigaset_disconnect(struct usb_interface *interface); - -/* functions called before/after suspend */ -static int gigaset_suspend(struct usb_interface *intf, pm_message_t message); -static int gigaset_resume(struct usb_interface *intf); -static int gigaset_pre_reset(struct usb_interface *intf); - -static struct gigaset_driver *driver; - -/* usb specific object needed to register this driver with the usb subsystem */ -static struct usb_driver gigaset_usb_driver = { - .name = gigaset_modulename, - .probe = gigaset_probe, - .disconnect = gigaset_disconnect, - .id_table = gigaset_table, - .suspend = gigaset_suspend, - .resume = gigaset_resume, - .reset_resume = gigaset_resume, - .pre_reset = gigaset_pre_reset, - .post_reset = gigaset_resume, - .disable_hub_initiated_lpm = 1, -}; - -struct usb_cardstate { - struct usb_device *udev; /* usb device pointer */ - struct usb_interface *interface; /* interface for this device */ - int busy; /* bulk output in progress */ - - /* output buffer */ - unsigned char *bulk_out_buffer; - int bulk_out_size; - int bulk_out_epnum; - struct urb *bulk_out_urb; - - /* input buffer */ - unsigned char *rcvbuf; - int rcvbuf_size; - struct urb *read_urb; - - char bchars[6]; /* for request 0x19 */ -}; - -static inline unsigned tiocm_to_gigaset(unsigned state) -{ - return ((state & tiocm_dtr) ? 1 : 0) | ((state & tiocm_rts) ? 
2 : 0); -} - -static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, - unsigned new_state) -{ - struct usb_device *udev = cs->hw.usb->udev; - unsigned mask, val; - int r; - - mask = tiocm_to_gigaset(old_state ^ new_state); - val = tiocm_to_gigaset(new_state); - - gig_dbg(debug_usbreq, "set flags 0x%02x with mask 0x%02x", val, mask); - r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41, - (val & 0xff) | ((mask & 0xff) << 8), 0, - null, 0, 2000 /* timeout? */); - if (r < 0) - return r; - return 0; -} - -/* - * set m105 configuration value - * using undocumented device commands reverse engineered from usb traces - * of the siemens windows driver - */ -static int set_value(struct cardstate *cs, u8 req, u16 val) -{ - struct usb_device *udev = cs->hw.usb->udev; - int r, r2; - - gig_dbg(debug_usbreq, "request %02x (%04x)", - (unsigned)req, (unsigned)val); - r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41, - 0xf /*?*/, 0, null, 0, 2000 /*?*/); - /* no idea what this does */ - if (r < 0) { - dev_err(&udev->dev, "error %d on request 0x12 ", -r); - return r; - } - - r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41, - val, 0, null, 0, 2000 /*?*/); - if (r < 0) - dev_err(&udev->dev, "error %d on request 0x%02x ", - -r, (unsigned)req); - - r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, - 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/); - if (r2 < 0) - dev_err(&udev->dev, "error %d on request 0x19 ", -r2); - - return r < 0 ? r : (r2 < 0 ? 
r2 : 0); -} - -/* - * set the baud rate on the internal serial adapter - * using the undocumented parameter setting command - */ -static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) -{ - u16 val; - u32 rate; - - cflag &= cbaud; - - switch (cflag) { - case b300: rate = 300; break; - case b600: rate = 600; break; - case b1200: rate = 1200; break; - case b2400: rate = 2400; break; - case b4800: rate = 4800; break; - case b9600: rate = 9600; break; - case b19200: rate = 19200; break; - case b38400: rate = 38400; break; - case b57600: rate = 57600; break; - case b115200: rate = 115200; break; - default: - rate = 9600; - dev_err(cs->dev, "unsupported baudrate request 0x%x," - " using default of b9600 ", cflag); - } - - val = 0x383fff / rate + 1; - - return set_value(cs, 1, val); -} - -/* - * set the line format on the internal serial adapter - * using the undocumented parameter setting command - */ -static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) -{ - u16 val = 0; - - /* set the parity */ - if (cflag & parenb) - val |= (cflag & parodd) ? 
0x10 : 0x20; - - /* set the number of data bits */ - switch (cflag & csize) { - case cs5: - val |= 5 << 8; break; - case cs6: - val |= 6 << 8; break; - case cs7: - val |= 7 << 8; break; - case cs8: - val |= 8 << 8; break; - default: - dev_err(cs->dev, "csize was not cs5-cs8, using default of 8 "); - val |= 8 << 8; - break; - } - - /* set the number of stop bits */ - if (cflag & cstopb) { - if ((cflag & csize) == cs5) - val |= 1; /* 1.5 stop bits */ - else - val |= 2; /* 2 stop bits */ - } - - return set_value(cs, 3, val); -} - - -/*============================================================================*/ -static int gigaset_init_bchannel(struct bc_state *bcs) -{ - /* nothing to do for m10x */ - gigaset_bchannel_up(bcs); - return 0; -} - -static int gigaset_close_bchannel(struct bc_state *bcs) -{ - /* nothing to do for m10x */ - gigaset_bchannel_down(bcs); - return 0; -} - -static int write_modem(struct cardstate *cs); -static int send_cb(struct cardstate *cs); - - -/* write tasklet handler: continue sending current skb, or send command, or - * start sending an skb from the send queue. - */ -static void gigaset_modem_fill(unsigned long data) -{ - struct cardstate *cs = (struct cardstate *) data; - struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ - - gig_dbg(debug_output, "modem_fill"); - - if (cs->hw.usb->busy) { - gig_dbg(debug_output, "modem_fill: busy"); - return; - } - -again: - if (!bcs->tx_skb) { /* no skb is being sent */ - if (cs->cmdbuf) { /* commands to send? */ - gig_dbg(debug_output, "modem_fill: cb"); - if (send_cb(cs) < 0) { - gig_dbg(debug_output, - "modem_fill: send_cb failed"); - goto again; /* no callback will be called! */ - } - return; - } - - /* skbs to send? 
*/ - bcs->tx_skb = skb_dequeue(&bcs->squeue); - if (!bcs->tx_skb) - return; - - gig_dbg(debug_intr, "dequeued skb (adr: %lx)!", - (unsigned long) bcs->tx_skb); - } - - gig_dbg(debug_output, "modem_fill: tx_skb"); - if (write_modem(cs) < 0) { - gig_dbg(debug_output, "modem_fill: write_modem failed"); - goto again; /* no callback will be called! */ - } -} - -/* - * interrupt input urb completion routine - */ -static void gigaset_read_int_callback(struct urb *urb) -{ - struct cardstate *cs = urb->context; - struct inbuf_t *inbuf = cs->inbuf; - int status = urb->status; - int r; - unsigned numbytes; - unsigned char *src; - unsigned long flags; - - if (!status) { - numbytes = urb->actual_length; - - if (numbytes) { - src = cs->hw.usb->rcvbuf; - if (unlikely(*src)) - dev_warn(cs->dev, - "%s: there was no leading 0, but 0x%02x! ", - __func__, (unsigned) *src); - ++src; /* skip leading 0x00 */ - --numbytes; - if (gigaset_fill_inbuf(inbuf, src, numbytes)) { - gig_dbg(debug_intr, "%s-->bh", __func__); - gigaset_schedule_event(inbuf->cs); - } - } else - gig_dbg(debug_intr, "received zero block length"); - } else { - /* the urb might have been killed. */ - gig_dbg(debug_any, "%s - nonzero status received: %d", - __func__, status); - if (status == -enoent || status == -eshutdown) - /* killed or endpoint shutdown: don't resubmit */ - return; - } - - /* resubmit urb */ - spin_lock_irqsave(&cs->lock, flags); - if (!cs->connected) { - spin_unlock_irqrestore(&cs->lock, flags); - pr_err("%s: disconnected ", __func__); - return; - } - r = usb_submit_urb(urb, gfp_atomic); - spin_unlock_irqrestore(&cs->lock, flags); - if (r) - dev_err(cs->dev, "error %d resubmitting urb ", -r); -} - - -/* this callback routine is called when data was transmitted to the device. 
*/ -static void gigaset_write_bulk_callback(struct urb *urb) -{ - struct cardstate *cs = urb->context; - int status = urb->status; - unsigned long flags; - - switch (status) { - case 0: /* normal completion */ - break; - case -enoent: /* killed */ - gig_dbg(debug_any, "%s: killed", __func__); - cs->hw.usb->busy = 0; - return; - default: - dev_err(cs->dev, "bulk transfer failed (status %d) ", - -status); - /* that's all we can do. communication problems - are handled by timeouts or network protocols. */ - } - - spin_lock_irqsave(&cs->lock, flags); - if (!cs->connected) { - pr_err("%s: disconnected ", __func__); - } else { - cs->hw.usb->busy = 0; - tasklet_schedule(&cs->write_tasklet); - } - spin_unlock_irqrestore(&cs->lock, flags); -} - -static int send_cb(struct cardstate *cs) -{ - struct cmdbuf_t *cb = cs->cmdbuf; - unsigned long flags; - int count; - int status = -enoent; - struct usb_cardstate *ucs = cs->hw.usb; - - do { - if (!cb->len) { - spin_lock_irqsave(&cs->cmdlock, flags); - cs->cmdbytes -= cs->curlen; - gig_dbg(debug_output, "send_cb: sent %u bytes, %u left", - cs->curlen, cs->cmdbytes); - cs->cmdbuf = cb->next; - if (cs->cmdbuf) { - cs->cmdbuf->prev = null; - cs->curlen = cs->cmdbuf->len; - } else { - cs->lastcmdbuf = null; - cs->curlen = 0; - } - spin_unlock_irqrestore(&cs->cmdlock, flags); - - if (cb->wake_tasklet) - tasklet_schedule(cb->wake_tasklet); - kfree(cb); - - cb = cs->cmdbuf; - } - - if (cb) { - count = min(cb->len, ucs->bulk_out_size); - gig_dbg(debug_output, "send_cb: send %d bytes", count); - - usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, - usb_sndbulkpipe(ucs->udev, - ucs->bulk_out_epnum), - cb->buf + cb->offset, count, - gigaset_write_bulk_callback, cs); - - cb->offset += count; - cb->len -= count; - ucs->busy = 1; - - spin_lock_irqsave(&cs->lock, flags); - status = cs->connected ? 
- usb_submit_urb(ucs->bulk_out_urb, gfp_atomic) : - -enodev; - spin_unlock_irqrestore(&cs->lock, flags); - - if (status) { - ucs->busy = 0; - dev_err(cs->dev, - "could not submit urb (error %d) ", - -status); - cb->len = 0; /* skip urb => remove cb+wakeup - in next loop cycle */ - } - } - } while (cb && status); /* next command on error */ - - return status; -} - -/* send command to device. */ -static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) -{ - unsigned long flags; - int len; - - gigaset_dbg_buffer(cs->mstate != ms_locked ? - debug_transcmd : debug_lockcmd, - "cmd transmit", cb->len, cb->buf); - - spin_lock_irqsave(&cs->cmdlock, flags); - cb->prev = cs->lastcmdbuf; - if (cs->lastcmdbuf) - cs->lastcmdbuf->next = cb; - else { - cs->cmdbuf = cb; - cs->curlen = cb->len; - } - cs->cmdbytes += cb->len; - cs->lastcmdbuf = cb; - spin_unlock_irqrestore(&cs->cmdlock, flags); - - spin_lock_irqsave(&cs->lock, flags); - len = cb->len; - if (cs->connected) - tasklet_schedule(&cs->write_tasklet); - spin_unlock_irqrestore(&cs->lock, flags); - return len; -} - -static int gigaset_write_room(struct cardstate *cs) -{ - unsigned bytes; - - bytes = cs->cmdbytes; - return bytes < if_writebuf ? 
if_writebuf - bytes : 0; -} - -static int gigaset_chars_in_buffer(struct cardstate *cs) -{ - return cs->cmdbytes; -} - -/* - * set the break characters on the internal serial adapter - * using undocumented device commands reverse engineered from usb traces - * of the siemens windows driver - */ -static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) -{ - struct usb_device *udev = cs->hw.usb->udev; - - gigaset_dbg_buffer(debug_usbreq, "brkchars", 6, buf); - memcpy(cs->hw.usb->bchars, buf, 6); - return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, - 0, 0, &buf, 6, 2000); -} - -static void gigaset_freebcshw(struct bc_state *bcs) -{ - /* unused */ -} - -/* initialize the b-channel structure */ -static int gigaset_initbcshw(struct bc_state *bcs) -{ - /* unused */ - bcs->hw.usb = null; - return 0; -} - -static void gigaset_reinitbcshw(struct bc_state *bcs) -{ - /* nothing to do for m10x */ -} - -static void gigaset_freecshw(struct cardstate *cs) -{ - tasklet_kill(&cs->write_tasklet); - kfree(cs->hw.usb); -} - -static int gigaset_initcshw(struct cardstate *cs) -{ - struct usb_cardstate *ucs; - - cs->hw.usb = ucs = - kmalloc(sizeof(struct usb_cardstate), gfp_kernel); - if (!ucs) { - pr_err("out of memory "); - return -enomem; - } - - ucs->bchars[0] = 0; - ucs->bchars[1] = 0; - ucs->bchars[2] = 0; - ucs->bchars[3] = 0; - ucs->bchars[4] = 0x11; - ucs->bchars[5] = 0x13; - ucs->bulk_out_buffer = null; - ucs->bulk_out_urb = null; - ucs->read_urb = null; - tasklet_init(&cs->write_tasklet, - gigaset_modem_fill, (unsigned long) cs); - - return 0; -} - -/* send data from current skb to the device. 
*/ -static int write_modem(struct cardstate *cs) -{ - int ret = 0; - int count; - struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ - struct usb_cardstate *ucs = cs->hw.usb; - unsigned long flags; - - gig_dbg(debug_output, "len: %d...", bcs->tx_skb->len); - - if (!bcs->tx_skb->len) { - dev_kfree_skb_any(bcs->tx_skb); - bcs->tx_skb = null; - return -einval; - } - - /* copy data to bulk out buffer and transmit data */ - count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); - skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); - skb_pull(bcs->tx_skb, count); - ucs->busy = 1; - gig_dbg(debug_output, "write_modem: send %d bytes", count); - - spin_lock_irqsave(&cs->lock, flags); - if (cs->connected) { - usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, - usb_sndbulkpipe(ucs->udev, - ucs->bulk_out_epnum), - ucs->bulk_out_buffer, count, - gigaset_write_bulk_callback, cs); - ret = usb_submit_urb(ucs->bulk_out_urb, gfp_atomic); - } else { - ret = -enodev; - } - spin_unlock_irqrestore(&cs->lock, flags); - - if (ret) { - dev_err(cs->dev, "could not submit urb (error %d) ", -ret); - ucs->busy = 0; - } - - if (!bcs->tx_skb->len) { - /* skb sent completely */ - gigaset_skb_sent(bcs, bcs->tx_skb); - - gig_dbg(debug_intr, "kfree skb (adr: %lx)!", - (unsigned long) bcs->tx_skb); - dev_kfree_skb_any(bcs->tx_skb); - bcs->tx_skb = null; - } - - return ret; -} - -static int gigaset_probe(struct usb_interface *interface, - const struct usb_device_id *id) -{ - int retval; - struct usb_device *udev = interface_to_usbdev(interface); - struct usb_host_interface *hostif = interface->cur_altsetting; - struct cardstate *cs = null; - struct usb_cardstate *ucs = null; - struct usb_endpoint_descriptor *endpoint; - int buffer_size; - - gig_dbg(debug_any, "%s: check if device matches ...", __func__); - - /* see if the device offered us matches what we can accept */ - if ((le16_to_cpu(udev->descriptor.idvendor) != usb_m105_vendor_id) || - 
(le16_to_cpu(udev->descriptor.idproduct) != usb_m105_product_id)) { - gig_dbg(debug_any, "device id (0x%x, 0x%x) not for me - skip", - le16_to_cpu(udev->descriptor.idvendor), - le16_to_cpu(udev->descriptor.idproduct)); - return -enodev; - } - if (hostif->desc.binterfacenumber != 0) { - gig_dbg(debug_any, "interface %d not for me - skip", - hostif->desc.binterfacenumber); - return -enodev; - } - if (hostif->desc.balternatesetting != 0) { - dev_notice(&udev->dev, "unsupported altsetting %d - skip", - hostif->desc.balternatesetting); - return -enodev; - } - if (hostif->desc.binterfaceclass != 255) { - dev_notice(&udev->dev, "unsupported interface class %d - skip", - hostif->desc.binterfaceclass); - return -enodev; - } - - dev_info(&udev->dev, "%s: device matched ... ! ", __func__); - - /* allocate memory for our device state and initialize it */ - cs = gigaset_initcs(driver, 1, 1, 0, cidmode, gigaset_modulename); - if (!cs) - return -enodev; - ucs = cs->hw.usb; - - /* save off device structure ptrs for later use */ - usb_get_dev(udev); - ucs->udev = udev; - ucs->interface = interface; - cs->dev = &interface->dev; - - /* save address of controller structure */ - usb_set_intfdata(interface, cs); - - endpoint = &hostif->endpoint[0].desc; - - buffer_size = le16_to_cpu(endpoint->wmaxpacketsize); - ucs->bulk_out_size = buffer_size; - ucs->bulk_out_epnum = usb_endpoint_num(endpoint); - ucs->bulk_out_buffer = kmalloc(buffer_size, gfp_kernel); - if (!ucs->bulk_out_buffer) { - dev_err(cs->dev, "couldn't allocate bulk_out_buffer "); - retval = -enomem; - goto error; - } - - ucs->bulk_out_urb = usb_alloc_urb(0, gfp_kernel); - if (!ucs->bulk_out_urb) { - dev_err(cs->dev, "couldn't allocate bulk_out_urb "); - retval = -enomem; - goto error; - } - - endpoint = &hostif->endpoint[1].desc; - - ucs->busy = 0; - - ucs->read_urb = usb_alloc_urb(0, gfp_kernel); - if (!ucs->read_urb) { - dev_err(cs->dev, "no free urbs available "); - retval = -enomem; - goto error; - } - buffer_size = 
le16_to_cpu(endpoint->wmaxpacketsize); - ucs->rcvbuf_size = buffer_size; - ucs->rcvbuf = kmalloc(buffer_size, gfp_kernel); - if (!ucs->rcvbuf) { - dev_err(cs->dev, "couldn't allocate rcvbuf "); - retval = -enomem; - goto error; - } - /* fill the interrupt urb and send it to the core */ - usb_fill_int_urb(ucs->read_urb, udev, - usb_rcvintpipe(udev, usb_endpoint_num(endpoint)), - ucs->rcvbuf, buffer_size, - gigaset_read_int_callback, - cs, endpoint->binterval); - - retval = usb_submit_urb(ucs->read_urb, gfp_kernel); - if (retval) { - dev_err(cs->dev, "could not submit urb (error %d) ", -retval); - goto error; - } - - /* tell common part that the device is ready */ - if (startmode == sm_locked) - cs->mstate = ms_locked; - - retval = gigaset_start(cs); - if (retval < 0) { - tasklet_kill(&cs->write_tasklet); - goto error; - } - return 0; - -error: - usb_kill_urb(ucs->read_urb); - kfree(ucs->bulk_out_buffer); - usb_free_urb(ucs->bulk_out_urb); - kfree(ucs->rcvbuf); - usb_free_urb(ucs->read_urb); - usb_set_intfdata(interface, null); - ucs->read_urb = ucs->bulk_out_urb = null; - ucs->rcvbuf = ucs->bulk_out_buffer = null; - usb_put_dev(ucs->udev); - ucs->udev = null; - ucs->interface = null; - gigaset_freecs(cs); - return retval; -} - -static void gigaset_disconnect(struct usb_interface *interface) -{ - struct cardstate *cs; - struct usb_cardstate *ucs; - - cs = usb_get_intfdata(interface); - ucs = cs->hw.usb; - - dev_info(cs->dev, "disconnecting gigaset usb adapter "); - - usb_kill_urb(ucs->read_urb); - - gigaset_stop(cs); - - usb_set_intfdata(interface, null); - tasklet_kill(&cs->write_tasklet); - - usb_kill_urb(ucs->bulk_out_urb); - - kfree(ucs->bulk_out_buffer); - usb_free_urb(ucs->bulk_out_urb); - kfree(ucs->rcvbuf); - usb_free_urb(ucs->read_urb); - ucs->read_urb = ucs->bulk_out_urb = null; - ucs->rcvbuf = ucs->bulk_out_buffer = null; - - usb_put_dev(ucs->udev); - ucs->interface = null; - ucs->udev = null; - cs->dev = null; - gigaset_freecs(cs); -} - -/* 
gigaset_suspend - * this function is called before the usb connection is suspended or reset. - */ -static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) -{ - struct cardstate *cs = usb_get_intfdata(intf); - - /* stop activity */ - cs->connected = 0; /* prevent rescheduling */ - usb_kill_urb(cs->hw.usb->read_urb); - tasklet_kill(&cs->write_tasklet); - usb_kill_urb(cs->hw.usb->bulk_out_urb); - - gig_dbg(debug_suspend, "suspend complete"); - return 0; -} - -/* gigaset_resume - * this function is called after the usb connection has been resumed or reset. - */ -static int gigaset_resume(struct usb_interface *intf) -{ - struct cardstate *cs = usb_get_intfdata(intf); - int rc; - - /* resubmit interrupt urb */ - cs->connected = 1; - rc = usb_submit_urb(cs->hw.usb->read_urb, gfp_kernel); - if (rc) { - dev_err(cs->dev, "could not submit read urb (error %d) ", -rc); - return rc; - } - - gig_dbg(debug_suspend, "resume complete"); - return 0; -} - -/* gigaset_pre_reset - * this function is called before the usb connection is reset. 
- */ -static int gigaset_pre_reset(struct usb_interface *intf) -{ - /* same as suspend */ - return gigaset_suspend(intf, pmsg_on); -} - -static const struct gigaset_ops ops = { - .write_cmd = gigaset_write_cmd, - .write_room = gigaset_write_room, - .chars_in_buffer = gigaset_chars_in_buffer, - .brkchars = gigaset_brkchars, - .init_bchannel = gigaset_init_bchannel, - .close_bchannel = gigaset_close_bchannel, - .initbcshw = gigaset_initbcshw, - .freebcshw = gigaset_freebcshw, - .reinitbcshw = gigaset_reinitbcshw, - .initcshw = gigaset_initcshw, - .freecshw = gigaset_freecshw, - .set_modem_ctrl = gigaset_set_modem_ctrl, - .baud_rate = gigaset_baud_rate, - .set_line_ctrl = gigaset_set_line_ctrl, - .send_skb = gigaset_m10x_send_skb, - .handle_input = gigaset_m10x_input, -}; - -/* - * this function is called while kernel-module is loaded - */ -static int __init usb_gigaset_init(void) -{ - int result; - - /* allocate memory for our driver state and initialize it */ - driver = gigaset_initdriver(gigaset_minor, gigaset_minors, - gigaset_modulename, gigaset_devname, - &ops, this_module); - if (driver == null) { - result = -enomem; - goto error; - } - - /* register this driver with the usb subsystem */ - result = usb_register(&gigaset_usb_driver); - if (result < 0) { - pr_err("error %d registering usb driver ", -result); - goto error; - } - - pr_info(driver_desc " "); - return 0; - -error: - if (driver) - gigaset_freedriver(driver); - driver = null; - return result; -} - -/* - * this function is called while unloading the kernel-module - */ -static void __exit usb_gigaset_exit(void) -{ - int i; - - gigaset_blockdriver(driver); /* => probe will fail - * => no gigaset_start any more - */ - - /* stop all connected devices */ - for (i = 0; i < driver->minors; i++) - gigaset_shutdown(driver->cs + i); - - /* from now on, no isdn callback should be possible */ - - /* deregister this driver with the usb subsystem */ - usb_deregister(&gigaset_usb_driver); - /* this will call the 
disconnect-callback */ - /* from now on, no disconnect/probe callback should be running */ - - gigaset_freedriver(driver); - driver = null; -} - - -module_init(usb_gigaset_init); -module_exit(usb_gigaset_exit); - -module_author(driver_author); -module_description(driver_desc); - -module_license("gpl"); diff --git a/drivers/staging/isdn/hysdn/kconfig b/drivers/staging/isdn/hysdn/kconfig --- a/drivers/staging/isdn/hysdn/kconfig +++ /dev/null -# spdx-license-identifier: gpl-2.0-only -config hysdn - tristate "hypercope hysdn cards (champ, ergo, metro) support (module only)" - depends on m && proc_fs && pci - help - say y here if you have one of hypercope's active pci isdn cards - champ, ergo and metro. you will then get a module called hysdn. - please read the file <file:documentation/isdn/hysdn.rst> for more - information. - -config hysdn_capi - bool "hysdn capi 2.0 support" - depends on hysdn && isdn_capi - help - say y here if you like to use hypercope's capi 2.0 interface. diff --git a/drivers/staging/isdn/hysdn/makefile b/drivers/staging/isdn/hysdn/makefile --- a/drivers/staging/isdn/hysdn/makefile +++ /dev/null -# spdx-license-identifier: gpl-2.0-only -# makefile for the hysdn isdn device driver - -# each configuration option enables a list of files. - -obj-$(config_hysdn) += hysdn.o - -# multipart objects. - -hysdn-y := hysdn_procconf.o hysdn_proclog.o boardergo.o \ - hysdn_boot.o hysdn_sched.o hysdn_net.o hysdn_init.o -hysdn-$(config_hysdn_capi) += hycapi.o diff --git a/drivers/staging/isdn/hysdn/boardergo.c b/drivers/staging/isdn/hysdn/boardergo.c --- a/drivers/staging/isdn/hysdn/boardergo.c +++ /dev/null -/* $id: boardergo.c,v 1.5.6.7 2001/11/06 21:58:19 kai exp $ - * - * linux driver for hysdn cards, specific routines for ergo type boards. 
- * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - * as all linux supported cards champ2, ergo and metro2/4 use the same - * dpram interface and layout with only minor differences all related - * stuff is done here, not in separate modules. - * - */ - -#include <linux/signal.h> -#include <linux/kernel.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/vmalloc.h> -#include <linux/delay.h> -#include <asm/io.h> - -#include "hysdn_defs.h" -#include "boardergo.h" - -#define byteout(addr, val) outb(val, addr) -#define bytein(addr) inb(addr) - -/***************************************************/ -/* the cards interrupt handler. called from system */ -/***************************************************/ -static irqreturn_t -ergo_interrupt(int intno, void *dev_id) -{ - hysdn_card *card = dev_id; /* parameter from irq */ - tergdpram *dpr; - unsigned long flags; - unsigned char volatile b; - - if (!card) - return irq_none; /* error -> spurious interrupt */ - if (!card->irq_enabled) - return irq_none; /* other device interrupting or irq switched off */ - - spin_lock_irqsave(&card->hysdn_lock, flags); /* no further irqs allowed */ - - if (!(bytein(card->iobase + pci9050_intr_reg) & pci9050_intr_reg_stat1)) { - spin_unlock_irqrestore(&card->hysdn_lock, flags); /* restore old state */ - return irq_none; /* no interrupt requested by e1 */ - } - /* clear any pending ints on the board */ - dpr = card->dpram; - b = dpr->topcint; /* clear for ergo */ - b |= dpr->topcintmetro; /* same for metro */ - b |= dpr->tohyint; /* and for champ */ - - /* start kernel task immediately after leaving all interrupts */ - if (!card->hw_lock) - schedule_work(&card->irq_queue); - spin_unlock_irqrestore(&card->hysdn_lock, flags); - return irq_handled; 
-} /* ergo_interrupt */ - -/******************************************************************************/ -/* ergo_irq_bh will be called as part of the kernel clearing its shared work */ -/* queue sometime after a call to schedule_work has been made passing our */ -/* work_struct. this task is the only one handling data transfer from or to */ -/* the card after booting. the task may be queued from everywhere */ -/* (interrupts included). */ -/******************************************************************************/ -static void -ergo_irq_bh(struct work_struct *ugli_api) -{ - hysdn_card *card = container_of(ugli_api, hysdn_card, irq_queue); - tergdpram *dpr; - int again; - unsigned long flags; - - if (card->state != card_state_run) - return; /* invalid call */ - - dpr = card->dpram; /* point to dpram */ - - spin_lock_irqsave(&card->hysdn_lock, flags); - if (card->hw_lock) { - spin_unlock_irqrestore(&card->hysdn_lock, flags); /* hardware currently unavailable */ - return; - } - card->hw_lock = 1; /* we now lock the hardware */ - - do { - again = 0; /* assume loop not to be repeated */ - - if (!dpr->tohyflag) { - /* we are able to send a buffer */ - - if (hysdn_sched_tx(card, dpr->tohybuf, &dpr->tohysize, &dpr->tohychannel, - erg_to_hy_buf_size)) { - dpr->tohyflag = 1; /* enable tx */ - again = 1; /* restart loop */ - } - } /* we are able to send a buffer */ - if (dpr->topcflag) { - /* a message has arrived for us, handle it */ - - if (hysdn_sched_rx(card, dpr->topcbuf, dpr->topcsize, dpr->topcchannel)) { - dpr->topcflag = 0; /* we worked the data */ - again = 1; /* restart loop */ - } - } /* a message has arrived for us */ - if (again) { - dpr->tohyint = 1; - dpr->topcint = 1; /* interrupt to e1 for all cards */ - } else - card->hw_lock = 0; /* free hardware again */ - } while (again); /* until nothing more to do */ - - spin_unlock_irqrestore(&card->hysdn_lock, flags); -} /* ergo_irq_bh */ - - -/*********************************************************/ -/* 
stop the card (hardware reset) and disable interrupts */ -/*********************************************************/ -static void -ergo_stopcard(hysdn_card *card) -{ - unsigned long flags; - unsigned char val; - - hysdn_net_release(card); /* first release the net device if existing */ -#ifdef config_hysdn_capi - hycapi_capi_stop(card); -#endif /* config_hysdn_capi */ - spin_lock_irqsave(&card->hysdn_lock, flags); - val = bytein(card->iobase + pci9050_intr_reg); /* get actual value */ - val &= ~(pci9050_intr_reg_enpci | pci9050_intr_reg_en1); /* mask irq */ - byteout(card->iobase + pci9050_intr_reg, val); - card->irq_enabled = 0; - byteout(card->iobase + pci9050_user_io, pci9050_e1_reset); /* reset e1 processor */ - card->state = card_state_unused; - card->err_log_state = errlog_state_off; /* currently no log active */ - - spin_unlock_irqrestore(&card->hysdn_lock, flags); -} /* ergo_stopcard */ - -/**************************************************************************/ -/* enable or disable the cards error log. the event is queued if possible */ -/**************************************************************************/ -static void -ergo_set_errlog_state(hysdn_card *card, int on) -{ - unsigned long flags; - - if (card->state != card_state_run) { - card->err_log_state = errlog_state_off; /* must be off */ - return; - } - spin_lock_irqsave(&card->hysdn_lock, flags); - - if (((card->err_log_state == errlog_state_off) && !on) || - ((card->err_log_state == errlog_state_on) && on)) { - spin_unlock_irqrestore(&card->hysdn_lock, flags); - return; /* nothing to do */ - } - if (on) - card->err_log_state = errlog_state_start; /* request start */ - else - card->err_log_state = errlog_state_stop; /* request stop */ - - spin_unlock_irqrestore(&card->hysdn_lock, flags); - schedule_work(&card->irq_queue); -} /* ergo_set_errlog_state */ - -/******************************************/ -/* test the cards ram and return 0 if ok. 
*/ -/******************************************/ -static const char testtext[36] = "this message is filler, why read it"; - -static int -ergo_testram(hysdn_card *card) -{ - tergdpram *dpr = card->dpram; - - memset(dpr->traptable, 0, sizeof(dpr->traptable)); /* clear all traps */ - dpr->tohyint = 1; /* e1 intr state forced */ - - memcpy(&dpr->tohybuf[erg_to_hy_buf_size - sizeof(testtext)], testtext, - sizeof(testtext)); - if (memcmp(&dpr->tohybuf[erg_to_hy_buf_size - sizeof(testtext)], testtext, - sizeof(testtext))) - return (-1); - - memcpy(&dpr->topcbuf[erg_to_pc_buf_size - sizeof(testtext)], testtext, - sizeof(testtext)); - if (memcmp(&dpr->topcbuf[erg_to_pc_buf_size - sizeof(testtext)], testtext, - sizeof(testtext))) - return (-1); - - return (0); -} /* ergo_testram */ - -/*****************************************************************************/ -/* this function is intended to write stage 1 boot image to the cards buffer */ -/* this is done in two steps. first the 1024 hi-words are written (offs=0), */ -/* then the 1024 lo-bytes are written. the remaining dpram is cleared, the */ -/* pci-write-buffers flushed and the card is taken out of reset. */ -/* the function then waits for a reaction of the e1 processor or a timeout. */ -/* negative return values are interpreted as errors. */ -/*****************************************************************************/ -static int -ergo_writebootimg(struct hysdn_card *card, unsigned char *buf, - unsigned long offs) -{ - unsigned char *dst; - tergdpram *dpram; - int cnt = (boot_img_size >> 2); /* number of words to move and swap (byte order!) 
*/ - - if (card->debug_flags & log_pof_card) - hysdn_addlog(card, "ergo: write bootldr offs=0x%lx ", offs); - - dst = card->dpram; /* pointer to start of dpram */ - dst += (offs + erg_dpram_fill_size); /* offset in the dpram */ - while (cnt--) { - *dst++ = *(buf + 1); /* high byte */ - *dst++ = *buf; /* low byte */ - dst += 2; /* point to next longword */ - buf += 2; /* buffer only filled with words */ - } - - /* if low words (offs = 2) have been written, clear the rest of the dpram, */ - /* flush the pci-write-buffer and take the e1 out of reset */ - if (offs) { - memset(card->dpram, 0, erg_dpram_fill_size); /* fill the dpram still not cleared */ - dpram = card->dpram; /* get pointer to dpram structure */ - dpram->tohynodpramerrlog = 0xff; /* write a dpram register */ - while (!dpram->tohynodpramerrlog); /* reread volatile register to flush pci */ - - byteout(card->iobase + pci9050_user_io, pci9050_e1_run); /* start e1 processor */ - /* the interrupts are still masked */ - - msleep_interruptible(20); /* timeout 20ms */ - - if (((tdprambootspooler *) card->dpram)->len != dpram_spooler_data_size) { - if (card->debug_flags & log_pof_card) - hysdn_addlog(card, "ergo: write bootldr no answer"); - return (-err_bootimg_fail); - } - } /* start_boot_img */ - return (0); /* successful */ -} /* ergo_writebootimg */ - -/********************************************************************************/ -/* ergo_writebootseq writes the buffer containing len bytes to the e1 processor */ -/* using the boot spool mechanism. if everything works fine 0 is returned. in */ -/* case of errors a negative error value is returned. 
*/ -/********************************************************************************/ -static int -ergo_writebootseq(struct hysdn_card *card, unsigned char *buf, int len) -{ - tdprambootspooler *sp = (tdprambootspooler *) card->dpram; - unsigned char *dst; - unsigned char buflen; - int nr_write; - unsigned char tmp_rdptr; - unsigned char wr_mirror; - int i; - - if (card->debug_flags & log_pof_card) - hysdn_addlog(card, "ergo: write boot seq len=%d ", len); - - dst = sp->data; /* point to data in spool structure */ - buflen = sp->len; /* maximum len of spooled data */ - wr_mirror = sp->wrptr; /* only once read */ - - /* try until all bytes written or error */ - i = 0x1000; /* timeout value */ - while (len) { - - /* first determine the number of bytes that may be buffered */ - do { - tmp_rdptr = sp->rdptr; /* first read the pointer */ - i--; /* decrement timeout */ - } while (i && (tmp_rdptr != sp->rdptr)); /* wait for stable pointer */ - - if (!i) { - if (card->debug_flags & log_pof_card) - hysdn_addlog(card, "ergo: write boot seq timeout"); - return (-err_bootseq_fail); /* value not stable -> timeout */ - } - if ((nr_write = tmp_rdptr - wr_mirror - 1) < 0) - nr_write += buflen; /* now we got number of free bytes - 1 in buffer */ - - if (!nr_write) - continue; /* no free bytes in buffer */ - - if (nr_write > len) - nr_write = len; /* limit if last few bytes */ - i = 0x1000; /* reset timeout value */ - - /* now we know how much bytes we may put in the puffer */ - len -= nr_write; /* we savely could adjust len before output */ - while (nr_write--) { - *(dst + wr_mirror) = *buf++; /* output one byte */ - if (++wr_mirror >= buflen) - wr_mirror = 0; - sp->wrptr = wr_mirror; /* announce the next byte to e1 */ - } /* while (nr_write) */ - - } /* while (len) */ - return (0); -} /* ergo_writebootseq */ - -/***********************************************************************************/ -/* ergo_waitpofready waits for a maximum of 10 seconds for the completition of the 
*/ -/* boot process. if the process has been successful 0 is returned otherwise a */ -/* negative error code is returned. */ -/***********************************************************************************/ -static int -ergo_waitpofready(struct hysdn_card *card) -{ - tergdpram *dpr = card->dpram; /* pointer to dpram structure */ - int timecnt = 10000 / 50; /* timeout is 10 secs max. */ - unsigned long flags; - int msg_size; - int i; - - if (card->debug_flags & log_pof_card) - hysdn_addlog(card, "ergo: waiting for pof ready"); - while (timecnt--) { - /* wait until timeout */ - - if (dpr->topcflag) { - /* data has arrived */ - - if ((dpr->topcchannel != chan_system) || - (dpr->topcsize < min_rdy_msg_size) || - (dpr->topcsize > max_rdy_msg_size) || - ((*(unsigned long *) dpr->topcbuf) != rdy_magic)) - break; /* an error occurred */ - - /* check for additional data delivered during sysready */ - msg_size = dpr->topcsize - rdy_magic_size; - if (msg_size > 0) - if (evalsysrtokdata(card, dpr->topcbuf + rdy_magic_size, msg_size)) - break; - - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "ergo: pof boot success"); - spin_lock_irqsave(&card->hysdn_lock, flags); - - card->state = card_state_run; /* now card is running */ - /* enable the cards interrupt */ - byteout(card->iobase + pci9050_intr_reg, - bytein(card->iobase + pci9050_intr_reg) | - (pci9050_intr_reg_enpci | pci9050_intr_reg_en1)); - card->irq_enabled = 1; /* we are ready to receive interrupts */ - - dpr->topcflag = 0; /* reset data indicator */ - dpr->tohyint = 1; - dpr->topcint = 1; /* interrupt to e1 for all cards */ - - spin_unlock_irqrestore(&card->hysdn_lock, flags); - if ((hynet_enable & (1 << card->myid)) - && (i = hysdn_net_create(card))) - { - ergo_stopcard(card); - card->state = card_state_booterr; - return (i); - } -#ifdef config_hysdn_capi - if ((i = hycapi_capi_create(card))) { - printk(kern_warning "hysdn: failed to create capi-interface. 
"); - } -#endif /* config_hysdn_capi */ - return (0); /* success */ - } /* data has arrived */ - msleep_interruptible(50); /* timeout 50ms */ - } /* wait until timeout */ - - if (card->debug_flags & log_pof_card) - hysdn_addlog(card, "ergo: pof boot ready timeout"); - return (-err_pof_timeout); -} /* ergo_waitpofready */ - - - -/************************************************************************************/ -/* release the cards hardware. before releasing do a interrupt disable and hardware */ -/* reset. also unmap dpram. */ -/* use only during module release. */ -/************************************************************************************/ -static void -ergo_releasehardware(hysdn_card *card) -{ - ergo_stopcard(card); /* first stop the card if not already done */ - free_irq(card->irq, card); /* release interrupt */ - release_region(card->iobase + pci9050_intr_reg, 1); /* release all io ports */ - release_region(card->iobase + pci9050_user_io, 1); - iounmap(card->dpram); - card->dpram = null; /* release shared mem */ -} /* ergo_releasehardware */ - - -/*********************************************************************************/ -/* acquire the needed hardware ports and map dpram. if an error occurs a nonzero */ -/* value is returned. */ -/* use only during module init. 
*/ -/*********************************************************************************/ -int -ergo_inithardware(hysdn_card *card) -{ - if (!request_region(card->iobase + pci9050_intr_reg, 1, "hysdn")) - return (-1); - if (!request_region(card->iobase + pci9050_user_io, 1, "hysdn")) { - release_region(card->iobase + pci9050_intr_reg, 1); - return (-1); /* ports already in use */ - } - card->memend = card->membase + erg_dpram_page_size - 1; - if (!(card->dpram = ioremap(card->membase, erg_dpram_page_size))) { - release_region(card->iobase + pci9050_intr_reg, 1); - release_region(card->iobase + pci9050_user_io, 1); - return (-1); - } - - ergo_stopcard(card); /* disable interrupts */ - if (request_irq(card->irq, ergo_interrupt, irqf_shared, "hysdn", card)) { - ergo_releasehardware(card); /* return the acquired hardware */ - return (-1); - } - /* success, now setup the function pointers */ - card->stopcard = ergo_stopcard; - card->releasehardware = ergo_releasehardware; - card->testram = ergo_testram; - card->writebootimg = ergo_writebootimg; - card->writebootseq = ergo_writebootseq; - card->waitpofready = ergo_waitpofready; - card->set_errlog_state = ergo_set_errlog_state; - init_work(&card->irq_queue, ergo_irq_bh); - spin_lock_init(&card->hysdn_lock); - - return (0); -} /* ergo_inithardware */ diff --git a/drivers/staging/isdn/hysdn/boardergo.h b/drivers/staging/isdn/hysdn/boardergo.h --- a/drivers/staging/isdn/hysdn/boardergo.h +++ /dev/null -/* $id: boardergo.h,v 1.2.6.1 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, definitions for ergo type boards (buffers..). - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. 
- * - */ - - -/************************************************/ -/* defines for the dual port memory of the card */ -/************************************************/ -#define erg_dpram_page_size 0x2000 /* dpram occupies a 8k page */ -#define boot_img_size 4096 -#define erg_dpram_fill_size (erg_dpram_page_size - boot_img_size) - -#define erg_to_hy_buf_size 0x0e00 /* 3072 bytes buffer size to card */ -#define erg_to_pc_buf_size 0x0e00 /* 3072 bytes to pc, too */ - -/* following dpram layout copied from os2-driver boarderg.h */ -typedef struct ergdpram_tag { - /*0000 */ unsigned char tohybuf[erg_to_hy_buf_size]; - /*0e00 */ unsigned char topcbuf[erg_to_pc_buf_size]; - - /*1c00 */ unsigned char bsoftuart[size_rsv_soft_uart]; - /* size 0x1b0 */ - - /*1db0 *//* terrlogentry */ unsigned char volatile errlogmsg[64]; - /* size 64 bytes */ - /*1db0 unsigned long ulerrtype; */ - /*1db4 unsigned long ulerrsubtype; */ - /*1db8 unsigned long uctextsize; */ - /*1db9 unsigned long uctext[errlog_text_size]; *//* asciiz of len uctextsize-1 */ - /*1df0 */ - - /*1df0 */ unsigned short volatile tohychannel; - /*1df2 */ unsigned short volatile tohysize; - /*1df4 */ unsigned char volatile tohyflag; - /* !=0: msg for hy waiting */ - /*1df5 */ unsigned char volatile topcflag; - /* !=0: msg for pc waiting */ - /*1df6 */ unsigned short volatile topcchannel; - /*1df8 */ unsigned short volatile topcsize; - /*1dfa */ unsigned char bres1dba[0x1e00 - 0x1dfa]; - /* 6 bytes */ - - /*1e00 */ unsigned char brestofentrytbl[0x1f00 - 0x1e00]; - /*1f00 */ unsigned long traptable[62]; - /*1ff8 */ unsigned char bres1ff8[0x1ffb - 0x1ff8]; - /* low part of reset vetor */ - /*1ffb */ unsigned char topcintmetro; - /* notes: - * - metro has 32-bit boot ram - accessing - * topcint and tohyint would be the same; - * so we moved topcint to 1ffb. - * because on the pc side both vars are - * readonly (reseting on int from e1 to pc), - * we can read both vars on both cards - * without destroying anything. 
- * - 1ffb is the high byte of the reset vector, - * so e1 side should not change this byte - * when writing! - */ - /*1ffc */ unsigned char volatile tohynodpramerrlog; - /* note: tohynodpramerrlog is used to inform - * boot loader, not to use dpram based - * errlog; when dos driver is rewritten - * this becomes obsolete - */ - /*1ffd */ unsigned char bres1ffd; - /*1ffe */ unsigned char topcint; - /* e1_intclear; on champ2: e1_intset */ - /*1fff */ unsigned char tohyint; - /* e1_intset; on champ2: e1_intclear */ -} tergdpram; - -/**********************************************/ -/* pci9050 controller local register offsets: */ -/* copied from boarderg.c */ -/**********************************************/ -#define pci9050_intr_reg 0x4c /* interrupt register */ -#define pci9050_user_io 0x51 /* user i/o register */ - -/* bitmask for pci9050_intr_reg: */ -#define pci9050_intr_reg_en1 0x01 /* 1= enable (def.), 0= disable */ -#define pci9050_intr_reg_pol1 0x02 /* 1= active high (def.), 0= active low */ -#define pci9050_intr_reg_stat1 0x04 /* 1= intr. active, 0= intr. not active (def.) */ -#define pci9050_intr_reg_enpci 0x40 /* 1= pci interrupts enable (def.) */ - -/* bitmask for pci9050_user_io: */ -#define pci9050_user_io_en3 0x02 /* 1= disable , 0= enable (def.) */ -#define pci9050_user_io_dir3 0x04 /* 1= output (def.), 0= input */ -#define pci9050_user_io_dat3 0x08 /* 1= high (def.) , 0= low */ - -#define pci9050_e1_reset (pci9050_user_io_dir3) /* 0x04 */ -#define pci9050_e1_run (pci9050_user_io_dat3 | pci9050_user_io_dir3) /* 0x0c */ diff --git a/drivers/staging/isdn/hysdn/hycapi.c b/drivers/staging/isdn/hysdn/hycapi.c --- a/drivers/staging/isdn/hysdn/hycapi.c +++ /dev/null -/* $id: hycapi.c,v 1.8.6.4 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, capi2.0-interface. 
- * - * author ulrich albrecht <u.albrecht@hypercope.de> for hypercope gmbh - * copyright 2000 by hypercope gmbh - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/signal.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/netdevice.h> -#include <linux/slab.h> - -#define ver_driver 0 -#define ver_cardtype 1 -#define ver_hwid 2 -#define ver_serial 3 -#define ver_option 4 -#define ver_proto 5 -#define ver_profile 6 -#define ver_capi 7 - -#include "hysdn_defs.h" -#include <linux/kernelcapi.h> - -static char hycapi_revision[] = "$revision: 1.8.6.4 $"; - -unsigned int hycapi_enable = 0xffffffff; -module_param(hycapi_enable, uint, 0); - -typedef struct _hycapi_appl { - unsigned int ctrl_mask; - capi_register_params rp; - struct sk_buff *listen_req[capi_maxcontr]; -} hycapi_appl; - -static hycapi_appl hycapi_applications[capi_maxappl]; - -static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb); - -static inline int _hycapi_appcheck(int app_id, int ctrl_no) -{ - if ((ctrl_no <= 0) || (ctrl_no > capi_maxcontr) || (app_id <= 0) || - (app_id > capi_maxappl)) - { - printk(kern_err "hycapi: invalid request app_id %d for controller %d", app_id, ctrl_no); - return -1; - } - return ((hycapi_applications[app_id - 1].ctrl_mask & (1 << (ctrl_no-1))) != 0); -} - -/****************************** -kernel-capi callback reset_ctr -******************************/ - -static void -hycapi_reset_ctr(struct capi_ctr *ctrl) -{ - hycapictrl_info *cinfo = ctrl->driverdata; - -#ifdef hycapi_printfnames - printk(kern_notice "hycapi hycapi_reset_ctr "); -#endif - capilib_release(&cinfo->ncci_head); - capi_ctr_down(ctrl); -} - -/****************************** -kernel-capi callback remove_ctr -******************************/ - -static void 
-hycapi_remove_ctr(struct capi_ctr *ctrl) -{ - int i; - hycapictrl_info *cinfo = null; - hysdn_card *card = null; -#ifdef hycapi_printfnames - printk(kern_notice "hycapi hycapi_remove_ctr "); -#endif - cinfo = (hycapictrl_info *)(ctrl->driverdata); - if (!cinfo) { - printk(kern_err "no hycapictrl_info set!"); - return; - } - card = cinfo->card; - capi_ctr_suspend_output(ctrl); - for (i = 0; i < capi_maxappl; i++) { - if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) { - kfree_skb(hycapi_applications[i].listen_req[ctrl->cnr - 1]); - hycapi_applications[i].listen_req[ctrl->cnr - 1] = null; - } - } - detach_capi_ctr(ctrl); - ctrl->driverdata = null; - kfree(card->hyctrlinfo); - - - card->hyctrlinfo = null; -} - -/*********************************************************** - -queue a capi-message to the controller. - -***********************************************************/ - -static void -hycapi_sendmsg_internal(struct capi_ctr *ctrl, struct sk_buff *skb) -{ - hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); - hysdn_card *card = cinfo->card; - - spin_lock_irq(&cinfo->lock); -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_send_message "); -#endif - cinfo->skbs[cinfo->in_idx++] = skb; /* add to buffer list */ - if (cinfo->in_idx >= hysdn_max_capi_skb) - cinfo->in_idx = 0; /* wrap around */ - cinfo->sk_count++; /* adjust counter */ - if (cinfo->sk_count >= hysdn_max_capi_skb) { - /* inform upper layers we're full */ - printk(kern_err "hysdn card%d: capi-buffer overrun! ", - card->myid); - capi_ctr_suspend_output(ctrl); - } - cinfo->tx_skb = skb; - spin_unlock_irq(&cinfo->lock); - schedule_work(&card->irq_queue); -} - -/*********************************************************** -hycapi_register_internal - -send down the capi_register-command to the controller. -this functions will also be used if the adapter has been rebooted to -re-register any applications in the private list. 
- -************************************************************/ - -static void -hycapi_register_internal(struct capi_ctr *ctrl, __u16 appl, - capi_register_params *rp) -{ - char extfeaturedefaults[] = "49 /0/0/0/0,*/1,*/2,*/3,*/4,*/5,*/6,*/7,*/8,*/9,*"; - hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); - hysdn_card *card = cinfo->card; - struct sk_buff *skb; - __u16 len; - __u8 _command = 0xa0, _subcommand = 0x80; - __u16 messagenumber = 0x0000; - __u16 messagebuffersize = 0; - int slen = strlen(extfeaturedefaults); -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_register_appl "); -#endif - messagebuffersize = rp->level3cnt * rp->datablkcnt * rp->datablklen; - - len = capi_msg_baselen + 8 + slen + 1; - if (!(skb = alloc_skb(len, gfp_atomic))) { - printk(kern_err "hysdn card%d: memory squeeze in hycapi_register_appl ", - card->myid); - return; - } - skb_put_data(skb, &len, sizeof(__u16)); - skb_put_data(skb, &appl, sizeof(__u16)); - skb_put_data(skb, &_command, sizeof(__u8)); - skb_put_data(skb, &_subcommand, sizeof(__u8)); - skb_put_data(skb, &messagenumber, sizeof(__u16)); - skb_put_data(skb, &messagebuffersize, sizeof(__u16)); - skb_put_data(skb, &(rp->level3cnt), sizeof(__u16)); - skb_put_data(skb, &(rp->datablkcnt), sizeof(__u16)); - skb_put_data(skb, &(rp->datablklen), sizeof(__u16)); - skb_put_data(skb, extfeaturedefaults, slen); - hycapi_applications[appl - 1].ctrl_mask |= (1 << (ctrl->cnr - 1)); - hycapi_send_message(ctrl, skb); -} - -/************************************************************ -hycapi_restart_internal - -after an adapter has been rebootet, re-register all applications and -send a listen_req (if there has been such a thing ) - -*************************************************************/ - -static void hycapi_restart_internal(struct capi_ctr *ctrl) -{ - int i; - struct sk_buff *skb; -#ifdef hycapi_printfnames - printk(kern_warning "hysdn: hycapi_restart_internal"); -#endif - for (i = 0; i < capi_maxappl; i++) { - 
if (_hycapi_appcheck(i + 1, ctrl->cnr) == 1) { - hycapi_register_internal(ctrl, i + 1, - &hycapi_applications[i].rp); - if (hycapi_applications[i].listen_req[ctrl->cnr - 1]) { - skb = skb_copy(hycapi_applications[i].listen_req[ctrl->cnr - 1], gfp_atomic); - hycapi_sendmsg_internal(ctrl, skb); - } - } - } -} - -/************************************************************* -register an application. -error-checking is done for capi-compliance. - -the application is recorded in the internal list. -*************************************************************/ - -static void -hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl, - capi_register_params *rp) -{ - int maxlogicalconnections = 0, maxbdatablocks = 0, maxbdatalen = 0; - hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); - hysdn_card *card = cinfo->card; - int chk = _hycapi_appcheck(appl, ctrl->cnr); - if (chk < 0) { - return; - } - if (chk == 1) { - printk(kern_info "hysdn: apl %d already registered ", appl); - return; - } - maxbdatablocks = rp->datablkcnt > capi_maxdatawindow ? capi_maxdatawindow : rp->datablkcnt; - rp->datablkcnt = maxbdatablocks; - maxbdatalen = rp->datablklen < 1024 ? 1024 : rp->datablklen; - rp->datablklen = maxbdatalen; - - maxlogicalconnections = rp->level3cnt; - if (maxlogicalconnections < 0) { - maxlogicalconnections = card->bchans * -maxlogicalconnections; - } - if (maxlogicalconnections == 0) { - maxlogicalconnections = card->bchans; - } - - rp->level3cnt = maxlogicalconnections; - memcpy(&hycapi_applications[appl - 1].rp, - rp, sizeof(capi_register_params)); -} - -/********************************************************************* - -hycapi_release_internal - -send down a capi_release to the controller. 
-*********************************************************************/ - -static void hycapi_release_internal(struct capi_ctr *ctrl, __u16 appl) -{ - hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); - hysdn_card *card = cinfo->card; - struct sk_buff *skb; - __u16 len; - __u8 _command = 0xa1, _subcommand = 0x80; - __u16 messagenumber = 0x0000; - - capilib_release_appl(&cinfo->ncci_head, appl); - -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_release_appl "); -#endif - len = capi_msg_baselen; - if (!(skb = alloc_skb(len, gfp_atomic))) { - printk(kern_err "hysdn card%d: memory squeeze in hycapi_register_appl ", - card->myid); - return; - } - skb_put_data(skb, &len, sizeof(__u16)); - skb_put_data(skb, &appl, sizeof(__u16)); - skb_put_data(skb, &_command, sizeof(__u8)); - skb_put_data(skb, &_subcommand, sizeof(__u8)); - skb_put_data(skb, &messagenumber, sizeof(__u16)); - hycapi_send_message(ctrl, skb); - hycapi_applications[appl - 1].ctrl_mask &= ~(1 << (ctrl->cnr - 1)); -} - -/****************************************************************** -hycapi_release_appl - -release the application from the internal list an remove it's -registration at controller-level -******************************************************************/ - -static void -hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl) -{ - int chk; - - chk = _hycapi_appcheck(appl, ctrl->cnr); - if (chk < 0) { - printk(kern_err "hycapi: releasing invalid appl %d on controller %d ", appl, ctrl->cnr); - return; - } - if (hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]) { - kfree_skb(hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1]); - hycapi_applications[appl - 1].listen_req[ctrl->cnr - 1] = null; - } - if (chk == 1) - { - hycapi_release_internal(ctrl, appl); - } -} - - -/************************************************************** -kill a single controller. 
-**************************************************************/ - -int hycapi_capi_release(hysdn_card *card) -{ - hycapictrl_info *cinfo = card->hyctrlinfo; - struct capi_ctr *ctrl; -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_capi_release "); -#endif - if (cinfo) { - ctrl = &cinfo->capi_ctrl; - hycapi_remove_ctr(ctrl); - } - return 0; -} - -/************************************************************** -hycapi_capi_stop - -stop capi-output on a card. (e.g. during reboot) -***************************************************************/ - -int hycapi_capi_stop(hysdn_card *card) -{ - hycapictrl_info *cinfo = card->hyctrlinfo; - struct capi_ctr *ctrl; -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_capi_stop "); -#endif - if (cinfo) { - ctrl = &cinfo->capi_ctrl; -/* ctrl->suspend_output(ctrl); */ - capi_ctr_down(ctrl); - } - return 0; -} - -/*************************************************************** -hycapi_send_message - -send a message to the controller. - -messages are parsed for their command/subcommand-type, and appropriate -action's are performed. - -note that we have to muck around with a 64bit-data_req as there are -firmware-releases that do not check the msglen-indication! - -***************************************************************/ - -static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) -{ - __u16 appl_id; - int _len, _len2; - __u8 msghead[64]; - hycapictrl_info *cinfo = ctrl->driverdata; - u16 retval = capi_noerror; - - appl_id = capimsg_appid(skb->data); - switch (_hycapi_appcheck(appl_id, ctrl->cnr)) - { - case 0: -/* printk(kern_info "need to register "); */ - hycapi_register_internal(ctrl, - appl_id, - &(hycapi_applications[appl_id - 1].rp)); - break; - case 1: - break; - default: - printk(kern_err "hycapi: controller mixup! 
"); - retval = capi_illappnr; - goto out; - } - switch (capimsg_cmd(skb->data)) { - case capi_disconnect_b3_resp: - capilib_free_ncci(&cinfo->ncci_head, appl_id, - capimsg_ncci(skb->data)); - break; - case capi_data_b3_req: - _len = capimsg_len(skb->data); - if (_len > 22) { - _len2 = _len - 22; - skb_copy_from_linear_data(skb, msghead, 22); - skb_copy_to_linear_data_offset(skb, _len2, - msghead, 22); - skb_pull(skb, _len2); - capimsg_setlen(skb->data, 22); - retval = capilib_data_b3_req(&cinfo->ncci_head, - capimsg_appid(skb->data), - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - } - break; - case capi_listen_req: - if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]) - { - kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]); - hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = null; - } - if (!(hycapi_applications[appl_id -1].listen_req[ctrl->cnr - 1] = skb_copy(skb, gfp_atomic))) - { - printk(kern_err "hysdn: memory squeeze in private_listen "); - } - break; - default: - break; - } -out: - if (retval == capi_noerror) - hycapi_sendmsg_internal(ctrl, skb); - else - dev_kfree_skb_any(skb); - - return retval; -} - -static int hycapi_proc_show(struct seq_file *m, void *v) -{ - struct capi_ctr *ctrl = m->private; - hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); - hysdn_card *card = cinfo->card; - char *s; - - seq_printf(m, "%-16s %s ", "name", cinfo->cardname); - seq_printf(m, "%-16s 0x%x ", "io", card->iobase); - seq_printf(m, "%-16s %d ", "irq", card->irq); - - switch (card->brdtype) { - case bd_pccard: s = "hysdn hycard"; break; - case bd_ergo: s = "hysdn ergo2"; break; - case bd_metro: s = "hysdn metro4"; break; - case bd_champ2: s = "hysdn champ2"; break; - case bd_plexus: s = "hysdn plexus30"; break; - default: s = "???"; break; - } - seq_printf(m, "%-16s %s ", "type", s); - if ((s = cinfo->version[ver_driver]) != null) - seq_printf(m, "%-16s %s ", "ver_driver", s); - if ((s = 
cinfo->version[ver_cardtype]) != null) - seq_printf(m, "%-16s %s ", "ver_cardtype", s); - if ((s = cinfo->version[ver_serial]) != null) - seq_printf(m, "%-16s %s ", "ver_serial", s); - - seq_printf(m, "%-16s %s ", "cardname", cinfo->cardname); - - return 0; -} - -/************************************************************** -hycapi_load_firmware - -this does not load any firmware, but the callback somehow is needed -on capi-interface registration. - -**************************************************************/ - -static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) -{ -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_load_firmware "); -#endif - return 0; -} - - -static char *hycapi_procinfo(struct capi_ctr *ctrl) -{ - hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata); -#ifdef hycapi_printfnames - printk(kern_notice "%s ", __func__); -#endif - if (!cinfo) - return ""; - sprintf(cinfo->infobuf, "%s %s 0x%x %d %s", - cinfo->cardname[0] ? cinfo->cardname : "-", - cinfo->version[ver_driver] ? cinfo->version[ver_driver] : "-", - cinfo->card ? cinfo->card->iobase : 0x0, - cinfo->card ? cinfo->card->irq : 0, - hycapi_revision - ); - return cinfo->infobuf; -} - -/****************************************************************** -hycapi_rx_capipkt - -receive a capi-message. - -all b3_data_ind are converted to 64k-extension compatible format. -new nccis are created if necessary. 
-*******************************************************************/ - -void -hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, unsigned short len) -{ - struct sk_buff *skb; - hycapictrl_info *cinfo = card->hyctrlinfo; - struct capi_ctr *ctrl; - __u16 applid; - __u16 msglen, info; - __u16 len2, capicmd; - __u32 cp64[2] = {0, 0}; -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_rx_capipkt "); -#endif - if (!cinfo) { - return; - } - ctrl = &cinfo->capi_ctrl; - if (len < capi_msg_baselen) { - printk(kern_err "hysdn card%d: invalid capi-message, length %d! ", - card->myid, len); - return; - } - msglen = capimsg_len(buf); - applid = capimsg_appid(buf); - capicmd = capimsg_cmd(buf); - - if ((capicmd == capi_data_b3_ind) && (msglen < 30)) { - len2 = len + (30 - msglen); - if (!(skb = alloc_skb(len2, gfp_atomic))) { - printk(kern_err "hysdn card%d: incoming packet dropped ", - card->myid); - return; - } - skb_put_data(skb, buf, msglen); - skb_put_data(skb, cp64, 2 * sizeof(__u32)); - skb_put_data(skb, buf + msglen, len - msglen); - capimsg_setlen(skb->data, 30); - } else { - if (!(skb = alloc_skb(len, gfp_atomic))) { - printk(kern_err "hysdn card%d: incoming packet dropped ", - card->myid); - return; - } - skb_put_data(skb, buf, len); - } - switch (capimsg_cmd(skb->data)) - { - case capi_connect_b3_conf: -/* check info-field for error-indication: */ - info = capimsg_u16(skb->data, 12); - switch (info) - { - case 0: - capilib_new_ncci(&cinfo->ncci_head, applid, capimsg_ncci(skb->data), - hycapi_applications[applid - 1].rp.datablkcnt); - - break; - case 0x0001: - printk(kern_err "hysdn card%d: ncpi not supported by current " - "protocol. ncpi ignored. 
", card->myid); - break; - case 0x2001: - printk(kern_err "hysdn card%d: message not supported in" - " current state ", card->myid); - break; - case 0x2002: - printk(kern_err "hysdn card%d: invalid plci ", card->myid); - break; - case 0x2004: - printk(kern_err "hysdn card%d: out of ncci ", card->myid); - break; - case 0x3008: - printk(kern_err "hysdn card%d: ncpi not supported ", - card->myid); - break; - default: - printk(kern_err "hysdn card%d: info in connect_b3_conf: %d ", - card->myid, info); - break; - } - break; - case capi_connect_b3_ind: - capilib_new_ncci(&cinfo->ncci_head, applid, - capimsg_ncci(skb->data), - hycapi_applications[applid - 1].rp.datablkcnt); - break; - case capi_data_b3_conf: - capilib_data_b3_conf(&cinfo->ncci_head, applid, - capimsg_ncci(skb->data), - capimsg_msgid(skb->data)); - break; - default: - break; - } - capi_ctr_handle_message(ctrl, applid, skb); -} - -/****************************************************************** -hycapi_tx_capiack - -internally acknowledge a msg sent. this will remove the msg from the -internal queue. - -*******************************************************************/ - -void hycapi_tx_capiack(hysdn_card *card) -{ - hycapictrl_info *cinfo = card->hyctrlinfo; -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_tx_capiack "); -#endif - if (!cinfo) { - return; - } - spin_lock_irq(&cinfo->lock); - kfree_skb(cinfo->skbs[cinfo->out_idx]); /* free skb */ - cinfo->skbs[cinfo->out_idx++] = null; - if (cinfo->out_idx >= hysdn_max_capi_skb) - cinfo->out_idx = 0; /* wrap around */ - - if (cinfo->sk_count-- == hysdn_max_capi_skb) /* dec usage count */ - capi_ctr_resume_output(&cinfo->capi_ctrl); - spin_unlock_irq(&cinfo->lock); -} - -/*************************************************************** -hycapi_tx_capiget(hysdn_card *card) - -this is called when polling for messages to send. 
- -****************************************************************/ - -struct sk_buff * -hycapi_tx_capiget(hysdn_card *card) -{ - hycapictrl_info *cinfo = card->hyctrlinfo; - if (!cinfo) { - return (struct sk_buff *)null; - } - if (!cinfo->sk_count) - return (struct sk_buff *)null; /* nothing available */ - - return (cinfo->skbs[cinfo->out_idx]); /* next packet to send */ -} - - -/********************************************************** -int hycapi_init() - -attach the capi-driver to the kernel-capi. - -***********************************************************/ - -int hycapi_init(void) -{ - int i; - for (i = 0; i < capi_maxappl; i++) { - memset(&(hycapi_applications[i]), 0, sizeof(hycapi_appl)); - } - return (0); -} - -/************************************************************** -hycapi_cleanup(void) - -detach the capi-driver to the kernel-capi. actually this should -free some more ressources. do that later. -**************************************************************/ - -void -hycapi_cleanup(void) -{ -} - -/******************************************************************** -hycapi_capi_create(hysdn_card *card) - -attach the card with its capi-ctrl. -*********************************************************************/ - -static void hycapi_fill_profile(hysdn_card *card) -{ - hycapictrl_info *cinfo = null; - struct capi_ctr *ctrl = null; - cinfo = card->hyctrlinfo; - if (!cinfo) return; - ctrl = &cinfo->capi_ctrl; - strcpy(ctrl->manu, "hypercope"); - ctrl->version.majorversion = 2; - ctrl->version.minorversion = 0; - ctrl->version.majormanuversion = 3; - ctrl->version.minormanuversion = 2; - ctrl->profile.ncontroller = card->myid; - ctrl->profile.nbchannel = card->bchans; - ctrl->profile.goptions = global_option_internal_controller | - global_option_b_channel_operation; - ctrl->profile.support1 = b1_prot_64kbit_hdlc | - (card->faxchans ? b1_prot_t30 : 0) | - b1_prot_64kbit_transparent; - ctrl->profile.support2 = b2_prot_iso7776 | - (card->faxchans ? 
b2_prot_t30 : 0) | - b2_prot_transparent; - ctrl->profile.support3 = b3_prot_transparent | - b3_prot_t90nl | - (card->faxchans ? b3_prot_t30 : 0) | - (card->faxchans ? b3_prot_t30ext : 0) | - b3_prot_iso8208; -} - -int -hycapi_capi_create(hysdn_card *card) -{ - hycapictrl_info *cinfo = null; - struct capi_ctr *ctrl = null; - int retval; -#ifdef hycapi_printfnames - printk(kern_notice "hycapi_capi_create "); -#endif - if ((hycapi_enable & (1 << card->myid)) == 0) { - return 1; - } - if (!card->hyctrlinfo) { - cinfo = kzalloc(sizeof(hycapictrl_info), gfp_atomic); - if (!cinfo) { - printk(kern_warning "hysdn: no memory for capi-ctrl. "); - return -enomem; - } - card->hyctrlinfo = cinfo; - cinfo->card = card; - spin_lock_init(&cinfo->lock); - init_list_head(&cinfo->ncci_head); - - switch (card->brdtype) { - case bd_pccard: strcpy(cinfo->cardname, "hysdn hycard"); break; - case bd_ergo: strcpy(cinfo->cardname, "hysdn ergo2"); break; - case bd_metro: strcpy(cinfo->cardname, "hysdn metro4"); break; - case bd_champ2: strcpy(cinfo->cardname, "hysdn champ2"); break; - case bd_plexus: strcpy(cinfo->cardname, "hysdn plexus30"); break; - default: strcpy(cinfo->cardname, "hysdn ???"); break; - } - - ctrl = &cinfo->capi_ctrl; - ctrl->driver_name = "hycapi"; - ctrl->driverdata = cinfo; - ctrl->register_appl = hycapi_register_appl; - ctrl->release_appl = hycapi_release_appl; - ctrl->send_message = hycapi_send_message; - ctrl->load_firmware = hycapi_load_firmware; - ctrl->reset_ctr = hycapi_reset_ctr; - ctrl->procinfo = hycapi_procinfo; - ctrl->proc_show = hycapi_proc_show; - strcpy(ctrl->name, cinfo->cardname); - ctrl->owner = this_module; - - retval = attach_capi_ctr(ctrl); - if (retval) { - printk(kern_err "hycapi: attach controller failed. 
"); - return -ebusy; - } - /* fill in the blanks: */ - hycapi_fill_profile(card); - capi_ctr_ready(ctrl); - } else { - /* resume output on stopped ctrl */ - ctrl = &card->hyctrlinfo->capi_ctrl; - hycapi_fill_profile(card); - capi_ctr_ready(ctrl); - hycapi_restart_internal(ctrl); -/* ctrl->resume_output(ctrl); */ - } - return 0; -} diff --git a/drivers/staging/isdn/hysdn/hysdn_boot.c b/drivers/staging/isdn/hysdn/hysdn_boot.c --- a/drivers/staging/isdn/hysdn/hysdn_boot.c +++ /dev/null -/* $id: hysdn_boot.c,v 1.4.6.4 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards - * specific routines for booting and pof handling - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/vmalloc.h> -#include <linux/slab.h> -#include <linux/uaccess.h> - -#include "hysdn_defs.h" -#include "hysdn_pof.h" - -/********************************/ -/* defines for pof read handler */ -/********************************/ -#define pof_read_file_head 0 -#define pof_read_tag_head 1 -#define pof_read_tag_data 2 - -/************************************************************/ -/* definition of boot specific data area. this data is only */ -/* needed during boot and so allocated dynamically. 
*/ -/************************************************************/ -struct boot_data { - unsigned short cryptor; /* for use with decrypt function */ - unsigned short nrecs; /* records remaining in file */ - unsigned char pof_state;/* actual state of read handler */ - unsigned char is_crypted;/* card data is crypted */ - int bufsize; /* actual number of bytes bufferd */ - int last_error; /* last occurred error */ - unsigned short pof_recid;/* actual pof recid */ - unsigned long pof_reclen;/* total length of pof record data */ - unsigned long pof_recoffset;/* actual offset inside pof record */ - union { - unsigned char bootbuf[boot_buf_size];/* buffer as byte count */ - tpofrechdr pofrechdr; /* header for actual record/chunk */ - tpoffilehdr poffilehdr; /* header from pof file */ - tpoftimestamp poftime; /* time information */ - } buf; -}; - -/*****************************************************/ -/* start decryption of successive pof file chuncks. */ -/* */ -/* to be called at start of pof file reading, */ -/* before starting any decryption on any pof record. */ -/*****************************************************/ -static void -startdecryption(struct boot_data *boot) -{ - boot->cryptor = crypt_startterm; -} /* startdecryption */ - - -/***************************************************************/ -/* decrypt complete bootbuf */ -/* note: decryption must be applied to all or none boot tags - */ -/* to hi and lo boot loader and (all) seq tags, because */ -/* global cryptor is started for whole pof. */ -/***************************************************************/ -static void -decryptbuf(struct boot_data *boot, int cnt) -{ - unsigned char *bufp = boot->buf.bootbuf; - - while (cnt--) { - boot->cryptor = (boot->cryptor >> 1) ^ ((boot->cryptor & 1u) ? 
crypt_feedterm : 0); - *bufp++ ^= (unsigned char)boot->cryptor; - } -} /* decryptbuf */ - -/********************************************************************************/ -/* pof_handle_data executes the required actions dependent on the active record */ -/* id. if successful 0 is returned, a negative value shows an error. */ -/********************************************************************************/ -static int -pof_handle_data(hysdn_card *card, int datlen) -{ - struct boot_data *boot = card->boot; /* pointer to boot specific data */ - long l; - unsigned char *imgp; - int img_len; - - /* handle the different record types */ - switch (boot->pof_recid) { - - case tag_timestmp: - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "pof created %s", boot->buf.poftime.datetimetext); - break; - - case tag_cbootdta: - decryptbuf(boot, datlen); /* we need to encrypt the buffer */ - /* fall through */ - case tag_bootdta: - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "pof got %s len=%d offs=0x%lx", - (boot->pof_recid == tag_cbootdta) ? "cbootdata" : "bootdta", - datlen, boot->pof_recoffset); - - if (boot->pof_reclen != pof_boot_loader_total_size) { - boot->last_error = epof_bad_img_size; /* invalid length */ - return (boot->last_error); - } - imgp = boot->buf.bootbuf; /* start of buffer */ - img_len = datlen; /* maximum length to transfer */ - - l = pof_boot_loader_off_in_page - - (boot->pof_recoffset & (pof_boot_loader_page_size - 1)); - if (l > 0) { - /* buffer needs to be truncated */ - imgp += l; /* advance pointer */ - img_len -= l; /* adjust len */ - } - /* at this point no special handling for data wrapping over buffer */ - /* is necessary, because the boot image always will be adjusted to */ - /* match a page boundary inside the buffer. 
*/ - /* the buffer for the boot image on the card is filled in 2 cycles */ - /* first the 1024 hi-words are put in the buffer, then the low 1024 */ - /* word are handled in the same way with different offset. */ - - if (img_len > 0) { - /* data available for copy */ - if ((boot->last_error = - card->writebootimg(card, imgp, - (boot->pof_recoffset > pof_boot_loader_page_size) ? 2 : 0)) < 0) - return (boot->last_error); - } - break; /* end of case boot image hi/lo */ - - case tag_cabsdata: - decryptbuf(boot, datlen); /* we need to encrypt the buffer */ - /* fall through */ - case tag_absdata: - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "pof got %s len=%d offs=0x%lx", - (boot->pof_recid == tag_cabsdata) ? "cabsdata" : "absdata", - datlen, boot->pof_recoffset); - - if ((boot->last_error = card->writebootseq(card, boot->buf.bootbuf, datlen)) < 0) - return (boot->last_error); /* error writing data */ - - if (boot->pof_recoffset + datlen >= boot->pof_reclen) - return (card->waitpofready(card)); /* data completely spooled, wait for ready */ - - break; /* end of case boot seq data */ - - default: - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "pof got data(id=0x%lx) len=%d offs=0x%lx", boot->pof_recid, - datlen, boot->pof_recoffset); - - break; /* simply skip record */ - } /* switch boot->pof_recid */ - - return (0); -} /* pof_handle_data */ - - -/******************************************************************************/ -/* pof_write_buffer is called when the buffer has been filled with the needed */ -/* number of data bytes. the number delivered is additionally supplied for */ -/* verification. the functions handles the data and returns the needed number */ -/* of bytes for the next action. if the returned value is 0 or less an error */ -/* occurred and booting must be aborted. 
*/ -/******************************************************************************/ -int -pof_write_buffer(hysdn_card *card, int datlen) -{ - struct boot_data *boot = card->boot; /* pointer to boot specific data */ - - if (!boot) - return (-efault); /* invalid call */ - if (boot->last_error < 0) - return (boot->last_error); /* repeated error */ - - if (card->debug_flags & log_pof_write) - hysdn_addlog(card, "pof write: got %d bytes ", datlen); - - switch (boot->pof_state) { - case pof_read_file_head: - if (card->debug_flags & log_pof_write) - hysdn_addlog(card, "pof write: checking file header"); - - if (datlen != sizeof(tpoffilehdr)) { - boot->last_error = -epof_internal; - break; - } - if (boot->buf.poffilehdr.magic != tagfilemagic) { - boot->last_error = -epof_bad_magic; - break; - } - /* setup the new state and vars */ - boot->nrecs = (unsigned short)(boot->buf.poffilehdr.n_pofrecs); /* limited to 65535 */ - boot->pof_state = pof_read_tag_head; /* now start with single tags */ - boot->last_error = sizeof(tpofrechdr); /* new length */ - break; - - case pof_read_tag_head: - if (card->debug_flags & log_pof_write) - hysdn_addlog(card, "pof write: checking tag header"); - - if (datlen != sizeof(tpofrechdr)) { - boot->last_error = -epof_internal; - break; - } - boot->pof_recid = boot->buf.pofrechdr.pofrecid; /* actual pof recid */ - boot->pof_reclen = boot->buf.pofrechdr.pofrecdatalen; /* total length */ - boot->pof_recoffset = 0; /* no starting offset */ - - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "pof: got record id=0x%lx length=%ld ", - boot->pof_recid, boot->pof_reclen); - - boot->pof_state = pof_read_tag_data; /* now start with tag data */ - if (boot->pof_reclen < boot_buf_size) - boot->last_error = boot->pof_reclen; /* limit size */ - else - boot->last_error = boot_buf_size; /* maximum */ - - if (!boot->last_error) { /* no data inside record */ - boot->pof_state = pof_read_tag_head; /* now start with single tags */ - boot->last_error = 
sizeof(tpofrechdr); /* new length */ - } - break; - - case pof_read_tag_data: - if (card->debug_flags & log_pof_write) - hysdn_addlog(card, "pof write: getting tag data"); - - if (datlen != boot->last_error) { - boot->last_error = -epof_internal; - break; - } - if ((boot->last_error = pof_handle_data(card, datlen)) < 0) - return (boot->last_error); /* an error occurred */ - boot->pof_recoffset += datlen; - if (boot->pof_recoffset >= boot->pof_reclen) { - boot->pof_state = pof_read_tag_head; /* now start with single tags */ - boot->last_error = sizeof(tpofrechdr); /* new length */ - } else { - if (boot->pof_reclen - boot->pof_recoffset < boot_buf_size) - boot->last_error = boot->pof_reclen - boot->pof_recoffset; /* limit size */ - else - boot->last_error = boot_buf_size; /* maximum */ - } - break; - - default: - boot->last_error = -epof_internal; /* unknown state */ - break; - } /* switch (boot->pof_state) */ - - return (boot->last_error); -} /* pof_write_buffer */ - - -/*******************************************************************************/ -/* pof_write_open is called when an open for boot on the cardlog device occurs. */ -/* the function returns the needed number of bytes for the next operation. if */ -/* the returned number is less or equal 0 an error specified by this code */ -/* occurred. 
additionally the pointer to the buffer data area is set on success */ -/*******************************************************************************/ -int -pof_write_open(hysdn_card *card, unsigned char **bufp) -{ - struct boot_data *boot; /* pointer to boot specific data */ - - if (card->boot) { - if (card->debug_flags & log_pof_open) - hysdn_addlog(card, "pof open: already opened for boot"); - return (-err_already_boot); /* boot already active */ - } - /* error no mem available */ - if (!(boot = kzalloc(sizeof(struct boot_data), gfp_kernel))) { - if (card->debug_flags & log_mem_err) - hysdn_addlog(card, "pof open: unable to allocate mem"); - return (-efault); - } - card->boot = boot; - card->state = card_state_booting; - - card->stopcard(card); /* first stop the card */ - if (card->testram(card)) { - if (card->debug_flags & log_pof_open) - hysdn_addlog(card, "pof open: dpram test failure"); - boot->last_error = -err_board_dpram; - card->state = card_state_booterr; /* show boot error */ - return (boot->last_error); - } - boot->bufsize = 0; /* buffer is empty */ - boot->pof_state = pof_read_file_head; /* read file header */ - startdecryption(boot); /* if pof file should be encrypted */ - - if (card->debug_flags & log_pof_open) - hysdn_addlog(card, "pof open: success"); - - *bufp = boot->buf.bootbuf; /* point to buffer */ - return (sizeof(tpoffilehdr)); -} /* pof_write_open */ - -/********************************************************************************/ -/* pof_write_close is called when an close of boot on the cardlog device occurs. */ -/* the return value must be 0 if everything has happened as desired. 
*/ -/********************************************************************************/ -int -pof_write_close(hysdn_card *card) -{ - struct boot_data *boot = card->boot; /* pointer to boot specific data */ - - if (!boot) - return (-efault); /* invalid call */ - - card->boot = null; /* no boot active */ - kfree(boot); - - if (card->state == card_state_run) - card->set_errlog_state(card, 1); /* activate error log */ - - if (card->debug_flags & log_pof_open) - hysdn_addlog(card, "pof close: success"); - - return (0); -} /* pof_write_close */ - -/*********************************************************************************/ -/* evalsysrtokdata checks additional records delivered with the sysready message */ -/* when pof has been booted. a return value of 0 is used if no error occurred. */ -/*********************************************************************************/ -int -evalsysrtokdata(hysdn_card *card, unsigned char *cp, int len) -{ - u_char *p; - u_char crc; - - if (card->debug_flags & log_pof_record) - hysdn_addlog(card, "sysready token data length %d", len); - - if (len < 2) { - hysdn_addlog(card, "sysready token data to short"); - return (1); - } - for (p = cp, crc = 0; p < (cp + len - 2); p++) - if ((crc & 0x80)) - crc = (((u_char) (crc << 1)) + 1) + *p; - else - crc = ((u_char) (crc << 1)) + *p; - crc = ~crc; - if (crc != *(cp + len - 1)) { - hysdn_addlog(card, "sysready token data invalid crc"); - return (1); - } - len--; /* don't check crc byte */ - while (len > 0) { - - if (*cp == sysr_tok_end) - return (0); /* end of token stream */ - - if (len < (*(cp + 1) + 2)) { - hysdn_addlog(card, "token 0x%x invalid length %d", *cp, *(cp + 1)); - return (1); - } - switch (*cp) { - case sysr_tok_b_chan: /* 1 */ - if (*(cp + 1) != 1) - return (1); /* length invalid */ - card->bchans = *(cp + 2); - break; - - case sysr_tok_fax_chan: /* 2 */ - if (*(cp + 1) != 1) - return (1); /* length invalid */ - card->faxchans = *(cp + 2); - break; - - case 
sysr_tok_mac_addr: /* 3 */ - if (*(cp + 1) != 6) - return (1); /* length invalid */ - memcpy(card->mac_addr, cp + 2, 6); - break; - - default: - hysdn_addlog(card, "unknown token 0x%02x length %d", *cp, *(cp + 1)); - break; - } - len -= (*(cp + 1) + 2); /* adjust len */ - cp += (*(cp + 1) + 2); /* and pointer */ - } - - hysdn_addlog(card, "no end token found"); - return (1); -} /* evalsysrtokdata */ diff --git a/drivers/staging/isdn/hysdn/hysdn_defs.h b/drivers/staging/isdn/hysdn/hysdn_defs.h --- a/drivers/staging/isdn/hysdn/hysdn_defs.h +++ /dev/null -/* $id: hysdn_defs.h,v 1.5.6.3 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards - * global definitions and exported vars and functions. - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#ifndef hysdn_defs_h -#define hysdn_defs_h - -#include <linux/hysdn_if.h> -#include <linux/interrupt.h> -#include <linux/workqueue.h> -#include <linux/skbuff.h> - -#include "ince1pc.h" - -#ifdef config_hysdn_capi -#include <linux/capi.h> -#include <linux/isdn/capicmd.h> -#include <linux/isdn/capiutil.h> -#include <linux/isdn/capilli.h> - -/***************************/ -/* capi-profile values. 
*/ -/***************************/ - -#define global_option_internal_controller 0x0001 -#define global_option_external_controller 0x0002 -#define global_option_handset 0x0004 -#define global_option_dtmf 0x0008 -#define global_option_suppl_services 0x0010 -#define global_option_channel_allocation 0x0020 -#define global_option_b_channel_operation 0x0040 - -#define b1_prot_64kbit_hdlc 0x0001 -#define b1_prot_64kbit_transparent 0x0002 -#define b1_prot_v110_asynch 0x0004 -#define b1_prot_v110_synch 0x0008 -#define b1_prot_t30 0x0010 -#define b1_prot_64kbit_inv_hdlc 0x0020 -#define b1_prot_56kbit_transparent 0x0040 - -#define b2_prot_iso7776 0x0001 -#define b2_prot_transparent 0x0002 -#define b2_prot_sdlc 0x0004 -#define b2_prot_lapd 0x0008 -#define b2_prot_t30 0x0010 -#define b2_prot_ppp 0x0020 -#define b2_prot_transparent_ignore_b1_framing_errors 0x0040 - -#define b3_prot_transparent 0x0001 -#define b3_prot_t90nl 0x0002 -#define b3_prot_iso8208 0x0004 -#define b3_prot_x25_dce 0x0008 -#define b3_prot_t30 0x0010 -#define b3_prot_t30ext 0x0020 - -#define hysdn_maxversion 8 - -/* number of sendbuffers in capi-queue */ -#define hysdn_max_capi_skb 20 - -#endif /* config_hysdn_capi*/ - -/************************************************/ -/* constants and bits for debugging/log outputs */ -/************************************************/ -#define log_max_linelen 120 -#define deb_out_syslog 0x80000000 /* output to syslog instead of proc fs */ -#define log_mem_err 0x00000001 /* log memory errors like kmalloc failure */ -#define log_pof_open 0x00000010 /* log pof open and close activities */ -#define log_pof_record 0x00000020 /* log pof record parser */ -#define log_pof_write 0x00000040 /* log detailed pof write operation */ -#define log_pof_card 0x00000080 /* log pof related card functions */ -#define log_cnf_line 0x00000100 /* all conf lines are put to procfs */ -#define log_cnf_data 0x00000200 /* non comment conf lines are shown with channel */ -#define log_cnf_misc 
0x00000400 /* additional conf line debug outputs */ -#define log_sched_asyn 0x00001000 /* debug schedulers async tx routines */ -#define log_proc_open 0x00100000 /* open and close from procfs are logged */ -#define log_proc_all 0x00200000 /* all actions from procfs are logged */ -#define log_net_init 0x00010000 /* network init and deinit logging */ - -#define def_deb_flags 0x7fff000f /* everything is logged to procfs */ - -/**********************************/ -/* proc filesystem name constants */ -/**********************************/ -#define proc_subdir_name "hysdn" -#define proc_conf_basename "cardconf" -#define proc_log_basename "cardlog" - -/***********************************/ -/* pci 32 bit parms for io and mem */ -/***********************************/ -#define pci_reg_plx_mem_base 0 -#define pci_reg_plx_io_base 1 -#define pci_reg_memory_base 3 - -/**************/ -/* card types */ -/**************/ -#define bd_none 0u -#define bd_performance 1u -#define bd_value 2u -#define bd_pccard 3u -#define bd_ergo 4u -#define bd_metro 5u -#define bd_champ2 6u -#define bd_plexus 7u - -/******************************************************/ -/* defined states for cards shown by reading cardconf */ -/******************************************************/ -#define card_state_unused 0 /* never been used or booted */ -#define card_state_booting 1 /* booting is in progress */ -#define card_state_booterr 2 /* a previous boot was aborted */ -#define card_state_run 3 /* card is active */ - -/*******************************/ -/* defines for error_log_state */ -/*******************************/ -#define errlog_state_off 0 /* error log is switched off, nothing to do */ -#define errlog_state_on 1 /* error log is switched on, wait for data */ -#define errlog_state_start 2 /* start error logging */ -#define errlog_state_stop 3 /* stop error logging */ - -/*******************************/ -/* data structure for one card */ -/*******************************/ -typedef struct hysdn_card 
{ - - /* general variables for the cards */ - int myid; /* own driver card id */ - unsigned char bus; /* pci bus the card is connected to */ - unsigned char devfn; /* slot+function bit encoded */ - unsigned short subsysid;/* pci subsystem id */ - unsigned char brdtype; /* type of card */ - unsigned int bchans; /* number of available b-channels */ - unsigned int faxchans; /* number of available fax-channels */ - unsigned char mac_addr[6];/* mac address read from card */ - unsigned int irq; /* interrupt number */ - unsigned int iobase; /* io-port base address */ - unsigned long plxbase; /* plx memory base */ - unsigned long membase; /* dpram memory base */ - unsigned long memend; /* dpram memory end */ - void *dpram; /* mapped dpram */ - int state; /* actual state of card -> card_state_** */ - struct hysdn_card *next; /* pointer to next card */ - - /* data areas for the /proc file system */ - void *proclog; /* pointer to proclog filesystem specific data */ - void *procconf; /* pointer to procconf filesystem specific data */ - - /* debugging and logging */ - unsigned char err_log_state;/* actual error log state of the card */ - unsigned long debug_flags;/* tells what should be debugged and where */ - void (*set_errlog_state) (struct hysdn_card *, int); - - /* interrupt handler + interrupt synchronisation */ - struct work_struct irq_queue; /* interrupt task queue */ - unsigned char volatile irq_enabled;/* interrupt enabled if != 0 */ - unsigned char volatile hw_lock;/* hardware is currently locked -> no access */ - - /* boot process */ - void *boot; /* pointer to boot private data */ - int (*writebootimg) (struct hysdn_card *, unsigned char *, unsigned long); - int (*writebootseq) (struct hysdn_card *, unsigned char *, int); - int (*waitpofready) (struct hysdn_card *); - int (*testram) (struct hysdn_card *); - - /* scheduler for data transfer (only async parts) */ - unsigned char async_data[256];/* async data to be sent (normally for config) */ - unsigned short 
volatile async_len;/* length of data to sent */ - unsigned short volatile async_channel;/* channel number for async transfer */ - int volatile async_busy; /* flag != 0 sending in progress */ - int volatile net_tx_busy; /* a network packet tx is in progress */ - - /* network interface */ - void *netif; /* pointer to network structure */ - - /* init and deinit stopcard for booting, too */ - void (*stopcard) (struct hysdn_card *); - void (*releasehardware) (struct hysdn_card *); - - spinlock_t hysdn_lock; -#ifdef config_hysdn_capi - struct hycapictrl_info { - char cardname[32]; - spinlock_t lock; - int versionlen; - char versionbuf[1024]; - char *version[hysdn_maxversion]; - - char infobuf[128]; /* for function procinfo */ - - struct hysdn_card *card; - struct capi_ctr capi_ctrl; - struct sk_buff *skbs[hysdn_max_capi_skb]; - int in_idx, out_idx; /* indexes to buffer ring */ - int sk_count; /* number of buffers currently in ring */ - struct sk_buff *tx_skb; /* buffer for tx operation */ - - struct list_head ncci_head; - } *hyctrlinfo; -#endif /* config_hysdn_capi */ -} hysdn_card; - -#ifdef config_hysdn_capi -typedef struct hycapictrl_info hycapictrl_info; -#endif /* config_hysdn_capi */ - - -/*****************/ -/* exported vars */ -/*****************/ -extern hysdn_card *card_root; /* pointer to first card */ - - - -/*************************/ -/* im/exported functions */ -/*************************/ - -/* hysdn_procconf.c */ -extern int hysdn_procconf_init(void); /* init proc config filesys */ -extern void hysdn_procconf_release(void); /* deinit proc config filesys */ - -/* hysdn_proclog.c */ -extern int hysdn_proclog_init(hysdn_card *); /* init proc log entry */ -extern void hysdn_proclog_release(hysdn_card *); /* deinit proc log entry */ -extern void hysdn_addlog(hysdn_card *, char *, ...); /* output data to log */ -extern void hysdn_card_errlog(hysdn_card *, terrlogentry *, int); /* output card log */ - -/* boardergo.c */ -extern int ergo_inithardware(hysdn_card 
*card); /* get hardware -> module init */ - -/* hysdn_boot.c */ -extern int pof_write_close(hysdn_card *); /* close proc file after writing pof */ -extern int pof_write_open(hysdn_card *, unsigned char **); /* open proc file for writing pof */ -extern int pof_write_buffer(hysdn_card *, int); /* write boot data to card */ -extern int evalsysrtokdata(hysdn_card *, unsigned char *, int); /* check sysready token data */ - -/* hysdn_sched.c */ -extern int hysdn_sched_tx(hysdn_card *, unsigned char *, - unsigned short volatile *, unsigned short volatile *, - unsigned short); -extern int hysdn_sched_rx(hysdn_card *, unsigned char *, unsigned short, - unsigned short); -extern int hysdn_tx_cfgline(hysdn_card *, unsigned char *, - unsigned short); /* send one cfg line */ - -/* hysdn_net.c */ -extern unsigned int hynet_enable; -extern int hysdn_net_create(hysdn_card *); /* create a new net device */ -extern int hysdn_net_release(hysdn_card *); /* delete the device */ -extern char *hysdn_net_getname(hysdn_card *); /* get name of net interface */ -extern void hysdn_tx_netack(hysdn_card *); /* acknowledge a packet tx */ -extern struct sk_buff *hysdn_tx_netget(hysdn_card *); /* get next network packet */ -extern void hysdn_rx_netpkt(hysdn_card *, unsigned char *, - unsigned short); /* rxed packet from network */ - -#ifdef config_hysdn_capi -extern unsigned int hycapi_enable; -extern int hycapi_capi_create(hysdn_card *); /* create a new capi device */ -extern int hycapi_capi_release(hysdn_card *); /* delete the device */ -extern int hycapi_capi_stop(hysdn_card *card); /* suspend */ -extern void hycapi_rx_capipkt(hysdn_card *card, unsigned char *buf, - unsigned short len); -extern void hycapi_tx_capiack(hysdn_card *card); -extern struct sk_buff *hycapi_tx_capiget(hysdn_card *card); -extern int hycapi_init(void); -extern void hycapi_cleanup(void); -#endif /* config_hysdn_capi */ - -#endif /* hysdn_defs_h */ diff --git a/drivers/staging/isdn/hysdn/hysdn_init.c 
b/drivers/staging/isdn/hysdn/hysdn_init.c --- a/drivers/staging/isdn/hysdn/hysdn_init.c +++ /dev/null -/* $id: hysdn_init.c,v 1.6.6.6 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, init functions. - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/poll.h> -#include <linux/vmalloc.h> -#include <linux/slab.h> -#include <linux/pci.h> - -#include "hysdn_defs.h" - -static struct pci_device_id hysdn_pci_tbl[] = { - { pci_vendor_id_hypercope, pci_device_id_hypercope_plx, - pci_any_id, pci_subdevice_id_hypercope_metro, 0, 0, bd_metro }, - { pci_vendor_id_hypercope, pci_device_id_hypercope_plx, - pci_any_id, pci_subdevice_id_hypercope_champ2, 0, 0, bd_champ2 }, - { pci_vendor_id_hypercope, pci_device_id_hypercope_plx, - pci_any_id, pci_subdevice_id_hypercope_ergo, 0, 0, bd_ergo }, - { pci_vendor_id_hypercope, pci_device_id_hypercope_plx, - pci_any_id, pci_subdevice_id_hypercope_old_ergo, 0, 0, bd_ergo }, - - { } /* terminating entry */ -}; -module_device_table(pci, hysdn_pci_tbl); -module_description("isdn4linux: driver for hysdn cards"); -module_author("werner cornelius"); -module_license("gpl"); - -static int cardmax; /* number of found cards */ -hysdn_card *card_root = null; /* pointer to first card */ -static hysdn_card *card_last = null; /* pointer to first card */ - - -/****************************************************************************/ -/* the module startup and shutdown code. only compiled when used as module. */ -/* using the driver as module is always advisable, because the booting */ -/* image becomes smaller and the driver code is only loaded when needed. */ -/* additionally newer versions may be activated without rebooting. 
*/ -/****************************************************************************/ - -/****************************************************************************/ -/* init_module is called once when the module is loaded to do all necessary */ -/* things like autodetect... */ -/* if the return value of this function is 0 the init has been successful */ -/* and the module is added to the list in /proc/modules, otherwise an error */ -/* is assumed and the module will not be kept in memory. */ -/****************************************************************************/ - -static int hysdn_pci_init_one(struct pci_dev *akt_pcidev, - const struct pci_device_id *ent) -{ - hysdn_card *card; - int rc; - - rc = pci_enable_device(akt_pcidev); - if (rc) - return rc; - - if (!(card = kzalloc(sizeof(hysdn_card), gfp_kernel))) { - printk(kern_err "hysdn: unable to alloc device mem "); - rc = -enomem; - goto err_out; - } - card->myid = cardmax; /* set own id */ - card->bus = akt_pcidev->bus->number; - card->devfn = akt_pcidev->devfn; /* slot + function */ - card->subsysid = akt_pcidev->subsystem_device; - card->irq = akt_pcidev->irq; - card->iobase = pci_resource_start(akt_pcidev, pci_reg_plx_io_base); - card->plxbase = pci_resource_start(akt_pcidev, pci_reg_plx_mem_base); - card->membase = pci_resource_start(akt_pcidev, pci_reg_memory_base); - card->brdtype = bd_none; /* unknown */ - card->debug_flags = def_deb_flags; /* set default debug */ - card->faxchans = 0; /* default no fax channels */ - card->bchans = 2; /* and 2 b-channels */ - card->brdtype = ent->driver_data; - - if (ergo_inithardware(card)) { - printk(kern_warning "hysdn: card at io 0x%04x already in use ", card->iobase); - rc = -ebusy; - goto err_out_card; - } - - cardmax++; - card->next = null; /*end of chain */ - if (card_last) - card_last->next = card; /* pointer to next card */ - else - card_root = card; - card_last = card; /* new chain end */ - - pci_set_drvdata(akt_pcidev, card); - return 0; - 
-err_out_card: - kfree(card); -err_out: - pci_disable_device(akt_pcidev); - return rc; -} - -static void hysdn_pci_remove_one(struct pci_dev *akt_pcidev) -{ - hysdn_card *card = pci_get_drvdata(akt_pcidev); - - pci_set_drvdata(akt_pcidev, null); - - if (card->stopcard) - card->stopcard(card); - -#ifdef config_hysdn_capi - hycapi_capi_release(card); -#endif - - if (card->releasehardware) - card->releasehardware(card); /* free all hardware resources */ - - if (card == card_root) { - card_root = card_root->next; - if (!card_root) - card_last = null; - } else { - hysdn_card *tmp = card_root; - while (tmp) { - if (tmp->next == card) - tmp->next = card->next; - card_last = tmp; - tmp = tmp->next; - } - } - - kfree(card); - pci_disable_device(akt_pcidev); -} - -static struct pci_driver hysdn_pci_driver = { - .name = "hysdn", - .id_table = hysdn_pci_tbl, - .probe = hysdn_pci_init_one, - .remove = hysdn_pci_remove_one, -}; - -static int hysdn_have_procfs; - -static int __init -hysdn_init(void) -{ - int rc; - - printk(kern_notice "hysdn: module loaded "); - - rc = pci_register_driver(&hysdn_pci_driver); - if (rc) - return rc; - - printk(kern_info "hysdn: %d card(s) found. ", cardmax); - - if (!hysdn_procconf_init()) - hysdn_have_procfs = 1; - -#ifdef config_hysdn_capi - if (cardmax > 0) { - if (hycapi_init()) { - printk(kern_err "hycapi: init failed "); - - if (hysdn_have_procfs) - hysdn_procconf_release(); - - pci_unregister_driver(&hysdn_pci_driver); - return -espipe; - } - } -#endif /* config_hysdn_capi */ - - return 0; /* no error */ -} /* init_module */ - - -/***********************************************************************/ -/* cleanup_module is called when the module is released by the kernel. */ -/* the routine is only called if init_module has been successful and */ -/* the module counter has a value of 0. otherwise this function will */ -/* not be called. 
this function must release all resources still allo- */ -/* cated as after the return from this function the module code will */ -/* be removed from memory. */ -/***********************************************************************/ -static void __exit -hysdn_exit(void) -{ - if (hysdn_have_procfs) - hysdn_procconf_release(); - - pci_unregister_driver(&hysdn_pci_driver); - -#ifdef config_hysdn_capi - hycapi_cleanup(); -#endif /* config_hysdn_capi */ - - printk(kern_notice "hysdn: module unloaded "); -} /* cleanup_module */ - -module_init(hysdn_init); -module_exit(hysdn_exit); diff --git a/drivers/staging/isdn/hysdn/hysdn_net.c b/drivers/staging/isdn/hysdn/hysdn_net.c --- a/drivers/staging/isdn/hysdn/hysdn_net.c +++ /dev/null -/* $id: hysdn_net.c,v 1.8.6.4 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, net (ethernet type) handling routines. - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - * this net module has been inspired by the skeleton driver from - * donald becker (becker@cesdis.gsfc.nasa.gov) - * - */ - -#include <linux/module.h> -#include <linux/signal.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/inetdevice.h> - -#include "hysdn_defs.h" - -unsigned int hynet_enable = 0xffffffff; -module_param(hynet_enable, uint, 0); - -#define max_skb_buffers 20 /* number of buffers for keeping tx-data */ - -/****************************************************************************/ -/* structure containing the complete network data. the structure is aligned */ -/* in a way that both, the device and statistics are kept inside it. */ -/* for proper access, the device structure must be the first var/struct */ -/* inside the definition. 
*/ -/****************************************************************************/ -struct net_local { - /* tx control lock. this protects the transmit buffer ring - * state along with the "tx full" state of the driver. this - * means all netif_queue flow control actions are protected - * by this lock as well. - */ - struct net_device *dev; - spinlock_t lock; - struct sk_buff *skbs[max_skb_buffers]; /* pointers to tx-skbs */ - int in_idx, out_idx; /* indexes to buffer ring */ - int sk_count; /* number of buffers currently in ring */ -}; /* net_local */ - - - -/*********************************************************************/ -/* open/initialize the board. this is called (in the current kernel) */ -/* sometime after booting when the 'ifconfig' program is run. */ -/* this routine should set everything up anew at each open, even */ -/* registers that "should" only need to be set once at boot, so that */ -/* there is non-reboot way to recover if something goes wrong. */ -/*********************************************************************/ -static int -net_open(struct net_device *dev) -{ - struct in_device *in_dev; - hysdn_card *card = dev->ml_priv; - int i; - - netif_start_queue(dev); /* start tx-queueing */ - - /* fill in the mac-level header (if not already set) */ - if (!card->mac_addr[0]) { - for (i = 0; i < eth_alen; i++) - dev->dev_addr[i] = 0xfc; - if ((in_dev = dev->ip_ptr) != null) { - const struct in_ifaddr *ifa; - - rcu_read_lock(); - ifa = rcu_dereference(in_dev->ifa_list); - if (ifa != null) - memcpy(dev->dev_addr + (eth_alen - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local)); - rcu_read_unlock(); - } - } else - memcpy(dev->dev_addr, card->mac_addr, eth_alen); - - return (0); -} /* net_open */ - -/*******************************************/ -/* flush the currently occupied tx-buffers */ -/* must only be called when device closed */ -/*******************************************/ -static void -flush_tx_buffers(struct net_local *nl) 
-{ - - while (nl->sk_count) { - dev_kfree_skb(nl->skbs[nl->out_idx++]); /* free skb */ - if (nl->out_idx >= max_skb_buffers) - nl->out_idx = 0; /* wrap around */ - nl->sk_count--; - } -} /* flush_tx_buffers */ - - -/*********************************************************************/ -/* close/decativate the device. the device is not removed, but only */ -/* deactivated. */ -/*********************************************************************/ -static int -net_close(struct net_device *dev) -{ - - netif_stop_queue(dev); /* disable queueing */ - - flush_tx_buffers((struct net_local *) dev); - - return (0); /* success */ -} /* net_close */ - -/************************************/ -/* send a packet on this interface. */ -/* new style for kernel >= 2.3.33 */ -/************************************/ -static netdev_tx_t -net_send_packet(struct sk_buff *skb, struct net_device *dev) -{ - struct net_local *lp = (struct net_local *) dev; - - spin_lock_irq(&lp->lock); - - lp->skbs[lp->in_idx++] = skb; /* add to buffer list */ - if (lp->in_idx >= max_skb_buffers) - lp->in_idx = 0; /* wrap around */ - lp->sk_count++; /* adjust counter */ - netif_trans_update(dev); - - /* if we just used up the very last entry in the - * tx ring on this device, tell the queueing - * layer to send no more. - */ - if (lp->sk_count >= max_skb_buffers) - netif_stop_queue(dev); - - /* when the tx completion hw interrupt arrives, this - * is when the transmit statistics are updated. - */ - - spin_unlock_irq(&lp->lock); - - if (lp->sk_count <= 3) { - schedule_work(&((hysdn_card *) dev->ml_priv)->irq_queue); - } - return netdev_tx_ok; /* success */ -} /* net_send_packet */ - - - -/***********************************************************************/ -/* acknowlegde a packet send. 
the network layer will be informed about */ -/* completion */ -/***********************************************************************/ -void -hysdn_tx_netack(hysdn_card *card) -{ - struct net_local *lp = card->netif; - - if (!lp) - return; /* non existing device */ - - - if (!lp->sk_count) - return; /* error condition */ - - lp->dev->stats.tx_packets++; - lp->dev->stats.tx_bytes += lp->skbs[lp->out_idx]->len; - - dev_kfree_skb(lp->skbs[lp->out_idx++]); /* free skb */ - if (lp->out_idx >= max_skb_buffers) - lp->out_idx = 0; /* wrap around */ - - if (lp->sk_count-- == max_skb_buffers) /* dec usage count */ - netif_start_queue((struct net_device *) lp); -} /* hysdn_tx_netack */ - -/*****************************************************/ -/* we got a packet from the network, go and queue it */ -/*****************************************************/ -void -hysdn_rx_netpkt(hysdn_card *card, unsigned char *buf, unsigned short len) -{ - struct net_local *lp = card->netif; - struct net_device *dev; - struct sk_buff *skb; - - if (!lp) - return; /* non existing device */ - - dev = lp->dev; - dev->stats.rx_bytes += len; - - skb = dev_alloc_skb(len); - if (skb == null) { - printk(kern_notice "%s: memory squeeze, dropping packet. 
", - dev->name); - dev->stats.rx_dropped++; - return; - } - /* copy the data */ - skb_put_data(skb, buf, len); - - /* determine the used protocol */ - skb->protocol = eth_type_trans(skb, dev); - - dev->stats.rx_packets++; /* adjust packet count */ - - netif_rx(skb); -} /* hysdn_rx_netpkt */ - -/*****************************************************/ -/* return the pointer to a network packet to be send */ -/*****************************************************/ -struct sk_buff * -hysdn_tx_netget(hysdn_card *card) -{ - struct net_local *lp = card->netif; - - if (!lp) - return (null); /* non existing device */ - - if (!lp->sk_count) - return (null); /* nothing available */ - - return (lp->skbs[lp->out_idx]); /* next packet to send */ -} /* hysdn_tx_netget */ - -static const struct net_device_ops hysdn_netdev_ops = { - .ndo_open = net_open, - .ndo_stop = net_close, - .ndo_start_xmit = net_send_packet, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - - -/*****************************************************************************/ -/* hysdn_net_create creates a new net device for the given card. if a device */ -/* already exists, it will be deleted and created a new one. the return value */ -/* 0 announces success, else a negative error code will be returned. */ -/*****************************************************************************/ -int -hysdn_net_create(hysdn_card *card) -{ - struct net_device *dev; - int i; - struct net_local *lp; - - if (!card) { - printk(kern_warning "no card-pt in hysdn_net_create! 
"); - return (-enomem); - } - hysdn_net_release(card); /* release an existing net device */ - - dev = alloc_etherdev(sizeof(struct net_local)); - if (!dev) { - printk(kern_warning "hysdn: unable to allocate mem "); - return (-enomem); - } - - lp = netdev_priv(dev); - lp->dev = dev; - - dev->netdev_ops = &hysdn_netdev_ops; - spin_lock_init(&((struct net_local *) dev)->lock); - - /* initialise necessary or informing fields */ - dev->base_addr = card->iobase; /* io address */ - dev->irq = card->irq; /* irq */ - - dev->netdev_ops = &hysdn_netdev_ops; - if ((i = register_netdev(dev))) { - printk(kern_warning "hysdn: unable to create network device "); - free_netdev(dev); - return (i); - } - dev->ml_priv = card; /* remember pointer to own data structure */ - card->netif = dev; /* setup the local pointer */ - - if (card->debug_flags & log_net_init) - hysdn_addlog(card, "network device created"); - return 0; /* and return success */ -} /* hysdn_net_create */ - -/***************************************************************************/ -/* hysdn_net_release deletes the net device for the given card. the return */ -/* value 0 announces success, else a negative error code will be returned. */ -/***************************************************************************/ -int -hysdn_net_release(hysdn_card *card) -{ - struct net_device *dev = card->netif; - - if (!dev) - return (0); /* non existing */ - - card->netif = null; /* clear out pointer */ - net_close(dev); - - flush_tx_buffers((struct net_local *) dev); /* empty buffers */ - - unregister_netdev(dev); /* release the device */ - free_netdev(dev); /* release the memory allocated */ - if (card->debug_flags & log_net_init) - hysdn_addlog(card, "network device deleted"); - - return (0); /* always successful */ -} /* hysdn_net_release */ - -/*****************************************************************************/ -/* hysdn_net_getname returns a pointer to the name of the network interface. 
*/ -/* if the interface is not existing, a "-" is returned. */ -/*****************************************************************************/ -char * -hysdn_net_getname(hysdn_card *card) -{ - struct net_device *dev = card->netif; - - if (!dev) - return ("-"); /* non existing */ - - return (dev->name); -} /* hysdn_net_getname */ diff --git a/drivers/staging/isdn/hysdn/hysdn_pof.h b/drivers/staging/isdn/hysdn/hysdn_pof.h --- a/drivers/staging/isdn/hysdn/hysdn_pof.h +++ /dev/null -/* $id: hysdn_pof.h,v 1.2.6.1 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, definitions used for handling pof-files. - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -/************************/ -/* pof specific defines */ -/************************/ -#define boot_buf_size 0x1000 /* =4096, maybe moved to other h file */ -#define crypt_feedterm 0x8142 -#define crypt_startterm 0x81a5 -/* max. timeout time in seconds - * from end of booting to pof is ready - */ -#define pof_ready_time_out_sec 10 - -/**********************************/ -/* defines for 1.stage boot image */ -/**********************************/ - -/* the pof file record containing the boot loader image - * has 2 pages a 16kb: - * 1. page contains the high 16-bit part of the 32-bit e1 words - * 2. page contains the low 16-bit part of the 32-bit e1 words - * - * in each 16kb page we assume the start of the boot loader code - * in the highest 2kb part (at offset 0x3800); - * the rest (0x0000..0x37ff) is assumed to contain 0 bytes. 
- */ - -#define pof_boot_loader_page_size 0x4000 /* =16384u */ -#define pof_boot_loader_total_size (2u * pof_boot_loader_page_size) - -#define pof_boot_loader_code_size 0x0800 /* =2kb =2048u */ - -/* offset in boot page, where loader code may start */ -/* =0x3800= 14336u */ -#define pof_boot_loader_off_in_page (pof_boot_loader_page_size-pof_boot_loader_code_size) - - -/*--------------------------------------pof file record structs------------*/ -typedef struct poffilehdr_tag { /* pof file header */ - /*00 */ unsigned long magic __attribute__((packed)); - /*04 */ unsigned long n_pofrecs __attribute__((packed)); -/*08 */ -} tpoffilehdr; - -typedef struct pofrechdr_tag { /* pof record header */ - /*00 */ unsigned short pofrecid __attribute__((packed)); - /*02 */ unsigned long pofrecdatalen __attribute__((packed)); -/*06 */ -} tpofrechdr; - -typedef struct poftimestamp_tag { - /*00 */ unsigned long unixtime __attribute__((packed)); - /*04 */ unsigned char datetimetext[0x28]; - /* =40 */ -/*2c */ -} tpoftimestamp; - -/* tpoffilehdr.magic value: */ -#define tagfilemagic 0x464f501aul -/* tpofrechdr.pofrecid values: */ -#define tag_absdata 0x1000 /* abs. data */ -#define tag_bootdta 0x1001 /* boot data */ -#define tag_comment 0x0020 -#define tag_syscall 0x0021 -#define tag_flowctrl 0x0022 -#define tag_timestmp 0x0010 /* date/time stamp of version */ -#define tag_cabsdata 0x1100 /* crypted abs. data */ -#define tag_cbootdta 0x1101 /* crypted boot data */ diff --git a/drivers/staging/isdn/hysdn/hysdn_procconf.c b/drivers/staging/isdn/hysdn/hysdn_procconf.c --- a/drivers/staging/isdn/hysdn/hysdn_procconf.c +++ /dev/null -/* $id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, /proc/net filesystem dir and conf functions. 
- * - * written by werner cornelius (werner@titro.de) for hypercope gmbh - * - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/cred.h> -#include <linux/module.h> -#include <linux/poll.h> -#include <linux/proc_fs.h> -#include <linux/pci.h> -#include <linux/slab.h> -#include <linux/mutex.h> -#include <net/net_namespace.h> - -#include "hysdn_defs.h" - -static define_mutex(hysdn_conf_mutex); - -#define info_out_len 80 /* length of info line including lf */ - -/********************************************************/ -/* defines and data structure for conf write operations */ -/********************************************************/ -#define conf_state_detect 0 /* waiting for detect */ -#define conf_state_conf 1 /* writing config data */ -#define conf_state_pof 2 /* writing pof data */ -#define conf_line_len 255 /* 255 chars max */ - -struct conf_writedata { - hysdn_card *card; /* card the device is connected to */ - int buf_size; /* actual number of bytes in the buffer */ - int needed_size; /* needed size when reading pof */ - int state; /* actual interface states from above constants */ - unsigned char conf_line[conf_line_len]; /* buffered conf line */ - unsigned short channel; /* active channel number */ - unsigned char *pof_buffer; /* buffer when writing pof */ -}; - -/***********************************************************************/ -/* process_line parses one config line and transfers it to the card if */ -/* necessary. */ -/* if the return value is negative an error occurred. 
*/ -/***********************************************************************/ -static int -process_line(struct conf_writedata *cnf) -{ - unsigned char *cp = cnf->conf_line; - int i; - - if (cnf->card->debug_flags & log_cnf_line) - hysdn_addlog(cnf->card, "conf line: %s", cp); - - if (*cp == '-') { /* option */ - cp++; /* point to option char */ - - if (*cp++ != 'c') - return (0); /* option unknown or used */ - i = 0; /* start value for channel */ - while ((*cp <= '9') && (*cp >= '0')) - i = i * 10 + *cp++ - '0'; /* get decimal number */ - if (i > 65535) { - if (cnf->card->debug_flags & log_cnf_misc) - hysdn_addlog(cnf->card, "conf channel invalid %d", i); - return (-err_inv_chan); /* invalid channel */ - } - cnf->channel = i & 0xffff; /* set new channel number */ - return (0); /* success */ - } /* option */ - if (*cp == '*') { /* line to send */ - if (cnf->card->debug_flags & log_cnf_data) - hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel, cp); - return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1, - cnf->channel)); /* send the line without * */ - } /* line to send */ - return (0); -} /* process_line */ - -/***********************************/ -/* conf file operations and tables */ -/***********************************/ - -/****************************************************/ -/* write conf file -> boot or send cfg line to card */ -/****************************************************/ -static ssize_t -hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t *off) -{ - struct conf_writedata *cnf; - int i; - unsigned char ch, *cp; - - if (!count) - return (0); /* nothing to handle */ - - if (!(cnf = file->private_data)) - return (-efault); /* should never happen */ - - if (cnf->state == conf_state_detect) { /* auto detect cnf or pof data */ - if (copy_from_user(&ch, buf, 1)) /* get first char for detect */ - return (-efault); - - if (ch == 0x1a) { - /* we detected a pof file */ - if ((cnf->needed_size = pof_write_open(cnf->card, 
&cnf->pof_buffer)) <= 0) - return (cnf->needed_size); /* an error occurred -> exit */ - cnf->buf_size = 0; /* buffer is empty */ - cnf->state = conf_state_pof; /* new state */ - } else { - /* conf data has been detected */ - cnf->buf_size = 0; /* buffer is empty */ - cnf->state = conf_state_conf; /* requested conf data write */ - if (cnf->card->state != card_state_run) - return (-err_not_booted); - cnf->conf_line[conf_line_len - 1] = 0; /* limit string length */ - cnf->channel = 4098; /* default channel for output */ - } - } /* state was auto detect */ - if (cnf->state == conf_state_pof) { /* pof write active */ - i = cnf->needed_size - cnf->buf_size; /* bytes still missing for write */ - if (i <= 0) - return (-einval); /* size error handling pof */ - - if (i < count) - count = i; /* limit requested number of bytes */ - if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count)) - return (-efault); /* error while copying */ - cnf->buf_size += count; - - if (cnf->needed_size == cnf->buf_size) { - cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size); /* write data */ - if (cnf->needed_size <= 0) { - cnf->card->state = card_state_booterr; /* show boot error */ - return (cnf->needed_size); /* an error occurred */ - } - cnf->buf_size = 0; /* buffer is empty again */ - } - } - /* pof write active */ - else { /* conf write active */ - - if (cnf->card->state != card_state_run) { - if (cnf->card->debug_flags & log_cnf_misc) - hysdn_addlog(cnf->card, "cnf write denied -> not booted"); - return (-err_not_booted); - } - i = (conf_line_len - 1) - cnf->buf_size; /* bytes available in buffer */ - if (i > 0) { - /* copy remaining bytes into buffer */ - - if (count > i) - count = i; /* limit transfer */ - if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count)) - return (-efault); /* error while copying */ - - i = count; /* number of chars in buffer */ - cp = cnf->conf_line + cnf->buf_size; - while (i) { - /* search for end of line */ - if ((*cp < ' ') && (*cp 
!= 9)) - break; /* end of line found */ - cp++; - i--; - } /* search for end of line */ - - if (i) { - /* delimiter found */ - *cp++ = 0; /* string termination */ - count -= (i - 1); /* subtract remaining bytes from count */ - while ((i) && (*cp < ' ') && (*cp != 9)) { - i--; /* discard next char */ - count++; /* mark as read */ - cp++; /* next char */ - } - cnf->buf_size = 0; /* buffer is empty after transfer */ - if ((i = process_line(cnf)) < 0) /* handle the line */ - count = i; /* return the error */ - } - /* delimiter found */ - else { - cnf->buf_size += count; /* add chars to string */ - if (cnf->buf_size >= conf_line_len - 1) { - if (cnf->card->debug_flags & log_cnf_misc) - hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count); - return (-err_conf_long); - } - } /* not delimited */ - - } - /* copy remaining bytes into buffer */ - else { - if (cnf->card->debug_flags & log_cnf_misc) - hysdn_addlog(cnf->card, "cnf line too long"); - return (-err_conf_long); - } - } /* conf write active */ - - return (count); -} /* hysdn_conf_write */ - -/*******************************************/ -/* read conf file -> output card info data */ -/*******************************************/ -static ssize_t -hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off) -{ - char *cp; - - if (!(file->f_mode & fmode_read)) - return -eperm; /* no permission to read */ - - if (!(cp = file->private_data)) - return -efault; /* should never happen */ - - return simple_read_from_buffer(buf, count, off, cp, strlen(cp)); -} /* hysdn_conf_read */ - -/******************/ -/* open conf file */ -/******************/ -static int -hysdn_conf_open(struct inode *ino, struct file *filep) -{ - hysdn_card *card; - struct conf_writedata *cnf; - char *cp, *tmp; - - /* now search the addressed card */ - mutex_lock(&hysdn_conf_mutex); - card = pde_data(ino); - if (card->debug_flags & (log_proc_open | log_proc_all)) - hysdn_addlog(card, "config open for 
uid=%d gid=%d mode=0x%x", - filep->f_cred->fsuid, filep->f_cred->fsgid, - filep->f_mode); - - if ((filep->f_mode & (fmode_read | fmode_write)) == fmode_write) { - /* write only access -> write boot file or conf line */ - - if (!(cnf = kmalloc(sizeof(struct conf_writedata), gfp_kernel))) { - mutex_unlock(&hysdn_conf_mutex); - return (-efault); - } - cnf->card = card; - cnf->buf_size = 0; /* nothing buffered */ - cnf->state = conf_state_detect; /* start auto detect */ - filep->private_data = cnf; - - } else if ((filep->f_mode & (fmode_read | fmode_write)) == fmode_read) { - /* read access -> output card info data */ - - if (!(tmp = kmalloc(info_out_len * 2 + 2, gfp_kernel))) { - mutex_unlock(&hysdn_conf_mutex); - return (-efault); /* out of memory */ - } - filep->private_data = tmp; /* start of string */ - - /* first output a headline */ - sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device"); - cp = tmp; /* start of string */ - while (*cp) - cp++; - while (((cp - tmp) % (info_out_len + 1)) != info_out_len) - *cp++ = ' '; - *cp++ = ' '; - - /* and now the data */ - sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s", - card->myid, - card->bus, - pci_slot(card->devfn), - card->brdtype, - card->irq, - card->iobase, - card->membase, - card->bchans, - card->faxchans, - card->state, - hysdn_net_getname(card)); - while (*cp) - cp++; - while (((cp - tmp) % (info_out_len + 1)) != info_out_len) - *cp++ = ' '; - *cp++ = ' '; - *cp = 0; /* end of string */ - } else { /* simultaneous read/write access forbidden ! */ - mutex_unlock(&hysdn_conf_mutex); - return (-eperm); /* no permission this time */ - } - mutex_unlock(&hysdn_conf_mutex); - return nonseekable_open(ino, filep); -} /* hysdn_conf_open */ - -/***************************/ -/* close a config file. 
*/ -/***************************/ -static int -hysdn_conf_close(struct inode *ino, struct file *filep) -{ - hysdn_card *card; - struct conf_writedata *cnf; - int retval = 0; - - mutex_lock(&hysdn_conf_mutex); - card = pde_data(ino); - if (card->debug_flags & (log_proc_open | log_proc_all)) - hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x", - filep->f_cred->fsuid, filep->f_cred->fsgid, - filep->f_mode); - - if ((filep->f_mode & (fmode_read | fmode_write)) == fmode_write) { - /* write only access -> write boot file or conf line */ - if (filep->private_data) { - cnf = filep->private_data; - - if (cnf->state == conf_state_pof) - retval = pof_write_close(cnf->card); /* close the pof write */ - kfree(filep->private_data); /* free allocated memory for buffer */ - - } /* handle write private data */ - } else if ((filep->f_mode & (fmode_read | fmode_write)) == fmode_read) { - /* read access -> output card info data */ - - kfree(filep->private_data); /* release memory */ - } - mutex_unlock(&hysdn_conf_mutex); - return (retval); -} /* hysdn_conf_close */ - -/******************************************************/ -/* table for conf filesystem functions defined above. */ -/******************************************************/ -static const struct file_operations conf_fops = -{ - .owner = this_module, - .llseek = no_llseek, - .read = hysdn_conf_read, - .write = hysdn_conf_write, - .open = hysdn_conf_open, - .release = hysdn_conf_close, -}; - -/*****************************/ -/* hysdn subdir in /proc/net */ -/*****************************/ -struct proc_dir_entry *hysdn_proc_entry = null; - -/*******************************************************************************/ -/* hysdn_procconf_init is called when the module is loaded and after the cards */ -/* have been detected. the needed proc dir and card config files are created. */ -/* the log init is called at last. 
*/ -/*******************************************************************************/ -int -hysdn_procconf_init(void) -{ - hysdn_card *card; - unsigned char conf_name[20]; - - hysdn_proc_entry = proc_mkdir(proc_subdir_name, init_net.proc_net); - if (!hysdn_proc_entry) { - printk(kern_err "hysdn: unable to create hysdn subdir "); - return (-1); - } - card = card_root; /* point to first card */ - while (card) { - - sprintf(conf_name, "%s%d", proc_conf_basename, card->myid); - if ((card->procconf = (void *) proc_create_data(conf_name, - s_ifreg | s_irugo | s_iwusr, - hysdn_proc_entry, - &conf_fops, - card)) != null) { - hysdn_proclog_init(card); /* init the log file entry */ - } - card = card->next; /* next entry */ - } - - printk(kern_notice "hysdn: procfs initialised "); - return 0; -} /* hysdn_procconf_init */ - -/*************************************************************************************/ -/* hysdn_procconf_release is called when the module is unloaded and before the cards */ -/* resources are released. the module counter is assumed to be 0 ! */ -/*************************************************************************************/ -void -hysdn_procconf_release(void) -{ - hysdn_card *card; - unsigned char conf_name[20]; - - card = card_root; /* start with first card */ - while (card) { - - sprintf(conf_name, "%s%d", proc_conf_basename, card->myid); - if (card->procconf) - remove_proc_entry(conf_name, hysdn_proc_entry); - - hysdn_proclog_release(card); /* init the log file entry */ - - card = card->next; /* point to next card */ - } - - remove_proc_entry(proc_subdir_name, init_net.proc_net); -} diff --git a/drivers/staging/isdn/hysdn/hysdn_proclog.c b/drivers/staging/isdn/hysdn/hysdn_proclog.c --- a/drivers/staging/isdn/hysdn/hysdn_proclog.c +++ /dev/null -/* $id: hysdn_proclog.c,v 1.9.6.3 2001/09/23 22:24:54 kai exp $ - * - * linux driver for hysdn cards, /proc/net filesystem log functions. 
- * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/module.h> -#include <linux/poll.h> -#include <linux/proc_fs.h> -#include <linux/sched.h> -#include <linux/slab.h> -#include <linux/mutex.h> -#include <linux/kernel.h> - -#include "hysdn_defs.h" - -/* the proc subdir for the interface is defined in the procconf module */ -extern struct proc_dir_entry *hysdn_proc_entry; - -static define_mutex(hysdn_log_mutex); -static void put_log_buffer(hysdn_card *card, char *cp); - -/*************************************************/ -/* structure keeping ascii log for device output */ -/*************************************************/ -struct log_data { - struct log_data *next; - unsigned long usage_cnt;/* number of files still to work */ - void *proc_ctrl; /* pointer to own control procdata structure */ - char log_start[2]; /* log string start (final len aligned by size) */ -}; - -/**********************************************/ -/* structure holding proc entrys for one card */ -/**********************************************/ -struct procdata { - struct proc_dir_entry *log; /* log entry */ - char log_name[15]; /* log filename */ - struct log_data *log_head, *log_tail; /* head and tail for queue */ - int if_used; /* open count for interface */ - unsigned char logtmp[log_max_linelen]; - wait_queue_head_t rd_queue; -}; - - -/**********************************************/ -/* log function for cards error log interface */ -/**********************************************/ -void -hysdn_card_errlog(hysdn_card *card, terrlogentry *logp, int maxsize) -{ - char buf[errlog_text_size + 40]; - - sprintf(buf, "log 0x%08lx 0x%08lx : %s ", logp->ulerrtype, logp->ulerrsubtype, logp->uctext); - put_log_buffer(card, buf); /* output the string */ -} 
/* hysdn_card_errlog */ - -/***************************************************/ -/* log function using format specifiers for output */ -/***************************************************/ -void -hysdn_addlog(hysdn_card *card, char *fmt, ...) -{ - struct procdata *pd = card->proclog; - char *cp; - va_list args; - - if (!pd) - return; /* log structure non existent */ - - cp = pd->logtmp; - cp += sprintf(cp, "hysdn: card %d ", card->myid); - - va_start(args, fmt); - cp += vsprintf(cp, fmt, args); - va_end(args); - *cp++ = ' '; - *cp = 0; - - if (card->debug_flags & deb_out_syslog) - printk(kern_info "%s", pd->logtmp); - else - put_log_buffer(card, pd->logtmp); - -} /* hysdn_addlog */ - -/********************************************/ -/* put an log buffer into the log queue. */ -/* this buffer will be kept until all files */ -/* opened for read got the contents. */ -/* flushes buffers not longer in use. */ -/********************************************/ -static void -put_log_buffer(hysdn_card *card, char *cp) -{ - struct log_data *ib; - struct procdata *pd = card->proclog; - unsigned long flags; - - if (!pd) - return; - if (!cp) - return; - if (!*cp) - return; - if (pd->if_used <= 0) - return; /* no open file for read */ - - if (!(ib = kmalloc(sizeof(struct log_data) + strlen(cp), gfp_atomic))) - return; /* no memory */ - strcpy(ib->log_start, cp); /* set output string */ - ib->next = null; - ib->proc_ctrl = pd; /* point to own control structure */ - spin_lock_irqsave(&card->hysdn_lock, flags); - ib->usage_cnt = pd->if_used; - if (!pd->log_head) - pd->log_head = ib; /* new head */ - else - pd->log_tail->next = ib; /* follows existing messages */ - pd->log_tail = ib; /* new tail */ - - /* delete old entrys */ - while (pd->log_head->next) { - if ((pd->log_head->usage_cnt <= 0) && - (pd->log_head->next->usage_cnt <= 0)) { - ib = pd->log_head; - pd->log_head = pd->log_head->next; - kfree(ib); - } else { - break; - } - } /* pd->log_head->next */ - - 
spin_unlock_irqrestore(&card->hysdn_lock, flags); - - wake_up_interruptible(&(pd->rd_queue)); /* announce new entry */ -} /* put_log_buffer */ - - -/******************************/ -/* file operations and tables */ -/******************************/ - -/****************************************/ -/* write log file -> set log level bits */ -/****************************************/ -static ssize_t -hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off) -{ - int rc; - hysdn_card *card = file->private_data; - - rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags); - if (rc < 0) - return rc; - hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags); - return (count); -} /* hysdn_log_write */ - -/******************/ -/* read log file */ -/******************/ -static ssize_t -hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off) -{ - struct log_data *inf; - int len; - hysdn_card *card = pde_data(file_inode(file)); - - if (!(inf = *((struct log_data **) file->private_data))) { - struct procdata *pd = card->proclog; - if (file->f_flags & o_nonblock) - return (-eagain); - - wait_event_interruptible(pd->rd_queue, (inf = - *((struct log_data **) file->private_data))); - } - if (!inf) - return (0); - - inf->usage_cnt--; /* new usage count */ - file->private_data = &inf->next; /* next structure */ - if ((len = strlen(inf->log_start)) <= count) { - if (copy_to_user(buf, inf->log_start, len)) - return -efault; - *off += len; - return (len); - } - return (0); -} /* hysdn_log_read */ - -/******************/ -/* open log file */ -/******************/ -static int -hysdn_log_open(struct inode *ino, struct file *filep) -{ - hysdn_card *card = pde_data(ino); - - mutex_lock(&hysdn_log_mutex); - if ((filep->f_mode & (fmode_read | fmode_write)) == fmode_write) { - /* write only access -> write log level only */ - filep->private_data = card; /* remember our own card */ - } else if ((filep->f_mode & (fmode_read | fmode_write)) 
== fmode_read) { - struct procdata *pd = card->proclog; - unsigned long flags; - - /* read access -> log/debug read */ - spin_lock_irqsave(&card->hysdn_lock, flags); - pd->if_used++; - if (pd->log_head) - filep->private_data = &pd->log_tail->next; - else - filep->private_data = &pd->log_head; - spin_unlock_irqrestore(&card->hysdn_lock, flags); - } else { /* simultaneous read/write access forbidden ! */ - mutex_unlock(&hysdn_log_mutex); - return (-eperm); /* no permission this time */ - } - mutex_unlock(&hysdn_log_mutex); - return nonseekable_open(ino, filep); -} /* hysdn_log_open */ - -/*******************************************************************************/ -/* close a cardlog file. if the file has been opened for exclusive write it is */ -/* assumed as pof data input and the pof loader is noticed about. */ -/* otherwise file is handled as log output. in this case the interface usage */ -/* count is decremented and all buffers are noticed of closing. if this file */ -/* was the last one to be closed, all buffers are freed. 
*/ -/*******************************************************************************/ -static int -hysdn_log_close(struct inode *ino, struct file *filep) -{ - struct log_data *inf; - struct procdata *pd; - hysdn_card *card; - int retval = 0; - - mutex_lock(&hysdn_log_mutex); - if ((filep->f_mode & (fmode_read | fmode_write)) == fmode_write) { - /* write only access -> write debug level written */ - retval = 0; /* success */ - } else { - /* read access -> log/debug read, mark one further file as closed */ - - inf = *((struct log_data **) filep->private_data); /* get first log entry */ - if (inf) - pd = (struct procdata *) inf->proc_ctrl; /* still entries there */ - else { - /* no info available -> search card */ - card = pde_data(file_inode(filep)); - pd = card->proclog; /* pointer to procfs log */ - } - if (pd) - pd->if_used--; /* decrement interface usage count by one */ - - while (inf) { - inf->usage_cnt--; /* decrement usage count for buffers */ - inf = inf->next; - } - - if (pd) - if (pd->if_used <= 0) /* delete buffers if last file closed */ - while (pd->log_head) { - inf = pd->log_head; - pd->log_head = pd->log_head->next; - kfree(inf); - } - } /* read access */ - mutex_unlock(&hysdn_log_mutex); - - return (retval); -} /* hysdn_log_close */ - -/*************************************************/ -/* select/poll routine to be able using select() */ -/*************************************************/ -static __poll_t -hysdn_log_poll(struct file *file, poll_table *wait) -{ - __poll_t mask = 0; - hysdn_card *card = pde_data(file_inode(file)); - struct procdata *pd = card->proclog; - - if ((file->f_mode & (fmode_read | fmode_write)) == fmode_write) - return (mask); /* no polling for write supported */ - - poll_wait(file, &(pd->rd_queue), wait); - - if (*((struct log_data **) file->private_data)) - mask |= epollin | epollrdnorm; - - return mask; -} /* hysdn_log_poll */ - -/**************************************************/ -/* table for log filesystem functions 
defined above. */ -/**************************************************/ -static const struct file_operations log_fops = -{ - .owner = this_module, - .llseek = no_llseek, - .read = hysdn_log_read, - .write = hysdn_log_write, - .poll = hysdn_log_poll, - .open = hysdn_log_open, - .release = hysdn_log_close, -}; - - -/***********************************************************************************/ -/* hysdn_proclog_init is called when the module is loaded after creating the cards */ -/* conf files. */ -/***********************************************************************************/ -int -hysdn_proclog_init(hysdn_card *card) -{ - struct procdata *pd; - - /* create a cardlog proc entry */ - - if ((pd = kzalloc(sizeof(struct procdata), gfp_kernel)) != null) { - sprintf(pd->log_name, "%s%d", proc_log_basename, card->myid); - pd->log = proc_create_data(pd->log_name, - s_ifreg | s_irugo | s_iwusr, hysdn_proc_entry, - &log_fops, card); - - init_waitqueue_head(&(pd->rd_queue)); - - card->proclog = (void *) pd; /* remember procfs structure */ - } - return (0); -} /* hysdn_proclog_init */ - -/************************************************************************************/ -/* hysdn_proclog_release is called when the module is unloaded and before the cards */ -/* conf file is released */ -/* the module counter is assumed to be 0 ! 
*/ -/************************************************************************************/ -void -hysdn_proclog_release(hysdn_card *card) -{ - struct procdata *pd; - - if ((pd = (struct procdata *) card->proclog) != null) { - if (pd->log) - remove_proc_entry(pd->log_name, hysdn_proc_entry); - kfree(pd); /* release memory */ - card->proclog = null; - } -} /* hysdn_proclog_release */ diff --git a/drivers/staging/isdn/hysdn/hysdn_sched.c b/drivers/staging/isdn/hysdn/hysdn_sched.c --- a/drivers/staging/isdn/hysdn/hysdn_sched.c +++ /dev/null -/* $id: hysdn_sched.c,v 1.5.6.4 2001/11/06 21:58:19 kai exp $ - * - * linux driver for hysdn cards - * scheduler routines for handling exchange card <-> pc. - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#include <linux/signal.h> -#include <linux/kernel.h> -#include <linux/ioport.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <asm/io.h> - -#include "hysdn_defs.h" - -/*****************************************************************************/ -/* hysdn_sched_rx is called from the cards handler to announce new data is */ -/* available from the card. the routine has to handle the data and return */ -/* with a nonzero code if the data could be worked (or even thrown away), if */ -/* no room to buffer the data is available a zero return tells the card */ -/* to keep the data until later. 
*/ -/*****************************************************************************/ -int -hysdn_sched_rx(hysdn_card *card, unsigned char *buf, unsigned short len, - unsigned short chan) -{ - - switch (chan) { - case chan_ndis_data: - if (hynet_enable & (1 << card->myid)) { - /* give packet to network handler */ - hysdn_rx_netpkt(card, buf, len); - } - break; - - case chan_errlog: - hysdn_card_errlog(card, (terrlogentry *) buf, len); - if (card->err_log_state == errlog_state_on) - card->err_log_state = errlog_state_start; /* start new fetch */ - break; -#ifdef config_hysdn_capi - case chan_capi: -/* give packet to capi handler */ - if (hycapi_enable & (1 << card->myid)) { - hycapi_rx_capipkt(card, buf, len); - } - break; -#endif /* config_hysdn_capi */ - default: - printk(kern_info "irq message channel %d len %d unhandled ", chan, len); - break; - - } /* switch rx channel */ - - return (1); /* always handled */ -} /* hysdn_sched_rx */ - -/*****************************************************************************/ -/* hysdn_sched_tx is called from the cards handler to announce that there is */ -/* room in the tx-buffer to the card and data may be sent if needed. */ -/* if the routine wants to send data it must fill buf, len and chan with the */ -/* appropriate data and return a nonzero value. with a zero return no new */ -/* data to send is assumed. maxlen specifies the buffer size available for */ -/* sending. 
*/ -/*****************************************************************************/ -int -hysdn_sched_tx(hysdn_card *card, unsigned char *buf, - unsigned short volatile *len, unsigned short volatile *chan, - unsigned short maxlen) -{ - struct sk_buff *skb; - - if (card->net_tx_busy) { - card->net_tx_busy = 0; /* reset flag */ - hysdn_tx_netack(card); /* acknowledge packet send */ - } /* a network packet has completely been transferred */ - /* first of all async requests are handled */ - if (card->async_busy) { - if (card->async_len <= maxlen) { - memcpy(buf, card->async_data, card->async_len); - *len = card->async_len; - *chan = card->async_channel; - card->async_busy = 0; /* reset request */ - return (1); - } - card->async_busy = 0; /* in case of length error */ - } /* async request */ - if ((card->err_log_state == errlog_state_start) && - (maxlen >= errlog_cmd_req_size)) { - strcpy(buf, errlog_cmd_req); /* copy the command */ - *len = errlog_cmd_req_size; /* buffer length */ - *chan = chan_errlog; /* and channel */ - card->err_log_state = errlog_state_on; /* new state is on */ - return (1); /* tell that data should be send */ - } /* error log start and able to send */ - if ((card->err_log_state == errlog_state_stop) && - (maxlen >= errlog_cmd_stop_size)) { - strcpy(buf, errlog_cmd_stop); /* copy the command */ - *len = errlog_cmd_stop_size; /* buffer length */ - *chan = chan_errlog; /* and channel */ - card->err_log_state = errlog_state_off; /* new state is off */ - return (1); /* tell that data should be send */ - } /* error log start and able to send */ - /* now handle network interface packets */ - if ((hynet_enable & (1 << card->myid)) && - (skb = hysdn_tx_netget(card)) != null) - { - if (skb->len <= maxlen) { - /* copy the packet to the buffer */ - skb_copy_from_linear_data(skb, buf, skb->len); - *len = skb->len; - *chan = chan_ndis_data; - card->net_tx_busy = 1; /* we are busy sending network data */ - return (1); /* go and send the data */ - } else - 
hysdn_tx_netack(card); /* aknowledge packet -> throw away */ - } /* send a network packet if available */ -#ifdef config_hysdn_capi - if (((hycapi_enable & (1 << card->myid))) && - ((skb = hycapi_tx_capiget(card)) != null)) - { - if (skb->len <= maxlen) { - skb_copy_from_linear_data(skb, buf, skb->len); - *len = skb->len; - *chan = chan_capi; - hycapi_tx_capiack(card); - return (1); /* go and send the data */ - } - } -#endif /* config_hysdn_capi */ - return (0); /* nothing to send */ -} /* hysdn_sched_tx */ - - -/*****************************************************************************/ -/* send one config line to the card and return 0 if successful, otherwise a */ -/* negative error code. */ -/* the function works with timeouts perhaps not giving the greatest speed */ -/* sending the line, but this should be meaningless because only some lines */ -/* are to be sent and this happens very seldom. */ -/*****************************************************************************/ -int -hysdn_tx_cfgline(hysdn_card *card, unsigned char *line, unsigned short chan) -{ - int cnt = 50; /* timeout intervalls */ - unsigned long flags; - - if (card->debug_flags & log_sched_asyn) - hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1); - - while (card->async_busy) { - - if (card->debug_flags & log_sched_asyn) - hysdn_addlog(card, "async tx-cfg delayed"); - - msleep_interruptible(20); /* timeout 20ms */ - if (!--cnt) - return (-err_async_time); /* timed out */ - } /* wait for buffer to become free */ - - spin_lock_irqsave(&card->hysdn_lock, flags); - strcpy(card->async_data, line); - card->async_len = strlen(line) + 1; - card->async_channel = chan; - card->async_busy = 1; /* request transfer */ - - /* now queue the task */ - schedule_work(&card->irq_queue); - spin_unlock_irqrestore(&card->hysdn_lock, flags); - - if (card->debug_flags & log_sched_asyn) - hysdn_addlog(card, "async tx-cfg data queued"); - - cnt++; /* short delay */ - - while 
(card->async_busy) { - - if (card->debug_flags & log_sched_asyn) - hysdn_addlog(card, "async tx-cfg waiting for tx-ready"); - - msleep_interruptible(20); /* timeout 20ms */ - if (!--cnt) - return (-err_async_time); /* timed out */ - } /* wait for buffer to become free again */ - - if (card->debug_flags & log_sched_asyn) - hysdn_addlog(card, "async tx-cfg data send"); - - return (0); /* line send correctly */ -} /* hysdn_tx_cfgline */ diff --git a/drivers/staging/isdn/hysdn/ince1pc.h b/drivers/staging/isdn/hysdn/ince1pc.h --- a/drivers/staging/isdn/hysdn/ince1pc.h +++ /dev/null -/* - * linux driver for hysdn cards - * common definitions for both sides of the bus: - * - conventions both spoolers must know - * - channel numbers agreed upon - * - * author m. steinkopf - * copyright 1999 by m. steinkopf - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -#ifndef __ince1pc_h__ -#define __ince1pc_h__ - -/* basic scalar definitions have same meanning, - * but their declaration location depends on environment - */ - -/*--------------------------------------channel numbers---------------------*/ -#define chan_system 0x0001 /* system channel (spooler to spooler) */ -#define chan_errlog 0x0005 /* error logger */ -#define chan_capi 0x0064 /* capi interface */ -#define chan_ndis_data 0x1001 /* ndis data transfer */ - -/*--------------------------------------pof ready msg-----------------------*/ -/* note: after booting pof sends system ready message to pc: */ -#define rdy_magic 0x52535953ul /* 'sysr' reversed */ -#define rdy_magic_size 4 /* size in bytes */ - -#define max_n_tok_bytes 255 - -#define min_rdy_msg_size rdy_magic_size -#define max_rdy_msg_size (rdy_magic_size + max_n_tok_bytes) - -#define sysr_tok_end 0 -#define sysr_tok_b_chan 1 /* nr. of b-channels; datalen=1; def: 2 */ -#define sysr_tok_fax_chan 2 /* nr. 
of fax channels; datalen=1; def: 0 */ -#define sysr_tok_mac_addr 3 /* mac-address; datalen=6; def: auto */ -#define sysr_tok_esc 255 /* undefined data size yet */ -/* default values, if not corrected by token: */ -#define sysr_tok_b_chan_def 2 /* assume 2 b-channels */ -#define sysr_tok_fax_chan_def 1 /* assume 1 fax channel */ - -/* syntax of new sysr token stream: - * channel: chan_system - * msgsize: min_rdy_msg_size <= x <= max_rdy_msg_size - * rdy_magic_size <= x <= (rdy_magic_size+max_n_tok_bytes) - * msg : 0 1 2 3 {4 5 6 ..} - * s y s r max_n_tok_bytes bytes of tokenstream - * - * tokenstream := empty - * | {nonendtokenchunk} endtoken rotlcrc - * nonendtokenchunk:= nonendtokenid datalen [data] - * nonendtokenid := 0x01 .. 0xfe 1 byte - * datalen := 0x00 .. 0xff 1 byte - * data := datalen bytes - * endtoken := 0x00 - * rotlcrc := special 1 byte crc over all nonendtokenchunk bytes - * s. rotlcrc algorithm - * - * rotlcrc algorithm: - * ucsum= 0 1 unsigned char - * for all nonendtokenchunk bytes: - * rotl(ucsum,1) rotate left by 1 - * ucsum += char; add current byte with swap around - * rotlcrc= ~ucsum; invert all bits for result - * - * note: - * - for 16-bit fifo add padding 0 byte to achieve even token data bytes! - */ - -/*--------------------------------------error logger------------------------*/ -/* note: pof needs final 0 ! */ -#define errlog_cmd_req "errlog on" -#define errlog_cmd_req_size 10 /* with final 0 byte ! */ -#define errlog_cmd_stop "errlog off" -#define errlog_cmd_stop_size 11 /* with final 0 byte ! 
*/ - -#define errlog_entry_size 64 /* sizeof(terrlogentry) */ - /* remaining text size = 55 */ -#define errlog_text_size (errlog_entry_size - 2 * 4 - 1) - -typedef struct errlogentry_tag { - - /*00 */ unsigned long ulerrtype; - - /*04 */ unsigned long ulerrsubtype; - - /*08 */ unsigned char uctextsize; - - /*09 */ unsigned char uctext[errlog_text_size]; - /* asciiz of len uctextsize-1 */ - -/*40 */ -} terrlogentry; - - -#if defined(__turboc__) -#if sizeof(terrlogentry) != errlog_entry_size -#error size of terrlogentry != errlog_entry_size -#endif /* */ -#endif /* */ - -/*--------------------------------------dpram boot spooler------------------*/ -/* this is the struture used between pc and - * hyperstone to exchange boot data - */ -#define dpram_spooler_data_size 0x20 -typedef struct dprambootspooler_tag { - - /*00 */ unsigned char len; - - /*01 */ volatile unsigned char rdptr; - - /*02 */ unsigned char wrptr; - - /*03 */ unsigned char data[dpram_spooler_data_size]; - -/*23 */ -} tdprambootspooler; - - -#define dpram_spooler_min_size 5 /* len+rdptr+wrptr+2*data */ -#define dpram_spooler_def_size 0x23 /* current default size */ - -/*--------------------------------------hycard/ergo dpram softuart----------*/ -/* at dpram offset 0x1c00: */ -#define size_rsv_soft_uart 0x1b0 /* 432 bytes reserved for softuart */ - - -#endif /* __ince1pc_h__ */ diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h --- a/include/linux/b1pcmcia.h +++ /dev/null -/* $id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai exp $ - * - * exported functions of module b1pcmcia to be called by - * avm_cs card services module. - * - * copyright 1999 by carsten paeth (calle@calle.in-berlin.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. 
- * - */ - -#ifndef _b1pcmcia_h_ -#define _b1pcmcia_h_ - -int b1pcmcia_addcard_b1(unsigned int port, unsigned irq); -int b1pcmcia_addcard_m1(unsigned int port, unsigned irq); -int b1pcmcia_addcard_m2(unsigned int port, unsigned irq); -int b1pcmcia_delcard(unsigned int port, unsigned irq); - -#endif /* _b1pcmcia_h_ */ diff --git a/include/uapi/linux/gigaset_dev.h b/include/uapi/linux/gigaset_dev.h --- a/include/uapi/linux/gigaset_dev.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0+ with linux-syscall-note */ -/* - * interface to user space for the gigaset driver - * - * copyright (c) 2004 by hansjoerg lipp <hjlipp@web.de> - * - * ===================================================================== - * this program is free software; you can redistribute it and/or - * modify it under the terms of the gnu general public license as - * published by the free software foundation; either version 2 of - * the license, or (at your option) any later version. - * ===================================================================== - */ - -#ifndef gigaset_interface_h -#define gigaset_interface_h - -#include <linux/ioctl.h> - -/* the magic ioctl value for this interface. 
*/ -#define gigaset_ioctl 0x47 - -/* enable/disable device control via character device (lock out isdn subsys) */ -#define gigaset_redir _iowr(gigaset_ioctl, 0, int) - -/* enable adapter configuration mode (m10x only) */ -#define gigaset_config _iowr(gigaset_ioctl, 1, int) - -/* set break characters (m105 only) */ -#define gigaset_brkchars _iow(gigaset_ioctl, 2, unsigned char[6]) - -/* get version information selected by arg[0] */ -#define gigaset_version _iowr(gigaset_ioctl, 3, unsigned[4]) -/* values for gigaset_version arg[0] */ -#define gigver_driver 0 /* get driver version */ -#define gigver_compat 1 /* get interface compatibility version */ -#define gigver_fwbase 2 /* get base station firmware version */ - -#endif diff --git a/include/uapi/linux/hysdn_if.h b/include/uapi/linux/hysdn_if.h --- a/include/uapi/linux/hysdn_if.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 with linux-syscall-note */ -/* $id: hysdn_if.h,v 1.1.8.3 2001/09/23 22:25:05 kai exp $ - * - * linux driver for hysdn cards - * ioctl definitions shared by hynetmgr and driver. - * - * author werner cornelius (werner@titro.de) for hypercope gmbh - * copyright 1999 by werner cornelius (werner@titro.de) - * - * this software may be used and distributed according to the terms - * of the gnu general public license, incorporated herein by reference. - * - */ - -/****************/ -/* error values */ -/****************/ -#define err_none 0 /* no error occurred */ -#define err_already_boot 1000 /* we are already booting */ -#define epof_bad_magic 1001 /* bad magic in pof header */ -#define err_board_dpram 1002 /* board dpram failed */ -#define epof_internal 1003 /* internal pof handler error */ -#define epof_bad_img_size 1004 /* pof boot image size invalid */ -#define err_bootimg_fail 1005 /* 1. stage boot image did not start */ -#define err_bootseq_fail 1006 /* 2. 
stage boot seq handshake timeout */ -#define err_pof_timeout 1007 /* timeout waiting for card pof ready */ -#define err_not_booted 1008 /* operation only allowed when booted */ -#define err_conf_long 1009 /* conf line is too long */ -#define err_inv_chan 1010 /* invalid channel number */ -#define err_async_time 1011 /* timeout sending async data */ - - - -
|
Drivers in the Staging area
|
f10870b05d5edc0652701c6a92eafcab5044795f
|
arnd bergmann
|
drivers
|
staging
|
avm, gigaset, hysdn, ioctl, isdn, linux
|
staging: octeon: delete driver
|
this driver has been in the tree since 2009 with no real movement to get it out. now it is starting to cause build issues and other problems for people who want to fix coding style problems, but can not actually build it.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
delete driver
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['octeon']
|
['c', 'kconfig', 'makefile', 'h', 'todo']
| 21
| 0
| 4,749
|
--- diff --git a/drivers/staging/kconfig b/drivers/staging/kconfig --- a/drivers/staging/kconfig +++ b/drivers/staging/kconfig -source "drivers/staging/octeon/kconfig" - diff --git a/drivers/staging/makefile b/drivers/staging/makefile --- a/drivers/staging/makefile +++ b/drivers/staging/makefile -obj-$(config_octeon_ethernet) += octeon/ diff --git a/drivers/staging/octeon/kconfig b/drivers/staging/octeon/kconfig --- a/drivers/staging/octeon/kconfig +++ /dev/null -# spdx-license-identifier: gpl-2.0 -config octeon_ethernet - tristate "cavium networks octeon ethernet support" - depends on cavium_octeon_soc || compile_test - depends on netdevices - depends on broken - select phylib - select mdio_octeon - help - this driver supports the builtin ethernet ports on cavium - networks' products in the octeon family. this driver supports the - cn3xxx and cn5xxx octeon processors. - - to compile this driver as a module, choose m here. the module - will be called octeon-ethernet. - diff --git a/drivers/staging/octeon/makefile b/drivers/staging/octeon/makefile --- a/drivers/staging/octeon/makefile +++ /dev/null -# spdx-license-identifier: gpl-2.0 -# -# copyright (c) 2005-2009 cavium networks -# - -# -# makefile for cavium octeon on-board ethernet driver -# - -obj-${config_octeon_ethernet} := octeon-ethernet.o - -octeon-ethernet-y := ethernet.o -octeon-ethernet-y += ethernet-mdio.o -octeon-ethernet-y += ethernet-mem.o -octeon-ethernet-y += ethernet-rgmii.o -octeon-ethernet-y += ethernet-rx.o -octeon-ethernet-y += ethernet-sgmii.o -octeon-ethernet-y += ethernet-spi.o -octeon-ethernet-y += ethernet-tx.o diff --git a/drivers/staging/octeon/todo b/drivers/staging/octeon/todo --- a/drivers/staging/octeon/todo +++ /dev/null -this driver is functional and supports ethernet on octeon+/octeon2/octeon3 -chips at least up to cn7030. - -todo: - - general code review and clean up - - make driver self-contained instead of being split between staging and - arch/mips/cavium-octeon. 
- -contact: aaro koskinen <aaro.koskinen@iki.fi> diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h --- a/drivers/staging/octeon/ethernet-defines.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -/* - * a few defines are used to control the operation of this driver: - * use_async_iobdma - * use asynchronous io access to hardware. this uses octeon's asynchronous - * iobdmas to issue io accesses without stalling. set this to zero - * to disable this. note that iobdmas require cvmseg. - * reuse_skbuffs_without_free - * allows the tx path to free an skbuff into the fpa hardware pool. this - * can significantly improve performance for forwarding and bridging, but - * may be somewhat dangerous. checks are made, but if any buffer is reused - * without the proper linux cleanup, the networking stack may have very - * bizarre bugs. - */ -#ifndef __ethernet_defines_h__ -#define __ethernet_defines_h__ - -#ifdef config_netfilter -#define reuse_skbuffs_without_free 0 -#else -#define reuse_skbuffs_without_free 1 -#endif - -#define use_async_iobdma (config_cavium_octeon_cvmseg_size > 0) - -/* maximum number of skbs to try to free per xmit packet. */ -#define max_out_queue_depth 1000 - -#define fau_total_tx_to_clean (cvmx_fau_reg_end - sizeof(u32)) -#define fau_num_packet_buffers_to_free (fau_total_tx_to_clean - sizeof(u32)) - -#define total_number_of_ports (cvmx_pip_num_input_ports + 1) - -#endif /* __ethernet_defines_h__ */ diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c --- a/drivers/staging/octeon/ethernet-mdio.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2007 cavium networks - */ - -#include <linux/kernel.h> -#include <linux/ethtool.h> -#include <linux/phy.h> -#include <linux/ratelimit.h> -#include <linux/of_mdio.h> -#include <generated/utsrelease.h> -#include <net/dst.h> - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-mdio.h" -#include "ethernet-util.h" - -static void cvm_oct_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, kbuild_modname, sizeof(info->driver)); - strlcpy(info->version, uts_release, sizeof(info->version)); - strlcpy(info->bus_info, "builtin", sizeof(info->bus_info)); -} - -static int cvm_oct_nway_reset(struct net_device *dev) -{ - if (!capable(cap_net_admin)) - return -eperm; - - if (dev->phydev) - return phy_start_aneg(dev->phydev); - - return -einval; -} - -const struct ethtool_ops cvm_oct_ethtool_ops = { - .get_drvinfo = cvm_oct_get_drvinfo, - .nway_reset = cvm_oct_nway_reset, - .get_link = ethtool_op_get_link, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -/** - * cvm_oct_ioctl - ioctl support for phy control - * @dev: device to change - * @rq: the request - * @cmd: the command - * - * returns zero on success - */ -int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - if (!netif_running(dev)) - return -einval; - - if (!dev->phydev) - return -einval; - - return phy_mii_ioctl(dev->phydev, rq, cmd); -} - -void cvm_oct_note_carrier(struct octeon_ethernet *priv, - union cvmx_helper_link_info li) -{ - if (li.s.link_up) { - pr_notice_ratelimited("%s: %u mbps %s duplex, port %d, queue %d ", - netdev_name(priv->netdev), li.s.speed, - (li.s.full_duplex) ? 
"full" : "half", - priv->port, priv->queue); - } else { - pr_notice_ratelimited("%s: link down ", - netdev_name(priv->netdev)); - } -} - -void cvm_oct_adjust_link(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - union cvmx_helper_link_info link_info; - - link_info.u64 = 0; - link_info.s.link_up = dev->phydev->link ? 1 : 0; - link_info.s.full_duplex = dev->phydev->duplex ? 1 : 0; - link_info.s.speed = dev->phydev->speed; - priv->link_info = link_info.u64; - - /* - * the polling task need to know about link status changes. - */ - if (priv->poll) - priv->poll(dev); - - if (priv->last_link != dev->phydev->link) { - priv->last_link = dev->phydev->link; - cvmx_helper_link_set(priv->port, link_info); - cvm_oct_note_carrier(priv, link_info); - } -} - -int cvm_oct_common_stop(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - int interface = interface(priv->port); - union cvmx_helper_link_info link_info; - union cvmx_gmxx_prtx_cfg gmx_cfg; - int index = index(priv->port); - - gmx_cfg.u64 = cvmx_read_csr(cvmx_gmxx_prtx_cfg(index, interface)); - gmx_cfg.s.en = 0; - cvmx_write_csr(cvmx_gmxx_prtx_cfg(index, interface), gmx_cfg.u64); - - priv->poll = null; - - if (dev->phydev) - phy_disconnect(dev->phydev); - - if (priv->last_link) { - link_info.u64 = 0; - priv->last_link = 0; - - cvmx_helper_link_set(priv->port, link_info); - cvm_oct_note_carrier(priv, link_info); - } - return 0; -} - -/** - * cvm_oct_phy_setup_device - setup the phy - * - * @dev: device to setup - * - * returns zero on success, negative on failure - */ -int cvm_oct_phy_setup_device(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - struct device_node *phy_node; - struct phy_device *phydev = null; - - if (!priv->of_node) - goto no_phy; - - phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0); - if (!phy_node && of_phy_is_fixed_link(priv->of_node)) { - int rc; - - rc = of_phy_register_fixed_link(priv->of_node); - if 
(rc) - return rc; - - phy_node = of_node_get(priv->of_node); - } - if (!phy_node) - goto no_phy; - - phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, - priv->phy_mode); - of_node_put(phy_node); - - if (!phydev) - return -enodev; - - priv->last_link = 0; - phy_start(phydev); - - return 0; -no_phy: - /* if there is no phy, assume a direct mac connection and that - * the link is up. - */ - netif_carrier_on(dev); - return 0; -} diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h --- a/drivers/staging/octeon/ethernet-mdio.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ip.h> -#include <linux/string.h> -#include <linux/ethtool.h> -#include <linux/seq_file.h> -#include <linux/proc_fs.h> -#include <net/dst.h> -#ifdef config_xfrm -#include <linux/xfrm.h> -#include <net/xfrm.h> -#endif /* config_xfrm */ - -extern const struct ethtool_ops cvm_oct_ethtool_ops; - -void octeon_mdiobus_force_mod_depencency(void); - -int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -int cvm_oct_phy_setup_device(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c --- a/drivers/staging/octeon/ethernet-mem.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2010 cavium networks - */ - -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/slab.h> - -#include "octeon-ethernet.h" -#include "ethernet-mem.h" -#include "ethernet-defines.h" - -/** - * cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs - * @pool: pool to allocate an skbuff for - * @size: size of the buffer needed for the pool - * @elements: number of buffers to allocate - * - * returns the actual number of buffers allocated. - */ -static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements) -{ - int freed = elements; - - while (freed) { - struct sk_buff *skb = dev_alloc_skb(size + 256); - - if (unlikely(!skb)) - break; - skb_reserve(skb, 256 - (((unsigned long)skb->data) & 0x7f)); - *(struct sk_buff **)(skb->data - sizeof(void *)) = skb; - cvmx_fpa_free(skb->data, pool, size / 128); - freed--; - } - return elements - freed; -} - -/** - * cvm_oct_free_hw_skbuff- free hardware pool skbuffs - * @pool: pool to allocate an skbuff for - * @size: size of the buffer needed for the pool - * @elements: number of buffers to allocate - */ -static void cvm_oct_free_hw_skbuff(int pool, int size, int elements) -{ - char *memory; - - do { - memory = cvmx_fpa_alloc(pool); - if (memory) { - struct sk_buff *skb = - *(struct sk_buff **)(memory - sizeof(void *)); - elements--; - dev_kfree_skb(skb); - } - } while (memory); - - if (elements < 0) - pr_warn("freeing of pool %u had too many skbuffs (%d) ", - pool, elements); - else if (elements > 0) - pr_warn("freeing of pool %u is missing %d skbuffs ", - pool, elements); -} - -/** - * cvm_oct_fill_hw_memory - fill a hardware pool with memory. - * @pool: pool to populate - * @size: size of each buffer in the pool - * @elements: number of buffers to allocate - * - * returns the actual number of buffers allocated. 
- */ -static int cvm_oct_fill_hw_memory(int pool, int size, int elements) -{ - char *memory; - char *fpa; - int freed = elements; - - while (freed) { - /* - * fpa memory must be 128 byte aligned. since we are - * aligning we need to save the original pointer so we - * can feed it to kfree when the memory is returned to - * the kernel. - * - * we allocate an extra 256 bytes to allow for - * alignment and space for the original pointer saved - * just before the block. - */ - memory = kmalloc(size + 256, gfp_atomic); - if (unlikely(!memory)) { - pr_warn("unable to allocate %u bytes for fpa pool %d ", - elements * size, pool); - break; - } - fpa = (char *)(((unsigned long)memory + 256) & ~0x7ful); - *((char **)fpa - 1) = memory; - cvmx_fpa_free(fpa, pool, 0); - freed--; - } - return elements - freed; -} - -/** - * cvm_oct_free_hw_memory - free memory allocated by cvm_oct_fill_hw_memory - * @pool: fpa pool to free - * @size: size of each buffer in the pool - * @elements: number of buffers that should be in the pool - */ -static void cvm_oct_free_hw_memory(int pool, int size, int elements) -{ - char *memory; - char *fpa; - - do { - fpa = cvmx_fpa_alloc(pool); - if (fpa) { - elements--; - fpa = (char *)phys_to_virt(cvmx_ptr_to_phys(fpa)); - memory = *((char **)fpa - 1); - kfree(memory); - } - } while (fpa); - - if (elements < 0) - pr_warn("freeing of pool %u had too many buffers (%d) ", - pool, elements); - else if (elements > 0) - pr_warn("warning: freeing of pool %u is missing %d buffers ", - pool, elements); -} - -int cvm_oct_mem_fill_fpa(int pool, int size, int elements) -{ - int freed; - - if (pool == cvmx_fpa_packet_pool) - freed = cvm_oct_fill_hw_skbuff(pool, size, elements); - else - freed = cvm_oct_fill_hw_memory(pool, size, elements); - return freed; -} - -void cvm_oct_mem_empty_fpa(int pool, int size, int elements) -{ - if (pool == cvmx_fpa_packet_pool) - cvm_oct_free_hw_skbuff(pool, size, elements); - else - cvm_oct_free_hw_memory(pool, size, elements); -} 
diff --git a/drivers/staging/octeon/ethernet-mem.h b/drivers/staging/octeon/ethernet-mem.h --- a/drivers/staging/octeon/ethernet-mem.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -int cvm_oct_mem_fill_fpa(int pool, int size, int elements); -void cvm_oct_mem_empty_fpa(int pool, int size, int elements); diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c --- a/drivers/staging/octeon/ethernet-rgmii.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/interrupt.h> -#include <linux/phy.h> -#include <linux/ratelimit.h> -#include <net/dst.h> - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-util.h" -#include "ethernet-mdio.h" - -static define_spinlock(global_register_lock); - -static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable) -{ - union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; - union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs; - union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; - int interface = interface(priv->port); - int index = index(priv->port); - - /* set preamble checking. */ - gmxx_rxx_frm_ctl.u64 = cvmx_read_csr(cvmx_gmxx_rxx_frm_ctl(index, - interface)); - gmxx_rxx_frm_ctl.s.pre_chk = enable; - cvmx_write_csr(cvmx_gmxx_rxx_frm_ctl(index, interface), - gmxx_rxx_frm_ctl.u64); - - /* set fcs stripping. */ - ipd_sub_port_fcs.u64 = cvmx_read_csr(cvmx_ipd_sub_port_fcs); - if (enable) - ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port; - else - ipd_sub_port_fcs.s.port_bit &= - 0xffffffffull ^ (1ull << priv->port); - cvmx_write_csr(cvmx_ipd_sub_port_fcs, ipd_sub_port_fcs.u64); - - /* clear any error bits. 
*/ - gmxx_rxx_int_reg.u64 = cvmx_read_csr(cvmx_gmxx_rxx_int_reg(index, - interface)); - cvmx_write_csr(cvmx_gmxx_rxx_int_reg(index, interface), - gmxx_rxx_int_reg.u64); -} - -static void cvm_oct_check_preamble_errors(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - union cvmx_helper_link_info link_info; - unsigned long flags; - - link_info.u64 = priv->link_info; - - /* - * take the global register lock since we are going to - * touch registers that affect more than one port. - */ - spin_lock_irqsave(&global_register_lock, flags); - - if (link_info.s.speed == 10 && priv->last_speed == 10) { - /* - * read the gmxx_rxx_int_reg[pcterr] bit and see if we are - * getting preamble errors. - */ - int interface = interface(priv->port); - int index = index(priv->port); - union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg; - - gmxx_rxx_int_reg.u64 = cvmx_read_csr(cvmx_gmxx_rxx_int_reg - (index, interface)); - if (gmxx_rxx_int_reg.s.pcterr) { - /* - * we are getting preamble errors at 10mbps. most - * likely the phy is giving us packets with misaligned - * preambles. in order to get these packets we need to - * disable preamble checking and do it in software. - */ - cvm_oct_set_hw_preamble(priv, false); - printk_ratelimited("%s: using 10mbps with software preamble removal ", - dev->name); - } - } else { - /* - * since the 10mbps preamble workaround is allowed we need to - * enable preamble checking, fcs stripping, and clear error - * bits on every speed change. 
if errors occur during 10mbps - * operation the above code will change this stuff - */ - if (priv->last_speed != link_info.s.speed) - cvm_oct_set_hw_preamble(priv, true); - priv->last_speed = link_info.s.speed; - } - spin_unlock_irqrestore(&global_register_lock, flags); -} - -static void cvm_oct_rgmii_poll(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - union cvmx_helper_link_info link_info; - bool status_change; - - link_info = cvmx_helper_link_get(priv->port); - if (priv->link_info != link_info.u64 && - cvmx_helper_link_set(priv->port, link_info)) - link_info.u64 = priv->link_info; - status_change = priv->link_info != link_info.u64; - priv->link_info = link_info.u64; - - cvm_oct_check_preamble_errors(dev); - - if (likely(!status_change)) - return; - - /* tell core. */ - if (link_info.s.link_up) { - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - } else if (netif_carrier_ok(dev)) { - netif_carrier_off(dev); - } - cvm_oct_note_carrier(priv, link_info); -} - -int cvm_oct_rgmii_open(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - int ret; - - ret = cvm_oct_common_open(dev, cvm_oct_rgmii_poll); - if (ret) - return ret; - - if (dev->phydev) { - /* - * in phydev mode, we need still periodic polling for the - * preamble error checking, and we also need to call this - * function on every link state change. - * - * only true rgmii ports need to be polled. in gmii mode, port - * 0 is really a rgmii port. - */ - if ((priv->imode == cvmx_helper_interface_mode_gmii && - priv->port == 0) || - (priv->imode == cvmx_helper_interface_mode_rgmii)) { - priv->poll = cvm_oct_check_preamble_errors; - cvm_oct_check_preamble_errors(dev); - } - } - - return 0; -} diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c --- a/drivers/staging/octeon/ethernet-rx.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2010 cavium networks - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/cache.h> -#include <linux/cpumask.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ip.h> -#include <linux/string.h> -#include <linux/prefetch.h> -#include <linux/ratelimit.h> -#include <linux/smp.h> -#include <linux/interrupt.h> -#include <net/dst.h> -#ifdef config_xfrm -#include <linux/xfrm.h> -#include <net/xfrm.h> -#endif /* config_xfrm */ - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-mem.h" -#include "ethernet-rx.h" -#include "ethernet-util.h" - -static atomic_t oct_rx_ready = atomic_init(0); - -static struct oct_rx_group { - int irq; - int group; - struct napi_struct napi; -} oct_rx_group[16]; - -/** - * cvm_oct_do_interrupt - interrupt handler. - * @irq: interrupt number. - * @napi_id: cookie to identify the napi instance. - * - * the interrupt occurs whenever the pow has packets in our group. - * - */ -static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id) -{ - /* disable the irq and start napi_poll. */ - disable_irq_nosync(irq); - napi_schedule(napi_id); - - return irq_handled; -} - -/** - * cvm_oct_check_rcv_error - process receive errors - * @work: work queue entry pointing to the packet. - * - * returns non-zero if the packet can be dropped, zero otherwise. - */ -static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work) -{ - int port; - - if (octeon_has_feature(octeon_feature_pknd)) - port = work->word0.pip.cn68xx.pknd; - else - port = work->word1.cn38xx.ipprt; - - if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) { - /* - * ignore length errors on min size packets. some - * equipment incorrectly pads packets to 64+4fcs - * instead of 60+4fcs. note these packets still get - * counted as frame errors. 
- */ - } else if (work->word2.snoip.err_code == 5 || - work->word2.snoip.err_code == 7) { - /* - * we received a packet with either an alignment error - * or a fcs error. this may be signalling that we are - * running 10mbps with gmxx_rxx_frm_ctl[pre_chk] - * off. if this is the case we need to parse the - * packet to determine if we can remove a non spec - * preamble and generate a correct packet. - */ - int interface = cvmx_helper_get_interface_num(port); - int index = cvmx_helper_get_interface_index_num(port); - union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; - - gmxx_rxx_frm_ctl.u64 = - cvmx_read_csr(cvmx_gmxx_rxx_frm_ctl(index, interface)); - if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { - u8 *ptr = - cvmx_phys_to_ptr(work->packet_ptr.s.addr); - int i = 0; - - while (i < work->word1.len - 1) { - if (*ptr != 0x55) - break; - ptr++; - i++; - } - - if (*ptr == 0xd5) { - /* port received 0xd5 preamble */ - work->packet_ptr.s.addr += i + 1; - work->word1.len -= i + 5; - } else if ((*ptr & 0xf) == 0xd) { - /* port received 0xd preamble */ - work->packet_ptr.s.addr += i; - work->word1.len -= i + 4; - for (i = 0; i < work->word1.len; i++) { - *ptr = - ((*ptr & 0xf0) >> 4) | - ((*(ptr + 1) & 0xf) << 4); - ptr++; - } - } else { - printk_ratelimited("port %d unknown preamble, packet dropped ", - port); - cvm_oct_free_work(work); - return 1; - } - } - } else { - printk_ratelimited("port %d receive error code %d, packet dropped ", - port, work->word2.snoip.err_code); - cvm_oct_free_work(work); - return 1; - } - - return 0; -} - -static void copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb) -{ - int segments = work->word2.s.bufs; - union cvmx_buf_ptr segment_ptr = work->packet_ptr; - int len = work->word1.len; - int segment_size; - - while (segments--) { - union cvmx_buf_ptr next_ptr; - - next_ptr = *(union cvmx_buf_ptr *) - cvmx_phys_to_ptr(segment_ptr.s.addr - 8); - - /* - * octeon errata pki-100: the segment size is wrong. 
- * - * until it is fixed, calculate the segment size based on - * the packet pool buffer size. - * when it is fixed, the following line should be replaced - * with this one: - * int segment_size = segment_ptr.s.size; - */ - segment_size = - cvmx_fpa_packet_pool_size - - (segment_ptr.s.addr - - (((segment_ptr.s.addr >> 7) - - segment_ptr.s.back) << 7)); - - /* don't copy more than what is left in the packet */ - if (segment_size > len) - segment_size = len; - - /* copy the data into the packet */ - skb_put_data(skb, cvmx_phys_to_ptr(segment_ptr.s.addr), - segment_size); - len -= segment_size; - segment_ptr = next_ptr; - } -} - -static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget) -{ - const int coreid = cvmx_get_core_num(); - u64 old_group_mask; - u64 old_scratch; - int rx_count = 0; - int did_work_request = 0; - int packet_not_copied; - - /* prefetch cvm_oct_device since we know we need it soon */ - prefetch(cvm_oct_device); - - if (use_async_iobdma) { - /* save scratch in case userspace is using it */ - cvmx_synciobdma; - old_scratch = cvmx_scratch_read64(cvmx_scr_scratch); - } - - /* only allow work for our group (and preserve priorities) */ - if (octeon_is_model(octeon_cn68xx)) { - old_group_mask = cvmx_read_csr(cvmx_sso_ppx_grp_msk(coreid)); - cvmx_write_csr(cvmx_sso_ppx_grp_msk(coreid), - bit(rx_group->group)); - cvmx_read_csr(cvmx_sso_ppx_grp_msk(coreid)); /* flush */ - } else { - old_group_mask = cvmx_read_csr(cvmx_pow_pp_grp_mskx(coreid)); - cvmx_write_csr(cvmx_pow_pp_grp_mskx(coreid), - (old_group_mask & ~0xffffull) | - bit(rx_group->group)); - } - - if (use_async_iobdma) { - cvmx_pow_work_request_async(cvmx_scr_scratch, cvmx_pow_no_wait); - did_work_request = 1; - } - - while (rx_count < budget) { - struct sk_buff *skb = null; - struct sk_buff **pskb = null; - int skb_in_hw; - struct cvmx_wqe *work; - int port; - - if (use_async_iobdma && did_work_request) - work = cvmx_pow_work_response_async(cvmx_scr_scratch); - else - work = 
cvmx_pow_work_request_sync(cvmx_pow_no_wait); - - prefetch(work); - did_work_request = 0; - if (!work) { - if (octeon_is_model(octeon_cn68xx)) { - cvmx_write_csr(cvmx_sso_wq_iq_dis, - bit(rx_group->group)); - cvmx_write_csr(cvmx_sso_wq_int, - bit(rx_group->group)); - } else { - union cvmx_pow_wq_int wq_int; - - wq_int.u64 = 0; - wq_int.s.iq_dis = bit(rx_group->group); - wq_int.s.wq_int = bit(rx_group->group); - cvmx_write_csr(cvmx_pow_wq_int, wq_int.u64); - } - break; - } - pskb = (struct sk_buff **) - (cvm_oct_get_buffer_ptr(work->packet_ptr) - - sizeof(void *)); - prefetch(pskb); - - if (use_async_iobdma && rx_count < (budget - 1)) { - cvmx_pow_work_request_async_nocheck(cvmx_scr_scratch, - cvmx_pow_no_wait); - did_work_request = 1; - } - rx_count++; - - skb_in_hw = work->word2.s.bufs == 1; - if (likely(skb_in_hw)) { - skb = *pskb; - prefetch(&skb->head); - prefetch(&skb->len); - } - - if (octeon_has_feature(octeon_feature_pknd)) - port = work->word0.pip.cn68xx.pknd; - else - port = work->word1.cn38xx.ipprt; - - prefetch(cvm_oct_device[port]); - - /* immediately throw away all packets with receive errors */ - if (unlikely(work->word2.snoip.rcv_error)) { - if (cvm_oct_check_rcv_error(work)) - continue; - } - - /* - * we can only use the zero copy path if skbuffs are - * in the fpa pool and the packet fits in a single - * buffer. - */ - if (likely(skb_in_hw)) { - skb->data = skb->head + work->packet_ptr.s.addr - - cvmx_ptr_to_phys(skb->head); - prefetch(skb->data); - skb->len = work->word1.len; - skb_set_tail_pointer(skb, skb->len); - packet_not_copied = 1; - } else { - /* - * we have to copy the packet. first allocate - * an skbuff for it. - */ - skb = dev_alloc_skb(work->word1.len); - if (!skb) { - cvm_oct_free_work(work); - continue; - } - - /* - * check if we've received a packet that was - * entirely stored in the work entry. 
- */ - if (unlikely(work->word2.s.bufs == 0)) { - u8 *ptr = work->packet_data; - - if (likely(!work->word2.s.not_ip)) { - /* - * the beginning of the packet - * moves for ip packets. - */ - if (work->word2.s.is_v6) - ptr += 2; - else - ptr += 6; - } - skb_put_data(skb, ptr, work->word1.len); - /* no packet buffers to free */ - } else { - copy_segments_to_skb(work, skb); - } - packet_not_copied = 0; - } - if (likely((port < total_number_of_ports) && - cvm_oct_device[port])) { - struct net_device *dev = cvm_oct_device[port]; - - /* - * only accept packets for devices that are - * currently up. - */ - if (likely(dev->flags & iff_up)) { - skb->protocol = eth_type_trans(skb, dev); - skb->dev = dev; - - if (unlikely(work->word2.s.not_ip || - work->word2.s.ip_exc || - work->word2.s.l4_error || - !work->word2.s.tcp_or_udp)) - skb->ip_summed = checksum_none; - else - skb->ip_summed = checksum_unnecessary; - - /* increment rx stats for virtual ports */ - if (port >= cvmx_pip_num_input_ports) { - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; - } - netif_receive_skb(skb); - } else { - /* - * drop any packet received for a device that - * isn't up. - */ - dev->stats.rx_dropped++; - dev_kfree_skb_irq(skb); - } - } else { - /* - * drop any packet received for a device that - * doesn't exist. - */ - printk_ratelimited("port %d not controlled by linux, packet dropped ", - port); - dev_kfree_skb_irq(skb); - } - /* - * check to see if the skbuff and work share the same - * packet buffer. - */ - if (likely(packet_not_copied)) { - /* - * this buffer needs to be replaced, increment - * the number of buffers we need to free by - * one. 
- */ - cvmx_fau_atomic_add32(fau_num_packet_buffers_to_free, - 1); - - cvmx_fpa_free(work, cvmx_fpa_wqe_pool, 1); - } else { - cvm_oct_free_work(work); - } - } - /* restore the original pow group mask */ - if (octeon_is_model(octeon_cn68xx)) { - cvmx_write_csr(cvmx_sso_ppx_grp_msk(coreid), old_group_mask); - cvmx_read_csr(cvmx_sso_ppx_grp_msk(coreid)); /* flush */ - } else { - cvmx_write_csr(cvmx_pow_pp_grp_mskx(coreid), old_group_mask); - } - - if (use_async_iobdma) { - /* restore the scratch area */ - cvmx_scratch_write64(cvmx_scr_scratch, old_scratch); - } - cvm_oct_rx_refill_pool(0); - - return rx_count; -} - -/** - * cvm_oct_napi_poll - the napi poll function. - * @napi: the napi instance. - * @budget: maximum number of packets to receive. - * - * returns the number of packets processed. - */ -static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) -{ - struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group, - napi); - int rx_count; - - rx_count = cvm_oct_poll(rx_group, budget); - - if (rx_count < budget) { - /* no more work */ - napi_complete_done(napi, rx_count); - enable_irq(rx_group->irq); - } - return rx_count; -} - -#ifdef config_net_poll_controller -/** - * cvm_oct_poll_controller - poll for receive packets - * device. - * - * @dev: device to poll. 
unused - */ -void cvm_oct_poll_controller(struct net_device *dev) -{ - int i; - - if (!atomic_read(&oct_rx_ready)) - return; - - for (i = 0; i < array_size(oct_rx_group); i++) { - if (!(pow_receive_groups & bit(i))) - continue; - - cvm_oct_poll(&oct_rx_group[i], 16); - } -} -#endif - -void cvm_oct_rx_initialize(void) -{ - int i; - struct net_device *dev_for_napi = null; - - for (i = 0; i < total_number_of_ports; i++) { - if (cvm_oct_device[i]) { - dev_for_napi = cvm_oct_device[i]; - break; - } - } - - if (!dev_for_napi) - panic("no net_devices were allocated."); - - for (i = 0; i < array_size(oct_rx_group); i++) { - int ret; - - if (!(pow_receive_groups & bit(i))) - continue; - - netif_napi_add(dev_for_napi, &oct_rx_group[i].napi, - cvm_oct_napi_poll, rx_napi_weight); - napi_enable(&oct_rx_group[i].napi); - - oct_rx_group[i].irq = octeon_irq_workq0 + i; - oct_rx_group[i].group = i; - - /* register an irq handler to receive pow interrupts */ - ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0, - "ethernet", &oct_rx_group[i].napi); - if (ret) - panic("could not acquire ethernet irq %d ", - oct_rx_group[i].irq); - - disable_irq_nosync(oct_rx_group[i].irq); - - /* enable pow interrupt when our port has at least one packet */ - if (octeon_is_model(octeon_cn68xx)) { - union cvmx_sso_wq_int_thrx int_thr; - union cvmx_pow_wq_int_pc int_pc; - - int_thr.u64 = 0; - int_thr.s.tc_en = 1; - int_thr.s.tc_thr = 1; - cvmx_write_csr(cvmx_sso_wq_int_thrx(i), int_thr.u64); - - int_pc.u64 = 0; - int_pc.s.pc_thr = 5; - cvmx_write_csr(cvmx_sso_wq_int_pc, int_pc.u64); - } else { - union cvmx_pow_wq_int_thrx int_thr; - union cvmx_pow_wq_int_pc int_pc; - - int_thr.u64 = 0; - int_thr.s.tc_en = 1; - int_thr.s.tc_thr = 1; - cvmx_write_csr(cvmx_pow_wq_int_thrx(i), int_thr.u64); - - int_pc.u64 = 0; - int_pc.s.pc_thr = 5; - cvmx_write_csr(cvmx_pow_wq_int_pc, int_pc.u64); - } - - /* schedule napi now. this will indirectly enable the - * interrupt. 
- */ - napi_schedule(&oct_rx_group[i].napi); - } - atomic_inc(&oct_rx_ready); -} - -void cvm_oct_rx_shutdown(void) -{ - int i; - - for (i = 0; i < array_size(oct_rx_group); i++) { - if (!(pow_receive_groups & bit(i))) - continue; - - /* disable pow interrupt */ - if (octeon_is_model(octeon_cn68xx)) - cvmx_write_csr(cvmx_sso_wq_int_thrx(i), 0); - else - cvmx_write_csr(cvmx_pow_wq_int_thrx(i), 0); - - /* free the interrupt handler */ - free_irq(oct_rx_group[i].irq, cvm_oct_device); - - netif_napi_del(&oct_rx_group[i].napi); - } -} diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h --- a/drivers/staging/octeon/ethernet-rx.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -void cvm_oct_poll_controller(struct net_device *dev); -void cvm_oct_rx_initialize(void); -void cvm_oct_rx_shutdown(void); - -static inline void cvm_oct_rx_refill_pool(int fill_threshold) -{ - int number_to_free; - int num_freed; - /* refill the packet buffer pool */ - number_to_free = - cvmx_fau_fetch_and_add32(fau_num_packet_buffers_to_free, 0); - - if (number_to_free > fill_threshold) { - cvmx_fau_atomic_add32(fau_num_packet_buffers_to_free, - -number_to_free); - num_freed = cvm_oct_mem_fill_fpa(cvmx_fpa_packet_pool, - cvmx_fpa_packet_pool_size, - number_to_free); - if (num_freed != number_to_free) { - cvmx_fau_atomic_add32(fau_num_packet_buffers_to_free, - number_to_free - num_freed); - } - } -} diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c --- a/drivers/staging/octeon/ethernet-sgmii.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2007 cavium networks - */ - -#include <linux/phy.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/ratelimit.h> -#include <net/dst.h> - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-util.h" -#include "ethernet-mdio.h" - -int cvm_oct_sgmii_open(struct net_device *dev) -{ - return cvm_oct_common_open(dev, cvm_oct_link_poll); -} - -int cvm_oct_sgmii_init(struct net_device *dev) -{ - cvm_oct_common_init(dev); - - /* fixme: need autoneg logic */ - return 0; -} diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c --- a/drivers/staging/octeon/ethernet-spi.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/interrupt.h> -#include <net/dst.h> - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-util.h" - -static int number_spi_ports; -static int need_retrain[2] = { 0, 0 }; - -static void cvm_oct_spxx_int_pr(union cvmx_spxx_int_reg spx_int_reg, int index) -{ - if (spx_int_reg.s.spf) - pr_err("spi%d: srx spi4 interface down ", index); - if (spx_int_reg.s.calerr) - pr_err("spi%d: srx spi4 calendar table parity error ", index); - if (spx_int_reg.s.syncerr) - pr_err("spi%d: srx consecutive spi4 dip4 errors have exceeded spx_err_ctl[errcnt] ", - index); - if (spx_int_reg.s.diperr) - pr_err("spi%d: srx spi4 dip4 error ", index); - if (spx_int_reg.s.tpaovr) - pr_err("spi%d: srx selected port has hit tpa overflow ", - index); - if (spx_int_reg.s.rsverr) - pr_err("spi%d: srx spi4 reserved control word detected ", - index); - if (spx_int_reg.s.drwnng) - pr_err("spi%d: srx spi4 receive fifo drowning/overflow ", - index); - if (spx_int_reg.s.clserr) - pr_err("spi%d: srx spi4 packet closed on non-16b alignment without eop ", - index); - 
if (spx_int_reg.s.spiovr) - pr_err("spi%d: srx spi4 async fifo overflow ", index); - if (spx_int_reg.s.abnorm) - pr_err("spi%d: srx abnormal packet termination (err bit) ", - index); - if (spx_int_reg.s.prtnxa) - pr_err("spi%d: srx port out of range ", index); -} - -static void cvm_oct_stxx_int_pr(union cvmx_stxx_int_reg stx_int_reg, int index) -{ - if (stx_int_reg.s.syncerr) - pr_err("spi%d: stx interface encountered a fatal error ", - index); - if (stx_int_reg.s.frmerr) - pr_err("spi%d: stx frmcnt has exceeded stx_dip_cnt[maxfrm] ", - index); - if (stx_int_reg.s.unxfrm) - pr_err("spi%d: stx unexpected framing sequence ", index); - if (stx_int_reg.s.nosync) - pr_err("spi%d: stx errcnt has exceeded stx_dip_cnt[maxdip] ", - index); - if (stx_int_reg.s.diperr) - pr_err("spi%d: stx dip2 error on the spi4 status channel ", - index); - if (stx_int_reg.s.datovr) - pr_err("spi%d: stx spi4 fifo overflow error ", index); - if (stx_int_reg.s.ovrbst) - pr_err("spi%d: stx transmit packet burst too big ", index); - if (stx_int_reg.s.calpar1) - pr_err("spi%d: stx calendar table parity error bank%d ", - index, 1); - if (stx_int_reg.s.calpar0) - pr_err("spi%d: stx calendar table parity error bank%d ", - index, 0); -} - -static irqreturn_t cvm_oct_spi_spx_int(int index) -{ - union cvmx_spxx_int_reg spx_int_reg; - union cvmx_stxx_int_reg stx_int_reg; - - spx_int_reg.u64 = cvmx_read_csr(cvmx_spxx_int_reg(index)); - cvmx_write_csr(cvmx_spxx_int_reg(index), spx_int_reg.u64); - if (!need_retrain[index]) { - spx_int_reg.u64 &= cvmx_read_csr(cvmx_spxx_int_msk(index)); - cvm_oct_spxx_int_pr(spx_int_reg, index); - } - - stx_int_reg.u64 = cvmx_read_csr(cvmx_stxx_int_reg(index)); - cvmx_write_csr(cvmx_stxx_int_reg(index), stx_int_reg.u64); - if (!need_retrain[index]) { - stx_int_reg.u64 &= cvmx_read_csr(cvmx_stxx_int_msk(index)); - cvm_oct_stxx_int_pr(stx_int_reg, index); - } - - cvmx_write_csr(cvmx_spxx_int_msk(index), 0); - cvmx_write_csr(cvmx_stxx_int_msk(index), 0); - need_retrain[index] 
= 1; - - return irq_handled; -} - -static irqreturn_t cvm_oct_spi_rml_interrupt(int cpl, void *dev_id) -{ - irqreturn_t return_status = irq_none; - union cvmx_npi_rsl_int_blocks rsl_int_blocks; - - /* check and see if this interrupt was caused by the gmx block */ - rsl_int_blocks.u64 = cvmx_read_csr(cvmx_npi_rsl_int_blocks); - if (rsl_int_blocks.s.spx1) /* 19 - spx1_int_reg & stx1_int_reg */ - return_status = cvm_oct_spi_spx_int(1); - - if (rsl_int_blocks.s.spx0) /* 18 - spx0_int_reg & stx0_int_reg */ - return_status = cvm_oct_spi_spx_int(0); - - return return_status; -} - -static void cvm_oct_spi_enable_error_reporting(int interface) -{ - union cvmx_spxx_int_msk spxx_int_msk; - union cvmx_stxx_int_msk stxx_int_msk; - - spxx_int_msk.u64 = cvmx_read_csr(cvmx_spxx_int_msk(interface)); - spxx_int_msk.s.calerr = 1; - spxx_int_msk.s.syncerr = 1; - spxx_int_msk.s.diperr = 1; - spxx_int_msk.s.tpaovr = 1; - spxx_int_msk.s.rsverr = 1; - spxx_int_msk.s.drwnng = 1; - spxx_int_msk.s.clserr = 1; - spxx_int_msk.s.spiovr = 1; - spxx_int_msk.s.abnorm = 1; - spxx_int_msk.s.prtnxa = 1; - cvmx_write_csr(cvmx_spxx_int_msk(interface), spxx_int_msk.u64); - - stxx_int_msk.u64 = cvmx_read_csr(cvmx_stxx_int_msk(interface)); - stxx_int_msk.s.frmerr = 1; - stxx_int_msk.s.unxfrm = 1; - stxx_int_msk.s.nosync = 1; - stxx_int_msk.s.diperr = 1; - stxx_int_msk.s.datovr = 1; - stxx_int_msk.s.ovrbst = 1; - stxx_int_msk.s.calpar1 = 1; - stxx_int_msk.s.calpar0 = 1; - cvmx_write_csr(cvmx_stxx_int_msk(interface), stxx_int_msk.u64); -} - -static void cvm_oct_spi_poll(struct net_device *dev) -{ - static int spi4000_port; - struct octeon_ethernet *priv = netdev_priv(dev); - int interface; - - for (interface = 0; interface < 2; interface++) { - if ((priv->port == interface * 16) && need_retrain[interface]) { - if (cvmx_spi_restart_interface - (interface, cvmx_spi_mode_duplex, 10) == 0) { - need_retrain[interface] = 0; - cvm_oct_spi_enable_error_reporting(interface); - } - } - - /* - * the spi4000 twsi 
interface is very slow. in order - * not to bring the system to a crawl, we only poll a - * single port every second. this means negotiation - * speed changes take up to 10 seconds, but at least - * we don't waste absurd amounts of time waiting for - * twsi. - */ - if (priv->port == spi4000_port) { - /* - * this function does nothing if it is called on an - * interface without a spi4000. - */ - cvmx_spi4000_check_speed(interface, priv->port); - /* - * normal ordering increments. by decrementing - * we only match once per iteration. - */ - spi4000_port--; - if (spi4000_port < 0) - spi4000_port = 10; - } - } -} - -int cvm_oct_spi_init(struct net_device *dev) -{ - int r; - struct octeon_ethernet *priv = netdev_priv(dev); - - if (number_spi_ports == 0) { - r = request_irq(octeon_irq_rml, cvm_oct_spi_rml_interrupt, - irqf_shared, "spi", &number_spi_ports); - if (r) - return r; - } - number_spi_ports++; - - if ((priv->port == 0) || (priv->port == 16)) { - cvm_oct_spi_enable_error_reporting(interface(priv->port)); - priv->poll = cvm_oct_spi_poll; - } - cvm_oct_common_init(dev); - return 0; -} - -void cvm_oct_spi_uninit(struct net_device *dev) -{ - int interface; - - cvm_oct_common_uninit(dev); - number_spi_ports--; - if (number_spi_ports == 0) { - for (interface = 0; interface < 2; interface++) { - cvmx_write_csr(cvmx_spxx_int_msk(interface), 0); - cvmx_write_csr(cvmx_stxx_int_msk(interface), 0); - } - free_irq(octeon_irq_rml, &number_spi_ports); - } -} diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c --- a/drivers/staging/octeon/ethernet-tx.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2010 cavium networks - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/ip.h> -#include <linux/ratelimit.h> -#include <linux/string.h> -#include <linux/interrupt.h> -#include <net/dst.h> -#ifdef config_xfrm -#include <linux/xfrm.h> -#include <net/xfrm.h> -#endif /* config_xfrm */ - -#include <linux/atomic.h> -#include <net/sch_generic.h> - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-tx.h" -#include "ethernet-util.h" - -#define cvm_oct_skb_cb(skb) ((u64 *)((skb)->cb)) - -/* - * you can define get_skbuff_qos() to override how the skbuff output - * function determines which output queue is used. the default - * implementation always uses the base queue for the port. if, for - * example, you wanted to use the skb->priority field, define - * get_skbuff_qos as: #define get_skbuff_qos(skb) ((skb)->priority) - */ -#ifndef get_skbuff_qos -#define get_skbuff_qos(skb) 0 -#endif - -static void cvm_oct_tx_do_cleanup(unsigned long arg); -static declare_tasklet(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); - -/* maximum number of skbs to try to free per xmit packet. */ -#define max_skb_to_free (max_out_queue_depth * 2) - -static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau) -{ - int undo; - - undo = skb_to_free > 0 ? max_skb_to_free : skb_to_free + - max_skb_to_free; - if (undo > 0) - cvmx_fau_atomic_add32(fau, -undo); - skb_to_free = -skb_to_free > max_skb_to_free ? 
max_skb_to_free : - -skb_to_free; - return skb_to_free; -} - -static void cvm_oct_kick_tx_poll_watchdog(void) -{ - union cvmx_ciu_timx ciu_timx; - - ciu_timx.u64 = 0; - ciu_timx.s.one_shot = 1; - ciu_timx.s.len = cvm_oct_tx_poll_interval; - cvmx_write_csr(cvmx_ciu_timx(1), ciu_timx.u64); -} - -static void cvm_oct_free_tx_skbs(struct net_device *dev) -{ - int skb_to_free; - int qos, queues_per_port; - int total_freed = 0; - int total_remaining = 0; - unsigned long flags; - struct octeon_ethernet *priv = netdev_priv(dev); - - queues_per_port = cvmx_pko_get_num_queues(priv->port); - /* drain any pending packets in the free list */ - for (qos = 0; qos < queues_per_port; qos++) { - if (skb_queue_len(&priv->tx_free_list[qos]) == 0) - continue; - skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, - max_skb_to_free); - skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, - priv->fau + qos * 4); - total_freed += skb_to_free; - if (skb_to_free > 0) { - struct sk_buff *to_free_list = null; - - spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); - while (skb_to_free > 0) { - struct sk_buff *t; - - t = __skb_dequeue(&priv->tx_free_list[qos]); - t->next = to_free_list; - to_free_list = t; - skb_to_free--; - } - spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, - flags); - /* do the actual freeing outside of the lock. 
*/ - while (to_free_list) { - struct sk_buff *t = to_free_list; - - to_free_list = to_free_list->next; - dev_kfree_skb_any(t); - } - } - total_remaining += skb_queue_len(&priv->tx_free_list[qos]); - } - if (total_remaining < max_out_queue_depth && netif_queue_stopped(dev)) - netif_wake_queue(dev); - if (total_remaining) - cvm_oct_kick_tx_poll_watchdog(); -} - -/** - * cvm_oct_xmit - transmit a packet - * @skb: packet to send - * @dev: device info structure - * - * returns always returns netdev_tx_ok - */ -int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) -{ - union cvmx_pko_command_word0 pko_command; - union cvmx_buf_ptr hw_buffer; - u64 old_scratch; - u64 old_scratch2; - int qos; - int i; - enum {queue_core, queue_hw, queue_drop} queue_type; - struct octeon_ethernet *priv = netdev_priv(dev); - struct sk_buff *to_free_list; - int skb_to_free; - int buffers_to_free; - u32 total_to_clean; - unsigned long flags; -#if reuse_skbuffs_without_free - unsigned char *fpa_head; -#endif - - /* - * prefetch the private data structure. it is larger than the - * one cache line. - */ - prefetch(priv); - - /* - * the check on cvmx_pko_queues_per_port_* is designed to - * completely remove "qos" in the event neither interface - * supports multiple queues per port. - */ - if ((cvmx_pko_queues_per_port_interface0 > 1) || - (cvmx_pko_queues_per_port_interface1 > 1)) { - qos = get_skbuff_qos(skb); - if (qos <= 0) - qos = 0; - else if (qos >= cvmx_pko_get_num_queues(priv->port)) - qos = 0; - } else { - qos = 0; - } - - if (use_async_iobdma) { - /* save scratch in case userspace is using it */ - cvmx_synciobdma; - old_scratch = cvmx_scratch_read64(cvmx_scr_scratch); - old_scratch2 = cvmx_scratch_read64(cvmx_scr_scratch + 8); - - /* - * fetch and increment the number of packets to be - * freed. 
- */ - cvmx_fau_async_fetch_and_add32(cvmx_scr_scratch + 8, - fau_num_packet_buffers_to_free, - 0); - cvmx_fau_async_fetch_and_add32(cvmx_scr_scratch, - priv->fau + qos * 4, - max_skb_to_free); - } - - /* - * we have space for 6 segment pointers, if there will be more - * than that, we must linearize. - */ - if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { - if (unlikely(__skb_linearize(skb))) { - queue_type = queue_drop; - if (use_async_iobdma) { - /* - * get the number of skbuffs in use - * by the hardware - */ - cvmx_synciobdma; - skb_to_free = - cvmx_scratch_read64(cvmx_scr_scratch); - } else { - /* - * get the number of skbuffs in use - * by the hardware - */ - skb_to_free = - cvmx_fau_fetch_and_add32(priv->fau + - qos * 4, - max_skb_to_free); - } - skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, - priv->fau + - qos * 4); - spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); - goto skip_xmit; - } - } - - /* - * the cn3xxx series of parts has an errata (gmx-401) which - * causes the gmx block to hang if a collision occurs towards - * the end of a <68 byte packet. as a workaround for this, we - * pad packets to be 68 bytes whenever we are in half duplex - * mode. we don't handle the case of having a small packet but - * no room to add the padding. 
the kernel should always give - * us at least a cache line - */ - if ((skb->len < 64) && octeon_is_model(octeon_cn3xxx)) { - union cvmx_gmxx_prtx_cfg gmx_prt_cfg; - int interface = interface(priv->port); - int index = index(priv->port); - - if (interface < 2) { - /* we only need to pad packet in half duplex mode */ - gmx_prt_cfg.u64 = - cvmx_read_csr(cvmx_gmxx_prtx_cfg(index, interface)); - if (gmx_prt_cfg.s.duplex == 0) { - int add_bytes = 64 - skb->len; - - if ((skb_tail_pointer(skb) + add_bytes) <= - skb_end_pointer(skb)) - __skb_put_zero(skb, add_bytes); - } - } - } - - /* build the pko command */ - pko_command.u64 = 0; -#ifdef __little_endian - pko_command.s.le = 1; -#endif - pko_command.s.n2 = 1; /* don't pollute l2 with the outgoing packet */ - pko_command.s.segs = 1; - pko_command.s.total_bytes = skb->len; - pko_command.s.size0 = cvmx_fau_op_size_32; - pko_command.s.subone0 = 1; - - pko_command.s.dontfree = 1; - - /* build the pko buffer pointer */ - hw_buffer.u64 = 0; - if (skb_shinfo(skb)->nr_frags == 0) { - hw_buffer.s.addr = xkphys_to_phys((uintptr_t)skb->data); - hw_buffer.s.pool = 0; - hw_buffer.s.size = skb->len; - } else { - hw_buffer.s.addr = xkphys_to_phys((uintptr_t)skb->data); - hw_buffer.s.pool = 0; - hw_buffer.s.size = skb_headlen(skb); - cvm_oct_skb_cb(skb)[0] = hw_buffer.u64; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *fs = skb_shinfo(skb)->frags + i; - - hw_buffer.s.addr = - xkphys_to_phys((uintptr_t)skb_frag_address(fs)); - hw_buffer.s.size = skb_frag_size(fs); - cvm_oct_skb_cb(skb)[i + 1] = hw_buffer.u64; - } - hw_buffer.s.addr = - xkphys_to_phys((uintptr_t)cvm_oct_skb_cb(skb)); - hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; - pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; - pko_command.s.gather = 1; - goto dont_put_skbuff_in_hw; - } - - /* - * see if we can put this skb in the fpa pool. any strange - * behavior from the linux networking stack will most likely - * be caused by a bug in the following code. 
if some field is - * in use by the network stack and gets carried over when a - * buffer is reused, bad things may happen. if in doubt and - * you dont need the absolute best performance, disable the - * define reuse_skbuffs_without_free. the reuse of buffers has - * shown a 25% increase in performance under some loads. - */ -#if reuse_skbuffs_without_free - fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); - if (unlikely(skb->data < fpa_head)) { - /* tx buffer beginning can't meet fpa alignment constraints */ - goto dont_put_skbuff_in_hw; - } - if (unlikely - ((skb_end_pointer(skb) - fpa_head) < cvmx_fpa_packet_pool_size)) { - /* tx buffer isn't large enough for the fpa */ - goto dont_put_skbuff_in_hw; - } - if (unlikely(skb_shared(skb))) { - /* tx buffer sharing data with someone else */ - goto dont_put_skbuff_in_hw; - } - if (unlikely(skb_cloned(skb))) { - /* tx buffer has been cloned */ - goto dont_put_skbuff_in_hw; - } - if (unlikely(skb_header_cloned(skb))) { - /* tx buffer header has been cloned */ - goto dont_put_skbuff_in_hw; - } - if (unlikely(skb->destructor)) { - /* tx buffer has a destructor */ - goto dont_put_skbuff_in_hw; - } - if (unlikely(skb_shinfo(skb)->nr_frags)) { - /* tx buffer has fragments */ - goto dont_put_skbuff_in_hw; - } - if (unlikely - (skb->truesize != - sizeof(*skb) + skb_end_offset(skb))) { - /* tx buffer truesize has been changed */ - goto dont_put_skbuff_in_hw; - } - - /* - * we can use this buffer in the fpa. we don't need the fau - * update anymore - */ - pko_command.s.dontfree = 0; - - hw_buffer.s.back = ((unsigned long)skb->data >> 7) - - ((unsigned long)fpa_head >> 7); - - *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; - - /* - * the skbuff will be reused without ever being freed. we must - * cleanup a bunch of core things. 
- */ - dst_release(skb_dst(skb)); - skb_dst_set(skb, null); - skb_ext_reset(skb); - nf_reset_ct(skb); - -#ifdef config_net_sched - skb->tc_index = 0; - skb_reset_tc(skb); -#endif /* config_net_sched */ -#endif /* reuse_skbuffs_without_free */ - -dont_put_skbuff_in_hw: - - /* check if we can use the hardware checksumming */ - if ((skb->protocol == htons(eth_p_ip)) && - (ip_hdr(skb)->version == 4) && - (ip_hdr(skb)->ihl == 5) && - ((ip_hdr(skb)->frag_off == 0) || - (ip_hdr(skb)->frag_off == htons(1 << 14))) && - ((ip_hdr(skb)->protocol == ipproto_tcp) || - (ip_hdr(skb)->protocol == ipproto_udp))) { - /* use hardware checksum calc */ - pko_command.s.ipoffp1 = skb_network_offset(skb) + 1; - } - - if (use_async_iobdma) { - /* get the number of skbuffs in use by the hardware */ - cvmx_synciobdma; - skb_to_free = cvmx_scratch_read64(cvmx_scr_scratch); - buffers_to_free = cvmx_scratch_read64(cvmx_scr_scratch + 8); - } else { - /* get the number of skbuffs in use by the hardware */ - skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, - max_skb_to_free); - buffers_to_free = - cvmx_fau_fetch_and_add32(fau_num_packet_buffers_to_free, 0); - } - - skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, - priv->fau + qos * 4); - - /* - * if we're sending faster than the receive can free them then - * don't do the hw free. - */ - if ((buffers_to_free < -100) && !pko_command.s.dontfree) - pko_command.s.dontfree = 1; - - if (pko_command.s.dontfree) { - queue_type = queue_core; - pko_command.s.reg0 = priv->fau + qos * 4; - } else { - queue_type = queue_hw; - } - if (use_async_iobdma) - cvmx_fau_async_fetch_and_add32(cvmx_scr_scratch, - fau_total_tx_to_clean, 1); - - spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); - - /* drop this packet if we have too many already queued to the hw */ - if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= - max_out_queue_depth)) { - if (dev->tx_queue_len != 0) { - /* drop the lock when notifying the core. 
*/ - spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, - flags); - netif_stop_queue(dev); - spin_lock_irqsave(&priv->tx_free_list[qos].lock, - flags); - } else { - /* if not using normal queueing. */ - queue_type = queue_drop; - goto skip_xmit; - } - } - - cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, - cvmx_pko_lock_none); - - /* send the packet to the output queue */ - if (unlikely(cvmx_pko_send_packet_finish(priv->port, - priv->queue + qos, - pko_command, hw_buffer, - cvmx_pko_lock_none))) { - printk_ratelimited("%s: failed to send the packet ", - dev->name); - queue_type = queue_drop; - } -skip_xmit: - to_free_list = null; - - switch (queue_type) { - case queue_drop: - skb->next = to_free_list; - to_free_list = skb; - dev->stats.tx_dropped++; - break; - case queue_hw: - cvmx_fau_atomic_add32(fau_num_packet_buffers_to_free, -1); - break; - case queue_core: - __skb_queue_tail(&priv->tx_free_list[qos], skb); - break; - default: - bug(); - } - - while (skb_to_free > 0) { - struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); - - t->next = to_free_list; - to_free_list = t; - skb_to_free--; - } - - spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); - - /* do the actual freeing outside of the lock. */ - while (to_free_list) { - struct sk_buff *t = to_free_list; - - to_free_list = to_free_list->next; - dev_kfree_skb_any(t); - } - - if (use_async_iobdma) { - cvmx_synciobdma; - total_to_clean = cvmx_scratch_read64(cvmx_scr_scratch); - /* restore the scratch area */ - cvmx_scratch_write64(cvmx_scr_scratch, old_scratch); - cvmx_scratch_write64(cvmx_scr_scratch + 8, old_scratch2); - } else { - total_to_clean = - cvmx_fau_fetch_and_add32(fau_total_tx_to_clean, 1); - } - - if (total_to_clean & 0x3ff) { - /* - * schedule the cleanup tasklet every 1024 packets for - * the pathological case of high traffic on one port - * delaying clean up of packets on a different port - * that is blocked waiting for the cleanup. 
- */ - tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); - } - - cvm_oct_kick_tx_poll_watchdog(); - - return netdev_tx_ok; -} - -/** - * cvm_oct_xmit_pow - transmit a packet to the pow - * @skb: packet to send - * @dev: device info structure - - * returns always returns zero - */ -int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - void *packet_buffer; - void *copy_location; - - /* get a work queue entry */ - struct cvmx_wqe *work = cvmx_fpa_alloc(cvmx_fpa_wqe_pool); - - if (unlikely(!work)) { - printk_ratelimited("%s: failed to allocate a work queue entry ", - dev->name); - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - return 0; - } - - /* get a packet buffer */ - packet_buffer = cvmx_fpa_alloc(cvmx_fpa_packet_pool); - if (unlikely(!packet_buffer)) { - printk_ratelimited("%s: failed to allocate a packet buffer ", - dev->name); - cvmx_fpa_free(work, cvmx_fpa_wqe_pool, 1); - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - return 0; - } - - /* - * calculate where we need to copy the data to. we need to - * leave 8 bytes for a next pointer (unused). we also need to - * include any configure skip. then we need to align the ip - * packet src and dest into the same 64bit word. the below - * calculation may add a little extra, but that doesn't - * hurt. - */ - copy_location = packet_buffer + sizeof(u64); - copy_location += ((cvmx_helper_first_mbuff_skip + 7) & 0xfff8) + 6; - - /* - * we have to copy the packet since whoever processes this - * packet will free it to a hardware pool. we can't use the - * trick of counting outstanding packets like in - * cvm_oct_xmit. - */ - memcpy(copy_location, skb->data, skb->len); - - /* - * fill in some of the work queue fields. we may need to add - * more if the software at the other end needs them. 
- */ - if (!octeon_is_model(octeon_cn68xx)) - work->word0.pip.cn38xx.hw_chksum = skb->csum; - work->word1.len = skb->len; - cvmx_wqe_set_port(work, priv->port); - cvmx_wqe_set_qos(work, priv->port & 0x7); - cvmx_wqe_set_grp(work, pow_send_group); - work->word1.tag_type = cvmx_helper_input_tag_type; - work->word1.tag = pow_send_group; /* fixme */ - /* default to zero. sets of zero later are commented out */ - work->word2.u64 = 0; - work->word2.s.bufs = 1; - work->packet_ptr.u64 = 0; - work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location); - work->packet_ptr.s.pool = cvmx_fpa_packet_pool; - work->packet_ptr.s.size = cvmx_fpa_packet_pool_size; - work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7; - - if (skb->protocol == htons(eth_p_ip)) { - work->word2.s.ip_offset = 14; -#if 0 - work->word2.s.vlan_valid = 0; /* fixme */ - work->word2.s.vlan_cfi = 0; /* fixme */ - work->word2.s.vlan_id = 0; /* fixme */ - work->word2.s.dec_ipcomp = 0; /* fixme */ -#endif - work->word2.s.tcp_or_udp = - (ip_hdr(skb)->protocol == ipproto_tcp) || - (ip_hdr(skb)->protocol == ipproto_udp); -#if 0 - /* fixme */ - work->word2.s.dec_ipsec = 0; - /* we only support ipv4 right now */ - work->word2.s.is_v6 = 0; - /* hardware would set to zero */ - work->word2.s.software = 0; - /* no error, packet is internal */ - work->word2.s.l4_error = 0; -#endif - work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) || - (ip_hdr(skb)->frag_off == - cpu_to_be16(1 << 14))); -#if 0 - /* assume linux is sending a good packet */ - work->word2.s.ip_exc = 0; -#endif - work->word2.s.is_bcast = (skb->pkt_type == packet_broadcast); - work->word2.s.is_mcast = (skb->pkt_type == packet_multicast); -#if 0 - /* this is an ip packet */ - work->word2.s.not_ip = 0; - /* no error, packet is internal */ - work->word2.s.rcv_error = 0; - /* no error, packet is internal */ - work->word2.s.err_code = 0; -#endif - - /* - * when copying the data, include 4 bytes of the - * ethernet header to align the same way hardware 
- * does. - */ - memcpy(work->packet_data, skb->data + 10, - sizeof(work->packet_data)); - } else { -#if 0 - work->word2.snoip.vlan_valid = 0; /* fixme */ - work->word2.snoip.vlan_cfi = 0; /* fixme */ - work->word2.snoip.vlan_id = 0; /* fixme */ - work->word2.snoip.software = 0; /* hardware would set to zero */ -#endif - work->word2.snoip.is_rarp = skb->protocol == htons(eth_p_rarp); - work->word2.snoip.is_arp = skb->protocol == htons(eth_p_arp); - work->word2.snoip.is_bcast = - (skb->pkt_type == packet_broadcast); - work->word2.snoip.is_mcast = - (skb->pkt_type == packet_multicast); - work->word2.snoip.not_ip = 1; /* ip was done up above */ -#if 0 - /* no error, packet is internal */ - work->word2.snoip.rcv_error = 0; - /* no error, packet is internal */ - work->word2.snoip.err_code = 0; -#endif - memcpy(work->packet_data, skb->data, sizeof(work->packet_data)); - } - - /* submit the packet to the pow */ - cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type, - cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work)); - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - dev_consume_skb_any(skb); - return 0; -} - -/** - * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for tx. 
- * @dev: device being shutdown - * - */ -void cvm_oct_tx_shutdown_dev(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - unsigned long flags; - int qos; - - for (qos = 0; qos < 16; qos++) { - spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); - while (skb_queue_len(&priv->tx_free_list[qos])) - dev_kfree_skb_any(__skb_dequeue - (&priv->tx_free_list[qos])); - spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); - } -} - -static void cvm_oct_tx_do_cleanup(unsigned long arg) -{ - int port; - - for (port = 0; port < total_number_of_ports; port++) { - if (cvm_oct_device[port]) { - struct net_device *dev = cvm_oct_device[port]; - - cvm_oct_free_tx_skbs(dev); - } - } -} - -static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id) -{ - /* disable the interrupt. */ - cvmx_write_csr(cvmx_ciu_timx(1), 0); - /* do the work in the tasklet. */ - tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); - return irq_handled; -} - -void cvm_oct_tx_initialize(void) -{ - int i; - - /* disable the interrupt. */ - cvmx_write_csr(cvmx_ciu_timx(1), 0); - /* register an irq handler to receive ciu_timx(1) interrupts */ - i = request_irq(octeon_irq_timer1, - cvm_oct_tx_cleanup_watchdog, 0, - "ethernet", cvm_oct_device); - - if (i) - panic("could not acquire ethernet irq %d ", octeon_irq_timer1); -} - -void cvm_oct_tx_shutdown(void) -{ - /* free the interrupt handler */ - free_irq(octeon_irq_timer1, cvm_oct_device); -} diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h --- a/drivers/staging/octeon/ethernet-tx.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2007 cavium networks - */ - -int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev); -int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev); -int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, - int do_free, int qos); -void cvm_oct_tx_initialize(void); -void cvm_oct_tx_shutdown(void); -void cvm_oct_tx_shutdown_dev(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h --- a/drivers/staging/octeon/ethernet-util.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2007 cavium networks - */ - -/** - * cvm_oct_get_buffer_ptr - convert packet data address to pointer - * @packet_ptr: packet data hardware address - * - * returns packet buffer pointer - */ -static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) -{ - return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back) - << 7); -} - -/** - * interface - convert ipd port to logical interface - * @ipd_port: port to check - * - * returns logical interface - */ -static inline int interface(int ipd_port) -{ - int interface; - - if (ipd_port == cvmx_pip_num_input_ports) - return 10; - interface = cvmx_helper_get_interface_num(ipd_port); - if (interface >= 0) - return interface; - panic("illegal ipd_port %d passed to %s ", ipd_port, __func__); -} - -/** - * index - convert ipd/pko port number to the port's interface index - * @ipd_port: port to check - * - * returns index into interface port list - */ -static inline int index(int ipd_port) -{ - return cvmx_helper_get_interface_index_num(ipd_port); -} diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c --- a/drivers/staging/octeon/ethernet.c +++ /dev/null -// spdx-license-identifier: gpl-2.0 -/* - * this file is based on code from octeon sdk by cavium networks. 
- * - * copyright (c) 2003-2007 cavium networks - */ - -#include <linux/platform_device.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/phy.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/of_net.h> -#include <linux/if_ether.h> -#include <linux/if_vlan.h> - -#include <net/dst.h> - -#include "octeon-ethernet.h" -#include "ethernet-defines.h" -#include "ethernet-mem.h" -#include "ethernet-rx.h" -#include "ethernet-tx.h" -#include "ethernet-mdio.h" -#include "ethernet-util.h" - -#define octeon_max_mtu 65392 - -static int num_packet_buffers = 1024; -module_param(num_packet_buffers, int, 0444); -module_parm_desc(num_packet_buffers, " " - " number of packet buffers to allocate and store in the " - " fpa. by default, 1024 packet buffers are used. "); - -static int pow_receive_group = 15; -module_param(pow_receive_group, int, 0444); -module_parm_desc(pow_receive_group, " " - " pow group to receive packets from. all ethernet hardware " - " will be configured to send incoming packets to this pow " - " group. also any other software can submit packets to this " - " group for the kernel to process."); - -static int receive_group_order; -module_param(receive_group_order, int, 0444); -module_parm_desc(receive_group_order, " " - " order (0..4) of receive groups to take into use. ethernet hardware " - " will be configured to send incoming packets to multiple pow " - " groups. pow_receive_group parameter is ignored when multiple " - " groups are taken into use and groups are allocated starting " - " from 0. by default, a single group is used. "); - -int pow_send_group = -1; -module_param(pow_send_group, int, 0644); -module_parm_desc(pow_send_group, " " - " pow group to send packets to other software on. this " - " controls the creation of the virtual device pow0. 
" - " always_use_pow also depends on this value."); - -int always_use_pow; -module_param(always_use_pow, int, 0444); -module_parm_desc(always_use_pow, " " - " when set, always send to the pow group. this will cause " - " packets sent to real ethernet devices to be sent to the " - " pow group instead of the hardware. unless some other " - " application changes the config, packets will still be " - " received from the low level hardware. use this option " - " to allow a cvmx app to intercept all packets from the " - " linux kernel. you must specify pow_send_group along with " - " this option."); - -char pow_send_list[128] = ""; -module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444); -module_parm_desc(pow_send_list, " " - " comma separated list of ethernet devices that should use the " - " pow for transmit instead of the actual ethernet hardware. this " - " is a per port version of always_use_pow. always_use_pow takes " - " precedence over this list. for example, setting this to " - " "eth2,spi3,spi7" would cause these three devices to transmit " - " using the pow_send_group."); - -int rx_napi_weight = 32; -module_param(rx_napi_weight, int, 0444); -module_parm_desc(rx_napi_weight, "the napi weight parameter."); - -/* mask indicating which receive groups are in use. */ -int pow_receive_groups; - -/* - * cvm_oct_poll_queue_stopping - flag to indicate polling should stop. - * - * set to one right before cvm_oct_poll_queue is destroyed. - */ -atomic_t cvm_oct_poll_queue_stopping = atomic_init(0); - -/* - * array of every ethernet device owned by this driver indexed by - * the ipd input port number. 
- */ -struct net_device *cvm_oct_device[total_number_of_ports]; - -u64 cvm_oct_tx_poll_interval; - -static void cvm_oct_rx_refill_worker(struct work_struct *work); -static declare_delayed_work(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker); - -static void cvm_oct_rx_refill_worker(struct work_struct *work) -{ - /* - * fpa 0 may have been drained, try to refill it if we need - * more than num_packet_buffers / 2, otherwise normal receive - * processing will refill it. if it were drained, no packets - * could be received so cvm_oct_napi_poll would never be - * invoked to do the refill. - */ - cvm_oct_rx_refill_pool(num_packet_buffers / 2); - - if (!atomic_read(&cvm_oct_poll_queue_stopping)) - schedule_delayed_work(&cvm_oct_rx_refill_work, hz); -} - -static void cvm_oct_periodic_worker(struct work_struct *work) -{ - struct octeon_ethernet *priv = container_of(work, - struct octeon_ethernet, - port_periodic_work.work); - - if (priv->poll) - priv->poll(cvm_oct_device[priv->port]); - - cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats - (cvm_oct_device[priv->port]); - - if (!atomic_read(&cvm_oct_poll_queue_stopping)) - schedule_delayed_work(&priv->port_periodic_work, hz); -} - -static void cvm_oct_configure_common_hw(void) -{ - /* setup the fpa */ - cvmx_fpa_enable(); - cvm_oct_mem_fill_fpa(cvmx_fpa_packet_pool, cvmx_fpa_packet_pool_size, - num_packet_buffers); - cvm_oct_mem_fill_fpa(cvmx_fpa_wqe_pool, cvmx_fpa_wqe_pool_size, - num_packet_buffers); - if (cvmx_fpa_output_buffer_pool != cvmx_fpa_packet_pool) - cvm_oct_mem_fill_fpa(cvmx_fpa_output_buffer_pool, - cvmx_fpa_output_buffer_pool_size, 1024); - -#ifdef __little_endian - { - union cvmx_ipd_ctl_status ipd_ctl_status; - - ipd_ctl_status.u64 = cvmx_read_csr(cvmx_ipd_ctl_status); - ipd_ctl_status.s.pkt_lend = 1; - ipd_ctl_status.s.wqe_lend = 1; - cvmx_write_csr(cvmx_ipd_ctl_status, ipd_ctl_status.u64); - } -#endif - - cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8); -} - -/** - * 
cvm_oct_free_work- free a work queue entry - * - * @work_queue_entry: work queue entry to free - * - * returns zero on success, negative on failure. - */ -int cvm_oct_free_work(void *work_queue_entry) -{ - struct cvmx_wqe *work = work_queue_entry; - - int segments = work->word2.s.bufs; - union cvmx_buf_ptr segment_ptr = work->packet_ptr; - - while (segments--) { - union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *) - cvmx_phys_to_ptr(segment_ptr.s.addr - 8); - if (unlikely(!segment_ptr.s.i)) - cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr), - segment_ptr.s.pool, - cvmx_fpa_packet_pool_size / 128); - segment_ptr = next_ptr; - } - cvmx_fpa_free(work, cvmx_fpa_wqe_pool, 1); - - return 0; -} -export_symbol(cvm_oct_free_work); - -/** - * cvm_oct_common_get_stats - get the low level ethernet statistics - * @dev: device to get the statistics from - * - * returns pointer to the statistics - */ -static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev) -{ - cvmx_pip_port_status_t rx_status; - cvmx_pko_port_status_t tx_status; - struct octeon_ethernet *priv = netdev_priv(dev); - - if (priv->port < cvmx_pip_num_input_ports) { - if (octeon_is_simulation()) { - /* the simulator doesn't support statistics */ - memset(&rx_status, 0, sizeof(rx_status)); - memset(&tx_status, 0, sizeof(tx_status)); - } else { - cvmx_pip_get_port_status(priv->port, 1, &rx_status); - cvmx_pko_get_port_status(priv->port, 1, &tx_status); - } - - dev->stats.rx_packets += rx_status.inb_packets; - dev->stats.tx_packets += tx_status.packets; - dev->stats.rx_bytes += rx_status.inb_octets; - dev->stats.tx_bytes += tx_status.octets; - dev->stats.multicast += rx_status.multicast_packets; - dev->stats.rx_crc_errors += rx_status.inb_errors; - dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets; - dev->stats.rx_dropped += rx_status.dropped_packets; - } - - return &dev->stats; -} - -/** - * cvm_oct_common_change_mtu - change the link mtu - * @dev: device to change - * @new_mtu: 
the new mtu - * - * returns zero on success - */ -static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - int interface = interface(priv->port); -#if is_enabled(config_vlan_8021q) - int vlan_bytes = vlan_hlen; -#else - int vlan_bytes = 0; -#endif - int mtu_overhead = eth_hlen + eth_fcs_len + vlan_bytes; - - dev->mtu = new_mtu; - - if ((interface < 2) && - (cvmx_helper_interface_get_mode(interface) != - cvmx_helper_interface_mode_spi)) { - int index = index(priv->port); - /* add ethernet header and fcs, and vlan if configured. */ - int max_packet = new_mtu + mtu_overhead; - - if (octeon_is_model(octeon_cn3xxx) || - octeon_is_model(octeon_cn58xx)) { - /* signal errors on packets larger than the mtu */ - cvmx_write_csr(cvmx_gmxx_rxx_frm_max(index, interface), - max_packet); - } else { - /* - * set the hardware to truncate packets larger - * than the mtu and smaller the 64 bytes. - */ - union cvmx_pip_frm_len_chkx frm_len_chk; - - frm_len_chk.u64 = 0; - frm_len_chk.s.minlen = vlan_eth_zlen; - frm_len_chk.s.maxlen = max_packet; - cvmx_write_csr(cvmx_pip_frm_len_chkx(interface), - frm_len_chk.u64); - } - /* - * set the hardware to truncate packets larger than - * the mtu. the jabber register must be set to a - * multiple of 8 bytes, so round up. 
- */ - cvmx_write_csr(cvmx_gmxx_rxx_jabber(index, interface), - (max_packet + 7) & ~7u); - } - return 0; -} - -/** - * cvm_oct_common_set_multicast_list - set the multicast list - * @dev: device to work on - */ -static void cvm_oct_common_set_multicast_list(struct net_device *dev) -{ - union cvmx_gmxx_prtx_cfg gmx_cfg; - struct octeon_ethernet *priv = netdev_priv(dev); - int interface = interface(priv->port); - - if ((interface < 2) && - (cvmx_helper_interface_get_mode(interface) != - cvmx_helper_interface_mode_spi)) { - union cvmx_gmxx_rxx_adr_ctl control; - int index = index(priv->port); - - control.u64 = 0; - control.s.bcst = 1; /* allow broadcast mac addresses */ - - if (!netdev_mc_empty(dev) || (dev->flags & iff_allmulti) || - (dev->flags & iff_promisc)) - /* force accept multicast packets */ - control.s.mcst = 2; - else - /* force reject multicast packets */ - control.s.mcst = 1; - - if (dev->flags & iff_promisc) - /* - * reject matches if promisc. since cam is - * shut off, should accept everything. 
- */ - control.s.cam_mode = 0; - else - /* filter packets based on the cam */ - control.s.cam_mode = 1; - - gmx_cfg.u64 = - cvmx_read_csr(cvmx_gmxx_prtx_cfg(index, interface)); - cvmx_write_csr(cvmx_gmxx_prtx_cfg(index, interface), - gmx_cfg.u64 & ~1ull); - - cvmx_write_csr(cvmx_gmxx_rxx_adr_ctl(index, interface), - control.u64); - if (dev->flags & iff_promisc) - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam_en - (index, interface), 0); - else - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam_en - (index, interface), 1); - - cvmx_write_csr(cvmx_gmxx_prtx_cfg(index, interface), - gmx_cfg.u64); - } -} - -static int cvm_oct_set_mac_filter(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - union cvmx_gmxx_prtx_cfg gmx_cfg; - int interface = interface(priv->port); - - if ((interface < 2) && - (cvmx_helper_interface_get_mode(interface) != - cvmx_helper_interface_mode_spi)) { - int i; - u8 *ptr = dev->dev_addr; - u64 mac = 0; - int index = index(priv->port); - - for (i = 0; i < 6; i++) - mac = (mac << 8) | (u64)ptr[i]; - - gmx_cfg.u64 = - cvmx_read_csr(cvmx_gmxx_prtx_cfg(index, interface)); - cvmx_write_csr(cvmx_gmxx_prtx_cfg(index, interface), - gmx_cfg.u64 & ~1ull); - - cvmx_write_csr(cvmx_gmxx_smacx(index, interface), mac); - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam0(index, interface), - ptr[0]); - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam1(index, interface), - ptr[1]); - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam2(index, interface), - ptr[2]); - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam3(index, interface), - ptr[3]); - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam4(index, interface), - ptr[4]); - cvmx_write_csr(cvmx_gmxx_rxx_adr_cam5(index, interface), - ptr[5]); - cvm_oct_common_set_multicast_list(dev); - cvmx_write_csr(cvmx_gmxx_prtx_cfg(index, interface), - gmx_cfg.u64); - } - return 0; -} - -/** - * cvm_oct_common_set_mac_address - set the hardware mac address for a device - * @dev: the device in question. - * @addr: socket address. 
- * - * returns zero on success - */ -static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr) -{ - int r = eth_mac_addr(dev, addr); - - if (r) - return r; - return cvm_oct_set_mac_filter(dev); -} - -/** - * cvm_oct_common_init - per network device initialization - * @dev: device to initialize - * - * returns zero on success - */ -int cvm_oct_common_init(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - const u8 *mac = null; - - if (priv->of_node) - mac = of_get_mac_address(priv->of_node); - - if (!is_err_or_null(mac)) - ether_addr_copy(dev->dev_addr, mac); - else - eth_hw_addr_random(dev); - - /* - * force the interface to use the pow send if always_use_pow - * was specified or it is in the pow send list. - */ - if ((pow_send_group != -1) && - (always_use_pow || strstr(pow_send_list, dev->name))) - priv->queue = -1; - - if (priv->queue != -1) - dev->features |= netif_f_sg | netif_f_ip_csum; - - /* we do our own locking, linux doesn't need to */ - dev->features |= netif_f_lltx; - dev->ethtool_ops = &cvm_oct_ethtool_ops; - - cvm_oct_set_mac_filter(dev); - dev_set_mtu(dev, dev->mtu); - - /* - * zero out stats for port so we won't mistakenly show - * counters from the bootloader. 
- */ - memset(dev->netdev_ops->ndo_get_stats(dev), 0, - sizeof(struct net_device_stats)); - - if (dev->netdev_ops->ndo_stop) - dev->netdev_ops->ndo_stop(dev); - - return 0; -} - -void cvm_oct_common_uninit(struct net_device *dev) -{ - if (dev->phydev) - phy_disconnect(dev->phydev); -} - -int cvm_oct_common_open(struct net_device *dev, - void (*link_poll)(struct net_device *)) -{ - union cvmx_gmxx_prtx_cfg gmx_cfg; - struct octeon_ethernet *priv = netdev_priv(dev); - int interface = interface(priv->port); - int index = index(priv->port); - union cvmx_helper_link_info link_info; - int rv; - - rv = cvm_oct_phy_setup_device(dev); - if (rv) - return rv; - - gmx_cfg.u64 = cvmx_read_csr(cvmx_gmxx_prtx_cfg(index, interface)); - gmx_cfg.s.en = 1; - if (octeon_has_feature(octeon_feature_pknd)) - gmx_cfg.s.pknd = priv->port; - cvmx_write_csr(cvmx_gmxx_prtx_cfg(index, interface), gmx_cfg.u64); - - if (octeon_is_simulation()) - return 0; - - if (dev->phydev) { - int r = phy_read_status(dev->phydev); - - if (r == 0 && dev->phydev->link == 0) - netif_carrier_off(dev); - cvm_oct_adjust_link(dev); - } else { - link_info = cvmx_helper_link_get(priv->port); - if (!link_info.s.link_up) - netif_carrier_off(dev); - priv->poll = link_poll; - link_poll(dev); - } - - return 0; -} - -void cvm_oct_link_poll(struct net_device *dev) -{ - struct octeon_ethernet *priv = netdev_priv(dev); - union cvmx_helper_link_info link_info; - - link_info = cvmx_helper_link_get(priv->port); - if (link_info.u64 == priv->link_info) - return; - - if (cvmx_helper_link_set(priv->port, link_info)) - link_info.u64 = priv->link_info; - else - priv->link_info = link_info.u64; - - if (link_info.s.link_up) { - if (!netif_carrier_ok(dev)) - netif_carrier_on(dev); - } else if (netif_carrier_ok(dev)) { - netif_carrier_off(dev); - } - cvm_oct_note_carrier(priv, link_info); -} - -static int cvm_oct_xaui_open(struct net_device *dev) -{ - return cvm_oct_common_open(dev, cvm_oct_link_poll); -} - -static const struct 
net_device_ops cvm_oct_npi_netdev_ops = { - .ndo_init = cvm_oct_common_init, - .ndo_uninit = cvm_oct_common_uninit, - .ndo_start_xmit = cvm_oct_xmit, - .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, - .ndo_set_mac_address = cvm_oct_common_set_mac_address, - .ndo_do_ioctl = cvm_oct_ioctl, - .ndo_change_mtu = cvm_oct_common_change_mtu, - .ndo_get_stats = cvm_oct_common_get_stats, -#ifdef config_net_poll_controller - .ndo_poll_controller = cvm_oct_poll_controller, -#endif -}; - -static const struct net_device_ops cvm_oct_xaui_netdev_ops = { - .ndo_init = cvm_oct_common_init, - .ndo_uninit = cvm_oct_common_uninit, - .ndo_open = cvm_oct_xaui_open, - .ndo_stop = cvm_oct_common_stop, - .ndo_start_xmit = cvm_oct_xmit, - .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, - .ndo_set_mac_address = cvm_oct_common_set_mac_address, - .ndo_do_ioctl = cvm_oct_ioctl, - .ndo_change_mtu = cvm_oct_common_change_mtu, - .ndo_get_stats = cvm_oct_common_get_stats, -#ifdef config_net_poll_controller - .ndo_poll_controller = cvm_oct_poll_controller, -#endif -}; - -static const struct net_device_ops cvm_oct_sgmii_netdev_ops = { - .ndo_init = cvm_oct_sgmii_init, - .ndo_uninit = cvm_oct_common_uninit, - .ndo_open = cvm_oct_sgmii_open, - .ndo_stop = cvm_oct_common_stop, - .ndo_start_xmit = cvm_oct_xmit, - .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, - .ndo_set_mac_address = cvm_oct_common_set_mac_address, - .ndo_do_ioctl = cvm_oct_ioctl, - .ndo_change_mtu = cvm_oct_common_change_mtu, - .ndo_get_stats = cvm_oct_common_get_stats, -#ifdef config_net_poll_controller - .ndo_poll_controller = cvm_oct_poll_controller, -#endif -}; - -static const struct net_device_ops cvm_oct_spi_netdev_ops = { - .ndo_init = cvm_oct_spi_init, - .ndo_uninit = cvm_oct_spi_uninit, - .ndo_start_xmit = cvm_oct_xmit, - .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, - .ndo_set_mac_address = cvm_oct_common_set_mac_address, - .ndo_do_ioctl = cvm_oct_ioctl, - .ndo_change_mtu = cvm_oct_common_change_mtu, 
- .ndo_get_stats = cvm_oct_common_get_stats, -#ifdef config_net_poll_controller - .ndo_poll_controller = cvm_oct_poll_controller, -#endif -}; - -static const struct net_device_ops cvm_oct_rgmii_netdev_ops = { - .ndo_init = cvm_oct_common_init, - .ndo_uninit = cvm_oct_common_uninit, - .ndo_open = cvm_oct_rgmii_open, - .ndo_stop = cvm_oct_common_stop, - .ndo_start_xmit = cvm_oct_xmit, - .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, - .ndo_set_mac_address = cvm_oct_common_set_mac_address, - .ndo_do_ioctl = cvm_oct_ioctl, - .ndo_change_mtu = cvm_oct_common_change_mtu, - .ndo_get_stats = cvm_oct_common_get_stats, -#ifdef config_net_poll_controller - .ndo_poll_controller = cvm_oct_poll_controller, -#endif -}; - -static const struct net_device_ops cvm_oct_pow_netdev_ops = { - .ndo_init = cvm_oct_common_init, - .ndo_start_xmit = cvm_oct_xmit_pow, - .ndo_set_rx_mode = cvm_oct_common_set_multicast_list, - .ndo_set_mac_address = cvm_oct_common_set_mac_address, - .ndo_do_ioctl = cvm_oct_ioctl, - .ndo_change_mtu = cvm_oct_common_change_mtu, - .ndo_get_stats = cvm_oct_common_get_stats, -#ifdef config_net_poll_controller - .ndo_poll_controller = cvm_oct_poll_controller, -#endif -}; - -static struct device_node *cvm_oct_of_get_child - (const struct device_node *parent, int reg_val) -{ - struct device_node *node = null; - int size; - const __be32 *addr; - - for (;;) { - node = of_get_next_child(parent, node); - if (!node) - break; - addr = of_get_property(node, "reg", &size); - if (addr && (be32_to_cpu(*addr) == reg_val)) - break; - } - return node; -} - -static struct device_node *cvm_oct_node_for_port(struct device_node *pip, - int interface, int port) -{ - struct device_node *ni, *np; - - ni = cvm_oct_of_get_child(pip, interface); - if (!ni) - return null; - - np = cvm_oct_of_get_child(ni, port); - of_node_put(ni); - - return np; -} - -static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface, - int port) -{ - struct device_node *np = priv->of_node; - u32 
delay_value; - bool rx_delay; - bool tx_delay; - - /* by default, both rx/tx delay is enabled in - * __cvmx_helper_rgmii_enable(). - */ - rx_delay = true; - tx_delay = true; - - if (!of_property_read_u32(np, "rx-delay", &delay_value)) { - cvmx_write_csr(cvmx_asxx_rx_clk_setx(port, iface), delay_value); - rx_delay = delay_value > 0; - } - if (!of_property_read_u32(np, "tx-delay", &delay_value)) { - cvmx_write_csr(cvmx_asxx_tx_clk_setx(port, iface), delay_value); - tx_delay = delay_value > 0; - } - - if (!rx_delay && !tx_delay) - priv->phy_mode = phy_interface_mode_rgmii_id; - else if (!rx_delay) - priv->phy_mode = phy_interface_mode_rgmii_rxid; - else if (!tx_delay) - priv->phy_mode = phy_interface_mode_rgmii_txid; - else - priv->phy_mode = phy_interface_mode_rgmii; -} - -static int cvm_oct_probe(struct platform_device *pdev) -{ - int num_interfaces; - int interface; - int fau = fau_num_packet_buffers_to_free; - int qos; - struct device_node *pip; - int mtu_overhead = eth_hlen + eth_fcs_len; - -#if is_enabled(config_vlan_8021q) - mtu_overhead += vlan_hlen; -#endif - - octeon_mdiobus_force_mod_depencency(); - - pip = pdev->dev.of_node; - if (!pip) { - pr_err("error: no 'pip' in /aliases "); - return -einval; - } - - cvm_oct_configure_common_hw(); - - cvmx_helper_initialize_packet_io_global(); - - if (receive_group_order) { - if (receive_group_order > 4) - receive_group_order = 4; - pow_receive_groups = (1 << (1 << receive_group_order)) - 1; - } else { - pow_receive_groups = bit(pow_receive_group); - } - - /* change the input group for all ports before input is enabled */ - num_interfaces = cvmx_helper_get_number_of_interfaces(); - for (interface = 0; interface < num_interfaces; interface++) { - int num_ports = cvmx_helper_ports_on_interface(interface); - int port; - - for (port = cvmx_helper_get_ipd_port(interface, 0); - port < cvmx_helper_get_ipd_port(interface, num_ports); - port++) { - union cvmx_pip_prt_tagx pip_prt_tagx; - - pip_prt_tagx.u64 = - 
cvmx_read_csr(cvmx_pip_prt_tagx(port)); - - if (receive_group_order) { - int tag_mask; - - /* we support only 16 groups at the moment, so - * always disable the two additional "hidden" - * tag_mask bits on cn68xx. - */ - if (octeon_is_model(octeon_cn68xx)) - pip_prt_tagx.u64 |= 0x3ull << 44; - - tag_mask = ~((1 << receive_group_order) - 1); - pip_prt_tagx.s.grptagbase = 0; - pip_prt_tagx.s.grptagmask = tag_mask; - pip_prt_tagx.s.grptag = 1; - pip_prt_tagx.s.tag_mode = 0; - pip_prt_tagx.s.inc_prt_flag = 1; - pip_prt_tagx.s.ip6_dprt_flag = 1; - pip_prt_tagx.s.ip4_dprt_flag = 1; - pip_prt_tagx.s.ip6_sprt_flag = 1; - pip_prt_tagx.s.ip4_sprt_flag = 1; - pip_prt_tagx.s.ip6_dst_flag = 1; - pip_prt_tagx.s.ip4_dst_flag = 1; - pip_prt_tagx.s.ip6_src_flag = 1; - pip_prt_tagx.s.ip4_src_flag = 1; - pip_prt_tagx.s.grp = 0; - } else { - pip_prt_tagx.s.grptag = 0; - pip_prt_tagx.s.grp = pow_receive_group; - } - - cvmx_write_csr(cvmx_pip_prt_tagx(port), - pip_prt_tagx.u64); - } - } - - cvmx_helper_ipd_and_packet_input_enable(); - - memset(cvm_oct_device, 0, sizeof(cvm_oct_device)); - - /* - * initialize the fau used for counting packet buffers that - * need to be freed. - */ - cvmx_fau_atomic_write32(fau_num_packet_buffers_to_free, 0); - - /* initialize the fau used for counting tx skbs that need to be freed */ - cvmx_fau_atomic_write32(fau_total_tx_to_clean, 0); - - if ((pow_send_group != -1)) { - struct net_device *dev; - - dev = alloc_etherdev(sizeof(struct octeon_ethernet)); - if (dev) { - /* initialize the device private structure. 
*/ - struct octeon_ethernet *priv = netdev_priv(dev); - - set_netdev_dev(dev, &pdev->dev); - dev->netdev_ops = &cvm_oct_pow_netdev_ops; - priv->imode = cvmx_helper_interface_mode_disabled; - priv->port = cvmx_pip_num_input_ports; - priv->queue = -1; - strscpy(dev->name, "pow%d", sizeof(dev->name)); - for (qos = 0; qos < 16; qos++) - skb_queue_head_init(&priv->tx_free_list[qos]); - dev->min_mtu = vlan_eth_zlen - mtu_overhead; - dev->max_mtu = octeon_max_mtu - mtu_overhead; - - if (register_netdev(dev) < 0) { - pr_err("failed to register ethernet device for pow "); - free_netdev(dev); - } else { - cvm_oct_device[cvmx_pip_num_input_ports] = dev; - pr_info("%s: pow send group %d, receive group %d ", - dev->name, pow_send_group, - pow_receive_group); - } - } else { - pr_err("failed to allocate ethernet device for pow "); - } - } - - num_interfaces = cvmx_helper_get_number_of_interfaces(); - for (interface = 0; interface < num_interfaces; interface++) { - cvmx_helper_interface_mode_t imode = - cvmx_helper_interface_get_mode(interface); - int num_ports = cvmx_helper_ports_on_interface(interface); - int port; - int port_index; - - for (port_index = 0, - port = cvmx_helper_get_ipd_port(interface, 0); - port < cvmx_helper_get_ipd_port(interface, num_ports); - port_index++, port++) { - struct octeon_ethernet *priv; - struct net_device *dev = - alloc_etherdev(sizeof(struct octeon_ethernet)); - if (!dev) { - pr_err("failed to allocate ethernet device for port %d ", - port); - continue; - } - - /* initialize the device private structure. 
*/ - set_netdev_dev(dev, &pdev->dev); - priv = netdev_priv(dev); - priv->netdev = dev; - priv->of_node = cvm_oct_node_for_port(pip, interface, - port_index); - - init_delayed_work(&priv->port_periodic_work, - cvm_oct_periodic_worker); - priv->imode = imode; - priv->port = port; - priv->queue = cvmx_pko_get_base_queue(priv->port); - priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; - priv->phy_mode = phy_interface_mode_na; - for (qos = 0; qos < 16; qos++) - skb_queue_head_init(&priv->tx_free_list[qos]); - for (qos = 0; qos < cvmx_pko_get_num_queues(port); - qos++) - cvmx_fau_atomic_write32(priv->fau + qos * 4, 0); - dev->min_mtu = vlan_eth_zlen - mtu_overhead; - dev->max_mtu = octeon_max_mtu - mtu_overhead; - - switch (priv->imode) { - /* these types don't support ports to ipd/pko */ - case cvmx_helper_interface_mode_disabled: - case cvmx_helper_interface_mode_pcie: - case cvmx_helper_interface_mode_picmg: - break; - - case cvmx_helper_interface_mode_npi: - dev->netdev_ops = &cvm_oct_npi_netdev_ops; - strscpy(dev->name, "npi%d", sizeof(dev->name)); - break; - - case cvmx_helper_interface_mode_xaui: - dev->netdev_ops = &cvm_oct_xaui_netdev_ops; - strscpy(dev->name, "xaui%d", sizeof(dev->name)); - break; - - case cvmx_helper_interface_mode_loop: - dev->netdev_ops = &cvm_oct_npi_netdev_ops; - strscpy(dev->name, "loop%d", sizeof(dev->name)); - break; - - case cvmx_helper_interface_mode_sgmii: - priv->phy_mode = phy_interface_mode_sgmii; - dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; - strscpy(dev->name, "eth%d", sizeof(dev->name)); - break; - - case cvmx_helper_interface_mode_spi: - dev->netdev_ops = &cvm_oct_spi_netdev_ops; - strscpy(dev->name, "spi%d", sizeof(dev->name)); - break; - - case cvmx_helper_interface_mode_gmii: - priv->phy_mode = phy_interface_mode_gmii; - dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; - strscpy(dev->name, "eth%d", sizeof(dev->name)); - break; - - case cvmx_helper_interface_mode_rgmii: - dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; - 
strscpy(dev->name, "eth%d", sizeof(dev->name)); - cvm_set_rgmii_delay(priv, interface, - port_index); - break; - } - - if (!dev->netdev_ops) { - free_netdev(dev); - } else if (register_netdev(dev) < 0) { - pr_err("failed to register ethernet device for interface %d, port %d ", - interface, priv->port); - free_netdev(dev); - } else { - cvm_oct_device[priv->port] = dev; - fau -= - cvmx_pko_get_num_queues(priv->port) * - sizeof(u32); - schedule_delayed_work(&priv->port_periodic_work, - hz); - } - } - } - - cvm_oct_tx_initialize(); - cvm_oct_rx_initialize(); - - /* - * 150 us: about 10 1500-byte packets at 1ge. - */ - cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000); - - schedule_delayed_work(&cvm_oct_rx_refill_work, hz); - - return 0; -} - -static int cvm_oct_remove(struct platform_device *pdev) -{ - int port; - - cvmx_ipd_disable(); - - atomic_inc_return(&cvm_oct_poll_queue_stopping); - cancel_delayed_work_sync(&cvm_oct_rx_refill_work); - - cvm_oct_rx_shutdown(); - cvm_oct_tx_shutdown(); - - cvmx_pko_disable(); - - /* free the ethernet devices */ - for (port = 0; port < total_number_of_ports; port++) { - if (cvm_oct_device[port]) { - struct net_device *dev = cvm_oct_device[port]; - struct octeon_ethernet *priv = netdev_priv(dev); - - cancel_delayed_work_sync(&priv->port_periodic_work); - - cvm_oct_tx_shutdown_dev(dev); - unregister_netdev(dev); - free_netdev(dev); - cvm_oct_device[port] = null; - } - } - - cvmx_pko_shutdown(); - - cvmx_ipd_free_ptr(); - - /* free the hw pools */ - cvm_oct_mem_empty_fpa(cvmx_fpa_packet_pool, cvmx_fpa_packet_pool_size, - num_packet_buffers); - cvm_oct_mem_empty_fpa(cvmx_fpa_wqe_pool, cvmx_fpa_wqe_pool_size, - num_packet_buffers); - if (cvmx_fpa_output_buffer_pool != cvmx_fpa_packet_pool) - cvm_oct_mem_empty_fpa(cvmx_fpa_output_buffer_pool, - cvmx_fpa_output_buffer_pool_size, 128); - return 0; -} - -static const struct of_device_id cvm_oct_match[] = { - { - .compatible = "cavium,octeon-3860-pip", - }, - {}, -}; 
-module_device_table(of, cvm_oct_match); - -static struct platform_driver cvm_oct_driver = { - .probe = cvm_oct_probe, - .remove = cvm_oct_remove, - .driver = { - .name = kbuild_modname, - .of_match_table = cvm_oct_match, - }, -}; - -module_platform_driver(cvm_oct_driver); - -module_license("gpl"); -module_author("cavium networks <support@caviumnetworks.com>"); -module_description("cavium networks octeon ethernet driver."); diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h --- a/drivers/staging/octeon/octeon-ethernet.h +++ /dev/null -/* spdx-license-identifier: gpl-2.0 */ -/* - * this file is based on code from octeon sdk by cavium networks. - * - * copyright (c) 2003-2010 cavium networks - */ - -/* - * external interface for the cavium octeon ethernet driver. - */ -#ifndef octeon_ethernet_h -#define octeon_ethernet_h - -#include <linux/of.h> -#include <linux/phy.h> - -#ifdef config_cavium_octeon_soc - -#include <asm/octeon/octeon.h> - -#include <asm/octeon/cvmx-asxx-defs.h> -#include <asm/octeon/cvmx-config.h> -#include <asm/octeon/cvmx-fau.h> -#include <asm/octeon/cvmx-gmxx-defs.h> -#include <asm/octeon/cvmx-helper.h> -#include <asm/octeon/cvmx-helper-util.h> -#include <asm/octeon/cvmx-ipd.h> -#include <asm/octeon/cvmx-ipd-defs.h> -#include <asm/octeon/cvmx-npi-defs.h> -#include <asm/octeon/cvmx-pip.h> -#include <asm/octeon/cvmx-pko.h> -#include <asm/octeon/cvmx-pow.h> -#include <asm/octeon/cvmx-scratch.h> -#include <asm/octeon/cvmx-spi.h> -#include <asm/octeon/cvmx-spxx-defs.h> -#include <asm/octeon/cvmx-stxx-defs.h> -#include <asm/octeon/cvmx-wqe.h> - -#else - -#include "octeon-stubs.h" - -#endif - -/** - * this is the definition of the ethernet driver's private - * driver state stored in netdev_priv(dev). 
- */ -struct octeon_ethernet { - /* pko hardware output port */ - int port; - /* pko hardware queue for the port */ - int queue; - /* hardware fetch and add to count outstanding tx buffers */ - int fau; - /* my netdev. */ - struct net_device *netdev; - /* - * type of port. this is one of the enums in - * cvmx_helper_interface_mode_t - */ - int imode; - /* phy mode */ - phy_interface_t phy_mode; - /* list of outstanding tx buffers per queue */ - struct sk_buff_head tx_free_list[16]; - unsigned int last_speed; - unsigned int last_link; - /* last negotiated link state */ - u64 link_info; - /* called periodically to check link status */ - void (*poll)(struct net_device *dev); - struct delayed_work port_periodic_work; - struct device_node *of_node; -}; - -int cvm_oct_free_work(void *work_queue_entry); - -int cvm_oct_rgmii_open(struct net_device *dev); - -int cvm_oct_sgmii_init(struct net_device *dev); -int cvm_oct_sgmii_open(struct net_device *dev); - -int cvm_oct_spi_init(struct net_device *dev); -void cvm_oct_spi_uninit(struct net_device *dev); - -int cvm_oct_common_init(struct net_device *dev); -void cvm_oct_common_uninit(struct net_device *dev); -void cvm_oct_adjust_link(struct net_device *dev); -int cvm_oct_common_stop(struct net_device *dev); -int cvm_oct_common_open(struct net_device *dev, - void (*link_poll)(struct net_device *)); -void cvm_oct_note_carrier(struct octeon_ethernet *priv, - union cvmx_helper_link_info li); -void cvm_oct_link_poll(struct net_device *dev); - -extern int always_use_pow; -extern int pow_send_group; -extern int pow_receive_groups; -extern char pow_send_list[]; -extern struct net_device *cvm_oct_device[]; -extern atomic_t cvm_oct_poll_queue_stopping; -extern u64 cvm_oct_tx_poll_interval; - -extern int rx_napi_weight; - -#endif diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h --- a/drivers/staging/octeon/octeon-stubs.h +++ /dev/null -#define config_cavium_octeon_cvmseg_size 512 - -#ifndef 
xkphys_to_phys -# define xkphys_to_phys(p) (p) -#endif - -#define octeon_irq_workq0 0 -#define octeon_irq_rml 0 -#define octeon_irq_timer1 0 -#define octeon_is_model(x) 0 -#define octeon_has_feature(x) 0 -#define octeon_get_clock_rate() 0 - -#define cvmx_synciobdma do { } while(0) - -#define cvmx_helper_input_tag_type 0 -#define cvmx_helper_first_mbuff_skip 7 -#define cvmx_fau_reg_end (2048) -#define cvmx_fpa_output_buffer_pool (2) -#define cvmx_fpa_output_buffer_pool_size 16 -#define cvmx_fpa_packet_pool (0) -#define cvmx_fpa_packet_pool_size 16 -#define cvmx_fpa_wqe_pool (1) -#define cvmx_fpa_wqe_pool_size 16 -#define cvmx_gmxx_rxx_adr_cam_en(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_adr_ctl(a, b) ((a)+(b)) -#define cvmx_gmxx_prtx_cfg(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_frm_max(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_jabber(a, b) ((a)+(b)) -#define cvmx_ipd_ctl_status 0 -#define cvmx_pip_frm_len_chkx(a) (a) -#define cvmx_pip_num_input_ports 1 -#define cvmx_scr_scratch 0 -#define cvmx_pko_queues_per_port_interface0 2 -#define cvmx_pko_queues_per_port_interface1 2 -#define cvmx_ipd_sub_port_fcs 0 -#define cvmx_sso_wq_iq_dis 0 -#define cvmx_sso_wq_int 0 -#define cvmx_pow_wq_int 0 -#define cvmx_sso_wq_int_pc 0 -#define cvmx_npi_rsl_int_blocks 0 -#define cvmx_pow_wq_int_pc 0 - -union cvmx_pip_wqe_word2 { - uint64_t u64; - struct { - uint64_t bufs:8; - uint64_t ip_offset:8; - uint64_t vlan_valid:1; - uint64_t vlan_stacked:1; - uint64_t unassigned:1; - uint64_t vlan_cfi:1; - uint64_t vlan_id:12; - uint64_t pr:4; - uint64_t unassigned2:8; - uint64_t dec_ipcomp:1; - uint64_t tcp_or_udp:1; - uint64_t dec_ipsec:1; - uint64_t is_v6:1; - uint64_t software:1; - uint64_t l4_error:1; - uint64_t is_frag:1; - uint64_t ip_exc:1; - uint64_t is_bcast:1; - uint64_t is_mcast:1; - uint64_t not_ip:1; - uint64_t rcv_error:1; - uint64_t err_code:8; - } s; - struct { - uint64_t bufs:8; - uint64_t ip_offset:8; - uint64_t vlan_valid:1; - uint64_t vlan_stacked:1; - uint64_t unassigned:1; - 
uint64_t vlan_cfi:1; - uint64_t vlan_id:12; - uint64_t port:12; - uint64_t dec_ipcomp:1; - uint64_t tcp_or_udp:1; - uint64_t dec_ipsec:1; - uint64_t is_v6:1; - uint64_t software:1; - uint64_t l4_error:1; - uint64_t is_frag:1; - uint64_t ip_exc:1; - uint64_t is_bcast:1; - uint64_t is_mcast:1; - uint64_t not_ip:1; - uint64_t rcv_error:1; - uint64_t err_code:8; - } s_cn68xx; - - struct { - uint64_t unused1:16; - uint64_t vlan:16; - uint64_t unused2:32; - } svlan; - struct { - uint64_t bufs:8; - uint64_t unused:8; - uint64_t vlan_valid:1; - uint64_t vlan_stacked:1; - uint64_t unassigned:1; - uint64_t vlan_cfi:1; - uint64_t vlan_id:12; - uint64_t pr:4; - uint64_t unassigned2:12; - uint64_t software:1; - uint64_t unassigned3:1; - uint64_t is_rarp:1; - uint64_t is_arp:1; - uint64_t is_bcast:1; - uint64_t is_mcast:1; - uint64_t not_ip:1; - uint64_t rcv_error:1; - uint64_t err_code:8; - } snoip; - -}; - -union cvmx_pip_wqe_word0 { - struct { - uint64_t next_ptr:40; - uint8_t unused; - __wsum hw_chksum; - } cn38xx; - struct { - uint64_t pknd:6; /* 0..5 */ - uint64_t unused2:2; /* 6..7 */ - uint64_t bpid:6; /* 8..13 */ - uint64_t unused1:18; /* 14..31 */ - uint64_t l2ptr:8; /* 32..39 */ - uint64_t l3ptr:8; /* 40..47 */ - uint64_t unused0:8; /* 48..55 */ - uint64_t l4ptr:8; /* 56..63 */ - } cn68xx; -}; - -union cvmx_wqe_word0 { - uint64_t u64; - union cvmx_pip_wqe_word0 pip; -}; - -union cvmx_wqe_word1 { - uint64_t u64; - struct { - uint64_t tag:32; - uint64_t tag_type:2; - uint64_t varies:14; - uint64_t len:16; - }; - struct { - uint64_t tag:32; - uint64_t tag_type:2; - uint64_t zero_2:3; - uint64_t grp:6; - uint64_t zero_1:1; - uint64_t qos:3; - uint64_t zero_0:1; - uint64_t len:16; - } cn68xx; - struct { - uint64_t tag:32; - uint64_t tag_type:2; - uint64_t zero_2:1; - uint64_t grp:4; - uint64_t qos:3; - uint64_t ipprt:6; - uint64_t len:16; - } cn38xx; -}; - -union cvmx_buf_ptr { - void *ptr; - uint64_t u64; - struct { - uint64_t i:1; - uint64_t back:4; - uint64_t pool:3; - 
uint64_t size:16; - uint64_t addr:40; - } s; -}; - -struct cvmx_wqe { - union cvmx_wqe_word0 word0; - union cvmx_wqe_word1 word1; - union cvmx_pip_wqe_word2 word2; - union cvmx_buf_ptr packet_ptr; - uint8_t packet_data[96]; -}; - -union cvmx_helper_link_info { - uint64_t u64; - struct { - uint64_t reserved_20_63:44; - uint64_t link_up:1; /**< is the physical link up? */ - uint64_t full_duplex:1; /**< 1 if the link is full duplex */ - uint64_t speed:18; /**< speed of the link in mbps */ - } s; -}; - -enum cvmx_fau_reg_32 { - cvmx_fau_reg_32_start = 0, -}; - -enum cvmx_fau_op_size { - cvmx_fau_op_size_8 = 0, - cvmx_fau_op_size_16 = 1, - cvmx_fau_op_size_32 = 2, - cvmx_fau_op_size_64 = 3 -}; - -typedef enum { - cvmx_spi_mode_unknown = 0, - cvmx_spi_mode_tx_halfplex = 1, - cvmx_spi_mode_rx_halfplex = 2, - cvmx_spi_mode_duplex = 3 -} cvmx_spi_mode_t; - -typedef enum { - cvmx_helper_interface_mode_disabled, - cvmx_helper_interface_mode_rgmii, - cvmx_helper_interface_mode_gmii, - cvmx_helper_interface_mode_spi, - cvmx_helper_interface_mode_pcie, - cvmx_helper_interface_mode_xaui, - cvmx_helper_interface_mode_sgmii, - cvmx_helper_interface_mode_picmg, - cvmx_helper_interface_mode_npi, - cvmx_helper_interface_mode_loop, -} cvmx_helper_interface_mode_t; - -typedef enum { - cvmx_pow_wait = 1, - cvmx_pow_no_wait = 0, -} cvmx_pow_wait_t; - -typedef enum { - cvmx_pko_lock_none = 0, - cvmx_pko_lock_atomic_tag = 1, - cvmx_pko_lock_cmd_queue = 2, -} cvmx_pko_lock_t; - -typedef enum { - cvmx_pko_success, - cvmx_pko_invalid_port, - cvmx_pko_invalid_queue, - cvmx_pko_invalid_priority, - cvmx_pko_no_memory, - cvmx_pko_port_already_setup, - cvmx_pko_cmd_queue_init_error -} cvmx_pko_status_t; - -enum cvmx_pow_tag_type { - cvmx_pow_tag_type_ordered = 0l, - cvmx_pow_tag_type_atomic = 1l, - cvmx_pow_tag_type_null = 2l, - cvmx_pow_tag_type_null_null = 3l -}; - -union cvmx_ipd_ctl_status { - uint64_t u64; - struct cvmx_ipd_ctl_status_s { - uint64_t reserved_18_63:46; - uint64_t use_sop:1; - 
uint64_t rst_done:1; - uint64_t clken:1; - uint64_t no_wptr:1; - uint64_t pq_apkt:1; - uint64_t pq_nabuf:1; - uint64_t ipd_full:1; - uint64_t pkt_off:1; - uint64_t len_m8:1; - uint64_t reset:1; - uint64_t addpkt:1; - uint64_t naddbuf:1; - uint64_t pkt_lend:1; - uint64_t wqe_lend:1; - uint64_t pbp_en:1; - uint64_t opc_mode:2; - uint64_t ipd_en:1; - } s; - struct cvmx_ipd_ctl_status_cn30xx { - uint64_t reserved_10_63:54; - uint64_t len_m8:1; - uint64_t reset:1; - uint64_t addpkt:1; - uint64_t naddbuf:1; - uint64_t pkt_lend:1; - uint64_t wqe_lend:1; - uint64_t pbp_en:1; - uint64_t opc_mode:2; - uint64_t ipd_en:1; - } cn30xx; - struct cvmx_ipd_ctl_status_cn38xxp2 { - uint64_t reserved_9_63:55; - uint64_t reset:1; - uint64_t addpkt:1; - uint64_t naddbuf:1; - uint64_t pkt_lend:1; - uint64_t wqe_lend:1; - uint64_t pbp_en:1; - uint64_t opc_mode:2; - uint64_t ipd_en:1; - } cn38xxp2; - struct cvmx_ipd_ctl_status_cn50xx { - uint64_t reserved_15_63:49; - uint64_t no_wptr:1; - uint64_t pq_apkt:1; - uint64_t pq_nabuf:1; - uint64_t ipd_full:1; - uint64_t pkt_off:1; - uint64_t len_m8:1; - uint64_t reset:1; - uint64_t addpkt:1; - uint64_t naddbuf:1; - uint64_t pkt_lend:1; - uint64_t wqe_lend:1; - uint64_t pbp_en:1; - uint64_t opc_mode:2; - uint64_t ipd_en:1; - } cn50xx; - struct cvmx_ipd_ctl_status_cn58xx { - uint64_t reserved_12_63:52; - uint64_t ipd_full:1; - uint64_t pkt_off:1; - uint64_t len_m8:1; - uint64_t reset:1; - uint64_t addpkt:1; - uint64_t naddbuf:1; - uint64_t pkt_lend:1; - uint64_t wqe_lend:1; - uint64_t pbp_en:1; - uint64_t opc_mode:2; - uint64_t ipd_en:1; - } cn58xx; - struct cvmx_ipd_ctl_status_cn63xxp1 { - uint64_t reserved_16_63:48; - uint64_t clken:1; - uint64_t no_wptr:1; - uint64_t pq_apkt:1; - uint64_t pq_nabuf:1; - uint64_t ipd_full:1; - uint64_t pkt_off:1; - uint64_t len_m8:1; - uint64_t reset:1; - uint64_t addpkt:1; - uint64_t naddbuf:1; - uint64_t pkt_lend:1; - uint64_t wqe_lend:1; - uint64_t pbp_en:1; - uint64_t opc_mode:2; - uint64_t ipd_en:1; - } 
cn63xxp1; -}; - -union cvmx_ipd_sub_port_fcs { - uint64_t u64; - struct cvmx_ipd_sub_port_fcs_s { - uint64_t port_bit:32; - uint64_t reserved_32_35:4; - uint64_t port_bit2:4; - uint64_t reserved_40_63:24; - } s; - struct cvmx_ipd_sub_port_fcs_cn30xx { - uint64_t port_bit:3; - uint64_t reserved_3_63:61; - } cn30xx; - struct cvmx_ipd_sub_port_fcs_cn38xx { - uint64_t port_bit:32; - uint64_t reserved_32_63:32; - } cn38xx; -}; - -union cvmx_ipd_sub_port_qos_cnt { - uint64_t u64; - struct cvmx_ipd_sub_port_qos_cnt_s { - uint64_t cnt:32; - uint64_t port_qos:9; - uint64_t reserved_41_63:23; - } s; -}; -typedef struct { - uint32_t dropped_octets; - uint32_t dropped_packets; - uint32_t pci_raw_packets; - uint32_t octets; - uint32_t packets; - uint32_t multicast_packets; - uint32_t broadcast_packets; - uint32_t len_64_packets; - uint32_t len_65_127_packets; - uint32_t len_128_255_packets; - uint32_t len_256_511_packets; - uint32_t len_512_1023_packets; - uint32_t len_1024_1518_packets; - uint32_t len_1519_max_packets; - uint32_t fcs_align_err_packets; - uint32_t runt_packets; - uint32_t runt_crc_packets; - uint32_t oversize_packets; - uint32_t oversize_crc_packets; - uint32_t inb_packets; - uint64_t inb_octets; - uint16_t inb_errors; -} cvmx_pip_port_status_t; - -typedef struct { - uint32_t packets; - uint64_t octets; - uint64_t doorbell; -} cvmx_pko_port_status_t; - -union cvmx_pip_frm_len_chkx { - uint64_t u64; - struct cvmx_pip_frm_len_chkx_s { - uint64_t reserved_32_63:32; - uint64_t maxlen:16; - uint64_t minlen:16; - } s; -}; - -union cvmx_gmxx_rxx_frm_ctl { - uint64_t u64; - struct cvmx_gmxx_rxx_frm_ctl_s { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t vlan_len:1; - uint64_t pad_len:1; - uint64_t pre_align:1; - uint64_t null_dis:1; - uint64_t reserved_11_11:1; - uint64_t ptp_mode:1; - uint64_t reserved_13_63:51; - } s; - struct 
cvmx_gmxx_rxx_frm_ctl_cn30xx { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t vlan_len:1; - uint64_t pad_len:1; - uint64_t reserved_9_63:55; - } cn30xx; - struct cvmx_gmxx_rxx_frm_ctl_cn31xx { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t vlan_len:1; - uint64_t reserved_8_63:56; - } cn31xx; - struct cvmx_gmxx_rxx_frm_ctl_cn50xx { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t reserved_7_8:2; - uint64_t pre_align:1; - uint64_t null_dis:1; - uint64_t reserved_11_63:53; - } cn50xx; - struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t reserved_7_8:2; - uint64_t pre_align:1; - uint64_t reserved_10_63:54; - } cn56xxp1; - struct cvmx_gmxx_rxx_frm_ctl_cn58xx { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t vlan_len:1; - uint64_t pad_len:1; - uint64_t pre_align:1; - uint64_t null_dis:1; - uint64_t reserved_11_63:53; - } cn58xx; - struct cvmx_gmxx_rxx_frm_ctl_cn61xx { - uint64_t pre_chk:1; - uint64_t pre_strp:1; - uint64_t ctl_drp:1; - uint64_t ctl_bck:1; - uint64_t ctl_mcst:1; - uint64_t ctl_smac:1; - uint64_t pre_free:1; - uint64_t reserved_7_8:2; - uint64_t pre_align:1; - uint64_t null_dis:1; - uint64_t reserved_11_11:1; - uint64_t ptp_mode:1; - uint64_t reserved_13_63:51; - } cn61xx; -}; - -union cvmx_gmxx_rxx_int_reg { - uint64_t u64; - struct cvmx_gmxx_rxx_int_reg_s { - uint64_t minerr:1; - uint64_t carext:1; - uint64_t maxerr:1; - 
uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t alnerr:1; - uint64_t lenerr:1; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t niberr:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t phy_link:1; - uint64_t phy_spd:1; - uint64_t phy_dupx:1; - uint64_t pause_drp:1; - uint64_t loc_fault:1; - uint64_t rem_fault:1; - uint64_t bad_seq:1; - uint64_t bad_term:1; - uint64_t unsop:1; - uint64_t uneop:1; - uint64_t undat:1; - uint64_t hg2fld:1; - uint64_t hg2cc:1; - uint64_t reserved_29_63:35; - } s; - struct cvmx_gmxx_rxx_int_reg_cn30xx { - uint64_t minerr:1; - uint64_t carext:1; - uint64_t maxerr:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t alnerr:1; - uint64_t lenerr:1; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t niberr:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t phy_link:1; - uint64_t phy_spd:1; - uint64_t phy_dupx:1; - uint64_t reserved_19_63:45; - } cn30xx; - struct cvmx_gmxx_rxx_int_reg_cn50xx { - uint64_t reserved_0_0:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t alnerr:1; - uint64_t reserved_6_6:1; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t niberr:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t phy_link:1; - uint64_t phy_spd:1; - uint64_t phy_dupx:1; - uint64_t pause_drp:1; - uint64_t reserved_20_63:44; - } cn50xx; - struct cvmx_gmxx_rxx_int_reg_cn52xx { - uint64_t reserved_0_0:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t reserved_5_6:2; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t reserved_9_9:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t 
reserved_16_18:3; - uint64_t pause_drp:1; - uint64_t loc_fault:1; - uint64_t rem_fault:1; - uint64_t bad_seq:1; - uint64_t bad_term:1; - uint64_t unsop:1; - uint64_t uneop:1; - uint64_t undat:1; - uint64_t hg2fld:1; - uint64_t hg2cc:1; - uint64_t reserved_29_63:35; - } cn52xx; - struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { - uint64_t reserved_0_0:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t reserved_5_6:2; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t reserved_9_9:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t reserved_16_18:3; - uint64_t pause_drp:1; - uint64_t loc_fault:1; - uint64_t rem_fault:1; - uint64_t bad_seq:1; - uint64_t bad_term:1; - uint64_t unsop:1; - uint64_t uneop:1; - uint64_t undat:1; - uint64_t reserved_27_63:37; - } cn56xxp1; - struct cvmx_gmxx_rxx_int_reg_cn58xx { - uint64_t minerr:1; - uint64_t carext:1; - uint64_t maxerr:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t alnerr:1; - uint64_t lenerr:1; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t niberr:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t phy_link:1; - uint64_t phy_spd:1; - uint64_t phy_dupx:1; - uint64_t pause_drp:1; - uint64_t reserved_20_63:44; - } cn58xx; - struct cvmx_gmxx_rxx_int_reg_cn61xx { - uint64_t minerr:1; - uint64_t carext:1; - uint64_t reserved_2_2:1; - uint64_t jabber:1; - uint64_t fcserr:1; - uint64_t reserved_5_6:2; - uint64_t rcverr:1; - uint64_t skperr:1; - uint64_t reserved_9_9:1; - uint64_t ovrerr:1; - uint64_t pcterr:1; - uint64_t rsverr:1; - uint64_t falerr:1; - uint64_t coldet:1; - uint64_t ifgerr:1; - uint64_t reserved_16_18:3; - uint64_t pause_drp:1; - uint64_t loc_fault:1; - uint64_t rem_fault:1; - uint64_t bad_seq:1; - uint64_t bad_term:1; - uint64_t unsop:1; - uint64_t uneop:1; - uint64_t undat:1; - 
uint64_t hg2fld:1; - uint64_t hg2cc:1; - uint64_t reserved_29_63:35; - } cn61xx; -}; - -union cvmx_gmxx_prtx_cfg { - uint64_t u64; - struct cvmx_gmxx_prtx_cfg_s { - uint64_t reserved_22_63:42; - uint64_t pknd:6; - uint64_t reserved_14_15:2; - uint64_t tx_idle:1; - uint64_t rx_idle:1; - uint64_t reserved_9_11:3; - uint64_t speed_msb:1; - uint64_t reserved_4_7:4; - uint64_t slottime:1; - uint64_t duplex:1; - uint64_t speed:1; - uint64_t en:1; - } s; - struct cvmx_gmxx_prtx_cfg_cn30xx { - uint64_t reserved_4_63:60; - uint64_t slottime:1; - uint64_t duplex:1; - uint64_t speed:1; - uint64_t en:1; - } cn30xx; - struct cvmx_gmxx_prtx_cfg_cn52xx { - uint64_t reserved_14_63:50; - uint64_t tx_idle:1; - uint64_t rx_idle:1; - uint64_t reserved_9_11:3; - uint64_t speed_msb:1; - uint64_t reserved_4_7:4; - uint64_t slottime:1; - uint64_t duplex:1; - uint64_t speed:1; - uint64_t en:1; - } cn52xx; -}; - -union cvmx_gmxx_rxx_adr_ctl { - uint64_t u64; - struct cvmx_gmxx_rxx_adr_ctl_s { - uint64_t reserved_4_63:60; - uint64_t cam_mode:1; - uint64_t mcst:2; - uint64_t bcst:1; - } s; -}; - -union cvmx_pip_prt_tagx { - uint64_t u64; - struct cvmx_pip_prt_tagx_s { - uint64_t reserved_54_63:10; - uint64_t portadd_en:1; - uint64_t inc_hwchk:1; - uint64_t reserved_50_51:2; - uint64_t grptagbase_msb:2; - uint64_t reserved_46_47:2; - uint64_t grptagmask_msb:2; - uint64_t reserved_42_43:2; - uint64_t grp_msb:2; - uint64_t grptagbase:4; - uint64_t grptagmask:4; - uint64_t grptag:1; - uint64_t grptag_mskip:1; - uint64_t tag_mode:2; - uint64_t inc_vs:2; - uint64_t inc_vlan:1; - uint64_t inc_prt_flag:1; - uint64_t ip6_dprt_flag:1; - uint64_t ip4_dprt_flag:1; - uint64_t ip6_sprt_flag:1; - uint64_t ip4_sprt_flag:1; - uint64_t ip6_nxth_flag:1; - uint64_t ip4_pctl_flag:1; - uint64_t ip6_dst_flag:1; - uint64_t ip4_dst_flag:1; - uint64_t ip6_src_flag:1; - uint64_t ip4_src_flag:1; - uint64_t tcp6_tag_type:2; - uint64_t tcp4_tag_type:2; - uint64_t ip6_tag_type:2; - uint64_t ip4_tag_type:2; - uint64_t 
non_tag_type:2; - uint64_t grp:4; - } s; - struct cvmx_pip_prt_tagx_cn30xx { - uint64_t reserved_40_63:24; - uint64_t grptagbase:4; - uint64_t grptagmask:4; - uint64_t grptag:1; - uint64_t reserved_30_30:1; - uint64_t tag_mode:2; - uint64_t inc_vs:2; - uint64_t inc_vlan:1; - uint64_t inc_prt_flag:1; - uint64_t ip6_dprt_flag:1; - uint64_t ip4_dprt_flag:1; - uint64_t ip6_sprt_flag:1; - uint64_t ip4_sprt_flag:1; - uint64_t ip6_nxth_flag:1; - uint64_t ip4_pctl_flag:1; - uint64_t ip6_dst_flag:1; - uint64_t ip4_dst_flag:1; - uint64_t ip6_src_flag:1; - uint64_t ip4_src_flag:1; - uint64_t tcp6_tag_type:2; - uint64_t tcp4_tag_type:2; - uint64_t ip6_tag_type:2; - uint64_t ip4_tag_type:2; - uint64_t non_tag_type:2; - uint64_t grp:4; - } cn30xx; - struct cvmx_pip_prt_tagx_cn50xx { - uint64_t reserved_40_63:24; - uint64_t grptagbase:4; - uint64_t grptagmask:4; - uint64_t grptag:1; - uint64_t grptag_mskip:1; - uint64_t tag_mode:2; - uint64_t inc_vs:2; - uint64_t inc_vlan:1; - uint64_t inc_prt_flag:1; - uint64_t ip6_dprt_flag:1; - uint64_t ip4_dprt_flag:1; - uint64_t ip6_sprt_flag:1; - uint64_t ip4_sprt_flag:1; - uint64_t ip6_nxth_flag:1; - uint64_t ip4_pctl_flag:1; - uint64_t ip6_dst_flag:1; - uint64_t ip4_dst_flag:1; - uint64_t ip6_src_flag:1; - uint64_t ip4_src_flag:1; - uint64_t tcp6_tag_type:2; - uint64_t tcp4_tag_type:2; - uint64_t ip6_tag_type:2; - uint64_t ip4_tag_type:2; - uint64_t non_tag_type:2; - uint64_t grp:4; - } cn50xx; -}; - -union cvmx_spxx_int_reg { - uint64_t u64; - struct cvmx_spxx_int_reg_s { - uint64_t reserved_32_63:32; - uint64_t spf:1; - uint64_t reserved_12_30:19; - uint64_t calerr:1; - uint64_t syncerr:1; - uint64_t diperr:1; - uint64_t tpaovr:1; - uint64_t rsverr:1; - uint64_t drwnng:1; - uint64_t clserr:1; - uint64_t spiovr:1; - uint64_t reserved_2_3:2; - uint64_t abnorm:1; - uint64_t prtnxa:1; - } s; -}; - -union cvmx_spxx_int_msk { - uint64_t u64; - struct cvmx_spxx_int_msk_s { - uint64_t reserved_12_63:52; - uint64_t calerr:1; - uint64_t 
syncerr:1; - uint64_t diperr:1; - uint64_t tpaovr:1; - uint64_t rsverr:1; - uint64_t drwnng:1; - uint64_t clserr:1; - uint64_t spiovr:1; - uint64_t reserved_2_3:2; - uint64_t abnorm:1; - uint64_t prtnxa:1; - } s; -}; - -union cvmx_pow_wq_int { - uint64_t u64; - struct cvmx_pow_wq_int_s { - uint64_t wq_int:16; - uint64_t iq_dis:16; - uint64_t reserved_32_63:32; - } s; -}; - -union cvmx_sso_wq_int_thrx { - uint64_t u64; - struct { - uint64_t iq_thr:12; - uint64_t reserved_12_13:2; - uint64_t ds_thr:12; - uint64_t reserved_26_27:2; - uint64_t tc_thr:4; - uint64_t tc_en:1; - uint64_t reserved_33_63:31; - } s; -}; - -union cvmx_stxx_int_reg { - uint64_t u64; - struct cvmx_stxx_int_reg_s { - uint64_t reserved_9_63:55; - uint64_t syncerr:1; - uint64_t frmerr:1; - uint64_t unxfrm:1; - uint64_t nosync:1; - uint64_t diperr:1; - uint64_t datovr:1; - uint64_t ovrbst:1; - uint64_t calpar1:1; - uint64_t calpar0:1; - } s; -}; - -union cvmx_stxx_int_msk { - uint64_t u64; - struct cvmx_stxx_int_msk_s { - uint64_t reserved_8_63:56; - uint64_t frmerr:1; - uint64_t unxfrm:1; - uint64_t nosync:1; - uint64_t diperr:1; - uint64_t datovr:1; - uint64_t ovrbst:1; - uint64_t calpar1:1; - uint64_t calpar0:1; - } s; -}; - -union cvmx_pow_wq_int_pc { - uint64_t u64; - struct cvmx_pow_wq_int_pc_s { - uint64_t reserved_0_7:8; - uint64_t pc_thr:20; - uint64_t reserved_28_31:4; - uint64_t pc:28; - uint64_t reserved_60_63:4; - } s; -}; - -union cvmx_pow_wq_int_thrx { - uint64_t u64; - struct cvmx_pow_wq_int_thrx_s { - uint64_t reserved_29_63:35; - uint64_t tc_en:1; - uint64_t tc_thr:4; - uint64_t reserved_23_23:1; - uint64_t ds_thr:11; - uint64_t reserved_11_11:1; - uint64_t iq_thr:11; - } s; - struct cvmx_pow_wq_int_thrx_cn30xx { - uint64_t reserved_29_63:35; - uint64_t tc_en:1; - uint64_t tc_thr:4; - uint64_t reserved_18_23:6; - uint64_t ds_thr:6; - uint64_t reserved_6_11:6; - uint64_t iq_thr:6; - } cn30xx; - struct cvmx_pow_wq_int_thrx_cn31xx { - uint64_t reserved_29_63:35; - uint64_t tc_en:1; - 
uint64_t tc_thr:4; - uint64_t reserved_20_23:4; - uint64_t ds_thr:8; - uint64_t reserved_8_11:4; - uint64_t iq_thr:8; - } cn31xx; - struct cvmx_pow_wq_int_thrx_cn52xx { - uint64_t reserved_29_63:35; - uint64_t tc_en:1; - uint64_t tc_thr:4; - uint64_t reserved_21_23:3; - uint64_t ds_thr:9; - uint64_t reserved_9_11:3; - uint64_t iq_thr:9; - } cn52xx; - struct cvmx_pow_wq_int_thrx_cn63xx { - uint64_t reserved_29_63:35; - uint64_t tc_en:1; - uint64_t tc_thr:4; - uint64_t reserved_22_23:2; - uint64_t ds_thr:10; - uint64_t reserved_10_11:2; - uint64_t iq_thr:10; - } cn63xx; -}; - -union cvmx_npi_rsl_int_blocks { - uint64_t u64; - struct cvmx_npi_rsl_int_blocks_s { - uint64_t reserved_32_63:32; - uint64_t rint_31:1; - uint64_t iob:1; - uint64_t reserved_28_29:2; - uint64_t rint_27:1; - uint64_t rint_26:1; - uint64_t rint_25:1; - uint64_t rint_24:1; - uint64_t asx1:1; - uint64_t asx0:1; - uint64_t rint_21:1; - uint64_t pip:1; - uint64_t spx1:1; - uint64_t spx0:1; - uint64_t lmc:1; - uint64_t l2c:1; - uint64_t rint_15:1; - uint64_t reserved_13_14:2; - uint64_t pow:1; - uint64_t tim:1; - uint64_t pko:1; - uint64_t ipd:1; - uint64_t rint_8:1; - uint64_t zip:1; - uint64_t dfa:1; - uint64_t fpa:1; - uint64_t key:1; - uint64_t npi:1; - uint64_t gmx1:1; - uint64_t gmx0:1; - uint64_t mio:1; - } s; - struct cvmx_npi_rsl_int_blocks_cn30xx { - uint64_t reserved_32_63:32; - uint64_t rint_31:1; - uint64_t iob:1; - uint64_t rint_29:1; - uint64_t rint_28:1; - uint64_t rint_27:1; - uint64_t rint_26:1; - uint64_t rint_25:1; - uint64_t rint_24:1; - uint64_t asx1:1; - uint64_t asx0:1; - uint64_t rint_21:1; - uint64_t pip:1; - uint64_t spx1:1; - uint64_t spx0:1; - uint64_t lmc:1; - uint64_t l2c:1; - uint64_t rint_15:1; - uint64_t rint_14:1; - uint64_t usb:1; - uint64_t pow:1; - uint64_t tim:1; - uint64_t pko:1; - uint64_t ipd:1; - uint64_t rint_8:1; - uint64_t zip:1; - uint64_t dfa:1; - uint64_t fpa:1; - uint64_t key:1; - uint64_t npi:1; - uint64_t gmx1:1; - uint64_t gmx0:1; - uint64_t mio:1; 
- } cn30xx; - struct cvmx_npi_rsl_int_blocks_cn38xx { - uint64_t reserved_32_63:32; - uint64_t rint_31:1; - uint64_t iob:1; - uint64_t rint_29:1; - uint64_t rint_28:1; - uint64_t rint_27:1; - uint64_t rint_26:1; - uint64_t rint_25:1; - uint64_t rint_24:1; - uint64_t asx1:1; - uint64_t asx0:1; - uint64_t rint_21:1; - uint64_t pip:1; - uint64_t spx1:1; - uint64_t spx0:1; - uint64_t lmc:1; - uint64_t l2c:1; - uint64_t rint_15:1; - uint64_t rint_14:1; - uint64_t rint_13:1; - uint64_t pow:1; - uint64_t tim:1; - uint64_t pko:1; - uint64_t ipd:1; - uint64_t rint_8:1; - uint64_t zip:1; - uint64_t dfa:1; - uint64_t fpa:1; - uint64_t key:1; - uint64_t npi:1; - uint64_t gmx1:1; - uint64_t gmx0:1; - uint64_t mio:1; - } cn38xx; - struct cvmx_npi_rsl_int_blocks_cn50xx { - uint64_t reserved_31_63:33; - uint64_t iob:1; - uint64_t lmc1:1; - uint64_t agl:1; - uint64_t reserved_24_27:4; - uint64_t asx1:1; - uint64_t asx0:1; - uint64_t reserved_21_21:1; - uint64_t pip:1; - uint64_t spx1:1; - uint64_t spx0:1; - uint64_t lmc:1; - uint64_t l2c:1; - uint64_t reserved_15_15:1; - uint64_t rad:1; - uint64_t usb:1; - uint64_t pow:1; - uint64_t tim:1; - uint64_t pko:1; - uint64_t ipd:1; - uint64_t reserved_8_8:1; - uint64_t zip:1; - uint64_t dfa:1; - uint64_t fpa:1; - uint64_t key:1; - uint64_t npi:1; - uint64_t gmx1:1; - uint64_t gmx0:1; - uint64_t mio:1; - } cn50xx; -}; - -union cvmx_pko_command_word0 { - uint64_t u64; - struct { - uint64_t total_bytes:16; - uint64_t segs:6; - uint64_t dontfree:1; - uint64_t ignore_i:1; - uint64_t ipoffp1:7; - uint64_t gather:1; - uint64_t rsp:1; - uint64_t wqp:1; - uint64_t n2:1; - uint64_t le:1; - uint64_t reg0:11; - uint64_t subone0:1; - uint64_t reg1:11; - uint64_t subone1:1; - uint64_t size0:2; - uint64_t size1:2; - } s; -}; - -union cvmx_ciu_timx { - uint64_t u64; - struct cvmx_ciu_timx_s { - uint64_t reserved_37_63:27; - uint64_t one_shot:1; - uint64_t len:36; - } s; -}; - -union cvmx_gmxx_rxx_rx_inbnd { - uint64_t u64; - struct 
cvmx_gmxx_rxx_rx_inbnd_s { - uint64_t status:1; - uint64_t speed:2; - uint64_t duplex:1; - uint64_t reserved_4_63:60; - } s; -}; - -static inline int32_t cvmx_fau_fetch_and_add32(enum cvmx_fau_reg_32 reg, - int32_t value) -{ - return value; -} - -static inline void cvmx_fau_atomic_add32(enum cvmx_fau_reg_32 reg, - int32_t value) -{ } - -static inline void cvmx_fau_atomic_write32(enum cvmx_fau_reg_32 reg, - int32_t value) -{ } - -static inline uint64_t cvmx_scratch_read64(uint64_t address) -{ - return 0; -} - -static inline void cvmx_scratch_write64(uint64_t address, uint64_t value) -{ } - -static inline int cvmx_wqe_get_grp(struct cvmx_wqe *work) -{ - return 0; -} - -static inline void *cvmx_phys_to_ptr(uint64_t physical_address) -{ - return (void *)(uintptr_t)(physical_address); -} - -static inline uint64_t cvmx_ptr_to_phys(void *ptr) -{ - return (unsigned long)ptr; -} - -static inline int cvmx_helper_get_interface_num(int ipd_port) -{ - return ipd_port; -} - -static inline int cvmx_helper_get_interface_index_num(int ipd_port) -{ - return ipd_port; -} - -static inline void cvmx_fpa_enable(void) -{ } - -static inline uint64_t cvmx_read_csr(uint64_t csr_addr) -{ - return 0; -} - -static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val) -{ } - -static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh) -{ - return 0; -} - -static inline void *cvmx_fpa_alloc(uint64_t pool) -{ - return null; -} - -static inline void cvmx_fpa_free(void *ptr, uint64_t pool, - uint64_t num_cache_lines) -{ } - -static inline int octeon_is_simulation(void) -{ - return 1; -} - -static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear, - cvmx_pip_port_status_t *status) -{ } - -static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear, - cvmx_pko_port_status_t *status) -{ } - -static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int - interface) -{ - return 0; -} - -static inline union cvmx_helper_link_info 
cvmx_helper_link_get(int ipd_port) -{ - union cvmx_helper_link_info ret = { .u64 = 0 }; - - return ret; -} - -static inline int cvmx_helper_link_set(int ipd_port, - union cvmx_helper_link_info link_info) -{ - return 0; -} - -static inline int cvmx_helper_initialize_packet_io_global(void) -{ - return 0; -} - -static inline int cvmx_helper_get_number_of_interfaces(void) -{ - return 2; -} - -static inline int cvmx_helper_ports_on_interface(int interface) -{ - return 1; -} - -static inline int cvmx_helper_get_ipd_port(int interface, int port) -{ - return 0; -} - -static inline int cvmx_helper_ipd_and_packet_input_enable(void) -{ - return 0; -} - -static inline void cvmx_ipd_disable(void) -{ } - -static inline void cvmx_ipd_free_ptr(void) -{ } - -static inline void cvmx_pko_disable(void) -{ } - -static inline void cvmx_pko_shutdown(void) -{ } - -static inline int cvmx_pko_get_base_queue_per_core(int port, int core) -{ - return port; -} - -static inline int cvmx_pko_get_base_queue(int port) -{ - return port; -} - -static inline int cvmx_pko_get_num_queues(int port) -{ - return port; -} - -static inline unsigned int cvmx_get_core_num(void) -{ - return 0; -} - -static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, - cvmx_pow_wait_t wait) -{ } - -static inline void cvmx_pow_work_request_async(int scr_addr, - cvmx_pow_wait_t wait) -{ } - -static inline struct cvmx_wqe *cvmx_pow_work_response_async(int scr_addr) -{ - struct cvmx_wqe *wqe = (void *)(unsigned long)scr_addr; - - return wqe; -} - -static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait) -{ - return (void *)(unsigned long)wait; -} - -static inline int cvmx_spi_restart_interface(int interface, - cvmx_spi_mode_t mode, int timeout) -{ - return 0; -} - -static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, - enum cvmx_fau_reg_32 reg, - int32_t value) -{ } - -static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed( - int interface, - int port) -{ - union 
cvmx_gmxx_rxx_rx_inbnd r; - - r.u64 = 0; - return r; -} - -static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, - cvmx_pko_lock_t use_locking) -{ } - -static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port, - uint64_t queue, union cvmx_pko_command_word0 pko_command, - union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking) -{ - return 0; -} - -static inline void cvmx_wqe_set_port(struct cvmx_wqe *work, int port) -{ } - -static inline void cvmx_wqe_set_qos(struct cvmx_wqe *work, int qos) -{ } - -static inline int cvmx_wqe_get_qos(struct cvmx_wqe *work) -{ - return 0; -} - -static inline void cvmx_wqe_set_grp(struct cvmx_wqe *work, int grp) -{ } - -static inline void cvmx_pow_work_submit(struct cvmx_wqe *wqp, uint32_t tag, - enum cvmx_pow_tag_type tag_type, - uint64_t qos, uint64_t grp) -{ } - -#define cvmx_asxx_rx_clk_setx(a, b) ((a)+(b)) -#define cvmx_asxx_tx_clk_setx(a, b) ((a)+(b)) -#define cvmx_ciu_timx(a) (a) -#define cvmx_gmxx_rxx_adr_cam0(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_adr_cam1(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_adr_cam2(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_adr_cam3(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_adr_cam4(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_adr_cam5(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_frm_ctl(a, b) ((a)+(b)) -#define cvmx_gmxx_rxx_int_reg(a, b) ((a)+(b)) -#define cvmx_gmxx_smacx(a, b) ((a)+(b)) -#define cvmx_pip_prt_tagx(a) (a) -#define cvmx_pow_pp_grp_mskx(a) (a) -#define cvmx_pow_wq_int_thrx(a) (a) -#define cvmx_spxx_int_msk(a) (a) -#define cvmx_spxx_int_reg(a) (a) -#define cvmx_sso_ppx_grp_msk(a) (a) -#define cvmx_sso_wq_int_thrx(a) (a) -#define cvmx_stxx_int_msk(a) (a) -#define cvmx_stxx_int_reg(a) (a)
|
Drivers in the Staging area
|
710d7fbe21ee2ceab121f1f84a20edf68f9f9742
|
greg kroah hartman
|
drivers
|
staging
|
octeon
|
bluetooth: hci_bcm: disallow set_baudrate for bcm4354
|
without updating the patchram, the bcm4354 does not support a higher operating speed. the normal bcm_setup follows the correct order (init_speed, patchram and then oper_speed) but the serdev driver will set the operating speed before calling the hu->setup function. thus, for the bcm4354, don't set the operating speed before patchram.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
additional changes for bcm4354 support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_bcm']
|
['c']
| 1
| 29
| 2
|
--- diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c +/** + * struct bcm_device_data - device specific data + * @no_early_set_baudrate: disallow set baudrate before driver setup() + */ +struct bcm_device_data { + bool no_early_set_baudrate; +}; + + * @no_early_set_baudrate: don't set_baudrate before setup() + bool no_early_set_baudrate; - hu->oper_speed = bcm->dev->oper_speed; + + /* if oper_speed is set, ldisc/serdev will set the baudrate + * before calling setup() + */ + if (!bcm->dev->no_early_set_baudrate) + hu->oper_speed = bcm->dev->oper_speed; + + else if (bcm->dev && bcm->dev->oper_speed) + speed = bcm->dev->oper_speed; + const struct bcm_device_data *data; + data = device_get_match_data(bcmdev->dev); + if (data) + bcmdev->no_early_set_baudrate = data->no_early_set_baudrate; + +static struct bcm_device_data bcm4354_device_data = { + .no_early_set_baudrate = true, +}; + - { .compatible = "brcm,bcm43540-bt" }, + { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
|
Networking
|
5d6f391073d5c1c903ac12be72c66b96b2ae93f4
|
abhishek pandit subedi
|
drivers
|
bluetooth
| |
bluetooth: btbcm: support pcm configuration
|
add bcm vendor specific command to configure pcm parameters. the new vendor opcode allows us to set the sco routing, the pcm interface rate, and a few other pcm specific options (frame sync, sync mode, and clock mode). see broadcom-bluetooth.txt in documentation for more information about valid values for those settings.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
additional changes for bcm4354 support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_bcm']
|
['c', 'h']
| 2
| 62
| 0
|
--- diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c +int btbcm_read_pcm_int_params(struct hci_dev *hdev, + struct bcm_set_pcm_int_params *params) +{ + struct sk_buff *skb; + int err = 0; + + skb = __hci_cmd_sync(hdev, 0xfc1d, 0, null, hci_init_timeout); + if (is_err(skb)) { + err = ptr_err(skb); + bt_dev_err(hdev, "bcm: read pcm int params failed (%d)", err); + return err; + } + + if (skb->len != 6 || skb->data[0]) { + bt_dev_err(hdev, "bcm: read pcm int params length mismatch"); + kfree_skb(skb); + return -eio; + } + + if (params) + memcpy(params, skb->data + 1, 5); + + kfree_skb(skb); + + return 0; +} +export_symbol_gpl(btbcm_read_pcm_int_params); + +int btbcm_write_pcm_int_params(struct hci_dev *hdev, + const struct bcm_set_pcm_int_params *params) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync(hdev, 0xfc1c, 5, params, hci_init_timeout); + if (is_err(skb)) { + err = ptr_err(skb); + bt_dev_err(hdev, "bcm: write pcm int params failed (%d)", err); + return err; + } + kfree_skb(skb); + + return 0; +} +export_symbol_gpl(btbcm_write_pcm_int_params); + diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h --- a/drivers/bluetooth/btbcm.h +++ b/drivers/bluetooth/btbcm.h +int btbcm_read_pcm_int_params(struct hci_dev *hdev, + struct bcm_set_pcm_int_params *params); +int btbcm_write_pcm_int_params(struct hci_dev *hdev, + const struct bcm_set_pcm_int_params *params); +int btbcm_read_pcm_int_params(struct hci_dev *hdev, + struct bcm_set_pcm_int_params *params) +{ + return -eopnotsupp; +} + +int btbcm_write_pcm_int_params(struct hci_dev *hdev, + const struct bcm_set_pcm_int_params *params) +{ + return -eopnotsupp; +} +
|
Networking
|
528379902337102b0264fe5343eafb3d6c59fa45
|
abhishek pandit subedi
|
drivers
|
bluetooth
| |
dt-bindings: net: bluetooth: update broadcom-bluetooth
|
add documentation for brcm,bt-pcm-int-params vendor specific configuration of the sco pcm settings.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
additional changes for bcm4354 support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_bcm']
|
['txt']
| 1
| 7
| 0
|
--- diff --git a/documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/documentation/devicetree/bindings/net/broadcom-bluetooth.txt --- a/documentation/devicetree/bindings/net/broadcom-bluetooth.txt +++ b/documentation/devicetree/bindings/net/broadcom-bluetooth.txt - "lpo": external low power 32.768 khz clock - vbat-supply: phandle to regulator supply for vbat - vddio-supply: phandle to regulator supply for vddio + - brcm,bt-pcm-int-params: configure pcm parameters via a 5-byte array + - sco-routing: 0 = pcm, 1 = transport, 2 = codec, 3 = i2s + - pcm-interface-rate: 128kbps, 256kbps, 512kbps, 1024kbps, 2048kbps + - pcm-frame-type: short, long + - pcm-sync-mode: slave, master + - pcm-clock-mode: slave, master + brcm,bt-pcm-int-params = [1 2 0 1 1];
|
Networking
|
6fc0e19036d6198b9287bb74c0c9df7c17ec8c2e
|
abhishek pandit subedi
|
documentation
|
devicetree
|
bindings, net
|
bluetooth: hci_bcm: support pcm params in dts
|
bcm chips may require configuration of pcm to operate correctly and there is a vendor specific hci command to do this. add support in the hci_bcm driver to parse this from devicetree and configure the chip.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
additional changes for bcm4354 support
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_bcm']
|
['c']
| 1
| 19
| 0
|
--- diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c + u8 pcm_int_params[5]; + /* pcm parameters if provided */ + if (bcm->dev && bcm->dev->pcm_int_params[0] != 0xff) { + struct bcm_set_pcm_int_params params; + + btbcm_read_pcm_int_params(hu->hdev, ¶ms); + + memcpy(¶ms, bcm->dev->pcm_int_params, 5); + btbcm_write_pcm_int_params(hu->hdev, ¶ms); + } + + device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params", + bdev->pcm_int_params, 5); + /* initialize routing field to an unused value */ + dev->pcm_int_params[0] = 0xff; + + /* initialize routing field to an unused value */ + bcmdev->pcm_int_params[0] = 0xff; +
|
Networking
|
eb762b94111b646b4f116ebfdbfcadbad14e12b3
|
abhishek pandit subedi
|
drivers
|
bluetooth
| |
bluetooth: btusb: add support for 04ca:3021 qca_rome device
|
usb "vendorid:04ca productid:3021" is a new qca rome usb bluetooth device, this patch will support firmware downloading for it.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for 04ca:3021 qca_rome device
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'btusb']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c + { usb_device(0x04ca, 0x3021), .driver_info = btusb_qca_rome },
|
Networking
|
19220f35b3708dc069135046061fbe7366d5cb6e
|
rocky liao
|
drivers
|
bluetooth
| |
bluetooth: hci_bcm: drive rts only for bcm43438
|
the commit 3347a80965b3 ("bluetooth: hci_bcm: fix rts handling during startup") is causing at least a regression for ap6256 on orange pi 3. so do the rts line handling during startup only on the necessary platform.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
drive rts only for bcm43438
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_bcm']
|
['c']
| 1
| 17
| 4
|
--- diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c + bool drive_rts_on_open; + bool drive_rts_on_open; - hci_uart_set_flow_control(hu, true); + if (bcm->dev->drive_rts_on_open) + hci_uart_set_flow_control(hu, true); + - hci_uart_set_flow_control(hu, false); + + if (bcm->dev->drive_rts_on_open) + hci_uart_set_flow_control(hu, false); + - if (data) + if (data) { + bcmdev->drive_rts_on_open = data->drive_rts_on_open; + } +static struct bcm_device_data bcm43438_device_data = { + .drive_rts_on_open = true, +}; + - { .compatible = "brcm,bcm43438-bt" }, + { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
|
Networking
|
e601daed271e9eb1b923972a0a1af65f8c7bb77b
|
stefan wahren
|
drivers
|
bluetooth
| |
bluetooth: hci_h4: add support for iso packets
|
this enables h4 driver to properly handle iso packets.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for iso packets
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_h4']
|
['c', 'h']
| 2
| 8
| 0
|
--- diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c + { h4_recv_iso, .recv = hci_recv_frame }, diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h +#define h4_recv_iso \ + .type = hci_isodata_pkt, \ + .hlen = hci_iso_hdr_size, \ + .loff = 2, \ + .lsize = 2, \ + .maxlen = hci_max_frame_size \ +
|
Networking
|
ef564119ba832f55337935038dc0a91baa7417d1
|
luiz augusto von dentz
|
drivers
|
bluetooth
| |
bluetooth: hci_h5: add support for iso packets
|
this enables h5 driver to properly handle iso packets.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for iso packets
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_h5']
|
['c']
| 1
| 3
| 0
|
--- diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c + case hci_isodata_pkt: + case hci_isodata_pkt: + case hci_isodata_pkt:
|
Networking
|
1cc3c10c5aea84d4b0400423449c316eed3f27df
|
luiz augusto von dentz
|
drivers
|
bluetooth
| |
bluetooth: hci_vhci: add support for iso packets
|
this make virtual controllers to pass iso packets around.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for iso packets
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_vhci']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c + case hci_isodata_pkt:
|
Networking
|
f92a8cb569e39e28515e3f9c0ccaa16f874644b7
|
luiz augusto von dentz
|
drivers
|
bluetooth
| |
bluetooth: hci_qca: enable power off/on support during hci down/up for qca rome
|
this patch registers the hdev->shutdown() callback and also sets hci_quirk_non_persistent_setup for qca rome. it will power-off the bt chip during hci down and power-on/initialize the chip again during hci up. as wcn399x already enabled this, this patch also removes the callback registration and quirk setting in qca_setup() for wcn399x and uniformly does this in the probe() routine.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable power off/on support during hci down/up for qca rome
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bluetooth ', 'hci_qca']
|
['c']
| 1
| 11
| 9
|
--- diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c - /* enable non_persistent_setup quirk to ensure to execute - * setup for every hci up. - */ - set_bit(hci_quirk_non_persistent_setup, &hdev->quirks); - hu->hdev->shutdown = qca_power_off; + struct hci_dev *hdev; - goto out; + return err; - goto out; + return err; - if (err) + if (err) { + bt_err("rome serdev registration failed"); + return err; + } -out: return err; + hdev = qcadev->serdev_hu.hdev; + set_bit(hci_quirk_non_persistent_setup, &hdev->quirks); + hdev->shutdown = qca_power_off; + return 0;
|
Networking
|
ae563183b647b3bdb47e8a78a5de879adf733735
|
rocky liao
|
drivers
|
bluetooth
| |
ib/hfi1: add rcvshortlengtherrcnt to hfi1stats
|
this counter, rxshrerr, is required for error analysis and debug.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add rcvshortlengtherrcnt to hfi1stats
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['infiniband ', 'hfi1']
|
['c', 'h']
| 3
| 3
| 0
|
--- diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c +[c_rx_short_err] = rxe32_dev_cntr_elem(rxshrerr, rcv_short_err_cnt, cntr_synth), diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h + c_rx_short_err, diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h +#define rcv_short_err_cnt 2
|
Networking
|
2c9d4e26d1ab27ceae2ded2ffe930f8e5f5b2a89
|
mike marciniszyn kaike wan kaike wan intel com
|
drivers
|
infiniband
|
hfi1, hw
|
ib/hfi1: add software counter for ctxt0 seq drop
|
all other code paths increment some form of drop counter.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add software counter for ctxt0 seq drop
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['infiniband ', 'hfi1']
|
['c', 'h']
| 4
| 14
| 0
|
--- diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c +static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry, + void *context, int vl, int mode, u64 data) +{ + struct hfi1_devdata *dd = context; + + return dd->ctx0_seq_drop; +} + +[c_sw_ctx0_seq_drop] = cntr_elem("seqdrop0", 0, 0, cntr_normal, + access_sw_ctx0_seq_drop), diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h + c_sw_ctx0_seq_drop, diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c + packet->rcd->dd->ctx0_seq_drop++; diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h + u64 ctx0_seq_drop; +
|
Networking
|
5ffd048698ea5139743acd45e8ab388a683642b8
|
mike marciniszyn
|
drivers
|
infiniband
|
hfi1, hw
|
rdma/hns: remove some redundant variables related to capabilities
|
in struct hns_roce_caps, max_srq_sg and max_srqwqes is unused, and max_srqs has the same effect with num_srqs. so remove them from this structure.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
get pf capabilities from firmware
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['rdma ', 'hns']
|
['h', 'c']
| 3
| 1
| 7
|
--- diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h - u32 max_srq_sg; - u32 max_srqs; - u32 max_srqwqes; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c - caps->max_srqwqes = hns_roce_v2_max_srqwqe_num; - caps->max_srq_sg = hns_roce_v2_max_srq_sge_num; - caps->max_srqs = hns_roce_v2_max_srq; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c - props->max_srq = hr_dev->caps.max_srqs; + props->max_srq = hr_dev->caps.num_srqs;
|
Networking
|
a91e093cad37c60dc16ca382ed6f869c4c99fb77
|
weihang li
|
drivers
|
infiniband
|
hns, hw
|
rdma/hns: add interfaces to get pf capabilities from firmware
|
pf capabilities are set by default for hip08 previously which should depend on different types of hardware. so add new interfaces to get them from firmware.
|
this release adds wireguard, a fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets you trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets you extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
get pf capabilities from firmware
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['rdma ', 'hns']
|
['h', 'c']
| 3
| 527
| 0
|
--- diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h + u16 default_ceq_max_cnt; + u16 default_ceq_period; + u16 default_aeq_max_cnt; + u16 default_aeq_period; + u16 default_aeq_arm_st; + u16 default_ceq_arm_st; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +static void set_default_caps(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_caps *caps = &hr_dev->caps; + + caps->num_qps = hns_roce_v2_max_qp_num; + caps->max_wqes = hns_roce_v2_max_wqe_num; + caps->num_cqs = hns_roce_v2_max_cq_num; + caps->num_srqs = hns_roce_v2_max_srq_num; + caps->min_cqes = hns_roce_min_cqe_num; + caps->max_cqes = hns_roce_v2_max_cqe_num; + caps->max_sq_sg = hns_roce_v2_max_sq_sge_num; + caps->max_extend_sg = hns_roce_v2_max_extend_sge_num; + caps->max_rq_sg = hns_roce_v2_max_rq_sge_num; + caps->max_sq_inline = hns_roce_v2_max_sq_inline; + caps->num_uars = hns_roce_v2_uar_num; + caps->phy_num_uars = hns_roce_v2_phy_uar_num; + caps->num_aeq_vectors = hns_roce_v2_aeqe_vec_num; + caps->num_comp_vectors = hns_roce_v2_comp_vec_num; + caps->num_other_vectors = hns_roce_v2_abnormal_vec_num; + caps->num_mtpts = hns_roce_v2_max_mtpt_num; + caps->num_mtt_segs = hns_roce_v2_max_mtt_segs; + caps->num_cqe_segs = hns_roce_v2_max_cqe_segs; + caps->num_srqwqe_segs = hns_roce_v2_max_srqwqe_segs; + caps->num_idx_segs = hns_roce_v2_max_idx_segs; + caps->num_pds = hns_roce_v2_max_pd_num; + caps->max_qp_init_rdma = hns_roce_v2_max_qp_init_rdma; + caps->max_qp_dest_rdma = hns_roce_v2_max_qp_dest_rdma; + caps->max_sq_desc_sz = hns_roce_v2_max_sq_desc_sz; + caps->max_rq_desc_sz = hns_roce_v2_max_rq_desc_sz; + caps->max_srq_desc_sz = hns_roce_v2_max_srq_desc_sz; + caps->qpc_entry_sz = hns_roce_v2_qpc_entry_sz; 
+ caps->irrl_entry_sz = hns_roce_v2_irrl_entry_sz; + caps->trrl_entry_sz = hns_roce_v2_trrl_entry_sz; + caps->cqc_entry_sz = hns_roce_v2_cqc_entry_sz; + caps->srqc_entry_sz = hns_roce_v2_srqc_entry_sz; + caps->mtpt_entry_sz = hns_roce_v2_mtpt_entry_sz; + caps->mtt_entry_sz = hns_roce_v2_mtt_entry_sz; + caps->idx_entry_sz = hns_roce_v2_idx_entry_sz; + caps->cq_entry_sz = hns_roce_v2_cqe_entry_size; + caps->page_size_cap = hns_roce_v2_page_size_supported; + caps->reserved_lkey = 0; + caps->reserved_pds = 0; + caps->reserved_mrws = 1; + caps->reserved_uars = 0; + caps->reserved_cqs = 0; + caps->reserved_srqs = 0; + caps->reserved_qps = hns_roce_v2_rsv_qps; + + caps->qpc_ba_pg_sz = 0; + caps->qpc_buf_pg_sz = 0; + caps->qpc_hop_num = hns_roce_context_hop_num; + caps->srqc_ba_pg_sz = 0; + caps->srqc_buf_pg_sz = 0; + caps->srqc_hop_num = hns_roce_context_hop_num; + caps->cqc_ba_pg_sz = 0; + caps->cqc_buf_pg_sz = 0; + caps->cqc_hop_num = hns_roce_context_hop_num; + caps->mpt_ba_pg_sz = 0; + caps->mpt_buf_pg_sz = 0; + caps->mpt_hop_num = hns_roce_context_hop_num; + caps->mtt_ba_pg_sz = 0; + caps->mtt_buf_pg_sz = 0; + caps->mtt_hop_num = hns_roce_mtt_hop_num; + caps->wqe_sq_hop_num = hns_roce_sqwqe_hop_num; + caps->wqe_sge_hop_num = hns_roce_ext_sge_hop_num; + caps->wqe_rq_hop_num = hns_roce_rqwqe_hop_num; + caps->cqe_ba_pg_sz = hns_roce_ba_pg_sz_supported_256k; + caps->cqe_buf_pg_sz = 0; + caps->cqe_hop_num = hns_roce_cqe_hop_num; + caps->srqwqe_ba_pg_sz = 0; + caps->srqwqe_buf_pg_sz = 0; + caps->srqwqe_hop_num = hns_roce_srqwqe_hop_num; + caps->idx_ba_pg_sz = 0; + caps->idx_buf_pg_sz = 0; + caps->idx_hop_num = hns_roce_idx_hop_num; + caps->chunk_sz = hns_roce_v2_table_chunk_size; + + caps->flags = hns_roce_cap_flag_rereg_mr | + hns_roce_cap_flag_roce_v1_v2 | + hns_roce_cap_flag_rq_inline | + hns_roce_cap_flag_record_db | + hns_roce_cap_flag_sq_record_db; + + caps->pkey_table_len[0] = 1; + caps->gid_table_len[0] = hns_roce_v2_gid_index_num; + caps->ceqe_depth = 
hns_roce_v2_comp_eqe_num; + caps->aeqe_depth = hns_roce_v2_async_eqe_num; + caps->local_ca_ack_delay = 0; + caps->max_mtu = ib_mtu_4096; + + caps->max_srq_wrs = hns_roce_v2_max_srq_wr; + caps->max_srq_sges = hns_roce_v2_max_srq_sge; + + if (hr_dev->pci_dev->revision == pci_revision_id_hip08_b) { + caps->flags |= hns_roce_cap_flag_atomic | hns_roce_cap_flag_mw | + hns_roce_cap_flag_srq | hns_roce_cap_flag_frmr | + hns_roce_cap_flag_qp_flow_ctrl; + + caps->num_qpc_timer = hns_roce_v2_max_qpc_timer_num; + caps->qpc_timer_entry_sz = hns_roce_v2_qpc_timer_entry_sz; + caps->qpc_timer_ba_pg_sz = 0; + caps->qpc_timer_buf_pg_sz = 0; + caps->qpc_timer_hop_num = hns_roce_hop_num_0; + caps->num_cqc_timer = hns_roce_v2_max_cqc_timer_num; + caps->cqc_timer_entry_sz = hns_roce_v2_cqc_timer_entry_sz; + caps->cqc_timer_ba_pg_sz = 0; + caps->cqc_timer_buf_pg_sz = 0; + caps->cqc_timer_hop_num = hns_roce_hop_num_0; + + caps->sccc_entry_sz = hns_roce_v2_sccc_entry_sz; + caps->sccc_ba_pg_sz = 0; + caps->sccc_buf_pg_sz = 0; + caps->sccc_hop_num = hns_roce_sccc_hop_num; + } +} + +static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num, + int *buf_page_size, int *bt_page_size, u32 hem_type) +{ + u64 obj_per_chunk; + int bt_chunk_size = 1 << page_shift; + int buf_chunk_size = 1 << page_shift; + int obj_per_chunk_default = buf_chunk_size / obj_size; + + *buf_page_size = 0; + *bt_page_size = 0; + + switch (hop_num) { + case 3: + obj_per_chunk = ctx_bt_num * (bt_chunk_size / ba_byte_len) * + (bt_chunk_size / ba_byte_len) * + (bt_chunk_size / ba_byte_len) * + obj_per_chunk_default; + break; + case 2: + obj_per_chunk = ctx_bt_num * (bt_chunk_size / ba_byte_len) * + (bt_chunk_size / ba_byte_len) * + obj_per_chunk_default; + break; + case 1: + obj_per_chunk = ctx_bt_num * (bt_chunk_size / ba_byte_len) * + obj_per_chunk_default; + break; + case hns_roce_hop_num_0: + obj_per_chunk = ctx_bt_num * obj_per_chunk_default; + break; + default: + pr_err("table %d not support hop_num = 
%d! ", hem_type, + hop_num); + return; + } + + if (hem_type >= hem_type_mtt) + *bt_page_size = ilog2(div_round_up(obj_num, obj_per_chunk)); + else + *buf_page_size = ilog2(div_round_up(obj_num, obj_per_chunk)); +} + +static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_cmq_desc desc[hns_roce_query_pf_caps_cmd_num]; + struct hns_roce_caps *caps = &hr_dev->caps; + struct hns_roce_query_pf_caps_a *resp_a; + struct hns_roce_query_pf_caps_b *resp_b; + struct hns_roce_query_pf_caps_c *resp_c; + struct hns_roce_query_pf_caps_d *resp_d; + struct hns_roce_query_pf_caps_e *resp_e; + int ctx_hop_num; + int pbl_hop_num; + int ret; + int i; + + for (i = 0; i < hns_roce_query_pf_caps_cmd_num; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], + hns_roce_opc_query_pf_caps_num, + true); + if (i < (hns_roce_query_pf_caps_cmd_num - 1)) + desc[i].flag |= cpu_to_le16(hns_roce_cmd_flag_next); + else + desc[i].flag &= ~cpu_to_le16(hns_roce_cmd_flag_next); + } + + ret = hns_roce_cmq_send(hr_dev, desc, hns_roce_query_pf_caps_cmd_num); + if (ret) + return ret; + + resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data; + resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data; + resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data; + resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data; + resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data; + + caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; + caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); + caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); + caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); + caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg); + caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer); + caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer); + caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); + caps->num_aeq_vectors = resp_a->num_aeq_vectors; + caps->num_other_vectors = resp_a->num_other_vectors; + caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; 
+ caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; + caps->max_srq_desc_sz = resp_a->max_srq_desc_sz; + caps->cq_entry_sz = resp_a->cq_entry_sz; + + caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; + caps->irrl_entry_sz = resp_b->irrl_entry_sz; + caps->trrl_entry_sz = resp_b->trrl_entry_sz; + caps->cqc_entry_sz = resp_b->cqc_entry_sz; + caps->srqc_entry_sz = resp_b->srqc_entry_sz; + caps->idx_entry_sz = resp_b->idx_entry_sz; + caps->sccc_entry_sz = resp_b->scc_ctx_entry_sz; + caps->max_mtu = resp_b->max_mtu; + caps->qpc_entry_sz = le16_to_cpu(resp_b->qpc_entry_sz); + caps->min_cqes = resp_b->min_cqes; + caps->min_wqes = resp_b->min_wqes; + caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); + caps->pkey_table_len[0] = resp_b->pkey_table_len; + caps->phy_num_uars = resp_b->phy_num_uars; + ctx_hop_num = resp_b->ctx_hop_num; + pbl_hop_num = resp_b->pbl_hop_num; + + caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds, + v2_query_pf_caps_c_num_pds_m, + v2_query_pf_caps_c_num_pds_s); + caps->flags = roce_get_field(resp_c->cap_flags_num_pds, + v2_query_pf_caps_c_cap_flags_m, + v2_query_pf_caps_c_cap_flags_s); + caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs, + v2_query_pf_caps_c_num_cqs_m, + v2_query_pf_caps_c_num_cqs_s); + caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs, + v2_query_pf_caps_c_max_gid_m, + v2_query_pf_caps_c_max_gid_s); + caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth, + v2_query_pf_caps_c_cq_depth_m, + v2_query_pf_caps_c_cq_depth_s); + caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws, + v2_query_pf_caps_c_num_mrws_m, + v2_query_pf_caps_c_num_mrws_s); + caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps, + v2_query_pf_caps_c_num_qps_m, + v2_query_pf_caps_c_num_qps_s); + caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps, + v2_query_pf_caps_c_max_ord_m, + v2_query_pf_caps_c_max_ord_s); + caps->max_qp_dest_rdma = caps->max_qp_init_rdma; + caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); + 
caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs, + v2_query_pf_caps_d_num_srqs_m, + v2_query_pf_caps_d_num_srqs_s); + caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); + caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth, + v2_query_pf_caps_d_ceq_depth_m, + v2_query_pf_caps_d_ceq_depth_s); + caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth, + v2_query_pf_caps_d_num_ceqs_m, + v2_query_pf_caps_d_num_ceqs_s); + caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth, + v2_query_pf_caps_d_aeq_depth_m, + v2_query_pf_caps_d_aeq_depth_s); + caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, + v2_query_pf_caps_d_aeq_arm_st_m, + v2_query_pf_caps_d_aeq_arm_st_s); + caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth, + v2_query_pf_caps_d_ceq_arm_st_m, + v2_query_pf_caps_d_ceq_arm_st_s); + caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds, + v2_query_pf_caps_d_rsv_pds_m, + v2_query_pf_caps_d_rsv_pds_s); + caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds, + v2_query_pf_caps_d_num_uars_m, + v2_query_pf_caps_d_num_uars_s); + caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps, + v2_query_pf_caps_d_rsv_qps_m, + v2_query_pf_caps_d_rsv_qps_s); + caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps, + v2_query_pf_caps_d_rsv_uars_m, + v2_query_pf_caps_d_rsv_uars_s); + caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws, + v2_query_pf_caps_e_rsv_mrws_m, + v2_query_pf_caps_e_rsv_mrws_s); + caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws, + v2_query_pf_caps_e_chunk_size_shift_m, + v2_query_pf_caps_e_chunk_size_shift_s); + caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs, + v2_query_pf_caps_e_rsv_cqs_m, + v2_query_pf_caps_e_rsv_cqs_s); + caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs, + v2_query_pf_caps_e_rsv_srqs_m, + v2_query_pf_caps_e_rsv_srqs_s); + caps->reserved_lkey = 
roce_get_field(resp_e->rsv_lkey, + v2_query_pf_caps_e_rsv_lkeys_m, + v2_query_pf_caps_e_rsv_lkeys_s); + caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); + caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); + caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); + caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); + + caps->qpc_timer_entry_sz = hns_roce_v2_qpc_timer_entry_sz; + caps->cqc_timer_entry_sz = hns_roce_v2_cqc_timer_entry_sz; + caps->mtt_entry_sz = hns_roce_v2_mtt_entry_sz; + caps->num_mtt_segs = hns_roce_v2_max_mtt_segs; + caps->mtt_ba_pg_sz = 0; + caps->num_cqe_segs = hns_roce_v2_max_cqe_segs; + caps->num_srqwqe_segs = hns_roce_v2_max_srqwqe_segs; + caps->num_idx_segs = hns_roce_v2_max_idx_segs; + + caps->qpc_hop_num = ctx_hop_num; + caps->srqc_hop_num = ctx_hop_num; + caps->cqc_hop_num = ctx_hop_num; + caps->mpt_hop_num = ctx_hop_num; + caps->mtt_hop_num = pbl_hop_num; + caps->cqe_hop_num = pbl_hop_num; + caps->srqwqe_hop_num = pbl_hop_num; + caps->idx_hop_num = pbl_hop_num; + caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, + v2_query_pf_caps_d_sqwqe_hop_num_m, + v2_query_pf_caps_d_sqwqe_hop_num_s); + caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, + v2_query_pf_caps_d_ex_sge_hop_num_m, + v2_query_pf_caps_d_ex_sge_hop_num_s); + caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs, + v2_query_pf_caps_d_rqwqe_hop_num_m, + v2_query_pf_caps_d_rqwqe_hop_num_s); + + calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num, + caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz, + hem_type_qpc); + calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num, + caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz, + hem_type_mtpt); + calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num, + caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz, + hem_type_cqc); + calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num, 
+ caps->srqc_bt_num, &caps->srqc_buf_pg_sz, + &caps->srqc_ba_pg_sz, hem_type_srqc); + + if (hr_dev->pci_dev->revision == pci_revision_id_hip08_b) { + caps->sccc_hop_num = ctx_hop_num; + caps->qpc_timer_hop_num = hns_roce_hop_num_0; + caps->cqc_timer_hop_num = hns_roce_hop_num_0; + + calc_pg_sz(caps->num_qps, caps->sccc_entry_sz, + caps->sccc_hop_num, caps->sccc_bt_num, + &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz, + hem_type_sccc); + calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz, + caps->cqc_timer_hop_num, caps->cqc_timer_bt_num, + &caps->cqc_timer_buf_pg_sz, + &caps->cqc_timer_ba_pg_sz, hem_type_cqc_timer); + } + + calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num, + 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, hem_type_cqe); + calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz, + caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz, + &caps->srqwqe_ba_pg_sz, hem_type_srqwqe); + calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num, + 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, hem_type_idx); + + return 0; +} + diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +#define hns_roce_v2_idx_entry_sz 4 +#define hns_roce_sqwqe_hop_num 2 +#define hns_roce_ext_sge_hop_num 1 +#define hns_roce_rqwqe_hop_num 2 + hns_roce_opc_query_pf_caps_num = 0x8408, +#define hns_roce_query_pf_caps_cmd_num 5 +struct hns_roce_query_pf_caps_a { + u8 number_ports; + u8 local_ca_ack_delay; + __le16 max_sq_sg; + __le16 max_sq_inline; + __le16 max_rq_sg; + __le32 max_extend_sg; + __le16 num_qpc_timer; + __le16 num_cqc_timer; + __le16 max_srq_sges; + u8 num_aeq_vectors; + u8 num_other_vectors; + u8 max_sq_desc_sz; + u8 max_rq_desc_sz; + u8 max_srq_desc_sz; + u8 cq_entry_sz; +}; + +struct hns_roce_query_pf_caps_b { + u8 mtpt_entry_sz; + u8 irrl_entry_sz; + u8 trrl_entry_sz; + u8 cqc_entry_sz; + u8 
srqc_entry_sz; + u8 idx_entry_sz; + u8 scc_ctx_entry_sz; + u8 max_mtu; + __le16 qpc_entry_sz; + __le16 qpc_timer_entry_sz; + __le16 cqc_timer_entry_sz; + u8 min_cqes; + u8 min_wqes; + __le32 page_size_cap; + u8 pkey_table_len; + u8 phy_num_uars; + u8 ctx_hop_num; + u8 pbl_hop_num; +}; + +struct hns_roce_query_pf_caps_c { + __le32 cap_flags_num_pds; + __le32 max_gid_num_cqs; + __le32 cq_depth; + __le32 num_mrws; + __le32 ord_num_qps; + __le16 sq_depth; + __le16 rq_depth; +}; + +#define v2_query_pf_caps_c_num_pds_s 0 +#define v2_query_pf_caps_c_num_pds_m genmask(19, 0) + +#define v2_query_pf_caps_c_cap_flags_s 20 +#define v2_query_pf_caps_c_cap_flags_m genmask(31, 20) + +#define v2_query_pf_caps_c_num_cqs_s 0 +#define v2_query_pf_caps_c_num_cqs_m genmask(19, 0) + +#define v2_query_pf_caps_c_max_gid_s 20 +#define v2_query_pf_caps_c_max_gid_m genmask(28, 20) + +#define v2_query_pf_caps_c_cq_depth_s 0 +#define v2_query_pf_caps_c_cq_depth_m genmask(22, 0) + +#define v2_query_pf_caps_c_num_mrws_s 0 +#define v2_query_pf_caps_c_num_mrws_m genmask(19, 0) + +#define v2_query_pf_caps_c_num_qps_s 0 +#define v2_query_pf_caps_c_num_qps_m genmask(19, 0) + +#define v2_query_pf_caps_c_max_ord_s 20 +#define v2_query_pf_caps_c_max_ord_m genmask(27, 20) + +struct hns_roce_query_pf_caps_d { + __le32 wq_hop_num_max_srqs; + __le16 srq_depth; + __le16 rsv; + __le32 num_ceqs_ceq_depth; + __le32 arm_st_aeq_depth; + __le32 num_uars_rsv_pds; + __le32 rsv_uars_rsv_qps; +}; +#define v2_query_pf_caps_d_num_srqs_s 0 +#define v2_query_pf_caps_d_num_srqs_m genmask(20, 0) + +#define v2_query_pf_caps_d_rqwqe_hop_num_s 20 +#define v2_query_pf_caps_d_rqwqe_hop_num_m genmask(21, 20) + +#define v2_query_pf_caps_d_ex_sge_hop_num_s 22 +#define v2_query_pf_caps_d_ex_sge_hop_num_m genmask(23, 22) + +#define v2_query_pf_caps_d_sqwqe_hop_num_s 24 +#define v2_query_pf_caps_d_sqwqe_hop_num_m genmask(25, 24) + + +#define v2_query_pf_caps_d_ceq_depth_s 0 +#define v2_query_pf_caps_d_ceq_depth_m genmask(21, 0) + 
+#define v2_query_pf_caps_d_num_ceqs_s 22 +#define v2_query_pf_caps_d_num_ceqs_m genmask(31, 22) + +#define v2_query_pf_caps_d_aeq_depth_s 0 +#define v2_query_pf_caps_d_aeq_depth_m genmask(21, 0) + +#define v2_query_pf_caps_d_aeq_arm_st_s 22 +#define v2_query_pf_caps_d_aeq_arm_st_m genmask(23, 22) + +#define v2_query_pf_caps_d_ceq_arm_st_s 24 +#define v2_query_pf_caps_d_ceq_arm_st_m genmask(25, 24) + +#define v2_query_pf_caps_d_rsv_pds_s 0 +#define v2_query_pf_caps_d_rsv_pds_m genmask(19, 0) + +#define v2_query_pf_caps_d_num_uars_s 20 +#define v2_query_pf_caps_d_num_uars_m genmask(27, 20) + +#define v2_query_pf_caps_d_rsv_qps_s 0 +#define v2_query_pf_caps_d_rsv_qps_m genmask(19, 0) + +#define v2_query_pf_caps_d_rsv_uars_s 20 +#define v2_query_pf_caps_d_rsv_uars_m genmask(27, 20) + +struct hns_roce_query_pf_caps_e { + __le32 chunk_size_shift_rsv_mrws; + __le32 rsv_cqs; + __le32 rsv_srqs; + __le32 rsv_lkey; + __le16 ceq_max_cnt; + __le16 ceq_period; + __le16 aeq_max_cnt; + __le16 aeq_period; +}; + +#define v2_query_pf_caps_e_rsv_mrws_s 0 +#define v2_query_pf_caps_e_rsv_mrws_m genmask(19, 0) + +#define v2_query_pf_caps_e_chunk_size_shift_s 20 +#define v2_query_pf_caps_e_chunk_size_shift_m genmask(31, 20) + +#define v2_query_pf_caps_e_rsv_cqs_s 0 +#define v2_query_pf_caps_e_rsv_cqs_m genmask(19, 0) + +#define v2_query_pf_caps_e_rsv_srqs_s 0 +#define v2_query_pf_caps_e_rsv_srqs_m genmask(19, 0) + +#define v2_query_pf_caps_e_rsv_lkeys_s 0 +#define v2_query_pf_caps_e_rsv_lkeys_m genmask(19, 0) +
|
Networking
|
ba6bb7e974212ef9c114ad94708fb88a82e240ea
|
lijun ou
|
drivers
|
infiniband
|
hns, hw
|
rdma/hns: get pf capabilities from firmware
|
get pf capabilities from firmware according to different hardwares, if it fails, all capabilities will be set with a default value.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
get pf capabilities from firmware
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['rdma ', 'hns']
|
['c', 'h']
| 2
| 6
| 109
|
--- diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c - caps->num_qps = hns_roce_v2_max_qp_num; - caps->max_wqes = hns_roce_v2_max_wqe_num; - caps->num_cqs = hns_roce_v2_max_cq_num; - caps->num_srqs = hns_roce_v2_max_srq_num; - caps->min_cqes = hns_roce_min_cqe_num; - caps->max_cqes = hns_roce_v2_max_cqe_num; - caps->max_sq_sg = hns_roce_v2_max_sq_sge_num; - caps->max_extend_sg = hns_roce_v2_max_extend_sge_num; - caps->max_rq_sg = hns_roce_v2_max_rq_sge_num; - caps->max_sq_inline = hns_roce_v2_max_sq_inline; - caps->num_uars = hns_roce_v2_uar_num; - caps->phy_num_uars = hns_roce_v2_phy_uar_num; - caps->num_aeq_vectors = hns_roce_v2_aeqe_vec_num; - caps->num_comp_vectors = hns_roce_v2_comp_vec_num; - caps->num_other_vectors = hns_roce_v2_abnormal_vec_num; - caps->num_mtpts = hns_roce_v2_max_mtpt_num; - caps->num_pds = hns_roce_v2_max_pd_num; - caps->max_qp_init_rdma = hns_roce_v2_max_qp_init_rdma; - caps->max_qp_dest_rdma = hns_roce_v2_max_qp_dest_rdma; - caps->max_sq_desc_sz = hns_roce_v2_max_sq_desc_sz; - caps->max_rq_desc_sz = hns_roce_v2_max_rq_desc_sz; - caps->max_srq_desc_sz = hns_roce_v2_max_srq_desc_sz; - caps->qpc_entry_sz = hns_roce_v2_qpc_entry_sz; - caps->irrl_entry_sz = hns_roce_v2_irrl_entry_sz; - caps->trrl_entry_sz = hns_roce_v2_trrl_entry_sz; - caps->cqc_entry_sz = hns_roce_v2_cqc_entry_sz; - caps->srqc_entry_sz = hns_roce_v2_srqc_entry_sz; - caps->mtpt_entry_sz = hns_roce_v2_mtpt_entry_sz; - caps->mtt_entry_sz = hns_roce_v2_mtt_entry_sz; - caps->idx_entry_sz = 4; - caps->cq_entry_sz = hns_roce_v2_cqe_entry_size; - caps->page_size_cap = hns_roce_v2_page_size_supported; - caps->reserved_lkey = 0; - caps->reserved_pds = 0; - caps->reserved_mrws = 1; - caps->reserved_uars = 0; - caps->reserved_cqs = 0; - caps->reserved_srqs = 0; - caps->reserved_qps = hns_roce_v2_rsv_qps; - caps->qpc_ba_pg_sz = 0; - 
caps->qpc_buf_pg_sz = 0; - caps->qpc_hop_num = hns_roce_context_hop_num; - caps->srqc_ba_pg_sz = 0; - caps->srqc_buf_pg_sz = 0; - caps->srqc_hop_num = hns_roce_context_hop_num; - caps->cqc_ba_pg_sz = 0; - caps->cqc_buf_pg_sz = 0; - caps->cqc_hop_num = hns_roce_context_hop_num; - caps->mpt_ba_pg_sz = 0; - caps->mpt_buf_pg_sz = 0; - caps->mpt_hop_num = hns_roce_context_hop_num; - caps->pbl_ba_pg_sz = 2; + caps->pbl_ba_pg_sz = hns_roce_ba_pg_sz_supported_16k; - caps->mtt_ba_pg_sz = 0; - caps->mtt_buf_pg_sz = 0; - caps->mtt_hop_num = hns_roce_mtt_hop_num; - caps->wqe_sq_hop_num = 2; - caps->wqe_sge_hop_num = 1; - caps->wqe_rq_hop_num = 2; - caps->cqe_ba_pg_sz = 6; - caps->cqe_buf_pg_sz = 0; - caps->cqe_hop_num = hns_roce_cqe_hop_num; - caps->srqwqe_ba_pg_sz = 0; - caps->srqwqe_buf_pg_sz = 0; - caps->srqwqe_hop_num = hns_roce_srqwqe_hop_num; - caps->idx_ba_pg_sz = 0; - caps->idx_buf_pg_sz = 0; - caps->idx_hop_num = hns_roce_idx_hop_num; - caps->chunk_sz = hns_roce_v2_table_chunk_size; - - caps->flags = hns_roce_cap_flag_rereg_mr | - hns_roce_cap_flag_roce_v1_v2 | - hns_roce_cap_flag_rq_inline | - hns_roce_cap_flag_record_db | - hns_roce_cap_flag_sq_record_db; - - if (hr_dev->pci_dev->revision == 0x21) - caps->flags |= hns_roce_cap_flag_mw | - hns_roce_cap_flag_frmr; - caps->pkey_table_len[0] = 1; - caps->gid_table_len[0] = hns_roce_v2_gid_index_num; - caps->ceqe_depth = hns_roce_v2_comp_eqe_num; - caps->aeqe_depth = hns_roce_v2_async_eqe_num; - caps->local_ca_ack_delay = 0; - caps->max_mtu = ib_mtu_4096; - - caps->max_srq_wrs = hns_roce_v2_max_srq_wr; - caps->max_srq_sges = hns_roce_v2_max_srq_sge; - - if (hr_dev->pci_dev->revision == 0x21) { - caps->flags |= hns_roce_cap_flag_atomic | - hns_roce_cap_flag_srq | - hns_roce_cap_flag_qp_flow_ctrl; - - caps->num_qpc_timer = hns_roce_v2_max_qpc_timer_num; - caps->qpc_timer_entry_sz = hns_roce_v2_qpc_timer_entry_sz; - caps->qpc_timer_ba_pg_sz = 0; - caps->qpc_timer_buf_pg_sz = 0; - caps->qpc_timer_hop_num = 
hns_roce_hop_num_0; - caps->num_cqc_timer = hns_roce_v2_max_cqc_timer_num; - caps->cqc_timer_entry_sz = hns_roce_v2_cqc_timer_entry_sz; - caps->cqc_timer_ba_pg_sz = 0; - caps->cqc_timer_buf_pg_sz = 0; - caps->cqc_timer_hop_num = hns_roce_hop_num_0; - - caps->sccc_entry_sz = hns_roce_v2_sccc_entry_sz; - caps->sccc_ba_pg_sz = 0; - caps->sccc_buf_pg_sz = 0; - caps->sccc_hop_num = hns_roce_sccc_hop_num; - } + ret = hns_roce_query_pf_caps(hr_dev); + if (ret) + set_default_caps(hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +#define hns_roce_ba_pg_sz_supported_256k 6 +#define hns_roce_ba_pg_sz_supported_16k 2
|
Networking
|
80a785701660ba3a36b0f72e1674b82ca7ca3ce0
|
lijun ou
|
drivers
|
infiniband
|
hns, hw
|
rdma/hns: add support for extended atomic in userspace
|
to support extended atomic operations including cmp & swap and fetch & add of 8 bytes, 16 bytes, 32 bytes, 64 bytes in userspace, some field in qpc should be configured.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for extended atomic in userspace
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['rdma ', 'hns']
|
['c', 'h']
| 2
| 17
| 2
|
--- diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c - caps->trrl_entry_sz = hns_roce_v2_trrl_entry_sz; + caps->trrl_entry_sz = hns_roce_v2_ext_atomic_trrl_entry_sz; + roce_set_bit(context->byte_76_srqn_op_en, v2_qpc_byte_76_ext_ate_s, + !!(access_flags & ib_access_remote_atomic)); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, v2_qpc_byte_76_ext_ate_s, 0); + roce_set_bit(context->byte_76_srqn_op_en, + v2_qpc_byte_76_ext_ate_s, + !!(attr->qp_access_flags & + ib_access_remote_atomic)); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + v2_qpc_byte_76_ext_ate_s, 0); + roce_set_bit(context->byte_76_srqn_op_en, + v2_qpc_byte_76_ext_ate_s, + !!(hr_qp->access_flags & ib_access_remote_atomic)); + roce_set_bit(qpc_mask->byte_76_srqn_op_en, + v2_qpc_byte_76_ext_ate_s, 0); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +#define hns_roce_v2_ext_atomic_trrl_entry_sz 100 - +#define v2_qpc_byte_76_ext_ate_s 29
|
Networking
|
7db82697b8bf05ae56d02bf8da998bcd1122531d
|
jiaran zhang
|
drivers
|
infiniband
|
hns, hw
|
rdma/hns: add support for reporting wc as software mode
|
when hardware is in resetting stage, we may can't poll back all the expected work completions as the hardware won't generate cqe anymore.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for reporting wc as software mode
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['rdma ', 'hns']
|
['c', 'h']
| 6
| 252
| 34
|
--- diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c + init_list_head(&hr_cq->sq_list); + init_list_head(&hr_cq->rq_list); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h + struct list_head sq_list; /* all qps on this send cq */ + struct list_head rq_list; /* all qps on this recv cq */ + int is_armed; /* cq is armed */ + struct list_head node; /* all armed cqs are on a list */ + struct list_head node; /* all qps are on a list */ + struct list_head rq_node; /* all recv qps are on a list */ + struct list_head sq_node; /* all send qps are on a list */ +enum hns_roce_device_state { + hns_roce_device_state_inited, + hns_roce_device_state_rst_down, + hns_roce_device_state_uninit, +}; + + enum hns_roce_device_state state; + struct list_head qp_list; /* list of all qps on this dev */ + spinlock_t qp_list_lock; /* protect qp_list */ +void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c - send_cq = to_hr_cq(hr_qp->ibqp.send_cq); - recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq); + send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : null; + recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : null; - __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : null); - if (send_cq != recv_cq) + if (recv_cq) + __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, + (hr_qp->ibqp.srq ? 
+ to_hr_srq(hr_qp->ibqp.srq) : + null)); + + if (send_cq && send_cq != recv_cq) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +static int check_send_valid(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + struct ib_qp *ibqp = &hr_qp->ibqp; + struct device *dev = hr_dev->dev; + + if (unlikely(ibqp->qp_type != ib_qpt_rc && + ibqp->qp_type != ib_qpt_gsi && + ibqp->qp_type != ib_qpt_ud)) { + dev_err(dev, "not supported qp(0x%x)type! ", ibqp->qp_type); + return -eopnotsupp; + } else if (unlikely(hr_qp->state == ib_qps_reset || + hr_qp->state == ib_qps_init || + hr_qp->state == ib_qps_rtr)) { + dev_err(dev, "post wqe fail, qp state %d! ", hr_qp->state); + return -einval; + } else if (unlikely(hr_dev->state >= hns_roce_device_state_rst_down)) { + dev_err(dev, "post wqe fail, dev state %d! ", hr_dev->state); + return -eio; + } + + return 0; +} + - int ret = 0; + int ret; - if (unlikely(ibqp->qp_type != ib_qpt_rc && - ibqp->qp_type != ib_qpt_gsi && - ibqp->qp_type != ib_qpt_ud)) { - dev_err(dev, "not supported qp(0x%x)type! ", ibqp->qp_type); - *bad_wr = wr; - return -eopnotsupp; - } + spin_lock_irqsave(&qp->sq.lock, flags); - if (unlikely(qp->state == ib_qps_reset || qp->state == ib_qps_init || - qp->state == ib_qps_rtr)) { - dev_err(dev, "post wqe fail, qp state %d err! 
", qp->state); + ret = check_send_valid(hr_dev, qp); + if (ret) { - return -einval; + nreq = 0; + goto out; - spin_lock_irqsave(&qp->sq.lock, flags); +static int check_recv_valid(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + if (unlikely(hr_dev->state >= hns_roce_device_state_rst_down)) + return -eio; + else if (hr_qp->state == ib_qps_reset) + return -einval; + + return 0; +} + - int ret = 0; + int ret; - if (hr_qp->state == ib_qps_reset) { - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); + ret = check_recv_valid(hr_dev, hr_qp); + if (ret) { - return -einval; + nreq = 0; + goto out; +static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq, + int num_entries, struct ib_wc *wc) +{ + unsigned int left; + int npolled = 0; + + left = wq->head - wq->tail; + if (left == 0) + return 0; + + left = min_t(unsigned int, (unsigned int)num_entries, left); + while (npolled < left) { + wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; + wc->status = ib_wc_wr_flush_err; + wc->vendor_err = 0; + wc->qp = &hr_qp->ibqp; + + wq->tail++; + wc++; + npolled++; + } + + return npolled; +} + +static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries, + struct ib_wc *wc) +{ + struct hns_roce_qp *hr_qp; + int npolled = 0; + + list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) { + npolled += sw_comp(hr_qp, &hr_qp->sq, + num_entries - npolled, wc + npolled); + if (npolled >= num_entries) + goto out; + } + + list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) { + npolled += sw_comp(hr_qp, &hr_qp->rq, + num_entries - npolled, wc + npolled); + if (npolled >= num_entries) + goto out; + } + +out: + return npolled; +} + + struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); + /* + * when the device starts to reset, the state is rst_down. at this time, + * there may still be some valid cqes in the hardware that are not + * polled. therefore, it is not allowed to switch to the software mode + * immediately. 
when the state changes to uninit, cqe no longer exists + * in the hardware, and then switch to software mode. + */ + if (hr_dev->state == hns_roce_device_state_uninit) { + npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc); + goto out; + } + +out: + unsigned long flags; - send_cq = to_hr_cq(hr_qp->ibqp.send_cq); - recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq); + send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : null; + recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : null; + spin_lock_irqsave(&hr_dev->qp_list_lock, flags); + list_del(&hr_qp->node); + list_del(&hr_qp->sq_node); + list_del(&hr_qp->rq_node); + - __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : null); - if (send_cq != recv_cq) + if (recv_cq) + __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, + (hr_qp->ibqp.srq ? + to_hr_srq(hr_qp->ibqp.srq) : + null)); + + if (send_cq && send_cq != recv_cq) + + spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); + + hr_dev->state = hns_roce_device_state_uninit; + hns_roce_handle_device_err(hr_dev); + - struct ib_event event; - event.event = ib_event_device_fatal; - event.device = &hr_dev->ib_dev; - event.element.port_num = 1; - ib_dispatch_event(&event); + hr_dev->state = hns_roce_device_state_rst_down; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c +static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq) +{ + struct hns_roce_cq *hr_cq = to_hr_cq(cq); + unsigned long flags; + + spin_lock_irqsave(&hr_cq->lock, flags); + if (cq->comp_handler) { + if (!hr_cq->is_armed) { + hr_cq->is_armed = 1; + list_add_tail(&hr_cq->node, cq_list); + } + } + spin_unlock_irqrestore(&hr_cq->lock, flags); +} + +void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_qp *hr_qp; + struct hns_roce_cq *hr_cq; + struct list_head 
cq_list; + unsigned long flags_qp; + unsigned long flags; + + init_list_head(&cq_list); + + spin_lock_irqsave(&hr_dev->qp_list_lock, flags); + list_for_each_entry(hr_qp, &hr_dev->qp_list, node) { + spin_lock_irqsave(&hr_qp->sq.lock, flags_qp); + if (hr_qp->sq.tail != hr_qp->sq.head) + check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq); + spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp); + + spin_lock_irqsave(&hr_qp->rq.lock, flags_qp); + if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head)) + check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq); + spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp); + } + + list_for_each_entry(hr_cq, &cq_list, node) + hns_roce_cq_completion(hr_dev, hr_cq->cqn); + + spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); +} + + init_list_head(&hr_dev->qp_list); + spin_lock_init(&hr_dev->qp_list_lock); + diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c +static void add_qp_to_list(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_cq *send_cq, struct ib_cq *recv_cq) +{ + struct hns_roce_cq *hr_send_cq, *hr_recv_cq; + unsigned long flags; + + hr_send_cq = send_cq ? to_hr_cq(send_cq) : null; + hr_recv_cq = recv_cq ? 
to_hr_cq(recv_cq) : null; + + spin_lock_irqsave(&hr_dev->qp_list_lock, flags); + hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); + + list_add_tail(&hr_qp->node, &hr_dev->qp_list); + if (hr_send_cq) + list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); + if (hr_recv_cq) + list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); + + hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); + spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); +} + + + add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq); + - if (send_cq == recv_cq) { + if (unlikely(send_cq == null && recv_cq == null)) { + __acquire(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (unlikely(send_cq != null && recv_cq == null)) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else if (unlikely(send_cq == null && recv_cq != null)) { + spin_lock_irq(&recv_cq->lock); + __acquire(&send_cq->lock); + } else if (send_cq == recv_cq) { - if (send_cq == recv_cq) { + if (unlikely(send_cq == null && recv_cq == null)) { + __release(&recv_cq->lock); + __release(&send_cq->lock); + } else if (unlikely(send_cq != null && recv_cq == null)) { + __release(&recv_cq->lock); + spin_unlock(&send_cq->lock); + } else if (unlikely(send_cq == null && recv_cq != null)) { + __release(&send_cq->lock); + spin_unlock(&recv_cq->lock); + } else if (send_cq == recv_cq) {
|
Networking
|
626903e9355bdf8d401fc0ac7e7407862c642710
|
xi wang
|
drivers
|
infiniband
|
hns, hw
|
ar5523: add usb id of smcwusbt-g2 wireless adapter
|
add the required usb id for running smcwusbt-g2 wireless adapter (smc "ez connect g").
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add usb id of smcwusbt-g2 wireless adapter
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ar5523']
|
['c']
| 1
| 2
| 0
|
--- diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c + ar5523_device_ug(0x083a, 0x4506), /* smc / ez connect + smcwusbt-g2 */
|
Networking
|
5b362498a79631f283578b64bf6f4d15ed4cc19a
|
mert dirik
|
drivers
|
net
|
ar5523, ath, wireless
|
ath10k: add nl80211_feature_nd_random_mac_addr for nlo
|
add nl80211_feature_nd_random_mac_addr for nlo will enable the random mac address for netdetect case. iw command: iw phy0 wowlan enable net-detect net-detect randomize=aa:7b:a1:ac:b2:41/ff:ff:ff:ff:ff:ff interval 5000 delay 30 freqs 2412 matches ssid foo. after suspend, dut will send probe request with mac aa:7b:a1:ac:b2:41.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable the random mac address for netdetect
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath10k ']
|
['c']
| 1
| 1
| 0
|
--- diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c + ar->hw->wiphy->features |= nl80211_feature_nd_random_mac_addr;
|
Networking
|
23b5156a856b6342c8a1ef7045d387c00cdb17c5
|
wen gong
|
drivers
|
net
|
ath, ath10k, wireless
|
ath10k: enable firmware log by default for sdio
|
on sdio chips the firmware log does not impact performance. to make it easier to debug firmware problems keep it enabled on the firmware.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable firmware log by default for sdio
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath10k ']
|
['c']
| 1
| 0
| 12
|
--- diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c - /* explicitly set fwlog prints to zero as target may turn it on - * based on scratch registers. - */ - ret = ath10k_bmi_read32(ar, hi_option_flag, ¶m); - if (ret) - return ret; - - param |= hi_option_disable_dbglog; - ret = ath10k_bmi_write32(ar, hi_option_flag, param); - if (ret) - return ret; -
|
Networking
|
7cbf4c96d7159e4abe762f4bafa9911fc1f7d339
|
wen gong
|
drivers
|
net
|
ath, ath10k, wireless
|
ath10k: enable napi on rx path for sdio
|
for tcp rx, the quantity of tcp acks to remote is 1/2 of the quantity of tcp data from remote, then it will have many small length packets on tx path of sdio bus, then it reduce the rx packets's bandwidth of tcp.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable napi on rx path for sdio
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath10k ']
|
['c', 'h']
| 4
| 73
| 8
|
--- diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c + skb_queue_head_init(&ar->htt.rx_indication_head); + diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h + struct sk_buff_head rx_indication_head; + +int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget); diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c - ieee80211_rx_ni(ar->hw, skb); + if (ar->napi.dev) + ieee80211_rx_napi(ar->hw, null, skb, &ar->napi); + else + ieee80211_rx_ni(ar->hw, skb); - if (ar->bus_param.dev_type == ath10k_dev_type_hl) - return ath10k_htt_rx_proc_rx_ind_hl(htt, - &resp->rx_ind_hl, - skb, - htt_rx_pn_check, - htt_rx_non_tkip_mic); - else + if (ar->bus_param.dev_type != ath10k_dev_type_hl) { + } else { + skb_queue_tail(&htt->rx_indication_head, skb); + return false; + } +int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget) +{ + struct htt_resp *resp; + struct ath10k_htt *htt = &ar->htt; + struct sk_buff *skb; + bool release; + int quota; + + for (quota = 0; quota < budget; quota++) { + skb = skb_dequeue(&htt->rx_indication_head); + if (!skb) + break; + + resp = (struct htt_resp *)skb->data; + + release = ath10k_htt_rx_proc_rx_ind_hl(htt, + &resp->rx_ind_hl, + skb, + htt_rx_pn_check, + htt_rx_non_tkip_mic); + + if (release) + dev_kfree_skb_any(skb); + + ath10k_dbg(ar, ath10k_dbg_htt, "rx indication poll pending count:%d ", + skb_queue_len(&htt->rx_indication_head)); + } + return quota; +} +export_symbol(ath10k_htt_rx_hl_indication); + diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ 
b/drivers/net/wireless/ath/ath10k/sdio.c + + if (test_bit(ath10k_flag_core_registered, &ar->dev_flags)) + napi_schedule(&ar->napi); + napi_enable(&ar->napi); + + + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); +static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done; + + done = ath10k_htt_rx_hl_indication(ar, budget); + ath10k_dbg(ar, ath10k_dbg_sdio, "napi poll: done: %d, budget:%d ", done, budget); + + if (done < budget) + napi_complete_done(ctx, done); + + return done; +} + + netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll, + ath10k_napi_budget); + + + netif_napi_del(&ar->napi); +
|
Networking
|
cfee8793a74dc3afabb08fc9ed5bbe2045709dbb
|
wen gong
|
drivers
|
net
|
ath, ath10k, wireless
|
ath10k: enable wow feature for sdio chip
|
sdio does not support wow, this patch is to enable it. when system enter sleep state, if wowlan is enabled, then sdio chip will keep power if platform support keep power, after resume, it will not need to re-load firmware again.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable wow feature for sdio chip
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath10k ']
|
['c']
| 1
| 21
| 2
|
--- diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c - return -eopnotsupp; + return 0; - return 0; + struct sdio_func *func = dev_to_sdio_func(device); + struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func); + struct ath10k *ar = ar_sdio->ar; + mmc_pm_flag_t pm_flag, pm_caps; + int ret; + + if (!device_may_wakeup(ar->dev)) + return 0; + + pm_flag = mmc_pm_keep_power; + + ret = sdio_set_host_pm_flags(func, pm_flag); + if (ret) { + pm_caps = sdio_get_host_pm_caps(func); + ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d ", + pm_flag, pm_caps, ret); + return ret; + } + + return ret;
|
Networking
|
7321095ca39c8bb4da5e86f7a3fb8c22b47e5063
|
wen gong
|
drivers
|
net
|
ath, ath10k, wireless
|
ath11k: driver for qualcomm ieee 802.11ax devices
|
ath11k is a new driver for qualcomm ieee 802.11ax devices, first supporting only ipq8074 soc using the shared memory ahb bus. ath11k uses mac80211 and supports ap, station and mesh modes.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['c', 'kconfig', 'makefile', 'h']
| 47
| 48,188
| 0
|
--- diff --git a/drivers/net/wireless/ath/kconfig b/drivers/net/wireless/ath/kconfig --- a/drivers/net/wireless/ath/kconfig +++ b/drivers/net/wireless/ath/kconfig +source "drivers/net/wireless/ath/ath11k/kconfig" diff --git a/drivers/net/wireless/ath/makefile b/drivers/net/wireless/ath/makefile --- a/drivers/net/wireless/ath/makefile +++ b/drivers/net/wireless/ath/makefile +obj-$(config_ath11k) += ath11k/ diff --git a/drivers/net/wireless/ath/ath11k/kconfig b/drivers/net/wireless/ath/ath11k/kconfig --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/kconfig +# spdx-license-identifier: bsd-3-clause-clear +config ath11k + tristate "qualcomm technologies 802.11ax chipset support" + depends on mac80211 && has_dma + depends on remoteproc + depends on arch_qcom || compile_test + select ath_common + select qcom_qmi_helpers + ---help--- + this module adds support for qualcomm technologies 802.11ax family of + chipsets. + + if you choose to build a module, it'll be called ath11k. + +config ath11k_debug + bool "qca ath11k debugging" + depends on ath11k + ---help--- + enables debug support + + if unsure, say y to make it easier to debug problems. + +config ath11k_debugfs + bool "qca ath11k debugfs support" + depends on ath11k && debug_fs + ---help--- + enable ath11k debugfs support + + if unsure, say y to make it easier to debug problems. + +config ath11k_tracing + bool "ath11k tracing support" + depends on ath11k && event_tracing + ---help--- + select this to use ath11k tracing infrastructure. 
diff --git a/drivers/net/wireless/ath/ath11k/makefile b/drivers/net/wireless/ath/ath11k/makefile --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/makefile +# spdx-license-identifier: bsd-3-clause-clear +obj-$(config_ath11k) += ath11k.o +ath11k-y += core.o \ + hal.o \ + hal_tx.o \ + hal_rx.o \ + ahb.o \ + wmi.o \ + mac.o \ + reg.o \ + htc.o \ + qmi.o \ + dp.o \ + dp_tx.o \ + dp_rx.o \ + debug.o \ + ce.o \ + peer.o + +ath11k-$(config_ath11k_debugfs) += debug_htt_stats.o +ath11k-$(config_mac80211_debugfs) += debugfs_sta.o +ath11k-$(config_nl80211_testmode) += testmode.o +ath11k-$(config_ath11k_tracing) += trace.o + +# for tracing framework to find trace.h +cflags_trace.o := -i$(src) diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/ahb.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/dma-mapping.h> +#include "ahb.h" +#include "debug.h" +#include <linux/remoteproc.h> + +static const struct of_device_id ath11k_ahb_of_match[] = { + /* todo: should we change the compatible string to something similar + * to one that ath10k uses? + */ + { .compatible = "qcom,ipq8074-wifi", + .data = (void *)ath11k_hw_ipq8074, + }, + { } +}; + +module_device_table(of, ath11k_ahb_of_match); + +/* target firmware's copy engine configuration. 
*/ +static const struct ce_pipe_config target_ce_config_wlan[] = { + /* ce0: host->target htc control and raw streams */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(pipedir_out), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce1: target->host htt + htc control */ + { + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(pipedir_in), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce2: target->host wmi */ + { + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(pipedir_in), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce3: host->target wmi */ + { + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(pipedir_out), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce4: host->target htt */ + { + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(pipedir_out), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(ce_attr_flags | ce_attr_dis_intr), + .reserved = __cpu_to_le32(0), + }, + + /* ce5: target->host pktlog */ + { + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(pipedir_in), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(0), + .reserved = __cpu_to_le32(0), + }, + + /* ce6: reserved for target autonomous hif_memcpy */ + { + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(pipedir_inout), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(65535), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce7 used only by host */ + { + .pipenum = __cpu_to_le32(7), + 
.pipedir = __cpu_to_le32(pipedir_out), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce8 target->host used only by ipa */ + { + .pipenum = __cpu_to_le32(8), + .pipedir = __cpu_to_le32(pipedir_inout), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(65535), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce9 host->target htt */ + { + .pipenum = __cpu_to_le32(9), + .pipedir = __cpu_to_le32(pipedir_out), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce10 target->host htt */ + { + .pipenum = __cpu_to_le32(10), + .pipedir = __cpu_to_le32(pipedir_inout_h2h), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, + + /* ce11 not used */ + { + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(0), + .nentries = __cpu_to_le32(0), + .nbytes_max = __cpu_to_le32(0), + .flags = __cpu_to_le32(ce_attr_flags), + .reserved = __cpu_to_le32(0), + }, +}; + +/* map from service/endpoint to copy engine. + * this table is derived from the ce_pci table, above. + * it is passed to the target at startup for use by firmware. 
+ */ +static const struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_vo), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_vo), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_bk), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_bk), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_be), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_be), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_vi), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_data_vi), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_control), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(3), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_control), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_control_mac1), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(7), + }, + 
{ + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_control_mac1), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_control_mac2), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(9), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_wmi_control_mac2), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(2), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_rsvd_ctrl), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(0), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_rsvd_ctrl), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(1), + }, + { /* not used */ + .service_id = __cpu_to_le32(ath11k_htc_svc_id_test_raw_streams), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(0), + }, + { /* not used */ + .service_id = __cpu_to_le32(ath11k_htc_svc_id_test_raw_streams), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(1), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_htt_data_msg), + .pipedir = __cpu_to_le32(pipedir_out), /* out = ul = host -> target */ + .pipenum = __cpu_to_le32(4), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_htt_data_msg), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(1), + }, + { + .service_id = __cpu_to_le32(ath11k_htc_svc_id_pkt_log), + .pipedir = __cpu_to_le32(pipedir_in), /* in = dl = target -> host */ + .pipenum = __cpu_to_le32(5), + }, + + /* (additions here) */ + + { /* terminator entry */ } +}; + +#define ath11k_irq_ce0_offset 4 + +static const char *irq_name[ath11k_irq_num_max] = { + "misc-pulse1", + "misc-latch", + "sw-exception", + 
"watchdog", + "ce0", + "ce1", + "ce2", + "ce3", + "ce4", + "ce5", + "ce6", + "ce7", + "ce8", + "ce9", + "ce10", + "ce11", + "host2wbm-desc-feed", + "host2reo-re-injection", + "host2reo-command", + "host2rxdma-monitor-ring3", + "host2rxdma-monitor-ring2", + "host2rxdma-monitor-ring1", + "reo2ost-exception", + "wbm2host-rx-release", + "reo2host-status", + "reo2host-destination-ring4", + "reo2host-destination-ring3", + "reo2host-destination-ring2", + "reo2host-destination-ring1", + "rxdma2host-monitor-destination-mac3", + "rxdma2host-monitor-destination-mac2", + "rxdma2host-monitor-destination-mac1", + "ppdu-end-interrupts-mac3", + "ppdu-end-interrupts-mac2", + "ppdu-end-interrupts-mac1", + "rxdma2host-monitor-status-ring-mac3", + "rxdma2host-monitor-status-ring-mac2", + "rxdma2host-monitor-status-ring-mac1", + "host2rxdma-host-buf-ring-mac3", + "host2rxdma-host-buf-ring-mac2", + "host2rxdma-host-buf-ring-mac1", + "rxdma2host-destination-ring-mac3", + "rxdma2host-destination-ring-mac2", + "rxdma2host-destination-ring-mac1", + "host2tcl-input-ring4", + "host2tcl-input-ring3", + "host2tcl-input-ring2", + "host2tcl-input-ring1", + "wbm2host-tx-completions-ring3", + "wbm2host-tx-completions-ring2", + "wbm2host-tx-completions-ring1", + "tcl2host-status-ring", +}; + +#define ath11k_tx_ring_mask_0 0x1 +#define ath11k_tx_ring_mask_1 0x2 +#define ath11k_tx_ring_mask_2 0x4 + +#define ath11k_rx_ring_mask_0 0x1 +#define ath11k_rx_ring_mask_1 0x2 +#define ath11k_rx_ring_mask_2 0x4 +#define ath11k_rx_ring_mask_3 0x8 + +#define ath11k_rx_err_ring_mask_0 0x1 + +#define ath11k_rx_wbm_rel_ring_mask_0 0x1 + +#define ath11k_reo_status_ring_mask_0 0x1 + +#define ath11k_rxdma2host_ring_mask_0 0x1 +#define ath11k_rxdma2host_ring_mask_1 0x2 +#define ath11k_rxdma2host_ring_mask_2 0x4 + +#define ath11k_host2rxdma_ring_mask_0 0x1 +#define ath11k_host2rxdma_ring_mask_1 0x2 +#define ath11k_host2rxdma_ring_mask_2 0x4 + +#define ath11k_rx_mon_status_ring_mask_0 0x1 +#define 
ath11k_rx_mon_status_ring_mask_1 0x2 +#define ath11k_rx_mon_status_ring_mask_2 0x4 + +const u8 ath11k_tx_ring_mask[ath11k_ext_irq_grp_num_max] = { + ath11k_tx_ring_mask_0, + ath11k_tx_ring_mask_1, + ath11k_tx_ring_mask_2, +}; + +const u8 rx_mon_status_ring_mask[ath11k_ext_irq_grp_num_max] = { + 0, 0, 0, 0, + ath11k_rx_mon_status_ring_mask_0, + ath11k_rx_mon_status_ring_mask_1, + ath11k_rx_mon_status_ring_mask_2, +}; + +const u8 ath11k_rx_ring_mask[ath11k_ext_irq_grp_num_max] = { + 0, 0, 0, 0, 0, 0, 0, + ath11k_rx_ring_mask_0, + ath11k_rx_ring_mask_1, + ath11k_rx_ring_mask_2, + ath11k_rx_ring_mask_3, +}; + +const u8 ath11k_rx_err_ring_mask[ath11k_ext_irq_grp_num_max] = { + ath11k_rx_err_ring_mask_0, +}; + +const u8 ath11k_rx_wbm_rel_ring_mask[ath11k_ext_irq_grp_num_max] = { + ath11k_rx_wbm_rel_ring_mask_0, +}; + +const u8 ath11k_reo_status_ring_mask[ath11k_ext_irq_grp_num_max] = { + ath11k_reo_status_ring_mask_0, +}; + +const u8 ath11k_rxdma2host_ring_mask[ath11k_ext_irq_grp_num_max] = { + ath11k_rxdma2host_ring_mask_0, + ath11k_rxdma2host_ring_mask_1, + ath11k_rxdma2host_ring_mask_2, +}; + +const u8 ath11k_host2rxdma_ring_mask[ath11k_ext_irq_grp_num_max] = { + ath11k_host2rxdma_ring_mask_0, + ath11k_host2rxdma_ring_mask_1, + ath11k_host2rxdma_ring_mask_2, +}; + +/* enum ext_irq_num - irq numbers that can be used by external modules + * like datapath + */ +enum ext_irq_num { + host2wbm_desc_feed = 16, + host2reo_re_injection, + host2reo_command, + host2rxdma_monitor_ring3, + host2rxdma_monitor_ring2, + host2rxdma_monitor_ring1, + reo2host_exception, + wbm2host_rx_release, + reo2host_status, + reo2host_destination_ring4, + reo2host_destination_ring3, + reo2host_destination_ring2, + reo2host_destination_ring1, + rxdma2host_monitor_destination_mac3, + rxdma2host_monitor_destination_mac2, + rxdma2host_monitor_destination_mac1, + ppdu_end_interrupts_mac3, + ppdu_end_interrupts_mac2, + ppdu_end_interrupts_mac1, + rxdma2host_monitor_status_ring_mac3, + 
rxdma2host_monitor_status_ring_mac2, + rxdma2host_monitor_status_ring_mac1, + host2rxdma_host_buf_ring_mac3, + host2rxdma_host_buf_ring_mac2, + host2rxdma_host_buf_ring_mac1, + rxdma2host_destination_ring_mac3, + rxdma2host_destination_ring_mac2, + rxdma2host_destination_ring_mac1, + host2tcl_input_ring4, + host2tcl_input_ring3, + host2tcl_input_ring2, + host2tcl_input_ring1, + wbm2host_tx_completions_ring3, + wbm2host_tx_completions_ring2, + wbm2host_tx_completions_ring1, + tcl2host_status_ring, +}; + +static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ce_count; i++) { + struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath11k_ce_get_attr_flags(i) & ce_attr_dis_intr) + continue; + + tasklet_kill(&ce_pipe->intr_tq); + } +} + +static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) +{ + int i; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) +{ + struct sk_buff *skb; + int i; + + for (i = 0; i < ath11k_ext_irq_grp_num_max; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + ath11k_ahb_ext_grp_disable(irq_grp); + + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + + while ((skb = __skb_dequeue(&irq_grp->pending_q))) + dev_kfree_skb_any(skb); + } +} + +static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) +{ + int i; + + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset) +{ + u32 val; + + val = ath11k_ahb_read32(ab, offset); + ath11k_ahb_write32(ab, offset, val | bit(bit)); +} + +static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) +{ + u32 val; + + val = ath11k_ahb_read32(ab, offset); + ath11k_ahb_write32(ab, offset, val & ~bit(bit)); +} + +static void 
ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) +{ + const struct ce_pipe_config *ce_config; + + ce_config = &target_ce_config_wlan[ce_id]; + if (__le32_to_cpu(ce_config->pipedir) & pipedir_out) + ath11k_ahb_setbit32(ab, ce_id, ce_host_ie_address); + + if (__le32_to_cpu(ce_config->pipedir) & pipedir_in) { + ath11k_ahb_setbit32(ab, ce_id, ce_host_ie_2_address); + ath11k_ahb_setbit32(ab, ce_id + ce_host_ie_3_shift, + ce_host_ie_3_address); + } +} + +static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) +{ + const struct ce_pipe_config *ce_config; + + ce_config = &target_ce_config_wlan[ce_id]; + if (__le32_to_cpu(ce_config->pipedir) & pipedir_out) + ath11k_ahb_clearbit32(ab, ce_id, ce_host_ie_address); + + if (__le32_to_cpu(ce_config->pipedir) & pipedir_in) { + ath11k_ahb_clearbit32(ab, ce_id, ce_host_ie_2_address); + ath11k_ahb_clearbit32(ab, ce_id + ce_host_ie_3_shift, + ce_host_ie_3_address); + } +} + +static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab) +{ + int i; + int irq_idx; + + for (i = 0; i < ce_count; i++) { + if (ath11k_ce_get_attr_flags(i) & ce_attr_dis_intr) + continue; + + irq_idx = ath11k_irq_ce0_offset + i; + synchronize_irq(ab->irq_num[irq_idx]); + } +} + +static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab) +{ + int i, j; + int irq_idx; + + for (i = 0; i < ath11k_ext_irq_grp_num_max; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) { + irq_idx = irq_grp->irqs[j]; + synchronize_irq(ab->irq_num[irq_idx]); + } + } +} + +static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ce_count; i++) { + if (ath11k_ce_get_attr_flags(i) & ce_attr_dis_intr) + continue; + ath11k_ahb_ce_irq_enable(ab, i); + } +} + +static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ce_count; i++) { + if (ath11k_ce_get_attr_flags(i) & ce_attr_dis_intr) + continue; + ath11k_ahb_ce_irq_disable(ab, 
i); + } +} + +int ath11k_ahb_start(struct ath11k_base *ab) +{ + ath11k_ahb_ce_irqs_enable(ab); + ath11k_ce_rx_post_buf(ab); + + return 0; +} + +void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < ath11k_ext_irq_grp_num_max; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + napi_enable(&irq_grp->napi); + ath11k_ahb_ext_grp_enable(irq_grp); + } +} + +void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) +{ + __ath11k_ahb_ext_irq_disable(ab); + ath11k_ahb_sync_ext_irqs(ab); +} + +void ath11k_ahb_stop(struct ath11k_base *ab) +{ + if (!test_bit(ath11k_flag_crash_flush, &ab->dev_flags)) + ath11k_ahb_ce_irqs_disable(ab); + ath11k_ahb_sync_ce_irqs(ab); + ath11k_ahb_kill_tasklets(ab); + del_timer_sync(&ab->rx_replenish_retry); + ath11k_ce_cleanup_pipes(ab); +} + +int ath11k_ahb_power_up(struct ath11k_base *ab) +{ + int ret; + + ret = rproc_boot(ab->tgt_rproc); + if (ret) + ath11k_err(ab, "failed to boot the remote processor q6 "); + + return ret; +} + +void ath11k_ahb_power_down(struct ath11k_base *ab) +{ + rproc_shutdown(ab->tgt_rproc); +} + +static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) +{ + struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; + + cfg->tgt_ce = (u8 *)target_ce_config_wlan; + cfg->tgt_ce_len = sizeof(target_ce_config_wlan); + + cfg->svc_to_ce_map = (u8 *)target_service_to_ce_map_wlan; + cfg->svc_to_ce_map_len = sizeof(target_service_to_ce_map_wlan); +} + +static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) +{ + int i, j; + + for (i = 0; i < ath11k_ext_irq_grp_num_max; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) + free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + } +} + +static void ath11k_ahb_free_irq(struct ath11k_base *ab) +{ + int irq_idx; + int i; + + for (i = 0; i < ce_count; i++) { + if (ath11k_ce_get_attr_flags(i) & ce_attr_dis_intr) + continue; + irq_idx = ath11k_irq_ce0_offset + i; + 
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); + } + + ath11k_ahb_free_ext_irq(ab); +} + +static void ath11k_ahb_ce_tasklet(unsigned long data) +{ + struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data; + + ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); + + ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); +} + +static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg) +{ + struct ath11k_ce_pipe *ce_pipe = arg; + + ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); + + tasklet_schedule(&ce_pipe->intr_tq); + + return irq_handled; +} + +static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget) +{ + struct ath11k_ext_irq_grp *irq_grp = container_of(napi, + struct ath11k_ext_irq_grp, + napi); + struct ath11k_base *ab = irq_grp->ab; + int work_done; + + work_done = ath11k_dp_service_srng(ab, irq_grp, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + ath11k_ahb_ext_grp_enable(irq_grp); + } + + if (work_done > budget) + work_done = budget; + + return work_done; +} + +static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg) +{ + struct ath11k_ext_irq_grp *irq_grp = arg; + + ath11k_ahb_ext_grp_disable(irq_grp); + + napi_schedule(&irq_grp->napi); + + return irq_handled; +} + +static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab) +{ + int i, j; + int irq; + int ret; + + for (i = 0; i < ath11k_ext_irq_grp_num_max; i++) { + struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + u32 num_irq = 0; + + irq_grp->ab = ab; + irq_grp->grp_id = i; + init_dummy_netdev(&irq_grp->napi_ndev); + netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + ath11k_ahb_ext_grp_napi_poll, napi_poll_weight); + __skb_queue_head_init(&irq_grp->pending_q); + + for (j = 0; j < ath11k_ext_irq_num_max; j++) { + if (ath11k_tx_ring_mask[i] & bit(j)) { + irq_grp->irqs[num_irq++] = + wbm2host_tx_completions_ring1 - j; + } + + if (ath11k_rx_ring_mask[i] & bit(j)) { + 
irq_grp->irqs[num_irq++] = + reo2host_destination_ring1 - j; + } + + if (ath11k_rx_err_ring_mask[i] & bit(j)) + irq_grp->irqs[num_irq++] = reo2host_exception; + + if (ath11k_rx_wbm_rel_ring_mask[i] & bit(j)) + irq_grp->irqs[num_irq++] = wbm2host_rx_release; + + if (ath11k_reo_status_ring_mask[i] & bit(j)) + irq_grp->irqs[num_irq++] = reo2host_status; + + if (j < max_radios) { + if (ath11k_rxdma2host_ring_mask[i] & bit(j)) { + irq_grp->irqs[num_irq++] = + rxdma2host_destination_ring_mac1 + - ath11k_core_get_hw_mac_id(ab, j); + } + + if (ath11k_host2rxdma_ring_mask[i] & bit(j)) { + irq_grp->irqs[num_irq++] = + host2rxdma_host_buf_ring_mac1 + - ath11k_core_get_hw_mac_id(ab, j); + } + + if (rx_mon_status_ring_mask[i] & bit(j)) { + irq_grp->irqs[num_irq++] = + ppdu_end_interrupts_mac1 - + ath11k_core_get_hw_mac_id(ab, j); + irq_grp->irqs[num_irq++] = + rxdma2host_monitor_status_ring_mac1 - + ath11k_core_get_hw_mac_id(ab, j); + } + } + } + irq_grp->num_irq = num_irq; + + for (j = 0; j < irq_grp->num_irq; j++) { + int irq_idx = irq_grp->irqs[j]; + + irq = platform_get_irq_byname(ab->pdev, + irq_name[irq_idx]); + ab->irq_num[irq_idx] = irq; + irq_set_status_flags(irq, irq_noautoen); + ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler, + irqf_trigger_rising, + irq_name[irq_idx], irq_grp); + if (ret) { + ath11k_err(ab, "failed request_irq for %d ", + irq); + } + } + } + + return 0; +} + +static int ath11k_ahb_config_irq(struct ath11k_base *ab) +{ + int irq, irq_idx, i; + int ret; + + /* configure ce irqs */ + for (i = 0; i < ce_count; i++) { + struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath11k_ce_get_attr_flags(i) & ce_attr_dis_intr) + continue; + + irq_idx = ath11k_irq_ce0_offset + i; + + tasklet_init(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet, + (unsigned long)ce_pipe); + irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); + ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler, + irqf_trigger_rising, irq_name[irq_idx], + ce_pipe); + if (ret) 
+ return ret; + + ab->irq_num[irq_idx] = irq; + } + + /* configure external interrupts */ + ret = ath11k_ahb_ext_irq_config(ab); + + return ret; +} + +int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + for (i = 0; i < array_size(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case pipedir_none: + break; + case pipedir_in: + warn_on(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case pipedir_out: + warn_on(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case pipedir_inout: + warn_on(dl_set); + warn_on(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (warn_on(!ul_set || !dl_set)) + return -enoent; + + return 0; +} + +static int ath11k_ahb_probe(struct platform_device *pdev) +{ + struct ath11k_base *ab; + const struct of_device_id *of_id; + struct resource *mem_res; + void __iomem *mem; + int ret; + + of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "failed to find matching device tree id "); + return -einval; + } + + mem_res = platform_get_resource(pdev, ioresource_mem, 0); + if (!mem_res) { + dev_err(&pdev->dev, "failed to get io memory resource "); + return -enxio; + } + + mem = devm_ioremap_resource(&pdev->dev, mem_res); + if (is_err(mem)) { + dev_err(&pdev->dev, "ioremap error "); + return ptr_err(mem); + } + + ret = dma_set_mask_and_coherent(&pdev->dev, dma_bit_mask(32)); + if (ret) { + dev_err(&pdev->dev, "failed to set 32-bit consistent dma "); + return ret; + } + + ab = ath11k_core_alloc(&pdev->dev); + if (!ab) { + dev_err(&pdev->dev, "failed to 
allocate ath11k base "); + return -enomem; + } + + ab->pdev = pdev; + ab->hw_rev = (enum ath11k_hw_rev)of_id->data; + ab->mem = mem; + ab->mem_len = resource_size(mem_res); + platform_set_drvdata(pdev, ab); + + ret = ath11k_hal_srng_init(ab); + if (ret) + goto err_core_free; + + ret = ath11k_ce_alloc_pipes(ab); + if (ret) { + ath11k_err(ab, "failed to allocate ce pipes: %d ", ret); + goto err_hal_srng_deinit; + } + + ath11k_ahb_init_qmi_ce_config(ab); + + ret = ath11k_ahb_config_irq(ab); + if (ret) { + ath11k_err(ab, "failed to configure irq: %d ", ret); + goto err_ce_free; + } + + ret = ath11k_core_init(ab); + if (ret) { + ath11k_err(ab, "failed to init core: %d ", ret); + goto err_ce_free; + } + + return 0; + +err_ce_free: + ath11k_ce_free_pipes(ab); + +err_hal_srng_deinit: + ath11k_hal_srng_deinit(ab); + +err_core_free: + ath11k_core_free(ab); + platform_set_drvdata(pdev, null); + + return ret; +} + +static int ath11k_ahb_remove(struct platform_device *pdev) +{ + struct ath11k_base *ab = platform_get_drvdata(pdev); + + reinit_completion(&ab->driver_recovery); + + if (test_bit(ath11k_flag_recovery, &ab->dev_flags)) + wait_for_completion_timeout(&ab->driver_recovery, + ath11k_ahb_recovery_timeout); + + set_bit(ath11k_flag_unregistering, &ab->dev_flags); + cancel_work_sync(&ab->restart_work); + + ath11k_core_deinit(ab); + ath11k_ahb_free_irq(ab); + + ath11k_hal_srng_deinit(ab); + ath11k_ce_free_pipes(ab); + ath11k_core_free(ab); + platform_set_drvdata(pdev, null); + + return 0; +} + +static struct platform_driver ath11k_ahb_driver = { + .driver = { + .name = "ath11k", + .of_match_table = ath11k_ahb_of_match, + }, + .probe = ath11k_ahb_probe, + .remove = ath11k_ahb_remove, +}; + +int ath11k_ahb_init(void) +{ + return platform_driver_register(&ath11k_ahb_driver); +} + +void ath11k_ahb_exit(void) +{ + platform_driver_unregister(&ath11k_ahb_driver); +} diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h --- /dev/null +++ 
b/drivers/net/wireless/ath/ath11k/ahb.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ +#ifndef ath11k_ahb_h +#define ath11k_ahb_h + +#include "core.h" + +#define ath11k_ahb_recovery_timeout (3 * hz) +struct ath11k_base; + +static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) +{ + return ioread32(ab->mem + offset); +} + +static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value) +{ + iowrite32(value, ab->mem + offset); +} + +void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab); +void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab); +int ath11k_ahb_start(struct ath11k_base *ab); +void ath11k_ahb_stop(struct ath11k_base *ab); +int ath11k_ahb_power_up(struct ath11k_base *ab); +void ath11k_ahb_power_down(struct ath11k_base *ab); +int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); + +int ath11k_ahb_init(void); +void ath11k_ahb_exit(void); + +#endif diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/ce.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include "dp_rx.h" +#include "debug.h" + +static const struct ce_attr host_ce_config_wlan[] = { + /* ce0: host->target htc control and raw streams */ + { + .flags = ce_attr_flags, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* ce1: target->host htt + htc control */ + { + .flags = ce_attr_flags, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* ce2: target->host wmi */ + { + .flags = ce_attr_flags, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* ce3: host->target wmi (mac0) */ + { + .flags = ce_attr_flags, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* ce4: host->target htt */ + { + .flags = ce_attr_flags | ce_attr_dis_intr, + .src_nentries = 2048, + .src_sz_max = 256, + .dest_nentries = 0, + }, + + /* ce5: target->host pktlog */ + { + .flags = ce_attr_flags, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler, + }, + + /* ce6: target autonomous hif_memcpy */ + { + .flags = ce_attr_flags | ce_attr_dis_intr, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* ce7: host->target wmi (mac1) */ + { + .flags = ce_attr_flags, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* ce8: target autonomous hif_memcpy */ + { + .flags = ce_attr_flags, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* ce9: host->target wmi (mac2) */ + { + .flags = ce_attr_flags, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + }, + + /* ce10: target->host htt */ + { + .flags = ce_attr_flags, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath11k_htc_rx_completion_handler, + }, + + /* ce11: not used */ + { + .flags = ce_attr_flags, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, 
+}; + +static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe, + struct sk_buff *skb, dma_addr_t paddr) +{ + struct ath11k_base *ab = pipe->ab; + struct ath11k_ce_ring *ring = pipe->dest_ring; + struct hal_srng *srng; + unsigned int write_index; + unsigned int nentries_mask = ring->nentries_mask; + u32 *desc; + int ret; + + lockdep_assert_held(&ab->ce.ce_lock); + + write_index = ring->write_index; + + srng = &ab->hal.srng_list[ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) { + ret = -enospc; + goto exit; + } + + desc = ath11k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -enospc; + goto exit; + } + + ath11k_hal_ce_dst_set_desc(desc, paddr); + + ring->skb[write_index] = skb; + write_index = ce_ring_idx_incr(nentries_mask, write_index); + ring->write_index = write_index; + + pipe->rx_buf_needed--; + + ret = 0; +exit: + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return ret; +} + +static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe) +{ + struct ath11k_base *ab = pipe->ab; + struct sk_buff *skb; + dma_addr_t paddr; + int ret = 0; + + if (!(pipe->dest_ring || pipe->status_ring)) + return 0; + + spin_lock_bh(&ab->ce.ce_lock); + while (pipe->rx_buf_needed) { + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) { + ret = -enomem; + goto exit; + } + + warn_on_once(!is_aligned((unsigned long)skb->data, 4)); + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + dma_from_device); + if (unlikely(dma_mapping_error(ab->dev, paddr))) { + ath11k_warn(ab, "failed to dma map ce rx buf "); + dev_kfree_skb_any(skb); + ret = -eio; + goto exit; + } + + ath11k_skb_rxcb(skb)->paddr = paddr; + + ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr); + + if (ret) { + ath11k_warn(ab, "failed to enqueue rx buf: %d ", ret); + dma_unmap_single(ab->dev, paddr, + skb->len + 
skb_tailroom(skb), + dma_from_device); + dev_kfree_skb_any(skb); + goto exit; + } + } + +exit: + spin_unlock_bh(&ab->ce.ce_lock); + return ret; +} + +static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe, + struct sk_buff **skb, int *nbytes) +{ + struct ath11k_base *ab = pipe->ab; + struct hal_srng *srng; + unsigned int sw_index; + unsigned int nentries_mask; + u32 *desc; + int ret = 0; + + spin_lock_bh(&ab->ce.ce_lock); + + sw_index = pipe->dest_ring->sw_index; + nentries_mask = pipe->dest_ring->nentries_mask; + + srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); + if (!desc) { + ret = -eio; + goto err; + } + + *nbytes = ath11k_hal_ce_dst_status_get_length(desc); + if (*nbytes == 0) { + ret = -eio; + goto err; + } + + *skb = pipe->dest_ring->skb[sw_index]; + pipe->dest_ring->skb[sw_index] = null; + + sw_index = ce_ring_idx_incr(nentries_mask, sw_index); + pipe->dest_ring->sw_index = sw_index; + + pipe->rx_buf_needed++; +err: + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return ret; +} + +static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe) +{ + struct ath11k_base *ab = pipe->ab; + struct sk_buff *skb; + struct sk_buff_head list; + unsigned int nbytes, max_nbytes; + int ret; + + __skb_queue_head_init(&list); + while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) { + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ab->dev, ath11k_skb_rxcb(skb)->paddr, + max_nbytes, dma_from_device); + + if (unlikely(max_nbytes < nbytes)) { + ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } + + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath11k_dbg(ab, ath11k_dbg_ahb, "rx ce pipe %d len %d 
", + pipe->pipe_num, skb->len); + pipe->recv_cb(ab, skb); + } + + ret = ath11k_ce_rx_post_pipe(pipe); + if (ret && ret != -enospc) { + ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d ", + pipe->pipe_num, ret); + mod_timer(&ab->rx_replenish_retry, + jiffies + ath11k_ce_rx_post_retry_jiffies); + } +} + +static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe) +{ + struct ath11k_base *ab = pipe->ab; + struct hal_srng *srng; + unsigned int sw_index; + unsigned int nentries_mask; + struct sk_buff *skb; + u32 *desc; + + spin_lock_bh(&ab->ce.ce_lock); + + sw_index = pipe->src_ring->sw_index; + nentries_mask = pipe->src_ring->nentries_mask; + + srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + desc = ath11k_hal_srng_src_reap_next(ab, srng); + if (!desc) { + skb = err_ptr(-eio); + goto err_unlock; + } + + skb = pipe->src_ring->skb[sw_index]; + + pipe->src_ring->skb[sw_index] = null; + + sw_index = ce_ring_idx_incr(nentries_mask, sw_index); + pipe->src_ring->sw_index = sw_index; + +err_unlock: + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return skb; +} + +static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe) +{ + struct ath11k_base *ab = pipe->ab; + struct sk_buff *skb; + + while (!is_err(skb = ath11k_ce_completed_send_next(pipe))) { + if (!skb) + continue; + + dma_unmap_single(ab->dev, ath11k_skb_cb(skb)->paddr, skb->len, + dma_to_device); + dev_kfree_skb_any(skb); + } +} + +static int ath11k_ce_init_ring(struct ath11k_base *ab, + struct ath11k_ce_ring *ce_ring, + int ce_id, enum hal_ring_type type) +{ + struct hal_srng_params params = { 0 }; + int ret; + + params.ring_base_paddr = ce_ring->base_addr_ce_space; + params.ring_base_vaddr = ce_ring->base_addr_owner_space; + params.num_entries = ce_ring->nentries; + + switch (type) { + case hal_ce_src: + if (!(ce_attr_dis_intr & host_ce_config_wlan[ce_id].flags)) + 
params.intr_batch_cntr_thres_entries = 1; + break; + case hal_ce_dst: + params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max; + if (!(host_ce_config_wlan[ce_id].flags & ce_attr_dis_intr)) { + params.intr_timer_thres_us = 1024; + params.flags |= hal_srng_flags_low_thresh_intr_en; + params.low_threshold = ce_ring->nentries - 3; + } + break; + case hal_ce_dst_status: + if (!(host_ce_config_wlan[ce_id].flags & ce_attr_dis_intr)) { + params.intr_batch_cntr_thres_entries = 1; + params.intr_timer_thres_us = 0x1000; + } + break; + default: + ath11k_warn(ab, "invalid ce ring type %d ", type); + return -einval; + } + + /* todo: init other params needed by hal to init the ring */ + + ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, ¶ms); + if (ret < 0) { + ath11k_warn(ab, "failed to setup srng: %d ring_id %d ", + ret, ce_id); + return ret; + } + ce_ring->hal_ring_id = ret; + + return 0; +} + +static struct ath11k_ce_ring * +ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz) +{ + struct ath11k_ce_ring *ce_ring; + dma_addr_t base_addr; + + ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), gfp_kernel); + if (ce_ring == null) + return err_ptr(-enomem); + + ce_ring->nentries = nentries; + ce_ring->nentries_mask = nentries - 1; + + /* legacy platforms that do not support cache + * coherent dma are unsupported + */ + ce_ring->base_addr_owner_space_unaligned = + dma_alloc_coherent(ab->dev, + nentries * desc_sz + ce_desc_ring_align, + &base_addr, gfp_kernel); + if (!ce_ring->base_addr_owner_space_unaligned) { + kfree(ce_ring); + return err_ptr(-enomem); + } + + ce_ring->base_addr_ce_space_unaligned = base_addr; + + ce_ring->base_addr_owner_space = ptr_align( + ce_ring->base_addr_owner_space_unaligned, + ce_desc_ring_align); + ce_ring->base_addr_ce_space = align( + ce_ring->base_addr_ce_space_unaligned, + ce_desc_ring_align); + + return ce_ring; +} + +static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id) +{ + struct ath11k_ce_pipe *pipe = 
&ab->ce.ce_pipe[ce_id]; + const struct ce_attr *attr = &host_ce_config_wlan[ce_id]; + int nentries; + int desc_sz; + + pipe->attr_flags = attr->flags; + + if (attr->src_nentries) { + pipe->send_cb = ath11k_ce_send_done_cb; + nentries = roundup_pow_of_two(attr->src_nentries); + desc_sz = ath11k_hal_ce_get_desc_size(hal_ce_desc_src); + pipe->src_ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); + if (!pipe->src_ring) + return -enomem; + } + + if (attr->dest_nentries) { + pipe->recv_cb = attr->recv_cb; + nentries = roundup_pow_of_two(attr->dest_nentries); + desc_sz = ath11k_hal_ce_get_desc_size(hal_ce_desc_dst); + pipe->dest_ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); + + if (!pipe->dest_ring) + return -enomem; + + desc_sz = ath11k_hal_ce_get_desc_size(hal_ce_desc_dst_status); + pipe->status_ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz); + if (!pipe->status_ring) + return -enomem; + } + + return 0; +} + +void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id) +{ + struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id]; + + if (pipe->send_cb) + pipe->send_cb(pipe); + + if (pipe->recv_cb) + ath11k_ce_recv_process_cb(pipe); +} + +void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id) +{ + struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; + + if ((pipe->attr_flags & ce_attr_dis_intr) && pipe->send_cb) + pipe->send_cb(pipe); +} + +int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id, + u16 transfer_id) +{ + struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id]; + struct hal_srng *srng; + u32 *desc; + unsigned int write_index, sw_index; + unsigned int nentries_mask; + int ret = 0; + u8 byte_swap_data = 0; + int num_used; + + /* check if some entries could be regained by handling tx completion if + * the ce has interrupts disabled and the used entries is more than the + * defined usage threshold. 
+ */ + if (pipe->attr_flags & ce_attr_dis_intr) { + spin_lock_bh(&ab->ce.ce_lock); + write_index = pipe->src_ring->write_index; + + sw_index = pipe->src_ring->sw_index; + + if (write_index >= sw_index) + num_used = write_index - sw_index; + else + num_used = pipe->src_ring->nentries - sw_index + + write_index; + + spin_unlock_bh(&ab->ce.ce_lock); + + if (num_used > ath11k_ce_usage_threshold) + ath11k_ce_poll_send_completed(ab, pipe->pipe_num); + } + + if (test_bit(ath11k_flag_crash_flush, &ab->dev_flags)) + return -eshutdown; + + spin_lock_bh(&ab->ce.ce_lock); + + write_index = pipe->src_ring->write_index; + nentries_mask = pipe->src_ring->nentries_mask; + + srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) { + ath11k_hal_srng_access_end(ab, srng); + ret = -enobufs; + goto err_unlock; + } + + desc = ath11k_hal_srng_src_get_next_reaped(ab, srng); + if (!desc) { + ath11k_hal_srng_access_end(ab, srng); + ret = -enobufs; + goto err_unlock; + } + + if (pipe->attr_flags & ce_attr_byte_swap_data) + byte_swap_data = 1; + + ath11k_hal_ce_src_set_desc(desc, ath11k_skb_cb(skb)->paddr, + skb->len, transfer_id, byte_swap_data); + + pipe->src_ring->skb[write_index] = skb; + pipe->src_ring->write_index = ce_ring_idx_incr(nentries_mask, + write_index); + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return 0; + +err_unlock: + spin_unlock_bh(&srng->lock); + + spin_unlock_bh(&ab->ce.ce_lock); + + return ret; +} + +static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe) +{ + struct ath11k_base *ab = pipe->ab; + struct ath11k_ce_ring *ring = pipe->dest_ring; + struct sk_buff *skb; + int i; + + if (!(ring && pipe->buf_sz)) + return; + + for (i = 0; i < ring->nentries; i++) { + skb = ring->skb[i]; + if (!skb) + continue; + + ring->skb[i] = null; + 
dma_unmap_single(ab->dev, ath11k_skb_rxcb(skb)->paddr, + skb->len + skb_tailroom(skb), dma_from_device); + dev_kfree_skb_any(skb); + } +} + +void ath11k_ce_cleanup_pipes(struct ath11k_base *ab) +{ + struct ath11k_ce_pipe *pipe; + int pipe_num; + + for (pipe_num = 0; pipe_num < ce_count; pipe_num++) { + pipe = &ab->ce.ce_pipe[pipe_num]; + ath11k_ce_rx_pipe_cleanup(pipe); + + /* cleanup any src ce's which have interrupts disabled */ + ath11k_ce_poll_send_completed(ab, pipe_num); + + /* note: should we also clean up tx buffer in all pipes? */ + } +} + +void ath11k_ce_rx_post_buf(struct ath11k_base *ab) +{ + struct ath11k_ce_pipe *pipe; + int i; + int ret; + + for (i = 0; i < ce_count; i++) { + pipe = &ab->ce.ce_pipe[i]; + ret = ath11k_ce_rx_post_pipe(pipe); + if (ret) { + if (ret == -enospc) + continue; + + ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d ", + i, ret); + mod_timer(&ab->rx_replenish_retry, + jiffies + ath11k_ce_rx_post_retry_jiffies); + + return; + } + } +} + +void ath11k_ce_rx_replenish_retry(struct timer_list *t) +{ + struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry); + + ath11k_ce_rx_post_buf(ab); +} + +int ath11k_ce_init_pipes(struct ath11k_base *ab) +{ + struct ath11k_ce_pipe *pipe; + int i; + int ret; + + for (i = 0; i < ce_count; i++) { + pipe = &ab->ce.ce_pipe[i]; + + if (pipe->src_ring) { + ret = ath11k_ce_init_ring(ab, pipe->src_ring, i, + hal_ce_src); + if (ret) { + ath11k_warn(ab, "failed to init src ring: %d ", + ret); + /* should we clear any partial init */ + return ret; + } + + pipe->src_ring->write_index = 0; + pipe->src_ring->sw_index = 0; + } + + if (pipe->dest_ring) { + ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i, + hal_ce_dst); + if (ret) { + ath11k_warn(ab, "failed to init dest ring: %d ", + ret); + /* should we clear any partial init */ + return ret; + } + + pipe->rx_buf_needed = pipe->dest_ring->nentries ? 
+ pipe->dest_ring->nentries - 2 : 0; + + pipe->dest_ring->write_index = 0; + pipe->dest_ring->sw_index = 0; + } + + if (pipe->status_ring) { + ret = ath11k_ce_init_ring(ab, pipe->status_ring, i, + hal_ce_dst_status); + if (ret) { + ath11k_warn(ab, "failed to init dest status ing: %d ", + ret); + /* should we clear any partial init */ + return ret; + } + + pipe->status_ring->write_index = 0; + pipe->status_ring->sw_index = 0; + } + } + + return 0; +} + +void ath11k_ce_free_pipes(struct ath11k_base *ab) +{ + struct ath11k_ce_pipe *pipe; + int desc_sz; + int i; + + for (i = 0; i < ce_count; i++) { + pipe = &ab->ce.ce_pipe[i]; + + if (pipe->src_ring) { + desc_sz = ath11k_hal_ce_get_desc_size(hal_ce_desc_src); + dma_free_coherent(ab->dev, + pipe->src_ring->nentries * desc_sz + + ce_desc_ring_align, + pipe->src_ring->base_addr_owner_space, + pipe->src_ring->base_addr_ce_space); + kfree(pipe->src_ring); + pipe->src_ring = null; + } + + if (pipe->dest_ring) { + desc_sz = ath11k_hal_ce_get_desc_size(hal_ce_desc_dst); + dma_free_coherent(ab->dev, + pipe->dest_ring->nentries * desc_sz + + ce_desc_ring_align, + pipe->dest_ring->base_addr_owner_space, + pipe->dest_ring->base_addr_ce_space); + kfree(pipe->dest_ring); + pipe->dest_ring = null; + } + + if (pipe->status_ring) { + desc_sz = + ath11k_hal_ce_get_desc_size(hal_ce_desc_dst_status); + dma_free_coherent(ab->dev, + pipe->status_ring->nentries * desc_sz + + ce_desc_ring_align, + pipe->status_ring->base_addr_owner_space, + pipe->status_ring->base_addr_ce_space); + kfree(pipe->status_ring); + pipe->status_ring = null; + } + } +} + +int ath11k_ce_alloc_pipes(struct ath11k_base *ab) +{ + struct ath11k_ce_pipe *pipe; + int i; + int ret; + const struct ce_attr *attr; + + spin_lock_init(&ab->ce.ce_lock); + + for (i = 0; i < ce_count; i++) { + attr = &host_ce_config_wlan[i]; + pipe = &ab->ce.ce_pipe[i]; + pipe->pipe_num = i; + pipe->ab = ab; + pipe->buf_sz = attr->src_sz_max; + + ret = ath11k_ce_alloc_pipe(ab, i); + if (ret) { + /* 
free any parial successful allocation */ + ath11k_ce_free_pipes(ab); + return ret; + } + } + + return 0; +} + +/* for big endian host, copy engine byte_swap is enabled + * when copy engine does byte_swap, need to byte swap again for the + * host to get/put buffer content in the correct byte order + */ +void ath11k_ce_byte_swap(void *mem, u32 len) +{ + int i; + + if (is_enabled(config_cpu_big_endian)) { + if (!mem) + return; + + for (i = 0; i < (len / 4); i++) { + *(u32 *)mem = swab32(*(u32 *)mem); + mem += 4; + } + } +} + +int ath11k_ce_get_attr_flags(int ce_id) +{ + if (ce_id >= ce_count) + return -einval; + + return host_ce_config_wlan[ce_id].flags; +} diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/ce.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#ifndef ath11k_ce_h +#define ath11k_ce_h + +#define ce_count 12 + +/* byte swap data words */ +#define ce_attr_byte_swap_data 2 + +/* no interrupt on copy completion */ +#define ce_attr_dis_intr 8 + +/* host software's copy engine configuration. */ +#ifdef __big_endian +#define ce_attr_flags ce_attr_byte_swap_data +#else +#define ce_attr_flags 0 +#endif + +/* threshold to poll for tx completion in case of interrupt disabled ce's */ +#define ath11k_ce_usage_threshold 32 + +void ath11k_ce_byte_swap(void *mem, u32 len); + +/* + * directions for interconnect pipe configuration. + * these definitions may be used during configuration and are shared + * between host and target. + * + * pipe directions are relative to the host, so pipedir_in means + * "coming in over air through target to host" as with a wifi rx operation. + * conversely, pipedir_out means "going out from host through target over air" + * as with a wifi tx operation. 
this is somewhat awkward for the "middle-man" + * target since things that are "pipedir_out" are coming in to the target + * over the interconnect. + */ +#define pipedir_none 0 +#define pipedir_in 1 /* target-->host, wifi rx direction */ +#define pipedir_out 2 /* host->target, wifi tx direction */ +#define pipedir_inout 3 /* bidirectional */ +#define pipedir_inout_h2h 4 /* bidirectional, host to host */ + +/* ce address/mask */ +#define ce_host_ie_address 0x00a1803c +#define ce_host_ie_2_address 0x00a18040 +#define ce_host_ie_3_address ce_host_ie_address + +#define ce_host_ie_3_shift 0xc + +#define ce_ring_idx_incr(nentries_mask, idx) (((idx) + 1) & (nentries_mask)) + +#define ath11k_ce_rx_post_retry_jiffies 50 + +struct ath11k_base; + +/* establish a mapping between a service/direction and a pipe. */ +struct service_to_pipe { + __le32 service_id; + __le32 pipedir; + __le32 pipenum; +}; + +/* + * configuration information for a copy engine pipe. + * passed from host to target during startup (one per ce). + * + * note: structure is shared between host software and target firmware! + */ +struct ce_pipe_config { + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; +}; + +struct ce_attr { + /* ce_attr_* values */ + unsigned int flags; + + /* #entries in source ring - must be a power of 2 */ + unsigned int src_nentries; + + /* + * max source send size for this ce. + * this is also the minimum size of a destination buffer. + */ + unsigned int src_sz_max; + + /* #entries in destination ring - must be a power of 2 */ + unsigned int dest_nentries; + + void (*recv_cb)(struct ath11k_base *, struct sk_buff *); +}; + +#define ce_desc_ring_align 8 + +struct ath11k_ce_ring { + /* number of entries in this ring; must be power of 2 */ + unsigned int nentries; + unsigned int nentries_mask; + + /* for dest ring, this is the next index to be processed + * by software after it was/is received into. 
+ * + * for src ring, this is the last descriptor that was sent + * and completion processed by software. + * + * regardless of src or dest ring, this is an invariant + * (modulo ring size): + * write index >= read index >= sw_index + */ + unsigned int sw_index; + /* cached copy */ + unsigned int write_index; + + /* start of dma-coherent area reserved for descriptors */ + /* host address space */ + void *base_addr_owner_space_unaligned; + /* ce address space */ + u32 base_addr_ce_space_unaligned; + + /* actual start of descriptors. + * aligned to descriptor-size boundary. + * points into reserved dma-coherent area, above. + */ + /* host address space */ + void *base_addr_owner_space; + + /* ce address space */ + u32 base_addr_ce_space; + + /* hal ring id */ + u32 hal_ring_id; + + /* keep last */ + struct sk_buff *skb[0]; +}; + +struct ath11k_ce_pipe { + struct ath11k_base *ab; + u16 pipe_num; + unsigned int attr_flags; + unsigned int buf_sz; + unsigned int rx_buf_needed; + + void (*send_cb)(struct ath11k_ce_pipe *); + void (*recv_cb)(struct ath11k_base *, struct sk_buff *); + + struct tasklet_struct intr_tq; + struct ath11k_ce_ring *src_ring; + struct ath11k_ce_ring *dest_ring; + struct ath11k_ce_ring *status_ring; +}; + +struct ath11k_ce { + struct ath11k_ce_pipe ce_pipe[ce_count]; + /* protects rings of all ce pipes */ + spinlock_t ce_lock; +}; + +void ath11k_ce_cleanup_pipes(struct ath11k_base *ab); +void ath11k_ce_rx_replenish_retry(struct timer_list *t); +void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id); +int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id, + u16 transfer_id); +void ath11k_ce_rx_post_buf(struct ath11k_base *ab); +int ath11k_ce_init_pipes(struct ath11k_base *ab); +int ath11k_ce_alloc_pipes(struct ath11k_base *ab); +void ath11k_ce_free_pipes(struct ath11k_base *ab); +int ath11k_ce_get_attr_flags(int ce_id); +void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id); +#endif diff --git 
a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/core.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/remoteproc.h> +#include <linux/firmware.h> +#include "ahb.h" +#include "core.h" +#include "dp_tx.h" +#include "debug.h" + +unsigned int ath11k_debug_mask; +module_param_named(debug_mask, ath11k_debug_mask, uint, 0644); +module_parm_desc(debug_mask, "debugging mask"); + +static const struct ath11k_hw_params ath11k_hw_params = { + .name = "ipq8074", + .fw = { + .dir = ipq8074_fw_dir, + .board_size = ipq8074_max_board_data_sz, + .cal_size = ipq8074_max_cal_data_sz, + }, +}; + +/* map from pdev index to hw mac index */ +u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx) +{ + switch (pdev_idx) { + case 0: + return 0; + case 1: + return 2; + case 2: + return 1; + default: + ath11k_warn(ab, "invalid pdev idx %d ", pdev_idx); + return ath11k_invalid_hw_mac_id; + } +} + +static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, + size_t name_len) +{ + /* note: bus is fixed to ahb. when other bus type supported, + * make it to dynamic. 
+ */ + scnprintf(name, name_len, + "bus=ahb,qmi-chip-id=%d,qmi-board-id=%d", + ab->qmi.target.chip_id, + ab->qmi.target.board_id); + + ath11k_dbg(ab, ath11k_dbg_boot, "boot using board name '%s' ", name); + + return 0; +} + +static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab, + const char *dir, + const char *file) +{ + char filename[100]; + const struct firmware *fw; + int ret; + + if (file == null) + return err_ptr(-enoent); + + if (dir == null) + dir = "."; + + snprintf(filename, sizeof(filename), "%s/%s", dir, file); + ret = firmware_request_nowarn(&fw, filename, ab->dev); + ath11k_dbg(ab, ath11k_dbg_boot, "boot fw request '%s': %d ", + filename, ret); + + if (ret) + return err_ptr(ret); + ath11k_warn(ab, "downloading bdf: %s, size: %zu ", + filename, fw->size); + + return fw; +} + +void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) +{ + if (!is_err(bd->fw)) + release_firmware(bd->fw); + + memset(bd, 0, sizeof(*bd)); +} + +static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, + struct ath11k_board_data *bd, + const void *buf, size_t buf_len, + const char *boardname, + int bd_ie_type) +{ + const struct ath11k_fw_ie *hdr; + bool name_match_found; + int ret, board_ie_id; + size_t board_ie_len; + const void *board_ie_data; + + name_match_found = false; + + /* go through ath11k_bd_ie_board_ elements */ + while (buf_len > sizeof(struct ath11k_fw_ie)) { + hdr = buf; + board_ie_id = le32_to_cpu(hdr->id); + board_ie_len = le32_to_cpu(hdr->len); + board_ie_data = hdr->data; + + buf_len -= sizeof(*hdr); + buf += sizeof(*hdr); + + if (buf_len < align(board_ie_len, 4)) { + ath11k_err(ab, "invalid ath11k_bd_ie_board length: %zu < %zu ", + buf_len, align(board_ie_len, 4)); + ret = -einval; + goto out; + } + + switch (board_ie_id) { + case ath11k_bd_ie_board_name: + ath11k_dbg_dump(ab, ath11k_dbg_boot, "board name", "", + board_ie_data, board_ie_len); + + if (board_ie_len != strlen(boardname)) + break; + + ret = 
memcmp(board_ie_data, boardname, strlen(boardname)); + if (ret) + break; + + name_match_found = true; + ath11k_dbg(ab, ath11k_dbg_boot, + "boot found match for name '%s'", + boardname); + break; + case ath11k_bd_ie_board_data: + if (!name_match_found) + /* no match found */ + break; + + ath11k_dbg(ab, ath11k_dbg_boot, + "boot found board data for '%s'", boardname); + + bd->data = board_ie_data; + bd->len = board_ie_len; + + ret = 0; + goto out; + default: + ath11k_warn(ab, "unknown ath11k_bd_ie_board found: %d ", + board_ie_id); + break; + } + + /* jump over the padding */ + board_ie_len = align(board_ie_len, 4); + + buf_len -= board_ie_len; + buf += board_ie_len; + } + + /* no match found */ + ret = -enoent; + +out: + return ret; +} + +static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, + struct ath11k_board_data *bd, + const char *boardname) +{ + size_t len, magic_len; + const u8 *data; + char *filename = ath11k_board_api2_file; + size_t ie_len; + struct ath11k_fw_ie *hdr; + int ret, ie_id; + + if (!bd->fw) + bd->fw = ath11k_fetch_fw_file(ab, + ab->hw_params.fw.dir, + filename); + if (is_err(bd->fw)) + return ptr_err(bd->fw); + + data = bd->fw->data; + len = bd->fw->size; + + /* magic has extra null byte padded */ + magic_len = strlen(ath11k_board_magic) + 1; + if (len < magic_len) { + ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu ", + ab->hw_params.fw.dir, filename, len); + ret = -einval; + goto err; + } + + if (memcmp(data, ath11k_board_magic, magic_len)) { + ath11k_err(ab, "found invalid board magic "); + ret = -einval; + goto err; + } + + /* magic is padded to 4 bytes */ + magic_len = align(magic_len, 4); + if (len < magic_len) { + ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu ", + ab->hw_params.fw.dir, filename, len); + ret = -einval; + goto err; + } + + data += magic_len; + len -= magic_len; + + while (len > sizeof(struct ath11k_fw_ie)) { + hdr = (struct ath11k_fw_ie *)data; + ie_id = 
le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data = hdr->data; + + if (len < align(ie_len, 4)) { + ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu ", + ie_id, ie_len, len); + return -einval; + } + + switch (ie_id) { + case ath11k_bd_ie_board: + ret = ath11k_core_parse_bd_ie_board(ab, bd, data, + ie_len, + boardname, + ath11k_bd_ie_board); + if (ret == -enoent) + /* no match found, continue */ + break; + else if (ret) + /* there was an error, bail out */ + goto err; + /* either found or error, so stop searching */ + goto out; + } + + /* jump over the padding */ + ie_len = align(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + +out: + if (!bd->data || !bd->len) { + ath11k_err(ab, + "failed to fetch board data for %s from %s/%s ", + boardname, ab->hw_params.fw.dir, filename); + ret = -enodata; + goto err; + } + + return 0; + +err: + ath11k_core_free_bdf(ab, bd); + return ret; +} + +static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, + struct ath11k_board_data *bd) +{ + bd->fw = ath11k_fetch_fw_file(ab, + ab->hw_params.fw.dir, + ath11k_default_board_file); + if (is_err(bd->fw)) + return ptr_err(bd->fw); + + bd->data = bd->fw->data; + bd->len = bd->fw->size; + + return 0; +} + +#define board_name_size 100 +int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) +{ + char boardname[board_name_size]; + int ret; + + ret = ath11k_core_create_board_name(ab, boardname, board_name_size); + if (ret) { + ath11k_err(ab, "failed to create board name: %d", ret); + return ret; + } + + ab->bd_api = 2; + ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname); + if (!ret) + goto success; + + ab->bd_api = 1; + ret = ath11k_core_fetch_board_data_api_1(ab, bd); + if (ret) { + ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s ", + ab->hw_params.fw.dir); + return ret; + } + +success: + ath11k_dbg(ab, ath11k_dbg_boot, "using board api %d ", ab->bd_api); + return 0; 
+} + +static void ath11k_core_stop(struct ath11k_base *ab) +{ + if (!test_bit(ath11k_flag_crash_flush, &ab->dev_flags)) + ath11k_qmi_firmware_stop(ab); + ath11k_ahb_stop(ab); + ath11k_wmi_detach(ab); + + /* de-init of components as needed */ +} + +static int ath11k_core_soc_create(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_qmi_init_service(ab); + if (ret) { + ath11k_err(ab, "failed to initialize qmi :%d ", ret); + return ret; + } + + ret = ath11k_debug_soc_create(ab); + if (ret) { + ath11k_err(ab, "failed to create ath11k debugfs "); + goto err_qmi_deinit; + } + + ret = ath11k_ahb_power_up(ab); + if (ret) { + ath11k_err(ab, "failed to power up :%d ", ret); + goto err_debugfs_reg; + } + + return 0; + +err_debugfs_reg: + ath11k_debug_soc_destroy(ab); +err_qmi_deinit: + ath11k_qmi_deinit_service(ab); + return ret; +} + +static void ath11k_core_soc_destroy(struct ath11k_base *ab) +{ + ath11k_debug_soc_destroy(ab); + ath11k_dp_free(ab); + ath11k_reg_free(ab); + ath11k_qmi_deinit_service(ab); +} + +static int ath11k_core_pdev_create(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_debug_pdev_create(ab); + if (ret) { + ath11k_err(ab, "failed to create core pdev debugfs: %d ", ret); + return ret; + } + + ret = ath11k_mac_create(ab); + if (ret) { + ath11k_err(ab, "failed to create new hw device with mac80211 :%d ", + ret); + goto err_pdev_debug; + } + + ret = ath11k_dp_pdev_alloc(ab); + if (ret) { + ath11k_err(ab, "failed to attach dp pdev: %d ", ret); + goto err_mac_destroy; + } + + return 0; + +err_mac_destroy: + ath11k_mac_destroy(ab); + +err_pdev_debug: + ath11k_debug_pdev_destroy(ab); + + return ret; +} + +static void ath11k_core_pdev_destroy(struct ath11k_base *ab) +{ + ath11k_mac_unregister(ab); + ath11k_ahb_ext_irq_disable(ab); + ath11k_dp_pdev_free(ab); + ath11k_debug_pdev_destroy(ab); +} + +static int ath11k_core_start(struct ath11k_base *ab, + enum ath11k_firmware_mode mode) +{ + int ret; + + ret = ath11k_qmi_firmware_start(ab, mode); + if (ret) 
{ + ath11k_err(ab, "failed to attach wmi: %d ", ret); + return ret; + } + + ret = ath11k_wmi_attach(ab); + if (ret) { + ath11k_err(ab, "failed to attach wmi: %d ", ret); + goto err_firmware_stop; + } + + ret = ath11k_htc_init(ab); + if (ret) { + ath11k_err(ab, "failed to init htc: %d ", ret); + goto err_wmi_detach; + } + + ret = ath11k_ahb_start(ab); + if (ret) { + ath11k_err(ab, "failed to start hif: %d ", ret); + goto err_wmi_detach; + } + + ret = ath11k_htc_wait_target(&ab->htc); + if (ret) { + ath11k_err(ab, "failed to connect to htc: %d ", ret); + goto err_hif_stop; + } + + ret = ath11k_dp_htt_connect(&ab->dp); + if (ret) { + ath11k_err(ab, "failed to connect to htt: %d ", ret); + goto err_hif_stop; + } + + ret = ath11k_wmi_connect(ab); + if (ret) { + ath11k_err(ab, "failed to connect wmi: %d ", ret); + goto err_hif_stop; + } + + ret = ath11k_htc_start(&ab->htc); + if (ret) { + ath11k_err(ab, "failed to start htc: %d ", ret); + goto err_hif_stop; + } + + ret = ath11k_wmi_wait_for_service_ready(ab); + if (ret) { + ath11k_err(ab, "failed to receive wmi service ready event: %d ", + ret); + goto err_hif_stop; + } + + ret = ath11k_wmi_cmd_init(ab); + if (ret) { + ath11k_err(ab, "failed to send wmi init cmd: %d ", ret); + goto err_hif_stop; + } + + ret = ath11k_wmi_wait_for_unified_ready(ab); + if (ret) { + ath11k_err(ab, "failed to receive wmi unified ready event: %d ", + ret); + goto err_hif_stop; + } + + ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab); + if (ret) { + ath11k_err(ab, "failed to send htt version request message: %d ", + ret); + goto err_hif_stop; + } + + return 0; + +err_hif_stop: + ath11k_ahb_stop(ab); +err_wmi_detach: + ath11k_wmi_detach(ab); +err_firmware_stop: + ath11k_qmi_firmware_stop(ab); + + return ret; +} + +int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_ce_init_pipes(ab); + if (ret) { + ath11k_err(ab, "failed to initialize ce: %d ", ret); + return ret; + } + + ret = ath11k_dp_alloc(ab); + if (ret) { + 
ath11k_err(ab, "failed to init dp: %d ", ret); + return ret; + } + + mutex_lock(&ab->core_lock); + ret = ath11k_core_start(ab, ath11k_firmware_mode_normal); + if (ret) { + ath11k_err(ab, "failed to start core: %d ", ret); + goto err_dp_free; + } + + ret = ath11k_core_pdev_create(ab); + if (ret) { + ath11k_err(ab, "failed to create pdev core: %d ", ret); + goto err_core_stop; + } + ath11k_ahb_ext_irq_enable(ab); + mutex_unlock(&ab->core_lock); + + return 0; + +err_core_stop: + ath11k_core_stop(ab); +err_dp_free: + ath11k_dp_free(ab); + return ret; +} + +static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) +{ + int ret; + + mutex_lock(&ab->core_lock); + ath11k_ahb_ext_irq_disable(ab); + ath11k_dp_pdev_free(ab); + ath11k_ahb_stop(ab); + ath11k_wmi_detach(ab); + mutex_unlock(&ab->core_lock); + + ath11k_dp_free(ab); + ath11k_hal_srng_deinit(ab); + + ab->free_vdev_map = (1ll << (ab->num_radios * target_num_vdevs)) - 1; + + ret = ath11k_hal_srng_init(ab); + if (ret) + return ret; + + clear_bit(ath11k_flag_crash_flush, &ab->dev_flags); + + ret = ath11k_core_qmi_firmware_ready(ab); + if (ret) + goto err_hal_srng_deinit; + + clear_bit(ath11k_flag_recovery, &ab->dev_flags); + + return 0; + +err_hal_srng_deinit: + ath11k_hal_srng_deinit(ab); + return ret; +} + +void ath11k_core_halt(struct ath11k *ar) +{ + struct ath11k_base *ab = ar->ab; + + lockdep_assert_held(&ar->conf_mutex); + + ar->num_created_vdevs = 0; + + ath11k_mac_scan_finish(ar); + ath11k_mac_peer_cleanup_all(ar); + cancel_delayed_work_sync(&ar->scan.timeout); + cancel_work_sync(&ar->regd_update_work); + + rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], null); + synchronize_rcu(); + init_list_head(&ar->arvifs); + idr_init(&ar->txmgmt_idr); +} + +static void ath11k_core_restart(struct work_struct *work) +{ + struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work); + struct ath11k *ar; + struct ath11k_pdev *pdev; + int i, ret = 0; + + spin_lock_bh(&ab->base_lock); + 
ab->stats.fw_crash_counter++; + spin_unlock_bh(&ab->base_lock); + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (!ar || ar->state == ath11k_state_off) + continue; + + ieee80211_stop_queues(ar->hw); + ath11k_mac_drain_tx(ar); + complete(&ar->scan.started); + complete(&ar->scan.completed); + complete(&ar->peer_assoc_done); + complete(&ar->install_key_done); + complete(&ar->vdev_setup_done); + complete(&ar->bss_survey_done); + + wake_up(&ar->dp.tx_empty_waitq); + idr_for_each(&ar->txmgmt_idr, + ath11k_mac_tx_mgmt_pending_free, ar); + idr_destroy(&ar->txmgmt_idr); + } + + wake_up(&ab->wmi_sc.tx_credits_wq); + wake_up(&ab->peer_mapping_wq); + + ret = ath11k_core_reconfigure_on_crash(ab); + if (ret) { + ath11k_err(ab, "failed to reconfigure driver on crash recovery "); + return; + } + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (!ar || ar->state == ath11k_state_off) + continue; + + mutex_lock(&ar->conf_mutex); + + switch (ar->state) { + case ath11k_state_on: + ar->state = ath11k_state_restarting; + ath11k_core_halt(ar); + ieee80211_restart_hw(ar->hw); + break; + case ath11k_state_off: + ath11k_warn(ab, + "cannot restart radio %d that hasn't been started ", + i); + break; + case ath11k_state_restarting: + break; + case ath11k_state_restarted: + ar->state = ath11k_state_wedged; + /* fall through */ + case ath11k_state_wedged: + ath11k_warn(ab, + "device is wedged, will not restart radio %d ", i); + break; + } + mutex_unlock(&ar->conf_mutex); + } + complete(&ab->driver_recovery); +} + +int ath11k_core_init(struct ath11k_base *ab) +{ + struct device *dev = ab->dev; + struct rproc *prproc; + phandle rproc_phandle; + int ret; + + if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) { + ath11k_err(ab, "failed to get q6_rproc handle "); + return -enoent; + } + + prproc = rproc_get_by_phandle(rproc_phandle); + if (!prproc) { + ath11k_err(ab, "failed to get rproc "); + return 
-einval; + } + ab->tgt_rproc = prproc; + ab->hw_params = ath11k_hw_params; + + ret = ath11k_core_soc_create(ab); + if (ret) { + ath11k_err(ab, "failed to create soc core: %d ", ret); + return ret; + } + + return 0; +} + +void ath11k_core_deinit(struct ath11k_base *ab) +{ + mutex_lock(&ab->core_lock); + + ath11k_core_pdev_destroy(ab); + ath11k_core_stop(ab); + + mutex_unlock(&ab->core_lock); + + ath11k_ahb_power_down(ab); + ath11k_mac_destroy(ab); + ath11k_core_soc_destroy(ab); +} + +void ath11k_core_free(struct ath11k_base *ab) +{ + kfree(ab); +} + +struct ath11k_base *ath11k_core_alloc(struct device *dev) +{ + struct ath11k_base *ab; + + ab = kzalloc(sizeof(*ab), gfp_kernel); + if (!ab) + return null; + + init_completion(&ab->driver_recovery); + + ab->workqueue = create_singlethread_workqueue("ath11k_wq"); + if (!ab->workqueue) + goto err_sc_free; + + mutex_init(&ab->core_lock); + spin_lock_init(&ab->base_lock); + + init_list_head(&ab->peers); + init_waitqueue_head(&ab->peer_mapping_wq); + init_waitqueue_head(&ab->wmi_sc.tx_credits_wq); + init_work(&ab->restart_work, ath11k_core_restart); + timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); + ab->dev = dev; + + return ab; + +err_sc_free: + kfree(ab); + return null; +} + +static int __init ath11k_init(void) +{ + int ret; + + ret = ath11k_ahb_init(); + if (ret) + printk(kern_err "failed to register ath11k ahb driver: %d ", + ret); + return ret; +} +module_init(ath11k_init); + +static void __exit ath11k_exit(void) +{ + ath11k_ahb_exit(); +} +module_exit(ath11k_exit); + +module_description("driver support for qualcomm technologies 802.11ax wireless chip"); +module_license("dual bsd/gpl"); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/core.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_core_h +#define ath11k_core_h + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/bitfield.h> +#include "qmi.h" +#include "htc.h" +#include "wmi.h" +#include "hal.h" +#include "dp.h" +#include "ce.h" +#include "mac.h" +#include "hw.h" +#include "hal_rx.h" +#include "reg.h" + +#define sm(_v, _f) (((_v) << _f##_lsb) & _f##_mask) + +#define ath11k_tx_mgmt_num_pending_max 512 + +#define ath11k_tx_mgmt_target_max_support_wmi 64 + +/* pending management packets threshold for dropping probe responses */ +#define ath11k_prb_rsp_drop_threshold ((ath11k_tx_mgmt_target_max_support_wmi * 3) / 4) + +#define ath11k_invalid_hw_mac_id 0xff + +enum ath11k_supported_bw { + ath11k_bw_20 = 0, + ath11k_bw_40 = 1, + ath11k_bw_80 = 2, + ath11k_bw_160 = 3, +}; + +enum wme_ac { + wme_ac_be, + wme_ac_bk, + wme_ac_vi, + wme_ac_vo, + wme_num_ac +}; + +#define ath11k_ht_mcs_max 7 +#define ath11k_vht_mcs_max 9 +#define ath11k_he_mcs_max 11 + +static inline enum wme_ac ath11k_tid_to_ac(u32 tid) +{ + return (((tid == 0) || (tid == 3)) ? wme_ac_be : + ((tid == 1) || (tid == 2)) ? wme_ac_bk : + ((tid == 4) || (tid == 5)) ? 
wme_ac_vi : + wme_ac_vo); +} + +struct ath11k_skb_cb { + dma_addr_t paddr; + u8 eid; + struct ath11k *ar; + struct ieee80211_vif *vif; +} __packed; + +struct ath11k_skb_rxcb { + dma_addr_t paddr; + bool is_first_msdu; + bool is_last_msdu; + bool is_continuation; + struct hal_rx_desc *rx_desc; + u8 err_rel_src; + u8 err_code; + u8 mac_id; + u8 unmapped; +}; + +enum ath11k_hw_rev { + ath11k_hw_ipq8074, +}; + +enum ath11k_firmware_mode { + /* the default mode, standard 802.11 functionality */ + ath11k_firmware_mode_normal, + + /* factory tests etc */ + ath11k_firmware_mode_ftm, +}; + +#define ath11k_irq_num_max 52 +#define ath11k_ext_irq_grp_num_max 11 +#define ath11k_ext_irq_num_max 16 + +extern const u8 ath11k_reo_status_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 ath11k_tx_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 ath11k_rx_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 ath11k_rx_err_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 ath11k_rx_wbm_rel_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 ath11k_rxdma2host_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 ath11k_host2rxdma_ring_mask[ath11k_ext_irq_grp_num_max]; +extern const u8 rx_mon_status_ring_mask[ath11k_ext_irq_grp_num_max]; + +struct ath11k_ext_irq_grp { + struct ath11k_base *ab; + u32 irqs[ath11k_ext_irq_num_max]; + u32 num_irq; + u32 grp_id; + struct napi_struct napi; + struct net_device napi_ndev; + /* queue of pending packets, not expected to be accessed concurrently + * to avoid locking overhead. 
+ */ + struct sk_buff_head pending_q; +}; + +#define hehandle_cap_phyinfo_size 3 +#define hecap_phyinfo_size 9 +#define hecap_macinfo_size 5 +#define hecap_txrx_mcs_nss_size 2 +#define hecap_ppet16_ppet8_max_size 25 + +#define he_ppet16_ppet8_size 8 + +/* 802.11ax ppe (ppdu packet extension) threshold */ +struct he_ppe_threshold { + u32 numss_m1; + u32 ru_mask; + u32 ppet16_ppet8_ru3_ru0[he_ppet16_ppet8_size]; +}; + +struct ath11k_he { + u8 hecap_macinfo[hecap_macinfo_size]; + u32 hecap_rxmcsnssmap; + u32 hecap_txmcsnssmap; + u32 hecap_phyinfo[hehandle_cap_phyinfo_size]; + struct he_ppe_threshold hecap_ppet; + u32 heop_param; +}; + +#define max_radios 3 + +enum { + wmi_host_tp_scale_max = 0, + wmi_host_tp_scale_50 = 1, + wmi_host_tp_scale_25 = 2, + wmi_host_tp_scale_12 = 3, + wmi_host_tp_scale_min = 4, + wmi_host_tp_scale_size = 5, +}; + +enum ath11k_scan_state { + ath11k_scan_idle, + ath11k_scan_starting, + ath11k_scan_running, + ath11k_scan_aborting, +}; + +enum ath11k_dev_flags { + ath11k_cac_running, + ath11k_flag_core_registered, + ath11k_flag_crash_flush, + ath11k_flag_raw_mode, + ath11k_flag_hw_crypto_disabled, + ath11k_flag_btcoex, + ath11k_flag_recovery, + ath11k_flag_unregistering, + ath11k_flag_registered, +}; + +enum ath11k_monitor_flags { + ath11k_flag_monitor_enabled, +}; + +struct ath11k_vif { + u32 vdev_id; + enum wmi_vdev_type vdev_type; + enum wmi_vdev_subtype vdev_subtype; + u32 beacon_interval; + u32 dtim_period; + u16 ast_hash; + u16 tcl_metadata; + u8 hal_addr_search_flags; + u8 search_type; + + struct ath11k *ar; + struct ieee80211_vif *vif; + + u16 tx_seq_no; + struct wmi_wmm_params_all_arg wmm_params; + struct list_head list; + union { + struct { + u32 uapsd; + } sta; + struct { + /* 127 stations; wmi limit */ + u8 tim_bitmap[16]; + u8 tim_len; + u32 ssid_len; + u8 ssid[ieee80211_max_ssid_len]; + bool hidden_ssid; + /* p2p_ie with noa attribute for p2p_go case */ + u32 noa_len; + u8 *noa_data; + } ap; + } u; + + bool is_started; + bool 
is_up; + u32 aid; + u8 bssid[eth_alen]; + struct cfg80211_bitrate_mask bitrate_mask; + int num_legacy_stations; + int rtscts_prot_mode; + int txpower; +}; + +struct ath11k_vif_iter { + u32 vdev_id; + struct ath11k_vif *arvif; +}; + +struct ath11k_rx_peer_stats { + u64 num_msdu; + u64 num_mpdu_fcs_ok; + u64 num_mpdu_fcs_err; + u64 tcp_msdu_count; + u64 udp_msdu_count; + u64 other_msdu_count; + u64 ampdu_msdu_count; + u64 non_ampdu_msdu_count; + u64 stbc_count; + u64 beamformed_count; + u64 mcs_count[hal_rx_max_mcs + 1]; + u64 nss_count[hal_rx_max_nss]; + u64 bw_count[hal_rx_bw_max]; + u64 gi_count[hal_rx_gi_max]; + u64 coding_count[hal_rx_su_mu_coding_max]; + u64 tid_count[ieee80211_num_tids + 1]; + u64 pream_cnt[hal_rx_preamble_max]; + u64 reception_type[hal_rx_reception_type_max]; + u64 rx_duration; +}; + +#define ath11k_he_mcs_num 12 +#define ath11k_vht_mcs_num 10 +#define ath11k_bw_num 4 +#define ath11k_nss_num 4 +#define ath11k_legacy_num 12 +#define ath11k_gi_num 4 +#define ath11k_ht_mcs_num 32 + +enum ath11k_pkt_rx_err { + ath11k_pkt_rx_err_fcs, + ath11k_pkt_rx_err_tkip, + ath11k_pkt_rx_err_crypt, + ath11k_pkt_rx_err_peer_idx_inval, + ath11k_pkt_rx_err_max, +}; + +enum ath11k_ampdu_subfrm_num { + ath11k_ampdu_subfrm_num_10, + ath11k_ampdu_subfrm_num_20, + ath11k_ampdu_subfrm_num_30, + ath11k_ampdu_subfrm_num_40, + ath11k_ampdu_subfrm_num_50, + ath11k_ampdu_subfrm_num_60, + ath11k_ampdu_subfrm_num_more, + ath11k_ampdu_subfrm_num_max, +}; + +enum ath11k_amsdu_subfrm_num { + ath11k_amsdu_subfrm_num_1, + ath11k_amsdu_subfrm_num_2, + ath11k_amsdu_subfrm_num_3, + ath11k_amsdu_subfrm_num_4, + ath11k_amsdu_subfrm_num_more, + ath11k_amsdu_subfrm_num_max, +}; + +enum ath11k_counter_type { + ath11k_counter_type_bytes, + ath11k_counter_type_pkts, + ath11k_counter_type_max, +}; + +enum ath11k_stats_type { + ath11k_stats_type_succ, + ath11k_stats_type_fail, + ath11k_stats_type_retry, + ath11k_stats_type_ampdu, + ath11k_stats_type_max, +}; + +struct ath11k_htt_data_stats { 
+ u64 legacy[ath11k_counter_type_max][ath11k_legacy_num]; + u64 ht[ath11k_counter_type_max][ath11k_ht_mcs_num]; + u64 vht[ath11k_counter_type_max][ath11k_vht_mcs_num]; + u64 he[ath11k_counter_type_max][ath11k_he_mcs_num]; + u64 bw[ath11k_counter_type_max][ath11k_bw_num]; + u64 nss[ath11k_counter_type_max][ath11k_nss_num]; + u64 gi[ath11k_counter_type_max][ath11k_gi_num]; +}; + +struct ath11k_htt_tx_stats { + struct ath11k_htt_data_stats stats[ath11k_stats_type_max]; + u64 tx_duration; + u64 ba_fails; + u64 ack_fails; +}; + +struct ath11k_per_ppdu_tx_stats { + u16 succ_pkts; + u16 failed_pkts; + u16 retry_pkts; + u32 succ_bytes; + u32 failed_bytes; + u32 retry_bytes; +}; + +struct ath11k_sta { + struct ath11k_vif *arvif; + + /* the following are protected by ar->data_lock */ + u32 changed; /* ieee80211_rc_* */ + u32 bw; + u32 nss; + u32 smps; + + struct work_struct update_wk; + struct ieee80211_tx_info tx_info; + struct rate_info txrate; + struct rate_info last_txrate; + u64 rx_duration; + u8 rssi_comb; + struct ath11k_htt_tx_stats *tx_stats; + struct ath11k_rx_peer_stats *rx_stats; +}; + +#define ath11k_num_chans 41 +#define ath11k_max_5g_chan 173 + +enum ath11k_state { + ath11k_state_off, + ath11k_state_on, + ath11k_state_restarting, + ath11k_state_restarted, + ath11k_state_wedged, + /* add other states as required */ +}; + +/* antenna noise floor */ +#define ath11k_default_noise_floor -95 + +struct ath11k_fw_stats { + struct dentry *debugfs_fwstats; + u32 pdev_id; + u32 stats_id; + struct list_head pdevs; + struct list_head vdevs; + struct list_head bcn; +}; + +struct ath11k_dbg_htt_stats { + u8 type; + u8 reset; + struct debug_htt_stats_req *stats_req; + /* protects shared stats req buffer */ + spinlock_t lock; +}; + +struct ath11k_debug { + struct dentry *debugfs_pdev; + struct ath11k_dbg_htt_stats htt_stats; + u32 extd_tx_stats; + struct ath11k_fw_stats fw_stats; + struct completion fw_stats_complete; + bool fw_stats_done; + u32 extd_rx_stats; + u32 
pktlog_filter; + u32 pktlog_mode; + u32 pktlog_peer_valid; + u8 pktlog_peer_addr[eth_alen]; +}; + +struct ath11k_per_peer_tx_stats { + u32 succ_bytes; + u32 retry_bytes; + u32 failed_bytes; + u16 succ_pkts; + u16 retry_pkts; + u16 failed_pkts; + u32 duration; + u8 ba_fails; + bool is_ampdu; +}; + +#define ath11k_flush_timeout (5 * hz) + +struct ath11k_vdev_stop_status { + bool stop_in_progress; + u32 vdev_id; +}; + +struct ath11k { + struct ath11k_base *ab; + struct ath11k_pdev *pdev; + struct ieee80211_hw *hw; + struct ieee80211_ops *ops; + struct ath11k_pdev_wmi *wmi; + struct ath11k_pdev_dp dp; + u8 mac_addr[eth_alen]; + u32 ht_cap_info; + u32 vht_cap_info; + struct ath11k_he ar_he; + enum ath11k_state state; + struct { + struct completion started; + struct completion completed; + struct completion on_channel; + struct delayed_work timeout; + enum ath11k_scan_state state; + bool is_roc; + int vdev_id; + int roc_freq; + bool roc_notify; + } scan; + + struct { + struct ieee80211_supported_band sbands[num_nl80211_bands]; + } mac; + unsigned long dev_flags; + unsigned int filter_flags; + unsigned long monitor_flags; + u32 min_tx_power; + u32 max_tx_power; + u32 txpower_limit_2g; + u32 txpower_limit_5g; + u32 txpower_scale; + u32 power_scale; + u32 chan_tx_pwr; + u32 num_stations; + u32 max_num_stations; + bool monitor_present; + /* to synchronize concurrent synchronous mac80211 callback operations, + * concurrent debugfs configuration and concurrent fw statistics events. + */ + struct mutex conf_mutex; + /* protects the radio specific data like debug stats, ppdu_stats_info stats, + * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info, + * channel context data, survey info, test mode data. 
+ */ + spinlock_t data_lock; + + struct list_head arvifs; + /* should never be null; needed for regular htt rx */ + struct ieee80211_channel *rx_channel; + + /* valid during scan; needed for mgmt rx during scan */ + struct ieee80211_channel *scan_channel; + + u8 cfg_tx_chainmask; + u8 cfg_rx_chainmask; + u8 num_rx_chains; + u8 num_tx_chains; + /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */ + u8 pdev_idx; + u8 lmac_id; + + struct completion peer_assoc_done; + + int install_key_status; + struct completion install_key_done; + + int last_wmi_vdev_start_status; + struct ath11k_vdev_stop_status vdev_stop_status; + struct completion vdev_setup_done; + + int num_peers; + int max_num_peers; + u32 num_started_vdevs; + u32 num_created_vdevs; + + struct idr txmgmt_idr; + /* protects txmgmt_idr data */ + spinlock_t txmgmt_idr_lock; + atomic_t num_pending_mgmt_tx; + + /* cycle count is reported twice for each visited channel during scan. + * access protected by data_lock + */ + u32 survey_last_rx_clear_count; + u32 survey_last_cycle_count; + + /* channel info events are expected to come in pairs without and with + * complete flag set respectively for each channel visit during scan. + * + * however there are deviations from this rule. this flag is used to + * avoid reporting garbage data. 
+ */ + bool ch_info_can_report_survey; + struct survey_info survey[ath11k_num_chans]; + struct completion bss_survey_done; + + struct work_struct regd_update_work; + + struct work_struct wmi_mgmt_tx_work; + struct sk_buff_head wmi_mgmt_tx_queue; + + struct ath11k_per_peer_tx_stats peer_tx_stats; + struct list_head ppdu_stats_info; + u32 ppdu_stat_list_depth; + + struct ath11k_per_peer_tx_stats cached_stats; + u32 last_ppdu_id; + u32 cached_ppdu_id; +#ifdef config_ath11k_debugfs + struct ath11k_debug debug; +#endif + bool dfs_block_radar_events; +}; + +struct ath11k_band_cap { + u32 max_bw_supported; + u32 ht_cap_info; + u32 he_cap_info[2]; + u32 he_mcs; + u32 he_cap_phy_info[psoc_host_max_phy_size]; + struct ath11k_ppe_threshold he_ppet; +}; + +struct ath11k_pdev_cap { + u32 supported_bands; + u32 ampdu_density; + u32 vht_cap; + u32 vht_mcs; + u32 he_mcs; + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 tx_chain_mask_shift; + u32 rx_chain_mask_shift; + struct ath11k_band_cap band[num_nl80211_bands]; +}; + +struct ath11k_pdev { + struct ath11k *ar; + u32 pdev_id; + struct ath11k_pdev_cap cap; + u8 mac_addr[eth_alen]; +}; + +struct ath11k_board_data { + const struct firmware *fw; + const void *data; + size_t len; +}; + +/* ipq8074 hw channel counters frequency value in hertz */ +#define ipq8074_cc_freq_hertz 320000 + +struct ath11k_soc_dp_rx_stats { + u32 err_ring_pkts; + u32 invalid_rbm; + u32 rxdma_error[hal_reo_entr_ring_rxdma_ecode_max]; + u32 reo_error[hal_reo_dest_ring_error_code_max]; + u32 hal_reo_error[dp_reo_dst_ring_max]; +}; + +/* master structure to hold the hw data which may be used in core module */ +struct ath11k_base { + enum ath11k_hw_rev hw_rev; + struct platform_device *pdev; + struct device *dev; + struct ath11k_qmi qmi; + struct ath11k_wmi_base wmi_sc; + struct completion fw_ready; + struct rproc *tgt_rproc; + int num_radios; + /* hw channel counters frequency value in hertz common to all macs */ + u32 cc_freq_hz; + + struct ath11k_htc htc; + + 
struct ath11k_dp dp; + + void __iomem *mem; + unsigned long mem_len; + + const struct ath11k_hif_ops *hif_ops; + + struct ath11k_ce ce; + struct timer_list rx_replenish_retry; + struct ath11k_hal hal; + /* to synchronize core_start/core_stop */ + struct mutex core_lock; + /* protects data like peers */ + spinlock_t base_lock; + struct ath11k_pdev pdevs[max_radios]; + struct ath11k_pdev __rcu *pdevs_active[max_radios]; + struct ath11k_hal_reg_capabilities_ext hal_reg_cap[max_radios]; + unsigned long long free_vdev_map; + struct list_head peers; + wait_queue_head_t peer_mapping_wq; + u8 mac_addr[eth_alen]; + bool wmi_ready; + u32 wlan_init_status; + int irq_num[ath11k_irq_num_max]; + struct ath11k_ext_irq_grp ext_irq_grp[ath11k_ext_irq_grp_num_max]; + struct napi_struct *napi; + struct ath11k_targ_cap target_caps; + u32 ext_service_bitmap[wmi_service_ext_bm_size]; + bool pdevs_macaddr_valid; + int bd_api; + struct ath11k_hw_params hw_params; + const struct firmware *cal_file; + + /* below regd's are protected by ab->data_lock */ + /* this is the regd set for every radio + * by the firmware during initializatin + */ + struct ieee80211_regdomain *default_regd[max_radios]; + /* this regd is set during dynamic country setting + * this may or may not be used during the runtime + */ + struct ieee80211_regdomain *new_regd[max_radios]; + + /* current dfs regulatory */ + enum ath11k_dfs_region dfs_region; +#ifdef config_ath11k_debugfs + struct dentry *debugfs_soc; + struct dentry *debugfs_ath11k; +#endif + struct ath11k_soc_dp_rx_stats soc_stats; + + unsigned long dev_flags; + struct completion driver_recovery; + struct workqueue_struct *workqueue; + struct work_struct restart_work; + struct { + /* protected by data_lock */ + u32 fw_crash_counter; + } stats; +}; + +struct ath11k_fw_stats_pdev { + struct list_head list; + + /* pdev stats */ + s32 ch_noise_floor; + /* cycles spent transmitting frames */ + u32 tx_frame_count; + /* cycles spent receiving frames */ + u32 
rx_frame_count; + /* total channel busy time, evidently */ + u32 rx_clear_count; + /* total on-channel time */ + u32 cycle_count; + u32 phy_err_count; + u32 chan_tx_power; + u32 ack_rx_bad; + u32 rts_bad; + u32 rts_good; + u32 fcs_bad; + u32 no_beacons; + u32 mib_int_count; + + /* pdev tx stats */ + /* num htt cookies queued to dispatch list */ + s32 comp_queued; + /* num htt cookies dispatched */ + s32 comp_delivered; + /* num msdu queued to wal */ + s32 msdu_enqued; + /* num mpdu queue to wal */ + s32 mpdu_enqued; + /* num msdus dropped by wmm limit */ + s32 wmm_drop; + /* num local frames queued */ + s32 local_enqued; + /* num local frames done */ + s32 local_freed; + /* num queued to hw */ + s32 hw_queued; + /* num ppdu reaped from hw */ + s32 hw_reaped; + /* num underruns */ + s32 underrun; + /* num ppdus cleaned up in tx abort */ + s32 tx_abort; + /* num mpdus requed by sw */ + s32 mpdus_requed; + /* excessive retries */ + u32 tx_ko; + /* data hw rate code */ + u32 data_rc; + /* scheduler self triggers */ + u32 self_triggers; + /* frames dropped due to excessive sw retries */ + u32 sw_retry_failure; + /* illegal rate phy errors */ + u32 illgl_rate_phy_err; + /* wal pdev continuous xretry */ + u32 pdev_cont_xretry; + /* wal pdev tx timeouts */ + u32 pdev_tx_timeout; + /* wal pdev resets */ + u32 pdev_resets; + /* frames dropped due to non-availability of stateless tids */ + u32 stateless_tid_alloc_failure; + /* phy/bb underrun */ + u32 phy_underrun; + /* mpdu is more than txop limit */ + u32 txop_ovf; + + /* pdev rx stats */ + /* cnts any change in ring routing mid-ppdu */ + s32 mid_ppdu_route_change; + /* total number of statuses processed */ + s32 status_rcvd; + /* extra frags on rings 0-3 */ + s32 r0_frags; + s32 r1_frags; + s32 r2_frags; + s32 r3_frags; + /* msdus / mpdus delivered to htt */ + s32 htt_msdus; + s32 htt_mpdus; + /* msdus / mpdus delivered to local stack */ + s32 loc_msdus; + s32 loc_mpdus; + /* amsdus that have more msdus than the status 
ring size */ + s32 oversize_amsdu; + /* number of phy errors */ + s32 phy_errs; + /* number of phy errors drops */ + s32 phy_err_drop; + /* number of mpdu errors - fcs, mic, enc etc. */ + s32 mpdu_errs; +}; + +struct ath11k_fw_stats_vdev { + struct list_head list; + + u32 vdev_id; + u32 beacon_snr; + u32 data_snr; + u32 num_tx_frames[wlan_max_ac]; + u32 num_rx_frames; + u32 num_tx_frames_retries[wlan_max_ac]; + u32 num_tx_frames_failures[wlan_max_ac]; + u32 num_rts_fail; + u32 num_rts_success; + u32 num_rx_err; + u32 num_rx_discard; + u32 num_tx_not_acked; + u32 tx_rate_history[max_tx_rate_values]; + u32 beacon_rssi_history[max_tx_rate_values]; +}; + +struct ath11k_fw_stats_bcn { + struct list_head list; + + u32 vdev_id; + u32 tx_bcn_succ_cnt; + u32 tx_bcn_outage_cnt; +}; + +void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id); +void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id, + u8 *mac_addr, u16 ast_hash); +struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, + const u8 *addr); +struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab, + const u8 *addr); +struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id); +int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab); +int ath11k_core_init(struct ath11k_base *ath11k); +void ath11k_core_deinit(struct ath11k_base *ath11k); +struct ath11k_base *ath11k_core_alloc(struct device *dev); +void ath11k_core_free(struct ath11k_base *ath11k); +int ath11k_core_fetch_bdf(struct ath11k_base *ath11k, + struct ath11k_board_data *bd); +void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); + +void ath11k_core_halt(struct ath11k *ar); +u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx); + +static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state) +{ + switch (state) { + case ath11k_scan_idle: + return "idle"; + case ath11k_scan_starting: + return "starting"; + case 
ath11k_scan_running: + return "running"; + case ath11k_scan_aborting: + return "aborting"; + } + + return "unknown"; +} + +static inline struct ath11k_skb_cb *ath11k_skb_cb(struct sk_buff *skb) +{ + return (struct ath11k_skb_cb *)&ieee80211_skb_cb(skb)->driver_data; +} + +static inline struct ath11k_skb_rxcb *ath11k_skb_rxcb(struct sk_buff *skb) +{ + build_bug_on(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb)); + return (struct ath11k_skb_rxcb *)skb->cb; +} + +static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif) +{ + return (struct ath11k_vif *)vif->drv_priv; +} + +#endif /* _core_h_ */ diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/debug.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include <linux/vmalloc.h> +#include "core.h" +#include "debug.h" +#include "wmi.h" +#include "hal_rx.h" +#include "dp_tx.h" +#include "debug_htt_stats.h" +#include "peer.h" + +void ath11k_info(struct ath11k_base *ab, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + dev_info(ab->dev, "%pv", &vaf); + /* todo: trace the log */ + va_end(args); +} + +void ath11k_err(struct ath11k_base *ab, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + dev_err(ab->dev, "%pv", &vaf); + /* todo: trace the log */ + va_end(args); +} + +void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + dev_warn_ratelimited(ab->dev, "%pv", &vaf); + /* todo: trace the log */ + va_end(args); +} + +#ifdef config_ath11k_debug +void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask, + const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (ath11k_debug_mask & mask) + dev_printk(kern_debug, ab->dev, "%pv", &vaf); + + /* todo: trace log */ + + va_end(args); +} + +void ath11k_dbg_dump(struct ath11k_base *ab, + enum ath11k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ + char linebuf[256]; + size_t linebuflen; + const void *ptr; + + if (ath11k_debug_mask & mask) { + if (msg) + __ath11k_dbg(ab, mask, "%s ", msg); + + for (ptr = buf; (ptr - buf) < len; ptr += 16) { + linebuflen = 0; + linebuflen += scnprintf(linebuf + linebuflen, + sizeof(linebuf) - linebuflen, + "%s%08x: ", + (prefix ? prefix : ""), + (unsigned int)(ptr - buf)); + hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1, + linebuf + linebuflen, + sizeof(linebuf) - linebuflen, true); + dev_printk(kern_debug, ab->dev, "%s ", linebuf); + } + } +} + +#endif + +#ifdef config_ath11k_debugfs +static void ath11k_fw_stats_pdevs_free(struct list_head *head) +{ + struct ath11k_fw_stats_pdev *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath11k_fw_stats_vdevs_free(struct list_head *head) +{ + struct ath11k_fw_stats_vdev *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath11k_fw_stats_bcn_free(struct list_head *head) +{ + struct ath11k_fw_stats_bcn *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath11k_debug_fw_stats_reset(struct ath11k *ar) +{ + spin_lock_bh(&ar->data_lock); + ar->debug.fw_stats_done = false; + ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); + spin_unlock_bh(&ar->data_lock); +} + +void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct ath11k_fw_stats stats = {}; + struct ath11k *ar; + 
struct ath11k_pdev *pdev; + bool is_end; + static unsigned int num_vdev, num_bcn; + size_t total_vdevs_started = 0; + int i, ret; + + init_list_head(&stats.pdevs); + init_list_head(&stats.vdevs); + init_list_head(&stats.bcn); + + ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats); + if (ret) { + ath11k_warn(ab, "failed to pull fw stats: %d ", ret); + goto free; + } + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id); + if (!ar) { + rcu_read_unlock(); + ath11k_warn(ab, "failed to get ar for pdev_id %d: %d ", + stats.pdev_id, ret); + goto free; + } + + spin_lock_bh(&ar->data_lock); + + if (stats.stats_id == wmi_request_pdev_stat) { + list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs); + ar->debug.fw_stats_done = true; + goto complete; + } + + if (stats.stats_id == wmi_request_vdev_stat) { + if (list_empty(&stats.vdevs)) { + ath11k_warn(ab, "empty vdev stats"); + goto complete; + } + /* fw sends all the active vdev stats irrespective of pdev, + * hence limit until the count of all vdevs started + */ + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) + total_vdevs_started += ar->num_started_vdevs; + } + + is_end = ((++num_vdev) == total_vdevs_started ? true : false); + + list_splice_tail_init(&stats.vdevs, + &ar->debug.fw_stats.vdevs); + + if (is_end) { + ar->debug.fw_stats_done = true; + num_vdev = 0; + } + goto complete; + } + + if (stats.stats_id == wmi_request_bcn_stat) { + if (list_empty(&stats.bcn)) { + ath11k_warn(ab, "empty bcn stats"); + goto complete; + } + /* mark end until we reached the count of all started vdevs + * within the pdev + */ + is_end = ((++num_bcn) == ar->num_started_vdevs ? 
true : false); + + list_splice_tail_init(&stats.bcn, + &ar->debug.fw_stats.bcn); + + if (is_end) { + ar->debug.fw_stats_done = true; + num_bcn = 0; + } + } +complete: + complete(&ar->debug.fw_stats_complete); + rcu_read_unlock(); + spin_unlock_bh(&ar->data_lock); + +free: + ath11k_fw_stats_pdevs_free(&stats.pdevs); + ath11k_fw_stats_vdevs_free(&stats.vdevs); + ath11k_fw_stats_bcn_free(&stats.bcn); +} + +static int ath11k_debug_fw_stats_request(struct ath11k *ar, + struct stats_request_params *req_param) +{ + struct ath11k_base *ab = ar->ab; + unsigned long timeout, time_left; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + /* fw stats can get split when exceeding the stats data buffer limit. + * in that case, since there is no end marking for the back-to-back + * received 'update stats' event, we keep a 3 seconds timeout in case, + * fw_stats_done is not marked yet + */ + timeout = jiffies + msecs_to_jiffies(3 * hz); + + ath11k_debug_fw_stats_reset(ar); + + reinit_completion(&ar->debug.fw_stats_complete); + + ret = ath11k_wmi_send_stats_request_cmd(ar, req_param); + + if (ret) { + ath11k_warn(ab, "could not request fw stats (%d) ", + ret); + return ret; + } + + time_left = + wait_for_completion_timeout(&ar->debug.fw_stats_complete, + 1 * hz); + if (!time_left) + return -etimedout; + + for (;;) { + if (time_after(jiffies, timeout)) + break; + + spin_lock_bh(&ar->data_lock); + if (ar->debug.fw_stats_done) { + spin_unlock_bh(&ar->data_lock); + break; + } + spin_unlock_bh(&ar->data_lock); + } + return 0; +} + +static int ath11k_open_pdev_stats(struct inode *inode, struct file *file) +{ + struct ath11k *ar = inode->i_private; + struct ath11k_base *ab = ar->ab; + struct stats_request_params req_param; + void *buf = null; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto err_unlock; + } + + buf = vmalloc(ath11k_fw_stats_buf_size); + if (!buf) { + ret = -enomem; + goto err_unlock; + } + + 
req_param.pdev_id = ar->pdev->pdev_id; + req_param.vdev_id = 0; + req_param.stats_id = wmi_request_pdev_stat; + + ret = ath11k_debug_fw_stats_request(ar, &req_param); + if (ret) { + ath11k_warn(ab, "failed to request fw pdev stats: %d ", ret); + goto err_free; + } + + ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, + buf); + + file->private_data = buf; + + mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath11k_release_pdev_stats(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath11k_read_pdev_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_pdev_stats = { + .open = ath11k_open_pdev_stats, + .release = ath11k_release_pdev_stats, + .read = ath11k_read_pdev_stats, + .owner = this_module, + .llseek = default_llseek, +}; + +static int ath11k_open_vdev_stats(struct inode *inode, struct file *file) +{ + struct ath11k *ar = inode->i_private; + struct stats_request_params req_param; + void *buf = null; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto err_unlock; + } + + buf = vmalloc(ath11k_fw_stats_buf_size); + if (!buf) { + ret = -enomem; + goto err_unlock; + } + + req_param.pdev_id = ar->pdev->pdev_id; + /* vdev stats is always sent for all active vdevs from fw */ + req_param.vdev_id = 0; + req_param.stats_id = wmi_request_vdev_stat; + + ret = ath11k_debug_fw_stats_request(ar, &req_param); + if (ret) { + ath11k_warn(ar->ab, "failed to request fw vdev stats: %d ", ret); + goto err_free; + } + + ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, + buf); + + file->private_data = buf; + + 
mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath11k_release_vdev_stats(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath11k_read_vdev_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_vdev_stats = { + .open = ath11k_open_vdev_stats, + .release = ath11k_release_vdev_stats, + .read = ath11k_read_vdev_stats, + .owner = this_module, + .llseek = default_llseek, +}; + +static int ath11k_open_bcn_stats(struct inode *inode, struct file *file) +{ + struct ath11k *ar = inode->i_private; + struct ath11k_vif *arvif; + struct stats_request_params req_param; + void *buf = null; + int ret; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto err_unlock; + } + + buf = vmalloc(ath11k_fw_stats_buf_size); + if (!buf) { + ret = -enomem; + goto err_unlock; + } + + req_param.stats_id = wmi_request_bcn_stat; + req_param.pdev_id = ar->pdev->pdev_id; + + /* loop all active vdevs for bcn stats */ + list_for_each_entry(arvif, &ar->arvifs, list) { + if (!arvif->is_up) + continue; + + req_param.vdev_id = arvif->vdev_id; + ret = ath11k_debug_fw_stats_request(ar, &req_param); + if (ret) { + ath11k_warn(ar->ab, "failed to request fw bcn stats: %d ", ret); + goto err_free; + } + } + + ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id, + buf); + + /* since beacon stats request is looped for all active vdevs, saved fw + * stats is not freed for each request until done for all active vdevs + */ + spin_lock_bh(&ar->data_lock); + ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn); + spin_unlock_bh(&ar->data_lock); + + file->private_data = buf; + + 
mutex_unlock(&ar->conf_mutex); + return 0; + +err_free: + vfree(buf); + +err_unlock: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int ath11k_release_bcn_stats(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static ssize_t ath11k_read_bcn_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_bcn_stats = { + .open = ath11k_open_bcn_stats, + .release = ath11k_release_bcn_stats, + .read = ath11k_read_bcn_stats, + .owner = this_module, + .llseek = default_llseek, +}; + +static ssize_t ath11k_read_simulate_fw_crash(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char buf[] = + "to simulate firmware crash write one of the keywords to this file: " + "'assert' - this will send wmi_force_fw_hang_cmdid to firmware to cause assert. " + "'hw-restart' - this will simply queue hw restart without fw/hw actually crashing. "; + + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +/* simulate firmware crash: + * 'soft': call wmi command causing firmware hang. this firmware hang is + * recoverable by warm firmware reset. + * 'hard': force firmware crash by setting any vdev parameter for not allowed + * vdev id. this is hard firmware crash because it is recoverable only by cold + * firmware reset. 
+ */ +static ssize_t ath11k_write_simulate_fw_crash(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k_base *ab = file->private_data; + struct ath11k_pdev *pdev; + struct ath11k *ar = ab->pdevs[0].ar; + char buf[32] = {0}; + ssize_t rc; + int i, ret, radioup; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (ar && ar->state == ath11k_state_on) { + radioup = 1; + break; + } + } + /* filter partial writes and invalid commands */ + if (*ppos != 0 || count >= sizeof(buf) || count == 0) + return -einval; + + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + if (rc < 0) + return rc; + + /* drop the possible ' ' from the end */ + if (buf[*ppos - 1] == ' ') + buf[*ppos - 1] = ''; + + if (radioup == 0) { + ret = -enetdown; + goto exit; + } + + if (!strcmp(buf, "assert")) { + ath11k_info(ab, "simulating firmware assert crash "); + ret = ath11k_wmi_force_fw_hang_cmd(ar, + ath11k_wmi_fw_hang_assert_type, + ath11k_wmi_fw_hang_delay); + } else { + ret = -einval; + goto exit; + } + + if (ret) { + ath11k_warn(ab, "failed to simulate firmware crash: %d ", ret); + goto exit; + } + + ret = count; + +exit: + return ret; +} + +static const struct file_operations fops_simulate_fw_crash = { + .read = ath11k_read_simulate_fw_crash, + .write = ath11k_write_simulate_fw_crash, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + u32 filter; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &filter)) + return -einval; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto out; + } + + if (filter == ar->debug.extd_tx_stats) { + ret = count; + goto out; + } + + ar->debug.extd_tx_stats = filter; + ret = count; + +out: + 
mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32] = {0}; + struct ath11k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%08x ", + ar->debug.extd_tx_stats); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_extd_tx_stats = { + .read = ath11k_read_enable_extd_tx_stats, + .write = ath11k_write_enable_extd_tx_stats, + .open = simple_open +}; + +static ssize_t ath11k_write_extd_rx_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + u32 enable, rx_filter = 0, ring_id; + int ret; + + if (kstrtouint_from_user(ubuf, count, 0, &enable)) + return -einval; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto exit; + } + + if (enable > 1) { + ret = -einval; + goto exit; + } + + if (enable == ar->debug.extd_rx_stats) { + ret = count; + goto exit; + } + + if (enable) { + rx_filter = htt_rx_filter_tlv_flags_mpdu_start; + rx_filter |= htt_rx_filter_tlv_flags_ppdu_start; + rx_filter |= htt_rx_filter_tlv_flags_ppdu_end; + rx_filter |= htt_rx_filter_tlv_flags_ppdu_end_user_stats; + rx_filter |= htt_rx_filter_tlv_flags_ppdu_end_user_stats_ext; + rx_filter |= htt_rx_filter_tlv_flags_ppdu_end_status_done; + + tlv_filter.rx_filter = rx_filter; + tlv_filter.pkt_filter_flags0 = htt_rx_fp_mgmt_filter_flags0; + tlv_filter.pkt_filter_flags1 = htt_rx_fp_mgmt_filter_flags1; + tlv_filter.pkt_filter_flags2 = htt_rx_fp_ctrl_filter_flasg2; + tlv_filter.pkt_filter_flags3 = htt_rx_fp_ctrl_filter_flasg3 | + htt_rx_fp_data_filter_flasg3; + } else { + tlv_filter = ath11k_mac_mon_status_filter_default; + } + + ring_id = 
ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id; + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, + hal_rxdma_monitor_status, + dp_rx_buffer_size, &tlv_filter); + + if (ret) { + ath11k_warn(ar->ab, "failed to set rx filter for moniter status ring "); + goto exit; + } + + ar->debug.extd_rx_stats = enable; + ret = count; +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath11k_read_extd_rx_stats(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char buf[32]; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%d ", + ar->debug.extd_rx_stats); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_extd_rx_stats = { + .read = ath11k_read_extd_rx_stats, + .write = ath11k_write_extd_rx_stats, + .open = simple_open, +}; + +static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k_base *ab = file->private_data; + struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats; + int len = 0, i, retval; + const int size = 4096; + static const char *rxdma_err[hal_reo_entr_ring_rxdma_ecode_max] = { + "overflow", "mpdu len", "fcs", "decrypt", "tkip mic", + "unencrypt", "msdu len", "msdu limit", "wifi parse", + "amsdu parse", "sa timeout", "da timeout", + "flow timeout", "flush req"}; + static const char *reo_err[hal_reo_dest_ring_error_code_max] = { + "desc addr zero", "desc inval", "ampdu in non ba", + "non ba dup", "ba dup", "frame 2k jump", "bar 2k jump", + "frame oor", "bar oor", "no ba session", + "frame sn equal ssn", "pn check fail", "2k err", + "pn err", "desc blocked"}; + + char *buf; + + buf = kzalloc(size, gfp_kernel); + if (!buf) + return -enomem; + + len += scnprintf(buf + len, size - len, "soc rx stats: "); + len += scnprintf(buf + len, 
size - len, "err ring pkts: %u ", + soc_stats->err_ring_pkts); + len += scnprintf(buf + len, size - len, "invalid rbm: %u ", + soc_stats->invalid_rbm); + len += scnprintf(buf + len, size - len, "rxdma errors: "); + for (i = 0; i < hal_reo_entr_ring_rxdma_ecode_max; i++) + len += scnprintf(buf + len, size - len, "%s: %u ", + rxdma_err[i], soc_stats->rxdma_error[i]); + + len += scnprintf(buf + len, size - len, " reo errors: "); + for (i = 0; i < hal_reo_dest_ring_error_code_max; i++) + len += scnprintf(buf + len, size - len, "%s: %u ", + reo_err[i], soc_stats->reo_error[i]); + + len += scnprintf(buf + len, size - len, " hal reo errors: "); + len += scnprintf(buf + len, size - len, + "ring0: %u ring1: %u ring2: %u ring3: %u ", + soc_stats->hal_reo_error[0], + soc_stats->hal_reo_error[1], + soc_stats->hal_reo_error[2], + soc_stats->hal_reo_error[3]); + + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +static const struct file_operations fops_soc_rx_stats = { + .read = ath11k_debug_dump_soc_rx_stats, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +int ath11k_debug_pdev_create(struct ath11k_base *ab) +{ + ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k); + + if (is_err_or_null(ab->debugfs_soc)) { + if (is_err(ab->debugfs_soc)) + return ptr_err(ab->debugfs_soc); + return -enomem; + } + + debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, + &fops_simulate_fw_crash); + + debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab, + &fops_soc_rx_stats); + + return 0; +} + +void ath11k_debug_pdev_destroy(struct ath11k_base *ab) +{ + debugfs_remove_recursive(ab->debugfs_ath11k); + ab->debugfs_ath11k = null; +} + +int ath11k_debug_soc_create(struct ath11k_base *ab) +{ + ab->debugfs_ath11k = debugfs_create_dir("ath11k", null); + + if (is_err_or_null(ab->debugfs_ath11k)) { + if (is_err(ab->debugfs_ath11k)) + 
return ptr_err(ab->debugfs_ath11k); + return -enomem; + } + + return 0; +} + +void ath11k_debug_soc_destroy(struct ath11k_base *ab) +{ + debugfs_remove_recursive(ab->debugfs_soc); + ab->debugfs_soc = null; +} + +void ath11k_debug_fw_stats_init(struct ath11k *ar) +{ + struct dentry *fwstats_dir = debugfs_create_dir("fw_stats", + ar->debug.debugfs_pdev); + + ar->debug.fw_stats.debugfs_fwstats = fwstats_dir; + + /* all stats debugfs files created are under "fw_stats" directory + * created per pdev + */ + debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar, + &fops_pdev_stats); + debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar, + &fops_vdev_stats); + debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar, + &fops_bcn_stats); + + init_list_head(&ar->debug.fw_stats.pdevs); + init_list_head(&ar->debug.fw_stats.vdevs); + init_list_head(&ar->debug.fw_stats.bcn); + + init_completion(&ar->debug.fw_stats_complete); +} + +static ssize_t ath11k_write_pktlog_filter(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + u32 rx_filter = 0, ring_id, filter, mode; + u8 buf[128] = {0}; + int ret; + ssize_t rc; + + mutex_lock(&ar->conf_mutex); + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto out; + } + + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count); + if (rc < 0) { + ret = rc; + goto out; + } + buf[rc] = ''; + + ret = sscanf(buf, "0x%x %u", &filter, &mode); + if (ret != 2) { + ret = -einval; + goto out; + } + + if (filter) { + ret = ath11k_wmi_pdev_pktlog_enable(ar, filter); + if (ret) { + ath11k_warn(ar->ab, + "failed to enable pktlog filter %x: %d ", + ar->debug.pktlog_filter, ret); + goto out; + } + } else { + ret = ath11k_wmi_pdev_pktlog_disable(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to disable pktlog: %d ", ret); + goto out; + } + } + +#define htt_rx_filter_tlv_lite_mode \ + 
(htt_rx_filter_tlv_flags_ppdu_start | \ + htt_rx_filter_tlv_flags_ppdu_end | \ + htt_rx_filter_tlv_flags_ppdu_end_user_stats | \ + htt_rx_filter_tlv_flags_ppdu_end_user_stats_ext | \ + htt_rx_filter_tlv_flags_ppdu_end_status_done | \ + htt_rx_filter_tlv_flags_mpdu_start) + + if (mode == ath11k_pktlog_mode_full) { + rx_filter = htt_rx_filter_tlv_lite_mode | + htt_rx_filter_tlv_flags_msdu_start | + htt_rx_filter_tlv_flags_msdu_end | + htt_rx_filter_tlv_flags_mpdu_end | + htt_rx_filter_tlv_flags_packet_header | + htt_rx_filter_tlv_flags_attention; + } else if (mode == ath11k_pktlog_mode_lite) { + rx_filter = htt_rx_filter_tlv_lite_mode; + } + + tlv_filter.rx_filter = rx_filter; + if (rx_filter) { + tlv_filter.pkt_filter_flags0 = htt_rx_fp_mgmt_filter_flags0; + tlv_filter.pkt_filter_flags1 = htt_rx_fp_mgmt_filter_flags1; + tlv_filter.pkt_filter_flags2 = htt_rx_fp_ctrl_filter_flasg2; + tlv_filter.pkt_filter_flags3 = htt_rx_fp_ctrl_filter_flasg3 | + htt_rx_fp_data_filter_flasg3; + } + + ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id; + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, + hal_rxdma_monitor_status, + dp_rx_buffer_size, &tlv_filter); + if (ret) { + ath11k_warn(ar->ab, "failed to set rx filter for moniter status ring "); + goto out; + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, "pktlog filter %d mode %s ", + filter, ((mode == ath11k_pktlog_mode_full) ? 
"full" : "lite")); + + ar->debug.pktlog_filter = filter; + ar->debug.pktlog_mode = mode; + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath11k_read_pktlog_filter(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) + +{ + char buf[32] = {0}; + struct ath11k *ar = file->private_data; + int len = 0; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf) - len, "%08x %08x ", + ar->debug.pktlog_filter, + ar->debug.pktlog_mode); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_pktlog_filter = { + .read = ath11k_read_pktlog_filter, + .write = ath11k_write_pktlog_filter, + .open = simple_open +}; + +static ssize_t ath11k_write_simulate_radar(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + int ret; + + ret = ath11k_wmi_simulate_radar(ar); + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_simulate_radar = { + .write = ath11k_write_simulate_radar, + .open = simple_open +}; + +int ath11k_debug_register(struct ath11k *ar) +{ + struct ath11k_base *ab = ar->ab; + char pdev_name[5]; + char buf[100] = {0}; + + snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx); + + ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc); + + if (is_err_or_null(ar->debug.debugfs_pdev)) { + if (is_err(ar->debug.debugfs_pdev)) + return ptr_err(ar->debug.debugfs_pdev); + + return -enomem; + } + + /* create a symlink under ieee80211/phy* */ + snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev); + debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf); + + ath11k_debug_htt_stats_init(ar); + + ath11k_debug_fw_stats_init(ar); + + debugfs_create_file("ext_tx_stats", 0644, + ar->debug.debugfs_pdev, ar, + &fops_extd_tx_stats); + debugfs_create_file("ext_rx_stats", 0644, 
+ ar->debug.debugfs_pdev, ar, + &fops_extd_rx_stats); + debugfs_create_file("pktlog_filter", 0644, + ar->debug.debugfs_pdev, ar, + &fops_pktlog_filter); + + if (ar->hw->wiphy->bands[nl80211_band_5ghz]) { + debugfs_create_file("dfs_simulate_radar", 0200, + ar->debug.debugfs_pdev, ar, + &fops_simulate_radar); + debugfs_create_bool("dfs_block_radar_events", 0200, + ar->debug.debugfs_pdev, + &ar->dfs_block_radar_events); + } + + return 0; +} + +void ath11k_debug_unregister(struct ath11k *ar) +{ +} +#endif /* config_ath11k_debugfs */ diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/debug.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#ifndef _ath11k_debug_h_ +#define _ath11k_debug_h_ + +#include "hal_tx.h" +#include "trace.h" + +enum ath11k_debug_mask { + ath11k_dbg_ahb = 0x00000001, + ath11k_dbg_wmi = 0x00000002, + ath11k_dbg_htc = 0x00000004, + ath11k_dbg_dp_htt = 0x00000008, + ath11k_dbg_mac = 0x00000010, + ath11k_dbg_boot = 0x00000020, + ath11k_dbg_qmi = 0x00000040, + ath11k_dbg_data = 0x00000080, + ath11k_dbg_mgmt = 0x00000100, + ath11k_dbg_reg = 0x00000200, + ath11k_dbg_testmode = 0x00000400, + ath11k_dbg_hal = 0x00000800, + ath11k_dbg_any = 0xffffffff, +}; + +/* htt_dbg_ext_stats_type */ +enum ath11k_dbg_htt_ext_stats_type { + ath11k_dbg_htt_ext_stats_reset = 0, + ath11k_dbg_htt_ext_stats_pdev_tx = 1, + ath11k_dbg_htt_ext_stats_pdev_rx = 2, + ath11k_dbg_htt_ext_stats_pdev_tx_hwq = 3, + ath11k_dbg_htt_ext_stats_pdev_tx_sched = 4, + ath11k_dbg_htt_ext_stats_pdev_error = 5, + ath11k_dbg_htt_ext_stats_pdev_tqm = 6, + ath11k_dbg_htt_ext_stats_tqm_cmdq = 7, + ath11k_dbg_htt_ext_stats_tx_de_info = 8, + ath11k_dbg_htt_ext_stats_pdev_tx_rate = 9, + ath11k_dbg_htt_ext_stats_pdev_rx_rate = 10, + ath11k_dbg_htt_ext_stats_peer_info = 11, + ath11k_dbg_htt_ext_stats_tx_selfgen_info = 12, + 
ath11k_dbg_htt_ext_stats_tx_mu_hwq = 13, + ath11k_dbg_htt_ext_stats_ring_if_info = 14, + ath11k_dbg_htt_ext_stats_srng_info = 15, + ath11k_dbg_htt_ext_stats_sfm_info = 16, + ath11k_dbg_htt_ext_stats_pdev_tx_mu = 17, + ath11k_dbg_htt_ext_stats_active_peers_list = 18, + ath11k_dbg_htt_ext_stats_pdev_cca_stats = 19, + ath11k_dbg_htt_ext_stats_twt_sessions = 20, + ath11k_dbg_htt_ext_stats_reo_resource_stats = 21, + ath11k_dbg_htt_ext_stats_tx_sounding_info = 22, + + /* keep this last */ + ath11k_dbg_htt_num_ext_stats, +}; + +struct debug_htt_stats_req { + bool done; + u8 pdev_id; + u8 type; + u8 peer_addr[eth_alen]; + struct completion cmpln; + u32 buf_len; + u8 buf[0]; +}; + +#define ath11k_htt_stats_buf_size (1024 * 512) + +#define ath11k_fw_stats_buf_size (1024 * 1024) + +#define ath11k_htt_pktlog_max_size 2048 + +enum ath11k_pktlog_filter { + ath11k_pktlog_rx = 0x000000001, + ath11k_pktlog_tx = 0x000000002, + ath11k_pktlog_rcfind = 0x000000004, + ath11k_pktlog_rcupdate = 0x000000008, + ath11k_pktlog_event_smart_ant = 0x000000020, + ath11k_pktlog_event_sw = 0x000000040, + ath11k_pktlog_any = 0x00000006f, +}; + +enum ath11k_pktlog_mode { + ath11k_pktlog_mode_lite = 1, + ath11k_pktlog_mode_full = 2, +}; + +enum ath11k_pktlog_enum { + ath11k_pktlog_type_tx_ctrl = 1, + ath11k_pktlog_type_tx_stat = 2, + ath11k_pktlog_type_tx_msdu_id = 3, + ath11k_pktlog_type_rx_stat = 5, + ath11k_pktlog_type_rc_find = 6, + ath11k_pktlog_type_rc_update = 7, + ath11k_pktlog_type_tx_virt_addr = 8, + ath11k_pktlog_type_rx_cbf = 10, + ath11k_pktlog_type_rx_statbuf = 22, + ath11k_pktlog_type_ppdu_stats = 23, + ath11k_pktlog_type_lite_rx = 24, +}; + +__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...); +__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...); +__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...); + +extern unsigned int ath11k_debug_mask; + +#ifdef config_ath11k_debug +__printf(3, 4) void __ath11k_dbg(struct 
ath11k_base *ab, + enum ath11k_debug_mask mask, + const char *fmt, ...); +void ath11k_dbg_dump(struct ath11k_base *ab, + enum ath11k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len); +#else /* config_ath11k_debug */ +static inline int __ath11k_dbg(struct ath11k_base *ab, + enum ath11k_debug_mask dbg_mask, + const char *fmt, ...) +{ + return 0; +} + +static inline void ath11k_dbg_dump(struct ath11k_base *ab, + enum ath11k_debug_mask mask, + const char *msg, const char *prefix, + const void *buf, size_t len) +{ +} +#endif /* config_ath11k_debug */ + +#ifdef config_ath11k_debugfs +int ath11k_debug_soc_create(struct ath11k_base *ab); +void ath11k_debug_soc_destroy(struct ath11k_base *ab); +int ath11k_debug_pdev_create(struct ath11k_base *ab); +void ath11k_debug_pdev_destroy(struct ath11k_base *ab); +int ath11k_debug_register(struct ath11k *ar); +void ath11k_debug_unregister(struct ath11k *ar); +void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, + struct sk_buff *skb); +void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb); + +void ath11k_debug_fw_stats_init(struct ath11k *ar); +int ath11k_dbg_htt_stats_req(struct ath11k *ar); + +static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar) +{ + return (ar->debug.pktlog_mode == ath11k_pktlog_mode_lite); +} + +static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar) +{ + return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode); +} + +static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr) +{ + return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode && + ether_addr_equal(addr, ar->debug.pktlog_peer_addr)); +} + +static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar) +{ + return ar->debug.extd_tx_stats; +} + +static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar) +{ + return ar->debug.extd_rx_stats; +} +#else +static inline int 
ath11k_debug_soc_create(struct ath11k_base *ab) +{ + return 0; +} + +static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab) +{ +} + +static inline int ath11k_debug_pdev_create(struct ath11k_base *ab) +{ + return 0; +} + +static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab) +{ +} + +static inline int ath11k_debug_register(struct ath11k *ar) +{ + return 0; +} + +static inline void ath11k_debug_unregister(struct ath11k *ar) +{ +} + +static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, + struct sk_buff *skb) +{ +} + +static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab, + struct sk_buff *skb) +{ +} + +static inline void ath11k_debug_fw_stats_init(struct ath11k *ar) +{ +} + +static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar) +{ + return 0; +} + +static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar) +{ + return 0; +} + +static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar) +{ + return 0; +} + +static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar) +{ + return false; +} + +static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar) +{ + return false; +} + +static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr) +{ + return false; +} +#endif /* config_ath11k_debugfs */ + +#ifdef config_mac80211_debugfs +void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct dentry *dir); +void +ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, + struct ath11k_per_peer_tx_stats *peer_stats, + u8 legacy_rate_idx); +void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar, + struct sk_buff *msdu, + struct hal_tx_status *ts); +#else /* !config_mac80211_debugfs */ +static inline void +ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, + struct ath11k_per_peer_tx_stats *peer_stats, + u8 legacy_rate_idx) +{ +} + 
+static inline void +ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar, + struct sk_buff *msdu, + struct hal_tx_status *ts) +{ +} + +#endif /* config_mac80211_debugfs*/ + +#define ath11k_dbg(ar, dbg_mask, fmt, ...) \ +do { \ + if (ath11k_debug_mask & dbg_mask) \ + __ath11k_dbg(ar, dbg_mask, fmt, ##__va_args__); \ +} while (0) + +#endif /* _ath11k_debug_h_ */ diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include <linux/vmalloc.h> +#include "core.h" +#include "dp_tx.h" +#include "dp_rx.h" +#include "debug.h" +#include "debug_htt_stats.h" + +#define htt_dbg_out(buf, len, fmt, ...) \ + scnprintf(buf, len, fmt " ", ##__va_args__) + +#define htt_max_string_len 256 +#define htt_max_print_char_per_elem 15 + +#define htt_tlv_hdr_len 4 + +#define array_to_string(out, arr, len) \ + do { \ + int index = 0; u8 i; \ + for (i = 0; i < len; i++) { \ + index += snprintf(out + index, htt_max_string_len - index, \ + " %u:%u,", i, arr[i]); \ + if (index < 0 || index >= htt_max_string_len) \ + break; \ + } \ + } while (0) + +static inline void htt_print_stats_string_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_stats_string_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u8 i; + u16 index = 0; + char data[htt_max_string_len] = {0}; + + tag_len = tag_len >> 2; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_stats_string_tlv:"); + + for (i = 0; i < tag_len; i++) { + index += snprintf(&data[index], + htt_max_string_len - index, + "%.*s", 4, (char *)&(htt_stats_buf->data[i])); + if (index >= htt_max_string_len) + break; + } + + len += htt_dbg_out(buf + len, buf_len - 
len, "data = %s ", data); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_pdev_stats_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "hw_queued = %u", + htt_stats_buf->hw_queued); + len += htt_dbg_out(buf + len, buf_len - len, "hw_reaped = %u", + htt_stats_buf->hw_reaped); + len += htt_dbg_out(buf + len, buf_len - len, "underrun = %u", + htt_stats_buf->underrun); + len += htt_dbg_out(buf + len, buf_len - len, "hw_paused = %u", + htt_stats_buf->hw_paused); + len += htt_dbg_out(buf + len, buf_len - len, "hw_flush = %u", + htt_stats_buf->hw_flush); + len += htt_dbg_out(buf + len, buf_len - len, "hw_filt = %u", + htt_stats_buf->hw_filt); + len += htt_dbg_out(buf + len, buf_len - len, "tx_abort = %u", + htt_stats_buf->tx_abort); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_requeued = %u", + htt_stats_buf->mpdu_requed); + len += htt_dbg_out(buf + len, buf_len - len, "tx_xretry = %u", + htt_stats_buf->tx_xretry); + len += htt_dbg_out(buf + len, buf_len - len, "data_rc = %u", + htt_stats_buf->data_rc); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_dropped_xretry = %u", + htt_stats_buf->mpdu_dropped_xretry); + len += htt_dbg_out(buf + len, buf_len - len, "illegal_rate_phy_err = %u", + htt_stats_buf->illgl_rate_phy_err); + len += htt_dbg_out(buf + len, buf_len - len, "cont_xretry = %u", + htt_stats_buf->cont_xretry); + len += htt_dbg_out(buf + len, buf_len - len, "tx_timeout = %u", + htt_stats_buf->tx_timeout); + len += htt_dbg_out(buf + len, buf_len - 
len, "pdev_resets = %u", + htt_stats_buf->pdev_resets); + len += htt_dbg_out(buf + len, buf_len - len, "phy_underrun = %u", + htt_stats_buf->phy_underrun); + len += htt_dbg_out(buf + len, buf_len - len, "txop_ovf = %u", + htt_stats_buf->txop_ovf); + len += htt_dbg_out(buf + len, buf_len - len, "seq_posted = %u", + htt_stats_buf->seq_posted); + len += htt_dbg_out(buf + len, buf_len - len, "seq_failed_queueing = %u", + htt_stats_buf->seq_failed_queueing); + len += htt_dbg_out(buf + len, buf_len - len, "seq_completed = %u", + htt_stats_buf->seq_completed); + len += htt_dbg_out(buf + len, buf_len - len, "seq_restarted = %u", + htt_stats_buf->seq_restarted); + len += htt_dbg_out(buf + len, buf_len - len, "mu_seq_posted = %u", + htt_stats_buf->mu_seq_posted); + len += htt_dbg_out(buf + len, buf_len - len, "seq_switch_hw_paused = %u", + htt_stats_buf->seq_switch_hw_paused); + len += htt_dbg_out(buf + len, buf_len - len, "next_seq_posted_dsr = %u", + htt_stats_buf->next_seq_posted_dsr); + len += htt_dbg_out(buf + len, buf_len - len, "seq_posted_isr = %u", + htt_stats_buf->seq_posted_isr); + len += htt_dbg_out(buf + len, buf_len - len, "seq_ctrl_cached = %u", + htt_stats_buf->seq_ctrl_cached); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_count_tqm = %u", + htt_stats_buf->mpdu_count_tqm); + len += htt_dbg_out(buf + len, buf_len - len, "msdu_count_tqm = %u", + htt_stats_buf->msdu_count_tqm); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_removed_tqm = %u", + htt_stats_buf->mpdu_removed_tqm); + len += htt_dbg_out(buf + len, buf_len - len, "msdu_removed_tqm = %u", + htt_stats_buf->msdu_removed_tqm); + len += htt_dbg_out(buf + len, buf_len - len, "mpdus_sw_flush = %u", + htt_stats_buf->mpdus_sw_flush); + len += htt_dbg_out(buf + len, buf_len - len, "mpdus_hw_filter = %u", + htt_stats_buf->mpdus_hw_filter); + len += htt_dbg_out(buf + len, buf_len - len, "mpdus_truncated = %u", + htt_stats_buf->mpdus_truncated); + len += htt_dbg_out(buf + len, buf_len - len, 
"mpdus_ack_failed = %u", + htt_stats_buf->mpdus_ack_failed); + len += htt_dbg_out(buf + len, buf_len - len, "mpdus_expired = %u", + htt_stats_buf->mpdus_expired); + len += htt_dbg_out(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u", + htt_stats_buf->mpdus_seq_hw_retry); + len += htt_dbg_out(buf + len, buf_len - len, "ack_tlv_proc = %u", + htt_stats_buf->ack_tlv_proc); + len += htt_dbg_out(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u", + htt_stats_buf->coex_abort_mpdu_cnt_valid); + len += htt_dbg_out(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u", + htt_stats_buf->coex_abort_mpdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u", + htt_stats_buf->num_total_ppdus_tried_ota); + len += htt_dbg_out(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u", + htt_stats_buf->num_data_ppdus_tried_ota); + len += htt_dbg_out(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u", + htt_stats_buf->local_ctrl_mgmt_enqued); + len += htt_dbg_out(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u", + htt_stats_buf->local_ctrl_mgmt_freed); + len += htt_dbg_out(buf + len, buf_len - len, "local_data_enqued = %u", + htt_stats_buf->local_data_enqued); + len += htt_dbg_out(buf + len, buf_len - len, "local_data_freed = %u", + htt_stats_buf->local_data_freed); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_tried = %u", + htt_stats_buf->mpdu_tried); + len += htt_dbg_out(buf + len, buf_len - len, "isr_wait_seq_posted = %u", + htt_stats_buf->isr_wait_seq_posted); + len += htt_dbg_out(buf + len, buf_len - len, "tx_active_dur_us_low = %u", + htt_stats_buf->tx_active_dur_us_low); + len += htt_dbg_out(buf + len, buf_len - len, "tx_active_dur_us_high = %u ", + htt_stats_buf->tx_active_dur_us_high); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req 
*stats_req) +{ + const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char urrn_stats[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_tx_pdev_max_urrn_stats); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_pdev_stats_urrn_tlv_v:"); + + array_to_string(urrn_stats, htt_stats_buf->urrn_stats, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "urrn_stats = %s ", urrn_stats); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char flush_errs[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_tx_pdev_max_flush_reason_stats); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_pdev_stats_flush_tlv_v:"); + + array_to_string(flush_errs, htt_stats_buf->flush_errs, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "flush_errs = %s ", flush_errs); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char sifs_status[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_tx_pdev_max_sifs_burst_stats); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_pdev_stats_sifs_tlv_v:"); + + array_to_string(sifs_status, 
htt_stats_buf->sifs_status, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "sifs_status = %s ", + sifs_status); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char phy_errs[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_tx_pdev_max_phy_err_stats); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_pdev_stats_phy_err_tlv_v:"); + + array_to_string(phy_errs, htt_stats_buf->phy_errs, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "phy_errs = %s ", phy_errs); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char sifs_hist_status[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_tx_pdev_max_sifs_burst_hist_stats); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_stats_sifs_hist_tlv_v:"); + + array_to_string(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "sifs_hist_status = %s ", + sifs_hist_status); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct 
htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_stats_tx_ppdu_stats_tlv_v:"); + + len += htt_dbg_out(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u", + htt_stats_buf->num_data_ppdus_legacy_su); + + len += htt_dbg_out(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u", + htt_stats_buf->num_data_ppdus_ac_su); + + len += htt_dbg_out(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u", + htt_stats_buf->num_data_ppdus_ax_su); + + len += htt_dbg_out(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u", + htt_stats_buf->num_data_ppdus_ac_su_txbf); + + len += htt_dbg_out(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u ", + htt_stats_buf->num_data_ppdus_ax_su_txbf); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char tried_mpdu_cnt_hist[htt_max_string_len] = {0}; + u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2); + u32 required_buffer_size = htt_max_print_char_per_elem * num_elements; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v:"); + len += htt_dbg_out(buf + len, buf_len - len, "tried_mpdu_cnt_hist_bin_size : %u", + htt_stats_buf->hist_bin_size); + + if (required_buffer_size < htt_max_string_len) { + array_to_string(tried_mpdu_cnt_hist, + htt_stats_buf->tried_mpdu_cnt_hist, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s ", + tried_mpdu_cnt_hist); + } 
else { + len += htt_dbg_out(buf + len, buf_len - len, + "insufficient print buffer "); + } + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char hw_intr_name[htt_stats_max_hw_intr_name_len + 1] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_hw_stats_intr_misc_tlv:"); + memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]), + htt_stats_max_hw_intr_name_len); + len += htt_dbg_out(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name); + len += htt_dbg_out(buf + len, buf_len - len, "mask = %u", + htt_stats_buf->mask); + len += htt_dbg_out(buf + len, buf_len - len, "count = %u ", + htt_stats_buf->count); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char hw_module_name[htt_stats_max_hw_module_name_len + 1] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_hw_stats_wd_timeout_tlv:"); + memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]), + htt_stats_max_hw_module_name_len); + len += htt_dbg_out(buf + len, buf_len - len, "hw_module_name = %s ", + hw_module_name); + len += htt_dbg_out(buf + len, buf_len - len, "count = %u", + htt_stats_buf->count); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, + struct 
debug_htt_stats_req *stats_req) +{ + const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_hw_stats_pdev_errs_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "tx_abort = %u", + htt_stats_buf->tx_abort); + len += htt_dbg_out(buf + len, buf_len - len, "tx_abort_fail_count = %u", + htt_stats_buf->tx_abort_fail_count); + len += htt_dbg_out(buf + len, buf_len - len, "rx_abort = %u", + htt_stats_buf->rx_abort); + len += htt_dbg_out(buf + len, buf_len - len, "rx_abort_fail_count = %u", + htt_stats_buf->rx_abort_fail_count); + len += htt_dbg_out(buf + len, buf_len - len, "warm_reset = %u", + htt_stats_buf->warm_reset); + len += htt_dbg_out(buf + len, buf_len - len, "cold_reset = %u", + htt_stats_buf->cold_reset); + len += htt_dbg_out(buf + len, buf_len - len, "tx_flush = %u", + htt_stats_buf->tx_flush); + len += htt_dbg_out(buf + len, buf_len - len, "tx_glb_reset = %u", + htt_stats_buf->tx_glb_reset); + len += htt_dbg_out(buf + len, buf_len - len, "tx_txq_reset = %u", + htt_stats_buf->tx_txq_reset); + len += htt_dbg_out(buf + len, buf_len - len, "rx_timeout_reset = %u ", + htt_stats_buf->rx_timeout_reset); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_msdu_flow_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "last_update_timestamp = %u", + htt_stats_buf->last_update_timestamp); + len += 
htt_dbg_out(buf + len, buf_len - len, "last_add_timestamp = %u", + htt_stats_buf->last_add_timestamp); + len += htt_dbg_out(buf + len, buf_len - len, "last_remove_timestamp = %u", + htt_stats_buf->last_remove_timestamp); + len += htt_dbg_out(buf + len, buf_len - len, "total_processed_msdu_count = %u", + htt_stats_buf->total_processed_msdu_count); + len += htt_dbg_out(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u", + htt_stats_buf->cur_msdu_count_in_flowq); + len += htt_dbg_out(buf + len, buf_len - len, "sw_peer_id = %u", + htt_stats_buf->sw_peer_id); + len += htt_dbg_out(buf + len, buf_len - len, "tx_flow_no = %u", + htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "tid_num = %u", + (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xf0000) >> + 16); + len += htt_dbg_out(buf + len, buf_len - len, "drop_rule = %u", + (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >> + 20); + len += htt_dbg_out(buf + len, buf_len - len, "last_cycle_enqueue_count = %u", + htt_stats_buf->last_cycle_enqueue_count); + len += htt_dbg_out(buf + len, buf_len - len, "last_cycle_dequeue_count = %u", + htt_stats_buf->last_cycle_dequeue_count); + len += htt_dbg_out(buf + len, buf_len - len, "last_cycle_drop_count = %u", + htt_stats_buf->last_cycle_drop_count); + len += htt_dbg_out(buf + len, buf_len - len, "current_drop_th = %u ", + htt_stats_buf->current_drop_th); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char tid_name[max_htt_tid_name + 1] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tid_stats_tlv:"); + memcpy(tid_name, &(htt_stats_buf->tid_name[0]), 
max_htt_tid_name); + len += htt_dbg_out(buf + len, buf_len - len, "tid_name = %s ", tid_name); + len += htt_dbg_out(buf + len, buf_len - len, "sw_peer_id = %u", + htt_stats_buf->sw_peer_id__tid_num & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "tid_num = %u", + (htt_stats_buf->sw_peer_id__tid_num & 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "num_sched_pending = %u", + htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "num_ppdu_in_hwq = %u", + (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & + 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "tid_flags = 0x%x", + htt_stats_buf->tid_flags); + len += htt_dbg_out(buf + len, buf_len - len, "hw_queued = %u", + htt_stats_buf->hw_queued); + len += htt_dbg_out(buf + len, buf_len - len, "hw_reaped = %u", + htt_stats_buf->hw_reaped); + len += htt_dbg_out(buf + len, buf_len - len, "mpdus_hw_filter = %u", + htt_stats_buf->mpdus_hw_filter); + len += htt_dbg_out(buf + len, buf_len - len, "qdepth_bytes = %u", + htt_stats_buf->qdepth_bytes); + len += htt_dbg_out(buf + len, buf_len - len, "qdepth_num_msdu = %u", + htt_stats_buf->qdepth_num_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "qdepth_num_mpdu = %u", + htt_stats_buf->qdepth_num_mpdu); + len += htt_dbg_out(buf + len, buf_len - len, "last_scheduled_tsmp = %u", + htt_stats_buf->last_scheduled_tsmp); + len += htt_dbg_out(buf + len, buf_len - len, "pause_module_id = %u", + htt_stats_buf->pause_module_id); + len += htt_dbg_out(buf + len, buf_len - len, "block_module_id = %u ", + htt_stats_buf->block_module_id); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = 
ath11k_htt_stats_buf_size; + char tid_name[max_htt_tid_name + 1] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tid_stats_v1_tlv:"); + memcpy(tid_name, &(htt_stats_buf->tid_name[0]), max_htt_tid_name); + len += htt_dbg_out(buf + len, buf_len - len, "tid_name = %s ", tid_name); + len += htt_dbg_out(buf + len, buf_len - len, "sw_peer_id = %u", + htt_stats_buf->sw_peer_id__tid_num & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "tid_num = %u", + (htt_stats_buf->sw_peer_id__tid_num & 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "num_sched_pending = %u", + htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "num_ppdu_in_hwq = %u", + (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & + 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "tid_flags = 0x%x", + htt_stats_buf->tid_flags); + len += htt_dbg_out(buf + len, buf_len - len, "max_qdepth_bytes = %u", + htt_stats_buf->max_qdepth_bytes); + len += htt_dbg_out(buf + len, buf_len - len, "max_qdepth_n_msdus = %u", + htt_stats_buf->max_qdepth_n_msdus); + len += htt_dbg_out(buf + len, buf_len - len, "rsvd = %u", + htt_stats_buf->rsvd); + len += htt_dbg_out(buf + len, buf_len - len, "qdepth_bytes = %u", + htt_stats_buf->qdepth_bytes); + len += htt_dbg_out(buf + len, buf_len - len, "qdepth_num_msdu = %u", + htt_stats_buf->qdepth_num_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "qdepth_num_mpdu = %u", + htt_stats_buf->qdepth_num_mpdu); + len += htt_dbg_out(buf + len, buf_len - len, "last_scheduled_tsmp = %u", + htt_stats_buf->last_scheduled_tsmp); + len += htt_dbg_out(buf + len, buf_len - len, "pause_module_id = %u", + htt_stats_buf->pause_module_id); + len += htt_dbg_out(buf + len, buf_len - len, "block_module_id = %u", + htt_stats_buf->block_module_id); + len += htt_dbg_out(buf + len, buf_len - len, "allow_n_flags = 0x%x", + htt_stats_buf->allow_n_flags); + len += htt_dbg_out(buf + len, buf_len - len, 
"sendn_frms_allowed = %u ", + htt_stats_buf->sendn_frms_allowed); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char tid_name[max_htt_tid_name + 1] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_tid_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "sw_peer_id = %u", + htt_stats_buf->sw_peer_id__tid_num & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "tid_num = %u", + (htt_stats_buf->sw_peer_id__tid_num & 0xffff0000) >> 16); + memcpy(tid_name, &(htt_stats_buf->tid_name[0]), max_htt_tid_name); + len += htt_dbg_out(buf + len, buf_len - len, "tid_name = %s ", tid_name); + len += htt_dbg_out(buf + len, buf_len - len, "dup_in_reorder = %u", + htt_stats_buf->dup_in_reorder); + len += htt_dbg_out(buf + len, buf_len - len, "dup_past_outside_window = %u", + htt_stats_buf->dup_past_outside_window); + len += htt_dbg_out(buf + len, buf_len - len, "dup_past_within_window = %u", + htt_stats_buf->dup_past_within_window); + len += htt_dbg_out(buf + len, buf_len - len, "rxdesc_err_decrypt = %u ", + htt_stats_buf->rxdesc_err_decrypt); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_counter_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_counter_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char counter_name[htt_max_string_len] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_counter_tlv:"); + + array_to_string(counter_name, + htt_stats_buf->counter_name, + htt_max_counter_name); + len += 
htt_dbg_out(buf + len, buf_len - len, "counter_name = %s ", counter_name); + len += htt_dbg_out(buf + len, buf_len - len, "count = %u ", + htt_stats_buf->count); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_peer_stats_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "ppdu_cnt = %u", + htt_stats_buf->ppdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_cnt = %u", + htt_stats_buf->mpdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "msdu_cnt = %u", + htt_stats_buf->msdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "pause_bitmap = %u", + htt_stats_buf->pause_bitmap); + len += htt_dbg_out(buf + len, buf_len - len, "block_bitmap = %u", + htt_stats_buf->block_bitmap); + len += htt_dbg_out(buf + len, buf_len - len, "last_rssi = %d", + htt_stats_buf->rssi); + len += htt_dbg_out(buf + len, buf_len - len, "enqueued_count = %llu", + htt_stats_buf->peer_enqueued_count_low | + ((u64)htt_stats_buf->peer_enqueued_count_high << 32)); + len += htt_dbg_out(buf + len, buf_len - len, "dequeued_count = %llu", + htt_stats_buf->peer_dequeued_count_low | + ((u64)htt_stats_buf->peer_dequeued_count_high << 32)); + len += htt_dbg_out(buf + len, buf_len - len, "dropped_count = %llu", + htt_stats_buf->peer_dropped_count_low | + ((u64)htt_stats_buf->peer_dropped_count_high << 32)); + len += htt_dbg_out(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu", + htt_stats_buf->ppdu_transmitted_bytes_low | + ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32)); + len += htt_dbg_out(buf + len, buf_len - len, "ttl_removed_count = %u", + 
htt_stats_buf->peer_ttl_removed_count); + len += htt_dbg_out(buf + len, buf_len - len, "inactive_time = %u ", + htt_stats_buf->inactive_time); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_peer_details_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_peer_details_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_peer_details_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "peer_type = %u", + htt_stats_buf->peer_type); + len += htt_dbg_out(buf + len, buf_len - len, "sw_peer_id = %u", + htt_stats_buf->sw_peer_id); + len += htt_dbg_out(buf + len, buf_len - len, "vdev_id = %u", + htt_stats_buf->vdev_pdev_ast_idx & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "pdev_id = %u", + (htt_stats_buf->vdev_pdev_ast_idx & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "ast_idx = %u", + (htt_stats_buf->vdev_pdev_ast_idx & 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, + "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x", + htt_stats_buf->mac_addr.mac_addr_l32 & 0xff, + (htt_stats_buf->mac_addr.mac_addr_l32 & 0xff00) >> 8, + (htt_stats_buf->mac_addr.mac_addr_l32 & 0xff0000) >> 16, + (htt_stats_buf->mac_addr.mac_addr_l32 & 0xff000000) >> 24, + (htt_stats_buf->mac_addr.mac_addr_h16 & 0xff), + (htt_stats_buf->mac_addr.mac_addr_h16 & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "peer_flags = 0x%x", + htt_stats_buf->peer_flags); + len += htt_dbg_out(buf + len, buf_len - len, "qpeer_flags = 0x%x ", + htt_stats_buf->qpeer_flags); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct 
htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char str_buf[htt_max_string_len] = {0}; + char *tx_gi[htt_tx_peer_stats_num_gi_counters]; + u8 j; + + for (j = 0; j < htt_tx_peer_stats_num_gi_counters; j++) + tx_gi[j] = kmalloc(htt_max_string_len, gfp_atomic); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_peer_rate_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "tx_ldpc = %u", + htt_stats_buf->tx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "rts_cnt = %u", + htt_stats_buf->rts_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "ack_rssi = %u", + htt_stats_buf->ack_rssi); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_su_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_mu_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, + htt_stats_buf->tx_nss, + htt_tx_pdev_stats_num_spatial_streams); + len += htt_dbg_out(buf + len, buf_len - len, "tx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, + htt_stats_buf->tx_bw, + htt_tx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_bw = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_stbc, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_stbc = %s ", str_buf); + + 
memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_pream, + htt_tx_pdev_stats_num_preamble_types); + len += htt_dbg_out(buf + len, buf_len - len, "tx_pream = %s ", str_buf); + + for (j = 0; j < htt_tx_peer_stats_num_gi_counters; j++) { + array_to_string(tx_gi[j], + htt_stats_buf->tx_gi[j], + htt_tx_peer_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_gi[%u] = %s ", + j, tx_gi[j]); + } + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, + htt_stats_buf->tx_dcm, + htt_tx_pdev_stats_num_dcm_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_dcm = %s ", str_buf); + + for (j = 0; j < htt_tx_peer_stats_num_gi_counters; j++) + kfree(tx_gi[j]); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u8 j; + char *rssi_chain[htt_rx_peer_stats_num_spatial_streams]; + char *rx_gi[htt_rx_peer_stats_num_gi_counters]; + char str_buf[htt_max_string_len] = {0}; + + for (j = 0; j < htt_rx_peer_stats_num_spatial_streams; j++) + rssi_chain[j] = kmalloc(htt_max_string_len, gfp_atomic); + + for (j = 0; j < htt_rx_peer_stats_num_gi_counters; j++) + rx_gi[j] = kmalloc(htt_max_string_len, gfp_atomic); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_peer_rate_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "nsts = %u", + htt_stats_buf->nsts); + len += htt_dbg_out(buf + len, buf_len - len, "rx_ldpc = %u", + htt_stats_buf->rx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "rts_cnt = %u", + htt_stats_buf->rts_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_mgmt = %u", + htt_stats_buf->rssi_mgmt); + len += htt_dbg_out(buf + 
len, buf_len - len, "rssi_data = %u", + htt_stats_buf->rssi_data); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_comb = %u", + htt_stats_buf->rssi_comb); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_mcs, + htt_rx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_nss, + htt_rx_pdev_stats_num_spatial_streams); + len += htt_dbg_out(buf + len, buf_len - len, "rx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_dcm, + htt_rx_pdev_stats_num_dcm_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_dcm = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_stbc, + htt_rx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_stbc = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_bw, + htt_rx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_bw = %s ", str_buf); + + for (j = 0; j < htt_rx_peer_stats_num_spatial_streams; j++) { + array_to_string(rssi_chain[j], htt_stats_buf->rssi_chain[j], + htt_rx_peer_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_chain[%u] = %s ", + j, rssi_chain[j]); + } + + for (j = 0; j < htt_rx_peer_stats_num_gi_counters; j++) { + array_to_string(rx_gi[j], htt_stats_buf->rx_gi[j], + htt_rx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_gi[%u] = %s ", + j, rx_gi[j]); + } + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_pream, + htt_rx_pdev_stats_num_preamble_types); + len += htt_dbg_out(buf + len, buf_len - len, "rx_pream = %s ", str_buf); + + for (j = 0; j < htt_rx_peer_stats_num_spatial_streams; j++) + 
kfree(rssi_chain[j]); + + for (j = 0; j < htt_rx_peer_stats_num_gi_counters; j++) + kfree(rx_gi[j]); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_hwq_mu_mimo_sch_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_sch_posted = %u", + htt_stats_buf->mu_mimo_sch_posted); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_sch_failed = %u", + htt_stats_buf->mu_mimo_sch_failed); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u ", + htt_stats_buf->mu_mimo_ppdu_posted); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_hwq_mu_mimo_mpdu_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u", + htt_stats_buf->mu_mimo_mpdus_queued_usr); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u", + htt_stats_buf->mu_mimo_mpdus_tried_usr); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u", + htt_stats_buf->mu_mimo_mpdus_failed_usr); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u", + htt_stats_buf->mu_mimo_mpdus_requeued_usr); + len += htt_dbg_out(buf + len, buf_len - len, 
"mu_mimo_err_no_ba_usr = %u", + htt_stats_buf->mu_mimo_err_no_ba_usr); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u", + htt_stats_buf->mu_mimo_mpdu_underrun_usr); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u ", + htt_stats_buf->mu_mimo_ampdu_underrun_usr); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_hwq_mu_mimo_cmn_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__hwq_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "hwq_id = %u ", + (htt_stats_buf->mac_id__hwq_id__word & 0xff00) >> 8); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + /* todo: hkdbg */ + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_hwq_stats_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__hwq_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "hwq_id = %u", + (htt_stats_buf->mac_id__hwq_id__word & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "xretry = %u", + htt_stats_buf->xretry); + len += htt_dbg_out(buf + len, buf_len - len, "underrun_cnt = %u", + htt_stats_buf->underrun_cnt); + len += htt_dbg_out(buf + len, buf_len - len, 
"flush_cnt = %u", + htt_stats_buf->flush_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "filt_cnt = %u", + htt_stats_buf->filt_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "null_mpdu_bmap = %u", + htt_stats_buf->null_mpdu_bmap); + len += htt_dbg_out(buf + len, buf_len - len, "user_ack_failure = %u", + htt_stats_buf->user_ack_failure); + len += htt_dbg_out(buf + len, buf_len - len, "ack_tlv_proc = %u", + htt_stats_buf->ack_tlv_proc); + len += htt_dbg_out(buf + len, buf_len - len, "sched_id_proc = %u", + htt_stats_buf->sched_id_proc); + len += htt_dbg_out(buf + len, buf_len - len, "null_mpdu_tx_count = %u", + htt_stats_buf->null_mpdu_tx_count); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u", + htt_stats_buf->mpdu_bmap_not_recvd); + len += htt_dbg_out(buf + len, buf_len - len, "num_bar = %u", + htt_stats_buf->num_bar); + len += htt_dbg_out(buf + len, buf_len - len, "rts = %u", + htt_stats_buf->rts); + len += htt_dbg_out(buf + len, buf_len - len, "cts2self = %u", + htt_stats_buf->cts2self); + len += htt_dbg_out(buf + len, buf_len - len, "qos_null = %u", + htt_stats_buf->qos_null); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_tried_cnt = %u", + htt_stats_buf->mpdu_tried_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_queued_cnt = %u", + htt_stats_buf->mpdu_queued_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u", + htt_stats_buf->mpdu_ack_fail_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_filt_cnt = %u", + htt_stats_buf->mpdu_filt_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "false_mpdu_ack_count = %u", + htt_stats_buf->false_mpdu_ack_count); + len += htt_dbg_out(buf + len, buf_len - len, "txq_timeout = %u ", + htt_stats_buf->txq_timeout); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf, + u16 tag_len, + struct 
debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u16 data_len = min_t(u16, (tag_len >> 2), htt_tx_hwq_max_difs_latency_bins); + char difs_latency_hist[htt_max_string_len] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_hwq_difs_latency_stats_tlv_v:"); + len += htt_dbg_out(buf + len, buf_len - len, "hist_intvl = %u", + htt_stats_buf->hist_intvl); + + array_to_string(difs_latency_hist, htt_stats_buf->difs_latency_hist, + data_len); + len += htt_dbg_out(buf + len, buf_len - len, "difs_latency_hist = %s ", + difs_latency_hist); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u16 data_len; + char cmd_result[htt_max_string_len] = {0}; + + data_len = min_t(u16, (tag_len >> 2), htt_tx_hwq_max_cmd_result_stats); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_hwq_cmd_result_stats_tlv_v:"); + + array_to_string(cmd_result, htt_stats_buf->cmd_result, data_len); + + len += htt_dbg_out(buf + len, buf_len - len, "cmd_result = %s ", cmd_result); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u16 num_elems; + char 
cmd_stall_status[htt_max_string_len] = {0}; + + num_elems = min_t(u16, (tag_len >> 2), htt_tx_hwq_max_cmd_stall_stats); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_hwq_cmd_stall_stats_tlv_v:"); + + array_to_string(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "cmd_stall_status = %s ", + cmd_stall_status); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u16 num_elems; + char fes_result[htt_max_string_len] = {0}; + + num_elems = min_t(u16, (tag_len >> 2), htt_tx_hwq_max_fes_result_stats); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_hwq_fes_result_stats_tlv_v:"); + + array_to_string(fes_result, htt_stats_buf->fes_result, num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "fes_result = %s ", fes_result); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char tried_mpdu_cnt_hist[htt_max_string_len] = {0}; + u32 num_elements = ((tag_len - + sizeof(htt_stats_buf->hist_bin_size)) >> 2); + u32 required_buffer_size = htt_max_print_char_per_elem * num_elements; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v:"); + len += htt_dbg_out(buf + len, buf_len - len, "tried_mpdu_cnt_hist_bin_size : 
%u", + htt_stats_buf->hist_bin_size); + + if (required_buffer_size < htt_max_string_len) { + array_to_string(tried_mpdu_cnt_hist, + htt_stats_buf->tried_mpdu_cnt_hist, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, + "tried_mpdu_cnt_hist = %s ", + tried_mpdu_cnt_hist); + } else { + len += htt_dbg_out(buf + len, buf_len - len, + "insufficient print buffer "); + } + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char txop_used_cnt_hist[htt_max_string_len] = {0}; + u32 num_elements = tag_len >> 2; + u32 required_buffer_size = htt_max_print_char_per_elem * num_elements; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_hwq_txop_used_cnt_hist_tlv_v:"); + + if (required_buffer_size < htt_max_string_len) { + array_to_string(txop_used_cnt_hist, + htt_stats_buf->txop_used_cnt_hist, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, "txop_used_cnt_hist = %s ", + txop_used_cnt_hist); + } else { + len += htt_dbg_out(buf + len, buf_len - len, + "insufficient print buffer "); + } + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + s32 i; + const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + const u32 *cbf_20 = htt_stats_buf->cbf_20; + const u32 *cbf_40 = htt_stats_buf->cbf_40; + const u32 *cbf_80 = htt_stats_buf->cbf_80; + const u32 *cbf_160 = htt_stats_buf->cbf_160; + + if 
(htt_stats_buf->tx_sounding_mode == htt_tx_ac_sounding_mode) { + len += htt_dbg_out(buf + len, buf_len - len, + " htt_tx_ac_sounding_stats_tlv: "); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_cbf_20 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u ", + cbf_20[htt_implicit_txbf_steer_stats], + cbf_20[htt_explicit_txbf_su_sifs_steer_stats], + cbf_20[htt_explicit_txbf_su_rbo_steer_stats], + cbf_20[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_20[htt_explicit_txbf_mu_rbo_steer_stats]); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_cbf_40 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u", + cbf_40[htt_implicit_txbf_steer_stats], + cbf_40[htt_explicit_txbf_su_sifs_steer_stats], + cbf_40[htt_explicit_txbf_su_rbo_steer_stats], + cbf_40[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_40[htt_explicit_txbf_mu_rbo_steer_stats]); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_cbf_80 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u", + cbf_80[htt_implicit_txbf_steer_stats], + cbf_80[htt_explicit_txbf_su_sifs_steer_stats], + cbf_80[htt_explicit_txbf_su_rbo_steer_stats], + cbf_80[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_80[htt_explicit_txbf_mu_rbo_steer_stats]); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_cbf_160 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u", + cbf_160[htt_implicit_txbf_steer_stats], + cbf_160[htt_explicit_txbf_su_sifs_steer_stats], + cbf_160[htt_explicit_txbf_su_rbo_steer_stats], + cbf_160[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_160[htt_explicit_txbf_mu_rbo_steer_stats]); + + for (i = 0; i < htt_tx_pdev_stats_num_ac_mumimo_user_stats; i++) { + len += htt_dbg_out(buf + len, buf_len - len, + "sounding user %u = 20mhz: %u, 40mhz : %u, 80mhz: %u, 160mhz: %u ", + i, + htt_stats_buf->sounding[0], + htt_stats_buf->sounding[1], + htt_stats_buf->sounding[2], + htt_stats_buf->sounding[3]); + } + } else if (htt_stats_buf->tx_sounding_mode == htt_tx_ax_sounding_mode) { 
+ len += htt_dbg_out(buf + len, buf_len - len, + " htt_tx_ax_sounding_stats_tlv: "); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_cbf_20 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u ", + cbf_20[htt_implicit_txbf_steer_stats], + cbf_20[htt_explicit_txbf_su_sifs_steer_stats], + cbf_20[htt_explicit_txbf_su_rbo_steer_stats], + cbf_20[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_20[htt_explicit_txbf_mu_rbo_steer_stats]); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_cbf_40 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u", + cbf_40[htt_implicit_txbf_steer_stats], + cbf_40[htt_explicit_txbf_su_sifs_steer_stats], + cbf_40[htt_explicit_txbf_su_rbo_steer_stats], + cbf_40[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_40[htt_explicit_txbf_mu_rbo_steer_stats]); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_cbf_80 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u", + cbf_80[htt_implicit_txbf_steer_stats], + cbf_80[htt_explicit_txbf_su_sifs_steer_stats], + cbf_80[htt_explicit_txbf_su_rbo_steer_stats], + cbf_80[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_80[htt_explicit_txbf_mu_rbo_steer_stats]); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_cbf_160 = ibf : %u, su_sifs : %u, su_rbo : %u, mu_sifs : %u, mu_rbo : %u", + cbf_160[htt_implicit_txbf_steer_stats], + cbf_160[htt_explicit_txbf_su_sifs_steer_stats], + cbf_160[htt_explicit_txbf_su_rbo_steer_stats], + cbf_160[htt_explicit_txbf_mu_sifs_steer_stats], + cbf_160[htt_explicit_txbf_mu_rbo_steer_stats]); + + for (i = 0; i < htt_tx_pdev_stats_num_ax_mumimo_user_stats; i++) { + len += htt_dbg_out(buf + len, buf_len - len, + "sounding user %u = 20mhz: %u, 40mhz : %u, 80mhz: %u, 160mhz: %u ", + i, + htt_stats_buf->sounding[0], + htt_stats_buf->sounding[1], + htt_stats_buf->sounding[2], + htt_stats_buf->sounding[3]); + } + } + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void 
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_selfgen_cmn_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "su_bar = %u", + htt_stats_buf->su_bar); + len += htt_dbg_out(buf + len, buf_len - len, "rts = %u", + htt_stats_buf->rts); + len += htt_dbg_out(buf + len, buf_len - len, "cts2self = %u", + htt_stats_buf->cts2self); + len += htt_dbg_out(buf + len, buf_len - len, "qos_null = %u", + htt_stats_buf->qos_null); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_1 = %u", + htt_stats_buf->delayed_bar_1); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_2 = %u", + htt_stats_buf->delayed_bar_2); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_3 = %u", + htt_stats_buf->delayed_bar_3); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_4 = %u", + htt_stats_buf->delayed_bar_4); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_5 = %u", + htt_stats_buf->delayed_bar_5); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_6 = %u", + htt_stats_buf->delayed_bar_6); + len += htt_dbg_out(buf + len, buf_len - len, "delayed_bar_7 = %u ", + htt_stats_buf->delayed_bar_7); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, 
"htt_tx_selfgen_ac_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "ac_su_ndpa = %u", + htt_stats_buf->ac_su_ndpa); + len += htt_dbg_out(buf + len, buf_len - len, "ac_su_ndp = %u", + htt_stats_buf->ac_su_ndp); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u", + htt_stats_buf->ac_mu_mimo_ndpa); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u", + htt_stats_buf->ac_mu_mimo_ndp); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u", + htt_stats_buf->ac_mu_mimo_brpoll_1); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u", + htt_stats_buf->ac_mu_mimo_brpoll_2); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u ", + htt_stats_buf->ac_mu_mimo_brpoll_3); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_selfgen_ax_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "ax_su_ndpa = %u", + htt_stats_buf->ax_su_ndpa); + len += htt_dbg_out(buf + len, buf_len - len, "ax_su_ndp = %u", + htt_stats_buf->ax_su_ndp); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u", + htt_stats_buf->ax_mu_mimo_ndpa); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u", + htt_stats_buf->ax_mu_mimo_ndp); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_1); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_2); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_3); + 
len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_4); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_5); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_6); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u", + htt_stats_buf->ax_mu_mimo_brpoll_7); + len += htt_dbg_out(buf + len, buf_len - len, "ax_basic_trigger = %u", + htt_stats_buf->ax_basic_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "ax_bsr_trigger = %u", + htt_stats_buf->ax_bsr_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_bar_trigger = %u", + htt_stats_buf->ax_mu_bar_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_rts_trigger = %u ", + htt_stats_buf->ax_mu_rts_trigger); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_selfgen_ac_err_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "ac_su_ndp_err = %u", + htt_stats_buf->ac_su_ndp_err); + len += htt_dbg_out(buf + len, buf_len - len, "ac_su_ndpa_err = %u", + htt_stats_buf->ac_su_ndpa_err); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u", + htt_stats_buf->ac_mu_mimo_ndpa_err); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u", + htt_stats_buf->ac_mu_mimo_ndp_err); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u", + htt_stats_buf->ac_mu_mimo_brp1_err); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u", 
+ htt_stats_buf->ac_mu_mimo_brp2_err); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u ", + htt_stats_buf->ac_mu_mimo_brp3_err); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_selfgen_ax_err_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "ax_su_ndp_err = %u", + htt_stats_buf->ax_su_ndp_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_su_ndpa_err = %u", + htt_stats_buf->ax_su_ndpa_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u", + htt_stats_buf->ax_mu_mimo_ndpa_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u", + htt_stats_buf->ax_mu_mimo_ndp_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u", + htt_stats_buf->ax_mu_mimo_brp1_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u", + htt_stats_buf->ax_mu_mimo_brp2_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u", + htt_stats_buf->ax_mu_mimo_brp3_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u", + htt_stats_buf->ax_mu_mimo_brp4_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u", + htt_stats_buf->ax_mu_mimo_brp5_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u", + htt_stats_buf->ax_mu_mimo_brp6_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u", + htt_stats_buf->ax_mu_mimo_brp7_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_basic_trigger_err = %u", + htt_stats_buf->ax_basic_trigger_err); + len 
+= htt_dbg_out(buf + len, buf_len - len, "ax_bsr_trigger_err = %u", + htt_stats_buf->ax_bsr_trigger_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u", + htt_stats_buf->ax_mu_bar_trigger_err); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u ", + htt_stats_buf->ax_mu_rts_trigger_err); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u8 i; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_mu_mimo_sch_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_sch_posted = %u", + htt_stats_buf->mu_mimo_sch_posted); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_sch_failed = %u", + htt_stats_buf->mu_mimo_sch_failed); + len += htt_dbg_out(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u ", + htt_stats_buf->mu_mimo_ppdu_posted); + + len += htt_dbg_out(buf + len, buf_len - len, "11ac mu_mimo sch stats:"); + + for (i = 0; i < htt_tx_pdev_stats_num_ac_mumimo_user_stats; i++) + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_sch_nusers_%u = %u", + i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]); + + len += htt_dbg_out(buf + len, buf_len - len, " 11ax mu_mimo sch stats:"); + + for (i = 0; i < htt_tx_pdev_stats_num_ax_mumimo_user_stats; i++) + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_sch_nusers_%u = %u", + i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]); + + len += htt_dbg_out(buf + len, buf_len - len, " 11ax ofdma sch stats:"); + + for (i = 0; i < htt_tx_pdev_stats_num_ofdma_user_stats; i++) + len += htt_dbg_out(buf + len, buf_len - len, + "ax_ofdma_sch_nusers_%u = %u", + i, 
htt_stats_buf->ax_ofdma_sch_nusers[i]); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + if (htt_stats_buf->tx_sched_mode == htt_stats_tx_sched_mode_mu_mimo_ac) { + if (!htt_stats_buf->user_index) + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_mu_mimo_ac_mpdu_stats: "); + + if (htt_stats_buf->user_index < + htt_tx_pdev_stats_num_ac_mumimo_user_stats) { + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_queued_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_queued_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_tried_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_tried_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_failed_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_failed_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_requeued_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_requeued_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_err_no_ba_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->err_no_ba_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_mpdu_underrun_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdu_underrun_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_ampdu_underrun_usr_%u = %u ", + htt_stats_buf->user_index, + htt_stats_buf->ampdu_underrun_usr); + } + } + + if (htt_stats_buf->tx_sched_mode == htt_stats_tx_sched_mode_mu_mimo_ax) { + if (!htt_stats_buf->user_index) + len += htt_dbg_out(buf + len, buf_len - len, + 
"htt_tx_pdev_mu_mimo_ax_mpdu_stats: "); + + if (htt_stats_buf->user_index < + htt_tx_pdev_stats_num_ax_mumimo_user_stats) { + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_queued_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_queued_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_tried_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_tried_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_failed_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_failed_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_requeued_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_requeued_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_err_no_ba_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->err_no_ba_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_mpdu_underrun_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdu_underrun_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_ampdu_underrun_usr_%u = %u ", + htt_stats_buf->user_index, + htt_stats_buf->ampdu_underrun_usr); + } + } + + if (htt_stats_buf->tx_sched_mode == htt_stats_tx_sched_mode_mu_ofdma_ax) { + if (!htt_stats_buf->user_index) + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_ax_mu_ofdma_mpdu_stats: "); + + if (htt_stats_buf->user_index < htt_tx_pdev_stats_num_ofdma_user_stats) { + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_queued_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_queued_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_tried_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_tried_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_failed_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_failed_usr); + len += htt_dbg_out(buf + len, buf_len - len, + 
"ax_mu_ofdma_mpdus_requeued_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdus_requeued_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_ofdma_err_no_ba_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->err_no_ba_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_ofdma_mpdu_underrun_usr_%u = %u", + htt_stats_buf->user_index, + htt_stats_buf->mpdu_underrun_usr); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_ofdma_ampdu_underrun_usr_%u = %u ", + htt_stats_buf->user_index, + htt_stats_buf->ampdu_underrun_usr); + } + } + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char sched_cmd_posted[htt_max_string_len] = {0}; + u16 num_elements = min_t(u16, (tag_len >> 2), htt_tx_pdev_sched_tx_mode_max); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_sched_txq_cmd_posted_tlv_v:"); + + array_to_string(sched_cmd_posted, htt_stats_buf->sched_cmd_posted, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, "sched_cmd_posted = %s ", + sched_cmd_posted); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char sched_cmd_reaped[htt_max_string_len] = {0}; + u16 num_elements = min_t(u16, (tag_len >> 2), htt_tx_pdev_sched_tx_mode_max); + + len += htt_dbg_out(buf + len, buf_len - len, 
"htt_sched_txq_cmd_reaped_tlv_v:"); + + array_to_string(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, "sched_cmd_reaped = %s ", + sched_cmd_reaped); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char sched_order_su[htt_max_string_len] = {0}; + /* each entry is u32, i.e. 4 bytes */ + u32 sched_order_su_num_entries = + min_t(u32, (tag_len >> 2), htt_tx_pdev_num_sched_order_log); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_sched_txq_sched_order_su_tlv_v:"); + + array_to_string(sched_order_su, htt_stats_buf->sched_order_su, + sched_order_su_num_entries); + len += htt_dbg_out(buf + len, buf_len - len, "sched_order_su = %s ", + sched_order_su); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char sched_ineligibility[htt_max_string_len] = {0}; + /* each entry is u32, i.e. 
4 bytes */ + u32 sched_ineligibility_num_entries = tag_len >> 2; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_sched_txq_sched_ineligibility_v:"); + + array_to_string(sched_ineligibility, htt_stats_buf->sched_ineligibility, + sched_ineligibility_num_entries); + len += htt_dbg_out(buf + len, buf_len - len, "sched_ineligibility = %s ", + sched_ineligibility); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_pdev_stats_sched_per_txq_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__txq_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "txq_id = %u", + (htt_stats_buf->mac_id__txq_id__word & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "sched_policy = %u", + htt_stats_buf->sched_policy); + len += htt_dbg_out(buf + len, buf_len - len, + "last_sched_cmd_posted_timestamp = %u", + htt_stats_buf->last_sched_cmd_posted_timestamp); + len += htt_dbg_out(buf + len, buf_len - len, + "last_sched_cmd_compl_timestamp = %u", + htt_stats_buf->last_sched_cmd_compl_timestamp); + len += htt_dbg_out(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u", + htt_stats_buf->sched_2_tac_lwm_count); + len += htt_dbg_out(buf + len, buf_len - len, "sched_2_tac_ring_full = %u", + htt_stats_buf->sched_2_tac_ring_full); + len += htt_dbg_out(buf + len, buf_len - len, "sched_cmd_post_failure = %u", + htt_stats_buf->sched_cmd_post_failure); + len += htt_dbg_out(buf + len, buf_len - len, "num_active_tids = %u", + htt_stats_buf->num_active_tids); + len += htt_dbg_out(buf + len, buf_len - len, 
"num_ps_schedules = %u", + htt_stats_buf->num_ps_schedules); + len += htt_dbg_out(buf + len, buf_len - len, "sched_cmds_pending = %u", + htt_stats_buf->sched_cmds_pending); + len += htt_dbg_out(buf + len, buf_len - len, "num_tid_register = %u", + htt_stats_buf->num_tid_register); + len += htt_dbg_out(buf + len, buf_len - len, "num_tid_unregister = %u", + htt_stats_buf->num_tid_unregister); + len += htt_dbg_out(buf + len, buf_len - len, "num_qstats_queried = %u", + htt_stats_buf->num_qstats_queried); + len += htt_dbg_out(buf + len, buf_len - len, "qstats_update_pending = %u", + htt_stats_buf->qstats_update_pending); + len += htt_dbg_out(buf + len, buf_len - len, "last_qstats_query_timestamp = %u", + htt_stats_buf->last_qstats_query_timestamp); + len += htt_dbg_out(buf + len, buf_len - len, "num_tqm_cmdq_full = %u", + htt_stats_buf->num_tqm_cmdq_full); + len += htt_dbg_out(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u", + htt_stats_buf->num_de_sched_algo_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u", + htt_stats_buf->num_rt_sched_algo_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u", + htt_stats_buf->num_tqm_sched_algo_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "notify_sched = %u ", + htt_stats_buf->notify_sched); + len += htt_dbg_out(buf + len, buf_len - len, "dur_based_sendn_term = %u ", + htt_stats_buf->dur_based_sendn_term); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_stats_tx_sched_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", 
+ htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "current_timestamp = %u ", + htt_stats_buf->current_timestamp); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char gen_mpdu_end_reason[htt_max_string_len] = {0}; + u16 num_elements = min_t(u16, (tag_len >> 2), + htt_tx_tqm_max_list_mpdu_end_reason); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tqm_gen_mpdu_stats_tlv_v:"); + + array_to_string(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, "gen_mpdu_end_reason = %s ", + gen_mpdu_end_reason); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char list_mpdu_end_reason[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_tx_tqm_max_list_mpdu_end_reason); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_tqm_list_mpdu_stats_tlv_v:"); + + array_to_string(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "list_mpdu_end_reason = %s ", + list_mpdu_end_reason); + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void 
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char list_mpdu_cnt_hist[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), + htt_tx_tqm_max_list_mpdu_cnt_histogram_bins); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tqm_list_mpdu_cnt_tlv_v:"); + + array_to_string(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s ", + list_mpdu_cnt_hist); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tqm_pdev_stats_tlv_v:"); + len += htt_dbg_out(buf + len, buf_len - len, "msdu_count = %u", + htt_stats_buf->msdu_count); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_count = %u", + htt_stats_buf->mpdu_count); + len += htt_dbg_out(buf + len, buf_len - len, "remove_msdu = %u", + htt_stats_buf->remove_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "remove_mpdu = %u", + htt_stats_buf->remove_mpdu); + len += htt_dbg_out(buf + len, buf_len - len, "remove_msdu_ttl = %u", + htt_stats_buf->remove_msdu_ttl); + len += htt_dbg_out(buf + len, buf_len - len, "send_bar = %u", + htt_stats_buf->send_bar); + len += htt_dbg_out(buf + len, buf_len - len, "bar_sync = %u", + htt_stats_buf->bar_sync); + len += htt_dbg_out(buf + len, buf_len - len, "notify_mpdu = %u", + htt_stats_buf->notify_mpdu); + len += htt_dbg_out(buf + len, 
buf_len - len, "sync_cmd = %u", + htt_stats_buf->sync_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "write_cmd = %u", + htt_stats_buf->write_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_trigger = %u", + htt_stats_buf->hwsch_trigger); + len += htt_dbg_out(buf + len, buf_len - len, "ack_tlv_proc = %u", + htt_stats_buf->ack_tlv_proc); + len += htt_dbg_out(buf + len, buf_len - len, "gen_mpdu_cmd = %u", + htt_stats_buf->gen_mpdu_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "gen_list_cmd = %u", + htt_stats_buf->gen_list_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "remove_mpdu_cmd = %u", + htt_stats_buf->remove_mpdu_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u", + htt_stats_buf->remove_mpdu_tried_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u", + htt_stats_buf->mpdu_queue_stats_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_head_info_cmd = %u", + htt_stats_buf->mpdu_head_info_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u", + htt_stats_buf->msdu_flow_stats_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "remove_msdu_cmd = %u", + htt_stats_buf->remove_msdu_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u", + htt_stats_buf->remove_msdu_ttl_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "flush_cache_cmd = %u", + htt_stats_buf->flush_cache_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "update_mpduq_cmd = %u", + htt_stats_buf->update_mpduq_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "enqueue = %u", + htt_stats_buf->enqueue); + len += htt_dbg_out(buf + len, buf_len - len, "enqueue_notify = %u", + htt_stats_buf->enqueue_notify); + len += htt_dbg_out(buf + len, buf_len - len, "notify_mpdu_at_head = %u", + htt_stats_buf->notify_mpdu_at_head); + len += htt_dbg_out(buf + len, buf_len - len, "notify_mpdu_state_valid = %u", + htt_stats_buf->notify_mpdu_state_valid); + len += 
htt_dbg_out(buf + len, buf_len - len, "sched_udp_notify1 = %u", + htt_stats_buf->sched_udp_notify1); + len += htt_dbg_out(buf + len, buf_len - len, "sched_udp_notify2 = %u", + htt_stats_buf->sched_udp_notify2); + len += htt_dbg_out(buf + len, buf_len - len, "sched_nonudp_notify1 = %u", + htt_stats_buf->sched_nonudp_notify1); + len += htt_dbg_out(buf + len, buf_len - len, "sched_nonudp_notify2 = %u ", + htt_stats_buf->sched_nonudp_notify2); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tqm_cmn_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "max_cmdq_id = %u", + htt_stats_buf->max_cmdq_id); + len += htt_dbg_out(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u", + htt_stats_buf->list_mpdu_cnt_hist_intvl); + len += htt_dbg_out(buf + len, buf_len - len, "add_msdu = %u", + htt_stats_buf->add_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "q_empty = %u", + htt_stats_buf->q_empty); + len += htt_dbg_out(buf + len, buf_len - len, "q_not_empty = %u", + htt_stats_buf->q_not_empty); + len += htt_dbg_out(buf + len, buf_len - len, "drop_notification = %u", + htt_stats_buf->drop_notification); + len += htt_dbg_out(buf + len, buf_len - len, "desc_threshold = %u ", + htt_stats_buf->desc_threshold); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_error_stats_tlv 
*htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tqm_error_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "q_empty_failure = %u", + htt_stats_buf->q_empty_failure); + len += htt_dbg_out(buf + len, buf_len - len, "q_not_empty_failure = %u", + htt_stats_buf->q_not_empty_failure); + len += htt_dbg_out(buf + len, buf_len - len, "add_msdu_failure = %u ", + htt_stats_buf->add_msdu_failure); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_tqm_cmdq_status_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__cmdq_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "cmdq_id = %u ", + (htt_stats_buf->mac_id__cmdq_id__word & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "sync_cmd = %u", + htt_stats_buf->sync_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "write_cmd = %u", + htt_stats_buf->write_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "gen_mpdu_cmd = %u", + htt_stats_buf->gen_mpdu_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u", + htt_stats_buf->mpdu_queue_stats_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_head_info_cmd = %u", + htt_stats_buf->mpdu_head_info_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u", + htt_stats_buf->msdu_flow_stats_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "remove_mpdu_cmd = %u", + htt_stats_buf->remove_mpdu_cmd); + len += 
htt_dbg_out(buf + len, buf_len - len, "remove_msdu_cmd = %u", + htt_stats_buf->remove_msdu_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "flush_cache_cmd = %u", + htt_stats_buf->flush_cache_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "update_mpduq_cmd = %u", + htt_stats_buf->update_mpduq_cmd); + len += htt_dbg_out(buf + len, buf_len - len, "update_msduq_cmd = %u ", + htt_stats_buf->update_msduq_cmd); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_de_eapol_packets_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "m1_packets = %u", + htt_stats_buf->m1_packets); + len += htt_dbg_out(buf + len, buf_len - len, "m2_packets = %u", + htt_stats_buf->m2_packets); + len += htt_dbg_out(buf + len, buf_len - len, "m3_packets = %u", + htt_stats_buf->m3_packets); + len += htt_dbg_out(buf + len, buf_len - len, "m4_packets = %u", + htt_stats_buf->m4_packets); + len += htt_dbg_out(buf + len, buf_len - len, "g1_packets = %u", + htt_stats_buf->g1_packets); + len += htt_dbg_out(buf + len, buf_len - len, "g2_packets = %u ", + htt_stats_buf->g2_packets); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + 
"htt_tx_de_classify_failed_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "ap_bss_peer_not_found = %u", + htt_stats_buf->ap_bss_peer_not_found); + len += htt_dbg_out(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u", + htt_stats_buf->ap_bcast_mcast_no_peer); + len += htt_dbg_out(buf + len, buf_len - len, "sta_delete_in_progress = %u", + htt_stats_buf->sta_delete_in_progress); + len += htt_dbg_out(buf + len, buf_len - len, "ibss_no_bss_peer = %u", + htt_stats_buf->ibss_no_bss_peer); + len += htt_dbg_out(buf + len, buf_len - len, "invalid_vdev_type = %u", + htt_stats_buf->invalid_vdev_type); + len += htt_dbg_out(buf + len, buf_len - len, "invalid_ast_peer_entry = %u", + htt_stats_buf->invalid_ast_peer_entry); + len += htt_dbg_out(buf + len, buf_len - len, "peer_entry_invalid = %u", + htt_stats_buf->peer_entry_invalid); + len += htt_dbg_out(buf + len, buf_len - len, "ethertype_not_ip = %u", + htt_stats_buf->ethertype_not_ip); + len += htt_dbg_out(buf + len, buf_len - len, "eapol_lookup_failed = %u", + htt_stats_buf->eapol_lookup_failed); + len += htt_dbg_out(buf + len, buf_len - len, "qpeer_not_allow_data = %u", + htt_stats_buf->qpeer_not_allow_data); + len += htt_dbg_out(buf + len, buf_len - len, "fse_tid_override = %u", + htt_stats_buf->fse_tid_override); + len += htt_dbg_out(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u", + htt_stats_buf->ipv6_jumbogram_zero_length); + len += htt_dbg_out(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u ", + htt_stats_buf->qos_to_non_qos_in_prog); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_classify_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, 
"htt_tx_de_classify_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "arp_packets = %u", + htt_stats_buf->arp_packets); + len += htt_dbg_out(buf + len, buf_len - len, "igmp_packets = %u", + htt_stats_buf->igmp_packets); + len += htt_dbg_out(buf + len, buf_len - len, "dhcp_packets = %u", + htt_stats_buf->dhcp_packets); + len += htt_dbg_out(buf + len, buf_len - len, "host_inspected = %u", + htt_stats_buf->host_inspected); + len += htt_dbg_out(buf + len, buf_len - len, "htt_included = %u", + htt_stats_buf->htt_included); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_mcs = %u", + htt_stats_buf->htt_valid_mcs); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_nss = %u", + htt_stats_buf->htt_valid_nss); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_preamble_type = %u", + htt_stats_buf->htt_valid_preamble_type); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_chainmask = %u", + htt_stats_buf->htt_valid_chainmask); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_guard_interval = %u", + htt_stats_buf->htt_valid_guard_interval); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_retries = %u", + htt_stats_buf->htt_valid_retries); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_bw_info = %u", + htt_stats_buf->htt_valid_bw_info); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_power = %u", + htt_stats_buf->htt_valid_power); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x", + htt_stats_buf->htt_valid_key_flags); + len += htt_dbg_out(buf + len, buf_len - len, "htt_valid_no_encryption = %u", + htt_stats_buf->htt_valid_no_encryption); + len += htt_dbg_out(buf + len, buf_len - len, "fse_entry_count = %u", + htt_stats_buf->fse_entry_count); + len += htt_dbg_out(buf + len, buf_len - len, "fse_priority_be = %u", + htt_stats_buf->fse_priority_be); + len += htt_dbg_out(buf + len, buf_len - len, "fse_priority_high = %u", + htt_stats_buf->fse_priority_high); + len += 
htt_dbg_out(buf + len, buf_len - len, "fse_priority_low = %u", + htt_stats_buf->fse_priority_low); + len += htt_dbg_out(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u", + htt_stats_buf->fse_traffic_ptrn_be); + len += htt_dbg_out(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u", + htt_stats_buf->fse_traffic_ptrn_over_sub); + len += htt_dbg_out(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u", + htt_stats_buf->fse_traffic_ptrn_bursty); + len += htt_dbg_out(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u", + htt_stats_buf->fse_traffic_ptrn_interactive); + len += htt_dbg_out(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u", + htt_stats_buf->fse_traffic_ptrn_periodic); + len += htt_dbg_out(buf + len, buf_len - len, "fse_hwqueue_alloc = %u", + htt_stats_buf->fse_hwqueue_alloc); + len += htt_dbg_out(buf + len, buf_len - len, "fse_hwqueue_created = %u", + htt_stats_buf->fse_hwqueue_created); + len += htt_dbg_out(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u", + htt_stats_buf->fse_hwqueue_send_to_host); + len += htt_dbg_out(buf + len, buf_len - len, "mcast_entry = %u", + htt_stats_buf->mcast_entry); + len += htt_dbg_out(buf + len, buf_len - len, "bcast_entry = %u", + htt_stats_buf->bcast_entry); + len += htt_dbg_out(buf + len, buf_len - len, "htt_update_peer_cache = %u", + htt_stats_buf->htt_update_peer_cache); + len += htt_dbg_out(buf + len, buf_len - len, "htt_learning_frame = %u", + htt_stats_buf->htt_learning_frame); + len += htt_dbg_out(buf + len, buf_len - len, "fse_invalid_peer = %u", + htt_stats_buf->fse_invalid_peer); + len += htt_dbg_out(buf + len, buf_len - len, "mec_notify = %u ", + htt_stats_buf->mec_notify); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = 
tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_de_classify_status_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "eok = %u", + htt_stats_buf->eok); + len += htt_dbg_out(buf + len, buf_len - len, "classify_done = %u", + htt_stats_buf->classify_done); + len += htt_dbg_out(buf + len, buf_len - len, "lookup_failed = %u", + htt_stats_buf->lookup_failed); + len += htt_dbg_out(buf + len, buf_len - len, "send_host_dhcp = %u", + htt_stats_buf->send_host_dhcp); + len += htt_dbg_out(buf + len, buf_len - len, "send_host_mcast = %u", + htt_stats_buf->send_host_mcast); + len += htt_dbg_out(buf + len, buf_len - len, "send_host_unknown_dest = %u", + htt_stats_buf->send_host_unknown_dest); + len += htt_dbg_out(buf + len, buf_len - len, "send_host = %u", + htt_stats_buf->send_host); + len += htt_dbg_out(buf + len, buf_len - len, "status_invalid = %u ", + htt_stats_buf->status_invalid); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_de_enqueue_packets_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "enqueued_pkts = %u", + htt_stats_buf->enqueued_pkts); + len += htt_dbg_out(buf + len, buf_len - len, "to_tqm = %u", + htt_stats_buf->to_tqm); + len += htt_dbg_out(buf + len, buf_len - len, "to_tqm_bypass = %u ", + htt_stats_buf->to_tqm_bypass); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_enqueue_discard_stats_tlv(const void 
*tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_de_enqueue_discard_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "discarded_pkts = %u", + htt_stats_buf->discarded_pkts); + len += htt_dbg_out(buf + len, buf_len - len, "local_frames = %u", + htt_stats_buf->local_frames); + len += htt_dbg_out(buf + len, buf_len - len, "is_ext_msdu = %u ", + htt_stats_buf->is_ext_msdu); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_de_compl_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "tcl_dummy_frame = %u", + htt_stats_buf->tcl_dummy_frame); + len += htt_dbg_out(buf + len, buf_len - len, "tqm_dummy_frame = %u", + htt_stats_buf->tqm_dummy_frame); + len += htt_dbg_out(buf + len, buf_len - len, "tqm_notify_frame = %u", + htt_stats_buf->tqm_notify_frame); + len += htt_dbg_out(buf + len, buf_len - len, "fw2wbm_enq = %u", + htt_stats_buf->fw2wbm_enq); + len += htt_dbg_out(buf + len, buf_len - len, "tqm_bypass_frame = %u ", + htt_stats_buf->tqm_bypass_frame); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = 
stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char fw2wbm_ring_full_hist[htt_max_string_len] = {0}; + u16 num_elements = tag_len >> 2; + u32 required_buffer_size = htt_max_print_char_per_elem * num_elements; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_tx_de_fw2wbm_ring_full_hist_tlv"); + + if (required_buffer_size < htt_max_string_len) { + array_to_string(fw2wbm_ring_full_hist, + htt_stats_buf->fw2wbm_ring_full_hist, + num_elements); + len += htt_dbg_out(buf + len, buf_len - len, + "fw2wbm_ring_full_hist = %s ", + fw2wbm_ring_full_hist); + } else { + len += htt_dbg_out(buf + len, buf_len - len, + "insufficient print buffer "); + } + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_de_cmn_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "tcl2fw_entry_count = %u", + htt_stats_buf->tcl2fw_entry_count); + len += htt_dbg_out(buf + len, buf_len - len, "not_to_fw = %u", + htt_stats_buf->not_to_fw); + len += htt_dbg_out(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u", + htt_stats_buf->invalid_pdev_vdev_peer); + len += htt_dbg_out(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u", + htt_stats_buf->tcl_res_invalid_addrx); + len += htt_dbg_out(buf + len, buf_len - len, "wbm2fw_entry_count = %u", + htt_stats_buf->wbm2fw_entry_count); + len += htt_dbg_out(buf + len, buf_len - len, "invalid_pdev = %u ", + htt_stats_buf->invalid_pdev); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + 
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char low_wm_hit_count[htt_max_string_len] = {0}; + char high_wm_hit_count[htt_max_string_len] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_ring_if_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "base_addr = %u", + htt_stats_buf->base_addr); + len += htt_dbg_out(buf + len, buf_len - len, "elem_size = %u", + htt_stats_buf->elem_size); + len += htt_dbg_out(buf + len, buf_len - len, "num_elems = %u", + htt_stats_buf->num_elems__prefetch_tail_idx & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "prefetch_tail_idx = %u", + (htt_stats_buf->num_elems__prefetch_tail_idx & + 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "head_idx = %u", + htt_stats_buf->head_idx__tail_idx & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "tail_idx = %u", + (htt_stats_buf->head_idx__tail_idx & 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "shadow_head_idx = %u", + htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "shadow_tail_idx = %u", + (htt_stats_buf->shadow_head_idx__shadow_tail_idx & + 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "num_tail_incr = %u", + htt_stats_buf->num_tail_incr); + len += htt_dbg_out(buf + len, buf_len - len, "lwm_thresh = %u", + htt_stats_buf->lwm_thresh__hwm_thresh & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "hwm_thresh = %u", + (htt_stats_buf->lwm_thresh__hwm_thresh & 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "overrun_hit_count = %u", + htt_stats_buf->overrun_hit_count); + len += htt_dbg_out(buf + len, buf_len - len, "underrun_hit_count = %u", + htt_stats_buf->underrun_hit_count); + len 
+= htt_dbg_out(buf + len, buf_len - len, "prod_blockwait_count = %u", + htt_stats_buf->prod_blockwait_count); + len += htt_dbg_out(buf + len, buf_len - len, "cons_blockwait_count = %u", + htt_stats_buf->cons_blockwait_count); + + array_to_string(low_wm_hit_count, htt_stats_buf->low_wm_hit_count, + htt_stats_low_wm_bins); + len += htt_dbg_out(buf + len, buf_len - len, "low_wm_hit_count = %s ", + low_wm_hit_count); + + array_to_string(high_wm_hit_count, htt_stats_buf->high_wm_hit_count, + htt_stats_high_wm_bins); + len += htt_dbg_out(buf + len, buf_len - len, "high_wm_hit_count = %s ", + high_wm_hit_count); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_ring_if_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "num_records = %u ", + htt_stats_buf->num_records); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char dwords_used_by_user_n[htt_max_string_len] = {0}; + u16 num_elems = tag_len >> 2; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_sfm_client_user_tlv_v:"); + + array_to_string(dwords_used_by_user_n, + htt_stats_buf->dwords_used_by_user_n, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, 
"dwords_used_by_user_n = %s ", + dwords_used_by_user_n); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_sfm_client_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_sfm_client_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "client_id = %u", + htt_stats_buf->client_id); + len += htt_dbg_out(buf + len, buf_len - len, "buf_min = %u", + htt_stats_buf->buf_min); + len += htt_dbg_out(buf + len, buf_len - len, "buf_max = %u", + htt_stats_buf->buf_max); + len += htt_dbg_out(buf + len, buf_len - len, "buf_busy = %u", + htt_stats_buf->buf_busy); + len += htt_dbg_out(buf + len, buf_len - len, "buf_alloc = %u", + htt_stats_buf->buf_alloc); + len += htt_dbg_out(buf + len, buf_len - len, "buf_avail = %u", + htt_stats_buf->buf_avail); + len += htt_dbg_out(buf + len, buf_len - len, "num_users = %u ", + htt_stats_buf->num_users); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_sfm_cmn_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_sfm_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "buf_total = %u", + htt_stats_buf->buf_total); + len += htt_dbg_out(buf + len, buf_len - len, "mem_empty = %u", + htt_stats_buf->mem_empty); + len += htt_dbg_out(buf + len, buf_len - len, "deallocate_bufs = %u", + htt_stats_buf->deallocate_bufs); + 
len += htt_dbg_out(buf + len, buf_len - len, "num_records = %u ", + htt_stats_buf->num_records); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_sring_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_sring_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__ring_id__arena__ep & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "ring_id = %u", + (htt_stats_buf->mac_id__ring_id__arena__ep & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "arena = %u", + (htt_stats_buf->mac_id__ring_id__arena__ep & 0xff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "ep = %u", + (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24); + len += htt_dbg_out(buf + len, buf_len - len, "base_addr_lsb = 0x%x", + htt_stats_buf->base_addr_lsb); + len += htt_dbg_out(buf + len, buf_len - len, "base_addr_msb = 0x%x", + htt_stats_buf->base_addr_msb); + len += htt_dbg_out(buf + len, buf_len - len, "ring_size = %u", + htt_stats_buf->ring_size); + len += htt_dbg_out(buf + len, buf_len - len, "elem_size = %u", + htt_stats_buf->elem_size); + len += htt_dbg_out(buf + len, buf_len - len, "num_avail_words = %u", + htt_stats_buf->num_avail_words__num_valid_words & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "num_valid_words = %u", + (htt_stats_buf->num_avail_words__num_valid_words & + 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "head_ptr = %u", + htt_stats_buf->head_ptr__tail_ptr & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "tail_ptr = %u", + (htt_stats_buf->head_ptr__tail_ptr & 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, 
"consumer_empty = %u", + htt_stats_buf->consumer_empty__producer_full & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "producer_full = %u", + (htt_stats_buf->consumer_empty__producer_full & + 0xffff0000) >> 16); + len += htt_dbg_out(buf + len, buf_len - len, "prefetch_count = %u", + htt_stats_buf->prefetch_count__internal_tail_ptr & 0xffff); + len += htt_dbg_out(buf + len, buf_len - len, "internal_tail_ptr = %u ", + (htt_stats_buf->prefetch_count__internal_tail_ptr & + 0xffff0000) >> 16); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_sring_cmn_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_sring_cmn_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "num_records = %u ", + htt_stats_buf->num_records); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u8 j; + char str_buf[htt_max_string_len] = {0}; + char *tx_gi[htt_tx_peer_stats_num_gi_counters]; + + for (j = 0; j < htt_tx_peer_stats_num_gi_counters; j++) + tx_gi[j] = kmalloc(htt_max_string_len, gfp_atomic); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_tx_pdev_rate_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "tx_ldpc = %u", + htt_stats_buf->tx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc 
= %u", + htt_stats_buf->ac_mu_mimo_tx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u", + htt_stats_buf->ax_mu_mimo_tx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "ofdma_tx_ldpc = %u", + htt_stats_buf->ofdma_tx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "rts_cnt = %u", + htt_stats_buf->rts_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rts_success = %u", + htt_stats_buf->rts_success); + len += htt_dbg_out(buf + len, buf_len - len, "ack_rssi = %u", + htt_stats_buf->ack_rssi); + + len += htt_dbg_out(buf + len, buf_len - len, + "legacy cck rates: 1 mbps: %u, 2 mbps: %u, 5.5 mbps: %u, 11 mbps: %u", + htt_stats_buf->tx_legacy_cck_rate[0], + htt_stats_buf->tx_legacy_cck_rate[1], + htt_stats_buf->tx_legacy_cck_rate[2], + htt_stats_buf->tx_legacy_cck_rate[3]); + + len += htt_dbg_out(buf + len, buf_len - len, + "legacy ofdm rates: 6 mbps: %u, 9 mbps: %u, 12 mbps: %u, 18 mbps: %u " + " 24 mbps: %u, 36 mbps: %u, 48 mbps: %u, 54 mbps: %u", + htt_stats_buf->tx_legacy_ofdm_rate[0], + htt_stats_buf->tx_legacy_ofdm_rate[1], + htt_stats_buf->tx_legacy_ofdm_rate[2], + htt_stats_buf->tx_legacy_ofdm_rate[3], + htt_stats_buf->tx_legacy_ofdm_rate[4], + htt_stats_buf->tx_legacy_ofdm_rate[5], + htt_stats_buf->tx_legacy_ofdm_rate[6], + htt_stats_buf->tx_legacy_ofdm_rate[7]); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf); + 
+ memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ofdma_tx_mcs, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_nss, + htt_tx_pdev_stats_num_spatial_streams); + len += htt_dbg_out(buf + len, buf_len - len, "tx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss, + htt_tx_pdev_stats_num_spatial_streams); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss, + htt_tx_pdev_stats_num_spatial_streams); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ofdma_tx_nss, + htt_tx_pdev_stats_num_spatial_streams); + len += htt_dbg_out(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_bw, + htt_tx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_bw = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw, + htt_tx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw, + htt_tx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->ofdma_tx_bw, + htt_tx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf); + + 
memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_stbc, + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_stbc = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_pream, + htt_tx_pdev_stats_num_preamble_types); + len += htt_dbg_out(buf + len, buf_len - len, "tx_pream = %s ", str_buf); + + len += htt_dbg_out(buf + len, buf_len - len, "he ltf: 1x: %u, 2x: %u, 4x: %u", + htt_stats_buf->tx_he_ltf[1], + htt_stats_buf->tx_he_ltf[2], + htt_stats_buf->tx_he_ltf[3]); + + /* su gi stats */ + for (j = 0; j < htt_tx_pdev_stats_num_gi_counters; j++) { + array_to_string(tx_gi[j], htt_stats_buf->tx_gi[j], + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_gi[%u] = %s ", + j, tx_gi[j]); + } + + /* ac mu-mimo gi stats */ + for (j = 0; j < htt_tx_pdev_stats_num_gi_counters; j++) { + array_to_string(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j], + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, + "ac_mu_mimo_tx_gi[%u] = %s ", + j, tx_gi[j]); + } + + /* ax mu-mimo gi stats */ + for (j = 0; j < htt_tx_pdev_stats_num_gi_counters; j++) { + array_to_string(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j], + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, + "ax_mu_mimo_tx_gi[%u] = %s ", + j, tx_gi[j]); + } + + /* dl ofdma gi stats */ + for (j = 0; j < htt_tx_pdev_stats_num_gi_counters; j++) { + array_to_string(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j], + htt_tx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ", + j, tx_gi[j]); + } + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->tx_dcm, + htt_tx_pdev_stats_num_dcm_counters); + len += htt_dbg_out(buf + len, buf_len - len, "tx_dcm = %s ", str_buf); + + for (j = 0; j < htt_tx_peer_stats_num_gi_counters; j++) + 
kfree(tx_gi[j]); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u8 j; + char *rssi_chain[htt_rx_pdev_stats_num_spatial_streams]; + char *rx_gi[htt_rx_pdev_stats_num_gi_counters]; + char str_buf[htt_max_string_len] = {0}; + + for (j = 0; j < htt_rx_pdev_stats_num_spatial_streams; j++) + rssi_chain[j] = kmalloc(htt_max_string_len, gfp_atomic); + + for (j = 0; j < htt_rx_pdev_stats_num_gi_counters; j++) + rx_gi[j] = kmalloc(htt_max_string_len, gfp_atomic); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_pdev_rate_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "nsts = %u", + htt_stats_buf->nsts); + len += htt_dbg_out(buf + len, buf_len - len, "rx_ldpc = %u", + htt_stats_buf->rx_ldpc); + len += htt_dbg_out(buf + len, buf_len - len, "rts_cnt = %u", + htt_stats_buf->rts_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_mgmt = %u", + htt_stats_buf->rssi_mgmt); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_data = %u", + htt_stats_buf->rssi_data); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_comb = %u", + htt_stats_buf->rssi_comb); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_in_dbm = %d", + htt_stats_buf->rssi_in_dbm); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_mcs, + htt_rx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_mcs = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_nss, + htt_rx_pdev_stats_num_spatial_streams); + len += 
htt_dbg_out(buf + len, buf_len - len, "rx_nss = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_dcm, + htt_rx_pdev_stats_num_dcm_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_dcm = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_stbc, + htt_rx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_stbc = %s ", str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_bw, + htt_rx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_bw = %s ", str_buf); + + for (j = 0; j < htt_rx_pdev_stats_num_spatial_streams; j++) { + array_to_string(rssi_chain[j], htt_stats_buf->rssi_chain[j], + htt_rx_pdev_stats_num_bw_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rssi_chain[%u] = %s ", + j, rssi_chain[j]); + } + + for (j = 0; j < htt_rx_pdev_stats_num_gi_counters; j++) { + array_to_string(rx_gi[j], htt_stats_buf->rx_gi[j], + htt_rx_pdev_stats_num_mcs_counters); + len += htt_dbg_out(buf + len, buf_len - len, "rx_gi[%u] = %s ", + j, rx_gi[j]); + } + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_pream, + htt_rx_pdev_stats_num_preamble_types); + len += htt_dbg_out(buf + len, buf_len - len, "rx_pream = %s", str_buf); + + for (j = 0; j < htt_rx_pdev_stats_num_spatial_streams; j++) + kfree(rssi_chain[j]); + + for (j = 0; j < htt_rx_pdev_stats_num_gi_counters; j++) + kfree(rx_gi[j]); + + len += htt_dbg_out(buf + len, buf_len - len, "rx_11ax_su_ext = %u", + htt_stats_buf->rx_11ax_su_ext); + len += htt_dbg_out(buf + len, buf_len - len, "rx_11ac_mumimo = %u", + htt_stats_buf->rx_11ac_mumimo); + len += htt_dbg_out(buf + len, buf_len - len, "rx_11ax_mumimo = %u", + htt_stats_buf->rx_11ax_mumimo); + len += htt_dbg_out(buf + len, buf_len - len, "rx_11ax_ofdma = %u", + htt_stats_buf->rx_11ax_ofdma); + len += 
htt_dbg_out(buf + len, buf_len - len, "txbf = %u", + htt_stats_buf->txbf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_legacy_cck_rate, + htt_rx_pdev_stats_num_legacy_cck_stats); + len += htt_dbg_out(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ", + str_buf); + + memset(str_buf, 0x0, htt_max_string_len); + array_to_string(str_buf, htt_stats_buf->rx_legacy_ofdm_rate, + htt_rx_pdev_stats_num_legacy_ofdm_stats); + len += htt_dbg_out(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ", + str_buf); + + len += htt_dbg_out(buf + len, buf_len - len, "rx_active_dur_us_low = %u", + htt_stats_buf->rx_active_dur_us_low); + len += htt_dbg_out(buf + len, buf_len - len, "rx_active_dur_us_high = %u ", + htt_stats_buf->rx_active_dur_us_high); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_soc_fw_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u", + htt_stats_buf->fw_reo_ring_data_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u", + htt_stats_buf->fw_to_host_data_msdu_bcmc); + len += htt_dbg_out(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u", + htt_stats_buf->fw_to_host_data_msdu_uc); + len += htt_dbg_out(buf + len, buf_len - len, + "ofld_remote_data_buf_recycle_cnt = %u", + htt_stats_buf->ofld_remote_data_buf_recycle_cnt); + len += htt_dbg_out(buf + len, buf_len - len, + "ofld_remote_free_buf_indication_cnt = %u", + htt_stats_buf->ofld_remote_free_buf_indication_cnt); + len += htt_dbg_out(buf + len, buf_len - len, + "ofld_buf_to_host_data_msdu_uc = 
%u", + htt_stats_buf->ofld_buf_to_host_data_msdu_uc); + len += htt_dbg_out(buf + len, buf_len - len, + "reo_fw_ring_to_host_data_msdu_uc = %u", + htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc); + len += htt_dbg_out(buf + len, buf_len - len, "wbm_sw_ring_reap = %u", + htt_stats_buf->wbm_sw_ring_reap); + len += htt_dbg_out(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u", + htt_stats_buf->wbm_forward_to_host_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u", + htt_stats_buf->wbm_target_recycle_cnt); + len += htt_dbg_out(buf + len, buf_len - len, + "target_refill_ring_recycle_cnt = %u", + htt_stats_buf->target_refill_ring_recycle_cnt); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char refill_ring_empty_cnt[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_rx_stats_refill_max_ring); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_rx_soc_fw_refill_ring_empty_tlv_v:"); + + array_to_string(refill_ring_empty_cnt, + htt_stats_buf->refill_ring_empty_cnt, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "refill_ring_empty_cnt = %s ", + refill_ring_empty_cnt); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + 
char rxdma_err_cnt[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_rx_rxdma_max_err_code); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v:"); + + array_to_string(rxdma_err_cnt, + htt_stats_buf->rxdma_err, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "rxdma_err = %s ", + rxdma_err_cnt); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char reo_err_cnt[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_rx_reo_max_err_code); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v:"); + + array_to_string(reo_err_cnt, + htt_stats_buf->reo_err, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "reo_err = %s ", + reo_err_cnt); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_reo_resource_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "sample_id = %u", + htt_stats_buf->sample_id); + len += htt_dbg_out(buf + len, buf_len - len, "total_max = %u", + htt_stats_buf->total_max); + len += htt_dbg_out(buf + len, buf_len - len, "total_avg = %u", + htt_stats_buf->total_avg); + len += htt_dbg_out(buf + len, 
buf_len - len, "total_sample = %u", + htt_stats_buf->total_sample); + len += htt_dbg_out(buf + len, buf_len - len, "non_zeros_avg = %u", + htt_stats_buf->non_zeros_avg); + len += htt_dbg_out(buf + len, buf_len - len, "non_zeros_sample = %u", + htt_stats_buf->non_zeros_sample); + len += htt_dbg_out(buf + len, buf_len - len, "last_non_zeros_max = %u", + htt_stats_buf->last_non_zeros_max); + len += htt_dbg_out(buf + len, buf_len - len, "last_non_zeros_min %u", + htt_stats_buf->last_non_zeros_min); + len += htt_dbg_out(buf + len, buf_len - len, "last_non_zeros_avg %u", + htt_stats_buf->last_non_zeros_avg); + len += htt_dbg_out(buf + len, buf_len - len, "last_non_zeros_sample %u ", + htt_stats_buf->last_non_zeros_sample); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char refill_ring_num_refill[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_rx_stats_refill_max_ring); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_rx_soc_fw_refill_ring_num_refill_tlv_v:"); + + array_to_string(refill_ring_num_refill, + htt_stats_buf->refill_ring_num_refill, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "refill_ring_num_refill = %s ", + refill_ring_num_refill); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + 
char fw_ring_mgmt_subtype[htt_max_string_len] = {0}; + char fw_ring_ctrl_subtype[htt_max_string_len] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_pdev_fw_stats_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "ppdu_recvd = %u", + htt_stats_buf->ppdu_recvd); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u", + htt_stats_buf->mpdu_cnt_fcs_ok); + len += htt_dbg_out(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u", + htt_stats_buf->mpdu_cnt_fcs_err); + len += htt_dbg_out(buf + len, buf_len - len, "tcp_msdu_cnt = %u", + htt_stats_buf->tcp_msdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u", + htt_stats_buf->tcp_ack_msdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "udp_msdu_cnt = %u", + htt_stats_buf->udp_msdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "other_msdu_cnt = %u", + htt_stats_buf->other_msdu_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u", + htt_stats_buf->fw_ring_mpdu_ind); + + array_to_string(fw_ring_mgmt_subtype, + htt_stats_buf->fw_ring_mgmt_subtype, + htt_stats_subtype_max); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ", + fw_ring_mgmt_subtype); + + array_to_string(fw_ring_ctrl_subtype, + htt_stats_buf->fw_ring_ctrl_subtype, + htt_stats_subtype_max); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ", + fw_ring_ctrl_subtype); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u", + htt_stats_buf->fw_ring_mcast_data_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u", + htt_stats_buf->fw_ring_bcast_data_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u", + htt_stats_buf->fw_ring_ucast_data_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u", + 
htt_stats_buf->fw_ring_null_data_msdu); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u", + htt_stats_buf->fw_ring_mpdu_drop); + len += htt_dbg_out(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u", + htt_stats_buf->ofld_local_data_ind_cnt); + len += htt_dbg_out(buf + len, buf_len - len, + "ofld_local_data_buf_recycle_cnt = %u", + htt_stats_buf->ofld_local_data_buf_recycle_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u", + htt_stats_buf->drx_local_data_ind_cnt); + len += htt_dbg_out(buf + len, buf_len - len, + "drx_local_data_buf_recycle_cnt = %u", + htt_stats_buf->drx_local_data_buf_recycle_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "local_nondata_ind_cnt = %u", + htt_stats_buf->local_nondata_ind_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u", + htt_stats_buf->local_nondata_buf_recycle_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u", + htt_stats_buf->fw_status_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u", + htt_stats_buf->fw_status_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u", + htt_stats_buf->fw_pkt_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u", + htt_stats_buf->fw_pkt_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u", + htt_stats_buf->fw_link_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u", + htt_stats_buf->fw_link_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u", + htt_stats_buf->host_pkt_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u", + htt_stats_buf->host_pkt_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, 
buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u", + htt_stats_buf->mon_pkt_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u", + htt_stats_buf->mon_pkt_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, buf_len - len, + "mon_status_buf_ring_refill_cnt = %u", + htt_stats_buf->mon_status_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u", + htt_stats_buf->mon_status_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u", + htt_stats_buf->mon_desc_buf_ring_refill_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u", + htt_stats_buf->mon_desc_buf_ring_empty_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u", + htt_stats_buf->mon_dest_ring_update_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u", + htt_stats_buf->mon_dest_ring_full_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_suspend_cnt = %u", + htt_stats_buf->rx_suspend_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u", + htt_stats_buf->rx_suspend_fail_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_resume_cnt = %u", + htt_stats_buf->rx_resume_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_resume_fail_cnt = %u", + htt_stats_buf->rx_resume_fail_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_ring_switch_cnt = %u", + htt_stats_buf->rx_ring_switch_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_ring_restore_cnt = %u", + htt_stats_buf->rx_ring_restore_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_flush_cnt = %u", + htt_stats_buf->rx_flush_cnt); + len += htt_dbg_out(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u ", + htt_stats_buf->rx_recovery_reset_cnt); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static 
inline void +htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char fw_ring_mpdu_err[htt_max_string_len] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_rx_pdev_fw_ring_mpdu_err_tlv_v:"); + + array_to_string(fw_ring_mpdu_err, + htt_stats_buf->fw_ring_mpdu_err, + htt_rx_stats_rxdma_max_err); + len += htt_dbg_out(buf + len, buf_len - len, "fw_ring_mpdu_err = %s ", + fw_ring_mpdu_err); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char fw_mpdu_drop[htt_max_string_len] = {0}; + u16 num_elems = min_t(u16, (tag_len >> 2), htt_rx_stats_fw_drop_reason_max); + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_pdev_fw_mpdu_drop_tlv_v:"); + + array_to_string(fw_mpdu_drop, + htt_stats_buf->fw_mpdu_drop, + num_elems); + len += htt_dbg_out(buf + len, buf_len - len, "fw_mpdu_drop = %s ", fw_mpdu_drop); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + char phy_errs[htt_max_string_len] = {0}; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_rx_pdev_fw_stats_phy_err_tlv:"); + len += htt_dbg_out(buf + 
len, buf_len - len, "mac_id__word = %u", + htt_stats_buf->mac_id__word); + len += htt_dbg_out(buf + len, buf_len - len, "tota_phy_err_nct = %u", + htt_stats_buf->total_phy_err_cnt); + + array_to_string(phy_errs, + htt_stats_buf->phy_err, + htt_stats_phy_err_max); + len += htt_dbg_out(buf + len, buf_len - len, "phy_errs = %s ", phy_errs); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, " htt_pdev_cca_stats_hist_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "chan_num = %u", + htt_stats_buf->chan_num); + len += htt_dbg_out(buf + len, buf_len - len, "num_records = %u", + htt_stats_buf->num_records); + len += htt_dbg_out(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x", + htt_stats_buf->valid_cca_counters_bitmap); + len += htt_dbg_out(buf + len, buf_len - len, "collection_interval = %u ", + htt_stats_buf->collection_interval); + + len += htt_dbg_out(buf + len, buf_len - len, + "htt_pdev_stats_cca_counters_tlv:(in usec)"); + len += htt_dbg_out(buf + len, buf_len - len, + "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|"); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, + "|%10u| %10u| %10u| %11u| %10u| %11u| 
%18u| %10u|", + htt_stats_buf->tx_frame_usec, + htt_stats_buf->rx_frame_usec, + htt_stats_buf->rx_clear_usec, + htt_stats_buf->my_rx_frame_usec, + htt_stats_buf->usec_cnt, + htt_stats_buf->med_rx_idle_usec, + htt_stats_buf->med_tx_idle_global_usec, + htt_stats_buf->cca_obss_usec); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_hw_stats_whal_tx_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "mac_id = %u", + htt_stats_buf->mac_id__word & 0xff); + len += htt_dbg_out(buf + len, buf_len - len, "last_unpause_ppdu_id = %u", + htt_stats_buf->last_unpause_ppdu_id); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u", + htt_stats_buf->hwsch_unpause_wait_tqm_write); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u", + htt_stats_buf->hwsch_dummy_tlv_skipped); + len += htt_dbg_out(buf + len, buf_len - len, + "hwsch_misaligned_offset_received = %u", + htt_stats_buf->hwsch_misaligned_offset_received); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_reset_count = %u", + htt_stats_buf->hwsch_reset_count); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_dev_reset_war = %u", + htt_stats_buf->hwsch_dev_reset_war); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_delayed_pause = %u", + htt_stats_buf->hwsch_delayed_pause); + len += htt_dbg_out(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u", + htt_stats_buf->hwsch_long_delayed_pause); + len += htt_dbg_out(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u", + htt_stats_buf->sch_rx_ppdu_no_response); + len += htt_dbg_out(buf + len, buf_len - len, 
"sch_selfgen_response = %u", + htt_stats_buf->sch_selfgen_response); + len += htt_dbg_out(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u ", + htt_stats_buf->sch_rx_sifs_resp_trigger); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_pdev_stats_twt_sessions_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "pdev_id = %u", + htt_stats_buf->pdev_id); + len += htt_dbg_out(buf + len, buf_len - len, "num_sessions = %u ", + htt_stats_buf->num_sessions); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_pdev_stats_twt_session_tlv(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "htt_pdev_stats_twt_session_tlv:"); + len += htt_dbg_out(buf + len, buf_len - len, "vdev_id = %u", + htt_stats_buf->vdev_id); + len += htt_dbg_out(buf + len, buf_len - len, + "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x", + htt_stats_buf->peer_mac.mac_addr_l32 & 0xff, + (htt_stats_buf->peer_mac.mac_addr_l32 & 0xff00) >> 8, + (htt_stats_buf->peer_mac.mac_addr_l32 & 0xff0000) >> 16, + (htt_stats_buf->peer_mac.mac_addr_l32 & 0xff000000) >> 24, + (htt_stats_buf->peer_mac.mac_addr_h16 & 0xff), + (htt_stats_buf->peer_mac.mac_addr_h16 & 0xff00) >> 8); + len += htt_dbg_out(buf + len, buf_len - len, "flow_id_flags = %u", + htt_stats_buf->flow_id_flags); + len += 
htt_dbg_out(buf + len, buf_len - len, "dialog_id = %u", + htt_stats_buf->dialog_id); + len += htt_dbg_out(buf + len, buf_len - len, "wake_dura_us = %u", + htt_stats_buf->wake_dura_us); + len += htt_dbg_out(buf + len, buf_len - len, "wake_intvl_us = %u", + htt_stats_buf->wake_intvl_us); + len += htt_dbg_out(buf + len, buf_len - len, "sp_offset_us = %u ", + htt_stats_buf->sp_offset_us); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void +htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + + len += htt_dbg_out(buf + len, buf_len - len, "obss tx success ppdu = %u", + htt_stats_buf->num_obss_tx_ppdu_success); + len += htt_dbg_out(buf + len, buf_len - len, "obss tx failures ppdu = %u ", + htt_stats_buf->num_obss_tx_ppdu_failure); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static inline void htt_htt_stats_debug_dump(const u32 *tag_buf, + struct debug_htt_stats_req *stats_req) +{ + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ath11k_htt_stats_buf_size; + u32 tlv_len = 0, i = 0, word_len = 0; + + tlv_len = field_get(htt_tlv_len, *tag_buf) + htt_tlv_hdr_len; + word_len = (tlv_len % 4) == 0 ? 
(tlv_len / 4) : ((tlv_len / 4) + 1); + len += htt_dbg_out(buf + len, buf_len - len, + "============================================"); + len += htt_dbg_out(buf + len, buf_len - len, + "hkdbg tlv dump: (tag_len=%u bytes, words=%u)", + tlv_len, word_len); + + for (i = 0; i + 3 < word_len; i += 4) { + len += htt_dbg_out(buf + len, buf_len - len, + "0x%08x 0x%08x 0x%08x 0x%08x", + tag_buf[i], tag_buf[i + 1], + tag_buf[i + 2], tag_buf[i + 3]); + } + + if (i + 3 == word_len) { + len += htt_dbg_out(buf + len, buf_len - len, "0x%08x 0x%08x 0x%08x ", + tag_buf[i], tag_buf[i + 1], tag_buf[i + 2]); + } else if (i + 2 == word_len) { + len += htt_dbg_out(buf + len, buf_len - len, "0x%08x 0x%08x ", + tag_buf[i], tag_buf[i + 1]); + } else if (i + 1 == word_len) { + len += htt_dbg_out(buf + len, buf_len - len, "0x%08x ", + tag_buf[i]); + } + len += htt_dbg_out(buf + len, buf_len - len, + "============================================"); + + if (len >= buf_len) + buf[buf_len - 1] = 0; + else + buf[len] = 0; + + stats_req->buf_len = len; +} + +static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab, + u16 tag, u16 len, const void *tag_buf, + void *user_data) +{ + struct debug_htt_stats_req *stats_req = user_data; + + switch (tag) { + case htt_stats_tx_pdev_cmn_tag: + htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req); + break; + case htt_stats_tx_pdev_underrun_tag: + htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req); + break; + case htt_stats_tx_pdev_sifs_tag: + htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req); + break; + case htt_stats_tx_pdev_flush_tag: + htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req); + break; + case htt_stats_tx_pdev_phy_err_tag: + htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req); + break; + case htt_stats_tx_pdev_sifs_hist_tag: + htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_pdev_tx_ppdu_stats_tag: + htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, 
stats_req); + break; + + case htt_stats_tx_pdev_tried_mpdu_cnt_hist_tag: + htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, + stats_req); + break; + + case htt_stats_string_tag: + htt_print_stats_string_tlv(tag_buf, len, stats_req); + break; + + case htt_stats_tx_hwq_cmn_tag: + htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_hwq_difs_latency_tag: + htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_hwq_cmd_result_tag: + htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_hwq_cmd_stall_tag: + htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_hwq_fes_status_tag: + htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_hwq_tried_mpdu_cnt_hist_tag: + htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_hwq_txop_used_cnt_hist_tag: + htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req); + break; + case htt_stats_tx_tqm_gen_mpdu_tag: + htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_tqm_list_mpdu_tag: + htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_tqm_list_mpdu_cnt_tag: + htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_tx_tqm_cmn_tag: + htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_tqm_pdev_tag: + htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req); + break; + + case htt_stats_tx_tqm_cmdq_status_tag: + htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_eapol_packets_tag: + htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_classify_failed_tag: + htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req); + break; + + case 
htt_stats_tx_de_classify_stats_tag: + htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_classify_status_tag: + htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_enqueue_packets_tag: + htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_enqueue_discard_tag: + htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_fw2wbm_ring_full_hist_tag: + htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req); + break; + + case htt_stats_tx_de_cmn_tag: + htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_ring_if_tag: + htt_print_ring_if_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_pdev_mu_mimo_stats_tag: + htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_sfm_cmn_tag: + htt_print_sfm_cmn_tlv(tag_buf, stats_req); + break; + + case htt_stats_sring_stats_tag: + htt_print_sring_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_rx_pdev_fw_stats_tag: + htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_rx_pdev_fw_ring_mpdu_err_tag: + htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req); + break; + + case htt_stats_rx_pdev_fw_mpdu_drop_tag: + htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_rx_soc_fw_stats_tag: + htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_rx_soc_fw_refill_ring_empty_tag: + htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_rx_soc_fw_refill_ring_num_refill_tag: + htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v( + tag_buf, len, stats_req); + break; + case htt_stats_rx_refill_rxdma_err_tag: + htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v( + tag_buf, len, stats_req); + break; + + case htt_stats_rx_refill_reo_err_tag: + 
htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v( + tag_buf, len, stats_req); + break; + + case htt_stats_rx_reo_resource_stats_tag: + htt_print_rx_reo_debug_stats_tlv_v( + tag_buf, stats_req); + break; + case htt_stats_rx_pdev_fw_stats_phy_err_tag: + htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_pdev_rate_stats_tag: + htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_rx_pdev_rate_stats_tag: + htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_pdev_scheduler_txq_stats_tag: + htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req); + break; + case htt_stats_tx_sched_cmn_tag: + htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_pdev_mpdu_stats_tag: + htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_sched_txq_cmd_posted_tag: + htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_ring_if_cmn_tag: + htt_print_ring_if_cmn_tlv(tag_buf, stats_req); + break; + + case htt_stats_sfm_client_user_tag: + htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_sfm_client_tag: + htt_print_sfm_client_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_tqm_error_stats_tag: + htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_sched_txq_cmd_reaped_tag: + htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_sring_cmn_tag: + htt_print_sring_cmn_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_sounding_stats_tag: + htt_print_tx_sounding_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_selfgen_ac_err_stats_tag: + htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_selfgen_cmn_stats_tag: + htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_selfgen_ac_stats_tag: + 
htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_selfgen_ax_stats_tag: + htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_selfgen_ax_err_stats_tag: + htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_hwq_mumimo_sch_stats_tag: + htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_hwq_mumimo_mpdu_stats_tag: + htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_hwq_mumimo_cmn_stats_tag: + htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_hw_intr_misc_tag: + htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req); + break; + + case htt_stats_hw_wd_timeout_tag: + htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req); + break; + + case htt_stats_hw_pdev_errs_tag: + htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req); + break; + + case htt_stats_counter_name_tag: + htt_print_counter_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_tid_details_tag: + htt_print_tx_tid_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_tid_details_v1_tag: + htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req); + break; + + case htt_stats_rx_tid_details_tag: + htt_print_rx_tid_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_peer_stats_cmn_tag: + htt_print_peer_stats_cmn_tlv(tag_buf, stats_req); + break; + + case htt_stats_peer_details_tag: + htt_print_peer_details_tlv(tag_buf, stats_req); + break; + + case htt_stats_peer_msdu_flowq_tag: + htt_print_msdu_flow_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_peer_tx_rate_stats_tag: + htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_peer_rx_rate_stats_tag: + htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req); + break; + + case htt_stats_tx_de_compl_stats_tag: + htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req); + break; + + case 
htt_stats_pdev_cca_1sec_hist_tag: + case htt_stats_pdev_cca_100msec_hist_tag: + case htt_stats_pdev_cca_stat_cumulative_tag: + htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req); + break; + + case htt_stats_pdev_cca_counters_tag: + htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req); + break; + + case htt_stats_whal_tx_tag: + htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req); + break; + + case htt_stats_pdev_twt_sessions_tag: + htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req); + break; + + case htt_stats_pdev_twt_session_tag: + htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req); + break; + + case htt_stats_sched_txq_sched_order_su_tag: + htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_sched_txq_sched_ineligibility_tag: + htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req); + break; + + case htt_stats_pdev_obss_pd_tag: + htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req); + break; + default: + break; + } + + return 0; +} + +void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct ath11k_htt_extd_stats_msg *msg; + struct debug_htt_stats_req *stats_req; + struct ath11k *ar; + u32 len; + u64 cookie; + int ret; + u8 pdev_id; + + msg = (struct ath11k_htt_extd_stats_msg *)skb->data; + cookie = msg->cookie; + + if (field_get(htt_stats_cookie_msb, cookie) != htt_stats_magic_value) { + ath11k_warn(ab, "received invalid htt ext stats event "); + return; + } + + pdev_id = field_get(htt_stats_cookie_lsb, cookie); + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); + rcu_read_unlock(); + if (!ar) { + ath11k_warn(ab, "failed to get ar for pdev_id %d ", pdev_id); + return; + } + + stats_req = ar->debug.htt_stats.stats_req; + if (!stats_req) + return; + + spin_lock_bh(&ar->debug.htt_stats.lock); + if (stats_req->done) { + spin_unlock_bh(&ar->debug.htt_stats.lock); + return; + } + stats_req->done = true; + 
spin_unlock_bh(&ar->debug.htt_stats.lock); + + len = field_get(htt_t2h_ext_stats_info1_length, msg->info1); + ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, + ath11k_dbg_htt_ext_stats_parse, + stats_req); + if (ret) + ath11k_warn(ab, "failed to parse tlv %d ", ret); + + complete(&stats_req->cmpln); +} + +static ssize_t ath11k_read_htt_stats_type(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char buf[32]; + size_t len; + + len = scnprintf(buf, sizeof(buf), "%u ", ar->debug.htt_stats.type); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath11k_write_htt_stats_type(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + u8 type; + int ret; + + ret = kstrtou8_from_user(user_buf, count, 0, &type); + if (ret) + return ret; + + if (type >= ath11k_dbg_htt_num_ext_stats) + return -e2big; + + if (type == ath11k_dbg_htt_ext_stats_reset || + type == ath11k_dbg_htt_ext_stats_peer_info) + return -eperm; + + ar->debug.htt_stats.type = type; + + ret = count; + + return ret; +} + +static const struct file_operations fops_htt_stats_type = { + .read = ath11k_read_htt_stats_type, + .write = ath11k_write_htt_stats_type, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type, + const u8 *mac_addr, + struct htt_ext_stats_cfg_params *cfg_params) +{ + if (!cfg_params) + return -einval; + + switch (type) { + case ath11k_dbg_htt_ext_stats_pdev_tx_hwq: + case ath11k_dbg_htt_ext_stats_tx_mu_hwq: + cfg_params->cfg0 = htt_stat_default_cfg0_all_hwqs; + break; + case ath11k_dbg_htt_ext_stats_pdev_tx_sched: + cfg_params->cfg0 = htt_stat_default_cfg0_all_txqs; + break; + case ath11k_dbg_htt_ext_stats_tqm_cmdq: + cfg_params->cfg0 = htt_stat_default_cfg0_all_cmdqs; + break; + case ath11k_dbg_htt_ext_stats_peer_info: + 
cfg_params->cfg0 = htt_stat_peer_info_mac_addr; + cfg_params->cfg0 |= field_prep(genmask(15, 1), + htt_peer_stats_req_mode_flush_tqm); + cfg_params->cfg1 = htt_stat_default_peer_req_type; + cfg_params->cfg2 |= field_prep(genmask(7, 0), mac_addr[0]); + cfg_params->cfg2 |= field_prep(genmask(15, 8), mac_addr[1]); + cfg_params->cfg2 |= field_prep(genmask(23, 16), mac_addr[2]); + cfg_params->cfg2 |= field_prep(genmask(31, 24), mac_addr[3]); + cfg_params->cfg3 |= field_prep(genmask(7, 0), mac_addr[4]); + cfg_params->cfg3 |= field_prep(genmask(15, 8), mac_addr[5]); + break; + case ath11k_dbg_htt_ext_stats_ring_if_info: + case ath11k_dbg_htt_ext_stats_srng_info: + cfg_params->cfg0 = htt_stat_default_cfg0_all_rings; + break; + case ath11k_dbg_htt_ext_stats_active_peers_list: + cfg_params->cfg0 = htt_stat_default_cfg0_active_peers; + break; + case ath11k_dbg_htt_ext_stats_pdev_cca_stats: + cfg_params->cfg0 = htt_stat_default_cfg0_cca_cumulative; + break; + case ath11k_dbg_htt_ext_stats_tx_sounding_info: + cfg_params->cfg0 = htt_stat_default_cfg0_active_vdevs; + break; + default: + break; + } + + return 0; +} + +int ath11k_dbg_htt_stats_req(struct ath11k *ar) +{ + struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req; + u8 type = stats_req->type; + u64 cookie = 0; + int ret, pdev_id = ar->pdev->pdev_id; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + + init_completion(&stats_req->cmpln); + + stats_req->done = false; + stats_req->pdev_id = pdev_id; + + cookie = field_prep(htt_stats_cookie_msb, htt_stats_magic_value) | + field_prep(htt_stats_cookie_lsb, pdev_id); + + ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr, + &cfg_params); + if (ret) { + ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d ", ret); + return ret; + } + + ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie); + if (ret) { + ath11k_warn(ar->ab, "failed to send htt stats request: %d ", ret); + mutex_unlock(&ar->conf_mutex); + return 
ret; + } + + while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * hz)) { + spin_lock_bh(&ar->debug.htt_stats.lock); + if (!stats_req->done) { + stats_req->done = true; + spin_unlock_bh(&ar->debug.htt_stats.lock); + ath11k_warn(ar->ab, "stats request timed out "); + return -etimedout; + } + spin_unlock_bh(&ar->debug.htt_stats.lock); + } + + return 0; +} + +static int ath11k_open_htt_stats(struct inode *inode, struct file *file) +{ + struct ath11k *ar = inode->i_private; + struct debug_htt_stats_req *stats_req; + u8 type = ar->debug.htt_stats.type; + int ret; + + if (type == ath11k_dbg_htt_ext_stats_reset) + return -eperm; + + stats_req = vzalloc(sizeof(*stats_req) + ath11k_htt_stats_buf_size); + if (!stats_req) + return -enomem; + + mutex_lock(&ar->conf_mutex); + ar->debug.htt_stats.stats_req = stats_req; + stats_req->type = type; + ret = ath11k_dbg_htt_stats_req(ar); + mutex_unlock(&ar->conf_mutex); + if (ret < 0) + goto out; + + file->private_data = stats_req; + return 0; +out: + vfree(stats_req); + return ret; +} + +static int ath11k_release_htt_stats(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + return 0; +} + +static ssize_t ath11k_read_htt_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct debug_htt_stats_req *stats_req = file->private_data; + char *buf; + u32 length = 0; + + buf = stats_req->buf; + length = min_t(u32, stats_req->buf_len, ath11k_htt_stats_buf_size); + return simple_read_from_buffer(user_buf, count, ppos, buf, length); +} + +static const struct file_operations fops_dump_htt_stats = { + .open = ath11k_open_htt_stats, + .release = ath11k_release_htt_stats, + .read = ath11k_read_htt_stats, + .owner = this_module, + .llseek = default_llseek, +}; + +static ssize_t ath11k_read_htt_stats_reset(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char buf[32]; + size_t len; + + len = scnprintf(buf, sizeof(buf), 
"%u ", ar->debug.htt_stats.reset); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath11k_write_htt_stats_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + u8 type; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + int ret; + + ret = kstrtou8_from_user(user_buf, count, 0, &type); + if (ret) + return ret; + + if (type >= ath11k_dbg_htt_num_ext_stats || + type == ath11k_dbg_htt_ext_stats_reset) + return -e2big; + + mutex_lock(&ar->conf_mutex); + cfg_params.cfg0 = htt_stat_default_reset_start_offset; + cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type); + ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, + ath11k_dbg_htt_ext_stats_reset, + &cfg_params, + 0ull); + if (ret) { + ath11k_warn(ar->ab, "failed to send htt stats request: %d ", ret); + mutex_unlock(&ar->conf_mutex); + return ret; + } + + ar->debug.htt_stats.reset = type; + mutex_unlock(&ar->conf_mutex); + + ret = count; + + return ret; +} + +static const struct file_operations fops_htt_stats_reset = { + .read = ath11k_read_htt_stats_reset, + .write = ath11k_write_htt_stats_reset, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +void ath11k_debug_htt_stats_init(struct ath11k *ar) +{ + spin_lock_init(&ar->debug.htt_stats.lock); + debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_type); + debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev, + ar, &fops_dump_htt_stats); + debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_reset); +} diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/debug_htt_stats.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef debug_htt_stats_h +#define debug_htt_stats_h + +#define htt_stats_cookie_lsb genmask_ull(31, 0) +#define htt_stats_cookie_msb genmask_ull(63, 32) +#define htt_stats_magic_value 0xf0f0f0f0 + +enum htt_tlv_tag_t { + htt_stats_tx_pdev_cmn_tag = 0, + htt_stats_tx_pdev_underrun_tag = 1, + htt_stats_tx_pdev_sifs_tag = 2, + htt_stats_tx_pdev_flush_tag = 3, + htt_stats_tx_pdev_phy_err_tag = 4, + htt_stats_string_tag = 5, + htt_stats_tx_hwq_cmn_tag = 6, + htt_stats_tx_hwq_difs_latency_tag = 7, + htt_stats_tx_hwq_cmd_result_tag = 8, + htt_stats_tx_hwq_cmd_stall_tag = 9, + htt_stats_tx_hwq_fes_status_tag = 10, + htt_stats_tx_tqm_gen_mpdu_tag = 11, + htt_stats_tx_tqm_list_mpdu_tag = 12, + htt_stats_tx_tqm_list_mpdu_cnt_tag = 13, + htt_stats_tx_tqm_cmn_tag = 14, + htt_stats_tx_tqm_pdev_tag = 15, + htt_stats_tx_tqm_cmdq_status_tag = 16, + htt_stats_tx_de_eapol_packets_tag = 17, + htt_stats_tx_de_classify_failed_tag = 18, + htt_stats_tx_de_classify_stats_tag = 19, + htt_stats_tx_de_classify_status_tag = 20, + htt_stats_tx_de_enqueue_packets_tag = 21, + htt_stats_tx_de_enqueue_discard_tag = 22, + htt_stats_tx_de_cmn_tag = 23, + htt_stats_ring_if_tag = 24, + htt_stats_tx_pdev_mu_mimo_stats_tag = 25, + htt_stats_sfm_cmn_tag = 26, + htt_stats_sring_stats_tag = 27, + htt_stats_rx_pdev_fw_stats_tag = 28, + htt_stats_rx_pdev_fw_ring_mpdu_err_tag = 29, + htt_stats_rx_pdev_fw_mpdu_drop_tag = 30, + htt_stats_rx_soc_fw_stats_tag = 31, + htt_stats_rx_soc_fw_refill_ring_empty_tag = 32, + htt_stats_rx_soc_fw_refill_ring_num_refill_tag = 33, + htt_stats_tx_pdev_rate_stats_tag = 34, + htt_stats_rx_pdev_rate_stats_tag = 35, + htt_stats_tx_pdev_scheduler_txq_stats_tag = 36, + htt_stats_tx_sched_cmn_tag = 37, + htt_stats_tx_pdev_mumimo_mpdu_stats_tag = 38, + htt_stats_sched_txq_cmd_posted_tag = 39, + htt_stats_ring_if_cmn_tag = 40, + htt_stats_sfm_client_user_tag = 41, + htt_stats_sfm_client_tag = 42, + htt_stats_tx_tqm_error_stats_tag = 43, + htt_stats_sched_txq_cmd_reaped_tag = 
44, + htt_stats_sring_cmn_tag = 45, + htt_stats_tx_selfgen_ac_err_stats_tag = 46, + htt_stats_tx_selfgen_cmn_stats_tag = 47, + htt_stats_tx_selfgen_ac_stats_tag = 48, + htt_stats_tx_selfgen_ax_stats_tag = 49, + htt_stats_tx_selfgen_ax_err_stats_tag = 50, + htt_stats_tx_hwq_mumimo_sch_stats_tag = 51, + htt_stats_tx_hwq_mumimo_mpdu_stats_tag = 52, + htt_stats_tx_hwq_mumimo_cmn_stats_tag = 53, + htt_stats_hw_intr_misc_tag = 54, + htt_stats_hw_wd_timeout_tag = 55, + htt_stats_hw_pdev_errs_tag = 56, + htt_stats_counter_name_tag = 57, + htt_stats_tx_tid_details_tag = 58, + htt_stats_rx_tid_details_tag = 59, + htt_stats_peer_stats_cmn_tag = 60, + htt_stats_peer_details_tag = 61, + htt_stats_peer_tx_rate_stats_tag = 62, + htt_stats_peer_rx_rate_stats_tag = 63, + htt_stats_peer_msdu_flowq_tag = 64, + htt_stats_tx_de_compl_stats_tag = 65, + htt_stats_whal_tx_tag = 66, + htt_stats_tx_pdev_sifs_hist_tag = 67, + htt_stats_rx_pdev_fw_stats_phy_err_tag = 68, + htt_stats_tx_tid_details_v1_tag = 69, + htt_stats_pdev_cca_1sec_hist_tag = 70, + htt_stats_pdev_cca_100msec_hist_tag = 71, + htt_stats_pdev_cca_stat_cumulative_tag = 72, + htt_stats_pdev_cca_counters_tag = 73, + htt_stats_tx_pdev_mpdu_stats_tag = 74, + htt_stats_pdev_twt_sessions_tag = 75, + htt_stats_pdev_twt_session_tag = 76, + htt_stats_rx_refill_rxdma_err_tag = 77, + htt_stats_rx_refill_reo_err_tag = 78, + htt_stats_rx_reo_resource_stats_tag = 79, + htt_stats_tx_sounding_stats_tag = 80, + htt_stats_tx_pdev_tx_ppdu_stats_tag = 81, + htt_stats_tx_pdev_tried_mpdu_cnt_hist_tag = 82, + htt_stats_tx_hwq_tried_mpdu_cnt_hist_tag = 83, + htt_stats_tx_hwq_txop_used_cnt_hist_tag = 84, + htt_stats_tx_de_fw2wbm_ring_full_hist_tag = 85, + htt_stats_sched_txq_sched_order_su_tag = 86, + htt_stats_sched_txq_sched_ineligibility_tag = 87, + htt_stats_pdev_obss_pd_tag = 88, + + htt_stats_max_tag, +}; + +#define htt_stats_max_string_sz32 4 +#define htt_stats_macid_invalid 0xff +#define htt_tx_hwq_max_difs_latency_bins 10 +#define 
htt_tx_hwq_max_cmd_result_stats 13 +#define htt_tx_hwq_max_cmd_stall_stats 5 +#define htt_tx_hwq_max_fes_result_stats 10 + +enum htt_tx_pdev_underrun_enum { + htt_stats_tx_pdev_no_data_underrun = 0, + htt_stats_tx_pdev_data_underrun_between_mpdu = 1, + htt_stats_tx_pdev_data_underrun_within_mpdu = 2, + htt_tx_pdev_max_urrn_stats = 3, +}; + +#define htt_tx_pdev_max_flush_reason_stats 71 +#define htt_tx_pdev_max_sifs_burst_stats 9 +#define htt_tx_pdev_max_sifs_burst_hist_stats 10 +#define htt_tx_pdev_max_phy_err_stats 18 +#define htt_tx_pdev_sched_tx_mode_max 4 +#define htt_tx_pdev_num_sched_order_log 20 + +#define htt_rx_stats_refill_max_ring 4 +#define htt_rx_stats_rxdma_max_err 16 +#define htt_rx_stats_fw_drop_reason_max 16 + +/* bytes stored in little endian order */ +/* length should be multiple of dword */ +struct htt_stats_string_tlv { + u32 data[0]; /* can be variable length */ +} __packed; + +/* == tx pdev stats == */ +struct htt_tx_pdev_stats_cmn_tlv { + u32 mac_id__word; + u32 hw_queued; + u32 hw_reaped; + u32 underrun; + u32 hw_paused; + u32 hw_flush; + u32 hw_filt; + u32 tx_abort; + u32 mpdu_requed; + u32 tx_xretry; + u32 data_rc; + u32 mpdu_dropped_xretry; + u32 illgl_rate_phy_err; + u32 cont_xretry; + u32 tx_timeout; + u32 pdev_resets; + u32 phy_underrun; + u32 txop_ovf; + u32 seq_posted; + u32 seq_failed_queueing; + u32 seq_completed; + u32 seq_restarted; + u32 mu_seq_posted; + u32 seq_switch_hw_paused; + u32 next_seq_posted_dsr; + u32 seq_posted_isr; + u32 seq_ctrl_cached; + u32 mpdu_count_tqm; + u32 msdu_count_tqm; + u32 mpdu_removed_tqm; + u32 msdu_removed_tqm; + u32 mpdus_sw_flush; + u32 mpdus_hw_filter; + u32 mpdus_truncated; + u32 mpdus_ack_failed; + u32 mpdus_expired; + u32 mpdus_seq_hw_retry; + u32 ack_tlv_proc; + u32 coex_abort_mpdu_cnt_valid; + u32 coex_abort_mpdu_cnt; + u32 num_total_ppdus_tried_ota; + u32 num_data_ppdus_tried_ota; + u32 local_ctrl_mgmt_enqued; + u32 local_ctrl_mgmt_freed; + u32 local_data_enqued; + u32 local_data_freed; + 
u32 mpdu_tried; + u32 isr_wait_seq_posted; + + u32 tx_active_dur_us_low; + u32 tx_active_dur_us_high; +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_pdev_stats_urrn_tlv_v { + u32 urrn_stats[0]; /* htt_tx_pdev_max_urrn_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_pdev_stats_flush_tlv_v { + u32 flush_errs[0]; /* htt_tx_pdev_max_flush_reason_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_pdev_stats_sifs_tlv_v { + u32 sifs_status[0]; /* htt_tx_pdev_max_sifs_burst_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_pdev_stats_phy_err_tlv_v { + u32 phy_errs[0]; /* htt_tx_pdev_max_phy_err_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_pdev_stats_sifs_hist_tlv_v { + u32 sifs_hist_status[0]; /* htt_tx_pdev_sifs_burst_hist_stats */ +}; + +struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v { + u32 num_data_ppdus_legacy_su; + u32 num_data_ppdus_ac_su; + u32 num_data_ppdus_ax_su; + u32 num_data_ppdus_ac_su_txbf; + u32 num_data_ppdus_ax_su_txbf; +}; + +/* note: variable length tlv, use length spec to infer array size . + * + * tried_mpdu_cnt_hist is the histogram of mpdus tries per hwq. + * the tries here is the count of the mpdus within a ppdu that the + * hw had attempted to transmit on air, for the hwsch schedule + * command submitted by fw.it is not the retry attempts. + * the histogram bins are 0-29, 30-59, 60-89 and so on. the are + * 10 bins in this histogram. 
they are defined in fw using the + * following macros + * #define wal_max_tried_mpdu_cnt_histogram 9 + * #define wal_tried_mpdu_cnt_histogram_interval 30 + */ +struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v { + u32 hist_bin_size; + u32 tried_mpdu_cnt_hist[0]; /* htt_tx_pdev_tried_mpdu_cnt_hist */ +}; + +/* == soc error stats == */ + +/* =============== pdev error stats ============== */ +#define htt_stats_max_hw_intr_name_len 8 +struct htt_hw_stats_intr_misc_tlv { + /* stored as little endian */ + u8 hw_intr_name[htt_stats_max_hw_intr_name_len]; + u32 mask; + u32 count; +}; + +#define htt_stats_max_hw_module_name_len 8 +struct htt_hw_stats_wd_timeout_tlv { + /* stored as little endian */ + u8 hw_module_name[htt_stats_max_hw_module_name_len]; + u32 count; +}; + +struct htt_hw_stats_pdev_errs_tlv { + u32 mac_id__word; /* bit [ 7 : 0] : mac_id */ + u32 tx_abort; + u32 tx_abort_fail_count; + u32 rx_abort; + u32 rx_abort_fail_count; + u32 warm_reset; + u32 cold_reset; + u32 tx_flush; + u32 tx_glb_reset; + u32 tx_txq_reset; + u32 rx_timeout_reset; +}; + +struct htt_hw_stats_whal_tx_tlv { + u32 mac_id__word; + u32 last_unpause_ppdu_id; + u32 hwsch_unpause_wait_tqm_write; + u32 hwsch_dummy_tlv_skipped; + u32 hwsch_misaligned_offset_received; + u32 hwsch_reset_count; + u32 hwsch_dev_reset_war; + u32 hwsch_delayed_pause; + u32 hwsch_long_delayed_pause; + u32 sch_rx_ppdu_no_response; + u32 sch_selfgen_response; + u32 sch_rx_sifs_resp_trigger; +}; + +/* ============ peer stats ============ */ +struct htt_msdu_flow_stats_tlv { + u32 last_update_timestamp; + u32 last_add_timestamp; + u32 last_remove_timestamp; + u32 total_processed_msdu_count; + u32 cur_msdu_count_in_flowq; + u32 sw_peer_id; + u32 tx_flow_no__tid_num__drop_rule; + u32 last_cycle_enqueue_count; + u32 last_cycle_dequeue_count; + u32 last_cycle_drop_count; + u32 current_drop_th; +}; + +#define max_htt_tid_name 8 + +/* tidq stats */ +struct htt_tx_tid_stats_tlv { + /* stored as little endian */ + u8 
tid_name[max_htt_tid_name]; + u32 sw_peer_id__tid_num; + u32 num_sched_pending__num_ppdu_in_hwq; + u32 tid_flags; + u32 hw_queued; + u32 hw_reaped; + u32 mpdus_hw_filter; + + u32 qdepth_bytes; + u32 qdepth_num_msdu; + u32 qdepth_num_mpdu; + u32 last_scheduled_tsmp; + u32 pause_module_id; + u32 block_module_id; + u32 tid_tx_airtime; +}; + +/* tidq stats */ +struct htt_tx_tid_stats_v1_tlv { + /* stored as little endian */ + u8 tid_name[max_htt_tid_name]; + u32 sw_peer_id__tid_num; + u32 num_sched_pending__num_ppdu_in_hwq; + u32 tid_flags; + u32 max_qdepth_bytes; + u32 max_qdepth_n_msdus; + u32 rsvd; + + u32 qdepth_bytes; + u32 qdepth_num_msdu; + u32 qdepth_num_mpdu; + u32 last_scheduled_tsmp; + u32 pause_module_id; + u32 block_module_id; + u32 tid_tx_airtime; + u32 allow_n_flags; + u32 sendn_frms_allowed; +}; + +struct htt_rx_tid_stats_tlv { + u32 sw_peer_id__tid_num; + u8 tid_name[max_htt_tid_name]; + u32 dup_in_reorder; + u32 dup_past_outside_window; + u32 dup_past_within_window; + u32 rxdesc_err_decrypt; + u32 tid_rx_airtime; +}; + +#define htt_max_counter_name 8 +struct htt_counter_tlv { + u8 counter_name[htt_max_counter_name]; + u32 count; +}; + +struct htt_peer_stats_cmn_tlv { + u32 ppdu_cnt; + u32 mpdu_cnt; + u32 msdu_cnt; + u32 pause_bitmap; + u32 block_bitmap; + u32 current_timestamp; + u32 peer_tx_airtime; + u32 peer_rx_airtime; + s32 rssi; + u32 peer_enqueued_count_low; + u32 peer_enqueued_count_high; + u32 peer_dequeued_count_low; + u32 peer_dequeued_count_high; + u32 peer_dropped_count_low; + u32 peer_dropped_count_high; + u32 ppdu_transmitted_bytes_low; + u32 ppdu_transmitted_bytes_high; + u32 peer_ttl_removed_count; + u32 inactive_time; +}; + +struct htt_peer_details_tlv { + u32 peer_type; + u32 sw_peer_id; + u32 vdev_pdev_ast_idx; + struct htt_mac_addr mac_addr; + u32 peer_flags; + u32 qpeer_flags; +}; + +enum htt_stats_param_type { + htt_stats_pream_ofdm, + htt_stats_pream_cck, + htt_stats_pream_ht, + htt_stats_pream_vht, + htt_stats_pream_he, + 
htt_stats_pream_rsvd, + htt_stats_pream_rsvd1, + + htt_stats_pream_count, +}; + +#define htt_tx_peer_stats_num_mcs_counters 12 +#define htt_tx_peer_stats_num_gi_counters 4 +#define htt_tx_peer_stats_num_dcm_counters 5 +#define htt_tx_peer_stats_num_bw_counters 4 +#define htt_tx_peer_stats_num_spatial_streams 8 +#define htt_tx_peer_stats_num_preamble_types htt_stats_pream_count + +struct htt_tx_peer_rate_stats_tlv { + u32 tx_ldpc; + u32 rts_cnt; + u32 ack_rssi; + + u32 tx_mcs[htt_tx_peer_stats_num_mcs_counters]; + u32 tx_su_mcs[htt_tx_peer_stats_num_mcs_counters]; + u32 tx_mu_mcs[htt_tx_peer_stats_num_mcs_counters]; + /* element 0,1, ...7 -> nss 1,2, ...8 */ + u32 tx_nss[htt_tx_peer_stats_num_spatial_streams]; + /* element 0: 20 mhz, 1: 40 mhz, 2: 80 mhz, 3: 160 and 80+80 mhz */ + u32 tx_bw[htt_tx_peer_stats_num_bw_counters]; + u32 tx_stbc[htt_tx_peer_stats_num_mcs_counters]; + u32 tx_pream[htt_tx_peer_stats_num_preamble_types]; + + /* counters to track number of tx packets in each gi + * (400us, 800us, 1600us & 3200us) in each mcs (0-11) + */ + u32 tx_gi[htt_tx_peer_stats_num_gi_counters][htt_tx_peer_stats_num_mcs_counters]; + + /* counters to track packets in dcm mcs (mcs 0, 1, 3, 4) */ + u32 tx_dcm[htt_tx_peer_stats_num_dcm_counters]; + +}; + +#define htt_rx_peer_stats_num_mcs_counters 12 +#define htt_rx_peer_stats_num_gi_counters 4 +#define htt_rx_peer_stats_num_dcm_counters 5 +#define htt_rx_peer_stats_num_bw_counters 4 +#define htt_rx_peer_stats_num_spatial_streams 8 +#define htt_rx_peer_stats_num_preamble_types htt_stats_pream_count + +struct htt_rx_peer_rate_stats_tlv { + u32 nsts; + + /* number of rx ldpc packets */ + u32 rx_ldpc; + /* number of rx rts packets */ + u32 rts_cnt; + + u32 rssi_mgmt; /* units = db above noise floor */ + u32 rssi_data; /* units = db above noise floor */ + u32 rssi_comb; /* units = db above noise floor */ + u32 rx_mcs[htt_rx_peer_stats_num_mcs_counters]; + /* element 0,1, ...7 -> nss 1,2, ...8 */ + u32 
rx_nss[htt_rx_peer_stats_num_spatial_streams]; + u32 rx_dcm[htt_rx_peer_stats_num_dcm_counters]; + u32 rx_stbc[htt_rx_peer_stats_num_mcs_counters]; + /* element 0: 20 mhz, 1: 40 mhz, 2: 80 mhz, 3: 160 and 80+80 mhz */ + u32 rx_bw[htt_rx_peer_stats_num_bw_counters]; + u32 rx_pream[htt_rx_peer_stats_num_preamble_types]; + /* units = db above noise floor */ + u8 rssi_chain[htt_rx_peer_stats_num_spatial_streams] + [htt_rx_peer_stats_num_bw_counters]; + + /* counters to track number of rx packets in each gi in each mcs (0-11) */ + u32 rx_gi[htt_rx_peer_stats_num_gi_counters] + [htt_rx_peer_stats_num_mcs_counters]; +}; + +enum htt_peer_stats_req_mode { + htt_peer_stats_req_mode_no_query, + htt_peer_stats_req_mode_query_tqm, + htt_peer_stats_req_mode_flush_tqm, +}; + +enum htt_peer_stats_tlv_enum { + htt_peer_stats_cmn_tlv = 0, + htt_peer_details_tlv = 1, + htt_tx_peer_rate_stats_tlv = 2, + htt_rx_peer_rate_stats_tlv = 3, + htt_tx_tid_stats_tlv = 4, + htt_rx_tid_stats_tlv = 5, + htt_msdu_flow_stats_tlv = 6, + + htt_peer_stats_max_tlv = 31, +}; + +/* =========== mumimo hwq stats =========== */ +/* mu mimo stats per hwq */ +struct htt_tx_hwq_mu_mimo_sch_stats_tlv { + u32 mu_mimo_sch_posted; + u32 mu_mimo_sch_failed; + u32 mu_mimo_ppdu_posted; +}; + +struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv { + u32 mu_mimo_mpdus_queued_usr; + u32 mu_mimo_mpdus_tried_usr; + u32 mu_mimo_mpdus_failed_usr; + u32 mu_mimo_mpdus_requeued_usr; + u32 mu_mimo_err_no_ba_usr; + u32 mu_mimo_mpdu_underrun_usr; + u32 mu_mimo_ampdu_underrun_usr; +}; + +struct htt_tx_hwq_mu_mimo_cmn_stats_tlv { + u32 mac_id__hwq_id__word; +}; + +/* == tx hwq stats == */ +struct htt_tx_hwq_stats_cmn_tlv { + u32 mac_id__hwq_id__word; + + /* ppdu level stats */ + u32 xretry; + u32 underrun_cnt; + u32 flush_cnt; + u32 filt_cnt; + u32 null_mpdu_bmap; + u32 user_ack_failure; + u32 ack_tlv_proc; + u32 sched_id_proc; + u32 null_mpdu_tx_count; + u32 mpdu_bmap_not_recvd; + + /* selfgen stats per hwq */ + u32 num_bar; + u32 rts; + u32 
cts2self; + u32 qos_null; + + /* mpdu level stats */ + u32 mpdu_tried_cnt; + u32 mpdu_queued_cnt; + u32 mpdu_ack_fail_cnt; + u32 mpdu_filt_cnt; + u32 false_mpdu_ack_count; + + u32 txq_timeout; +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_hwq_difs_latency_stats_tlv_v { + u32 hist_intvl; + /* histogram of ppdu post to hwsch - > cmd status received */ + u32 difs_latency_hist[0]; /* htt_tx_hwq_max_difs_latency_bins */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_hwq_cmd_result_stats_tlv_v { + /* histogram of sched cmd result */ + u32 cmd_result[0]; /* htt_tx_hwq_max_cmd_result_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_hwq_cmd_stall_stats_tlv_v { + /* histogram of various pause conitions */ + u32 cmd_stall_status[0]; /* htt_tx_hwq_max_cmd_stall_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_hwq_fes_result_stats_tlv_v { + /* histogram of number of user fes result */ + u32 fes_result[0]; /* htt_tx_hwq_max_fes_result_stats */ +}; + +/* note: variable length tlv, use length spec to infer array size + * + * the hwq_tried_mpdu_cnt_hist is a histogram of mpdus tries per hwq. + * the tries here is the count of the mpdus within a ppdu that the hw + * had attempted to transmit on air, for the hwsch schedule command + * submitted by fw in this hwq .it is not the retry attempts. the + * histogram bins are 0-29, 30-59, 60-89 and so on. the are 10 bins + * in this histogram. 
+ * they are defined in fw using the following macros + * #define wal_max_tried_mpdu_cnt_histogram 9 + * #define wal_tried_mpdu_cnt_histogram_interval 30 + */ +struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v { + u32 hist_bin_size; + /* histogram of number of mpdus on tried mpdu */ + u32 tried_mpdu_cnt_hist[0]; /* htt_tx_hwq_tried_mpdu_cnt_hist */ +}; + +/* note: variable length tlv, use length spec to infer array size + * + * the txop_used_cnt_hist is the histogram of txop per burst. after + * completing the burst, we identify the txop used in the burst and + * incr the corresponding bin. + * each bin represents 1ms & we have 10 bins in this histogram. + * they are deined in fw using the following macros + * #define wal_max_txop_used_cnt_histogram 10 + * #define wal_txop_used_histogram_interval 1000 ( 1 ms ) + */ +struct htt_tx_hwq_txop_used_cnt_hist_tlv_v { + /* histogram of txop used cnt */ + u32 txop_used_cnt_hist[0]; /* htt_tx_hwq_txop_used_cnt_hist */ +}; + +/* == tx selfgen stats == */ +struct htt_tx_selfgen_cmn_stats_tlv { + u32 mac_id__word; + u32 su_bar; + u32 rts; + u32 cts2self; + u32 qos_null; + u32 delayed_bar_1; /* mu user 1 */ + u32 delayed_bar_2; /* mu user 2 */ + u32 delayed_bar_3; /* mu user 3 */ + u32 delayed_bar_4; /* mu user 4 */ + u32 delayed_bar_5; /* mu user 5 */ + u32 delayed_bar_6; /* mu user 6 */ + u32 delayed_bar_7; /* mu user 7 */ +}; + +struct htt_tx_selfgen_ac_stats_tlv { + /* 11ac */ + u32 ac_su_ndpa; + u32 ac_su_ndp; + u32 ac_mu_mimo_ndpa; + u32 ac_mu_mimo_ndp; + u32 ac_mu_mimo_brpoll_1; /* mu user 1 */ + u32 ac_mu_mimo_brpoll_2; /* mu user 2 */ + u32 ac_mu_mimo_brpoll_3; /* mu user 3 */ +}; + +struct htt_tx_selfgen_ax_stats_tlv { + /* 11ax */ + u32 ax_su_ndpa; + u32 ax_su_ndp; + u32 ax_mu_mimo_ndpa; + u32 ax_mu_mimo_ndp; + u32 ax_mu_mimo_brpoll_1; /* mu user 1 */ + u32 ax_mu_mimo_brpoll_2; /* mu user 2 */ + u32 ax_mu_mimo_brpoll_3; /* mu user 3 */ + u32 ax_mu_mimo_brpoll_4; /* mu user 4 */ + u32 ax_mu_mimo_brpoll_5; /* mu user 5 */ + 
u32 ax_mu_mimo_brpoll_6; /* mu user 6 */ + u32 ax_mu_mimo_brpoll_7; /* mu user 7 */ + u32 ax_basic_trigger; + u32 ax_bsr_trigger; + u32 ax_mu_bar_trigger; + u32 ax_mu_rts_trigger; +}; + +struct htt_tx_selfgen_ac_err_stats_tlv { + /* 11ac error stats */ + u32 ac_su_ndp_err; + u32 ac_su_ndpa_err; + u32 ac_mu_mimo_ndpa_err; + u32 ac_mu_mimo_ndp_err; + u32 ac_mu_mimo_brp1_err; + u32 ac_mu_mimo_brp2_err; + u32 ac_mu_mimo_brp3_err; +}; + +struct htt_tx_selfgen_ax_err_stats_tlv { + /* 11ax error stats */ + u32 ax_su_ndp_err; + u32 ax_su_ndpa_err; + u32 ax_mu_mimo_ndpa_err; + u32 ax_mu_mimo_ndp_err; + u32 ax_mu_mimo_brp1_err; + u32 ax_mu_mimo_brp2_err; + u32 ax_mu_mimo_brp3_err; + u32 ax_mu_mimo_brp4_err; + u32 ax_mu_mimo_brp5_err; + u32 ax_mu_mimo_brp6_err; + u32 ax_mu_mimo_brp7_err; + u32 ax_basic_trigger_err; + u32 ax_bsr_trigger_err; + u32 ax_mu_bar_trigger_err; + u32 ax_mu_rts_trigger_err; +}; + +/* == tx mu stats == */ +#define htt_tx_pdev_stats_num_ac_mumimo_user_stats 4 +#define htt_tx_pdev_stats_num_ax_mumimo_user_stats 8 +#define htt_tx_pdev_stats_num_ofdma_user_stats 74 + +struct htt_tx_pdev_mu_mimo_sch_stats_tlv { + /* mu-mimo sw sched cmd stats */ + u32 mu_mimo_sch_posted; + u32 mu_mimo_sch_failed; + /* mu ppdu stats per hwq */ + u32 mu_mimo_ppdu_posted; + /* + * counts the number of users in each transmission of + * the given tx mode. + * + * index is the number of users - 1. 
+ */ + u32 ac_mu_mimo_sch_nusers[htt_tx_pdev_stats_num_ac_mumimo_user_stats]; + u32 ax_mu_mimo_sch_nusers[htt_tx_pdev_stats_num_ax_mumimo_user_stats]; + u32 ax_ofdma_sch_nusers[htt_tx_pdev_stats_num_ofdma_user_stats]; +}; + +struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv { + u32 mu_mimo_mpdus_queued_usr; + u32 mu_mimo_mpdus_tried_usr; + u32 mu_mimo_mpdus_failed_usr; + u32 mu_mimo_mpdus_requeued_usr; + u32 mu_mimo_err_no_ba_usr; + u32 mu_mimo_mpdu_underrun_usr; + u32 mu_mimo_ampdu_underrun_usr; + + u32 ax_mu_mimo_mpdus_queued_usr; + u32 ax_mu_mimo_mpdus_tried_usr; + u32 ax_mu_mimo_mpdus_failed_usr; + u32 ax_mu_mimo_mpdus_requeued_usr; + u32 ax_mu_mimo_err_no_ba_usr; + u32 ax_mu_mimo_mpdu_underrun_usr; + u32 ax_mu_mimo_ampdu_underrun_usr; + + u32 ax_ofdma_mpdus_queued_usr; + u32 ax_ofdma_mpdus_tried_usr; + u32 ax_ofdma_mpdus_failed_usr; + u32 ax_ofdma_mpdus_requeued_usr; + u32 ax_ofdma_err_no_ba_usr; + u32 ax_ofdma_mpdu_underrun_usr; + u32 ax_ofdma_ampdu_underrun_usr; +}; + +#define htt_stats_tx_sched_mode_mu_mimo_ac 1 +#define htt_stats_tx_sched_mode_mu_mimo_ax 2 +#define htt_stats_tx_sched_mode_mu_ofdma_ax 3 + +struct htt_tx_pdev_mpdu_stats_tlv { + /* mpdu level stats */ + u32 mpdus_queued_usr; + u32 mpdus_tried_usr; + u32 mpdus_failed_usr; + u32 mpdus_requeued_usr; + u32 err_no_ba_usr; + u32 mpdu_underrun_usr; + u32 ampdu_underrun_usr; + u32 user_index; + u32 tx_sched_mode; /* htt_stats_tx_sched_mode_xxx */ +}; + +/* == tx sched stats == */ +/* note: variable length tlv, use length spec to infer array size */ +struct htt_sched_txq_cmd_posted_tlv_v { + u32 sched_cmd_posted[0]; /* htt_tx_pdev_sched_tx_mode_max */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_sched_txq_cmd_reaped_tlv_v { + u32 sched_cmd_reaped[0]; /* htt_tx_pdev_sched_tx_mode_max */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_sched_txq_sched_order_su_tlv_v { + u32 sched_order_su[0]; /* htt_tx_pdev_num_sched_order_log */ 
+}; + +enum htt_sched_txq_sched_ineligibility_tlv_enum { + htt_sched_tid_skip_sched_mask_disabled = 0, + htt_sched_tid_skip_notify_mpdu, + htt_sched_tid_skip_mpdu_state_invalid, + htt_sched_tid_skip_sched_disabled, + htt_sched_tid_skip_tqm_bypass_cmd_pending, + htt_sched_tid_skip_second_su_schedule, + + htt_sched_tid_skip_cmd_slot_not_avail, + htt_sched_tid_skip_no_enq, + htt_sched_tid_skip_low_enq, + htt_sched_tid_skip_paused, + htt_sched_tid_skip_ul, + htt_sched_tid_remove_paused, + htt_sched_tid_remove_no_enq, + htt_sched_tid_remove_ul, + htt_sched_tid_query, + htt_sched_tid_su_only, + htt_sched_tid_eligible, + htt_sched_ineligibility_max, +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_sched_txq_sched_ineligibility_tlv_v { + /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */ + u32 sched_ineligibility[0]; +}; + +struct htt_tx_pdev_stats_sched_per_txq_tlv { + u32 mac_id__txq_id__word; + u32 sched_policy; + u32 last_sched_cmd_posted_timestamp; + u32 last_sched_cmd_compl_timestamp; + u32 sched_2_tac_lwm_count; + u32 sched_2_tac_ring_full; + u32 sched_cmd_post_failure; + u32 num_active_tids; + u32 num_ps_schedules; + u32 sched_cmds_pending; + u32 num_tid_register; + u32 num_tid_unregister; + u32 num_qstats_queried; + u32 qstats_update_pending; + u32 last_qstats_query_timestamp; + u32 num_tqm_cmdq_full; + u32 num_de_sched_algo_trigger; + u32 num_rt_sched_algo_trigger; + u32 num_tqm_sched_algo_trigger; + u32 notify_sched; + u32 dur_based_sendn_term; +}; + +struct htt_stats_tx_sched_cmn_tlv { + /* bit [ 7 : 0] :- mac_id + * bit [31 : 8] :- reserved + */ + u32 mac_id__word; + /* current timestamp */ + u32 current_timestamp; +}; + +/* == tqm stats == */ +#define htt_tx_tqm_max_gen_mpdu_end_reason 16 +#define htt_tx_tqm_max_list_mpdu_end_reason 16 +#define htt_tx_tqm_max_list_mpdu_cnt_histogram_bins 16 + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_tqm_gen_mpdu_stats_tlv_v { + u32 
gen_mpdu_end_reason[0]; /* htt_tx_tqm_max_gen_mpdu_end_reason */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_tqm_list_mpdu_stats_tlv_v { + u32 list_mpdu_end_reason[0]; /* htt_tx_tqm_max_list_mpdu_end_reason */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_tx_tqm_list_mpdu_cnt_tlv_v { + u32 list_mpdu_cnt_hist[0]; + /* htt_tx_tqm_max_list_mpdu_cnt_histogram_bins */ +}; + +struct htt_tx_tqm_pdev_stats_tlv_v { + u32 msdu_count; + u32 mpdu_count; + u32 remove_msdu; + u32 remove_mpdu; + u32 remove_msdu_ttl; + u32 send_bar; + u32 bar_sync; + u32 notify_mpdu; + u32 sync_cmd; + u32 write_cmd; + u32 hwsch_trigger; + u32 ack_tlv_proc; + u32 gen_mpdu_cmd; + u32 gen_list_cmd; + u32 remove_mpdu_cmd; + u32 remove_mpdu_tried_cmd; + u32 mpdu_queue_stats_cmd; + u32 mpdu_head_info_cmd; + u32 msdu_flow_stats_cmd; + u32 remove_msdu_cmd; + u32 remove_msdu_ttl_cmd; + u32 flush_cache_cmd; + u32 update_mpduq_cmd; + u32 enqueue; + u32 enqueue_notify; + u32 notify_mpdu_at_head; + u32 notify_mpdu_state_valid; + /* + * on receiving tqm_flow_not_empty_status from tqm, (on msdus being enqueued + * the flow is non empty), if the number of msdus is greater than the threshold, + * notify is incremented. udp_thresh counters are for udp msdus, and nonudp are + * for non-udp msdus. + * msduq_swnotify_udp_thresh1 threshold - sched_udp_notify1 is incremented + * msduq_swnotify_udp_thresh2 threshold - sched_udp_notify2 is incremented + * msduq_swnotify_nonudp_thresh1 threshold - sched_nonudp_notify1 is incremented + * msduq_swnotify_nonudp_thresh2 threshold - sched_nonudp_notify2 is incremented + * + * notify signifies that we trigger the scheduler. 
+ */ + u32 sched_udp_notify1; + u32 sched_udp_notify2; + u32 sched_nonudp_notify1; + u32 sched_nonudp_notify2; +}; + +struct htt_tx_tqm_cmn_stats_tlv { + u32 mac_id__word; + u32 max_cmdq_id; + u32 list_mpdu_cnt_hist_intvl; + + /* global stats */ + u32 add_msdu; + u32 q_empty; + u32 q_not_empty; + u32 drop_notification; + u32 desc_threshold; +}; + +struct htt_tx_tqm_error_stats_tlv { + /* error stats */ + u32 q_empty_failure; + u32 q_not_empty_failure; + u32 add_msdu_failure; +}; + +/* == tqm cmdq stats == */ +struct htt_tx_tqm_cmdq_status_tlv { + u32 mac_id__cmdq_id__word; + u32 sync_cmd; + u32 write_cmd; + u32 gen_mpdu_cmd; + u32 mpdu_queue_stats_cmd; + u32 mpdu_head_info_cmd; + u32 msdu_flow_stats_cmd; + u32 remove_mpdu_cmd; + u32 remove_msdu_cmd; + u32 flush_cache_cmd; + u32 update_mpduq_cmd; + u32 update_msduq_cmd; +}; + +/* == tx-de stats == */ +/* structures for tx de stats */ +struct htt_tx_de_eapol_packets_stats_tlv { + u32 m1_packets; + u32 m2_packets; + u32 m3_packets; + u32 m4_packets; + u32 g1_packets; + u32 g2_packets; +}; + +struct htt_tx_de_classify_failed_stats_tlv { + u32 ap_bss_peer_not_found; + u32 ap_bcast_mcast_no_peer; + u32 sta_delete_in_progress; + u32 ibss_no_bss_peer; + u32 invalid_vdev_type; + u32 invalid_ast_peer_entry; + u32 peer_entry_invalid; + u32 ethertype_not_ip; + u32 eapol_lookup_failed; + u32 qpeer_not_allow_data; + u32 fse_tid_override; + u32 ipv6_jumbogram_zero_length; + u32 qos_to_non_qos_in_prog; +}; + +struct htt_tx_de_classify_stats_tlv { + u32 arp_packets; + u32 igmp_packets; + u32 dhcp_packets; + u32 host_inspected; + u32 htt_included; + u32 htt_valid_mcs; + u32 htt_valid_nss; + u32 htt_valid_preamble_type; + u32 htt_valid_chainmask; + u32 htt_valid_guard_interval; + u32 htt_valid_retries; + u32 htt_valid_bw_info; + u32 htt_valid_power; + u32 htt_valid_key_flags; + u32 htt_valid_no_encryption; + u32 fse_entry_count; + u32 fse_priority_be; + u32 fse_priority_high; + u32 fse_priority_low; + u32 fse_traffic_ptrn_be; + u32 
fse_traffic_ptrn_over_sub; + u32 fse_traffic_ptrn_bursty; + u32 fse_traffic_ptrn_interactive; + u32 fse_traffic_ptrn_periodic; + u32 fse_hwqueue_alloc; + u32 fse_hwqueue_created; + u32 fse_hwqueue_send_to_host; + u32 mcast_entry; + u32 bcast_entry; + u32 htt_update_peer_cache; + u32 htt_learning_frame; + u32 fse_invalid_peer; + /* + * mec_notify is htt tx wbm multicast echo check notification + * from firmware to host. fw sends sa addresses to host for all + * multicast/broadcast packets received on sta side. + */ + u32 mec_notify; +}; + +struct htt_tx_de_classify_status_stats_tlv { + u32 eok; + u32 classify_done; + u32 lookup_failed; + u32 send_host_dhcp; + u32 send_host_mcast; + u32 send_host_unknown_dest; + u32 send_host; + u32 status_invalid; +}; + +struct htt_tx_de_enqueue_packets_stats_tlv { + u32 enqueued_pkts; + u32 to_tqm; + u32 to_tqm_bypass; +}; + +struct htt_tx_de_enqueue_discard_stats_tlv { + u32 discarded_pkts; + u32 local_frames; + u32 is_ext_msdu; +}; + +struct htt_tx_de_compl_stats_tlv { + u32 tcl_dummy_frame; + u32 tqm_dummy_frame; + u32 tqm_notify_frame; + u32 fw2wbm_enq; + u32 tqm_bypass_frame; +}; + +/* + * the htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of time we waited + * for the fw2wbm ring buffer. we are requesting a buffer in fw2wbm release + * ring,which may fail, due to non availability of buffer. hence we sleep for + * 200us & again request for it. 
this is a histogram of time we wait, with + * bin of 200ms & there are 10 bin (2 seconds max) + * they are defined by the following macros in fw + * #define entries_per_bin_count 1000 // per bin 1000 * 200us = 200ms + * #define ring_full_bin_entries (wal_tx_de_fw2wbm_alloc_timeout_count / + * entries_per_bin_count) + */ +struct htt_tx_de_fw2wbm_ring_full_hist_tlv { + u32 fw2wbm_ring_full_hist[0]; +}; + +struct htt_tx_de_cmn_stats_tlv { + u32 mac_id__word; + + /* global stats */ + u32 tcl2fw_entry_count; + u32 not_to_fw; + u32 invalid_pdev_vdev_peer; + u32 tcl_res_invalid_addrx; + u32 wbm2fw_entry_count; + u32 invalid_pdev; +}; + +/* == ring-if stats == */ +#define htt_stats_low_wm_bins 5 +#define htt_stats_high_wm_bins 5 + +struct htt_ring_if_stats_tlv { + u32 base_addr; /* dword aligned base memory address of the ring */ + u32 elem_size; + u32 num_elems__prefetch_tail_idx; + u32 head_idx__tail_idx; + u32 shadow_head_idx__shadow_tail_idx; + u32 num_tail_incr; + u32 lwm_thresh__hwm_thresh; + u32 overrun_hit_count; + u32 underrun_hit_count; + u32 prod_blockwait_count; + u32 cons_blockwait_count; + u32 low_wm_hit_count[htt_stats_low_wm_bins]; + u32 high_wm_hit_count[htt_stats_high_wm_bins]; +}; + +struct htt_ring_if_cmn_tlv { + u32 mac_id__word; + u32 num_records; +}; + +/* == sfm stats == */ +/* note: variable length tlv, use length spec to infer array size */ +struct htt_sfm_client_user_tlv_v { + /* number of dwords used per user and per client */ + u32 dwords_used_by_user_n[0]; +}; + +struct htt_sfm_client_tlv { + /* client id */ + u32 client_id; + /* minimum number of buffers */ + u32 buf_min; + /* maximum number of buffers */ + u32 buf_max; + /* number of busy buffers */ + u32 buf_busy; + /* number of allocated buffers */ + u32 buf_alloc; + /* number of available/usable buffers */ + u32 buf_avail; + /* number of users */ + u32 num_users; +}; + +struct htt_sfm_cmn_tlv { + u32 mac_id__word; + /* indicates the total number of 128 byte buffers + * in the cmem that 
are available for buffer sharing + */ + u32 buf_total; + /* indicates for certain client or all the clients + * there is no dowrd saved in sfm, refer to sfm_r1_mem_empty + */ + u32 mem_empty; + /* deallocate_buffers, refer to register sfm_r0_deallocate_buffers */ + u32 deallocate_bufs; + /* number of records */ + u32 num_records; +}; + +/* == srng stats == */ +struct htt_sring_stats_tlv { + u32 mac_id__ring_id__arena__ep; + u32 base_addr_lsb; /* dword aligned base memory address of the ring */ + u32 base_addr_msb; + u32 ring_size; + u32 elem_size; + + u32 num_avail_words__num_valid_words; + u32 head_ptr__tail_ptr; + u32 consumer_empty__producer_full; + u32 prefetch_count__internal_tail_ptr; +}; + +struct htt_sring_cmn_tlv { + u32 num_records; +}; + +/* == pdev tx rate ctrl stats == */ +#define htt_tx_pdev_stats_num_mcs_counters 12 +#define htt_tx_pdev_stats_num_gi_counters 4 +#define htt_tx_pdev_stats_num_dcm_counters 5 +#define htt_tx_pdev_stats_num_bw_counters 4 +#define htt_tx_pdev_stats_num_spatial_streams 8 +#define htt_tx_pdev_stats_num_preamble_types htt_stats_pream_count +#define htt_tx_pdev_stats_num_legacy_cck_stats 4 +#define htt_tx_pdev_stats_num_legacy_ofdm_stats 8 +#define htt_tx_pdev_stats_num_ltf 4 + +#define htt_tx_num_of_sounding_stats_words \ + (htt_tx_pdev_stats_num_bw_counters * \ + htt_tx_pdev_stats_num_ax_mumimo_user_stats) + +struct htt_tx_pdev_rate_stats_tlv { + u32 mac_id__word; + u32 tx_ldpc; + u32 rts_cnt; + /* rssi value of last ack packet (units = db above noise floor) */ + u32 ack_rssi; + + u32 tx_mcs[htt_tx_pdev_stats_num_mcs_counters]; + + u32 tx_su_mcs[htt_tx_pdev_stats_num_mcs_counters]; + u32 tx_mu_mcs[htt_tx_pdev_stats_num_mcs_counters]; + + /* element 0,1, ...7 -> nss 1,2, ...8 */ + u32 tx_nss[htt_tx_pdev_stats_num_spatial_streams]; + /* element 0: 20 mhz, 1: 40 mhz, 2: 80 mhz, 3: 160 and 80+80 mhz */ + u32 tx_bw[htt_tx_pdev_stats_num_bw_counters]; + u32 tx_stbc[htt_tx_pdev_stats_num_mcs_counters]; + u32 
tx_pream[htt_tx_pdev_stats_num_preamble_types]; + + /* counters to track number of tx packets + * in each gi (400us, 800us, 1600us & 3200us) in each mcs (0-11) + */ + u32 tx_gi[htt_tx_pdev_stats_num_gi_counters][htt_tx_pdev_stats_num_mcs_counters]; + + /* counters to track packets in dcm mcs (mcs 0, 1, 3, 4) */ + u32 tx_dcm[htt_tx_pdev_stats_num_dcm_counters]; + /* number of cts-acknowledged rts packets */ + u32 rts_success; + + /* + * counters for legacy 11a and 11b transmissions. + * + * the index corresponds to: + * + * cck: 0: 1 mbps, 1: 2 mbps, 2: 5.5 mbps, 3: 11 mbps + * + * ofdm: 0: 6 mbps, 1: 9 mbps, 2: 12 mbps, 3: 18 mbps, + * 4: 24 mbps, 5: 36 mbps, 6: 48 mbps, 7: 54 mbps + */ + u32 tx_legacy_cck_rate[htt_tx_pdev_stats_num_legacy_cck_stats]; + u32 tx_legacy_ofdm_rate[htt_tx_pdev_stats_num_legacy_ofdm_stats]; + + u32 ac_mu_mimo_tx_ldpc; + u32 ax_mu_mimo_tx_ldpc; + u32 ofdma_tx_ldpc; + + /* + * counters for 11ax he ltf selection during tx. + * + * the index corresponds to: + * + * 0: unused, 1: 1x ltf, 2: 2x ltf, 3: 4x ltf + */ + u32 tx_he_ltf[htt_tx_pdev_stats_num_ltf]; + + u32 ac_mu_mimo_tx_mcs[htt_tx_pdev_stats_num_mcs_counters]; + u32 ax_mu_mimo_tx_mcs[htt_tx_pdev_stats_num_mcs_counters]; + u32 ofdma_tx_mcs[htt_tx_pdev_stats_num_mcs_counters]; + + u32 ac_mu_mimo_tx_nss[htt_tx_pdev_stats_num_spatial_streams]; + u32 ax_mu_mimo_tx_nss[htt_tx_pdev_stats_num_spatial_streams]; + u32 ofdma_tx_nss[htt_tx_pdev_stats_num_spatial_streams]; + + u32 ac_mu_mimo_tx_bw[htt_tx_pdev_stats_num_bw_counters]; + u32 ax_mu_mimo_tx_bw[htt_tx_pdev_stats_num_bw_counters]; + u32 ofdma_tx_bw[htt_tx_pdev_stats_num_bw_counters]; + + u32 ac_mu_mimo_tx_gi[htt_tx_pdev_stats_num_gi_counters] + [htt_tx_pdev_stats_num_mcs_counters]; + u32 ax_mu_mimo_tx_gi[htt_tx_pdev_stats_num_gi_counters] + [htt_tx_pdev_stats_num_mcs_counters]; + u32 ofdma_tx_gi[htt_tx_pdev_stats_num_gi_counters] + [htt_tx_pdev_stats_num_mcs_counters]; +}; + +/* == pdev rx rate ctrl stats == */ +#define 
htt_rx_pdev_stats_num_legacy_cck_stats 4 +#define htt_rx_pdev_stats_num_legacy_ofdm_stats 8 +#define htt_rx_pdev_stats_num_mcs_counters 12 +#define htt_rx_pdev_stats_num_gi_counters 4 +#define htt_rx_pdev_stats_num_dcm_counters 5 +#define htt_rx_pdev_stats_num_bw_counters 4 +#define htt_rx_pdev_stats_num_spatial_streams 8 +#define htt_rx_pdev_stats_num_preamble_types htt_stats_pream_count + +struct htt_rx_pdev_rate_stats_tlv { + u32 mac_id__word; + u32 nsts; + + u32 rx_ldpc; + u32 rts_cnt; + + u32 rssi_mgmt; /* units = db above noise floor */ + u32 rssi_data; /* units = db above noise floor */ + u32 rssi_comb; /* units = db above noise floor */ + u32 rx_mcs[htt_rx_pdev_stats_num_mcs_counters]; + /* element 0,1, ...7 -> nss 1,2, ...8 */ + u32 rx_nss[htt_rx_pdev_stats_num_spatial_streams]; + u32 rx_dcm[htt_rx_pdev_stats_num_dcm_counters]; + u32 rx_stbc[htt_rx_pdev_stats_num_mcs_counters]; + /* element 0: 20 mhz, 1: 40 mhz, 2: 80 mhz, 3: 160 and 80+80 mhz */ + u32 rx_bw[htt_rx_pdev_stats_num_bw_counters]; + u32 rx_pream[htt_rx_pdev_stats_num_preamble_types]; + u8 rssi_chain[htt_rx_pdev_stats_num_spatial_streams] + [htt_rx_pdev_stats_num_bw_counters]; + /* units = db above noise floor */ + + /* counters to track number of rx packets + * in each gi in each mcs (0-11) + */ + u32 rx_gi[htt_rx_pdev_stats_num_gi_counters][htt_rx_pdev_stats_num_mcs_counters]; + s32 rssi_in_dbm; /* rx signal strength value in dbm unit */ + + u32 rx_11ax_su_ext; + u32 rx_11ac_mumimo; + u32 rx_11ax_mumimo; + u32 rx_11ax_ofdma; + u32 txbf; + u32 rx_legacy_cck_rate[htt_rx_pdev_stats_num_legacy_cck_stats]; + u32 rx_legacy_ofdm_rate[htt_rx_pdev_stats_num_legacy_ofdm_stats]; + u32 rx_active_dur_us_low; + u32 rx_active_dur_us_high; +}; + +/* == rx pdev/soc stats == */ +struct htt_rx_soc_fw_stats_tlv { + u32 fw_reo_ring_data_msdu; + u32 fw_to_host_data_msdu_bcmc; + u32 fw_to_host_data_msdu_uc; + u32 ofld_remote_data_buf_recycle_cnt; + u32 ofld_remote_free_buf_indication_cnt; + + u32 
ofld_buf_to_host_data_msdu_uc; + u32 reo_fw_ring_to_host_data_msdu_uc; + + u32 wbm_sw_ring_reap; + u32 wbm_forward_to_host_cnt; + u32 wbm_target_recycle_cnt; + + u32 target_refill_ring_recycle_cnt; +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_rx_soc_fw_refill_ring_empty_tlv_v { + u32 refill_ring_empty_cnt[0]; /* htt_rx_stats_refill_max_ring */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v { + u32 refill_ring_num_refill[0]; /* htt_rx_stats_refill_max_ring */ +}; + +/* rxdma error code from wbm released packets */ +enum htt_rx_rxdma_error_code_enum { + htt_rx_rxdma_overflow_err = 0, + htt_rx_rxdma_mpdu_length_err = 1, + htt_rx_rxdma_fcs_err = 2, + htt_rx_rxdma_decrypt_err = 3, + htt_rx_rxdma_tkip_mic_err = 4, + htt_rx_rxdma_unecrypted_err = 5, + htt_rx_rxdma_msdu_len_err = 6, + htt_rx_rxdma_msdu_limit_err = 7, + htt_rx_rxdma_wifi_parse_err = 8, + htt_rx_rxdma_amsdu_parse_err = 9, + htt_rx_rxdma_sa_timeout_err = 10, + htt_rx_rxdma_da_timeout_err = 11, + htt_rx_rxdma_flow_timeout_err = 12, + htt_rx_rxdma_flush_request = 13, + htt_rx_rxdma_err_code_rvsd0 = 14, + htt_rx_rxdma_err_code_rvsd1 = 15, + + /* this max_err_code should not be used in any host/target messages, + * so that even though it is defined within a host/target interface + * definition header file, it isn't actually part of the host/target + * interface, and thus can be modified. 
+ */ + htt_rx_rxdma_max_err_code +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v { + u32 rxdma_err[0]; /* htt_rx_rxdma_max_err_code */ +}; + +/* reo error code from wbm released packets */ +enum htt_rx_reo_error_code_enum { + htt_rx_reo_queue_desc_addr_zero = 0, + htt_rx_reo_queue_desc_not_valid = 1, + htt_rx_ampdu_in_non_ba = 2, + htt_rx_non_ba_duplicate = 3, + htt_rx_ba_duplicate = 4, + htt_rx_regular_frame_2k_jump = 5, + htt_rx_bar_frame_2k_jump = 6, + htt_rx_regular_frame_oor = 7, + htt_rx_bar_frame_oor = 8, + htt_rx_bar_frame_no_ba_session = 9, + htt_rx_bar_frame_sn_equals_ssn = 10, + htt_rx_pn_check_failed = 11, + htt_rx_2k_error_handling_flag_set = 12, + htt_rx_pn_error_handling_flag_set = 13, + htt_rx_queue_descriptor_blocked_set = 14, + htt_rx_reo_err_code_rvsd = 15, + + /* this max_err_code should not be used in any host/target messages, + * so that even though it is defined within a host/target interface + * definition header file, it isn't actually part of the host/target + * interface, and thus can be modified. 
+ */ + htt_rx_reo_max_err_code +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v { + u32 reo_err[0]; /* htt_rx_reo_max_err_code */ +}; + +/* == rx pdev stats == */ +#define htt_stats_subtype_max 16 + +struct htt_rx_pdev_fw_stats_tlv { + u32 mac_id__word; + u32 ppdu_recvd; + u32 mpdu_cnt_fcs_ok; + u32 mpdu_cnt_fcs_err; + u32 tcp_msdu_cnt; + u32 tcp_ack_msdu_cnt; + u32 udp_msdu_cnt; + u32 other_msdu_cnt; + u32 fw_ring_mpdu_ind; + u32 fw_ring_mgmt_subtype[htt_stats_subtype_max]; + u32 fw_ring_ctrl_subtype[htt_stats_subtype_max]; + u32 fw_ring_mcast_data_msdu; + u32 fw_ring_bcast_data_msdu; + u32 fw_ring_ucast_data_msdu; + u32 fw_ring_null_data_msdu; + u32 fw_ring_mpdu_drop; + u32 ofld_local_data_ind_cnt; + u32 ofld_local_data_buf_recycle_cnt; + u32 drx_local_data_ind_cnt; + u32 drx_local_data_buf_recycle_cnt; + u32 local_nondata_ind_cnt; + u32 local_nondata_buf_recycle_cnt; + + u32 fw_status_buf_ring_refill_cnt; + u32 fw_status_buf_ring_empty_cnt; + u32 fw_pkt_buf_ring_refill_cnt; + u32 fw_pkt_buf_ring_empty_cnt; + u32 fw_link_buf_ring_refill_cnt; + u32 fw_link_buf_ring_empty_cnt; + + u32 host_pkt_buf_ring_refill_cnt; + u32 host_pkt_buf_ring_empty_cnt; + u32 mon_pkt_buf_ring_refill_cnt; + u32 mon_pkt_buf_ring_empty_cnt; + u32 mon_status_buf_ring_refill_cnt; + u32 mon_status_buf_ring_empty_cnt; + u32 mon_desc_buf_ring_refill_cnt; + u32 mon_desc_buf_ring_empty_cnt; + u32 mon_dest_ring_update_cnt; + u32 mon_dest_ring_full_cnt; + + u32 rx_suspend_cnt; + u32 rx_suspend_fail_cnt; + u32 rx_resume_cnt; + u32 rx_resume_fail_cnt; + u32 rx_ring_switch_cnt; + u32 rx_ring_restore_cnt; + u32 rx_flush_cnt; + u32 rx_recovery_reset_cnt; +}; + +#define htt_stats_phy_err_max 43 + +struct htt_rx_pdev_fw_stats_phy_err_tlv { + u32 mac_id__word; + u32 total_phy_err_cnt; + /* counts of different types of phy errs + * the mapping of phy error types to phy_err array elements is hw dependent. 
+ * the only currently-supported mapping is shown below: + * + * 0 phyrx_err_phy_off reception aborted due to receiving a phy_off tlv + * 1 phyrx_err_synth_off + * 2 phyrx_err_ofdma_timing + * 3 phyrx_err_ofdma_signal_parity + * 4 phyrx_err_ofdma_rate_illegal + * 5 phyrx_err_ofdma_length_illegal + * 6 phyrx_err_ofdma_restart + * 7 phyrx_err_ofdma_service + * 8 phyrx_err_ppdu_ofdma_power_drop + * 9 phyrx_err_cck_blokker + * 10 phyrx_err_cck_timing + * 11 phyrx_err_cck_header_crc + * 12 phyrx_err_cck_rate_illegal + * 13 phyrx_err_cck_length_illegal + * 14 phyrx_err_cck_restart + * 15 phyrx_err_cck_service + * 16 phyrx_err_cck_power_drop + * 17 phyrx_err_ht_crc_err + * 18 phyrx_err_ht_length_illegal + * 19 phyrx_err_ht_rate_illegal + * 20 phyrx_err_ht_zlf + * 21 phyrx_err_false_radar_ext + * 22 phyrx_err_green_field + * 23 phyrx_err_bw_gt_dyn_bw + * 24 phyrx_err_leg_ht_mismatch + * 25 phyrx_err_vht_crc_error + * 26 phyrx_err_vht_siga_unsupported + * 27 phyrx_err_vht_lsig_len_invalid + * 28 phyrx_err_vht_ndp_or_zlf + * 29 phyrx_err_vht_nsym_lt_zero + * 30 phyrx_err_vht_rx_extra_symbol_mismatch + * 31 phyrx_err_vht_rx_skip_group_id0 + * 32 phyrx_err_vht_rx_skip_group_id1to62 + * 33 phyrx_err_vht_rx_skip_group_id63 + * 34 phyrx_err_ofdm_ldpc_decoder_disabled + * 35 phyrx_err_defer_nap + * 36 phyrx_err_fdomain_timeout + * 37 phyrx_err_lsig_rel_check + * 38 phyrx_err_bt_collision + * 39 phyrx_err_unsupported_mu_feedback + * 40 phyrx_err_ppdu_tx_interrupt_rx + * 41 phyrx_err_unsupported_cbf + * 42 phyrx_err_other + */ + u32 phy_err[htt_stats_phy_err_max]; +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v { + /* num error mpdu for each rxdma error type */ + u32 fw_ring_mpdu_err[0]; /* htt_rx_stats_rxdma_max_err */ +}; + +/* note: variable length tlv, use length spec to infer array size */ +struct htt_rx_pdev_fw_mpdu_drop_tlv_v { + /* num mpdu dropped */ + u32 fw_mpdu_drop[0]; /* 
htt_rx_stats_fw_drop_reason_max */ +}; + +#define htt_pdev_cca_stats_tx_frame_info_present (0x1) +#define htt_pdev_cca_stats_rx_frame_info_present (0x2) +#define htt_pdev_cca_stats_rx_clear_info_present (0x4) +#define htt_pdev_cca_stats_my_rx_frame_info_present (0x8) +#define htt_pdev_cca_stats_usec_cnt_info_present (0x10) +#define htt_pdev_cca_stats_med_rx_idle_info_present (0x20) +#define htt_pdev_cca_stats_med_tx_idle_global_info_present (0x40) +#define htt_pdev_cca_stats_cca_obbs_usec_info_present (0x80) + +struct htt_pdev_stats_cca_counters_tlv { + /* below values are obtained from the hw cycles counter registers */ + u32 tx_frame_usec; + u32 rx_frame_usec; + u32 rx_clear_usec; + u32 my_rx_frame_usec; + u32 usec_cnt; + u32 med_rx_idle_usec; + u32 med_tx_idle_global_usec; + u32 cca_obss_usec; +}; + +struct htt_pdev_cca_stats_hist_v1_tlv { + u32 chan_num; + /* num of cca records (num of htt_pdev_stats_cca_counters_tlv)*/ + u32 num_records; + u32 valid_cca_counters_bitmap; + u32 collection_interval; + + /* this will be followed by an array which contains the cca stats + * collected in the last n intervals, + * if the indication is for last n intervals cca stats. + * then the pdev_cca_stats[0] element contains the oldest cca stats + * and pdev_cca_stats[n-1] will have the most recent cca stats. 
+ * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1]; + */ +}; + +struct htt_pdev_stats_twt_session_tlv { + u32 vdev_id; + struct htt_mac_addr peer_mac; + u32 flow_id_flags; + + /* twt_dialog_id_unavailable is used + * when twt session is not initiated by host + */ + u32 dialog_id; + u32 wake_dura_us; + u32 wake_intvl_us; + u32 sp_offset_us; +}; + +struct htt_pdev_stats_twt_sessions_tlv { + u32 pdev_id; + u32 num_sessions; + struct htt_pdev_stats_twt_session_tlv twt_session[0]; +}; + +enum htt_rx_reo_resource_sample_id_enum { + /* global link descriptor queued in reo */ + htt_rx_reo_resource_global_link_desc_count_0 = 0, + htt_rx_reo_resource_global_link_desc_count_1 = 1, + htt_rx_reo_resource_global_link_desc_count_2 = 2, + /*number of queue descriptors of this aging group */ + htt_rx_reo_resource_buffers_used_ac0 = 3, + htt_rx_reo_resource_buffers_used_ac1 = 4, + htt_rx_reo_resource_buffers_used_ac2 = 5, + htt_rx_reo_resource_buffers_used_ac3 = 6, + /* total number of msdus buffered in ac */ + htt_rx_reo_resource_aging_num_queues_ac0 = 7, + htt_rx_reo_resource_aging_num_queues_ac1 = 8, + htt_rx_reo_resource_aging_num_queues_ac2 = 9, + htt_rx_reo_resource_aging_num_queues_ac3 = 10, + + htt_rx_reo_resource_stats_max = 16 +}; + +struct htt_rx_reo_resource_stats_tlv_v { + /* variable based on the number of records. 
htt_rx_reo_resource_stats_max */ + u32 sample_id; + u32 total_max; + u32 total_avg; + u32 total_sample; + u32 non_zeros_avg; + u32 non_zeros_sample; + u32 last_non_zeros_max; + u32 last_non_zeros_min; + u32 last_non_zeros_avg; + u32 last_non_zeros_sample; +}; + +/* == tx sounding stats == */ + +enum htt_txbf_sound_steer_modes { + htt_implicit_txbf_steer_stats = 0, + htt_explicit_txbf_su_sifs_steer_stats = 1, + htt_explicit_txbf_su_rbo_steer_stats = 2, + htt_explicit_txbf_mu_sifs_steer_stats = 3, + htt_explicit_txbf_mu_rbo_steer_stats = 4, + htt_txbf_max_num_of_modes = 5 +}; + +enum htt_stats_sounding_tx_mode { + htt_tx_ac_sounding_mode = 0, + htt_tx_ax_sounding_mode = 1, +}; + +struct htt_tx_sounding_stats_tlv { + u32 tx_sounding_mode; /* htt_tx_xx_sounding_mode */ + /* counts number of soundings for all steering modes in each bw */ + u32 cbf_20[htt_txbf_max_num_of_modes]; + u32 cbf_40[htt_txbf_max_num_of_modes]; + u32 cbf_80[htt_txbf_max_num_of_modes]; + u32 cbf_160[htt_txbf_max_num_of_modes]; + /* + * the sounding array is a 2-d array stored as an 1-d array of + * u32. the stats for a particular user/bw combination is + * referenced with the following: + * + * sounding[(user* max_bw) + bw] + * + * ... where max_bw == 4 for 160mhz + */ + u32 sounding[htt_tx_num_of_sounding_stats_words]; +}; + +struct htt_pdev_obss_pd_stats_tlv { + u32 num_obss_tx_ppdu_success; + u32 num_obss_tx_ppdu_failure; +}; + +void ath11k_debug_htt_stats_init(struct ath11k *ar); +#endif diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include <linux/vmalloc.h> + +#include "core.h" +#include "peer.h" +#include "debug.h" + +void +ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta, + struct ath11k_per_peer_tx_stats *peer_stats, + u8 legacy_rate_idx) +{ + struct rate_info *txrate = &arsta->txrate; + struct ath11k_htt_tx_stats *tx_stats; + int gi, mcs, bw, nss; + + if (!arsta->tx_stats) + return; + + tx_stats = arsta->tx_stats; + gi = field_get(rate_info_flags_short_gi, arsta->txrate.flags); + mcs = txrate->mcs; + bw = txrate->bw; + nss = txrate->nss - 1; + +#define stats_op_fmt(name) tx_stats->stats[ath11k_stats_type_##name] + + if (txrate->flags & rate_info_flags_he_mcs) { + stats_op_fmt(succ).he[0][mcs] += peer_stats->succ_bytes; + stats_op_fmt(succ).he[1][mcs] += peer_stats->succ_pkts; + stats_op_fmt(fail).he[0][mcs] += peer_stats->failed_bytes; + stats_op_fmt(fail).he[1][mcs] += peer_stats->failed_pkts; + stats_op_fmt(retry).he[0][mcs] += peer_stats->retry_bytes; + stats_op_fmt(retry).he[1][mcs] += peer_stats->retry_pkts; + } else if (txrate->flags & rate_info_flags_vht_mcs) { + stats_op_fmt(succ).vht[0][mcs] += peer_stats->succ_bytes; + stats_op_fmt(succ).vht[1][mcs] += peer_stats->succ_pkts; + stats_op_fmt(fail).vht[0][mcs] += peer_stats->failed_bytes; + stats_op_fmt(fail).vht[1][mcs] += peer_stats->failed_pkts; + stats_op_fmt(retry).vht[0][mcs] += peer_stats->retry_bytes; + stats_op_fmt(retry).vht[1][mcs] += peer_stats->retry_pkts; + } else if (txrate->flags & rate_info_flags_mcs) { + stats_op_fmt(succ).ht[0][mcs] += peer_stats->succ_bytes; + stats_op_fmt(succ).ht[1][mcs] += peer_stats->succ_pkts; + stats_op_fmt(fail).ht[0][mcs] += peer_stats->failed_bytes; + stats_op_fmt(fail).ht[1][mcs] += peer_stats->failed_pkts; + stats_op_fmt(retry).ht[0][mcs] += peer_stats->retry_bytes; + stats_op_fmt(retry).ht[1][mcs] += peer_stats->retry_pkts; + } else { + mcs = legacy_rate_idx; + + stats_op_fmt(succ).legacy[0][mcs] += peer_stats->succ_bytes; + stats_op_fmt(succ).legacy[1][mcs] += 
peer_stats->succ_pkts; + stats_op_fmt(fail).legacy[0][mcs] += peer_stats->failed_bytes; + stats_op_fmt(fail).legacy[1][mcs] += peer_stats->failed_pkts; + stats_op_fmt(retry).legacy[0][mcs] += peer_stats->retry_bytes; + stats_op_fmt(retry).legacy[1][mcs] += peer_stats->retry_pkts; + } + + if (peer_stats->is_ampdu) { + tx_stats->ba_fails += peer_stats->ba_fails; + + if (txrate->flags & rate_info_flags_he_mcs) { + stats_op_fmt(ampdu).he[0][mcs] += + peer_stats->succ_bytes + peer_stats->retry_bytes; + stats_op_fmt(ampdu).he[1][mcs] += + peer_stats->succ_pkts + peer_stats->retry_pkts; + } else if (txrate->flags & rate_info_flags_mcs) { + stats_op_fmt(ampdu).ht[0][mcs] += + peer_stats->succ_bytes + peer_stats->retry_bytes; + stats_op_fmt(ampdu).ht[1][mcs] += + peer_stats->succ_pkts + peer_stats->retry_pkts; + } else { + stats_op_fmt(ampdu).vht[0][mcs] += + peer_stats->succ_bytes + peer_stats->retry_bytes; + stats_op_fmt(ampdu).vht[1][mcs] += + peer_stats->succ_pkts + peer_stats->retry_pkts; + } + stats_op_fmt(ampdu).bw[0][bw] += + peer_stats->succ_bytes + peer_stats->retry_bytes; + stats_op_fmt(ampdu).nss[0][nss] += + peer_stats->succ_bytes + peer_stats->retry_bytes; + stats_op_fmt(ampdu).gi[0][gi] += + peer_stats->succ_bytes + peer_stats->retry_bytes; + stats_op_fmt(ampdu).bw[1][bw] += + peer_stats->succ_pkts + peer_stats->retry_pkts; + stats_op_fmt(ampdu).nss[1][nss] += + peer_stats->succ_pkts + peer_stats->retry_pkts; + stats_op_fmt(ampdu).gi[1][gi] += + peer_stats->succ_pkts + peer_stats->retry_pkts; + } else { + tx_stats->ack_fails += peer_stats->ba_fails; + } + + stats_op_fmt(succ).bw[0][bw] += peer_stats->succ_bytes; + stats_op_fmt(succ).nss[0][nss] += peer_stats->succ_bytes; + stats_op_fmt(succ).gi[0][gi] += peer_stats->succ_bytes; + + stats_op_fmt(succ).bw[1][bw] += peer_stats->succ_pkts; + stats_op_fmt(succ).nss[1][nss] += peer_stats->succ_pkts; + stats_op_fmt(succ).gi[1][gi] += peer_stats->succ_pkts; + + stats_op_fmt(fail).bw[0][bw] += 
peer_stats->failed_bytes; + stats_op_fmt(fail).nss[0][nss] += peer_stats->failed_bytes; + stats_op_fmt(fail).gi[0][gi] += peer_stats->failed_bytes; + + stats_op_fmt(fail).bw[1][bw] += peer_stats->failed_pkts; + stats_op_fmt(fail).nss[1][nss] += peer_stats->failed_pkts; + stats_op_fmt(fail).gi[1][gi] += peer_stats->failed_pkts; + + stats_op_fmt(retry).bw[0][bw] += peer_stats->retry_bytes; + stats_op_fmt(retry).nss[0][nss] += peer_stats->retry_bytes; + stats_op_fmt(retry).gi[0][gi] += peer_stats->retry_bytes; + + stats_op_fmt(retry).bw[1][bw] += peer_stats->retry_pkts; + stats_op_fmt(retry).nss[1][nss] += peer_stats->retry_pkts; + stats_op_fmt(retry).gi[1][gi] += peer_stats->retry_pkts; + + tx_stats->tx_duration += peer_stats->duration; +} + +void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar, + struct sk_buff *msdu, + struct hal_tx_status *ts) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; + struct ath11k_peer *peer; + struct ath11k_sta *arsta; + struct ieee80211_sta *sta; + u16 rate; + u8 rate_idx; + int ret; + + rcu_read_lock(); + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_id(ab, ts->peer_id); + if (!peer || !peer->sta) { + ath11k_warn(ab, "failed to find the peer "); + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + return; + } + + sta = peer->sta; + arsta = (struct ath11k_sta *)sta->drv_priv; + + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + + if (ts->pkt_type == hal_tx_rate_stats_pkt_type_11a || + ts->pkt_type == hal_tx_rate_stats_pkt_type_11b) { + ret = ath11k_mac_hw_ratecode_to_legacy_rate(ts->mcs, + ts->pkt_type, + &rate_idx, + &rate); + if (ret < 0) { + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + return; + } + arsta->txrate.legacy = rate; + } else if (ts->pkt_type == hal_tx_rate_stats_pkt_type_11n) { + if (ts->mcs > 7) { + ath11k_warn(ab, "invalid ht mcs index %d ", ts->mcs); + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + return; + } + + 
arsta->txrate.mcs = ts->mcs + 8 * (arsta->last_txrate.nss - 1); + arsta->txrate.flags = rate_info_flags_mcs; + if (ts->sgi) + arsta->txrate.flags |= rate_info_flags_short_gi; + } else if (ts->pkt_type == hal_tx_rate_stats_pkt_type_11ac) { + if (ts->mcs > 9) { + ath11k_warn(ab, "invalid vht mcs index %d ", ts->mcs); + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + return; + } + + arsta->txrate.mcs = ts->mcs; + arsta->txrate.flags = rate_info_flags_vht_mcs; + if (ts->sgi) + arsta->txrate.flags |= rate_info_flags_short_gi; + } else { + /*todo: update he rates */ + } + + arsta->txrate.nss = arsta->last_txrate.nss; + arsta->txrate.bw = ts->bw; + + ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx); + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); +} + +static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arsta->arvif->ar; + struct ath11k_htt_data_stats *stats; + static const char *str_name[ath11k_stats_type_max] = {"succ", "fail", + "retry", "ampdu"}; + static const char *str[ath11k_counter_type_max] = {"bytes", "packets"}; + int len = 0, i, j, k, retval = 0; + const int size = 2 * 4096; + char *buf; + + buf = kzalloc(size, gfp_kernel); + if (!buf) + return -enomem; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + for (k = 0; k < ath11k_stats_type_max; k++) { + for (j = 0; j < ath11k_counter_type_max; j++) { + stats = &arsta->tx_stats->stats[k]; + len += scnprintf(buf + len, size - len, "%s_%s ", + str_name[k], + str[j]); + len += scnprintf(buf + len, size - len, + " he mcs %s ", + str[j]); + for (i = 0; i < ath11k_he_mcs_num; i++) + len += scnprintf(buf + len, size - len, + " %llu ", + stats->he[j][i]); + len += scnprintf(buf + len, size - len, " "); + len += scnprintf(buf + len, size - len, + " vht mcs %s ", + str[j]); 
+ for (i = 0; i < ath11k_vht_mcs_num; i++) + len += scnprintf(buf + len, size - len, + " %llu ", + stats->vht[j][i]); + len += scnprintf(buf + len, size - len, " "); + len += scnprintf(buf + len, size - len, " ht mcs %s ", + str[j]); + for (i = 0; i < ath11k_ht_mcs_num; i++) + len += scnprintf(buf + len, size - len, + " %llu ", stats->ht[j][i]); + len += scnprintf(buf + len, size - len, " "); + len += scnprintf(buf + len, size - len, + " bw %s (20,40,80,160 mhz) ", str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu ", + stats->bw[j][0], stats->bw[j][1], + stats->bw[j][2], stats->bw[j][3]); + len += scnprintf(buf + len, size - len, + " nss %s (1x1,2x2,3x3,4x4) ", str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu ", + stats->nss[j][0], stats->nss[j][1], + stats->nss[j][2], stats->nss[j][3]); + len += scnprintf(buf + len, size - len, + " gi %s (0.4us,0.8us,1.6us,3.2us) ", + str[j]); + len += scnprintf(buf + len, size - len, + " %llu %llu %llu %llu ", + stats->gi[j][0], stats->gi[j][1], + stats->gi[j][2], stats->gi[j][3]); + len += scnprintf(buf + len, size - len, + " legacy rate %s (1,2 ... 
mbps) ", + str[j]); + for (i = 0; i < ath11k_legacy_num; i++) + len += scnprintf(buf + len, size - len, "%llu ", + stats->legacy[j][i]); + len += scnprintf(buf + len, size - len, " "); + } + } + + len += scnprintf(buf + len, size - len, + " tx duration %llu usecs ", + arsta->tx_stats->tx_duration); + len += scnprintf(buf + len, size - len, + "ba fails %llu ", arsta->tx_stats->ba_fails); + len += scnprintf(buf + len, size - len, + "ack fails %llu ", arsta->tx_stats->ack_fails); + spin_unlock_bh(&ar->data_lock); + + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + mutex_unlock(&ar->conf_mutex); + return retval; +} + +static const struct file_operations fops_tx_stats = { + .read = ath11k_dbg_sta_dump_tx_stats, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arsta->arvif->ar; + struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; + int len = 0, i, retval = 0; + const int size = 4096; + char *buf; + + if (!rx_stats) + return -enoent; + + buf = kzalloc(size, gfp_kernel); + if (!buf) + return -enomem; + + mutex_lock(&ar->conf_mutex); + spin_lock_bh(&ar->ab->base_lock); + + len += scnprintf(buf + len, size - len, "rx peer stats: "); + len += scnprintf(buf + len, size - len, "num of msdus: %llu ", + rx_stats->num_msdu); + len += scnprintf(buf + len, size - len, "num of msdus with tcp l4: %llu ", + rx_stats->tcp_msdu_count); + len += scnprintf(buf + len, size - len, "num of msdus with udp l4: %llu ", + rx_stats->udp_msdu_count); + len += scnprintf(buf + len, size - len, "num of msdus part of ampdu: %llu ", + rx_stats->ampdu_msdu_count); + len += scnprintf(buf + len, size - len, "num of msdus not part of ampdu: %llu ", 
+ rx_stats->non_ampdu_msdu_count); + len += scnprintf(buf + len, size - len, "num of msdus using stbc: %llu ", + rx_stats->stbc_count); + len += scnprintf(buf + len, size - len, "num of msdus beamformed: %llu ", + rx_stats->beamformed_count); + len += scnprintf(buf + len, size - len, "num of mpdus with fcs ok: %llu ", + rx_stats->num_mpdu_fcs_ok); + len += scnprintf(buf + len, size - len, "num of mpdus with fcs error: %llu ", + rx_stats->num_mpdu_fcs_err); + len += scnprintf(buf + len, size - len, + "gi: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu ", + rx_stats->gi_count[0], rx_stats->gi_count[1], + rx_stats->gi_count[2], rx_stats->gi_count[3]); + len += scnprintf(buf + len, size - len, + "bw: 20mhz %llu 40mhz %llu 80mhz %llu 160mhz %llu ", + rx_stats->bw_count[0], rx_stats->bw_count[1], + rx_stats->bw_count[2], rx_stats->bw_count[3]); + len += scnprintf(buf + len, size - len, "bcc %llu ldpc %llu ", + rx_stats->coding_count[0], rx_stats->coding_count[1]); + len += scnprintf(buf + len, size - len, + "preamble: 11a %llu 11b %llu 11n %llu 11ac %llu 11ax %llu ", + rx_stats->pream_cnt[0], rx_stats->pream_cnt[1], + rx_stats->pream_cnt[2], rx_stats->pream_cnt[3], + rx_stats->pream_cnt[4]); + len += scnprintf(buf + len, size - len, + "reception type: su %llu mu_mimo %llu mu_ofdma %llu mu_ofdma_mimo %llu ", + rx_stats->reception_type[0], rx_stats->reception_type[1], + rx_stats->reception_type[2], rx_stats->reception_type[3]); + len += scnprintf(buf + len, size - len, "tid(0-15) legacy tid(16):"); + for (i = 0; i <= ieee80211_num_tids; i++) + len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]); + len += scnprintf(buf + len, size - len, " mcs(0-11) legacy mcs(12):"); + for (i = 0; i < hal_rx_max_mcs + 1; i++) + len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]); + len += scnprintf(buf + len, size - len, " nss(1-8):"); + for (i = 0; i < hal_rx_max_nss; i++) + len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]); + 
len += scnprintf(buf + len, size - len, " rx duration:%llu ", + rx_stats->rx_duration); + len += scnprintf(buf + len, size - len, " "); + + spin_unlock_bh(&ar->ab->base_lock); + + if (len > size) + len = size; + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + mutex_unlock(&ar->conf_mutex); + return retval; +} + +static const struct file_operations fops_rx_stats = { + .read = ath11k_dbg_sta_dump_rx_stats, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +static int +ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file) +{ + struct ieee80211_sta *sta = inode->i_private; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arsta->arvif->ar; + struct debug_htt_stats_req *stats_req; + int ret; + + stats_req = vzalloc(sizeof(*stats_req) + ath11k_htt_stats_buf_size); + if (!stats_req) + return -enomem; + + mutex_lock(&ar->conf_mutex); + ar->debug.htt_stats.stats_req = stats_req; + stats_req->type = ath11k_dbg_htt_ext_stats_peer_info; + memcpy(stats_req->peer_addr, sta->addr, eth_alen); + ret = ath11k_dbg_htt_stats_req(ar); + mutex_unlock(&ar->conf_mutex); + if (ret < 0) + goto out; + + file->private_data = stats_req; + return 0; +out: + vfree(stats_req); + return ret; +} + +static int +ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + return 0; +} + +static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct debug_htt_stats_req *stats_req = file->private_data; + char *buf; + u32 length = 0; + + buf = stats_req->buf; + length = min_t(u32, stats_req->buf_len, ath11k_htt_stats_buf_size); + return simple_read_from_buffer(user_buf, count, ppos, buf, length); +} + +static const struct file_operations fops_htt_peer_stats = { + .open = ath11k_dbg_sta_open_htt_peer_stats, + .release = ath11k_dbg_sta_release_htt_peer_stats, 
+ .read = ath11k_dbg_sta_read_htt_peer_stats, + .owner = this_module, + .llseek = default_llseek, +}; + +static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arsta->arvif->ar; + int ret, enable; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto out; + } + + ret = kstrtoint_from_user(buf, count, 0, &enable); + if (ret) + goto out; + + ar->debug.pktlog_peer_valid = enable; + memcpy(ar->debug.pktlog_peer_addr, sta->addr, eth_alen); + + /* send peer based pktlog enable/disable */ + ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable); + if (ret) { + ath11k_warn(ar->ab, "failed to set peer pktlog filter %pm: %d ", + sta->addr, ret); + goto out; + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, "peer pktlog filter set to %d ", + enable); + ret = count; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arsta->arvif->ar; + char buf[32] = {0}; + int len; + + mutex_lock(&ar->conf_mutex); + len = scnprintf(buf, sizeof(buf), "%08x %pm ", + ar->debug.pktlog_peer_valid, + ar->debug.pktlog_peer_addr); + mutex_unlock(&ar->conf_mutex); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_peer_pktlog = { + .write = ath11k_dbg_sta_write_peer_pktlog, + .read = ath11k_dbg_sta_read_peer_pktlog, + .open = simple_open, + .owner = this_module, + .llseek = default_llseek, +}; + +void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, struct dentry *dir) +{ + 
struct ath11k *ar = hw->priv; + + if (ath11k_debug_is_extd_tx_stats_enabled(ar)) + debugfs_create_file("tx_stats", 0400, dir, sta, + &fops_tx_stats); + if (ath11k_debug_is_extd_rx_stats_enabled(ar)) + debugfs_create_file("rx_stats", 0400, dir, sta, + &fops_rx_stats); + + debugfs_create_file("htt_peer_stats", 0400, dir, sta, + &fops_htt_peer_stats); + + debugfs_create_file("peer_pktlog", 0644, dir, sta, + &fops_peer_pktlog); +} diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dp.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include <linux/kfifo.h> +#include "core.h" +#include "dp_tx.h" +#include "hal_tx.h" +#include "debug.h" +#include "dp_rx.h" +#include "peer.h" + +static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab, + struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_peer *peer; + + /* todo: any other peer specific dp cleanup */ + + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find(ab, vdev_id, addr); + if (!peer) { + ath11k_warn(ab, "failed to lookup peer %pm on vdev %d ", + addr, vdev_id); + spin_unlock_bh(&ab->base_lock); + return; + } + + ath11k_peer_rx_tid_cleanup(ar, peer); + spin_unlock_bh(&ab->base_lock); +} + +int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr) +{ + struct ath11k_base *ab = ar->ab; + u32 reo_dest; + int ret; + + /* note: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */ + reo_dest = ar->dp.mac_id + 1; + ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id, + wmi_peer_set_default_routing, + dp_rx_hash_enable | (reo_dest << 1)); + + if (ret) { + ath11k_warn(ab, "failed to set default routing %d peer :%pm vdev_id :%d ", + ret, addr, vdev_id); + return ret; + } + + ret = 
ath11k_peer_rx_tid_setup(ar, addr, vdev_id, + hal_desc_reo_non_qos_tid, 1, 0); + if (ret) { + ath11k_warn(ab, "failed to setup rxd tid queue for non-qos tid %d ", + ret); + return ret; + } + + ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, 0, 1, 0); + if (ret) { + ath11k_warn(ab, "failed to setup rxd tid queue for tid 0 %d ", + ret); + return ret; + } + + /* todo: setup other peer specific resource used in data path */ + + return 0; +} + +void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) +{ + if (!ring->vaddr_unaligned) + return; + + dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, + ring->paddr_unaligned); + + ring->vaddr_unaligned = null; +} + +int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, + enum hal_ring_type type, int ring_num, + int mac_id, int num_entries) +{ + struct hal_srng_params params = { 0 }; + int entry_sz = ath11k_hal_srng_get_entrysize(type); + int max_entries = ath11k_hal_srng_get_max_entries(type); + int ret; + + if (max_entries < 0 || entry_sz < 0) + return -einval; + + if (num_entries > max_entries) + num_entries = max_entries; + + ring->size = (num_entries * entry_sz) + hal_ring_base_align - 1; + ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, + &ring->paddr_unaligned, + gfp_kernel); + if (!ring->vaddr_unaligned) + return -enomem; + + ring->vaddr = ptr_align(ring->vaddr_unaligned, hal_ring_base_align); + ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr - + (unsigned long)ring->vaddr_unaligned); + + params.ring_base_vaddr = ring->vaddr; + params.ring_base_paddr = ring->paddr; + params.num_entries = num_entries; + + switch (type) { + case hal_reo_dst: + params.intr_batch_cntr_thres_entries = + hal_srng_int_batch_threshold_rx; + params.intr_timer_thres_us = hal_srng_int_timer_threshold_rx; + break; + case hal_rxdma_buf: + case hal_rxdma_monitor_buf: + case hal_rxdma_monitor_status: + params.low_threshold = num_entries >> 3; + params.flags |= 
hal_srng_flags_low_thresh_intr_en; + params.intr_batch_cntr_thres_entries = 0; + params.intr_timer_thres_us = hal_srng_int_timer_threshold_rx; + break; + case hal_wbm2sw_release: + if (ring_num < 3) { + params.intr_batch_cntr_thres_entries = + hal_srng_int_batch_threshold_tx; + params.intr_timer_thres_us = + hal_srng_int_timer_threshold_tx; + break; + } + /* follow through when ring_num >= 3 */ + /* fall through */ + case hal_reo_exception: + case hal_reo_reinject: + case hal_reo_cmd: + case hal_reo_status: + case hal_tcl_data: + case hal_tcl_cmd: + case hal_tcl_status: + case hal_wbm_idle_link: + case hal_sw2wbm_release: + case hal_rxdma_dst: + case hal_rxdma_monitor_dst: + case hal_rxdma_monitor_desc: + case hal_rxdma_dir_buf: + params.intr_batch_cntr_thres_entries = + hal_srng_int_batch_threshold_other; + params.intr_timer_thres_us = hal_srng_int_timer_threshold_other; + break; + default: + ath11k_warn(ab, "not a valid ring type in dp :%d ", type); + return -einval; + } + + ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, ¶ms); + if (ret < 0) { + ath11k_warn(ab, "failed to setup srng: %d ring_id %d ", + ret, ring_num); + return ret; + } + + ring->ring_id = ret; + + return 0; +} + +static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab) +{ + struct ath11k_dp *dp = &ab->dp; + int i; + + ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring); + ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring); + ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring); + for (i = 0; i < dp_tcl_num_ring_max; i++) { + ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring); + ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring); + } + ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring); + ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring); + ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring); + ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring); + ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring); +} + +static int ath11k_dp_srng_common_setup(struct ath11k_base *ab) +{ + struct ath11k_dp 
*dp = &ab->dp; + struct hal_srng *srng; + int i, ret; + + ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring, + hal_sw2wbm_release, 0, 0, + dp_wbm_release_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d ", + ret); + goto err; + } + + ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, hal_tcl_cmd, 0, 0, + dp_tcl_cmd_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up tcl_cmd ring :%d ", ret); + goto err; + } + + ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, hal_tcl_status, + 0, 0, dp_tcl_status_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up tcl_status ring :%d ", ret); + goto err; + } + + for (i = 0; i < dp_tcl_num_ring_max; i++) { + ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring, + hal_tcl_data, i, 0, + dp_tcl_data_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d ", + i, ret); + goto err; + } + + ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring, + hal_wbm2sw_release, i, 0, + dp_tx_comp_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up tcl_comp ring ring (%d) :%d ", + i, ret); + goto err; + } + + srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id]; + ath11k_hal_tx_init_data_ring(ab, srng); + } + + ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, hal_reo_reinject, + 0, 0, dp_reo_reinject_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up reo_reinject ring :%d ", + ret); + goto err; + } + + ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, hal_wbm2sw_release, + 3, 0, dp_rx_release_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up rx_rel ring :%d ", ret); + goto err; + } + + ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, hal_reo_exception, + 0, 0, dp_reo_exception_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up reo_exception ring :%d ", + ret); + goto err; + } + + ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, hal_reo_cmd, + 0, 0, dp_reo_cmd_ring_size); + if (ret) { + 
ath11k_warn(ab, "failed to set up reo_cmd ring :%d ", ret); + goto err; + } + + srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; + ath11k_hal_reo_init_cmd_ring(ab, srng); + + ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, hal_reo_status, + 0, 0, dp_reo_status_ring_size); + if (ret) { + ath11k_warn(ab, "failed to set up reo_status ring :%d ", ret); + goto err; + } + + ath11k_hal_reo_hw_setup(ab); + + return 0; + +err: + ath11k_dp_srng_common_cleanup(ab); + + return ret; +} + +static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab) +{ + struct ath11k_dp *dp = &ab->dp; + struct hal_wbm_idle_scatter_list *slist = dp->scatter_list; + int i; + + for (i = 0; i < dp_idle_scatter_bufs_max; i++) { + if (!slist[i].vaddr) + continue; + + dma_free_coherent(ab->dev, hal_wbm_idle_scatter_buf_size_max, + slist[i].vaddr, slist[i].paddr); + slist[i].vaddr = null; + } +} + +static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab, + int size, + u32 n_link_desc_bank, + u32 n_link_desc, + u32 last_bank_sz) +{ + struct ath11k_dp *dp = &ab->dp; + struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks; + struct hal_wbm_idle_scatter_list *slist = dp->scatter_list; + u32 n_entries_per_buf; + int num_scatter_buf, scatter_idx; + struct hal_wbm_link_desc *scatter_buf; + int align_bytes, n_entries; + dma_addr_t paddr; + int rem_entries; + int i; + int ret = 0; + u32 end_offset; + + n_entries_per_buf = hal_wbm_idle_scatter_buf_size / + ath11k_hal_srng_get_entrysize(hal_wbm_idle_link); + num_scatter_buf = div_round_up(size, hal_wbm_idle_scatter_buf_size); + + if (num_scatter_buf > dp_idle_scatter_bufs_max) + return -einval; + + for (i = 0; i < num_scatter_buf; i++) { + slist[i].vaddr = dma_alloc_coherent(ab->dev, + hal_wbm_idle_scatter_buf_size_max, + &slist[i].paddr, gfp_kernel); + if (!slist[i].vaddr) { + ret = -enomem; + goto err; + } + } + + scatter_idx = 0; + scatter_buf = slist[scatter_idx].vaddr; + rem_entries = n_entries_per_buf; + + 
for (i = 0; i < n_link_desc_bank; i++) { + align_bytes = link_desc_banks[i].vaddr - + link_desc_banks[i].vaddr_unaligned; + n_entries = (dp_link_desc_alloc_size_thresh - align_bytes) / + hal_link_desc_size; + paddr = link_desc_banks[i].paddr; + while (n_entries) { + ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr); + n_entries--; + paddr += hal_link_desc_size; + if (rem_entries) { + rem_entries--; + scatter_buf++; + continue; + } + + rem_entries = n_entries_per_buf; + scatter_idx++; + scatter_buf = slist[scatter_idx].vaddr; + } + } + + end_offset = (scatter_buf - slist[scatter_idx].vaddr) * + sizeof(struct hal_wbm_link_desc); + ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf, + n_link_desc, end_offset); + + return 0; + +err: + ath11k_dp_scatter_idle_link_desc_cleanup(ab); + + return ret; +} + +static void +ath11k_dp_link_desc_bank_free(struct ath11k_base *ab, + struct dp_link_desc_bank *link_desc_banks) +{ + int i; + + for (i = 0; i < dp_link_desc_banks_max; i++) { + if (link_desc_banks[i].vaddr_unaligned) { + dma_free_coherent(ab->dev, + link_desc_banks[i].size, + link_desc_banks[i].vaddr_unaligned, + link_desc_banks[i].paddr_unaligned); + link_desc_banks[i].vaddr_unaligned = null; + } + } +} + +static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab, + struct dp_link_desc_bank *desc_bank, + int n_link_desc_bank, + int last_bank_sz) +{ + struct ath11k_dp *dp = &ab->dp; + int i; + int ret = 0; + int desc_sz = dp_link_desc_alloc_size_thresh; + + for (i = 0; i < n_link_desc_bank; i++) { + if (i == (n_link_desc_bank - 1) && last_bank_sz) + desc_sz = last_bank_sz; + + desc_bank[i].vaddr_unaligned = + dma_alloc_coherent(ab->dev, desc_sz, + &desc_bank[i].paddr_unaligned, + gfp_kernel); + if (!desc_bank[i].vaddr_unaligned) { + ret = -enomem; + goto err; + } + + desc_bank[i].vaddr = ptr_align(desc_bank[i].vaddr_unaligned, + hal_link_desc_align); + desc_bank[i].paddr = desc_bank[i].paddr_unaligned + + ((unsigned long)desc_bank[i].vaddr - + (unsigned 
long)desc_bank[i].vaddr_unaligned); + desc_bank[i].size = desc_sz; + } + + return 0; + +err: + ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks); + + return ret; +} + +void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab, + struct dp_link_desc_bank *desc_bank, + u32 ring_type, struct dp_srng *ring) +{ + ath11k_dp_link_desc_bank_free(ab, desc_bank); + + if (ring_type != hal_rxdma_monitor_desc) { + ath11k_dp_srng_cleanup(ab, ring); + ath11k_dp_scatter_idle_link_desc_cleanup(ab); + } +} + +static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc) +{ + struct ath11k_dp *dp = &ab->dp; + u32 n_mpdu_link_desc, n_mpdu_queue_desc; + u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc; + int ret = 0; + + n_mpdu_link_desc = (dp_num_tids_max * dp_avg_mpdus_per_tid_max) / + hal_num_mpdus_per_link_desc; + + n_mpdu_queue_desc = n_mpdu_link_desc / + hal_num_mpdu_links_per_queue_desc; + + n_tx_msdu_link_desc = (dp_num_tids_max * dp_avg_flows_per_tid * + dp_avg_msdus_per_flow) / + hal_num_tx_msdus_per_link_desc; + + n_rx_msdu_link_desc = (dp_num_tids_max * dp_avg_mpdus_per_tid_max * + dp_avg_msdus_per_mpdu) / + hal_num_rx_msdus_per_link_desc; + + *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc + + n_tx_msdu_link_desc + n_rx_msdu_link_desc; + + ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring, + hal_wbm_idle_link, 0, 0, *n_link_desc); + if (ret) { + ath11k_warn(ab, "failed to setup wbm_idle_ring: %d ", ret); + return ret; + } + return ret; +} + +int ath11k_dp_link_desc_setup(struct ath11k_base *ab, + struct dp_link_desc_bank *link_desc_banks, + u32 ring_type, struct hal_srng *srng, + u32 n_link_desc) +{ + u32 tot_mem_sz; + u32 n_link_desc_bank, last_bank_sz; + u32 entry_sz, align_bytes, n_entries; + u32 paddr; + u32 *desc; + int i, ret; + + if (n_link_desc & (n_link_desc - 1)) + n_link_desc = 1 << fls(n_link_desc); + + tot_mem_sz = n_link_desc * hal_link_desc_size; + tot_mem_sz += hal_link_desc_align; + + if (tot_mem_sz <= dp_link_desc_alloc_size_thresh) { 
+ n_link_desc_bank = 1; + last_bank_sz = tot_mem_sz; + } else { + n_link_desc_bank = tot_mem_sz / + (dp_link_desc_alloc_size_thresh - + hal_link_desc_align); + last_bank_sz = tot_mem_sz % + (dp_link_desc_alloc_size_thresh - + hal_link_desc_align); + + if (last_bank_sz) + n_link_desc_bank += 1; + } + + if (n_link_desc_bank > dp_link_desc_banks_max) + return -einval; + + ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks, + n_link_desc_bank, last_bank_sz); + if (ret) + return ret; + + /* setup link desc idle list for hw internal usage */ + entry_sz = ath11k_hal_srng_get_entrysize(ring_type); + tot_mem_sz = entry_sz * n_link_desc; + + /* setup scatter desc list when the total memory requirement is more */ + if (tot_mem_sz > dp_link_desc_alloc_size_thresh && + ring_type != hal_rxdma_monitor_desc) { + ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz, + n_link_desc_bank, + n_link_desc, + last_bank_sz); + if (ret) { + ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d ", + ret); + goto fail_desc_bank_free; + } + + return 0; + } + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + for (i = 0; i < n_link_desc_bank; i++) { + align_bytes = link_desc_banks[i].vaddr - + link_desc_banks[i].vaddr_unaligned; + n_entries = (link_desc_banks[i].size - align_bytes) / + hal_link_desc_size; + paddr = link_desc_banks[i].paddr; + while (n_entries && + (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) { + ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc, + i, paddr); + n_entries--; + paddr += hal_link_desc_size; + } + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return 0; + +fail_desc_bank_free: + ath11k_dp_link_desc_bank_free(ab, link_desc_banks); + + return ret; +} + +int ath11k_dp_service_srng(struct ath11k_base *ab, + struct ath11k_ext_irq_grp *irq_grp, + int budget) +{ + struct napi_struct *napi = &irq_grp->napi; + int grp_id = irq_grp->grp_id; + int work_done = 0; + int i 
= 0; + int tot_work_done = 0; + + while (ath11k_tx_ring_mask[grp_id] >> i) { + if (ath11k_tx_ring_mask[grp_id] & bit(i)) + ath11k_dp_tx_completion_handler(ab, i); + i++; + } + + if (ath11k_rx_err_ring_mask[grp_id]) { + work_done = ath11k_dp_process_rx_err(ab, napi, budget); + budget -= work_done; + tot_work_done += work_done; + if (budget <= 0) + goto done; + } + + if (ath11k_rx_wbm_rel_ring_mask[grp_id]) { + work_done = ath11k_dp_rx_process_wbm_err(ab, + napi, + budget); + budget -= work_done; + tot_work_done += work_done; + + if (budget <= 0) + goto done; + } + + if (ath11k_rx_ring_mask[grp_id]) { + for (i = 0; i < ab->num_radios; i++) { + if (ath11k_rx_ring_mask[grp_id] & bit(i)) { + work_done = ath11k_dp_process_rx(ab, i, napi, + &irq_grp->pending_q, + budget); + budget -= work_done; + tot_work_done += work_done; + } + if (budget <= 0) + goto done; + } + } + + if (rx_mon_status_ring_mask[grp_id]) { + for (i = 0; i < ab->num_radios; i++) { + if (rx_mon_status_ring_mask[grp_id] & bit(i)) { + work_done = + ath11k_dp_rx_process_mon_rings(ab, + i, napi, + budget); + budget -= work_done; + tot_work_done += work_done; + } + if (budget <= 0) + goto done; + } + } + + if (ath11k_reo_status_ring_mask[grp_id]) + ath11k_dp_process_reo_status(ab); + + for (i = 0; i < ab->num_radios; i++) { + if (ath11k_rxdma2host_ring_mask[grp_id] & bit(i)) { + work_done = ath11k_dp_process_rxdma_err(ab, i, budget); + budget -= work_done; + tot_work_done += work_done; + } + + if (budget <= 0) + goto done; + + if (ath11k_host2rxdma_ring_mask[grp_id] & bit(i)) { + struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + + ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0, + hal_rx_buf_rbm_sw3_bm, + gfp_atomic); + } + } + /* todo: implement handler for other interrupts */ + +done: + return tot_work_done; +} + +void ath11k_dp_pdev_free(struct ath11k_base *ab) +{ + struct ath11k *ar; + int i; + + for (i = 0; i < ab->num_radios; i++) { + ar = 
ab->pdevs[i].ar; + ath11k_dp_rx_pdev_free(ab, i); + ath11k_debug_unregister(ar); + ath11k_dp_rx_pdev_mon_detach(ar); + } +} + +int ath11k_dp_pdev_alloc(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev_dp *dp; + int ret; + int i; + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + dp = &ar->dp; + dp->mac_id = i; + idr_init(&dp->rx_refill_buf_ring.bufs_idr); + spin_lock_init(&dp->rx_refill_buf_ring.idr_lock); + atomic_set(&dp->num_tx_pending, 0); + init_waitqueue_head(&dp->tx_empty_waitq); + idr_init(&dp->rx_mon_status_refill_ring.bufs_idr); + spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock); + idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); + spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); + } + + /* todo:per-pdev rx ring unlike tx ring which is mapped to different ac's */ + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + ret = ath11k_dp_rx_pdev_alloc(ab, i); + if (ret) { + ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d ", + i); + goto err; + } + ret = ath11k_dp_rx_pdev_mon_attach(ar); + if (ret) { + ath11k_warn(ab, "failed to initialize mon pdev %d ", + i); + goto err; + } + } + + return 0; + +err: + ath11k_dp_pdev_free(ab); + + return ret; +} + +int ath11k_dp_htt_connect(struct ath11k_dp *dp) +{ + struct ath11k_htc_svc_conn_req conn_req; + struct ath11k_htc_svc_conn_resp conn_resp; + int status; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + + conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler; + + /* connect to control service */ + conn_req.service_id = ath11k_htc_svc_id_htt_data_msg; + + status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req, + &conn_resp); + + if (status) + return status; + + dp->eid = conn_resp.eid; + + return 0; +} + +static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif) +{ + /* enable addry (sa based search) for sta mode. 
all other modes it + * is going to be addrx (da based search). for sta mode, set search + * type based on ast value. + */ + switch (arvif->vdev_type) { + case wmi_vdev_type_sta: + arvif->hal_addr_search_flags = hal_tx_addry_en; + arvif->search_type = hal_tx_addr_search_index; + break; + case wmi_vdev_type_ap: + case wmi_vdev_type_ibss: + arvif->hal_addr_search_flags = hal_tx_addrx_en; + arvif->search_type = hal_tx_addr_search_default; + break; + case wmi_vdev_type_monitor: + default: + return; + } +} + +void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif) +{ + arvif->tcl_metadata |= field_prep(htt_tcl_meta_data_type, 1) | + field_prep(htt_tcl_meta_data_vdev_id, + arvif->vdev_id) | + field_prep(htt_tcl_meta_data_pdev_id, + ar->pdev->pdev_id); + + /* set htt extension valid bit to 0 by default */ + arvif->tcl_metadata &= ~htt_tcl_meta_data_valid_htt; + + ath11k_dp_update_vdev_search(arvif); +} + +static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx) +{ + struct ath11k_base *ab = (struct ath11k_base *)ctx; + struct sk_buff *msdu = skb; + + dma_unmap_single(ab->dev, ath11k_skb_cb(msdu)->paddr, msdu->len, + dma_to_device); + + dev_kfree_skb_any(msdu); + + return 0; +} + +void ath11k_dp_free(struct ath11k_base *ab) +{ + struct ath11k_dp *dp = &ab->dp; + int i; + + ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks, + hal_wbm_idle_link, &dp->wbm_idle_ring); + + ath11k_dp_srng_common_cleanup(ab); + + ath11k_dp_reo_cmd_list_cleanup(ab); + + for (i = 0; i < dp_tcl_num_ring_max; i++) { + spin_lock_bh(&dp->tx_ring[i].tx_idr_lock); + idr_for_each(&dp->tx_ring[i].txbuf_idr, + ath11k_dp_tx_pending_cleanup, ab); + idr_destroy(&dp->tx_ring[i].txbuf_idr); + spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock); + + spin_lock_bh(&dp->tx_ring[i].tx_status_lock); + kfifo_free(&dp->tx_ring[i].tx_status_fifo); + spin_unlock_bh(&dp->tx_ring[i].tx_status_lock); + } + + /* deinit any soc level resource */ +} + +int ath11k_dp_alloc(struct ath11k_base *ab) +{ 
+ struct ath11k_dp *dp = &ab->dp; + struct hal_srng *srng = null; + size_t size = 0; + u32 n_link_desc = 0; + int ret; + int i; + + dp->ab = ab; + + init_list_head(&dp->reo_cmd_list); + init_list_head(&dp->reo_cmd_cache_flush_list); + spin_lock_init(&dp->reo_cmd_lock); + + ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc); + if (ret) { + ath11k_warn(ab, "failed to setup wbm_idle_ring: %d ", ret); + return ret; + } + + srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id]; + + ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks, + hal_wbm_idle_link, srng, n_link_desc); + if (ret) { + ath11k_warn(ab, "failed to setup link desc: %d ", ret); + return ret; + } + + ret = ath11k_dp_srng_common_setup(ab); + if (ret) + goto fail_link_desc_cleanup; + + size = roundup_pow_of_two(dp_tx_comp_ring_size); + + for (i = 0; i < dp_tcl_num_ring_max; i++) { + idr_init(&dp->tx_ring[i].txbuf_idr); + spin_lock_init(&dp->tx_ring[i].tx_idr_lock); + dp->tx_ring[i].tcl_data_ring_id = i; + + spin_lock_init(&dp->tx_ring[i].tx_status_lock); + ret = kfifo_alloc(&dp->tx_ring[i].tx_status_fifo, size, + gfp_kernel); + if (ret) + goto fail_cmn_srng_cleanup; + } + + for (i = 0; i < hal_dscp_tid_map_tbl_num_entries_max; i++) + ath11k_hal_tx_set_dscp_tid_map(ab, i); + + /* init any soc level resource for dp */ + + return 0; + +fail_cmn_srng_cleanup: + ath11k_dp_srng_common_cleanup(ab); + +fail_link_desc_cleanup: + ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks, + hal_wbm_idle_link, &dp->wbm_idle_ring); + + return ret; +} diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dp.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_dp_h +#define ath11k_dp_h + +#include <linux/kfifo.h> +#include "hal_rx.h" + +struct ath11k_base; +struct ath11k_peer; +struct ath11k_dp; +struct ath11k_vif; +struct hal_tcl_status_ring; +struct ath11k_ext_irq_grp; + +struct dp_rx_tid { + u8 tid; + u32 *vaddr; + dma_addr_t paddr; + u32 size; + u32 ba_win_sz; + bool active; +}; + +#define dp_reo_desc_free_timeout_ms 1000 + +struct dp_reo_cache_flush_elem { + struct list_head list; + struct dp_rx_tid data; + unsigned long ts; +}; + +struct dp_reo_cmd { + struct list_head list; + struct dp_rx_tid data; + int cmd_num; + void (*handler)(struct ath11k_dp *, void *, + enum hal_reo_cmd_status status); +}; + +struct dp_srng { + u32 *vaddr_unaligned; + u32 *vaddr; + dma_addr_t paddr_unaligned; + dma_addr_t paddr; + int size; + u32 ring_id; +}; + +struct dp_rxdma_ring { + struct dp_srng refill_buf_ring; + struct idr bufs_idr; + /* protects bufs_idr */ + spinlock_t idr_lock; + int bufs_max; +}; + +struct dp_tx_ring { + u8 tcl_data_ring_id; + struct dp_srng tcl_data_ring; + struct dp_srng tcl_comp_ring; + struct idr txbuf_idr; + u32 num_tx_pending; + /* protects txbuf_idr and num_pending */ + spinlock_t tx_idr_lock; + declare_kfifo_ptr(tx_status_fifo, struct hal_wbm_release_ring); + /* lock to protect tx_status_fifo because tx_status_fifo can be + * accessed concurrently. 
+ */ + spinlock_t tx_status_lock; +}; + +struct ath11k_pdev_mon_stats { + u32 status_ppdu_state; + u32 status_ppdu_start; + u32 status_ppdu_end; + u32 status_ppdu_compl; + u32 status_ppdu_start_mis; + u32 status_ppdu_end_mis; + u32 status_ppdu_done; + u32 dest_ppdu_done; + u32 dest_mpdu_done; + u32 dest_mpdu_drop; + u32 dup_mon_linkdesc_cnt; + u32 dup_mon_buf_cnt; +}; + +struct dp_link_desc_bank { + void *vaddr_unaligned; + void *vaddr; + dma_addr_t paddr_unaligned; + dma_addr_t paddr; + u32 size; +}; + +/* size to enforce scatter idle list mode */ +#define dp_link_desc_alloc_size_thresh 0x200000 +#define dp_link_desc_banks_max 8 + +#define dp_rx_desc_cookie_index_max 0x3ffff +#define dp_rx_desc_cookie_pool_id_max 0x1c0000 +#define dp_rx_desc_cookie_max \ + (dp_rx_desc_cookie_index_max | dp_rx_desc_cookie_pool_id_max) +#define dp_not_ppdu_id_wrap_around 20000 + +enum ath11k_dp_ppdu_state { + dp_ppdu_status_start, + dp_ppdu_status_done, +}; + +struct ath11k_mon_data { + struct dp_link_desc_bank link_desc_banks[dp_link_desc_banks_max]; + struct hal_rx_mon_ppdu_info mon_ppdu_info; + + u32 mon_ppdu_status; + u32 mon_last_buf_cookie; + u64 mon_last_linkdesc_paddr; + u16 chan_noise_floor; + + struct ath11k_pdev_mon_stats rx_mon_stats; + /* lock for monitor data */ + spinlock_t mon_lock; + struct sk_buff_head rx_status_q; +}; + +struct ath11k_pdev_dp { + u32 mac_id; + atomic_t num_tx_pending; + wait_queue_head_t tx_empty_waitq; + struct dp_srng reo_dst_ring; + struct dp_rxdma_ring rx_refill_buf_ring; + struct dp_srng rxdma_err_dst_ring; + struct dp_srng rxdma_mon_dst_ring; + struct dp_srng rxdma_mon_desc_ring; + + struct dp_rxdma_ring rxdma_mon_buf_ring; + struct dp_rxdma_ring rx_mon_status_refill_ring; + struct ieee80211_rx_status rx_status; + struct ath11k_mon_data mon_data; +}; + +#define dp_num_clients_max 64 +#define dp_avg_tids_per_client 2 +#define dp_num_tids_max (dp_num_clients_max * dp_avg_tids_per_client) +#define dp_avg_msdus_per_flow 128 +#define 
dp_avg_flows_per_tid 2 +#define dp_avg_mpdus_per_tid_max 128 +#define dp_avg_msdus_per_mpdu 4 + +#define dp_rx_hash_enable 0 /* disable hash based rx steering */ + +#define dp_ba_win_sz_max 256 + +#define dp_tcl_num_ring_max 3 + +#define dp_idle_scatter_bufs_max 16 + +#define dp_wbm_release_ring_size 64 +#define dp_tcl_data_ring_size 512 +#define dp_tx_comp_ring_size 8192 +#define dp_tx_idr_size (dp_tx_comp_ring_size << 1) +#define dp_tcl_cmd_ring_size 32 +#define dp_tcl_status_ring_size 32 +#define dp_reo_dst_ring_max 4 +#define dp_reo_dst_ring_size 2048 +#define dp_reo_reinject_ring_size 32 +#define dp_rx_release_ring_size 1024 +#define dp_reo_exception_ring_size 128 +#define dp_reo_cmd_ring_size 128 +#define dp_reo_status_ring_size 256 +#define dp_rxdma_buf_ring_size 4096 +#define dp_rxdma_refill_ring_size 2048 +#define dp_rxdma_err_dst_ring_size 1024 +#define dp_rxdma_mon_status_ring_size 1024 +#define dp_rxdma_monitor_buf_ring_size 4096 +#define dp_rxdma_monitor_dst_ring_size 2048 +#define dp_rxdma_monitor_desc_ring_size 4096 + +#define dp_rx_buffer_size 2048 +#define dp_rx_buffer_align_size 128 + +#define dp_rxdma_buf_cookie_buf_id genmask(17, 0) +#define dp_rxdma_buf_cookie_pdev_id genmask(20, 18) + +#define dp_hw2sw_macid(mac_id) ((mac_id) ? 
((mac_id) - 1) : 0) +#define dp_sw2hw_macid(mac_id) ((mac_id) + 1) + +#define dp_tx_desc_id_mac_id genmask(1, 0) +#define dp_tx_desc_id_msdu_id genmask(18, 2) +#define dp_tx_desc_id_pool_id genmask(20, 19) + +struct ath11k_dp { + struct ath11k_base *ab; + enum ath11k_htc_ep_id eid; + struct completion htt_tgt_version_received; + u8 htt_tgt_ver_major; + u8 htt_tgt_ver_minor; + struct dp_link_desc_bank link_desc_banks[dp_link_desc_banks_max]; + struct dp_srng wbm_idle_ring; + struct dp_srng wbm_desc_rel_ring; + struct dp_srng tcl_cmd_ring; + struct dp_srng tcl_status_ring; + struct dp_srng reo_reinject_ring; + struct dp_srng rx_rel_ring; + struct dp_srng reo_except_ring; + struct dp_srng reo_cmd_ring; + struct dp_srng reo_status_ring; + struct dp_tx_ring tx_ring[dp_tcl_num_ring_max]; + struct hal_wbm_idle_scatter_list scatter_list[dp_idle_scatter_bufs_max]; + struct list_head reo_cmd_list; + struct list_head reo_cmd_cache_flush_list; + /* protects access to reo_cmd_list and reo_cmd_cache_flush_list */ + spinlock_t reo_cmd_lock; +}; + +/* htt definitions */ + +#define htt_tcl_meta_data_type bit(0) +#define htt_tcl_meta_data_valid_htt bit(1) + +/* vdev meta data */ +#define htt_tcl_meta_data_vdev_id genmask(9, 2) +#define htt_tcl_meta_data_pdev_id genmask(11, 10) +#define htt_tcl_meta_data_host_inspected bit(12) + +/* peer meta data */ +#define htt_tcl_meta_data_peer_id genmask(15, 2) + +#define htt_tx_wbm_comp_status_offset 8 + +/* htt tx completion is overlayed in wbm_release_ring */ +#define htt_tx_wbm_comp_info0_status genmask(12, 9) +#define htt_tx_wbm_comp_info0_reinject_reason genmask(16, 13) +#define htt_tx_wbm_comp_info0_reinject_reason genmask(16, 13) + +#define htt_tx_wbm_comp_info1_ack_rssi genmask(31, 24) + +struct htt_tx_wbm_completion { + u32 info0; + u32 info1; + u32 info2; + u32 info3; +} __packed; + +enum htt_h2t_msg_type { + htt_h2t_msg_type_version_req = 0, + htt_h2t_msg_type_sring_setup = 0xb, + htt_h2t_msg_type_rx_ring_selection_cfg = 0xc, + 
htt_h2t_msg_type_ext_stats_cfg = 0x10, + htt_h2t_msg_type_ppdu_stats_cfg = 0x11, +}; + +#define htt_ver_req_info_msg_id genmask(7, 0) + +struct htt_ver_req_cmd { + u32 ver_reg_info; +} __packed; + +enum htt_srng_ring_type { + htt_hw_to_sw_ring, + htt_sw_to_hw_ring, + htt_sw_to_sw_ring, +}; + +enum htt_srng_ring_id { + htt_rxdma_host_buf_ring, + htt_rxdma_monitor_status_ring, + htt_rxdma_monitor_buf_ring, + htt_rxdma_monitor_desc_ring, + htt_rxdma_monitor_dest_ring, + htt_host1_to_fw_rxbuf_ring, + htt_host2_to_fw_rxbuf_ring, + htt_rxdma_non_monitor_dest_ring, +}; + +/* host -> target htt_sring_setup message + * + * after target is booted up, host can send sring setup message for + * each host facing lmac sring. target setups up hw registers based + * on setup message and confirms back to host if response_required is set. + * host should wait for confirmation message before sending new sring + * setup message + * + * the message would appear as follows: + * + * |31 24|23 20|19|18 16|15|14 8|7 0| + * |--------------- +-----------------+----------------+------------------| + * | ring_type | ring_id | pdev_id | msg_type | + * |----------------------------------------------------------------------| + * | ring_base_addr_lo | + * |----------------------------------------------------------------------| + * | ring_base_addr_hi | + * |----------------------------------------------------------------------| + * |ring_misc_cfg_flag|ring_entry_size| ring_size | + * |----------------------------------------------------------------------| + * | ring_head_offset32_remote_addr_lo | + * |----------------------------------------------------------------------| + * | ring_head_offset32_remote_addr_hi | + * |----------------------------------------------------------------------| + * | ring_tail_offset32_remote_addr_lo | + * |----------------------------------------------------------------------| + * | ring_tail_offset32_remote_addr_hi | + * 
|----------------------------------------------------------------------| + * | ring_msi_addr_lo | + * |----------------------------------------------------------------------| + * | ring_msi_addr_hi | + * |----------------------------------------------------------------------| + * | ring_msi_data | + * |----------------------------------------------------------------------| + * | intr_timer_th |im| intr_batch_counter_th | + * |----------------------------------------------------------------------| + * | reserved |rr|ptcf| intr_low_threshold | + * |----------------------------------------------------------------------| + * where + * im = sw_intr_mode + * rr = response_required + * ptcf = prefetch_timer_cfg + * + * the message is interpreted as follows: + * dword0 - b'0:7 - msg_type: this will be set to + * htt_h2t_msg_type_sring_setup + * b'8:15 - pdev_id: + * 0 (for rings at soc/umac level), + * 1/2/3 mac id (for rings at lmac level) + * b'16:23 - ring_id: identify which ring is to setup, + * more details can be got from enum htt_srng_ring_id + * b'24:31 - ring_type: identify type of host rings, + * more details can be got from enum htt_srng_ring_type + * dword1 - b'0:31 - ring_base_addr_lo: lower 32bits of ring base address + * dword2 - b'0:31 - ring_base_addr_hi: upper 32bits of ring base address + * dword3 - b'0:15 - ring_size: size of the ring in unit of 4-bytes words + * b'16:23 - ring_entry_size: size of each entry in 4-byte word units + * b'24:31 - ring_misc_cfg_flag: valid only for hw_to_sw_ring and + * sw_to_hw_ring. + * refer to htt_sring_setup_ring_misc_cfg_ring defs. + * dword4 - b'0:31 - ring_head_off32_remote_addr_lo: + * lower 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the head + * element within the ring. + * (the head offset variable has type u32.) + * valid for hw_to_sw and sw_to_sw rings. 
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi: + * upper 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the head + * element within the ring. + * (the head offset variable has type u32.) + * valid for hw_to_sw and sw_to_sw rings. + * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo: + * lower 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the tail + * element within the ring. + * (the tail offset variable has type u32.) + * valid for hw_to_sw and sw_to_sw rings. + * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi: + * upper 32 bits of memory address of the remote variable + * storing the 4-byte word offset that identifies the tail + * element within the ring. + * (the tail offset variable has type u32.) + * valid for hw_to_sw and sw_to_sw rings. + * dword8 - b'0:31 - ring_msi_addr_lo: lower 32bits of msi cfg address + * valid only for hw_to_sw_ring and sw_to_hw_ring + * dword9 - b'0:31 - ring_msi_addr_hi: upper 32bits of msi cfg address + * valid only for hw_to_sw_ring and sw_to_hw_ring + * dword10 - b'0:31 - ring_msi_data: msi data + * refer to htt_sring_setup_ring_msc_cfg_xxx defs + * valid only for hw_to_sw_ring and sw_to_hw_ring + * dword11 - b'0:14 - intr_batch_counter_th: + * batch counter threshold is in units of 4-byte words. + * hw internally maintains and increments batch count. + * (see sring spec for detail description). + * when batch count reaches threshold value, an interrupt + * is generated by hw. + * b'15 - sw_intr_mode: + * this configuration shall be static. + * only programmed at power up. + * 0: generate pulse style sw interrupts + * 1: generate level style sw interrupts + * b'16:31 - intr_timer_th: + * the timer init value when timer is idle or is + * initialized to start downcounting. 
+ * in 8us units (to cover a range of 0 to 524 ms) + * dword12 - b'0:15 - intr_low_threshold: + * used only by consumer ring to generate ring_sw_int_p. + * ring entries low threshold water mark, that is used + * in combination with the interrupt timer as well as + * the the clearing of the level interrupt. + * b'16:18 - prefetch_timer_cfg: + * used only by consumer ring to set timer mode to + * support application prefetch handling. + * the external tail offset/pointer will be updated + * at following intervals: + * 3'b000: (prefetch feature disabled; used only for debug) + * 3'b001: 1 usec + * 3'b010: 4 usec + * 3'b011: 8 usec (default) + * 3'b100: 16 usec + * others: reserverd + * b'19 - response_required: + * host needs htt_t2h_msg_type_sring_setup_done as response + * b'20:31 - reserved: reserved for future use + */ + +#define htt_srng_setup_cmd_info0_msg_type genmask(7, 0) +#define htt_srng_setup_cmd_info0_pdev_id genmask(15, 8) +#define htt_srng_setup_cmd_info0_ring_id genmask(23, 16) +#define htt_srng_setup_cmd_info0_ring_type genmask(31, 24) + +#define htt_srng_setup_cmd_info1_ring_size genmask(15, 0) +#define htt_srng_setup_cmd_info1_ring_entry_size genmask(23, 16) +#define htt_srng_setup_cmd_info1_ring_loop_cnt_dis bit(25) +#define htt_srng_setup_cmd_info1_ring_flags_msi_swap bit(27) +#define htt_srng_setup_cmd_info1_ring_flags_host_fw_swap bit(28) +#define htt_srng_setup_cmd_info1_ring_flags_tlv_swap bit(29) + +#define htt_srng_setup_cmd_intr_info_batch_counter_thresh genmask(14, 0) +#define htt_srng_setup_cmd_intr_info_sw_intr_mode bit(15) +#define htt_srng_setup_cmd_intr_info_intr_timer_thresh genmask(31, 16) + +#define htt_srng_setup_cmd_info2_intr_low_thresh genmask(15, 0) +#define htt_srng_setup_cmd_info2_pre_fetch_timer_cfg bit(16) +#define htt_srng_setup_cmd_info2_response_required bit(19) + +struct htt_srng_setup_cmd { + u32 info0; + u32 ring_base_addr_lo; + u32 ring_base_addr_hi; + u32 info1; + u32 ring_head_off32_remote_addr_lo; + u32 
ring_head_off32_remote_addr_hi; + u32 ring_tail_off32_remote_addr_lo; + u32 ring_tail_off32_remote_addr_hi; + u32 ring_msi_addr_lo; + u32 ring_msi_addr_hi; + u32 msi_data; + u32 intr_info; + u32 info2; +} __packed; + +/* host -> target fw ppdu_stats config message + * + * @details + * the following field definitions describe the format of the htt host + * to target fw for ppdu_stats_cfg msg. + * the message allows the host to configure the ppdu_stats_ind messages + * produced by the target. + * + * |31 24|23 16|15 8|7 0| + * |-----------------------------------------------------------| + * | req bit mask | pdev_mask | msg type | + * |-----------------------------------------------------------| + * header fields: + * - msg_type + * bits 7:0 + * purpose: identifies this is a req to configure ppdu_stats_ind from target + * value: 0x11 + * - pdev_mask + * bits 8:15 + * purpose: identifies which pdevs this ppdu stats configuration applies to + * value: this is a overloaded field, refer to usage and interpretation of + * pdev in interface document. + * bit 8 : reserved for soc stats + * bit 9 - 15 : indicates pdev_mask in dbdc + * indicates macid_mask in dbs + * - req_tlv_bit_mask + * bits 16:31 + * purpose: each set bit indicates the corresponding ppdu stats tlv type + * needs to be included in the target's ppdu_stats_ind messages. + * value: refer htt_ppdu_stats_tlv_tag_t <<<??? 
+ * + */ + +struct htt_ppdu_stats_cfg_cmd { + u32 msg; +} __packed; + +#define htt_ppdu_stats_cfg_msg_type genmask(7, 0) +#define htt_ppdu_stats_cfg_pdev_id genmask(16, 9) +#define htt_ppdu_stats_cfg_tlv_type_bitmask genmask(31, 16) + +enum htt_ppdu_stats_tag_type { + htt_ppdu_stats_tag_common, + htt_ppdu_stats_tag_usr_common, + htt_ppdu_stats_tag_usr_rate, + htt_ppdu_stats_tag_usr_mpdu_enq_bitmap_64, + htt_ppdu_stats_tag_usr_mpdu_enq_bitmap_256, + htt_ppdu_stats_tag_sch_cmd_status, + htt_ppdu_stats_tag_usr_compltn_common, + htt_ppdu_stats_tag_usr_compltn_ba_bitmap_64, + htt_ppdu_stats_tag_usr_compltn_ba_bitmap_256, + htt_ppdu_stats_tag_usr_compltn_ack_ba_status, + htt_ppdu_stats_tag_usr_compltn_flush, + htt_ppdu_stats_tag_usr_common_array, + htt_ppdu_stats_tag_info, + htt_ppdu_stats_tag_tx_mgmtctrl_payload, + + /* new tlv's are added above to this line */ + htt_ppdu_stats_tag_max, +}; + +#define htt_ppdu_stats_tag_default (bit(htt_ppdu_stats_tag_common) \ + | bit(htt_ppdu_stats_tag_usr_common) \ + | bit(htt_ppdu_stats_tag_usr_rate) \ + | bit(htt_ppdu_stats_tag_sch_cmd_status) \ + | bit(htt_ppdu_stats_tag_usr_compltn_common) \ + | bit(htt_ppdu_stats_tag_usr_compltn_ack_ba_status) \ + | bit(htt_ppdu_stats_tag_usr_compltn_flush) \ + | bit(htt_ppdu_stats_tag_usr_common_array)) + +/* htt_h2t_msg_type_rx_ring_selection_cfg message + * + * details: + * htt_h2t_msg_type_rx_ring_selection_cfg message is sent by host to + * configure rxdma rings. + * the configuration is per ring based and includes both packet subtypes + * and ppdu/mpdu tlvs. 
+ * + * the message would appear as follows: + * + * |31 26|25|24|23 16|15 8|7 0| + * |-----------------+----------------+----------------+---------------| + * | rsvd1 |ps|ss| ring_id | pdev_id | msg_type | + * |-------------------------------------------------------------------| + * | rsvd2 | ring_buffer_size | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_0 | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_1 | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_2 | + * |-------------------------------------------------------------------| + * | packet_type_enable_flags_3 | + * |-------------------------------------------------------------------| + * | tlv_filter_in_flags | + * |-------------------------------------------------------------------| + * where: + * ps = pkt_swap + * ss = status_swap + * the message is interpreted as follows: + * dword0 - b'0:7 - msg_type: this will be set to + * htt_h2t_msg_type_rx_ring_selection_cfg + * b'8:15 - pdev_id: + * 0 (for rings at soc/umac level), + * 1/2/3 mac id (for rings at lmac level) + * b'16:23 - ring_id : identify the ring to configure. + * more details can be got from enum htt_srng_ring_id + * b'24 - status_swap: 1 is to swap status tlv + * b'25 - pkt_swap: 1 is to swap packet tlv + * b'26:31 - rsvd1: reserved for future use + * dword1 - b'0:16 - ring_buffer_size: size of bufferes referenced by rx ring, + * in byte units. 
+ * valid only for hw_to_sw_ring and sw_to_hw_ring + * - b'16:31 - rsvd2: reserved for future use + * dword2 - b'0:31 - packet_type_enable_flags_0: + * enable mgmt packet from 0b0000 to 0b1001 + * bits from low to high: fp, md, mo - 3 bits + * fp: filter_pass + * md: monitor_direct + * mo: monitor_other + * 10 mgmt subtypes * 3 bits -> 30 bits + * refer to pkt_type_enable_flag0_xxx_mgmt_xxx defs + * dword3 - b'0:31 - packet_type_enable_flags_1: + * enable mgmt packet from 0b1010 to 0b1111 + * bits from low to high: fp, md, mo - 3 bits + * refer to pkt_type_enable_flag1_xxx_mgmt_xxx defs + * dword4 - b'0:31 - packet_type_enable_flags_2: + * enable ctrl packet from 0b0000 to 0b1001 + * bits from low to high: fp, md, mo - 3 bits + * refer to pkt_type_enable_flag2_xxx_ctrl_xxx defs + * dword5 - b'0:31 - packet_type_enable_flags_3: + * enable ctrl packet from 0b1010 to 0b1111, + * mcast_data, ucast_data, null_data + * bits from low to high: fp, md, mo - 3 bits + * refer to pkt_type_enable_flag3_xxx_ctrl_xxx defs + * dword6 - b'0:31 - tlv_filter_in_flags: + * filter in attention/mpdu/ppdu/header/user tlvs + * refer to cfg_tlv_filter_in_flag defs + */ + +#define htt_rx_ring_selection_cfg_cmd_info0_msg_type genmask(7, 0) +#define htt_rx_ring_selection_cfg_cmd_info0_pdev_id genmask(15, 8) +#define htt_rx_ring_selection_cfg_cmd_info0_ring_id genmask(23, 16) +#define htt_rx_ring_selection_cfg_cmd_info0_ss bit(24) +#define htt_rx_ring_selection_cfg_cmd_info0_ps bit(25) + +#define htt_rx_ring_selection_cfg_cmd_info1_buf_size genmask(15, 0) + +enum htt_rx_filter_tlv_flags { + htt_rx_filter_tlv_flags_mpdu_start = bit(0), + htt_rx_filter_tlv_flags_msdu_start = bit(1), + htt_rx_filter_tlv_flags_rx_packet = bit(2), + htt_rx_filter_tlv_flags_msdu_end = bit(3), + htt_rx_filter_tlv_flags_mpdu_end = bit(4), + htt_rx_filter_tlv_flags_packet_header = bit(5), + htt_rx_filter_tlv_flags_per_msdu_header = bit(6), + htt_rx_filter_tlv_flags_attention = bit(7), + 
htt_rx_filter_tlv_flags_ppdu_start = bit(8), + htt_rx_filter_tlv_flags_ppdu_end = bit(9), + htt_rx_filter_tlv_flags_ppdu_end_user_stats = bit(10), + htt_rx_filter_tlv_flags_ppdu_end_user_stats_ext = bit(11), + htt_rx_filter_tlv_flags_ppdu_end_status_done = bit(12), +}; + +enum htt_rx_mgmt_pkt_filter_tlv_flags0 { + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_assoc_req = bit(0), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_assoc_req = bit(1), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_assoc_req = bit(2), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_assoc_resp = bit(3), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_assoc_resp = bit(4), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_assoc_resp = bit(5), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_reassoc_req = bit(6), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_reassoc_req = bit(7), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_reassoc_req = bit(8), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_reassoc_resp = bit(9), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_reassoc_resp = bit(10), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_reassoc_resp = bit(11), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_probe_req = bit(12), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_probe_req = bit(13), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_probe_req = bit(14), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_probe_resp = bit(15), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_probe_resp = bit(16), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_probe_resp = bit(17), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_probe_timing_adv = bit(18), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_probe_timing_adv = bit(19), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_probe_timing_adv = bit(20), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_reserved_7 = bit(21), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_reserved_7 = bit(22), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_reserved_7 = bit(23), + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_beacon = bit(24), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_beacon = bit(25), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_beacon = bit(26), + 
htt_rx_fp_mgmt_pkt_filter_tlv_flags0_atim = bit(27), + htt_rx_md_mgmt_pkt_filter_tlv_flags0_atim = bit(28), + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_atim = bit(29), +}; + +enum htt_rx_mgmt_pkt_filter_tlv_flags1 { + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_disassoc = bit(0), + htt_rx_md_mgmt_pkt_filter_tlv_flags1_disassoc = bit(1), + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_disassoc = bit(2), + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_auth = bit(3), + htt_rx_md_mgmt_pkt_filter_tlv_flags1_auth = bit(4), + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_auth = bit(5), + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_deauth = bit(6), + htt_rx_md_mgmt_pkt_filter_tlv_flags1_deauth = bit(7), + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_deauth = bit(8), + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_action = bit(9), + htt_rx_md_mgmt_pkt_filter_tlv_flags1_action = bit(10), + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_action = bit(11), + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_action_noack = bit(12), + htt_rx_md_mgmt_pkt_filter_tlv_flags1_action_noack = bit(13), + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_action_noack = bit(14), + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_reserved_15 = bit(15), + htt_rx_md_mgmt_pkt_filter_tlv_flags1_reserved_15 = bit(16), + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_reserved_15 = bit(17), +}; + +enum htt_rx_ctrl_pkt_filter_tlv_flags2 { + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_1 = bit(0), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_1 = bit(1), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_1 = bit(2), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_2 = bit(3), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_2 = bit(4), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_2 = bit(5), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_trigger = bit(6), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_trigger = bit(7), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_trigger = bit(8), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_4 = bit(9), + 
htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_4 = bit(10), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_4 = bit(11), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_bf_rep_poll = bit(12), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_bf_rep_poll = bit(13), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_bf_rep_poll = bit(14), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_vht_ndp = bit(15), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_vht_ndp = bit(16), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_vht_ndp = bit(17), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_frame_ext = bit(18), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_frame_ext = bit(19), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_frame_ext = bit(20), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_wrapper = bit(21), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_wrapper = bit(22), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_wrapper = bit(23), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_bar = bit(24), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_bar = bit(25), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_bar = bit(26), + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ba = bit(27), + htt_rx_md_ctrl_pkt_filter_tlv_flags2_ba = bit(28), + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ba = bit(29), +}; + +enum htt_rx_ctrl_pkt_filter_tlv_flags3 { + htt_rx_fp_ctrl_pkt_filter_tlv_flags3_pspoll = bit(0), + htt_rx_md_ctrl_pkt_filter_tlv_flags3_pspoll = bit(1), + htt_rx_mo_ctrl_pkt_filter_tlv_flags3_pspoll = bit(2), + htt_rx_fp_ctrl_pkt_filter_tlv_flags3_rts = bit(3), + htt_rx_md_ctrl_pkt_filter_tlv_flags3_rts = bit(4), + htt_rx_mo_ctrl_pkt_filter_tlv_flags3_rts = bit(5), + htt_rx_fp_ctrl_pkt_filter_tlv_flags3_cts = bit(6), + htt_rx_md_ctrl_pkt_filter_tlv_flags3_cts = bit(7), + htt_rx_mo_ctrl_pkt_filter_tlv_flags3_cts = bit(8), + htt_rx_fp_ctrl_pkt_filter_tlv_flags3_ack = bit(9), + htt_rx_md_ctrl_pkt_filter_tlv_flags3_ack = bit(10), + htt_rx_mo_ctrl_pkt_filter_tlv_flags3_ack = bit(11), + htt_rx_fp_ctrl_pkt_filter_tlv_flags3_cfend = bit(12), + 
htt_rx_md_ctrl_pkt_filter_tlv_flags3_cfend = bit(13), + htt_rx_mo_ctrl_pkt_filter_tlv_flags3_cfend = bit(14), + htt_rx_fp_ctrl_pkt_filter_tlv_flags3_cfend_ack = bit(15), + htt_rx_md_ctrl_pkt_filter_tlv_flags3_cfend_ack = bit(16), + htt_rx_mo_ctrl_pkt_filter_tlv_flags3_cfend_ack = bit(17), +}; + +enum htt_rx_data_pkt_filter_tlv_flasg3 { + htt_rx_fp_data_pkt_filter_tlv_flasg3_mcast = bit(18), + htt_rx_md_data_pkt_filter_tlv_flasg3_mcast = bit(19), + htt_rx_mo_data_pkt_filter_tlv_flasg3_mcast = bit(20), + htt_rx_fp_data_pkt_filter_tlv_flasg3_ucast = bit(21), + htt_rx_md_data_pkt_filter_tlv_flasg3_ucast = bit(22), + htt_rx_mo_data_pkt_filter_tlv_flasg3_ucast = bit(23), + htt_rx_fp_data_pkt_filter_tlv_flasg3_null_data = bit(24), + htt_rx_md_data_pkt_filter_tlv_flasg3_null_data = bit(25), + htt_rx_mo_data_pkt_filter_tlv_flasg3_null_data = bit(26), +}; + +#define htt_rx_fp_mgmt_filter_flags0 \ + (htt_rx_fp_mgmt_pkt_filter_tlv_flags0_assoc_req \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_assoc_resp \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_reassoc_req \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_reassoc_resp \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_probe_req \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_probe_resp \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_probe_timing_adv \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_beacon \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags0_atim) + +#define htt_rx_md_mgmt_filter_flags0 \ + (htt_rx_md_mgmt_pkt_filter_tlv_flags0_assoc_req \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_assoc_resp \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_reassoc_req \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_reassoc_resp \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_probe_req \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_probe_resp \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_probe_timing_adv \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_beacon \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags0_atim) + +#define htt_rx_mo_mgmt_filter_flags0 \ + (htt_rx_mo_mgmt_pkt_filter_tlv_flags0_assoc_req \ + 
| htt_rx_mo_mgmt_pkt_filter_tlv_flags0_assoc_resp \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_reassoc_req \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_reassoc_resp \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_probe_req \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_probe_resp \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_probe_timing_adv \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_beacon \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags0_atim) + +#define htt_rx_fp_mgmt_filter_flags1 (htt_rx_fp_mgmt_pkt_filter_tlv_flags1_disassoc \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags1_auth \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags1_deauth \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags1_action \ + | htt_rx_fp_mgmt_pkt_filter_tlv_flags1_action_noack) + +#define htt_rx_md_mgmt_filter_flags1 (htt_rx_md_mgmt_pkt_filter_tlv_flags1_disassoc \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags1_auth \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags1_deauth \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags1_action \ + | htt_rx_md_mgmt_pkt_filter_tlv_flags1_action_noack) + +#define htt_rx_mo_mgmt_filter_flags1 (htt_rx_mo_mgmt_pkt_filter_tlv_flags1_disassoc \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags1_auth \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags1_deauth \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags1_action \ + | htt_rx_mo_mgmt_pkt_filter_tlv_flags1_action_noack) + +#define htt_rx_fp_ctrl_filter_flasg2 (htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_wrapper \ + | htt_rx_fp_ctrl_pkt_filter_tlv_flags2_bar \ + | htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ba) + +#define htt_rx_md_ctrl_filter_flasg2 (htt_rx_md_ctrl_pkt_filter_tlv_flags2_ctrl_wrapper \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags2_bar \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags2_ba) + +#define htt_rx_mo_ctrl_filter_flasg2 (htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_wrapper \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags2_bar \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ba) + +#define htt_rx_fp_ctrl_filter_flasg3 (htt_rx_fp_ctrl_pkt_filter_tlv_flags3_pspoll \ + | htt_rx_fp_ctrl_pkt_filter_tlv_flags3_rts \ + | 
htt_rx_fp_ctrl_pkt_filter_tlv_flags3_cts \ + | htt_rx_fp_ctrl_pkt_filter_tlv_flags3_ack \ + | htt_rx_fp_ctrl_pkt_filter_tlv_flags3_cfend \ + | htt_rx_fp_ctrl_pkt_filter_tlv_flags3_cfend_ack) + +#define htt_rx_md_ctrl_filter_flasg3 (htt_rx_md_ctrl_pkt_filter_tlv_flags3_pspoll \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags3_rts \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags3_cts \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags3_ack \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags3_cfend \ + | htt_rx_md_ctrl_pkt_filter_tlv_flags3_cfend_ack) + +#define htt_rx_mo_ctrl_filter_flasg3 (htt_rx_mo_ctrl_pkt_filter_tlv_flags3_pspoll \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags3_rts \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags3_cts \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags3_ack \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags3_cfend \ + | htt_rx_mo_ctrl_pkt_filter_tlv_flags3_cfend_ack) + +#define htt_rx_fp_data_filter_flasg3 (htt_rx_fp_data_pkt_filter_tlv_flasg3_mcast \ + | htt_rx_fp_data_pkt_filter_tlv_flasg3_ucast \ + | htt_rx_fp_data_pkt_filter_tlv_flasg3_null_data) + +#define htt_rx_md_data_filter_flasg3 (htt_rx_md_data_pkt_filter_tlv_flasg3_mcast \ + | htt_rx_md_data_pkt_filter_tlv_flasg3_ucast \ + | htt_rx_md_data_pkt_filter_tlv_flasg3_null_data) + +#define htt_rx_mo_data_filter_flasg3 (htt_rx_mo_data_pkt_filter_tlv_flasg3_mcast \ + | htt_rx_mo_data_pkt_filter_tlv_flasg3_ucast \ + | htt_rx_mo_data_pkt_filter_tlv_flasg3_null_data) + +#define htt_rx_mon_fp_mgmt_filter_flags0 \ + (htt_rx_fp_mgmt_filter_flags0 | \ + htt_rx_fp_mgmt_pkt_filter_tlv_flags0_reserved_7) + +#define htt_rx_mon_mo_mgmt_filter_flags0 \ + (htt_rx_mo_mgmt_filter_flags0 | \ + htt_rx_mo_mgmt_pkt_filter_tlv_flags0_reserved_7) + +#define htt_rx_mon_fp_mgmt_filter_flags1 \ + (htt_rx_fp_mgmt_filter_flags1 | \ + htt_rx_fp_mgmt_pkt_filter_tlv_flags1_reserved_15) + +#define htt_rx_mon_mo_mgmt_filter_flags1 \ + (htt_rx_mo_mgmt_filter_flags1 | \ + htt_rx_mo_mgmt_pkt_filter_tlv_flags1_reserved_15) + +#define htt_rx_mon_fp_ctrl_filter_flasg2 \ + 
(htt_rx_fp_ctrl_filter_flasg2 | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_1 | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_2 | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_trigger | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_4 | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_bf_rep_poll | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_vht_ndp | \ + htt_rx_fp_ctrl_pkt_filter_tlv_flags2_ctrl_frame_ext) + +#define htt_rx_mon_mo_ctrl_filter_flasg2 \ + (htt_rx_mo_ctrl_filter_flasg2 | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_1 | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_2 | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_trigger | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_reserved_4 | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_bf_rep_poll | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_vht_ndp | \ + htt_rx_mo_ctrl_pkt_filter_tlv_flags2_ctrl_frame_ext) + +#define htt_rx_mon_fp_ctrl_filter_flasg3 htt_rx_fp_ctrl_filter_flasg3 + +#define htt_rx_mon_mo_ctrl_filter_flasg3 htt_rx_mo_ctrl_filter_flasg3 + +#define htt_rx_mon_fp_data_filter_flasg3 htt_rx_fp_data_filter_flasg3 + +#define htt_rx_mon_mo_data_filter_flasg3 htt_rx_mo_data_filter_flasg3 + +#define htt_rx_mon_filter_tlv_flags \ + (htt_rx_filter_tlv_flags_mpdu_start | \ + htt_rx_filter_tlv_flags_ppdu_start | \ + htt_rx_filter_tlv_flags_ppdu_end | \ + htt_rx_filter_tlv_flags_ppdu_end_user_stats | \ + htt_rx_filter_tlv_flags_ppdu_end_user_stats_ext | \ + htt_rx_filter_tlv_flags_ppdu_end_status_done) + +#define htt_rx_mon_filter_tlv_flags_mon_status_ring \ + (htt_rx_filter_tlv_flags_mpdu_start | \ + htt_rx_filter_tlv_flags_ppdu_start | \ + htt_rx_filter_tlv_flags_ppdu_end | \ + htt_rx_filter_tlv_flags_ppdu_end_user_stats | \ + htt_rx_filter_tlv_flags_ppdu_end_user_stats_ext | \ + htt_rx_filter_tlv_flags_ppdu_end_status_done) + +#define htt_rx_mon_filter_tlv_flags_mon_buf_ring \ + (htt_rx_filter_tlv_flags_mpdu_start | \ + htt_rx_filter_tlv_flags_msdu_start | \ 
+ htt_rx_filter_tlv_flags_rx_packet | \ + htt_rx_filter_tlv_flags_msdu_end | \ + htt_rx_filter_tlv_flags_mpdu_end | \ + htt_rx_filter_tlv_flags_packet_header | \ + htt_rx_filter_tlv_flags_per_msdu_header | \ + htt_rx_filter_tlv_flags_attention) + +struct htt_rx_ring_selection_cfg_cmd { + u32 info0; + u32 info1; + u32 pkt_type_en_flags0; + u32 pkt_type_en_flags1; + u32 pkt_type_en_flags2; + u32 pkt_type_en_flags3; + u32 rx_filter_tlv; +} __packed; + +struct htt_rx_ring_tlv_filter { + u32 rx_filter; /* see htt_rx_filter_tlv_flags */ + u32 pkt_filter_flags0; /* mgmt */ + u32 pkt_filter_flags1; /* mgmt */ + u32 pkt_filter_flags2; /* ctrl */ + u32 pkt_filter_flags3; /* data */ +}; + +/* htt message target->host */ + +enum htt_t2h_msg_type { + htt_t2h_msg_type_version_conf, + htt_t2h_msg_type_rx_addba = 0x5, + htt_t2h_msg_type_pktlog = 0x8, + htt_t2h_msg_type_sec_ind = 0xb, + htt_t2h_msg_type_peer_map = 0x1e, + htt_t2h_msg_type_peer_unmap = 0x1f, + htt_t2h_msg_type_ppdu_stats_ind = 0x1d, + htt_t2h_msg_type_ext_stats_conf = 0x1c, +}; + +#define htt_target_version_major 3 + +#define htt_t2h_msg_type genmask(7, 0) +#define htt_t2h_version_conf_minor genmask(15, 8) +#define htt_t2h_version_conf_major genmask(23, 16) + +struct htt_t2h_version_conf_msg { + u32 version; +} __packed; + +#define htt_t2h_peer_map_info_vdev_id genmask(15, 8) +#define htt_t2h_peer_map_info_peer_id genmask(31, 16) +#define htt_t2h_peer_map_info1_mac_addr_h16 genmask(15, 0) +#define htt_t2h_peer_map_info1_hw_peer_id genmask(31, 16) +#define htt_t2h_peer_map_info2_ast_hash_val genmask(15, 0) +#define htt_t2h_peer_map_info2_next_hop_m bit(16) +#define htt_t2h_peer_map_info2_next_hop_s 16 + +struct htt_t2h_peer_map_event { + u32 info; + u32 mac_addr_l32; + u32 info1; + u32 info2; +} __packed; + +#define htt_t2h_peer_unmap_info_vdev_id htt_t2h_peer_map_info_vdev_id +#define htt_t2h_peer_unmap_info_peer_id htt_t2h_peer_map_info_peer_id +#define htt_t2h_peer_unmap_info1_mac_addr_h16 \ + 
htt_t2h_peer_map_info1_mac_addr_h16 +#define htt_t2h_peer_map_info1_next_hop_m htt_t2h_peer_map_info2_next_hop_m +#define htt_t2h_peer_map_info1_next_hop_s htt_t2h_peer_map_info2_next_hop_s + +struct htt_t2h_peer_unmap_event { + u32 info; + u32 mac_addr_l32; + u32 info1; +} __packed; + +struct htt_resp_msg { + union { + struct htt_t2h_version_conf_msg version_msg; + struct htt_t2h_peer_map_event peer_map_ev; + struct htt_t2h_peer_unmap_event peer_unmap_ev; + }; +} __packed; + +/* ppdu stats + * + * @details + * the following field definitions describe the format of the htt target + * to host ppdu stats indication message. + * + * + * |31 16|15 12|11 10|9 8|7 0 | + * |----------------------------------------------------------------------| + * | payload_size | rsvd |pdev_id|mac_id | msg type | + * |----------------------------------------------------------------------| + * | ppdu_id | + * |----------------------------------------------------------------------| + * | timestamp in us | + * |----------------------------------------------------------------------| + * | reserved | + * |----------------------------------------------------------------------| + * | type-specific stats info | + * | (see htt_ppdu_stats.h) | + * |----------------------------------------------------------------------| + * header fields: + * - msg_type + * bits 7:0 + * purpose: identifies this is a ppdu stats indication + * message. 
+ * value: 0x1d + * - mac_id + * bits 9:8 + * purpose: mac_id of this ppdu_id + * value: 0-3 + * - pdev_id + * bits 11:10 + * purpose: pdev_id of this ppdu_id + * value: 0-3 + * 0 (for rings at soc level), + * 1/2/3 pdev -> 0/1/2 + * - payload_size + * bits 31:16 + * purpose: total tlv size + * value: payload_size in bytes + */ + +#define htt_t2h_ppdu_stats_info_pdev_id genmask(11, 10) +#define htt_t2h_ppdu_stats_info_payload_size genmask(31, 16) + +struct ath11k_htt_ppdu_stats_msg { + u32 info; + u32 ppdu_id; + u32 timestamp; + u32 rsvd; + u8 data[0]; +} __packed; + +struct htt_tlv { + u32 header; + u8 value[0]; +} __packed; + +#define htt_tlv_tag genmask(11, 0) +#define htt_tlv_len genmask(23, 12) + +enum htt_ppdu_stats_bw { + htt_ppdu_stats_bandwidth_5mhz = 0, + htt_ppdu_stats_bandwidth_10mhz = 1, + htt_ppdu_stats_bandwidth_20mhz = 2, + htt_ppdu_stats_bandwidth_40mhz = 3, + htt_ppdu_stats_bandwidth_80mhz = 4, + htt_ppdu_stats_bandwidth_160mhz = 5, /* includes 80+80 */ + htt_ppdu_stats_bandwidth_dyn = 6, +}; + +#define htt_ppdu_stats_cmn_flags_frame_type_m genmask(7, 0) +#define htt_ppdu_stats_cmn_flags_queue_type_m genmask(15, 8) +/* bw - htt_ppdu_stats_bw */ +#define htt_ppdu_stats_cmn_flags_bw_m genmask(19, 16) + +struct htt_ppdu_stats_common { + u32 ppdu_id; + u16 sched_cmdid; + u8 ring_id; + u8 num_users; + u32 flags; /* %htt_ppdu_stats_common_flags_*/ + u32 chain_mask; + u32 fes_duration_us; /* frame exchange sequence */ + u32 ppdu_sch_eval_start_tstmp_us; + u32 ppdu_sch_end_tstmp_us; + u32 ppdu_start_tstmp_us; + /* bit [15 : 0] - phy mode (wlan_phy_mode) with which ppdu was transmitted + * bit [31 : 16] - bandwidth (in mhz) with which ppdu was transmitted + */ + u16 phy_mode; + u16 bw_mhz; +} __packed; + +#define htt_ppdu_stats_user_rate_info0_user_pos_m genmask(3, 0) +#define htt_ppdu_stats_user_rate_info0_mu_group_id_m genmask(11, 4) + +#define htt_ppdu_stats_user_rate_info1_resp_type_vald_m bit(0) +#define htt_ppdu_stats_user_rate_info1_ppdu_type_m 
genmask(5, 1) + +#define htt_ppdu_stats_user_rate_flags_ltf_size_m genmask(1, 0) +#define htt_ppdu_stats_user_rate_flags_stbc_m bit(2) +#define htt_ppdu_stats_user_rate_flags_he_re_m bit(3) +#define htt_ppdu_stats_user_rate_flags_txbf_m genmask(7, 4) +#define htt_ppdu_stats_user_rate_flags_bw_m genmask(11, 8) +#define htt_ppdu_stats_user_rate_flags_nss_m genmask(15, 12) +#define htt_ppdu_stats_user_rate_flags_mcs_m genmask(19, 16) +#define htt_ppdu_stats_user_rate_flags_preamble_m genmask(23, 20) +#define htt_ppdu_stats_user_rate_flags_gi_m genmask(27, 24) +#define htt_ppdu_stats_user_rate_flags_dcm_m bit(28) +#define htt_ppdu_stats_user_rate_flags_ldpc_m bit(29) + +#define htt_usr_rate_preamble(_val) \ + field_get(htt_ppdu_stats_user_rate_flags_preamble_m, _val) +#define htt_usr_rate_bw(_val) \ + field_get(htt_ppdu_stats_user_rate_flags_bw_m, _val) +#define htt_usr_rate_nss(_val) \ + field_get(htt_ppdu_stats_user_rate_flags_nss_m, _val) +#define htt_usr_rate_mcs(_val) \ + field_get(htt_ppdu_stats_user_rate_flags_mcs_m, _val) +#define htt_usr_rate_gi(_val) \ + field_get(htt_ppdu_stats_user_rate_flags_gi_m, _val) + +#define htt_ppdu_stats_user_rate_resp_flags_ltf_size_m genmask(1, 0) +#define htt_ppdu_stats_user_rate_resp_flags_stbc_m bit(2) +#define htt_ppdu_stats_user_rate_resp_flags_he_re_m bit(3) +#define htt_ppdu_stats_user_rate_resp_flags_txbf_m genmask(7, 4) +#define htt_ppdu_stats_user_rate_resp_flags_bw_m genmask(11, 8) +#define htt_ppdu_stats_user_rate_resp_flags_nss_m genmask(15, 12) +#define htt_ppdu_stats_user_rate_resp_flags_mcs_m genmask(19, 16) +#define htt_ppdu_stats_user_rate_resp_flags_preamble_m genmask(23, 20) +#define htt_ppdu_stats_user_rate_resp_flags_gi_m genmask(27, 24) +#define htt_ppdu_stats_user_rate_resp_flags_dcm_m bit(28) +#define htt_ppdu_stats_user_rate_resp_flags_ldpc_m bit(29) + +struct htt_ppdu_stats_user_rate { + u8 tid_num; + u8 reserved0; + u16 sw_peer_id; + u32 info0; /* %htt_ppdu_stats_user_rate_info0_*/ + u16 ru_end; + u16 
ru_start; + u16 resp_ru_end; + u16 resp_ru_start; + u32 info1; /* %htt_ppdu_stats_user_rate_info1_ */ + u32 rate_flags; /* %htt_ppdu_stats_user_rate_flags_ */ + /* note: resp_rate_info is only valid for if resp_type is ul */ + u32 resp_rate_flags; /* %htt_ppdu_stats_user_rate_resp_flags_ */ +} __packed; + +#define htt_ppdu_stats_tx_info_flags_ratecode_m genmask(7, 0) +#define htt_ppdu_stats_tx_info_flags_is_ampdu_m bit(8) +#define htt_ppdu_stats_tx_info_flags_ba_ack_failed_m genmask(10, 9) +#define htt_ppdu_stats_tx_info_flags_bw_m genmask(13, 11) +#define htt_ppdu_stats_tx_info_flags_sgi_m bit(14) +#define htt_ppdu_stats_tx_info_flags_peerid_m genmask(31, 16) + +#define htt_tx_info_is_amsdu(_flags) \ + field_get(htt_ppdu_stats_tx_info_flags_is_ampdu_m, _flags) +#define htt_tx_info_ba_ack_failed(_flags) \ + field_get(htt_ppdu_stats_tx_info_flags_ba_ack_failed_m, _flags) +#define htt_tx_info_ratecode(_flags) \ + field_get(htt_ppdu_stats_tx_info_flags_ratecode_m, _flags) +#define htt_tx_info_peerid(_flags) \ + field_get(htt_ppdu_stats_tx_info_flags_peerid_m, _flags) + +struct htt_tx_ppdu_stats_info { + struct htt_tlv tlv_hdr; + u32 tx_success_bytes; + u32 tx_retry_bytes; + u32 tx_failed_bytes; + u32 flags; /* %htt_ppdu_stats_tx_info_flags_ */ + u16 tx_success_msdus; + u16 tx_retry_msdus; + u16 tx_failed_msdus; + u16 tx_duration; /* united in us */ +} __packed; + +enum htt_ppdu_stats_usr_compln_status { + htt_ppdu_stats_user_status_ok, + htt_ppdu_stats_user_status_filtered, + htt_ppdu_stats_user_status_resp_timeout, + htt_ppdu_stats_user_status_resp_mismatch, + htt_ppdu_stats_user_status_abort, +}; + +#define htt_ppdu_stats_usr_cmpltn_cmn_flags_long_retry_m genmask(3, 0) +#define htt_ppdu_stats_usr_cmpltn_cmn_flags_short_retry_m genmask(7, 4) +#define htt_ppdu_stats_usr_cmpltn_cmn_flags_is_ampdu_m bit(8) +#define htt_ppdu_stats_usr_cmpltn_cmn_flags_resp_type_m genmask(12, 9) + +#define htt_usr_cmpltn_is_ampdu(_val) \ + 
field_get(htt_ppdu_stats_usr_cmpltn_cmn_flags_is_ampdu_m, _val) +#define htt_usr_cmpltn_long_retry(_val) \ + field_get(htt_ppdu_stats_usr_cmpltn_cmn_flags_long_retry_m, _val) +#define htt_usr_cmpltn_short_retry(_val) \ + field_get(htt_ppdu_stats_usr_cmpltn_cmn_flags_short_retry_m, _val) + +struct htt_ppdu_stats_usr_cmpltn_cmn { + u8 status; + u8 tid_num; + u16 sw_peer_id; + /* rssi value of last ack packet (units = db above noise floor) */ + u32 ack_rssi; + u16 mpdu_tried; + u16 mpdu_success; + u32 flags; /* %htt_ppdu_stats_usr_cmpltn_cmn_flags_long_retries*/ +} __packed; + +#define htt_ppdu_stats_ack_ba_info_num_mpdu_m genmask(8, 0) +#define htt_ppdu_stats_ack_ba_info_num_msdu_m genmask(24, 9) +#define htt_ppdu_stats_ack_ba_info_tid_num genmask(3, 0) + +struct htt_ppdu_stats_usr_cmpltn_ack_ba_status { + u32 ppdu_id; + u16 sw_peer_id; + u16 reserved0; + u32 info; /* %htt_ppdu_stats_usr_cmpltn_cmn_info_ */ + u16 current_seq; + u16 start_seq; + u32 success_bytes; +} __packed; + +struct htt_ppdu_stats_usr_cmn_array { + struct htt_tlv tlv_hdr; + u32 num_ppdu_stats; + /* tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info + * elements. 
+ * tx_ppdu_stats_info is variable length, with length = + * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info) + */ + struct htt_tx_ppdu_stats_info tx_ppdu_info[0]; +} __packed; + +struct htt_ppdu_user_stats { + u16 peer_id; + u32 tlv_flags; + bool is_valid_peer_id; + struct htt_ppdu_stats_user_rate rate; + struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn; + struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba; +}; + +#define htt_ppdu_stats_max_users 8 +#define htt_ppdu_desc_max_depth 16 + +struct htt_ppdu_stats { + struct htt_ppdu_stats_common common; + struct htt_ppdu_user_stats user_stats[htt_ppdu_stats_max_users]; +}; + +struct htt_ppdu_stats_info { + u32 ppdu_id; + struct htt_ppdu_stats ppdu_stats; + struct list_head list; +}; + +/** + * @brief target -> host packet log message + * + * @details + * the following field definitions describe the format of the packet log + * message sent from the target to the host. + * the message consists of a 4-octet header,followed by a variable number + * of 32-bit character values. 
+ * + * |31 16|15 12|11 10|9 8|7 0| + * |------------------------------------------------------------------| + * | payload_size | rsvd |pdev_id|mac_id| msg type | + * |------------------------------------------------------------------| + * | payload | + * |------------------------------------------------------------------| + * - msg_type + * bits 7:0 + * purpose: identifies this as a pktlog message + * value: htt_t2h_msg_type_pktlog + * - mac_id + * bits 9:8 + * purpose: identifies which mac/phy instance generated this pktlog info + * value: 0-3 + * - pdev_id + * bits 11:10 + * purpose: pdev_id + * value: 0-3 + * 0 (for rings at soc level), + * 1/2/3 pdev -> 0/1/2 + * - payload_size + * bits 31:16 + * purpose: explicitly specify the payload size + * value: payload size in bytes (payload size is a multiple of 4 bytes) + */ +struct htt_pktlog_msg { + u32 hdr; + u8 payload[0]; +}; + +/** + * @brief host -> target fw extended statistics retrieve + * + * @details + * the following field definitions describe the format of the htt host + * to target fw extended stats retrieve message. + * the message specifies the type of stats the host wants to retrieve. 
+ * + * |31 24|23 16|15 8|7 0| + * |-----------------------------------------------------------| + * | reserved | stats type | pdev_mask | msg type | + * |-----------------------------------------------------------| + * | config param [0] | + * |-----------------------------------------------------------| + * | config param [1] | + * |-----------------------------------------------------------| + * | config param [2] | + * |-----------------------------------------------------------| + * | config param [3] | + * |-----------------------------------------------------------| + * | reserved | + * |-----------------------------------------------------------| + * | cookie lsbs | + * |-----------------------------------------------------------| + * | cookie msbs | + * |-----------------------------------------------------------| + * header fields: + * - msg_type + * bits 7:0 + * purpose: identifies this is a extended stats upload request message + * value: 0x10 + * - pdev_mask + * bits 8:15 + * purpose: identifies the mask of pdevs to retrieve stats from + * value: this is a overloaded field, refer to usage and interpretation of + * pdev in interface document. 
+ * bit 8 : reserved for soc stats + * bit 9 - 15 : indicates pdev_mask in dbdc + * indicates macid_mask in dbs + * - stats_type + * bits 23:16 + * purpose: identifies which fw statistics to upload + * value: defined by htt_dbg_ext_stats_type (see htt_stats.h) + * - reserved + * bits 31:24 + * - config_param [0] + * bits 31:0 + * purpose: give an opaque configuration value to the specified stats type + * value: stats-type specific configuration value + * refer to htt_stats.h for interpretation for each stats sub_type + * - config_param [1] + * bits 31:0 + * purpose: give an opaque configuration value to the specified stats type + * value: stats-type specific configuration value + * refer to htt_stats.h for interpretation for each stats sub_type + * - config_param [2] + * bits 31:0 + * purpose: give an opaque configuration value to the specified stats type + * value: stats-type specific configuration value + * refer to htt_stats.h for interpretation for each stats sub_type + * - config_param [3] + * bits 31:0 + * purpose: give an opaque configuration value to the specified stats type + * value: stats-type specific configuration value + * refer to htt_stats.h for interpretation for each stats sub_type + * - reserved [31:0] for future use. + * - cookie_lsbs + * bits 31:0 + * purpose: provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * value: lsbs of the opaque cookie specified by the host-side requestor + * - cookie_msbs + * bits 31:0 + * purpose: provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. 
+ * value: msbs of the opaque cookie specified by the host-side requestor + */ + +struct htt_ext_stats_cfg_hdr { + u8 msg_type; + u8 pdev_mask; + u8 stats_type; + u8 reserved; +} __packed; + +struct htt_ext_stats_cfg_cmd { + struct htt_ext_stats_cfg_hdr hdr; + u32 cfg_param0; + u32 cfg_param1; + u32 cfg_param2; + u32 cfg_param3; + u32 reserved; + u32 cookie_lsb; + u32 cookie_msb; +} __packed; + +/* htt stats config default params */ +#define htt_stat_default_reset_start_offset 0 +#define htt_stat_default_cfg0_all_hwqs 0xffffffff +#define htt_stat_default_cfg0_all_txqs 0xffffffff +#define htt_stat_default_cfg0_all_cmdqs 0xffff +#define htt_stat_default_cfg0_all_rings 0xffff +#define htt_stat_default_cfg0_active_peers 0xff +#define htt_stat_default_cfg0_cca_cumulative 0x10 +#define htt_stat_default_cfg0_active_vdevs 0xff + +/* htt_dbg_ext_stats_peer_info + * params: + * @config_param0: + * [bit0] - [0] for sw_peer_id, [1] for mac_addr based request + * [bit15 : bit 1] htt_peer_stats_req_mode_t + * [bit31 : bit16] sw_peer_id + * @config_param1: + * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum) + * 0 bit htt_peer_stats_cmn_tlv + * 1 bit htt_peer_details_tlv + * 2 bit htt_tx_peer_rate_stats_tlv + * 3 bit htt_rx_peer_rate_stats_tlv + * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv + * 5 bit htt_rx_tid_stats_tlv + * 6 bit htt_msdu_flow_stats_tlv + * @config_param2: [bit31 : bit0] mac_addr31to0 + * @config_param3: [bit15 : bit0] mac_addr47to32 + * [bit31 : bit16] reserved + */ +#define htt_stat_peer_info_mac_addr bit(0) +#define htt_stat_default_peer_req_type 0x7f + +/* used to set different configs to the specified stats type.*/ +struct htt_ext_stats_cfg_params { + u32 cfg0; + u32 cfg1; + u32 cfg2; + u32 cfg3; +}; + +/** + * @brief target -> host extended statistics upload + * + * @details + * the following field definitions describe the format of the htt target + * to host stats upload confirmation message. 
+ * the message contains a cookie echoed from the htt host->target stats + * upload request, which identifies which request the confirmation is + * for, and a single stats can span over multiple htt stats indication + * due to the htt message size limitation so every htt ext stats indication + * will have tag-length-value stats information elements. + * the tag-length header for each htt stats ind message also includes a + * status field, to indicate whether the request for the stat type in + * question was fully met, partially met, unable to be met, or invalid + * (if the stat type in question is disabled in the target). + * a done bit 1's indicate the end of the of stats info elements. + * + * + * |31 16|15 12|11|10 8|7 5|4 0| + * |--------------------------------------------------------------| + * | reserved | msg type | + * |--------------------------------------------------------------| + * | cookie lsbs | + * |--------------------------------------------------------------| + * | cookie msbs | + * |--------------------------------------------------------------| + * | stats entry length | rsvd | d| s | stat type | + * |--------------------------------------------------------------| + * | type-specific stats info | + * | (see htt_stats.h) | + * |--------------------------------------------------------------| + * header fields: + * - msg_type + * bits 7:0 + * purpose: identifies this is a extended statistics upload confirmation + * message. + * value: 0x1c + * - cookie_lsbs + * bits 31:0 + * purpose: provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * value: lsbs of the opaque cookie specified by the host-side requestor + * - cookie_msbs + * bits 31:0 + * purpose: provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. 
+ * value: msbs of the opaque cookie specified by the host-side requestor + * + * stats information element tag-length header fields: + * - stat_type + * bits 7:0 + * purpose: identifies the type of statistics info held in the + * following information element + * value: htt_dbg_ext_stats_type + * - status + * bits 10:8 + * purpose: indicate whether the requested stats are present + * value: htt_dbg_ext_stats_status + * - done + * bits 11 + * purpose: + * indicates the completion of the stats entry, this will be the last + * stats conf htt segment for the requested stats type. + * value: + * 0 -> the stats retrieval is ongoing + * 1 -> the stats retrieval is complete + * - length + * bits 31:16 + * purpose: indicate the stats information size + * value: this field specifies the number of bytes of stats information + * that follows the element tag-length header. + * it is expected but not required that this length is a multiple of + * 4 bytes. + */ + +#define htt_t2h_ext_stats_info1_length genmask(31, 16) + +struct ath11k_htt_extd_stats_msg { + u32 info0; + u64 cookie; + u32 info1; + u8 data[0]; +} __packed; + +struct htt_mac_addr { + u32 mac_addr_l32; + u32 mac_addr_h16; +}; + +static inline void ath11k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr) +{ + if (is_enabled(config_cpu_big_endian)) { + addr_l32 = swab32(addr_l32); + addr_h16 = swab16(addr_h16); + } + + memcpy(addr, &addr_l32, 4); + memcpy(addr + 4, &addr_h16, eth_alen - 4); +} + +int ath11k_dp_service_srng(struct ath11k_base *ab, + struct ath11k_ext_irq_grp *irq_grp, + int budget); +int ath11k_dp_htt_connect(struct ath11k_dp *dp); +void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif); +void ath11k_dp_free(struct ath11k_base *ab); +int ath11k_dp_alloc(struct ath11k_base *ab); +int ath11k_dp_pdev_alloc(struct ath11k_base *ab); +void ath11k_dp_pdev_free(struct ath11k_base *ab); +int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type 
ring_type); +int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr); +void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr); +void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring); +int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, + enum hal_ring_type type, int ring_num, + int mac_id, int num_entries); +void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab, + struct dp_link_desc_bank *desc_bank, + u32 ring_type, struct dp_srng *ring); +int ath11k_dp_link_desc_setup(struct ath11k_base *ab, + struct dp_link_desc_bank *link_desc_banks, + u32 ring_type, struct hal_srng *srng, + u32 n_link_desc); + +#endif diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include <linux/ieee80211.h> +#include "core.h" +#include "debug.h" +#include "hal_desc.h" +#include "hw.h" +#include "dp_rx.h" +#include "hal_rx.h" +#include "dp_tx.h" +#include "peer.h" + +static u8 *ath11k_dp_rx_h_80211_hdr(struct hal_rx_desc *desc) +{ + return desc->hdr_status; +} + +static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct hal_rx_desc *desc) +{ + if (!(__le32_to_cpu(desc->mpdu_start.info1) & + rx_mpdu_start_info1_encrypt_info_valid)) + return hal_encrypt_type_open; + + return field_get(rx_mpdu_start_info2_enc_type, + __le32_to_cpu(desc->mpdu_start.info2)); +} + +static u8 ath11k_dp_rx_h_mpdu_start_decap_type(struct hal_rx_desc *desc) +{ + return field_get(rx_mpdu_start_info5_decap_type, + __le32_to_cpu(desc->mpdu_start.info5)); +} + +static bool ath11k_dp_rx_h_attn_msdu_done(struct hal_rx_desc *desc) +{ + return !!field_get(rx_attention_info2_msdu_done, + __le32_to_cpu(desc->attention.info2)); +} + +static bool ath11k_dp_rx_h_attn_first_mpdu(struct hal_rx_desc *desc) +{ + 
return !!field_get(rx_attention_info1_first_mpdu, + __le32_to_cpu(desc->attention.info1)); +} + +static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct hal_rx_desc *desc) +{ + return !!field_get(rx_attention_info1_tcp_udp_cksum_fail, + __le32_to_cpu(desc->attention.info1)); +} + +static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct hal_rx_desc *desc) +{ + return !!field_get(rx_attention_info1_ip_cksum_fail, + __le32_to_cpu(desc->attention.info1)); +} + +static bool ath11k_dp_rx_h_attn_is_decrypted(struct hal_rx_desc *desc) +{ + return (field_get(rx_attention_info2_dcrypt_status_code, + __le32_to_cpu(desc->attention.info2)) == + rx_desc_decrypt_status_code_ok); +} + +static u32 ath11k_dp_rx_h_attn_mpdu_err(struct hal_rx_desc *desc) +{ + u32 info = __le32_to_cpu(desc->attention.info1); + u32 errmap = 0; + + if (info & rx_attention_info1_fcs_err) + errmap |= dp_rx_mpdu_err_fcs; + + if (info & rx_attention_info1_decrypt_err) + errmap |= dp_rx_mpdu_err_decrypt; + + if (info & rx_attention_info1_tkip_mic_err) + errmap |= dp_rx_mpdu_err_tkip_mic; + + if (info & rx_attention_info1_a_msdu_error) + errmap |= dp_rx_mpdu_err_amsdu_err; + + if (info & rx_attention_info1_overflow_err) + errmap |= dp_rx_mpdu_err_overflow; + + if (info & rx_attention_info1_msdu_len_err) + errmap |= dp_rx_mpdu_err_msdu_len; + + if (info & rx_attention_info1_mpdu_len_err) + errmap |= dp_rx_mpdu_err_mpdu_len; + + return errmap; +} + +static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct hal_rx_desc *desc) +{ + return field_get(rx_msdu_start_info1_msdu_length, + __le32_to_cpu(desc->msdu_start.info1)); +} + +static u8 ath11k_dp_rx_h_msdu_start_sgi(struct hal_rx_desc *desc) +{ + return field_get(rx_msdu_start_info3_sgi, + __le32_to_cpu(desc->msdu_start.info3)); +} + +static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct hal_rx_desc *desc) +{ + return field_get(rx_msdu_start_info3_rate_mcs, + __le32_to_cpu(desc->msdu_start.info3)); +} + +static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct hal_rx_desc *desc) +{ + 
return field_get(rx_msdu_start_info3_recv_bw, + __le32_to_cpu(desc->msdu_start.info3)); +} + +static u32 ath11k_dp_rx_h_msdu_start_freq(struct hal_rx_desc *desc) +{ + return __le32_to_cpu(desc->msdu_start.phy_meta_data); +} + +static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct hal_rx_desc *desc) +{ + return field_get(rx_msdu_start_info3_pkt_type, + __le32_to_cpu(desc->msdu_start.info3)); +} + +static u8 ath11k_dp_rx_h_msdu_start_nss(struct hal_rx_desc *desc) +{ + u8 mimo_ss_bitmap = field_get(rx_msdu_start_info3_mimo_ss_bitmap, + __le32_to_cpu(desc->msdu_start.info3)); + + return hweight8(mimo_ss_bitmap); +} + +static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct hal_rx_desc *desc) +{ + return field_get(rx_msdu_end_info2_l3_hdr_padding, + __le32_to_cpu(desc->msdu_end.info2)); +} + +static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct hal_rx_desc *desc) +{ + return !!field_get(rx_msdu_end_info2_first_msdu, + __le32_to_cpu(desc->msdu_end.info2)); +} + +static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct hal_rx_desc *desc) +{ + return !!field_get(rx_msdu_end_info2_last_msdu, + __le32_to_cpu(desc->msdu_end.info2)); +} + +static void ath11k_dp_rx_desc_end_tlv_copy(struct hal_rx_desc *fdesc, + struct hal_rx_desc *ldesc) +{ + memcpy((u8 *)&fdesc->msdu_end, (u8 *)&ldesc->msdu_end, + sizeof(struct rx_msdu_end)); + memcpy((u8 *)&fdesc->attention, (u8 *)&ldesc->attention, + sizeof(struct rx_attention)); + memcpy((u8 *)&fdesc->mpdu_end, (u8 *)&ldesc->mpdu_end, + sizeof(struct rx_mpdu_end)); +} + +static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct hal_rx_desc *rx_desc) +{ + struct rx_attention *rx_attn; + + rx_attn = &rx_desc->attention; + + return field_get(rx_attention_info1_mpdu_len_err, + __le32_to_cpu(rx_attn->info1)); +} + +static u32 ath11k_dp_rxdesc_get_decap_format(struct hal_rx_desc *rx_desc) +{ + struct rx_msdu_start *rx_msdu_start; + + rx_msdu_start = &rx_desc->msdu_start; + + return field_get(rx_msdu_start_info2_decap_format, + __le32_to_cpu(rx_msdu_start->info2)); +} 
+ +static u8 *ath11k_dp_rxdesc_get_80211hdr(struct hal_rx_desc *rx_desc) +{ + u8 *rx_pkt_hdr; + + rx_pkt_hdr = &rx_desc->msdu_payload[0]; + + return rx_pkt_hdr; +} + +static bool ath11k_dp_rxdesc_mpdu_valid(struct hal_rx_desc *rx_desc) +{ + u32 tlv_tag; + + tlv_tag = field_get(hal_tlv_hdr_tag, + __le32_to_cpu(rx_desc->mpdu_start_tag)); + + return tlv_tag == hal_rx_mpdu_start ? true : false; +} + +static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc) +{ + return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id); +} + +/* returns number of rx buffers replenished */ +int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, + struct dp_rxdma_ring *rx_ring, + int req_entries, + enum hal_rx_buf_return_buf_manager mgr, + gfp_t gfp) +{ + struct hal_srng *srng; + u32 *desc; + struct sk_buff *skb; + int num_free; + int num_remain; + int buf_id; + u32 cookie; + dma_addr_t paddr; + + req_entries = min(req_entries, rx_ring->bufs_max); + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + num_free = ath11k_hal_srng_src_num_free(ab, srng, true); + if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4)) + req_entries = num_free; + + req_entries = min(num_free, req_entries); + num_remain = req_entries; + + while (num_remain > 0) { + skb = dev_alloc_skb(dp_rx_buffer_size + + dp_rx_buffer_align_size); + if (!skb) + break; + + if (!is_aligned((unsigned long)skb->data, + dp_rx_buffer_align_size)) { + skb_pull(skb, + ptr_align(skb->data, dp_rx_buffer_align_size) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + dma_from_device); + if (dma_mapping_error(ab->dev, paddr)) + goto fail_free_skb; + + spin_lock_bh(&rx_ring->idr_lock); + buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, + rx_ring->bufs_max * 3, gfp); + spin_unlock_bh(&rx_ring->idr_lock); + if (buf_id < 0) + goto fail_dma_unmap; + + desc = 
ath11k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) + goto fail_idr_remove; + + ath11k_skb_rxcb(skb)->paddr = paddr; + + cookie = field_prep(dp_rxdma_buf_cookie_pdev_id, mac_id) | + field_prep(dp_rxdma_buf_cookie_buf_id, buf_id); + + num_remain--; + + ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; + +fail_idr_remove: + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + dma_from_device); +fail_free_skb: + dev_kfree_skb_any(skb); + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; +} + +static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar, + struct dp_rxdma_ring *rx_ring) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct sk_buff *skb; + int buf_id; + + spin_lock_bh(&rx_ring->idr_lock); + idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { + idr_remove(&rx_ring->bufs_idr, buf_id); + /* todo: understand where internal driver does this dma_unmap of + * of rxdma_buffer. + */ + dma_unmap_single(ar->ab->dev, ath11k_skb_rxcb(skb)->paddr, + skb->len + skb_tailroom(skb), dma_from_device); + dev_kfree_skb_any(skb); + } + + idr_destroy(&rx_ring->bufs_idr); + spin_unlock_bh(&rx_ring->idr_lock); + + rx_ring = &dp->rx_mon_status_refill_ring; + + spin_lock_bh(&rx_ring->idr_lock); + idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) { + idr_remove(&rx_ring->bufs_idr, buf_id); + /* xxx: understand where internal driver does this dma_unmap of + * of rxdma_buffer. 
+ */ + dma_unmap_single(ar->ab->dev, ath11k_skb_rxcb(skb)->paddr, + skb->len + skb_tailroom(skb), dma_bidirectional); + dev_kfree_skb_any(skb); + } + + idr_destroy(&rx_ring->bufs_idr); + spin_unlock_bh(&rx_ring->idr_lock); + return 0; +} + +static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + + ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); + + rx_ring = &dp->rxdma_mon_buf_ring; + ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); + + rx_ring = &dp->rx_mon_status_refill_ring; + ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); + return 0; +} + +static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar, + struct dp_rxdma_ring *rx_ring, + u32 ringtype) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + int num_entries; + + num_entries = rx_ring->refill_buf_ring.size / + ath11k_hal_srng_get_entrysize(ringtype); + + rx_ring->bufs_max = num_entries; + ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries, + hal_rx_buf_rbm_sw3_bm, gfp_kernel); + return 0; +} + +static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + + ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, hal_rxdma_buf); + + rx_ring = &dp->rxdma_mon_buf_ring; + ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, hal_rxdma_monitor_buf); + + rx_ring = &dp->rx_mon_status_refill_ring; + ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, hal_rxdma_monitor_status); + + return 0; +} + +static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + + ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring); + ath11k_dp_srng_cleanup(ar->ab, &dp->reo_dst_ring); + ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring); + ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring); + ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); +} + 
+static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct dp_srng *srng = null; + int ret; + + ret = ath11k_dp_srng_setup(ar->ab, + &dp->rx_refill_buf_ring.refill_buf_ring, + hal_rxdma_buf, 0, + dp->mac_id, dp_rxdma_buf_ring_size); + if (ret) { + ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring "); + return ret; + } + + ret = ath11k_dp_srng_setup(ar->ab, &dp->reo_dst_ring, hal_reo_dst, + dp->mac_id, dp->mac_id, + dp_reo_dst_ring_size); + if (ret) { + ath11k_warn(ar->ab, "failed to setup reo_dst_ring "); + return ret; + } + + ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring, + hal_rxdma_dst, 0, dp->mac_id, + dp_rxdma_err_dst_ring_size); + if (ret) { + ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring "); + return ret; + } + + srng = &dp->rx_mon_status_refill_ring.refill_buf_ring; + ret = ath11k_dp_srng_setup(ar->ab, + srng, + hal_rxdma_monitor_status, 0, dp->mac_id, + dp_rxdma_mon_status_ring_size); + if (ret) { + ath11k_warn(ar->ab, + "failed to setup rx_mon_status_refill_ring "); + return ret; + } + ret = ath11k_dp_srng_setup(ar->ab, + &dp->rxdma_mon_buf_ring.refill_buf_ring, + hal_rxdma_monitor_buf, 0, dp->mac_id, + dp_rxdma_monitor_buf_ring_size); + if (ret) { + ath11k_warn(ar->ab, + "failed to setup hal_rxdma_monitor_buf "); + return ret; + } + + ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring, + hal_rxdma_monitor_dst, 0, dp->mac_id, + dp_rxdma_monitor_dst_ring_size); + if (ret) { + ath11k_warn(ar->ab, + "failed to setup hal_rxdma_monitor_dst "); + return ret; + } + + ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring, + hal_rxdma_monitor_desc, 0, dp->mac_id, + dp_rxdma_monitor_desc_ring_size); + if (ret) { + ath11k_warn(ar->ab, + "failed to setup hal_rxdma_monitor_desc "); + return ret; + } + + return 0; +} + +void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) +{ + struct ath11k_dp *dp = &ab->dp; + struct dp_reo_cmd *cmd, *tmp; + struct 
dp_reo_cache_flush_elem *cmd_cache, *tmp_cache; + + spin_lock_bh(&dp->reo_cmd_lock); + list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { + list_del(&cmd->list); + dma_unmap_single(ab->dev, cmd->data.paddr, + cmd->data.size, dma_bidirectional); + kfree(cmd->data.vaddr); + kfree(cmd); + } + + list_for_each_entry_safe(cmd_cache, tmp_cache, + &dp->reo_cmd_cache_flush_list, list) { + list_del(&cmd_cache->list); + dma_unmap_single(ab->dev, cmd_cache->data.paddr, + cmd_cache->data.size, dma_bidirectional); + kfree(cmd_cache->data.vaddr); + kfree(cmd_cache); + } + spin_unlock_bh(&dp->reo_cmd_lock); +} + +static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, + enum hal_reo_cmd_status status) +{ + struct dp_rx_tid *rx_tid = ctx; + + if (status != hal_reo_cmd_success) + ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d ", + rx_tid->tid, status); + + dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, + dma_bidirectional); + kfree(rx_tid->vaddr); +} + +static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, + struct dp_rx_tid *rx_tid) +{ + struct ath11k_hal_reo_cmd cmd = {0}; + unsigned long tot_desc_sz, desc_sz; + int ret; + + tot_desc_sz = rx_tid->size; + desc_sz = ath11k_hal_reo_qdesc_size(0, hal_desc_reo_non_qos_tid); + + while (tot_desc_sz > desc_sz) { + tot_desc_sz -= desc_sz; + cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, + hal_reo_cmd_flush_cache, &cmd, + null); + if (ret) + ath11k_warn(ab, + "failed to send hal_reo_cmd_flush_cache, tid %d (%d) ", + rx_tid->tid, ret); + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.addr_lo = lower_32_bits(rx_tid->paddr); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + cmd.flag |= hal_reo_cmd_flg_need_status; + ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid, + hal_reo_cmd_flush_cache, + &cmd, ath11k_dp_reo_cmd_free); + if (ret) { + ath11k_err(ab, "failed to send 
hal_reo_cmd_flush_cache cmd, tid %d (%d) ", + rx_tid->tid, ret); + dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, + dma_bidirectional); + kfree(rx_tid->vaddr); + } +} + +static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, + enum hal_reo_cmd_status status) +{ + struct ath11k_base *ab = dp->ab; + struct dp_rx_tid *rx_tid = ctx; + struct dp_reo_cache_flush_elem *elem, *tmp; + + if (status == hal_reo_cmd_drain) { + goto free_desc; + } else if (status != hal_reo_cmd_success) { + /* shouldn't happen! cleanup in case of other failure? */ + ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d ", + rx_tid->tid, status); + return; + } + + elem = kzalloc(sizeof(*elem), gfp_atomic); + if (!elem) + goto free_desc; + + elem->ts = jiffies; + memcpy(&elem->data, rx_tid, sizeof(*rx_tid)); + + spin_lock_bh(&dp->reo_cmd_lock); + list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); + spin_unlock_bh(&dp->reo_cmd_lock); + + /* flush and invalidate aged reo desc from hw cache */ + spin_lock_bh(&dp->reo_cmd_lock); + list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, + list) { + if (time_after(jiffies, elem->ts + + msecs_to_jiffies(dp_reo_desc_free_timeout_ms))) { + list_del(&elem->list); + spin_unlock_bh(&dp->reo_cmd_lock); + + ath11k_dp_reo_cache_flush(ab, &elem->data); + kfree(elem); + spin_lock_bh(&dp->reo_cmd_lock); + } + } + spin_unlock_bh(&dp->reo_cmd_lock); + + return; +free_desc: + dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, + dma_bidirectional); + kfree(rx_tid->vaddr); +} + +static void ath11k_peer_rx_tid_delete(struct ath11k *ar, + struct ath11k_peer *peer, u8 tid) +{ + struct ath11k_hal_reo_cmd cmd = {0}; + struct dp_rx_tid *rx_tid = &peer->rx_tid[tid]; + int ret; + + if (!rx_tid->active) + return; + + cmd.flag = hal_reo_cmd_flg_need_status; + cmd.addr_lo = lower_32_bits(rx_tid->paddr); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + cmd.upd0 |= hal_reo_cmd_upd0_vld; + ret = ath11k_dp_tx_send_reo_cmd(ar->ab, 
rx_tid, + hal_reo_cmd_update_rx_queue, &cmd, + ath11k_dp_rx_tid_del_func); + if (ret) { + ath11k_err(ar->ab, "failed to send hal_reo_cmd_update_rx_queue cmd, tid %d (%d) ", + tid, ret); + dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, + dma_bidirectional); + kfree(rx_tid->vaddr); + } + + rx_tid->active = false; +} + +void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer) +{ + int i; + + for (i = 0; i <= ieee80211_num_tids; i++) + ath11k_peer_rx_tid_delete(ar, peer, i); +} + +static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar, + struct ath11k_peer *peer, + struct dp_rx_tid *rx_tid, + u32 ba_win_sz, u16 ssn) +{ + struct ath11k_hal_reo_cmd cmd = {0}; + int ret; + + cmd.addr_lo = lower_32_bits(rx_tid->paddr); + cmd.addr_hi = upper_32_bits(rx_tid->paddr); + cmd.flag = hal_reo_cmd_flg_need_status; + cmd.upd0 = hal_reo_cmd_upd0_ba_window_size | + hal_reo_cmd_upd0_ssn; + cmd.ba_window_size = ba_win_sz; + cmd.upd2 = field_prep(hal_reo_cmd_upd2_ssn, ssn); + + ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid, + hal_reo_cmd_update_rx_queue, &cmd, + null); + if (ret) { + ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d) ", + rx_tid->tid, ret); + return ret; + } + + rx_tid->ba_win_sz = ba_win_sz; + + return 0; +} + +static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, + const u8 *peer_mac, int vdev_id, u8 tid) +{ + struct ath11k_peer *peer; + struct dp_rx_tid *rx_tid; + + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find(ab, vdev_id, peer_mac); + if (!peer) { + ath11k_warn(ab, "failed to find the peer to free up rx tid mem "); + goto unlock_exit; + } + + rx_tid = &peer->rx_tid[tid]; + if (!rx_tid->active) + goto unlock_exit; + + dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, + dma_bidirectional); + kfree(rx_tid->vaddr); + + rx_tid->active = false; + +unlock_exit: + spin_unlock_bh(&ab->base_lock); +} + +int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, + u8 tid, u32 
ba_win_sz, u16 ssn) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_peer *peer; + struct dp_rx_tid *rx_tid; + u32 hw_desc_sz; + u32 *addr_aligned; + void *vaddr; + dma_addr_t paddr; + int ret; + + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find(ab, vdev_id, peer_mac); + if (!peer) { + ath11k_warn(ab, "failed to find the peer to set up rx tid "); + spin_unlock_bh(&ab->base_lock); + return -enoent; + } + + rx_tid = &peer->rx_tid[tid]; + /* update the tid queue if it is already setup */ + if (rx_tid->active) { + paddr = rx_tid->paddr; + ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, + ba_win_sz, ssn); + spin_unlock_bh(&ab->base_lock); + if (ret) { + ath11k_warn(ab, "failed to update reo for rx tid %d ", tid); + return ret; + } + + ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, + peer_mac, paddr, + tid, 1, ba_win_sz); + if (ret) + ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d) ", + tid, ret); + return ret; + } + + rx_tid->tid = tid; + + rx_tid->ba_win_sz = ba_win_sz; + + /* todo: optimize the memory allocation for qos tid based on the + * the actual ba window size in reo tid update path. 
+ */ + if (tid == hal_desc_reo_non_qos_tid) + hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid); + else + hw_desc_sz = ath11k_hal_reo_qdesc_size(dp_ba_win_sz_max, tid); + + vaddr = kzalloc(hw_desc_sz + hal_link_desc_align - 1, gfp_kernel); + if (!vaddr) { + spin_unlock_bh(&ab->base_lock); + return -enomem; + } + + addr_aligned = ptr_align(vaddr, hal_link_desc_align); + + ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, ssn); + + paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, + dma_bidirectional); + + ret = dma_mapping_error(ab->dev, paddr); + if (ret) { + spin_unlock_bh(&ab->base_lock); + goto err_mem_free; + } + + rx_tid->vaddr = vaddr; + rx_tid->paddr = paddr; + rx_tid->size = hw_desc_sz; + rx_tid->active = true; + + spin_unlock_bh(&ab->base_lock); + + ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, + paddr, tid, 1, ba_win_sz); + if (ret) { + ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d) ", + tid, ret); + ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); + } + + return ret; + +err_mem_free: + kfree(vaddr); + + return ret; +} + +int ath11k_dp_rx_ampdu_start(struct ath11k *ar, + struct ieee80211_ampdu_params *params) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_sta *arsta = (void *)params->sta->drv_priv; + int vdev_id = arsta->arvif->vdev_id; + int ret; + + ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id, + params->tid, params->buf_size, + params->ssn); + if (ret) + ath11k_warn(ab, "failed to setup rx tid %d ", ret); + + return ret; +} + +int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, + struct ieee80211_ampdu_params *params) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_peer *peer; + struct ath11k_sta *arsta = (void *)params->sta->drv_priv; + int vdev_id = arsta->arvif->vdev_id; + dma_addr_t paddr; + bool active; + int ret; + + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find(ab, vdev_id, params->sta->addr); + if (!peer) { + ath11k_warn(ab, "failed to find 
the peer to stop rx aggregation "); + spin_unlock_bh(&ab->base_lock); + return -enoent; + } + + paddr = peer->rx_tid[params->tid].paddr; + active = peer->rx_tid[params->tid].active; + + ath11k_peer_rx_tid_delete(ar, peer, params->tid); + + spin_unlock_bh(&ab->base_lock); + + if (!active) + return 0; + + ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, + params->sta->addr, paddr, + params->tid, 1, 1); + if (ret) + ath11k_warn(ab, "failed to send wmi to delete rx tid %d ", + ret); + + return ret; +} + +static int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats, + u16 peer_id) +{ + int i; + + for (i = 0; i < htt_ppdu_stats_max_users - 1; i++) { + if (ppdu_stats->user_stats[i].is_valid_peer_id) { + if (peer_id == ppdu_stats->user_stats[i].peer_id) + return i; + } else { + return i; + } + } + + return -einval; +} + +static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab, + u16 tag, u16 len, const void *ptr, + void *data) +{ + struct htt_ppdu_stats_info *ppdu_info; + struct htt_ppdu_user_stats *user_stats; + int cur_user; + u16 peer_id; + + ppdu_info = (struct htt_ppdu_stats_info *)data; + + switch (tag) { + case htt_ppdu_stats_tag_common: + if (len < sizeof(struct htt_ppdu_stats_common)) { + ath11k_warn(ab, "invalid len %d for the tag 0x%x ", + len, tag); + return -einval; + } + memcpy((void *)&ppdu_info->ppdu_stats.common, ptr, + sizeof(struct htt_ppdu_stats_common)); + break; + case htt_ppdu_stats_tag_usr_rate: + if (len < sizeof(struct htt_ppdu_stats_user_rate)) { + ath11k_warn(ab, "invalid len %d for the tag 0x%x ", + len, tag); + return -einval; + } + + peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id; + cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, + peer_id); + if (cur_user < 0) + return -einval; + user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; + user_stats->peer_id = peer_id; + user_stats->is_valid_peer_id = true; + memcpy((void *)&user_stats->rate, ptr, + sizeof(struct 
htt_ppdu_stats_user_rate)); + user_stats->tlv_flags |= bit(tag); + break; + case htt_ppdu_stats_tag_usr_compltn_common: + if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) { + ath11k_warn(ab, "invalid len %d for the tag 0x%x ", + len, tag); + return -einval; + } + + peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id; + cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, + peer_id); + if (cur_user < 0) + return -einval; + user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; + user_stats->peer_id = peer_id; + user_stats->is_valid_peer_id = true; + memcpy((void *)&user_stats->cmpltn_cmn, ptr, + sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)); + user_stats->tlv_flags |= bit(tag); + break; + case htt_ppdu_stats_tag_usr_compltn_ack_ba_status: + if (len < + sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) { + ath11k_warn(ab, "invalid len %d for the tag 0x%x ", + len, tag); + return -einval; + } + + peer_id = + ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id; + cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats, + peer_id); + if (cur_user < 0) + return -einval; + user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user]; + user_stats->peer_id = peer_id; + user_stats->is_valid_peer_id = true; + memcpy((void *)&user_stats->ack_ba, ptr, + sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)); + user_stats->tlv_flags |= bit(tag); + break; + } + return 0; +} + +int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data) +{ + const struct htt_tlv *tlv; + const void *begin = ptr; + u16 tlv_tag, tlv_len; + int ret = -einval; + + while (len > 0) { + if (len < sizeof(*tlv)) { + ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected) ", + ptr - begin, len, sizeof(*tlv)); + return -einval; + } + tlv = (struct htt_tlv *)ptr; + tlv_tag = field_get(htt_tlv_tag, 
tlv->header); + tlv_len = field_get(htt_tlv_len, tlv->header); + ptr += sizeof(*tlv); + len -= sizeof(*tlv); + + if (tlv_len > len) { + ath11k_err(ab, "htt tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected) ", + tlv_tag, ptr - begin, len, tlv_len); + return -einval; + } + ret = iter(ab, tlv_tag, tlv_len, ptr, data); + if (ret == -enomem) + return ret; + + ptr += tlv_len; + len -= tlv_len; + } + return 0; +} + +static u8 ath11k_bw_to_mac80211_bw(u8 bw) +{ + u8 ret = 0; + + switch (bw) { + case ath11k_bw_20: + ret = rate_info_bw_20; + break; + case ath11k_bw_40: + ret = rate_info_bw_40; + break; + case ath11k_bw_80: + ret = rate_info_bw_80; + break; + case ath11k_bw_160: + ret = rate_info_bw_160; + break; + } + + return ret; +} + +static u32 ath11k_bw_to_mac80211_bwflags(u8 bw) +{ + u32 bwflags = 0; + + switch (bw) { + case ath11k_bw_40: + bwflags = ieee80211_tx_rc_40_mhz_width; + break; + case ath11k_bw_80: + bwflags = ieee80211_tx_rc_80_mhz_width; + break; + case ath11k_bw_160: + bwflags = ieee80211_tx_rc_160_mhz_width; + break; + } + + return bwflags; +} + +static void +ath11k_update_per_peer_tx_stats(struct ath11k *ar, + struct htt_ppdu_stats *ppdu_stats, u8 user) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_peer *peer; + struct ieee80211_sta *sta; + struct ath11k_sta *arsta; + struct htt_ppdu_stats_user_rate *user_rate; + struct ieee80211_chanctx_conf *conf = null; + struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; + struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; + struct htt_ppdu_stats_common *common = &ppdu_stats->common; + int ret; + u8 flags, mcs, nss, bw, sgi, rate_idx = 0; + u32 succ_bytes = 0; + u16 rate = 0, succ_pkts = 0; + u32 tx_duration = 0; + bool is_ampdu = false; + + if (!usr_stats) + return; + + if (!(usr_stats->tlv_flags & bit(htt_ppdu_stats_tag_usr_rate))) + return; + + if (usr_stats->tlv_flags & bit(htt_ppdu_stats_tag_usr_compltn_common)) + is_ampdu = + 
htt_usr_cmpltn_is_ampdu(usr_stats->cmpltn_cmn.flags); + + if (usr_stats->tlv_flags & + bit(htt_ppdu_stats_tag_usr_compltn_ack_ba_status)) { + succ_bytes = usr_stats->ack_ba.success_bytes; + succ_pkts = field_get(htt_ppdu_stats_ack_ba_info_num_msdu_m, + usr_stats->ack_ba.info); + } + + if (common->fes_duration_us) + tx_duration = common->fes_duration_us; + + user_rate = &usr_stats->rate; + flags = htt_usr_rate_preamble(user_rate->rate_flags); + bw = htt_usr_rate_bw(user_rate->rate_flags) - 2; + nss = htt_usr_rate_nss(user_rate->rate_flags) + 1; + mcs = htt_usr_rate_mcs(user_rate->rate_flags); + sgi = htt_usr_rate_gi(user_rate->rate_flags); + + /* note: if host configured fixed rates and in some other special + * cases, the broadcast/management frames are sent in different rates. + * firmware rate's control to be skipped for this? + */ + + if (flags == wmi_rate_preamble_vht && mcs > 9) { + ath11k_warn(ab, "invalid vht mcs %hhd peer stats", mcs); + return; + } + + if (flags == wmi_rate_preamble_ht && (mcs > 7 || nss < 1)) { + ath11k_warn(ab, "invalid ht mcs %hhd nss %hhd peer stats", + mcs, nss); + return; + } + + if (flags == wmi_rate_preamble_cck || flags == wmi_rate_preamble_ofdm) { + ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs, + flags, + &rate_idx, + &rate); + if (ret < 0) + return; + } + + rcu_read_lock(); + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id); + + if (!peer || !peer->sta) { + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + return; + } + + sta = peer->sta; + arsta = (struct ath11k_sta *)sta->drv_priv; + + memset(&arsta->txrate, 0, sizeof(arsta->txrate)); + memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); + + switch (flags) { + case wmi_rate_preamble_ofdm: + arsta->txrate.legacy = rate; + if (arsta->arvif && arsta->arvif->vif) + conf = rcu_dereference(arsta->arvif->vif->chanctx_conf); + if (conf && conf->def.chan->band == nl80211_band_5ghz) + arsta->tx_info.status.rates[0].idx = 
rate_idx - 4; + break; + case wmi_rate_preamble_cck: + arsta->txrate.legacy = rate; + arsta->tx_info.status.rates[0].idx = rate_idx; + if (mcs > ath11k_hw_rate_cck_lp_1m && + mcs <= ath11k_hw_rate_cck_sp_2m) + arsta->tx_info.status.rates[0].flags |= + ieee80211_tx_rc_use_short_preamble; + break; + case wmi_rate_preamble_ht: + arsta->txrate.mcs = mcs + 8 * (nss - 1); + arsta->tx_info.status.rates[0].idx = arsta->txrate.mcs; + arsta->txrate.flags = rate_info_flags_mcs; + arsta->tx_info.status.rates[0].flags |= ieee80211_tx_rc_mcs; + if (sgi) { + arsta->txrate.flags |= rate_info_flags_short_gi; + arsta->tx_info.status.rates[0].flags |= + ieee80211_tx_rc_short_gi; + } + break; + case wmi_rate_preamble_vht: + arsta->txrate.mcs = mcs; + ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], mcs, nss); + arsta->txrate.flags = rate_info_flags_vht_mcs; + arsta->tx_info.status.rates[0].flags |= ieee80211_tx_rc_vht_mcs; + if (sgi) { + arsta->txrate.flags |= rate_info_flags_short_gi; + arsta->tx_info.status.rates[0].flags |= + ieee80211_tx_rc_short_gi; + } + break; + } + + arsta->txrate.nss = nss; + arsta->txrate.bw = ath11k_bw_to_mac80211_bw(bw); + arsta->tx_info.status.rates[0].flags |= ath11k_bw_to_mac80211_bwflags(bw); + + memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info)); + + if (succ_pkts) { + arsta->tx_info.flags = ieee80211_tx_stat_ack; + arsta->tx_info.status.rates[0].count = 1; + ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info); + } + + memset(peer_stats, 0, sizeof(*peer_stats)); + + peer_stats->succ_pkts = succ_pkts; + peer_stats->succ_bytes = succ_bytes; + peer_stats->is_ampdu = is_ampdu; + peer_stats->duration = tx_duration; + peer_stats->ba_fails = + htt_usr_cmpltn_long_retry(usr_stats->cmpltn_cmn.flags) + + htt_usr_cmpltn_short_retry(usr_stats->cmpltn_cmn.flags); + + if (ath11k_debug_is_extd_tx_stats_enabled(ar)) + ath11k_accumulate_per_peer_tx_stats(arsta, + peer_stats, rate_idx); + + spin_unlock_bh(&ab->base_lock); + 
rcu_read_unlock(); +} + +static void ath11k_htt_update_ppdu_stats(struct ath11k *ar, + struct htt_ppdu_stats *ppdu_stats) +{ + u8 user; + + for (user = 0; user < htt_ppdu_stats_max_users - 1; user++) + ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user); +} + +static +struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar, + u32 ppdu_id) +{ + struct htt_ppdu_stats_info *ppdu_info = null; + + spin_lock_bh(&ar->data_lock); + if (!list_empty(&ar->ppdu_stats_info)) { + list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) { + if (ppdu_info && ppdu_info->ppdu_id == ppdu_id) { + spin_unlock_bh(&ar->data_lock); + return ppdu_info; + } + } + + if (ar->ppdu_stat_list_depth > htt_ppdu_desc_max_depth) { + ppdu_info = list_first_entry(&ar->ppdu_stats_info, + typeof(*ppdu_info), list); + list_del(&ppdu_info->list); + ar->ppdu_stat_list_depth--; + ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats); + kfree(ppdu_info); + } + } + spin_unlock_bh(&ar->data_lock); + + ppdu_info = kzalloc(sizeof(*ppdu_info), gfp_kernel); + if (!ppdu_info) + return null; + + spin_lock_bh(&ar->data_lock); + list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info); + ar->ppdu_stat_list_depth++; + spin_unlock_bh(&ar->data_lock); + + return ppdu_info; +} + +static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct ath11k_htt_ppdu_stats_msg *msg; + struct htt_ppdu_stats_info *ppdu_info; + struct ath11k *ar; + int ret; + u8 pdev_id; + u32 ppdu_id, len; + + msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data; + len = field_get(htt_t2h_ppdu_stats_info_payload_size, msg->info); + pdev_id = field_get(htt_t2h_ppdu_stats_info_pdev_id, msg->info); + ppdu_id = msg->ppdu_id; + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id); + if (!ar) { + ret = -einval; + goto exit; + } + + if (ath11k_debug_is_pktlog_lite_mode_enabled(ar)) + trace_ath11k_htt_ppdu_stats(ar, skb->data, len); + + ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id); 
+ if (!ppdu_info) { + ret = -einval; + goto exit; + } + + ppdu_info->ppdu_id = ppdu_id; + ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len, + ath11k_htt_tlv_ppdu_stats_parse, + (void *)ppdu_info); + if (ret) { + ath11k_warn(ab, "failed to parse tlv %d ", ret); + goto exit; + } + +exit: + rcu_read_unlock(); + + return ret; +} + +static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data; + struct ath11k *ar; + u32 len; + u8 pdev_id; + + len = field_get(htt_t2h_ppdu_stats_info_payload_size, data->hdr); + + if (len > ath11k_htt_pktlog_max_size) { + ath11k_warn(ab, "htt pktlog buffer size %d, expected < %d ", + len, + ath11k_htt_pktlog_max_size); + return; + } + + pdev_id = field_get(htt_t2h_ppdu_stats_info_pdev_id, data->hdr); + pdev_id = dp_hw2sw_macid(pdev_id); + ar = ab->pdevs[pdev_id].ar; + + trace_ath11k_htt_pktlog(ar, data->payload, len); +} + +void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct ath11k_dp *dp = &ab->dp; + struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data; + enum htt_t2h_msg_type type = field_get(htt_t2h_msg_type, *(u32 *)resp); + u16 peer_id; + u8 vdev_id; + u8 mac_addr[eth_alen]; + u16 peer_mac_h16; + u16 ast_hash; + + ath11k_dbg(ab, ath11k_dbg_dp_htt, "dp_htt rx msg type :0x%0x ", type); + + switch (type) { + case htt_t2h_msg_type_version_conf: + dp->htt_tgt_ver_major = field_get(htt_t2h_version_conf_major, + resp->version_msg.version); + dp->htt_tgt_ver_minor = field_get(htt_t2h_version_conf_minor, + resp->version_msg.version); + complete(&dp->htt_tgt_version_received); + break; + case htt_t2h_msg_type_peer_map: + vdev_id = field_get(htt_t2h_peer_map_info_vdev_id, + resp->peer_map_ev.info); + peer_id = field_get(htt_t2h_peer_map_info_peer_id, + resp->peer_map_ev.info); + peer_mac_h16 = field_get(htt_t2h_peer_map_info1_mac_addr_h16, + resp->peer_map_ev.info1); + 
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32, + peer_mac_h16, mac_addr); + ast_hash = field_get(htt_t2h_peer_map_info2_ast_hash_val, + resp->peer_map_ev.info1); + ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash); + break; + case htt_t2h_msg_type_peer_unmap: + peer_id = field_get(htt_t2h_peer_unmap_info_peer_id, + resp->peer_unmap_ev.info); + ath11k_peer_unmap_event(ab, peer_id); + break; + case htt_t2h_msg_type_ppdu_stats_ind: + ath11k_htt_pull_ppdu_stats(ab, skb); + break; + case htt_t2h_msg_type_ext_stats_conf: + ath11k_dbg_htt_ext_stats_handler(ab, skb); + break; + case htt_t2h_msg_type_pktlog: + ath11k_htt_pktlog(ab, skb); + break; + default: + ath11k_warn(ab, "htt event %d not handled ", type); + break; + } + + dev_kfree_skb_any(skb); +} + +static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar, + struct sk_buff_head *msdu_list, + struct sk_buff *first, struct sk_buff *last, + u8 l3pad_bytes, int msdu_len) +{ + struct sk_buff *skb; + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(first); + struct hal_rx_desc *ldesc; + int space_extra; + int rem_len; + int buf_len; + + if (!rxcb->is_continuation) { + skb_put(first, hal_rx_desc_size + l3pad_bytes + msdu_len); + skb_pull(first, hal_rx_desc_size + l3pad_bytes); + return 0; + } + + if (warn_on_once(msdu_len <= (dp_rx_buffer_size - + (hal_rx_desc_size + l3pad_bytes)))) { + skb_put(first, hal_rx_desc_size + l3pad_bytes + msdu_len); + skb_pull(first, hal_rx_desc_size + l3pad_bytes); + return 0; + } + + ldesc = (struct hal_rx_desc *)last->data; + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ldesc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ldesc); + + /* msdu spans over multiple buffers because the length of the msdu + * exceeds dp_rx_buffer_size - hal_rx_desc_size. so assume the data + * in the first buf is of length dp_rx_buffer_size - hal_rx_desc_size. 
+ */ + skb_put(first, dp_rx_buffer_size); + skb_pull(first, hal_rx_desc_size + l3pad_bytes); + + space_extra = msdu_len - (dp_rx_buffer_size + skb_tailroom(first)); + if (space_extra > 0 && + (pskb_expand_head(first, 0, space_extra, gfp_atomic) < 0)) { + /* free up all buffers of the msdu */ + while ((skb = __skb_dequeue(msdu_list)) != null) { + rxcb = ath11k_skb_rxcb(skb); + if (!rxcb->is_continuation) { + dev_kfree_skb_any(skb); + break; + } + dev_kfree_skb_any(skb); + } + return -enomem; + } + + /* when an msdu spread over multiple buffers attention, msdu_end and + * mpdu_end tlvs are valid only in the last buffer. copy those tlvs. + */ + ath11k_dp_rx_desc_end_tlv_copy(rxcb->rx_desc, ldesc); + + rem_len = msdu_len - + (dp_rx_buffer_size - hal_rx_desc_size - l3pad_bytes); + while ((skb = __skb_dequeue(msdu_list)) != null && rem_len > 0) { + rxcb = ath11k_skb_rxcb(skb); + if (rxcb->is_continuation) + buf_len = dp_rx_buffer_size - hal_rx_desc_size; + else + buf_len = rem_len; + + if (buf_len > (dp_rx_buffer_size - hal_rx_desc_size)) { + warn_on_once(1); + dev_kfree_skb_any(skb); + return -einval; + } + + skb_put(skb, buf_len + hal_rx_desc_size); + skb_pull(skb, hal_rx_desc_size); + skb_copy_from_linear_data(skb, skb_put(first, buf_len), + buf_len); + dev_kfree_skb_any(skb); + + rem_len -= buf_len; + if (!rxcb->is_continuation) + break; + } + + return 0; +} + +static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list, + struct sk_buff *first) +{ + struct sk_buff *skb; + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(first); + + if (!rxcb->is_continuation) + return first; + + skb_queue_walk(msdu_list, skb) { + rxcb = ath11k_skb_rxcb(skb); + if (!rxcb->is_continuation) + return skb; + } + + return null; +} + +static int ath11k_dp_rx_retrieve_amsdu(struct ath11k *ar, + struct sk_buff_head *msdu_list, + struct sk_buff_head *amsdu_list) +{ + struct sk_buff *msdu = skb_peek(msdu_list); + struct sk_buff *last_buf; + struct ath11k_skb_rxcb *rxcb; 
+ struct ieee80211_hdr *hdr; + struct hal_rx_desc *rx_desc, *lrx_desc; + u16 msdu_len; + u8 l3_pad_bytes; + u8 *hdr_status; + int ret; + + if (!msdu) + return -enoent; + + rx_desc = (struct hal_rx_desc *)msdu->data; + hdr_status = ath11k_dp_rx_h_80211_hdr(rx_desc); + hdr = (struct ieee80211_hdr *)hdr_status; + /* process only data frames */ + if (!ieee80211_is_data(hdr->frame_control)) { + __skb_unlink(msdu, msdu_list); + dev_kfree_skb_any(msdu); + return -einval; + } + + do { + __skb_unlink(msdu, msdu_list); + last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu); + if (!last_buf) { + ath11k_warn(ar->ab, + "no valid rx buffer to access atten/msdu_end/mpdu_end tlvs "); + ret = -eio; + goto free_out; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + lrx_desc = (struct hal_rx_desc *)last_buf->data; + + if (!ath11k_dp_rx_h_attn_msdu_done(lrx_desc)) { + ath11k_warn(ar->ab, "msdu_done bit in attention is not set "); + ret = -eio; + goto free_out; + } + + rxcb = ath11k_skb_rxcb(msdu); + rxcb->rx_desc = rx_desc; + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); + l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(lrx_desc); + + if (!rxcb->is_continuation) { + skb_put(msdu, hal_rx_desc_size + l3_pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_size + l3_pad_bytes); + } else { + ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list, + msdu, last_buf, + l3_pad_bytes, msdu_len); + if (ret) { + ath11k_warn(ar->ab, + "failed to coalesce msdu rx buffer%d ", ret); + goto free_out; + } + } + __skb_queue_tail(amsdu_list, msdu); + + /* should we also consider msdu_cnt from mpdu_meta while + * preparing amsdu list? 
+ */ + if (rxcb->is_last_msdu) + break; + } while ((msdu = skb_peek(msdu_list)) != null); + + return 0; + +free_out: + dev_kfree_skb_any(msdu); + __skb_queue_purge(amsdu_list); + + return ret; +} + +static void ath11k_dp_rx_h_csum_offload(struct sk_buff *msdu) +{ + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + bool ip_csum_fail, l4_csum_fail; + + ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rxcb->rx_desc); + l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rxcb->rx_desc); + + msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ? + checksum_none : checksum_unnecessary; +} + +static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, + enum hal_encrypt_type enctype) +{ + switch (enctype) { + case hal_encrypt_type_open: + case hal_encrypt_type_tkip_no_mic: + case hal_encrypt_type_tkip_mic: + return 0; + case hal_encrypt_type_ccmp_128: + return ieee80211_ccmp_mic_len; + case hal_encrypt_type_ccmp_256: + return ieee80211_ccmp_256_mic_len; + case hal_encrypt_type_gcmp_128: + case hal_encrypt_type_aes_gcmp_256: + return ieee80211_gcmp_mic_len; + case hal_encrypt_type_wep_40: + case hal_encrypt_type_wep_104: + case hal_encrypt_type_wep_128: + case hal_encrypt_type_wapi_gcm_sm4: + case hal_encrypt_type_wapi: + break; + } + + ath11k_warn(ar->ab, "unsupported encryption type %d for mic len ", enctype); + return 0; +} + +static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar, + enum hal_encrypt_type enctype) +{ + switch (enctype) { + case hal_encrypt_type_open: + return 0; + case hal_encrypt_type_tkip_no_mic: + case hal_encrypt_type_tkip_mic: + return ieee80211_tkip_iv_len; + case hal_encrypt_type_ccmp_128: + return ieee80211_ccmp_hdr_len; + case hal_encrypt_type_ccmp_256: + return ieee80211_ccmp_256_hdr_len; + case hal_encrypt_type_gcmp_128: + case hal_encrypt_type_aes_gcmp_256: + return ieee80211_gcmp_hdr_len; + case hal_encrypt_type_wep_40: + case hal_encrypt_type_wep_104: + case hal_encrypt_type_wep_128: + case hal_encrypt_type_wapi_gcm_sm4: + case 
hal_encrypt_type_wapi: + break; + } + + ath11k_warn(ar->ab, "unsupported encryption type %d ", enctype); + return 0; +} + +static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar, + enum hal_encrypt_type enctype) +{ + switch (enctype) { + case hal_encrypt_type_open: + case hal_encrypt_type_ccmp_128: + case hal_encrypt_type_ccmp_256: + case hal_encrypt_type_gcmp_128: + case hal_encrypt_type_aes_gcmp_256: + return 0; + case hal_encrypt_type_tkip_no_mic: + case hal_encrypt_type_tkip_mic: + return ieee80211_tkip_icv_len; + case hal_encrypt_type_wep_40: + case hal_encrypt_type_wep_104: + case hal_encrypt_type_wep_128: + case hal_encrypt_type_wapi_gcm_sm4: + case hal_encrypt_type_wapi: + break; + } + + ath11k_warn(ar->ab, "unsupported encryption type %d ", enctype); + return 0; +} + +static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar, + struct sk_buff *msdu, + u8 *first_hdr, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status) +{ + struct ieee80211_hdr *hdr; + size_t hdr_len; + u8 da[eth_alen]; + u8 sa[eth_alen]; + + /* pull decapped header and copy sa & da */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(da, ieee80211_get_da(hdr)); + ether_addr_copy(sa, ieee80211_get_sa(hdr)); + skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control)); + + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (!(status->flag & rx_flag_iv_stripped)) { + memcpy(skb_push(msdu, + ath11k_dp_rx_crypto_param_len(ar, enctype)), + (void *)hdr + hdr_len, + ath11k_dp_rx_crypto_param_len(ar, enctype)); + } + + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); + + /* original 802.11 header has a different da and in + * case of 4addr it may also have different sa + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_da(hdr), da); + ether_addr_copy(ieee80211_get_sa(hdr), sa); +} + +static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu, 
+ enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status, + bool decrypted) +{ + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + struct ieee80211_hdr *hdr; + size_t hdr_len; + size_t crypto_len; + + if (!rxcb->is_first_msdu || + !(rxcb->is_first_msdu && rxcb->is_last_msdu)) { + warn_on_once(1); + return; + } + + skb_trim(msdu, msdu->len - fcs_len); + + if (!decrypted) + return; + + hdr = (void *)msdu->data; + + /* tail */ + if (status->flag & rx_flag_iv_stripped) { + skb_trim(msdu, msdu->len - + ath11k_dp_rx_crypto_mic_len(ar, enctype)); + + skb_trim(msdu, msdu->len - + ath11k_dp_rx_crypto_icv_len(ar, enctype)); + } else { + /* mic */ + if (status->flag & rx_flag_mic_stripped) + skb_trim(msdu, msdu->len - + ath11k_dp_rx_crypto_mic_len(ar, enctype)); + + /* icv */ + if (status->flag & rx_flag_icv_stripped) + skb_trim(msdu, msdu->len - + ath11k_dp_rx_crypto_icv_len(ar, enctype)); + } + + /* mmic */ + if ((status->flag & rx_flag_mmic_stripped) && + !ieee80211_has_morefrags(hdr->frame_control) && + enctype == hal_encrypt_type_tkip_mic) + skb_trim(msdu, msdu->len - ieee80211_ccmp_mic_len); + + /* head */ + if (status->flag & rx_flag_iv_stripped) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); + + memmove((void *)msdu->data + crypto_len, + (void *)msdu->data, hdr_len); + skb_pull(msdu, crypto_len); + } +} + +static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar, + struct sk_buff *msdu, + enum hal_encrypt_type enctype) +{ + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + struct ieee80211_hdr *hdr; + size_t hdr_len, crypto_len; + void *rfc1042; + bool is_amsdu; + + is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu); + hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rxcb->rx_desc); + rfc1042 = hdr; + + if (rxcb->is_first_msdu) { + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype); + + rfc1042 += hdr_len + 
crypto_len; + } + + if (is_amsdu) + rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr); + + return rfc1042; +} + +static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar, + struct sk_buff *msdu, + u8 *first_hdr, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status) +{ + struct ieee80211_hdr *hdr; + struct ethhdr *eth; + size_t hdr_len; + u8 da[eth_alen]; + u8 sa[eth_alen]; + void *rfc1042; + + rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype); + if (warn_on_once(!rfc1042)) + return; + + /* pull decapped header and copy sa & da */ + eth = (struct ethhdr *)msdu->data; + ether_addr_copy(da, eth->h_dest); + ether_addr_copy(sa, eth->h_source); + skb_pull(msdu, sizeof(struct ethhdr)); + + /* push rfc1042/llc/snap */ + memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042, + sizeof(struct ath11k_dp_rfc1042_hdr)); + + /* push original 802.11 header */ + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (!(status->flag & rx_flag_iv_stripped)) { + memcpy(skb_push(msdu, + ath11k_dp_rx_crypto_param_len(ar, enctype)), + (void *)hdr + hdr_len, + ath11k_dp_rx_crypto_param_len(ar, enctype)); + } + + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); + + /* original 802.11 header has a different da and in + * case of 4addr it may also have different sa + */ + hdr = (struct ieee80211_hdr *)msdu->data; + ether_addr_copy(ieee80211_get_da(hdr), da); + ether_addr_copy(ieee80211_get_sa(hdr), sa); +} + +static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu, + struct hal_rx_desc *rx_desc, + enum hal_encrypt_type enctype, + struct ieee80211_rx_status *status, + bool decrypted) +{ + u8 *first_hdr; + u8 decap; + + first_hdr = ath11k_dp_rx_h_80211_hdr(rx_desc); + decap = ath11k_dp_rx_h_mpdu_start_decap_type(rx_desc); + + switch (decap) { + case dp_rx_decap_type_native_wifi: + ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr, + enctype, status); + break; + case dp_rx_decap_type_raw: + 
ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status, + decrypted); + break; + case dp_rx_decap_type_ethernet2_dix: + ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr, + enctype, status); + break; + case dp_rx_decap_type_8023: + /* todo: handle undecap for these formats */ + break; + } +} + +static void ath11k_dp_rx_h_mpdu(struct ath11k *ar, + struct sk_buff_head *amsdu_list, + struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_hdr *hdr; + enum hal_encrypt_type enctype; + struct sk_buff *last_msdu; + struct sk_buff *msdu; + struct ath11k_skb_rxcb *last_rxcb; + bool is_decrypted; + u32 err_bitmap; + u8 *qos; + + if (skb_queue_empty(amsdu_list)) + return; + + hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(rx_desc); + + /* each a-msdu subframe will use the original header as the base and be + * reported as a separate msdu so strip the a-msdu bit from qos ctl. + */ + if (ieee80211_is_data_qos(hdr->frame_control)) { + qos = ieee80211_get_qos_ctl(hdr); + qos[0] &= ~ieee80211_qos_ctl_a_msdu_present; + } + + is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); + enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); + + /* some attention flags are valid only in the last msdu. */ + last_msdu = skb_peek_tail(amsdu_list); + last_rxcb = ath11k_skb_rxcb(last_msdu); + + err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(last_rxcb->rx_desc); + + /* clear per-mpdu flags while leaving per-ppdu flags intact. 
*/ + rx_status->flag &= ~(rx_flag_failed_fcs_crc | + rx_flag_mmic_error | + rx_flag_decrypted | + rx_flag_iv_stripped | + rx_flag_mmic_stripped); + + if (err_bitmap & dp_rx_mpdu_err_fcs) + rx_status->flag |= rx_flag_failed_fcs_crc; + + if (err_bitmap & dp_rx_mpdu_err_tkip_mic) + rx_status->flag |= rx_flag_mmic_error; + + if (is_decrypted) + rx_status->flag |= rx_flag_decrypted | rx_flag_mmic_stripped | + rx_flag_mic_stripped | rx_flag_icv_stripped; + + skb_queue_walk(amsdu_list, msdu) { + ath11k_dp_rx_h_csum_offload(msdu); + ath11k_dp_rx_h_undecap(ar, msdu, rx_desc, + enctype, rx_status, is_decrypted); + } +} + +static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_supported_band *sband; + enum rx_msdu_start_pkt_type pkt_type; + u8 bw; + u8 rate_mcs, nss; + u8 sgi; + bool is_cck; + + pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(rx_desc); + bw = ath11k_dp_rx_h_msdu_start_rx_bw(rx_desc); + rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(rx_desc); + nss = ath11k_dp_rx_h_msdu_start_nss(rx_desc); + sgi = ath11k_dp_rx_h_msdu_start_sgi(rx_desc); + + switch (pkt_type) { + case rx_msdu_start_pkt_type_11a: + case rx_msdu_start_pkt_type_11b: + is_cck = (pkt_type == rx_msdu_start_pkt_type_11b); + sband = &ar->mac.sbands[rx_status->band]; + rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs, + is_cck); + break; + case rx_msdu_start_pkt_type_11n: + rx_status->encoding = rx_enc_ht; + if (rate_mcs > ath11k_ht_mcs_max) { + ath11k_warn(ar->ab, + "received with invalid mcs in ht mode %d ", + rate_mcs); + break; + } + rx_status->rate_idx = rate_mcs + (8 * (nss - 1)); + if (sgi) + rx_status->enc_flags |= rx_enc_flag_short_gi; + rx_status->bw = ath11k_bw_to_mac80211_bw(bw); + break; + case rx_msdu_start_pkt_type_11ac: + rx_status->encoding = rx_enc_vht; + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ath11k_vht_mcs_max) { + ath11k_warn(ar->ab, + "received with invalid mcs in vht mode 
%d ", + rate_mcs); + break; + } + rx_status->nss = nss; + if (sgi) + rx_status->enc_flags |= rx_enc_flag_short_gi; + rx_status->bw = ath11k_bw_to_mac80211_bw(bw); + break; + case rx_msdu_start_pkt_type_11ax: + rx_status->rate_idx = rate_mcs; + if (rate_mcs > ath11k_he_mcs_max) { + ath11k_warn(ar->ab, + "received with invalid mcs in he mode %d ", + rate_mcs); + break; + } + rx_status->encoding = rx_enc_he; + rx_status->nss = nss; + rx_status->bw = ath11k_bw_to_mac80211_bw(bw); + break; + } +} + +static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + u8 channel_num; + + rx_status->freq = 0; + rx_status->rate_idx = 0; + rx_status->nss = 0; + rx_status->encoding = rx_enc_legacy; + rx_status->bw = rate_info_bw_20; + + rx_status->flag |= rx_flag_no_signal_val; + + channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); + + if (channel_num >= 1 && channel_num <= 14) { + rx_status->band = nl80211_band_2ghz; + } else if (channel_num >= 36 && channel_num <= 173) { + rx_status->band = nl80211_band_5ghz; + } else { + ath11k_warn(ar->ab, "unsupported channel info received %d ", + channel_num); + return; + } + + rx_status->freq = ieee80211_channel_to_frequency(channel_num, + rx_status->band); + + ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); +} + +static void ath11k_dp_rx_process_amsdu(struct ath11k *ar, + struct sk_buff_head *amsdu_list, + struct ieee80211_rx_status *rx_status) +{ + struct sk_buff *first; + struct ath11k_skb_rxcb *rxcb; + struct hal_rx_desc *rx_desc; + bool first_mpdu; + + if (skb_queue_empty(amsdu_list)) + return; + + first = skb_peek(amsdu_list); + rxcb = ath11k_skb_rxcb(first); + rx_desc = rxcb->rx_desc; + + first_mpdu = ath11k_dp_rx_h_attn_first_mpdu(rx_desc); + if (first_mpdu) + ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status); + + ath11k_dp_rx_h_mpdu(ar, amsdu_list, rx_desc, rx_status); +} + +static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out, + size_t size) +{ + u8 *qc; + 
int tid; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return ""; + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & ieee80211_qos_ctl_tid_mask; + snprintf(out, size, "tid %d", tid); + + return out; +} + +static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi, + struct sk_buff *msdu) +{ + struct ieee80211_rx_status *status; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; + char tid[32]; + + status = ieee80211_skb_rxcb(msdu); + + ath11k_dbg(ar->ab, ath11k_dbg_data, + "rx skb %pk len %u peer %pm %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i ", + msdu, + msdu->len, + ieee80211_get_sa(hdr), + ath11k_print_get_tid(hdr, tid, sizeof(tid)), + is_multicast_ether_addr(ieee80211_get_da(hdr)) ? + "mcast" : "ucast", + (__le16_to_cpu(hdr->seq_ctrl) & ieee80211_sctl_seq) >> 4, + (status->encoding == rx_enc_legacy) ? "legacy" : "", + (status->encoding == rx_enc_ht) ? "ht" : "", + (status->encoding == rx_enc_vht) ? "vht" : "", + (status->encoding == rx_enc_he) ? "he" : "", + (status->bw == rate_info_bw_40) ? "40" : "", + (status->bw == rate_info_bw_80) ? "80" : "", + (status->bw == rate_info_bw_160) ? "160" : "", + status->enc_flags & rx_enc_flag_short_gi ? 
"sgi " : "", + status->rate_idx, + status->nss, + status->freq, + status->band, status->flag, + !!(status->flag & rx_flag_failed_fcs_crc), + !!(status->flag & rx_flag_mmic_error), + !!(status->flag & rx_flag_amsdu_more)); + + /* todo: trace rx packet */ + + ieee80211_rx_napi(ar->hw, null, msdu, napi); +} + +static void ath11k_dp_rx_pre_deliver_amsdu(struct ath11k *ar, + struct sk_buff_head *amsdu_list, + struct ieee80211_rx_status *rxs) +{ + struct sk_buff *msdu; + struct sk_buff *first_subframe; + struct ieee80211_rx_status *status; + + first_subframe = skb_peek(amsdu_list); + + skb_queue_walk(amsdu_list, msdu) { + /* setup per-msdu flags */ + if (skb_queue_empty(amsdu_list)) + rxs->flag &= ~rx_flag_amsdu_more; + else + rxs->flag |= rx_flag_amsdu_more; + + if (msdu == first_subframe) { + first_subframe = null; + rxs->flag &= ~rx_flag_allow_same_pn; + } else { + rxs->flag |= rx_flag_allow_same_pn; + } + rxs->flag |= rx_flag_skip_monitor; + + status = ieee80211_skb_rxcb(msdu); + *status = *rxs; + } +} + +static void ath11k_dp_rx_process_pending_packets(struct ath11k_base *ab, + struct napi_struct *napi, + struct sk_buff_head *pending_q, + int *quota, u8 mac_id) +{ + struct ath11k *ar; + struct sk_buff *msdu; + struct ath11k_pdev *pdev; + + if (skb_queue_empty(pending_q)) + return; + + ar = ab->pdevs[mac_id].ar; + + rcu_read_lock(); + pdev = rcu_dereference(ab->pdevs_active[mac_id]); + + while (*quota && (msdu = __skb_dequeue(pending_q))) { + if (!pdev) { + dev_kfree_skb_any(msdu); + continue; + } + + ath11k_dp_rx_deliver_msdu(ar, napi, msdu); + (*quota)--; + } + rcu_read_unlock(); +} + +int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, struct sk_buff_head *pending_q, + int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct ath11k_pdev_dp *dp = &ar->dp; + struct ieee80211_rx_status *rx_status = &dp->rx_status; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + struct hal_srng *srng; + struct 
hal_rx_meta_info meta_info; + struct sk_buff *msdu; + struct sk_buff_head msdu_list; + struct sk_buff_head amsdu_list; + struct ath11k_skb_rxcb *rxcb; + u32 *rx_desc; + int buf_id; + int num_buffs_reaped = 0; + int quota = budget; + int ret; + bool done = false; + + /* process any pending packets from the previous napi poll. + * note: all msdu's in this pending_q corresponds to the same mac id + * due to pdev based reo dest mapping and also since each irq group id + * maps to specific reo dest ring. + */ + ath11k_dp_rx_process_pending_packets(ab, napi, pending_q, "a, + mac_id); + + /* if all quota is exhausted by processing the pending_q, + * wait for the next napi poll to reap the new info + */ + if (!quota) + goto exit; + + __skb_queue_head_init(&msdu_list); + + srng = &ab->hal.srng_list[dp->reo_dst_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + +try_again: + while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { + memset(&meta_info, 0, sizeof(meta_info)); + ath11k_hal_rx_parse_dst_ring_desc(ab, rx_desc, &meta_info); + + buf_id = field_get(dp_rxdma_buf_cookie_buf_id, + meta_info.msdu_meta.cookie); + spin_lock_bh(&rx_ring->idr_lock); + msdu = idr_find(&rx_ring->bufs_idr, buf_id); + if (!msdu) { + ath11k_warn(ab, "frame rx with invalid buf_id %d ", + buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + continue; + } + + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + rxcb = ath11k_skb_rxcb(msdu); + dma_unmap_single(ab->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + dma_from_device); + + num_buffs_reaped++; + + if (meta_info.push_reason != + hal_reo_dest_ring_push_reason_routing_instruction) { + /* todo: check if the msdu can be sent up for processing */ + dev_kfree_skb_any(msdu); + ab->soc_stats.hal_reo_error[dp->reo_dst_ring.ring_id]++; + continue; + } + + rxcb->is_first_msdu = meta_info.msdu_meta.first; + rxcb->is_last_msdu = meta_info.msdu_meta.last; + 
rxcb->is_continuation = meta_info.msdu_meta.continuation; + rxcb->mac_id = mac_id; + __skb_queue_tail(&msdu_list, msdu); + + /* stop reaping from the ring once quota is exhausted + * and we've received all msdu's in the the amsdu. the + * additional msdu's reaped in excess of quota here would + * be pushed into the pending queue to be processed during + * the next napi poll. + * note: more profiling can be done to see the impact on + * pending_q and throughput during various traffic & density + * and how use of budget instead of remaining quota affects it. + */ + if (num_buffs_reaped >= quota && rxcb->is_last_msdu && + !rxcb->is_continuation) { + done = true; + break; + } + } + + /* hw might have updated the head pointer after we cached it. + * in this case, even though there are entries in the ring we'll + * get rx_desc null. give the read another try with updated cached + * head pointer so that we can reap complete mpdu in the current + * rx processing. + */ + if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) { + ath11k_hal_srng_access_end(ab, srng); + goto try_again; + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + if (!num_buffs_reaped) + goto exit; + + /* should we reschedule it later if we are not able to replenish all + * the buffers? + */ + ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buffs_reaped, + hal_rx_buf_rbm_sw3_bm, gfp_atomic); + + rcu_read_lock(); + if (!rcu_dereference(ab->pdevs_active[mac_id])) { + __skb_queue_purge(&msdu_list); + goto rcu_unlock; + } + + if (test_bit(ath11k_cac_running, &ar->dev_flags)) { + __skb_queue_purge(&msdu_list); + goto rcu_unlock; + } + + while (!skb_queue_empty(&msdu_list)) { + __skb_queue_head_init(&amsdu_list); + ret = ath11k_dp_rx_retrieve_amsdu(ar, &msdu_list, &amsdu_list); + if (ret) { + if (ret == -eio) { + ath11k_err(ab, "rx ring got corrupted %d ", ret); + __skb_queue_purge(&msdu_list); + /* should stop processing any more rx in + * future from this ring? 
+ */ + goto rcu_unlock; + } + + /* a-msdu retrieval got failed due to non-fatal condition, + * continue processing with the next msdu. + */ + continue; + } + + ath11k_dp_rx_process_amsdu(ar, &amsdu_list, rx_status); + + ath11k_dp_rx_pre_deliver_amsdu(ar, &amsdu_list, rx_status); + skb_queue_splice_tail(&amsdu_list, pending_q); + } + + while (quota && (msdu = __skb_dequeue(pending_q))) { + ath11k_dp_rx_deliver_msdu(ar, napi, msdu); + quota--; + } + +rcu_unlock: + rcu_read_unlock(); +exit: + return budget - quota; +} + +static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats; + u32 num_msdu; + + if (!rx_stats) + return; + + num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + + ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; + + rx_stats->num_msdu += num_msdu; + rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count + + ppdu_info->tcp_ack_msdu_count; + rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count; + rx_stats->other_msdu_count += ppdu_info->other_msdu_count; + + if (ppdu_info->preamble_type == hal_rx_preamble_11a || + ppdu_info->preamble_type == hal_rx_preamble_11b) { + ppdu_info->nss = 1; + ppdu_info->mcs = hal_rx_max_mcs; + ppdu_info->tid = ieee80211_num_tids; + } + + if (ppdu_info->nss > 0 && ppdu_info->nss <= hal_rx_max_nss) + rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu; + + if (ppdu_info->mcs <= hal_rx_max_mcs) + rx_stats->mcs_count[ppdu_info->mcs] += num_msdu; + + if (ppdu_info->gi < hal_rx_gi_max) + rx_stats->gi_count[ppdu_info->gi] += num_msdu; + + if (ppdu_info->bw < hal_rx_bw_max) + rx_stats->bw_count[ppdu_info->bw] += num_msdu; + + if (ppdu_info->ldpc < hal_rx_su_mu_coding_max) + rx_stats->coding_count[ppdu_info->ldpc] += num_msdu; + + if (ppdu_info->tid <= ieee80211_num_tids) + rx_stats->tid_count[ppdu_info->tid] += num_msdu; + + if (ppdu_info->preamble_type < hal_rx_preamble_max) + 
rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu; + + if (ppdu_info->reception_type < hal_rx_reception_type_max) + rx_stats->reception_type[ppdu_info->reception_type] += num_msdu; + + if (ppdu_info->is_stbc) + rx_stats->stbc_count += num_msdu; + + if (ppdu_info->beamformed) + rx_stats->beamformed_count += num_msdu; + + if (ppdu_info->num_mpdu_fcs_ok > 1) + rx_stats->ampdu_msdu_count += num_msdu; + else + rx_stats->non_ampdu_msdu_count += num_msdu; + + rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok; + rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err; + + arsta->rssi_comb = ppdu_info->rssi_comb; + rx_stats->rx_duration += ppdu_info->rx_duration; + arsta->rx_duration = rx_stats->rx_duration; +} + +static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab, + struct dp_rxdma_ring *rx_ring, + int *buf_id, gfp_t gfp) +{ + struct sk_buff *skb; + dma_addr_t paddr; + + skb = dev_alloc_skb(dp_rx_buffer_size + + dp_rx_buffer_align_size); + + if (!skb) + goto fail_alloc_skb; + + if (!is_aligned((unsigned long)skb->data, + dp_rx_buffer_align_size)) { + skb_pull(skb, ptr_align(skb->data, dp_rx_buffer_align_size) - + skb->data); + } + + paddr = dma_map_single(ab->dev, skb->data, + skb->len + skb_tailroom(skb), + dma_bidirectional); + if (unlikely(dma_mapping_error(ab->dev, paddr))) + goto fail_free_skb; + + spin_lock_bh(&rx_ring->idr_lock); + *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0, + rx_ring->bufs_max, gfp); + spin_unlock_bh(&rx_ring->idr_lock); + if (*buf_id < 0) + goto fail_dma_unmap; + + ath11k_skb_rxcb(skb)->paddr = paddr; + return skb; + +fail_dma_unmap: + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + dma_bidirectional); +fail_free_skb: + dev_kfree_skb_any(skb); +fail_alloc_skb: + return null; +} + +int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, + struct dp_rxdma_ring *rx_ring, + int req_entries, + enum hal_rx_buf_return_buf_manager mgr, + gfp_t gfp) +{ + struct 
hal_srng *srng; + u32 *desc; + struct sk_buff *skb; + int num_free; + int num_remain; + int buf_id; + u32 cookie; + dma_addr_t paddr; + + req_entries = min(req_entries, rx_ring->bufs_max); + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + num_free = ath11k_hal_srng_src_num_free(ab, srng, true); + + req_entries = min(num_free, req_entries); + num_remain = req_entries; + + while (num_remain > 0) { + skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, + &buf_id, gfp); + if (!skb) + break; + paddr = ath11k_skb_rxcb(skb)->paddr; + + desc = ath11k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) + goto fail_desc_get; + + cookie = field_prep(dp_rxdma_buf_cookie_pdev_id, mac_id) | + field_prep(dp_rxdma_buf_cookie_buf_id, buf_id); + + num_remain--; + + ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr); + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; + +fail_desc_get: + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb), + dma_bidirectional); + dev_kfree_skb_any(skb); + ath11k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return req_entries - num_remain; +} + +static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, + int *budget, struct sk_buff_head *skb_list) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct ath11k_pdev_dp *dp = &ar->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring; + struct hal_srng *srng; + void *rx_mon_status_desc; + struct sk_buff *skb; + struct ath11k_skb_rxcb *rxcb; + struct hal_tlv_hdr *tlv; + u32 cookie; + int buf_id; + dma_addr_t paddr; + u8 rbm; + int num_buffs_reaped = 0; + + srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + 
ath11k_hal_srng_access_begin(ab, srng); + while (*budget) { + *budget -= 1; + rx_mon_status_desc = + ath11k_hal_srng_src_peek(ab, srng); + if (!rx_mon_status_desc) + break; + + ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr, + &cookie, &rbm); + if (paddr) { + buf_id = field_get(dp_rxdma_buf_cookie_buf_id, cookie); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + if (!skb) { + ath11k_warn(ab, "rx monitor status with invalid buf_id %d ", + buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + continue; + } + + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + rxcb = ath11k_skb_rxcb(skb); + + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + dma_from_device); + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + dma_bidirectional); + + tlv = (struct hal_tlv_hdr *)skb->data; + if (field_get(hal_tlv_hdr_tag, tlv->tl) != + hal_rx_status_buffer_done) { + ath11k_hal_srng_src_get_next_entry(ab, srng); + continue; + } + + __skb_queue_tail(skb_list, skb); + } + + skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring, + &buf_id, gfp_atomic); + + if (!skb) { + ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0, + hal_rx_buf_rbm_sw3_bm); + num_buffs_reaped++; + break; + } + rxcb = ath11k_skb_rxcb(skb); + + cookie = field_prep(dp_rxdma_buf_cookie_pdev_id, mac_id) | + field_prep(dp_rxdma_buf_cookie_buf_id, buf_id); + + ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr, + cookie, hal_rx_buf_rbm_sw3_bm); + ath11k_hal_srng_src_get_next_entry(ab, srng); + num_buffs_reaped++; + } + ath11k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return num_buffs_reaped; +} + +int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + enum hal_rx_mon_status hal_status; + struct sk_buff *skb; + struct sk_buff_head skb_list; + struct 
hal_rx_mon_ppdu_info ppdu_info; + struct ath11k_peer *peer; + struct ath11k_sta *arsta; + int num_buffs_reaped = 0; + + __skb_queue_head_init(&skb_list); + + num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget, + &skb_list); + if (!num_buffs_reaped) + goto exit; + + while ((skb = __skb_dequeue(&skb_list))) { + memset(&ppdu_info, 0, sizeof(ppdu_info)); + ppdu_info.peer_id = hal_invalid_peerid; + + if (ath11k_debug_is_pktlog_rx_stats_enabled(ar)) + trace_ath11k_htt_rxdesc(ar, skb->data, dp_rx_buffer_size); + + hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb); + + if (ppdu_info.peer_id == hal_invalid_peerid || + hal_status != hal_rx_mon_status_ppdu_done) { + dev_kfree_skb_any(skb); + continue; + } + + rcu_read_lock(); + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id); + + if (!peer || !peer->sta) { + ath11k_warn(ab, "failed to find the peer with peer_id %d ", + ppdu_info.peer_id); + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + dev_kfree_skb_any(skb); + continue; + } + + arsta = (struct ath11k_sta *)peer->sta->drv_priv; + ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info); + + if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr)) + trace_ath11k_htt_rxdesc(ar, skb->data, dp_rx_buffer_size); + + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); + + dev_kfree_skb_any(skb); + } +exit: + return num_buffs_reaped; +} + +static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, + u32 *link_desc, + enum hal_wbm_rel_bm_act action) +{ + struct ath11k_dp *dp = &ab->dp; + struct hal_srng *srng; + u32 *desc; + int ret = 0; + + srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + desc = ath11k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -enobufs; + goto exit; + } + + ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc, + action); + +exit: + ath11k_hal_srng_access_end(ab, 
srng); + + spin_unlock_bh(&srng->lock); + + return ret; +} + +static void ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar, + struct sk_buff *msdu, + struct hal_rx_desc *rx_desc, + struct ieee80211_rx_status *rx_status) +{ + u8 rx_channel; + enum hal_encrypt_type enctype; + bool is_decrypted; + u32 err_bitmap; + + is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc); + enctype = ath11k_dp_rx_h_mpdu_start_enctype(rx_desc); + err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc); + + if (err_bitmap & dp_rx_mpdu_err_fcs) + rx_status->flag |= rx_flag_failed_fcs_crc; + + if (err_bitmap & dp_rx_mpdu_err_tkip_mic) + rx_status->flag |= rx_flag_mmic_error; + + rx_status->encoding = rx_enc_legacy; + rx_status->bw = rate_info_bw_20; + + rx_status->flag |= rx_flag_no_signal_val; + + rx_channel = ath11k_dp_rx_h_msdu_start_freq(rx_desc); + + if (rx_channel >= 1 && rx_channel <= 14) { + rx_status->band = nl80211_band_2ghz; + } else if (rx_channel >= 36 && rx_channel <= 173) { + rx_status->band = nl80211_band_5ghz; + } else { + ath11k_warn(ar->ab, "unsupported channel info received %d ", + rx_channel); + return; + } + + rx_status->freq = ieee80211_channel_to_frequency(rx_channel, + rx_status->band); + ath11k_dp_rx_h_rate(ar, rx_desc, rx_status); + + /* rx fragments are received in raw mode */ + skb_trim(msdu, msdu->len - fcs_len); + + if (is_decrypted) { + rx_status->flag |= rx_flag_decrypted | rx_flag_mic_stripped; + skb_trim(msdu, msdu->len - + ath11k_dp_rx_crypto_mic_len(ar, enctype)); + } +} + +static int +ath11k_dp_process_rx_err_buf(struct ath11k *ar, struct napi_struct *napi, + int buf_id, bool frag) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring; + struct ieee80211_rx_status rx_status = {0}; + struct sk_buff *msdu; + struct ath11k_skb_rxcb *rxcb; + struct ieee80211_rx_status *status; + struct hal_rx_desc *rx_desc; + u16 msdu_len; + + spin_lock_bh(&rx_ring->idr_lock); + msdu = idr_find(&rx_ring->bufs_idr, buf_id); + if (!msdu) 
{ + ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d ", + buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + return -einval; + } + + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + rxcb = ath11k_skb_rxcb(msdu); + dma_unmap_single(ar->ab->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + dma_from_device); + + if (!frag) { + /* process only rx fragments below, and drop + * msdu's indicated due to error reasons. + */ + dev_kfree_skb_any(msdu); + return 0; + } + + rcu_read_lock(); + if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) { + dev_kfree_skb_any(msdu); + goto exit; + } + + if (test_bit(ath11k_cac_running, &ar->dev_flags)) { + dev_kfree_skb_any(msdu); + goto exit; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(rx_desc); + skb_put(msdu, hal_rx_desc_size + msdu_len); + skb_pull(msdu, hal_rx_desc_size); + + ath11k_dp_rx_frag_h_mpdu(ar, msdu, rx_desc, &rx_status); + + status = ieee80211_skb_rxcb(msdu); + + *status = rx_status; + + ath11k_dp_rx_deliver_msdu(ar, napi, msdu); + +exit: + rcu_read_unlock(); + return 0; +} + +int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, + int budget) +{ + struct hal_rx_msdu_meta meta[hal_num_rx_msdus_per_link_desc]; + struct dp_link_desc_bank *link_desc_banks; + enum hal_rx_buf_return_buf_manager rbm; + int tot_n_bufs_reaped, quota, ret, i; + int n_bufs_reaped[max_radios] = {0}; + struct hal_rx_meta_info meta_info; + struct dp_rxdma_ring *rx_ring; + struct dp_srng *reo_except; + u32 desc_bank, num_msdus; + struct hal_srng *srng; + struct ath11k_dp *dp; + void *link_desc_va; + int buf_id, mac_id; + struct ath11k *ar; + dma_addr_t paddr; + u32 *desc; + bool is_frag; + + tot_n_bufs_reaped = 0; + quota = budget; + + dp = &ab->dp; + reo_except = &dp->reo_except_ring; + link_desc_banks = dp->link_desc_banks; + + srng = &ab->hal.srng_list[reo_except->ring_id]; + + spin_lock_bh(&srng->lock); + + 
ath11k_hal_srng_access_begin(ab, srng); + + while (budget && + (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { + ab->soc_stats.err_ring_pkts++; + ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr, + &desc_bank); + if (ret) { + ath11k_warn(ab, "failed to parse error reo desc %d ", + ret); + continue; + } + link_desc_va = link_desc_banks[desc_bank].vaddr + + (paddr - link_desc_banks[desc_bank].paddr); + ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, meta, + &rbm); + if (rbm != hal_rx_buf_rbm_wbm_idle_desc_list && + rbm != hal_rx_buf_rbm_sw3_bm) { + ab->soc_stats.invalid_rbm++; + ath11k_warn(ab, "invalid return buffer manager %d ", rbm); + ath11k_dp_rx_link_desc_return(ab, desc, + hal_wbm_rel_bm_act_rel_msdu); + continue; + } + + memset(&meta_info, 0, sizeof(meta_info)); + ath11k_hal_rx_parse_dst_ring_desc(ab, desc, &meta_info); + + is_frag = meta_info.mpdu_meta.frag; + + /* return the link desc back to wbm idle list */ + ath11k_dp_rx_link_desc_return(ab, desc, + hal_wbm_rel_bm_act_put_in_idle); + + for (i = 0; i < num_msdus; i++) { + buf_id = field_get(dp_rxdma_buf_cookie_buf_id, + meta[i].cookie); + + mac_id = field_get(dp_rxdma_buf_cookie_pdev_id, + meta[i].cookie); + + ar = ab->pdevs[mac_id].ar; + + if (!ath11k_dp_process_rx_err_buf(ar, napi, buf_id, + is_frag)) { + n_bufs_reaped[mac_id]++; + tot_n_bufs_reaped++; + } + } + + if (tot_n_bufs_reaped >= quota) { + tot_n_bufs_reaped = quota; + goto exit; + } + + budget = quota - tot_n_bufs_reaped; + } + +exit: + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + for (i = 0; i < ab->num_radios; i++) { + if (!n_bufs_reaped[i]) + continue; + + ar = ab->pdevs[i].ar; + rx_ring = &ar->dp.rx_refill_buf_ring; + + ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i], + hal_rx_buf_rbm_sw3_bm, gfp_atomic); + } + + return tot_n_bufs_reaped; +} + +static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar, + int msdu_len, + struct sk_buff_head *msdu_list) +{ + struct 
sk_buff *skb, *tmp; + struct ath11k_skb_rxcb *rxcb; + int n_buffs; + + n_buffs = div_round_up(msdu_len, + (dp_rx_buffer_size - hal_rx_desc_size)); + + skb_queue_walk_safe(msdu_list, skb, tmp) { + rxcb = ath11k_skb_rxcb(skb); + if (rxcb->err_rel_src == hal_wbm_rel_src_module_reo && + rxcb->err_code == hal_reo_dest_ring_error_code_desc_addr_zero) { + if (!n_buffs) + break; + __skb_unlink(skb, msdu_list); + dev_kfree_skb_any(skb); + n_buffs--; + } + } +} + +static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status, + struct sk_buff_head *msdu_list) +{ + struct sk_buff_head amsdu_list; + u16 msdu_len; + struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + u8 l3pad_bytes; + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); + + if ((msdu_len + hal_rx_desc_size) > dp_rx_buffer_size) { + /* first buffer will be freed by the caller, so deduct it's length */ + msdu_len = msdu_len - (dp_rx_buffer_size - hal_rx_desc_size); + ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list); + return -einval; + } + + if (!ath11k_dp_rx_h_attn_msdu_done(desc)) { + ath11k_warn(ar->ab, + "msdu_done bit not set in null_q_des processing "); + __skb_queue_purge(msdu_list); + return -eio; + } + + /* handle null queue descriptor violations arising out a missing + * reo queue for a given peer or a given tid. this typically + * may happen if a packet is received on a qos enabled tid before the + * addba negotiation for that tid, when the tid queue is setup. or + * it may also happen for mc/bc frames if they are not routed to the + * non-qos tid queue, in the absence of any other default tid queue. + * this error can show up both in a reo destination or wbm release ring. 
+ */ + + __skb_queue_head_init(&amsdu_list); + + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); + + l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); + + if ((hal_rx_desc_size + l3pad_bytes + msdu_len) > dp_rx_buffer_size) + return -einval; + + skb_put(msdu, hal_rx_desc_size + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_size + l3pad_bytes); + + ath11k_dp_rx_h_ppdu(ar, desc, status); + + __skb_queue_tail(&amsdu_list, msdu); + + ath11k_dp_rx_h_mpdu(ar, &amsdu_list, desc, status); + + /* please note that caller will having the access to msdu and completing + * rx with mac80211. need not worry about cleaning up amsdu_list. + */ + + return 0; +} + +static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status, + struct sk_buff_head *msdu_list) +{ + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + bool drop = false; + + ar->ab->soc_stats.reo_error[rxcb->err_code]++; + + switch (rxcb->err_code) { + case hal_reo_dest_ring_error_code_desc_addr_zero: + if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list)) + drop = true; + break; + default: + /* todo: review other errors and process them to mac80211 + * as appropriate. 
+ */ + drop = true; + break; + } + + return drop; +} + +static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status) +{ + u16 msdu_len; + struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data; + u8 l3pad_bytes; + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + + rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(desc); + rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(desc); + + l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(desc); + msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(desc); + skb_put(msdu, hal_rx_desc_size + l3pad_bytes + msdu_len); + skb_pull(msdu, hal_rx_desc_size + l3pad_bytes); + + ath11k_dp_rx_h_ppdu(ar, desc, status); + + status->flag |= (rx_flag_mmic_stripped | rx_flag_mmic_error | + rx_flag_decrypted); + + ath11k_dp_rx_h_undecap(ar, msdu, desc, + hal_encrypt_type_tkip_mic, status, false); +} + +static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu, + struct ieee80211_rx_status *status) +{ + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + bool drop = false; + + ar->ab->soc_stats.rxdma_error[rxcb->err_code]++; + + switch (rxcb->err_code) { + case hal_reo_entr_ring_rxdma_ecode_tkip_mic_err: + ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status); + break; + default: + /* todo: review other rxdma error code to check if anything is + * worth reporting to mac80211 + */ + drop = true; + break; + } + + return drop; +} + +static void ath11k_dp_rx_wbm_err(struct ath11k *ar, + struct napi_struct *napi, + struct sk_buff *msdu, + struct sk_buff_head *msdu_list) +{ + struct ath11k_skb_rxcb *rxcb = ath11k_skb_rxcb(msdu); + struct ieee80211_rx_status rxs = {0}; + struct ieee80211_rx_status *status; + bool drop = true; + + switch (rxcb->err_rel_src) { + case hal_wbm_rel_src_module_reo: + drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list); + break; + case hal_wbm_rel_src_module_rxdma: + drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs); + break; + default: 
+ /* msdu will get freed */ + break; + } + + if (drop) { + dev_kfree_skb_any(msdu); + return; + } + + status = ieee80211_skb_rxcb(msdu); + *status = rxs; + + ath11k_dp_rx_deliver_msdu(ar, napi, msdu); +} + +int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, + struct napi_struct *napi, int budget) +{ + struct ath11k *ar; + struct ath11k_dp *dp = &ab->dp; + struct dp_rxdma_ring *rx_ring; + struct hal_rx_wbm_rel_info err_info; + struct hal_srng *srng; + struct sk_buff *msdu; + struct sk_buff_head msdu_list[max_radios]; + struct ath11k_skb_rxcb *rxcb; + u32 *rx_desc; + int buf_id, mac_id; + int num_buffs_reaped[max_radios] = {0}; + int total_num_buffs_reaped = 0; + int ret, i; + + for (i = 0; i < max_radios; i++) + __skb_queue_head_init(&msdu_list[i]); + + srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + while (budget) { + rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng); + if (!rx_desc) + break; + + ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info); + if (ret) { + ath11k_warn(ab, + "failed to parse rx error in wbm_rel ring desc %d ", + ret); + continue; + } + + buf_id = field_get(dp_rxdma_buf_cookie_buf_id, err_info.cookie); + mac_id = field_get(dp_rxdma_buf_cookie_pdev_id, err_info.cookie); + + ar = ab->pdevs[mac_id].ar; + rx_ring = &ar->dp.rx_refill_buf_ring; + + spin_lock_bh(&rx_ring->idr_lock); + msdu = idr_find(&rx_ring->bufs_idr, buf_id); + if (!msdu) { + ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d ", + buf_id, mac_id); + spin_unlock_bh(&rx_ring->idr_lock); + continue; + } + + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + rxcb = ath11k_skb_rxcb(msdu); + dma_unmap_single(ab->dev, rxcb->paddr, + msdu->len + skb_tailroom(msdu), + dma_from_device); + + num_buffs_reaped[mac_id]++; + total_num_buffs_reaped++; + budget--; + + if (err_info.push_reason != + hal_reo_dest_ring_push_reason_err_detected) { + 
dev_kfree_skb_any(msdu); + continue; + } + + rxcb->err_rel_src = err_info.err_rel_src; + rxcb->err_code = err_info.err_code; + rxcb->rx_desc = (struct hal_rx_desc *)msdu->data; + __skb_queue_tail(&msdu_list[mac_id], msdu); + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + if (!total_num_buffs_reaped) + goto done; + + for (i = 0; i < ab->num_radios; i++) { + if (!num_buffs_reaped[i]) + continue; + + ar = ab->pdevs[i].ar; + rx_ring = &ar->dp.rx_refill_buf_ring; + + ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i], + hal_rx_buf_rbm_sw3_bm, gfp_atomic); + } + + rcu_read_lock(); + for (i = 0; i < ab->num_radios; i++) { + if (!rcu_dereference(ab->pdevs_active[i])) { + __skb_queue_purge(&msdu_list[i]); + continue; + } + + ar = ab->pdevs[i].ar; + + if (test_bit(ath11k_cac_running, &ar->dev_flags)) { + __skb_queue_purge(&msdu_list[i]); + continue; + } + + while ((msdu = __skb_dequeue(&msdu_list[i])) != null) + ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]); + } + rcu_read_unlock(); +done: + return total_num_buffs_reaped; +} + +int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring; + struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring; + struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks; + struct hal_srng *srng; + struct hal_rx_msdu_meta meta[hal_num_rx_msdus_per_link_desc]; + enum hal_rx_buf_return_buf_manager rbm; + enum hal_reo_entr_rxdma_ecode rxdma_err_code; + struct ath11k_skb_rxcb *rxcb; + struct sk_buff *skb; + struct hal_reo_entrance_ring *entr_ring; + void *desc; + int num_buf_freed = 0; + int quota = budget; + dma_addr_t paddr; + u32 desc_bank; + void *link_desc_va; + int num_msdus; + int i; + int buf_id; + + srng = &ab->hal.srng_list[err_ring->ring_id]; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + while (quota-- && + (desc = 
ath11k_hal_srng_dst_get_next_entry(ab, srng))) { + ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank); + + entr_ring = (struct hal_reo_entrance_ring *)desc; + rxdma_err_code = + field_get(hal_reo_entr_ring_info1_rxdma_error_code, + entr_ring->info1); + ab->soc_stats.rxdma_error[rxdma_err_code]++; + + link_desc_va = link_desc_banks[desc_bank].vaddr + + (paddr - link_desc_banks[desc_bank].paddr); + ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, meta, + &rbm); + + for (i = 0; i < num_msdus; i++) { + buf_id = field_get(dp_rxdma_buf_cookie_buf_id, + meta[i].cookie); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + if (!skb) { + ath11k_warn(ab, "rxdma error with invalid buf_id %d ", + buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + continue; + } + + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + rxcb = ath11k_skb_rxcb(skb); + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + dma_from_device); + dev_kfree_skb_any(skb); + + num_buf_freed++; + } + + ath11k_dp_rx_link_desc_return(ab, desc, + hal_wbm_rel_bm_act_put_in_idle); + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); + + if (num_buf_freed) + ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed, + hal_rx_buf_rbm_sw3_bm, gfp_atomic); + + return budget - quota; +} + +void ath11k_dp_process_reo_status(struct ath11k_base *ab) +{ + struct ath11k_dp *dp = &ab->dp; + struct hal_srng *srng; + struct dp_reo_cmd *cmd, *tmp; + bool found = false; + u32 *reo_desc; + u16 tag; + struct hal_reo_status reo_status; + + srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id]; + + memset(&reo_status, 0, sizeof(reo_status)); + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) { + tag = field_get(hal_srng_tlv_hdr_tag, *reo_desc); + + switch (tag) { + case hal_reo_get_queue_stats_status: + 
ath11k_hal_reo_status_queue_stats(ab, reo_desc, + &reo_status); + break; + case hal_reo_flush_queue_status: + ath11k_hal_reo_flush_queue_status(ab, reo_desc, + &reo_status); + break; + case hal_reo_flush_cache_status: + ath11k_hal_reo_flush_cache_status(ab, reo_desc, + &reo_status); + break; + case hal_reo_unblock_cache_status: + ath11k_hal_reo_unblk_cache_status(ab, reo_desc, + &reo_status); + break; + case hal_reo_flush_timeout_list_status: + ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc, + &reo_status); + break; + case hal_reo_descriptor_threshold_reached_status: + ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc, + &reo_status); + break; + case hal_reo_update_rx_reo_queue_status: + ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc, + &reo_status); + break; + default: + ath11k_warn(ab, "unknown reo status type %d ", tag); + continue; + } + + spin_lock_bh(&dp->reo_cmd_lock); + list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { + if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) { + found = true; + list_del(&cmd->list); + break; + } + } + spin_unlock_bh(&dp->reo_cmd_lock); + + if (found) { + cmd->handler(dp, (void *)&cmd->data, + reo_status.uniform_hdr.cmd_status); + kfree(cmd); + } + + found = false; + } + + ath11k_hal_srng_access_end(ab, srng); + + spin_unlock_bh(&srng->lock); +} + +void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + + ath11k_dp_rx_pdev_srng_free(ar); + ath11k_dp_rxdma_pdev_buf_free(ar); +} + +int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct ath11k_pdev_dp *dp = &ar->dp; + u32 ring_id; + int ret; + + ret = ath11k_dp_rx_pdev_srng_alloc(ar); + if (ret) { + ath11k_warn(ab, "failed to setup rx srngs "); + return ret; + } + + ret = ath11k_dp_rxdma_pdev_buf_setup(ar); + if (ret) { + ath11k_warn(ab, "failed to setup rxdma ring "); + return ret; + } + + ring_id = 
dp->rx_refill_buf_ring.refill_buf_ring.ring_id; + ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, hal_rxdma_buf); + if (ret) { + ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d ", + ret); + return ret; + } + + ring_id = dp->rxdma_err_dst_ring.ring_id; + ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, hal_rxdma_dst); + if (ret) { + ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d ", + ret); + return ret; + } + + ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; + ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, + mac_id, hal_rxdma_monitor_buf); + if (ret) { + ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d ", + ret); + return ret; + } + ret = ath11k_dp_tx_htt_srng_setup(ab, + dp->rxdma_mon_dst_ring.ring_id, + mac_id, hal_rxdma_monitor_dst); + if (ret) { + ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d ", + ret); + return ret; + } + ret = ath11k_dp_tx_htt_srng_setup(ab, + dp->rxdma_mon_desc_ring.ring_id, + mac_id, hal_rxdma_monitor_desc); + if (ret) { + ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d ", + ret); + return ret; + } + ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; + ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, + hal_rxdma_monitor_status); + if (ret) { + ath11k_warn(ab, + "failed to configure mon_status_refill_ring %d ", + ret); + return ret; + } + return 0; +} + +static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len) +{ + if (*total_len >= (dp_rx_buffer_size - sizeof(struct hal_rx_desc))) { + *frag_len = dp_rx_buffer_size - sizeof(struct hal_rx_desc); + *total_len -= *frag_len; + } else { + *frag_len = *total_len; + *total_len = 0; + } +} + +static +int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar, + void *p_last_buf_addr_info, + u8 mac_id) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct dp_srng *dp_srng; + void *hal_srng; + void *src_srng_desc; + int ret = 0; + + dp_srng = &dp->rxdma_mon_desc_ring; + hal_srng = 
&ar->ab->hal.srng_list[dp_srng->ring_id]; + + ath11k_hal_srng_access_begin(ar->ab, hal_srng); + + src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng); + + if (src_srng_desc) { + struct ath11k_buffer_addr *src_desc = + (struct ath11k_buffer_addr *)src_srng_desc; + + *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info); + } else { + ath11k_dbg(ar->ab, ath11k_dbg_data, + "monitor link desc ring %d full", mac_id); + ret = -enomem; + } + + ath11k_hal_srng_access_end(ar->ab, hal_srng); + return ret; +} + +static +void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc, + dma_addr_t *paddr, u32 *sw_cookie, + void **pp_buf_addr_info) +{ + struct hal_rx_msdu_link *msdu_link = + (struct hal_rx_msdu_link *)rx_msdu_link_desc; + struct ath11k_buffer_addr *buf_addr_info; + u8 rbm = 0; + + buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info; + + ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm); + + *pp_buf_addr_info = (void *)buf_addr_info; +} + +static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len) +{ + if (skb->len > len) { + skb_trim(skb, len); + } else { + if (skb_tailroom(skb) < len - skb->len) { + if ((pskb_expand_head(skb, 0, + len - skb->len - skb_tailroom(skb), + gfp_atomic))) { + dev_kfree_skb_any(skb); + return -enomem; + } + } + skb_put(skb, (len - skb->len)); + } + return 0; +} + +static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar, + void *msdu_link_desc, + struct hal_rx_msdu_list *msdu_list, + u16 *num_msdus) +{ + struct hal_rx_msdu_details *msdu_details = null; + struct rx_msdu_desc *msdu_desc_info = null; + struct hal_rx_msdu_link *msdu_link = null; + int i; + u32 last = field_prep(rx_msdu_desc_info0_last_msdu_in_mpdu, 1); + u32 first = field_prep(rx_msdu_desc_info0_first_msdu_in_mpdu, 1); + u8 tmp = 0; + + msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc; + msdu_details = &msdu_link->msdu_link[0]; + + for (i = 0; i < hal_rx_num_msdu_desc; i++) { + if 
(field_get(buffer_addr_info0_addr, + msdu_details[i].buf_addr_info.info0) == 0) { + msdu_desc_info = &msdu_details[i - 1].rx_msdu_info; + msdu_desc_info->info0 |= last; + ; + break; + } + msdu_desc_info = &msdu_details[i].rx_msdu_info; + + if (!i) + msdu_desc_info->info0 |= first; + else if (i == (hal_rx_num_msdu_desc - 1)) + msdu_desc_info->info0 |= last; + msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0; + msdu_list->msdu_info[i].msdu_len = + hal_rx_msdu_pkt_length_get(msdu_desc_info->info0); + msdu_list->sw_cookie[i] = + field_get(buffer_addr_info1_sw_cookie, + msdu_details[i].buf_addr_info.info1); + tmp = field_get(buffer_addr_info1_ret_buf_mgr, + msdu_details[i].buf_addr_info.info1); + msdu_list->rbm[i] = tmp; + } + *num_msdus = i; +} + +static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id, + u32 *rx_bufs_used) +{ + u32 ret = 0; + + if ((*ppdu_id < msdu_ppdu_id) && + ((msdu_ppdu_id - *ppdu_id) < dp_not_ppdu_id_wrap_around)) { + *ppdu_id = msdu_ppdu_id; + ret = msdu_ppdu_id; + } else if ((*ppdu_id > msdu_ppdu_id) && + ((*ppdu_id - msdu_ppdu_id) > dp_not_ppdu_id_wrap_around)) { + /* mon_dst is behind than mon_status + * skip dst_ring and free it + */ + *rx_bufs_used += 1; + *ppdu_id = msdu_ppdu_id; + ret = msdu_ppdu_id; + } + return ret; +} + +static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info, + bool *is_frag, u32 *total_len, + u32 *frag_len, u32 *msdu_cnt) +{ + if (info->msdu_flags & rx_msdu_desc_info0_msdu_continuation) { + if (!*is_frag) { + *total_len = info->msdu_len; + *is_frag = true; + } + ath11k_dp_mon_set_frag_len(total_len, + frag_len); + } else { + if (*is_frag) { + ath11k_dp_mon_set_frag_len(total_len, + frag_len); + } else { + *frag_len = info->msdu_len; + } + *is_frag = false; + *msdu_cnt -= 1; + } +} + +static u32 +ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, + void *ring_entry, struct sk_buff **head_msdu, + struct sk_buff **tail_msdu, u32 *npackets, + u32 *ppdu_id) +{ + struct ath11k_pdev_dp 
*dp = &ar->dp; + struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; + struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring; + struct sk_buff *msdu = null, *last = null; + struct hal_rx_msdu_list msdu_list; + void *p_buf_addr_info, *p_last_buf_addr_info; + struct hal_rx_desc *rx_desc; + void *rx_msdu_link_desc; + dma_addr_t paddr; + u16 num_msdus = 0; + u32 rx_buf_size, rx_pkt_offset, sw_cookie; + u32 rx_bufs_used = 0, i = 0; + u32 msdu_ppdu_id = 0, msdu_cnt = 0; + u32 total_len = 0, frag_len = 0; + bool is_frag, is_first_msdu; + bool drop_mpdu = false; + struct ath11k_skb_rxcb *rxcb; + struct hal_reo_entrance_ring *ent_desc = + (struct hal_reo_entrance_ring *)ring_entry; + int buf_id; + + ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr, + &sw_cookie, &p_last_buf_addr_info, + &msdu_cnt); + + if (field_get(hal_reo_entr_ring_info1_rxdma_push_reason, + ent_desc->info1) == + hal_reo_dest_ring_push_reason_err_detected) { + u8 rxdma_err = + field_get(hal_reo_entr_ring_info1_rxdma_error_code, + ent_desc->info1); + if (rxdma_err == hal_reo_entr_ring_rxdma_ecode_flush_request_err || + rxdma_err == hal_reo_entr_ring_rxdma_ecode_mpdu_len_err || + rxdma_err == hal_reo_entr_ring_rxdma_ecode_overflow_err) { + drop_mpdu = true; + pmon->rx_mon_stats.dest_mpdu_drop++; + } + } + + is_frag = false; + is_first_msdu = true; + + do { + if (pmon->mon_last_linkdesc_paddr == paddr) { + pmon->rx_mon_stats.dup_mon_linkdesc_cnt++; + return rx_bufs_used; + } + + rx_msdu_link_desc = + (void *)pmon->link_desc_banks[sw_cookie].vaddr + + (paddr - pmon->link_desc_banks[sw_cookie].paddr); + + ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list, + &num_msdus); + + for (i = 0; i < num_msdus; i++) { + u32 l2_hdr_offset; + + if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) { + ath11k_dbg(ar->ab, ath11k_dbg_data, + "i %d last_cookie %d is same ", + i, pmon->mon_last_buf_cookie); + drop_mpdu = true; + pmon->rx_mon_stats.dup_mon_buf_cnt++; + continue; + } + 
buf_id = field_get(dp_rxdma_buf_cookie_buf_id, + msdu_list.sw_cookie[i]); + + spin_lock_bh(&rx_ring->idr_lock); + msdu = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + if (!msdu) { + ath11k_dbg(ar->ab, ath11k_dbg_data, + "msdu_pop: invalid buf_id %d ", buf_id); + break; + } + rxcb = ath11k_skb_rxcb(msdu); + if (!rxcb->unmapped) { + dma_unmap_single(ar->ab->dev, rxcb->paddr, + msdu->len + + skb_tailroom(msdu), + dma_from_device); + rxcb->unmapped = 1; + } + if (drop_mpdu) { + ath11k_dbg(ar->ab, ath11k_dbg_data, + "i %d drop msdu %p *ppdu_id %x ", + i, msdu, *ppdu_id); + dev_kfree_skb_any(msdu); + msdu = null; + goto next_msdu; + } + + rx_desc = (struct hal_rx_desc *)msdu->data; + + rx_pkt_offset = sizeof(struct hal_rx_desc); + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(rx_desc); + + if (is_first_msdu) { + if (!ath11k_dp_rxdesc_mpdu_valid(rx_desc)) { + drop_mpdu = true; + dev_kfree_skb_any(msdu); + msdu = null; + pmon->mon_last_linkdesc_paddr = paddr; + goto next_msdu; + } + + msdu_ppdu_id = + ath11k_dp_rxdesc_get_ppduid(rx_desc); + + if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id, + ppdu_id, + &rx_bufs_used)) + return rx_bufs_used; + pmon->mon_last_linkdesc_paddr = paddr; + is_first_msdu = false; + } + ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i], + &is_frag, &total_len, + &frag_len, &msdu_cnt); + rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len; + + ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size); + + if (!(*head_msdu)) + *head_msdu = msdu; + else if (last) + last->next = msdu; + + last = msdu; +next_msdu: + pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i]; + rx_bufs_used++; + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + } + + ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr, + &sw_cookie, + &p_buf_addr_info); + + if (ath11k_dp_rx_monitor_link_desc_return(ar, + p_last_buf_addr_info, + dp->mac_id)) + ath11k_dbg(ar->ab, ath11k_dbg_data, + 
"dp_rx_monitor_link_desc_return failed"); + + p_last_buf_addr_info = p_buf_addr_info; + + } while (paddr && msdu_cnt); + + if (last) + last->next = null; + + *tail_msdu = msdu; + + if (msdu_cnt == 0) + *npackets = 1; + + return rx_bufs_used; +} + +static void ath11k_dp_rx_msdus_set_payload(struct sk_buff *msdu) +{ + u32 rx_pkt_offset, l2_hdr_offset; + + rx_pkt_offset = sizeof(struct hal_rx_desc); + l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad((struct hal_rx_desc *)msdu->data); + skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); +} + +static struct sk_buff * +ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar, + u32 mac_id, struct sk_buff *head_msdu, + struct sk_buff *last_msdu, + struct ieee80211_rx_status *rxs) +{ + struct sk_buff *msdu, *mpdu_buf, *prev_buf; + u32 decap_format, wifi_hdr_len; + struct hal_rx_desc *rx_desc; + char *hdr_desc; + u8 *dest; + struct ieee80211_hdr_3addr *wh; + + mpdu_buf = null; + + if (!head_msdu) + goto err_merge_fail; + + rx_desc = (struct hal_rx_desc *)head_msdu->data; + + if (ath11k_dp_rxdesc_get_mpdulen_err(rx_desc)) + return null; + + decap_format = ath11k_dp_rxdesc_get_decap_format(rx_desc); + + ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs); + + if (decap_format == dp_rx_decap_type_raw) { + ath11k_dp_rx_msdus_set_payload(head_msdu); + + prev_buf = head_msdu; + msdu = head_msdu->next; + + while (msdu) { + ath11k_dp_rx_msdus_set_payload(msdu); + + prev_buf = msdu; + msdu = msdu->next; + } + + prev_buf->next = null; + + skb_trim(prev_buf, prev_buf->len - hal_rx_fcs_len); + } else if (decap_format == dp_rx_decap_type_native_wifi) { + __le16 qos_field; + u8 qos_pkt = 0; + + rx_desc = (struct hal_rx_desc *)head_msdu->data; + hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); + + /* base size */ + wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr); + wh = (struct ieee80211_hdr_3addr *)hdr_desc; + + if (ieee80211_is_data_qos(wh->frame_control)) { + struct ieee80211_qos_hdr *qwh = + (struct ieee80211_qos_hdr *)hdr_desc; + + qos_field = qwh->qos_ctrl; + 
qos_pkt = 1; + } + msdu = head_msdu; + + while (msdu) { + rx_desc = (struct hal_rx_desc *)msdu->data; + hdr_desc = ath11k_dp_rxdesc_get_80211hdr(rx_desc); + + if (qos_pkt) { + dest = skb_push(msdu, sizeof(__le16)); + if (!dest) + goto err_merge_fail; + memcpy(dest, hdr_desc, wifi_hdr_len); + memcpy(dest + wifi_hdr_len, + (u8 *)&qos_field, sizeof(__le16)); + } + ath11k_dp_rx_msdus_set_payload(msdu); + prev_buf = msdu; + msdu = msdu->next; + } + dest = skb_put(prev_buf, hal_rx_fcs_len); + if (!dest) + goto err_merge_fail; + + ath11k_dbg(ar->ab, ath11k_dbg_data, + "mpdu_buf %pk mpdu_buf->len %u", + prev_buf, prev_buf->len); + } else { + ath11k_dbg(ar->ab, ath11k_dbg_data, + "decap format %d is not supported! ", + decap_format); + goto err_merge_fail; + } + + return head_msdu; + +err_merge_fail: + if (mpdu_buf && decap_format != dp_rx_decap_type_raw) { + ath11k_dbg(ar->ab, ath11k_dbg_data, + "err_merge_fail mpdu_buf %pk", mpdu_buf); + /* free the head buffer */ + dev_kfree_skb_any(mpdu_buf); + } + return null; +} + +static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id, + struct sk_buff *head_msdu, + struct sk_buff *tail_msdu, + struct napi_struct *napi) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct sk_buff *mon_skb, *skb_next, *header; + struct ieee80211_rx_status *rxs = &dp->rx_status, *status; + + mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu, + tail_msdu, rxs); + + if (!mon_skb) + goto mon_deliver_fail; + + header = mon_skb; + + rxs->flag = 0; + do { + skb_next = mon_skb->next; + if (!skb_next) + rxs->flag &= ~rx_flag_amsdu_more; + else + rxs->flag |= rx_flag_amsdu_more; + + if (mon_skb == header) { + header = null; + rxs->flag &= ~rx_flag_allow_same_pn; + } else { + rxs->flag |= rx_flag_allow_same_pn; + } + rxs->flag |= rx_flag_only_monitor; + + status = ieee80211_skb_rxcb(mon_skb); + *status = *rxs; + + ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb); + mon_skb = skb_next; + } while (mon_skb && (mon_skb != tail_msdu)); + rxs->flag = 0; 
+ + return 0; + +mon_deliver_fail: + mon_skb = head_msdu; + while (mon_skb) { + skb_next = mon_skb->next; + dev_kfree_skb_any(mon_skb); + mon_skb = skb_next; + } + return -einval; +} + +static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota, + struct napi_struct *napi) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; + void *ring_entry; + void *mon_dst_srng; + u32 ppdu_id; + u32 rx_bufs_used; + struct ath11k_pdev_mon_stats *rx_mon_stats; + u32 npackets = 0; + + mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + + if (!mon_dst_srng) { + ath11k_warn(ar->ab, + "hal monitor destination ring init failed -- %pk", + mon_dst_srng); + return; + } + + spin_lock_bh(&pmon->mon_lock); + + ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); + + ppdu_id = pmon->mon_ppdu_info.ppdu_id; + rx_bufs_used = 0; + rx_mon_stats = &pmon->rx_mon_stats; + + while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { + struct sk_buff *head_msdu, *tail_msdu; + + head_msdu = null; + tail_msdu = null; + + rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry, + &head_msdu, + &tail_msdu, + &npackets, &ppdu_id); + + if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) { + pmon->mon_ppdu_status = dp_ppdu_status_start; + ath11k_dbg(ar->ab, ath11k_dbg_data, + "dest_rx: new ppdu_id %x != status ppdu_id %x", + ppdu_id, pmon->mon_ppdu_info.ppdu_id); + break; + } + if (head_msdu && tail_msdu) { + ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu, + tail_msdu, napi); + rx_mon_stats->dest_mpdu_done++; + } + + ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab, + mon_dst_srng); + } + ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); + + spin_unlock_bh(&pmon->mon_lock); + + if (rx_bufs_used) { + rx_mon_stats->dest_ppdu_done++; + ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, + &dp->rxdma_mon_buf_ring, + rx_bufs_used, + hal_rx_buf_rbm_sw3_bm, gfp_atomic); + } +} + +static void 
ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar, + u32 quota, + struct napi_struct *napi) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; + struct hal_rx_mon_ppdu_info *ppdu_info; + struct sk_buff *status_skb; + u32 tlv_status = hal_tlv_status_buf_done; + struct ath11k_pdev_mon_stats *rx_mon_stats; + + ppdu_info = &pmon->mon_ppdu_info; + rx_mon_stats = &pmon->rx_mon_stats; + + if (pmon->mon_ppdu_status != dp_ppdu_status_start) + return; + + while (!skb_queue_empty(&pmon->rx_status_q)) { + status_skb = skb_dequeue(&pmon->rx_status_q); + + tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info, + status_skb); + if (tlv_status == hal_tlv_status_ppdu_done) { + rx_mon_stats->status_ppdu_done++; + pmon->mon_ppdu_status = dp_ppdu_status_done; + ath11k_dp_rx_mon_dest_process(ar, quota, napi); + pmon->mon_ppdu_status = dp_ppdu_status_start; + } + dev_kfree_skb_any(status_skb); + } +} + +static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; + int num_buffs_reaped = 0; + + num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget, + &pmon->rx_status_q); + if (num_buffs_reaped) + ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi); + + return num_buffs_reaped; +} + +int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget) +{ + struct ath11k *ar = ab->pdevs[mac_id].ar; + int ret = 0; + + if (test_bit(ath11k_flag_monitor_enabled, &ar->monitor_flags)) + ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget); + else + ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget); + return ret; +} + +static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; 
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; + + skb_queue_head_init(&pmon->rx_status_q); + + pmon->mon_ppdu_status = dp_ppdu_status_start; + + memset(&pmon->rx_mon_stats, 0, + sizeof(pmon->rx_mon_stats)); + return 0; +} + +int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = &dp->mon_data; + struct hal_srng *mon_desc_srng = null; + struct dp_srng *dp_srng; + int ret = 0; + u32 n_link_desc = 0; + + ret = ath11k_dp_rx_pdev_mon_status_attach(ar); + if (ret) { + ath11k_warn(ar->ab, "pdev_mon_status_attach() failed"); + return ret; + } + + dp_srng = &dp->rxdma_mon_desc_ring; + n_link_desc = dp_srng->size / + ath11k_hal_srng_get_entrysize(hal_rxdma_monitor_desc); + mon_desc_srng = + &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id]; + + ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks, + hal_rxdma_monitor_desc, mon_desc_srng, + n_link_desc); + if (ret) { + ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed"); + return ret; + } + pmon->mon_last_linkdesc_paddr = 0; + pmon->mon_last_buf_cookie = dp_rx_desc_cookie_max + 1; + spin_lock_init(&pmon->mon_lock); + return 0; +} + +static int ath11k_dp_mon_link_free(struct ath11k *ar) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct ath11k_mon_data *pmon = &dp->mon_data; + + ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks, + hal_rxdma_monitor_desc, + &dp->rxdma_mon_desc_ring); + return 0; +} + +int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar) +{ + ath11k_dp_mon_link_free(ar); + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dp_rx.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ +#ifndef ath11k_dp_rx_h +#define ath11k_dp_rx_h + +#include "core.h" +#include "rx_desc.h" +#include "debug.h" + +#define dp_rx_mpdu_err_fcs bit(0) +#define dp_rx_mpdu_err_decrypt bit(1) +#define dp_rx_mpdu_err_tkip_mic bit(2) +#define dp_rx_mpdu_err_amsdu_err bit(3) +#define dp_rx_mpdu_err_overflow bit(4) +#define dp_rx_mpdu_err_msdu_len bit(5) +#define dp_rx_mpdu_err_mpdu_len bit(6) +#define dp_rx_mpdu_err_unencrypted_frame bit(7) + +enum dp_rx_decap_type { + dp_rx_decap_type_raw, + dp_rx_decap_type_native_wifi, + dp_rx_decap_type_ethernet2_dix, + dp_rx_decap_type_8023, +}; + +struct ath11k_dp_amsdu_subframe_hdr { + u8 dst[eth_alen]; + u8 src[eth_alen]; + __be16 len; +} __packed; + +struct ath11k_dp_rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 snap_type; +} __packed; + +int ath11k_dp_rx_ampdu_start(struct ath11k *ar, + struct ieee80211_ampdu_params *params); +int ath11k_dp_rx_ampdu_stop(struct ath11k *ar, + struct ieee80211_ampdu_params *params); +void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer); +int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, + u8 tid, u32 ba_win_sz, u16 ssn); +void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab, + struct sk_buff *skb); +int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx); +void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx); +void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab); +void ath11k_dp_process_reo_status(struct ath11k_base *ab); +int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget); +int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab, + struct napi_struct *napi, int budget); +int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, + int budget); +int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, struct sk_buff_head *pending_q, + int budget); +int 
ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id, + struct dp_rxdma_ring *rx_ring, + int req_entries, + enum hal_rx_buf_return_buf_manager mgr, + gfp_t gfp); +int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath11k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data); +int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget); +int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id, + struct napi_struct *napi, int budget); +int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id, + struct dp_rxdma_ring *rx_ring, + int req_entries, + enum hal_rx_buf_return_buf_manager mgr, + gfp_t gfp); +int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar); +int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar); + +#endif /* ath11k_dp_rx_h */ diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include "core.h" +#include "dp_tx.h" +#include "debug.h" +#include "hw.h" + +/* note: any of the mapped ring id value must not exceed dp_tcl_num_ring_max */ +static const u8 +ath11k_txq_tcl_ring_map[ath11k_hw_max_queues] = { 0x0, 0x1, 0x2, 0x2 }; + +static enum hal_tcl_encap_type +ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb) +{ + /* todo: determine encap type based on vif_type and configuration */ + return hal_tcl_encap_type_native_wifi; +} + +static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + u8 *qos_ctl; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return; + + qos_ctl = ieee80211_get_qos_ctl(hdr); + memmove(skb->data + ieee80211_qos_ctl_len, + skb->data, (void *)qos_ctl - (void *)skb->data); + skb_pull(skb, ieee80211_qos_ctl_len); + + hdr = (void *)skb->data; + hdr->frame_control &= ~__cpu_to_le16(ieee80211_stype_qos_data); +} + +static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return hal_desc_reo_non_qos_tid; + else + return skb->priority & ieee80211_qos_ctl_tid_mask; +} + +static enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher) +{ + switch (cipher) { + case wlan_cipher_suite_wep40: + return hal_encrypt_type_wep_40; + case wlan_cipher_suite_wep104: + return hal_encrypt_type_wep_104; + case wlan_cipher_suite_tkip: + return hal_encrypt_type_tkip_mic; + case wlan_cipher_suite_ccmp: + return hal_encrypt_type_ccmp_128; + case wlan_cipher_suite_ccmp_256: + return hal_encrypt_type_ccmp_256; + case wlan_cipher_suite_gcmp: + return hal_encrypt_type_gcmp_128; + case wlan_cipher_suite_gcmp_256: + return hal_encrypt_type_aes_gcmp_256; + default: + return hal_encrypt_type_open; + } +} + +int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, + struct sk_buff *skb) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_dp *dp = &ab->dp; + 
struct hal_tx_info ti = {0}; + struct ieee80211_tx_info *info = ieee80211_skb_cb(skb); + struct ath11k_skb_cb *skb_cb = ath11k_skb_cb(skb); + struct hal_srng *tcl_ring; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct dp_tx_ring *tx_ring; + u8 cached_desc[hal_tcl_desc_len]; + void *hal_tcl_desc; + u8 pool_id; + u8 hal_ring_id; + int ret; + + if (test_bit(ath11k_flag_crash_flush, &ar->ab->dev_flags)) + return -eshutdown; + + if (!ieee80211_is_data(hdr->frame_control)) + return -enotsupp; + + pool_id = skb_get_queue_mapping(skb) & (ath11k_hw_max_queues - 1); + ti.ring_id = ath11k_txq_tcl_ring_map[pool_id]; + + tx_ring = &dp->tx_ring[ti.ring_id]; + + spin_lock_bh(&tx_ring->tx_idr_lock); + ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0, + dp_tx_idr_size - 1, gfp_atomic); + spin_unlock_bh(&tx_ring->tx_idr_lock); + + if (ret < 0) + return -enospc; + + ti.desc_id = field_prep(dp_tx_desc_id_mac_id, ar->pdev_idx) | + field_prep(dp_tx_desc_id_msdu_id, ret) | + field_prep(dp_tx_desc_id_pool_id, pool_id); + ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb); + ti.meta_data_flags = arvif->tcl_metadata; + + if (info->control.hw_key) + ti.encrypt_type = + ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher); + else + ti.encrypt_type = hal_encrypt_type_open; + + ti.addr_search_flags = arvif->hal_addr_search_flags; + ti.search_type = arvif->search_type; + ti.type = hal_tcl_desc_type_buffer; + ti.pkt_offset = 0; + ti.lmac_id = ar->lmac_id; + ti.bss_ast_hash = arvif->ast_hash; + ti.dscp_tid_tbl_idx = 0; + + if (skb->ip_summed == checksum_partial) { + ti.flags0 |= field_prep(hal_tcl_data_cmd_info1_ip4_cksum_en, 1) | + field_prep(hal_tcl_data_cmd_info1_udp4_cksum_en, 1) | + field_prep(hal_tcl_data_cmd_info1_udp6_cksum_en, 1) | + field_prep(hal_tcl_data_cmd_info1_tcp4_cksum_en, 1) | + field_prep(hal_tcl_data_cmd_info1_tcp6_cksum_en, 1); + } + + if (ieee80211_vif_is_mesh(arvif->vif)) + ti.flags1 |= field_prep(hal_tcl_data_cmd_info2_mesh_enable, 1); + + ti.flags1 |= 
field_prep(hal_tcl_data_cmd_info2_tid_overwrite, 1); + + ti.tid = ath11k_dp_tx_get_tid(skb); + + switch (ti.encap_type) { + case hal_tcl_encap_type_native_wifi: + ath11k_dp_tx_encap_nwifi(skb); + break; + case hal_tcl_encap_type_raw: + /* todo: for checksum_partial case in raw mode, hw checksum offload + * is not applicable, hence manual checksum calculation using + * skb_checksum_help() is needed + */ + case hal_tcl_encap_type_ethernet: + case hal_tcl_encap_type_802_3: + /* todo: take care of other encap modes as well */ + ret = -einval; + goto fail_remove_idr; + } + + ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, dma_to_device); + if (dma_mapping_error(ab->dev, ti.paddr)) { + ath11k_warn(ab, "failed to dma map data tx buffer "); + ret = -enomem; + goto fail_remove_idr; + } + + ti.data_len = skb->len; + skb_cb->paddr = ti.paddr; + skb_cb->vif = arvif->vif; + skb_cb->ar = ar; + + memset(cached_desc, 0, hal_tcl_desc_len); + + ath11k_hal_tx_cmd_desc_setup(ab, cached_desc, &ti); + + hal_ring_id = tx_ring->tcl_data_ring.ring_id; + tcl_ring = &ab->hal.srng_list[hal_ring_id]; + + spin_lock_bh(&tcl_ring->lock); + + ath11k_hal_srng_access_begin(ab, tcl_ring); + + hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring); + if (!hal_tcl_desc) { + /* note: it is highly unlikely we'll be running out of tcl_ring + * desc because the desc is directly enqueued onto hw queue. + * so add tx packet throttling logic in future if required. 
+ */ + ath11k_hal_srng_access_end(ab, tcl_ring); + spin_unlock_bh(&tcl_ring->lock); + ret = -enomem; + goto fail_unmap_dma; + } + + ath11k_hal_tx_desc_sync(cached_desc, hal_tcl_desc); + + ath11k_hal_srng_access_end(ab, tcl_ring); + + spin_unlock_bh(&tcl_ring->lock); + + spin_lock_bh(&tx_ring->tx_idr_lock); + tx_ring->num_tx_pending++; + spin_unlock_bh(&tx_ring->tx_idr_lock); + + atomic_inc(&ar->dp.num_tx_pending); + + return 0; + +fail_unmap_dma: + dma_unmap_single(ab->dev, ti.paddr, ti.data_len, dma_to_device); + +fail_remove_idr: + spin_lock_bh(&tx_ring->tx_idr_lock); + idr_remove(&tx_ring->txbuf_idr, + field_get(dp_tx_desc_id_msdu_id, ti.desc_id)); + spin_unlock_bh(&tx_ring->tx_idr_lock); + + return ret; +} + +static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id, + int msdu_id, + struct dp_tx_ring *tx_ring) +{ + struct ath11k *ar; + struct sk_buff *msdu; + struct ath11k_skb_cb *skb_cb; + + spin_lock_bh(&tx_ring->tx_idr_lock); + msdu = idr_find(&tx_ring->txbuf_idr, msdu_id); + if (!msdu) { + ath11k_warn(ab, "tx completion for unknown msdu_id %d ", + msdu_id); + spin_unlock_bh(&tx_ring->tx_idr_lock); + return; + } + + skb_cb = ath11k_skb_cb(msdu); + + idr_remove(&tx_ring->txbuf_idr, msdu_id); + tx_ring->num_tx_pending--; + spin_unlock_bh(&tx_ring->tx_idr_lock); + + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, dma_to_device); + dev_kfree_skb_any(msdu); + + ar = ab->pdevs[mac_id].ar; + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); +} + +static void +ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, + struct dp_tx_ring *tx_ring, + struct ath11k_dp_htt_wbm_tx_status *ts) +{ + struct sk_buff *msdu; + struct ieee80211_tx_info *info; + struct ath11k_skb_cb *skb_cb; + struct ath11k *ar; + + spin_lock_bh(&tx_ring->tx_idr_lock); + msdu = idr_find(&tx_ring->txbuf_idr, ts->msdu_id); + if (!msdu) { + ath11k_warn(ab, "htt tx completion for unknown msdu_id %d ", + ts->msdu_id); + 
spin_unlock_bh(&tx_ring->tx_idr_lock); + return; + } + + skb_cb = ath11k_skb_cb(msdu); + info = ieee80211_skb_cb(msdu); + + ar = skb_cb->ar; + + idr_remove(&tx_ring->txbuf_idr, ts->msdu_id); + tx_ring->num_tx_pending--; + spin_unlock_bh(&tx_ring->tx_idr_lock); + + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); + + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, dma_to_device); + + memset(&info->status, 0, sizeof(info->status)); + + if (ts->acked) { + if (!(info->flags & ieee80211_tx_ctl_no_ack)) { + info->flags |= ieee80211_tx_stat_ack; + info->status.ack_signal = ath11k_default_noise_floor + + ts->ack_rssi; + info->status.is_valid_ack_signal = true; + } else { + info->flags |= ieee80211_tx_stat_noack_transmitted; + } + } + + ieee80211_tx_status(ar->hw, msdu); +} + +static void +ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab, + void *desc, u8 mac_id, + u32 msdu_id, struct dp_tx_ring *tx_ring) +{ + struct htt_tx_wbm_completion *status_desc; + struct ath11k_dp_htt_wbm_tx_status ts = {0}; + enum hal_wbm_htt_tx_comp_status wbm_status; + + status_desc = desc + htt_tx_wbm_comp_status_offset; + + wbm_status = field_get(htt_tx_wbm_comp_info0_status, + status_desc->info0); + + switch (wbm_status) { + case hal_wbm_rel_htt_tx_comp_status_ok: + case hal_wbm_rel_htt_tx_comp_status_drop: + case hal_wbm_rel_htt_tx_comp_status_ttl: + ts.acked = (wbm_status == hal_wbm_rel_htt_tx_comp_status_ok); + ts.msdu_id = msdu_id; + ts.ack_rssi = field_get(htt_tx_wbm_comp_info1_ack_rssi, + status_desc->info1); + ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts); + break; + case hal_wbm_rel_htt_tx_comp_status_reinj: + case hal_wbm_rel_htt_tx_comp_status_inspect: + ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring); + break; + case hal_wbm_rel_htt_tx_comp_status_mec_notify: + /* this event is to be handled only when the driver decides to + * use wds offload functionality. 
+ */ + break; + default: + ath11k_warn(ab, "unknown htt tx status %d ", wbm_status); + break; + } +} + +static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar, + struct sk_buff *msdu, + struct hal_tx_status *ts) +{ + struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats; + + if (ts->try_cnt > 1) { + peer_stats->retry_pkts += ts->try_cnt - 1; + peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len; + + if (ts->status != hal_wbm_tqm_rel_reason_frame_acked) { + peer_stats->failed_pkts += 1; + peer_stats->failed_bytes += msdu->len; + } + } +} + +static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, + struct sk_buff *msdu, + struct hal_tx_status *ts) +{ + struct ath11k_base *ab = ar->ab; + struct ieee80211_tx_info *info; + struct ath11k_skb_cb *skb_cb; + + if (warn_on_once(ts->buf_rel_source != hal_wbm_rel_src_module_tqm)) { + /* must not happen */ + return; + } + + skb_cb = ath11k_skb_cb(msdu); + + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, dma_to_device); + + rcu_read_lock(); + + if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { + dev_kfree_skb_any(msdu); + goto exit; + } + + if (!skb_cb->vif) { + dev_kfree_skb_any(msdu); + goto exit; + } + + info = ieee80211_skb_cb(msdu); + memset(&info->status, 0, sizeof(info->status)); + + /* skip tx rate update from ieee80211_status*/ + info->status.rates[0].idx = -1; + + if (ts->status == hal_wbm_tqm_rel_reason_frame_acked && + !(info->flags & ieee80211_tx_ctl_no_ack)) { + info->flags |= ieee80211_tx_stat_ack; + info->status.ack_signal = ath11k_default_noise_floor + + ts->ack_rssi; + info->status.is_valid_ack_signal = true; + } + + if (ts->status == hal_wbm_tqm_rel_reason_cmd_remove_tx && + (info->flags & ieee80211_tx_ctl_no_ack)) + info->flags |= ieee80211_tx_stat_noack_transmitted; + + if (ath11k_debug_is_extd_tx_stats_enabled(ar)) { + if (ts->flags & hal_tx_status_flags_first_msdu) { + if (ar->last_ppdu_id == 0) { + ar->last_ppdu_id = ts->ppdu_id; + } else if (ar->last_ppdu_id == 
ts->ppdu_id || + ar->cached_ppdu_id == ar->last_ppdu_id) { + ar->cached_ppdu_id = ar->last_ppdu_id; + ar->cached_stats.is_ampdu = true; + ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts); + memset(&ar->cached_stats, 0, + sizeof(struct ath11k_per_peer_tx_stats)); + } else { + ar->cached_stats.is_ampdu = false; + ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts); + memset(&ar->cached_stats, 0, + sizeof(struct ath11k_per_peer_tx_stats)); + } + ar->last_ppdu_id = ts->ppdu_id; + } + + ath11k_dp_tx_cache_peer_stats(ar, msdu, ts); + } + + /* note: tx rate status reporting. tx completion status does not have + * necessary information (for example nss) to build the tx rate. + * might end up reporting it out-of-band from htt stats. + */ + + ieee80211_tx_status(ar->hw, msdu); + +exit: + rcu_read_unlock(); +} + +void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id) +{ + struct ath11k *ar; + struct ath11k_dp *dp = &ab->dp; + int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id; + struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id]; + struct sk_buff *msdu; + struct hal_wbm_release_ring tx_status; + struct hal_tx_status ts; + struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id]; + u32 *desc; + u32 msdu_id; + u8 mac_id; + + spin_lock_bh(&status_ring->lock); + + ath11k_hal_srng_access_begin(ab, status_ring); + + spin_lock_bh(&tx_ring->tx_status_lock); + while (!kfifo_is_full(&tx_ring->tx_status_fifo) && + (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) { + ath11k_hal_tx_status_desc_sync((void *)desc, + (void *)&tx_status); + kfifo_put(&tx_ring->tx_status_fifo, tx_status); + } + + if ((ath11k_hal_srng_dst_peek(ab, status_ring) != null) && + kfifo_is_full(&tx_ring->tx_status_fifo)) { + /* todo: process pending tx_status messages when kfifo_is_full() */ + ath11k_warn(ab, "unable to process some of the tx_status ring desc because status_fifo is full "); + } + + spin_unlock_bh(&tx_ring->tx_status_lock); + + 
ath11k_hal_srng_access_end(ab, status_ring); + spin_unlock_bh(&status_ring->lock); + + spin_lock_bh(&tx_ring->tx_status_lock); + while (kfifo_get(&tx_ring->tx_status_fifo, &tx_status)) { + memset(&ts, 0, sizeof(ts)); + ath11k_hal_tx_status_parse(ab, &tx_status, &ts); + + mac_id = field_get(dp_tx_desc_id_mac_id, ts.desc_id); + msdu_id = field_get(dp_tx_desc_id_msdu_id, ts.desc_id); + + if (ts.buf_rel_source == hal_wbm_rel_src_module_fw) { + ath11k_dp_tx_process_htt_tx_complete(ab, + (void *)&tx_status, + mac_id, msdu_id, + tx_ring); + continue; + } + + spin_lock_bh(&tx_ring->tx_idr_lock); + msdu = idr_find(&tx_ring->txbuf_idr, msdu_id); + if (!msdu) { + ath11k_warn(ab, "tx completion for unknown msdu_id %d ", + msdu_id); + spin_unlock_bh(&tx_ring->tx_idr_lock); + continue; + } + idr_remove(&tx_ring->txbuf_idr, msdu_id); + tx_ring->num_tx_pending--; + spin_unlock_bh(&tx_ring->tx_idr_lock); + + ar = ab->pdevs[mac_id].ar; + + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); + + /* todo: locking optimization so that tx_completion for an msdu + * is not called with tx_status_lock acquired + */ + ath11k_dp_tx_complete_msdu(ar, msdu, &ts); + } + spin_unlock_bh(&tx_ring->tx_status_lock); +} + +int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid, + enum hal_reo_cmd_type type, + struct ath11k_hal_reo_cmd *cmd, + void (*cb)(struct ath11k_dp *, void *, + enum hal_reo_cmd_status)) +{ + struct ath11k_dp *dp = &ab->dp; + struct dp_reo_cmd *dp_cmd; + struct hal_srng *cmd_ring; + int cmd_num; + + cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id]; + cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd); + + /* reo cmd ring descriptors has cmd_num starting from 1 */ + if (cmd_num <= 0) + return -einval; + + if (!cb) + return 0; + + /* can this be optimized so that we keep the pending command list only + * for tid delete command to free up the resoruce on the command status + * indication? 
+ */ + dp_cmd = kzalloc(sizeof(*dp_cmd), gfp_atomic); + + if (!dp_cmd) + return -enomem; + + memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid)); + dp_cmd->cmd_num = cmd_num; + dp_cmd->handler = cb; + + spin_lock_bh(&dp->reo_cmd_lock); + list_add_tail(&dp_cmd->list, &dp->reo_cmd_list); + spin_unlock_bh(&dp->reo_cmd_lock); + + return 0; +} + +static int +ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab, + int mac_id, u32 ring_id, + enum hal_ring_type ring_type, + enum htt_srng_ring_type *htt_ring_type, + enum htt_srng_ring_id *htt_ring_id) +{ + int lmac_ring_id_offset = 0; + int ret = 0; + + switch (ring_type) { + case hal_rxdma_buf: + lmac_ring_id_offset = mac_id * hal_srng_rings_per_lmac; + if (!(ring_id == (hal_srng_ring_id_wmac1_sw2rxdma0_buf + + lmac_ring_id_offset) || + ring_id == (hal_srng_ring_id_wmac1_sw2rxdma1_buf + + lmac_ring_id_offset))) { + ret = -einval; + } + *htt_ring_id = htt_rxdma_host_buf_ring; + *htt_ring_type = htt_sw_to_hw_ring; + break; + case hal_rxdma_dst: + *htt_ring_id = htt_rxdma_non_monitor_dest_ring; + *htt_ring_type = htt_hw_to_sw_ring; + break; + case hal_rxdma_monitor_buf: + *htt_ring_id = htt_rxdma_monitor_buf_ring; + *htt_ring_type = htt_sw_to_hw_ring; + break; + case hal_rxdma_monitor_status: + *htt_ring_id = htt_rxdma_monitor_status_ring; + *htt_ring_type = htt_sw_to_hw_ring; + break; + case hal_rxdma_monitor_dst: + *htt_ring_id = htt_rxdma_monitor_dest_ring; + *htt_ring_type = htt_hw_to_sw_ring; + break; + case hal_rxdma_monitor_desc: + *htt_ring_id = htt_rxdma_monitor_desc_ring; + *htt_ring_type = htt_sw_to_hw_ring; + break; + default: + ath11k_warn(ab, "unsupported ring type in dp :%d ", ring_type); + ret = -einval; + } + return ret; +} + +int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type) +{ + struct htt_srng_setup_cmd *cmd; + struct hal_srng *srng = &ab->hal.srng_list[ring_id]; + struct hal_srng_params params; + struct sk_buff *skb; + u32 ring_entry_sz; + 
int len = sizeof(*cmd); + dma_addr_t hp_addr, tp_addr; + enum htt_srng_ring_type htt_ring_type; + enum htt_srng_ring_id htt_ring_id; + int ret = 0; + + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -enomem; + + memset(¶ms, 0, sizeof(params)); + ath11k_hal_srng_get_params(ab, srng, ¶ms); + + hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng); + tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng); + + if (ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, + ring_type, &htt_ring_type, + &htt_ring_id)) + goto err_free; + + skb_put(skb, len); + cmd = (struct htt_srng_setup_cmd *)skb->data; + cmd->info0 = field_prep(htt_srng_setup_cmd_info0_msg_type, + htt_h2t_msg_type_sring_setup); + if (htt_ring_type == htt_sw_to_hw_ring || + htt_ring_type == htt_hw_to_sw_ring) + cmd->info0 |= field_prep(htt_srng_setup_cmd_info0_pdev_id, + dp_sw2hw_macid(mac_id)); + else + cmd->info0 |= field_prep(htt_srng_setup_cmd_info0_pdev_id, + mac_id); + cmd->info0 |= field_prep(htt_srng_setup_cmd_info0_ring_type, + htt_ring_type); + cmd->info0 |= field_prep(htt_srng_setup_cmd_info0_ring_id, htt_ring_id); + + cmd->ring_base_addr_lo = params.ring_base_paddr & + hal_addr_lsb_reg_mask; + + cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >> + hal_addr_msb_reg_shift; + + ret = ath11k_hal_srng_get_entrysize(ring_type); + if (ret < 0) + return -einval; + + ring_entry_sz = ret; + + ring_entry_sz >>= 2; + cmd->info1 = field_prep(htt_srng_setup_cmd_info1_ring_entry_size, + ring_entry_sz); + cmd->info1 |= field_prep(htt_srng_setup_cmd_info1_ring_size, + params.num_entries * ring_entry_sz); + cmd->info1 |= field_prep(htt_srng_setup_cmd_info1_ring_flags_msi_swap, + !!(params.flags & hal_srng_flags_msi_swap)); + cmd->info1 |= field_prep( + htt_srng_setup_cmd_info1_ring_flags_tlv_swap, + !!(params.flags & hal_srng_flags_data_tlv_swap)); + cmd->info1 |= field_prep( + htt_srng_setup_cmd_info1_ring_flags_host_fw_swap, + !!(params.flags & hal_srng_flags_ring_ptr_swap)); + if (htt_ring_type == 
htt_sw_to_hw_ring) + cmd->info1 |= htt_srng_setup_cmd_info1_ring_loop_cnt_dis; + + cmd->ring_head_off32_remote_addr_lo = hp_addr & hal_addr_lsb_reg_mask; + cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >> + hal_addr_msb_reg_shift; + + cmd->ring_tail_off32_remote_addr_lo = tp_addr & hal_addr_lsb_reg_mask; + cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >> + hal_addr_msb_reg_shift; + + cmd->ring_msi_addr_lo = 0; + cmd->ring_msi_addr_hi = 0; + cmd->msi_data = 0; + + cmd->intr_info = field_prep( + htt_srng_setup_cmd_intr_info_batch_counter_thresh, + params.intr_batch_cntr_thres_entries * ring_entry_sz); + cmd->intr_info |= field_prep( + htt_srng_setup_cmd_intr_info_intr_timer_thresh, + params.intr_timer_thres_us >> 3); + + cmd->info2 = 0; + if (params.flags & hal_srng_flags_low_thresh_intr_en) { + cmd->info2 = field_prep( + htt_srng_setup_cmd_info2_intr_low_thresh, + params.low_threshold); + } + + ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb); + if (ret) + goto err_free; + + return 0; + +err_free: + dev_kfree_skb_any(skb); + + return ret; +} + +#define htt_target_version_timeout_hz (3 * hz) + +int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab) +{ + struct ath11k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct htt_ver_req_cmd *cmd; + int len = sizeof(*cmd); + int ret; + + init_completion(&dp->htt_tgt_version_received); + + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -enomem; + + skb_put(skb, len); + cmd = (struct htt_ver_req_cmd *)skb->data; + cmd->ver_reg_info = field_prep(htt_ver_req_info_msg_id, + htt_h2t_msg_type_version_req); + + ret = ath11k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + ret = wait_for_completion_timeout(&dp->htt_tgt_version_received, + htt_target_version_timeout_hz); + if (ret == 0) { + ath11k_warn(ab, "htt target version request timed out "); + return -etimedout; + } + + if (dp->htt_tgt_ver_major != htt_target_version_major) { + ath11k_err(ab, "unsupported htt 
major version %d supported version is %d ", + dp->htt_tgt_ver_major, htt_target_version_major); + return -enotsupp; + } + + return 0; +} + +int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct htt_ppdu_stats_cfg_cmd *cmd; + int len = sizeof(*cmd); + u8 pdev_mask; + int ret; + + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -enomem; + + skb_put(skb, len); + cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data; + cmd->msg = field_prep(htt_ppdu_stats_cfg_msg_type, + htt_h2t_msg_type_ppdu_stats_cfg); + + pdev_mask = 1 << (ar->pdev_idx); + cmd->msg |= field_prep(htt_ppdu_stats_cfg_pdev_id, pdev_mask); + cmd->msg |= field_prep(htt_ppdu_stats_cfg_tlv_type_bitmask, mask); + + ret = ath11k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int rx_buf_size, + struct htt_rx_ring_tlv_filter *tlv_filter) +{ + struct htt_rx_ring_selection_cfg_cmd *cmd; + struct hal_srng *srng = &ab->hal.srng_list[ring_id]; + struct hal_srng_params params; + struct sk_buff *skb; + int len = sizeof(*cmd); + enum htt_srng_ring_type htt_ring_type; + enum htt_srng_ring_id htt_ring_id; + int ret = 0; + + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -enomem; + + memset(¶ms, 0, sizeof(params)); + ath11k_hal_srng_get_params(ab, srng, ¶ms); + + if (ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id, + ring_type, &htt_ring_type, + &htt_ring_id)) + goto err_free; + + skb_put(skb, len); + cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data; + cmd->info0 = field_prep(htt_rx_ring_selection_cfg_cmd_info0_msg_type, + htt_h2t_msg_type_rx_ring_selection_cfg); + if (htt_ring_type == htt_sw_to_hw_ring || + htt_ring_type == htt_hw_to_sw_ring) + cmd->info0 |= + 
field_prep(htt_rx_ring_selection_cfg_cmd_info0_pdev_id, + dp_sw2hw_macid(mac_id)); + else + cmd->info0 |= + field_prep(htt_rx_ring_selection_cfg_cmd_info0_pdev_id, + mac_id); + cmd->info0 |= field_prep(htt_rx_ring_selection_cfg_cmd_info0_ring_id, + htt_ring_id); + cmd->info0 |= field_prep(htt_rx_ring_selection_cfg_cmd_info0_ss, + !!(params.flags & hal_srng_flags_msi_swap)); + cmd->info0 |= field_prep(htt_rx_ring_selection_cfg_cmd_info0_ps, + !!(params.flags & hal_srng_flags_data_tlv_swap)); + + cmd->info1 = field_prep(htt_rx_ring_selection_cfg_cmd_info1_buf_size, + rx_buf_size); + cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0; + cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1; + cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2; + cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3; + cmd->rx_filter_tlv = tlv_filter->rx_filter; + + ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb); + if (ret) + goto err_free; + + return 0; + +err_free: + dev_kfree_skb_any(skb); + + return ret; +} + +int +ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type, + struct htt_ext_stats_cfg_params *cfg_params, + u64 cookie) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_dp *dp = &ab->dp; + struct sk_buff *skb; + struct htt_ext_stats_cfg_cmd *cmd; + int len = sizeof(*cmd); + int ret; + + skb = ath11k_htc_alloc_skb(ab, len); + if (!skb) + return -enomem; + + skb_put(skb, len); + + cmd = (struct htt_ext_stats_cfg_cmd *)skb->data; + memset(cmd, 0, sizeof(*cmd)); + cmd->hdr.msg_type = htt_h2t_msg_type_ext_stats_cfg; + + cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id; + + cmd->hdr.stats_type = type; + cmd->cfg_param0 = cfg_params->cfg0; + cmd->cfg_param1 = cfg_params->cfg1; + cmd->cfg_param2 = cfg_params->cfg2; + cmd->cfg_param3 = cfg_params->cfg3; + cmd->cookie_lsb = lower_32_bits(cookie); + cmd->cookie_msb = upper_32_bits(cookie); + + ret = ath11k_htc_send(&ab->htc, dp->eid, skb); + if (ret) { + ath11k_warn(ab, "failed to send htt type stats request: 
%d", + ret); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) +{ + struct ath11k_pdev_dp *dp = &ar->dp; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + int ret = 0, ring_id = 0; + + ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; + + if (!reset) { + tlv_filter.rx_filter = htt_rx_mon_filter_tlv_flags_mon_buf_ring; + tlv_filter.pkt_filter_flags0 = + htt_rx_mon_fp_mgmt_filter_flags0 | + htt_rx_mon_mo_mgmt_filter_flags0; + tlv_filter.pkt_filter_flags1 = + htt_rx_mon_fp_mgmt_filter_flags1 | + htt_rx_mon_mo_mgmt_filter_flags1; + tlv_filter.pkt_filter_flags2 = + htt_rx_mon_fp_ctrl_filter_flasg2 | + htt_rx_mon_mo_ctrl_filter_flasg2; + tlv_filter.pkt_filter_flags3 = + htt_rx_mon_fp_ctrl_filter_flasg3 | + htt_rx_mon_mo_ctrl_filter_flasg3 | + htt_rx_mon_fp_data_filter_flasg3 | + htt_rx_mon_mo_data_filter_flasg3; + } + + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id, + hal_rxdma_monitor_buf, + dp_rxdma_refill_ring_size, + &tlv_filter); + if (ret) + return ret; + + ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id; + if (!reset) + tlv_filter.rx_filter = + htt_rx_mon_filter_tlv_flags_mon_status_ring; + else + tlv_filter = ath11k_mac_mon_status_filter_default; + + ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id, + hal_rxdma_monitor_status, + dp_rxdma_refill_ring_size, + &tlv_filter); + return ret; +} diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dp_tx.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_dp_tx_h +#define ath11k_dp_tx_h + +#include "core.h" +#include "hal_tx.h" + +struct ath11k_dp_htt_wbm_tx_status { + u32 msdu_id; + bool acked; + int ack_rssi; +}; + +int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab); +int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, + struct sk_buff *skb); +void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id); +int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid, + enum hal_reo_cmd_type type, + struct ath11k_hal_reo_cmd *cmd, + void (*func)(struct ath11k_dp *, void *, + enum hal_reo_cmd_status)); + +int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask); +int +ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type, + struct htt_ext_stats_cfg_params *cfg_params, + u64 cookie); +int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset); + +int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id, + int mac_id, enum hal_ring_type ring_type, + int rx_buf_size, + struct htt_rx_ring_tlv_filter *tlv_filter); + +#endif diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ +#include <linux/dma-mapping.h> +#include "ahb.h" +#include "hal_tx.h" +#include "debug.h" +#include "hal_desc.h" + +static const struct hal_srng_config hw_srng_config[] = { + /* todo: max_rings can populated by querying hw capabilities */ + { /* reo_dst */ + .start_ring_id = hal_srng_ring_id_reo2sw1, + .max_rings = 4, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_dst, + .reg_start = { + hal_seq_wcss_umac_reo_reg + hal_reo1_ring_base_lsb, + hal_seq_wcss_umac_reo_reg + hal_reo1_ring_hp, + }, + .reg_size = { + hal_reo2_ring_base_lsb - hal_reo1_ring_base_lsb, + hal_reo2_ring_hp - hal_reo1_ring_hp, + }, + .max_size = hal_reo_reo2sw1_ring_base_msb_ring_size, + }, + { /* reo_exception */ + /* designating reo2tcl ring as exception ring. this ring is + * similar to other reo2sw rings though it is named as reo2tcl. + * any of thereo2sw rings can be used as exception ring. + */ + .start_ring_id = hal_srng_ring_id_reo2tcl, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_dest_ring) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_dst, + .reg_start = { + hal_seq_wcss_umac_reo_reg + hal_reo_tcl_ring_base_lsb, + hal_seq_wcss_umac_reo_reg + hal_reo_tcl_ring_hp, + }, + .max_size = hal_reo_reo2tcl_ring_base_msb_ring_size, + }, + { /* reo_reinject */ + .start_ring_id = hal_srng_ring_id_sw2reo, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + hal_seq_wcss_umac_reo_reg + hal_sw2reo_ring_base_lsb, + hal_seq_wcss_umac_reo_reg + hal_sw2reo_ring_hp, + }, + .max_size = hal_reo_sw2reo_ring_base_msb_ring_size, + }, + { /* reo_cmd */ + .start_ring_id = hal_srng_ring_id_reo_cmd, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_reo_get_queue_stats)) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + hal_seq_wcss_umac_reo_reg + hal_reo_cmd_ring_base_lsb, + 
hal_seq_wcss_umac_reo_reg + hal_reo_cmd_hp, + }, + .max_size = hal_reo_cmd_ring_base_msb_ring_size, + }, + { /* reo_status */ + .start_ring_id = hal_srng_ring_id_reo_status, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_reo_get_queue_stats_status)) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_dst, + .reg_start = { + hal_seq_wcss_umac_reo_reg + + hal_reo_status_ring_base_lsb, + hal_seq_wcss_umac_reo_reg + hal_reo_status_hp, + }, + .max_size = hal_reo_status_ring_base_msb_ring_size, + }, + { /* tcl_data */ + .start_ring_id = hal_srng_ring_id_sw2tcl1, + .max_rings = 3, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_tcl_data_cmd)) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + hal_seq_wcss_umac_tcl_reg + hal_tcl1_ring_base_lsb, + hal_seq_wcss_umac_tcl_reg + hal_tcl1_ring_hp, + }, + .reg_size = { + hal_tcl2_ring_base_lsb - hal_tcl1_ring_base_lsb, + hal_tcl2_ring_hp - hal_tcl1_ring_hp, + }, + .max_size = hal_sw2tcl1_ring_base_msb_ring_size, + }, + { /* tcl_cmd */ + .start_ring_id = hal_srng_ring_id_sw2tcl_cmd, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_tcl_gse_cmd)) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + hal_seq_wcss_umac_tcl_reg + hal_tcl_ring_base_lsb, + hal_seq_wcss_umac_tcl_reg + hal_tcl_ring_hp, + }, + .max_size = hal_sw2tcl1_cmd_ring_base_msb_ring_size, + }, + { /* tcl_status */ + .start_ring_id = hal_srng_ring_id_tcl_status, + .max_rings = 1, + .entry_size = (sizeof(struct hal_tlv_hdr) + + sizeof(struct hal_tcl_status_ring)) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_dst, + .reg_start = { + hal_seq_wcss_umac_tcl_reg + + hal_tcl_status_ring_base_lsb, + hal_seq_wcss_umac_tcl_reg + hal_tcl_status_ring_hp, + }, + .max_size = hal_tcl_status_ring_base_msb_ring_size, + }, + { /* ce_src */ + .start_ring_id = hal_srng_ring_id_ce0_src, + .max_rings = 12, + .entry_size = sizeof(struct 
hal_ce_srng_src_desc) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + (hal_seq_wcss_umac_ce0_src_reg + + hal_ce_dst_ring_base_lsb), + hal_seq_wcss_umac_ce0_src_reg + hal_ce_dst_ring_hp, + }, + .reg_size = { + (hal_seq_wcss_umac_ce1_src_reg - + hal_seq_wcss_umac_ce0_src_reg), + (hal_seq_wcss_umac_ce1_src_reg - + hal_seq_wcss_umac_ce0_src_reg), + }, + .max_size = hal_ce_src_ring_base_msb_ring_size, + }, + { /* ce_dst */ + .start_ring_id = hal_srng_ring_id_ce0_dst, + .max_rings = 12, + .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + (hal_seq_wcss_umac_ce0_dst_reg + + hal_ce_dst_ring_base_lsb), + hal_seq_wcss_umac_ce0_dst_reg + hal_ce_dst_ring_hp, + }, + .reg_size = { + (hal_seq_wcss_umac_ce1_dst_reg - + hal_seq_wcss_umac_ce0_dst_reg), + (hal_seq_wcss_umac_ce1_dst_reg - + hal_seq_wcss_umac_ce0_dst_reg), + }, + .max_size = hal_ce_dst_ring_base_msb_ring_size, + }, + { /* ce_dst_status */ + .start_ring_id = hal_srng_ring_id_ce0_dst_status, + .max_rings = 12, + .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_dst, + .reg_start = { + (hal_seq_wcss_umac_ce0_dst_reg + + hal_ce_dst_status_ring_base_lsb), + (hal_seq_wcss_umac_ce0_dst_reg + + hal_ce_dst_status_ring_hp), + }, + .reg_size = { + (hal_seq_wcss_umac_ce1_dst_reg - + hal_seq_wcss_umac_ce0_dst_reg), + (hal_seq_wcss_umac_ce1_dst_reg - + hal_seq_wcss_umac_ce0_dst_reg), + }, + .max_size = hal_ce_dst_status_ring_base_msb_ring_size, + }, + { /* wbm_idle_link */ + .start_ring_id = hal_srng_ring_id_wbm_idle_link, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_link_desc) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + (hal_seq_wcss_umac_wbm_reg + + hal_wbm_idle_link_ring_base_lsb), + (hal_seq_wcss_umac_wbm_reg + hal_wbm_idle_link_ring_hp), + }, + .max_size = hal_wbm_idle_link_ring_base_msb_ring_size, + }, + { /* 
sw2wbm_release */ + .start_ring_id = hal_srng_ring_id_wbm_sw_release, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_src, + .reg_start = { + (hal_seq_wcss_umac_wbm_reg + + hal_wbm_release_ring_base_lsb), + (hal_seq_wcss_umac_wbm_reg + hal_wbm_release_ring_hp), + }, + .max_size = hal_sw2wbm_release_ring_base_msb_ring_size, + }, + { /* wbm2sw_release */ + .start_ring_id = hal_srng_ring_id_wbm2sw0_release, + .max_rings = 4, + .entry_size = sizeof(struct hal_wbm_release_ring) >> 2, + .lmac_ring = false, + .ring_dir = hal_srng_dir_dst, + .reg_start = { + (hal_seq_wcss_umac_wbm_reg + + hal_wbm0_release_ring_base_lsb), + (hal_seq_wcss_umac_wbm_reg + hal_wbm0_release_ring_hp), + }, + .reg_size = { + (hal_wbm1_release_ring_base_lsb - + hal_wbm0_release_ring_base_lsb), + (hal_wbm1_release_ring_hp - hal_wbm0_release_ring_hp), + }, + .max_size = hal_wbm2sw_release_ring_base_msb_ring_size, + }, + { /* rxdma_buf */ + .start_ring_id = hal_srng_ring_id_wmac1_sw2rxdma0_buf, + .max_rings = 2, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .lmac_ring = true, + .ring_dir = hal_srng_dir_src, + .max_size = hal_rxdma_ring_max_size, + }, + { /* rxdma_dst */ + .start_ring_id = hal_srng_ring_id_wmac1_rxdma2sw0, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, + .lmac_ring = true, + .ring_dir = hal_srng_dir_dst, + .max_size = hal_rxdma_ring_max_size, + }, + { /* rxdma_monitor_buf */ + .start_ring_id = hal_srng_ring_id_wmac1_sw2rxdma2_buf, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .lmac_ring = true, + .ring_dir = hal_srng_dir_src, + .max_size = hal_rxdma_ring_max_size, + }, + { /* rxdma_monitor_status */ + .start_ring_id = hal_srng_ring_id_wmac1_sw2rxdma1_statbuf, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .lmac_ring = true, + .ring_dir = hal_srng_dir_src, + .max_size = hal_rxdma_ring_max_size, + }, + { /* 
rxdma_monitor_dst */ + .start_ring_id = hal_srng_ring_id_wmac1_rxdma2sw1, + .max_rings = 1, + .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2, + .lmac_ring = true, + .ring_dir = hal_srng_dir_dst, + .max_size = hal_rxdma_ring_max_size, + }, + { /* rxdma_monitor_desc */ + .start_ring_id = hal_srng_ring_id_wmac1_sw2rxdma1_desc, + .max_rings = 1, + .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2, + .lmac_ring = true, + .ring_dir = hal_srng_dir_src, + .max_size = hal_rxdma_ring_max_size, + }, + { /* rxdma dir buf */ + .start_ring_id = hal_srng_ring_id_rxdma_dir_buf, + .max_rings = 1, + .entry_size = 8 >> 2, /* todo: define the struct */ + .lmac_ring = true, + .ring_dir = hal_srng_dir_src, + .max_size = hal_rxdma_ring_max_size, + }, +}; + +static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + size_t size; + + size = sizeof(u32) * hal_srng_ring_id_max; + hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr, + gfp_kernel); + if (!hal->rdp.vaddr) + return -enomem; + + return 0; +} + +static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + size_t size; + + if (!hal->rdp.vaddr) + return; + + size = sizeof(u32) * hal_srng_ring_id_max; + dma_free_coherent(ab->dev, size, + hal->rdp.vaddr, hal->rdp.paddr); + hal->rdp.vaddr = null; +} + +static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + size_t size; + + size = sizeof(u32) * hal_srng_num_lmac_rings; + hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr, + gfp_kernel); + if (!hal->wrp.vaddr) + return -enomem; + + return 0; +} + +static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + size_t size; + + if (!hal->wrp.vaddr) + return; + + size = sizeof(u32) * hal_srng_num_lmac_rings; + dma_free_coherent(ab->dev, size, + hal->wrp.vaddr, hal->wrp.paddr); + hal->wrp.vaddr = null; +} + +static void 
ath11k_hal_ce_dst_setup(struct ath11k_base *ab, + struct hal_srng *srng, int ring_num) +{ + const struct hal_srng_config *srng_config = &hw_srng_config[hal_ce_dst]; + u32 addr; + u32 val; + + addr = hal_ce_dst_ring_ctrl + + srng_config->reg_start[hal_srng_reg_grp_r0] + + ring_num * srng_config->reg_size[hal_srng_reg_grp_r0]; + val = ath11k_ahb_read32(ab, addr); + val &= ~hal_ce_dst_r0_dest_ctrl_max_len; + val |= field_prep(hal_ce_dst_r0_dest_ctrl_max_len, + srng->u.dst_ring.max_buffer_length); + ath11k_ahb_write32(ab, addr, val); +} + +static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab, + struct hal_srng *srng) +{ + struct ath11k_hal *hal = &ab->hal; + u32 val; + u64 hp_addr; + u32 reg_base; + + reg_base = srng->hwreg_base[hal_srng_reg_grp_r0]; + + if (srng->flags & hal_srng_flags_msi_intr) { + ath11k_ahb_write32(ab, reg_base + + hal_reo1_ring_msi1_base_lsb_offset, + (u32)srng->msi_addr); + + val = field_prep(hal_reo1_ring_msi1_base_msb_addr, + ((u64)srng->msi_addr >> + hal_addr_msb_reg_shift)) | + hal_reo1_ring_msi1_base_msb_msi1_enable; + ath11k_ahb_write32(ab, reg_base + + hal_reo1_ring_msi1_base_msb_offset, val); + + ath11k_ahb_write32(ab, + reg_base + hal_reo1_ring_msi1_data_offset, + srng->msi_data); + } + + ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr); + + val = field_prep(hal_reo1_ring_base_msb_ring_base_addr_msb, + ((u64)srng->ring_base_paddr >> + hal_addr_msb_reg_shift)) | + field_prep(hal_reo1_ring_base_msb_ring_size, + (srng->entry_size * srng->num_entries)); + ath11k_ahb_write32(ab, reg_base + hal_reo1_ring_base_msb_offset, val); + + val = field_prep(hal_reo1_ring_id_ring_id, srng->ring_id) | + field_prep(hal_reo1_ring_id_entry_size, srng->entry_size); + ath11k_ahb_write32(ab, reg_base + hal_reo1_ring_id_offset, val); + + /* interrupt setup */ + val = field_prep(hal_reo1_ring_prdr_int_setup_intr_tmr_thold, + (srng->intr_timer_thres_us >> 3)); + + val |= field_prep(hal_reo1_ring_prdr_int_setup_batch_counter_thold, + 
(srng->intr_batch_cntr_thres_entries * + srng->entry_size)); + + ath11k_ahb_write32(ab, + reg_base + hal_reo1_ring_producer_int_setup_offset, + val); + + hp_addr = hal->rdp.paddr + + ((unsigned long)srng->u.dst_ring.hp_addr - + (unsigned long)hal->rdp.vaddr); + ath11k_ahb_write32(ab, reg_base + hal_reo1_ring_hp_addr_lsb_offset, + hp_addr & hal_addr_lsb_reg_mask); + ath11k_ahb_write32(ab, reg_base + hal_reo1_ring_hp_addr_msb_offset, + hp_addr >> hal_addr_msb_reg_shift); + + /* initialize head and tail pointers to indicate ring is empty */ + reg_base = srng->hwreg_base[hal_srng_reg_grp_r2]; + ath11k_ahb_write32(ab, reg_base, 0); + ath11k_ahb_write32(ab, reg_base + hal_reo1_ring_tp_offset, 0); + *srng->u.dst_ring.hp_addr = 0; + + reg_base = srng->hwreg_base[hal_srng_reg_grp_r0]; + val = 0; + if (srng->flags & hal_srng_flags_data_tlv_swap) + val |= hal_reo1_ring_misc_data_tlv_swap; + if (srng->flags & hal_srng_flags_ring_ptr_swap) + val |= hal_reo1_ring_misc_host_fw_swap; + if (srng->flags & hal_srng_flags_msi_swap) + val |= hal_reo1_ring_misc_msi_swap; + val |= hal_reo1_ring_misc_srng_enable; + + ath11k_ahb_write32(ab, reg_base + hal_reo1_ring_misc_offset, val); +} + +static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab, + struct hal_srng *srng) +{ + struct ath11k_hal *hal = &ab->hal; + u32 val; + u64 tp_addr; + u32 reg_base; + + reg_base = srng->hwreg_base[hal_srng_reg_grp_r0]; + + if (srng->flags & hal_srng_flags_msi_intr) { + ath11k_ahb_write32(ab, reg_base + + hal_tcl1_ring_msi1_base_lsb_offset, + (u32)srng->msi_addr); + + val = field_prep(hal_tcl1_ring_msi1_base_msb_addr, + ((u64)srng->msi_addr >> + hal_addr_msb_reg_shift)) | + hal_tcl1_ring_msi1_base_msb_msi1_enable; + ath11k_ahb_write32(ab, reg_base + + hal_tcl1_ring_msi1_base_msb_offset, + val); + + ath11k_ahb_write32(ab, reg_base + + hal_tcl1_ring_msi1_data_offset, + srng->msi_data); + } + + ath11k_ahb_write32(ab, reg_base, (u32)srng->ring_base_paddr); + + val = 
field_prep(hal_tcl1_ring_base_msb_ring_base_addr_msb, + ((u64)srng->ring_base_paddr >> + hal_addr_msb_reg_shift)) | + field_prep(hal_tcl1_ring_base_msb_ring_size, + (srng->entry_size * srng->num_entries)); + ath11k_ahb_write32(ab, reg_base + hal_tcl1_ring_base_msb_offset, val); + + val = field_prep(hal_reo1_ring_id_entry_size, srng->entry_size); + ath11k_ahb_write32(ab, reg_base + hal_tcl1_ring_id_offset, val); + + /* interrupt setup */ + /* note: ipq8074 v2 requires the interrupt timer threshold in the + * unit of 8 usecs instead of 1 usec (as required by v1). + */ + val = field_prep(hal_tcl1_ring_consr_int_setup_ix0_intr_tmr_thold, + srng->intr_timer_thres_us); + + val |= field_prep(hal_tcl1_ring_consr_int_setup_ix0_batch_counter_thold, + (srng->intr_batch_cntr_thres_entries * + srng->entry_size)); + + ath11k_ahb_write32(ab, + reg_base + hal_tcl1_ring_consr_int_setup_ix0_offset, + val); + + val = 0; + if (srng->flags & hal_srng_flags_low_thresh_intr_en) { + val |= field_prep(hal_tcl1_ring_consr_int_setup_ix1_low_thold, + srng->u.src_ring.low_threshold); + } + ath11k_ahb_write32(ab, + reg_base + hal_tcl1_ring_consr_int_setup_ix1_offset, + val); + + if (srng->ring_id != hal_srng_ring_id_wbm_idle_link) { + tp_addr = hal->rdp.paddr + + ((unsigned long)srng->u.src_ring.tp_addr - + (unsigned long)hal->rdp.vaddr); + ath11k_ahb_write32(ab, + reg_base + hal_tcl1_ring_tp_addr_lsb_offset, + tp_addr & hal_addr_lsb_reg_mask); + ath11k_ahb_write32(ab, + reg_base + hal_tcl1_ring_tp_addr_msb_offset, + tp_addr >> hal_addr_msb_reg_shift); + } + + /* initialize head and tail pointers to indicate ring is empty */ + reg_base = srng->hwreg_base[hal_srng_reg_grp_r2]; + ath11k_ahb_write32(ab, reg_base, 0); + ath11k_ahb_write32(ab, reg_base + hal_tcl1_ring_tp_offset, 0); + *srng->u.src_ring.tp_addr = 0; + + reg_base = srng->hwreg_base[hal_srng_reg_grp_r0]; + val = 0; + if (srng->flags & hal_srng_flags_data_tlv_swap) + val |= hal_tcl1_ring_misc_data_tlv_swap; + if (srng->flags & 
hal_srng_flags_ring_ptr_swap) + val |= hal_tcl1_ring_misc_host_fw_swap; + if (srng->flags & hal_srng_flags_msi_swap) + val |= hal_tcl1_ring_misc_msi_swap; + + /* loop count is not used for src rings */ + val |= hal_tcl1_ring_misc_msi_loopcnt_disable; + + val |= hal_tcl1_ring_misc_srng_enable; + + ath11k_ahb_write32(ab, reg_base + hal_tcl1_ring_misc_offset, val); +} + +static void ath11k_hal_srng_hw_init(struct ath11k_base *ab, + struct hal_srng *srng) +{ + if (srng->ring_dir == hal_srng_dir_src) + ath11k_hal_srng_src_hw_init(ab, srng); + else + ath11k_hal_srng_dst_hw_init(ab, srng); +} + +static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab, + enum hal_ring_type type, + int ring_num, int mac_id) +{ + const struct hal_srng_config *srng_config = &hw_srng_config[type]; + int ring_id; + + if (ring_num >= srng_config->max_rings) { + ath11k_warn(ab, "invalid ring number :%d ", ring_num); + return -einval; + } + + ring_id = srng_config->start_ring_id + ring_num; + if (srng_config->lmac_ring) + ring_id += mac_id * hal_srng_rings_per_lmac; + + if (warn_on(ring_id >= hal_srng_ring_id_max)) + return -einval; + + return ring_id; +} + +int ath11k_hal_srng_get_entrysize(u32 ring_type) +{ + const struct hal_srng_config *srng_config; + + if (warn_on(ring_type >= hal_max_ring_types)) + return -einval; + + srng_config = &hw_srng_config[ring_type]; + + return (srng_config->entry_size << 2); +} + +int ath11k_hal_srng_get_max_entries(u32 ring_type) +{ + const struct hal_srng_config *srng_config; + + if (warn_on(ring_type >= hal_max_ring_types)) + return -einval; + + srng_config = &hw_srng_config[ring_type]; + + return (srng_config->max_size / srng_config->entry_size); +} + +void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng, + struct hal_srng_params *params) +{ + params->ring_base_paddr = srng->ring_base_paddr; + params->ring_base_vaddr = srng->ring_base_vaddr; + params->num_entries = srng->num_entries; + params->intr_timer_thres_us = 
srng->intr_timer_thres_us; + params->intr_batch_cntr_thres_entries = + srng->intr_batch_cntr_thres_entries; + params->low_threshold = srng->u.src_ring.low_threshold; + params->flags = srng->flags; +} + +dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab, + struct hal_srng *srng) +{ + if (!(srng->flags & hal_srng_flags_lmac_ring)) + return 0; + + if (srng->ring_dir == hal_srng_dir_src) + return ab->hal.wrp.paddr + + ((unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->hal.wrp.vaddr); + else + return ab->hal.rdp.paddr + + ((unsigned long)srng->u.dst_ring.hp_addr - + (unsigned long)ab->hal.rdp.vaddr); +} + +dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab, + struct hal_srng *srng) +{ + if (!(srng->flags & hal_srng_flags_lmac_ring)) + return 0; + + if (srng->ring_dir == hal_srng_dir_src) + return ab->hal.rdp.paddr + + ((unsigned long)srng->u.src_ring.tp_addr - + (unsigned long)ab->hal.rdp.vaddr); + else + return ab->hal.wrp.paddr + + ((unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->hal.wrp.vaddr); +} + +u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type) +{ + switch (type) { + case hal_ce_desc_src: + return sizeof(struct hal_ce_srng_src_desc); + case hal_ce_desc_dst: + return sizeof(struct hal_ce_srng_dest_desc); + case hal_ce_desc_dst_status: + return sizeof(struct hal_ce_srng_dst_status_desc); + } + + return 0; +} + +void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, + u8 byte_swap_data) +{ + struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf; + + desc->buffer_addr_low = paddr & hal_addr_lsb_reg_mask; + desc->buffer_addr_info = + field_prep(hal_ce_src_desc_addr_info_addr_hi, + ((u64)paddr >> hal_addr_msb_reg_shift)) | + field_prep(hal_ce_src_desc_addr_info_byte_swap, + byte_swap_data) | + field_prep(hal_ce_src_desc_addr_info_gather, 0) | + field_prep(hal_ce_src_desc_addr_info_len, len); + desc->meta_info = field_prep(hal_ce_src_desc_meta_info_data, id); +} + +void 
ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr) +{ + struct hal_ce_srng_dest_desc *desc = + (struct hal_ce_srng_dest_desc *)buf; + + desc->buffer_addr_low = paddr & hal_addr_lsb_reg_mask; + desc->buffer_addr_info = + field_prep(hal_ce_dest_desc_addr_info_addr_hi, + ((u64)paddr >> hal_addr_msb_reg_shift)); +} + +u32 ath11k_hal_ce_dst_status_get_length(void *buf) +{ + struct hal_ce_srng_dst_status_desc *desc = + (struct hal_ce_srng_dst_status_desc *)buf; + u32 len; + + len = field_get(hal_ce_dst_status_desc_flags_len, desc->flags); + desc->flags &= ~hal_ce_dst_status_desc_flags_len; + + return len; +} + +void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, + dma_addr_t paddr) +{ + desc->buf_addr_info.info0 = field_prep(buffer_addr_info0_addr, + (paddr & hal_addr_lsb_reg_mask)); + desc->buf_addr_info.info1 = field_prep(buffer_addr_info1_addr, + ((u64)paddr >> hal_addr_msb_reg_shift)) | + field_prep(buffer_addr_info1_ret_buf_mgr, 1) | + field_prep(buffer_addr_info1_sw_cookie, cookie); +} + +u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) + return (srng->ring_base_vaddr + srng->u.dst_ring.tp); + + return null; +} + +u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, + struct hal_srng *srng) +{ + u32 *desc; + + lockdep_assert_held(&srng->lock); + + if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp) + return null; + + desc = srng->ring_base_vaddr + srng->u.dst_ring.tp; + + srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) % + srng->ring_size; + + return desc; +} + +int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr) +{ + u32 tp, hp; + + lockdep_assert_held(&srng->lock); + + tp = srng->u.dst_ring.tp; + + if (sync_hw_ptr) { + hp = *srng->u.dst_ring.hp_addr; + srng->u.dst_ring.cached_hp = hp; + } else { + hp = srng->u.dst_ring.cached_hp; + 
} + + if (hp >= tp) + return (hp - tp) / srng->entry_size; + else + return (srng->ring_size - tp + hp) / srng->entry_size; +} + +/* returns number of available entries in src ring */ +int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr) +{ + u32 tp, hp; + + lockdep_assert_held(&srng->lock); + + hp = srng->u.src_ring.hp; + + if (sync_hw_ptr) { + tp = *srng->u.src_ring.tp_addr; + srng->u.src_ring.cached_tp = tp; + } else { + tp = srng->u.src_ring.cached_tp; + } + + if (tp > hp) + return ((tp - hp) / srng->entry_size) - 1; + else + return ((srng->ring_size - hp + tp) / srng->entry_size) - 1; +} + +u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab, + struct hal_srng *srng) +{ + u32 *desc; + u32 next_hp; + + lockdep_assert_held(&srng->lock); + + /* todo: using % is expensive, but we have to do this since size of some + * srng rings is not power of 2 (due to descriptor sizes). need to see + * if separate function is defined for rings having power of 2 ring size + * (tcl2sw, reo2sw, sw2rxdma and ce rings) so that we can avoid the + * overhead of % by using mask (with &). + */ + next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; + + if (next_hp == srng->u.src_ring.cached_tp) + return null; + + desc = srng->ring_base_vaddr + srng->u.src_ring.hp; + srng->u.src_ring.hp = next_hp; + + /* todo: reap functionality is not used by all rings. if particular + * ring does not use reap functionality, we need not update reap_hp + * with next_hp pointer. need to make sure a separate function is used + * before doing any optimization by removing below code updating + * reap_hp. 
+ */ + srng->u.src_ring.reap_hp = next_hp; + + return desc; +} + +u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab, + struct hal_srng *srng) +{ + u32 *desc; + u32 next_reap_hp; + + lockdep_assert_held(&srng->lock); + + next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) % + srng->ring_size; + + if (next_reap_hp == srng->u.src_ring.cached_tp) + return null; + + desc = srng->ring_base_vaddr + next_reap_hp; + srng->u.src_ring.reap_hp = next_reap_hp; + + return desc; +} + +u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, + struct hal_srng *srng) +{ + u32 *desc; + + lockdep_assert_held(&srng->lock); + + if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp) + return null; + + desc = srng->ring_base_vaddr + srng->u.src_ring.hp; + srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) % + srng->ring_size; + + return desc; +} + +u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) == + srng->u.src_ring.cached_tp) + return null; + + return srng->ring_base_vaddr + srng->u.src_ring.hp; +} + +void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + if (srng->ring_dir == hal_srng_dir_src) + srng->u.src_ring.cached_tp = + *(volatile u32 *)srng->u.src_ring.tp_addr; + else + srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr; +} + +/* update cached ring head/tail pointers to hw. ath11k_hal_srng_access_begin() + * should have been called before this. 
+ */ +void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng) +{ + lockdep_assert_held(&srng->lock); + + /* todo: see if we need a write memory barrier here */ + if (srng->flags & hal_srng_flags_lmac_ring) { + /* for lmac rings, ring pointer updates are done through fw and + * hence written to a shared memory location that is read by fw + */ + if (srng->ring_dir == hal_srng_dir_src) + *srng->u.src_ring.hp_addr = srng->u.src_ring.hp; + else + *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp; + } else { + if (srng->ring_dir == hal_srng_dir_src) { + ath11k_ahb_write32(ab, + (unsigned long)srng->u.src_ring.hp_addr - + (unsigned long)ab->mem, + srng->u.src_ring.hp); + } else { + ath11k_ahb_write32(ab, + (unsigned long)srng->u.dst_ring.tp_addr - + (unsigned long)ab->mem, + srng->u.dst_ring.tp); + } + } +} + +void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, + struct hal_wbm_idle_scatter_list *sbuf, + u32 nsbufs, u32 tot_link_desc, + u32 end_offset) +{ + struct ath11k_buffer_addr *link_addr; + int i; + u32 reg_scatter_buf_sz = hal_wbm_idle_scatter_buf_size / 64; + + link_addr = (void *)sbuf[0].vaddr + hal_wbm_idle_scatter_buf_size; + + for (i = 1; i < nsbufs; i++) { + link_addr->info0 = sbuf[i].paddr & hal_addr_lsb_reg_mask; + link_addr->info1 = field_prep( + hal_wbm_scattered_desc_msb_base_addr_39_32, + (u64)sbuf[i].paddr >> hal_addr_msb_reg_shift) | + field_prep( + hal_wbm_scattered_desc_msb_base_addr_match_tag, + base_addr_match_tag_val); + + link_addr = (void *)sbuf[i].vaddr + + hal_wbm_idle_scatter_buf_size; + } + + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + hal_wbm_r0_idle_list_control_addr, + field_prep(hal_wbm_scatter_buffer_size, reg_scatter_buf_sz) | + field_prep(hal_wbm_link_desc_idle_list_mode, 0x1)); + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + hal_wbm_r0_idle_list_size_addr, + field_prep(hal_wbm_scatter_ring_size_of_idle_link_desc_list, + reg_scatter_buf_sz * nsbufs)); + ath11k_ahb_write32(ab, + 
hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_ring_base_lsb, + field_prep(buffer_addr_info0_addr, + sbuf[0].paddr & hal_addr_lsb_reg_mask)); + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_ring_base_msb, + field_prep( + hal_wbm_scattered_desc_msb_base_addr_39_32, + (u64)sbuf[0].paddr >> hal_addr_msb_reg_shift) | + field_prep( + hal_wbm_scattered_desc_msb_base_addr_match_tag, + base_addr_match_tag_val)); + + /* setup head and tail pointers for the idle list */ + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_desc_ptr_head_info_ix0, + field_prep(buffer_addr_info0_addr, + sbuf[nsbufs - 1].paddr)); + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_desc_ptr_head_info_ix1, + field_prep( + hal_wbm_scattered_desc_msb_base_addr_39_32, + ((u64)sbuf[nsbufs - 1].paddr >> + hal_addr_msb_reg_shift)) | + field_prep(hal_wbm_scattered_desc_head_p_offset_ix1, + (end_offset >> 2))); + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_desc_ptr_head_info_ix0, + field_prep(buffer_addr_info0_addr, + sbuf[0].paddr)); + + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_desc_ptr_tail_info_ix0, + field_prep(buffer_addr_info0_addr, + sbuf[0].paddr)); + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_desc_ptr_tail_info_ix1, + field_prep( + hal_wbm_scattered_desc_msb_base_addr_39_32, + ((u64)sbuf[0].paddr >> hal_addr_msb_reg_shift)) | + field_prep(hal_wbm_scattered_desc_tail_p_offset_ix1, + 0)); + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_scattered_desc_ptr_hp_addr, + 2 * tot_link_desc); + + /* enable the srng */ + ath11k_ahb_write32(ab, + hal_seq_wcss_umac_wbm_reg + + hal_wbm_idle_link_ring_misc_addr, 0x40); +} + +int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, + int ring_num, int mac_id, + struct hal_srng_params *params) +{ + struct ath11k_hal *hal = &ab->hal; + const struct hal_srng_config 
*srng_config = &hw_srng_config[type]; + struct hal_srng *srng; + int ring_id; + u32 lmac_idx; + int i; + u32 reg_base; + + ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id); + if (ring_id < 0) + return ring_id; + + srng = &hal->srng_list[ring_id]; + + srng->ring_id = ring_id; + srng->ring_dir = srng_config->ring_dir; + srng->ring_base_paddr = params->ring_base_paddr; + srng->ring_base_vaddr = params->ring_base_vaddr; + srng->entry_size = srng_config->entry_size; + srng->num_entries = params->num_entries; + srng->ring_size = srng->entry_size * srng->num_entries; + srng->intr_batch_cntr_thres_entries = + params->intr_batch_cntr_thres_entries; + srng->intr_timer_thres_us = params->intr_timer_thres_us; + srng->flags = params->flags; + spin_lock_init(&srng->lock); + + for (i = 0; i < hal_srng_num_reg_grp; i++) { + srng->hwreg_base[i] = srng_config->reg_start[i] + + (ring_num * srng_config->reg_size[i]); + } + + memset(srng->ring_base_vaddr, 0, + (srng->entry_size * srng->num_entries) << 2); + + /* todo: add comments on these swap configurations */ + if (is_enabled(config_cpu_big_endian)) + srng->flags |= hal_srng_flags_msi_swap | hal_srng_flags_data_tlv_swap | + hal_srng_flags_ring_ptr_swap; + + reg_base = srng->hwreg_base[hal_srng_reg_grp_r2]; + + if (srng->ring_dir == hal_srng_dir_src) { + srng->u.src_ring.hp = 0; + srng->u.src_ring.cached_tp = 0; + srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size; + srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id); + srng->u.src_ring.low_threshold = params->low_threshold * + srng->entry_size; + if (srng_config->lmac_ring) { + lmac_idx = ring_id - hal_srng_ring_id_lmac1_id_start; + srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr + + lmac_idx); + srng->flags |= hal_srng_flags_lmac_ring; + } else { + srng->u.src_ring.hp_addr = + (u32 *)((unsigned long)ab->mem + reg_base); + } + } else { + /* during initialization loop count in all the descriptors + * will be set to zero, and hw will set it to 
1 on completing + * descriptor update in first loop, and increments it by 1 on + * subsequent loops (loop count wraps around after reaching + * 0xffff). the 'loop_cnt' in sw ring state is the expected + * loop count in descriptors updated by hw (to be processed + * by sw). + */ + srng->u.dst_ring.loop_cnt = 1; + srng->u.dst_ring.tp = 0; + srng->u.dst_ring.cached_hp = 0; + srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id); + if (srng_config->lmac_ring) { + /* for lmac rings, tail pointer updates will be done + * through fw by writing to a shared memory location + */ + lmac_idx = ring_id - hal_srng_ring_id_lmac1_id_start; + srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr + + lmac_idx); + srng->flags |= hal_srng_flags_lmac_ring; + } else { + srng->u.dst_ring.tp_addr = + (u32 *)((unsigned long)ab->mem + reg_base + + (hal_reo1_ring_tp - hal_reo1_ring_hp)); + } + } + + if (srng_config->lmac_ring) + return ring_id; + + ath11k_hal_srng_hw_init(ab, srng); + + if (type == hal_ce_dst) { + srng->u.dst_ring.max_buffer_length = params->max_buffer_len; + ath11k_hal_ce_dst_setup(ab, srng, ring_num); + } + + return ring_id; +} + +int ath11k_hal_srng_init(struct ath11k_base *ab) +{ + struct ath11k_hal *hal = &ab->hal; + int ret; + + memset(hal, 0, sizeof(*hal)); + + hal->srng_config = hw_srng_config; + + ret = ath11k_hal_alloc_cont_rdp(ab); + if (ret) + goto err_hal; + + ret = ath11k_hal_alloc_cont_wrp(ab); + if (ret) + goto err_free_cont_rdp; + + return 0; + +err_free_cont_rdp: + ath11k_hal_free_cont_rdp(ab); + +err_hal: + return ret; +} + +void ath11k_hal_srng_deinit(struct ath11k_base *ab) +{ + ath11k_hal_free_cont_rdp(ab); + ath11k_hal_free_cont_wrp(ab); +} diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_hal_h +#define ath11k_hal_h + +#include "hal_desc.h" +#include "rx_desc.h" + +struct ath11k_base; + +#define hal_link_desc_size (32 << 2) +#define hal_link_desc_align 128 +#define hal_num_mpdus_per_link_desc 6 +#define hal_num_tx_msdus_per_link_desc 7 +#define hal_num_rx_msdus_per_link_desc 6 +#define hal_num_mpdu_links_per_queue_desc 12 +#define hal_max_avail_blk_res 3 + +#define hal_ring_base_align 8 + +#define hal_wbm_idle_scatter_buf_size_max 32704 +/* todo: check with hw team on the supported scatter buf size */ +#define hal_wbm_idle_scatter_next_ptr_size 8 +#define hal_wbm_idle_scatter_buf_size (hal_wbm_idle_scatter_buf_size_max - \ + hal_wbm_idle_scatter_next_ptr_size) + +#define hal_dscp_tid_map_tbl_num_entries_max 48 +#define hal_dscp_tid_tbl_size 24 + +/* calculate the register address from bar0 of shadow register x */ +#define shadow_base_address 0x00003024 +#define shadow_num_registers 36 + +/* wcss relative address */ +#define hal_seq_wcss_umac_reo_reg 0x00a38000 +#define hal_seq_wcss_umac_tcl_reg 0x00a44000 +#define hal_seq_wcss_umac_ce0_src_reg 0x00a00000 +#define hal_seq_wcss_umac_ce0_dst_reg 0x00a01000 +#define hal_seq_wcss_umac_ce1_src_reg 0x00a02000 +#define hal_seq_wcss_umac_ce1_dst_reg 0x00a03000 +#define hal_seq_wcss_umac_wbm_reg 0x00a34000 + +/* sw2tcl(x) r0 ring configuration address */ +#define hal_tcl1_ring_cmn_ctrl_reg 0x00000014 +#define hal_tcl1_ring_dscp_tid_map 0x0000002c +#define hal_tcl1_ring_base_lsb 0x00000510 +#define hal_tcl1_ring_base_msb 0x00000514 +#define hal_tcl1_ring_id 0x00000518 +#define hal_tcl1_ring_misc 0x00000520 +#define hal_tcl1_ring_tp_addr_lsb 0x0000052c +#define hal_tcl1_ring_tp_addr_msb 0x00000530 +#define hal_tcl1_ring_consumer_int_setup_ix0 0x00000540 +#define hal_tcl1_ring_consumer_int_setup_ix1 0x00000544 +#define hal_tcl1_ring_msi1_base_lsb 0x00000558 +#define hal_tcl1_ring_msi1_base_msb 0x0000055c +#define hal_tcl1_ring_msi1_data 0x00000560 +#define hal_tcl2_ring_base_lsb 0x00000568 
+#define hal_tcl_ring_base_lsb 0x00000618 + +#define hal_tcl1_ring_msi1_base_lsb_offset \ + (hal_tcl1_ring_msi1_base_lsb - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_msi1_base_msb_offset \ + (hal_tcl1_ring_msi1_base_msb - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_msi1_data_offset \ + (hal_tcl1_ring_msi1_data - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_base_msb_offset \ + (hal_tcl1_ring_base_msb - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_id_offset \ + (hal_tcl1_ring_id - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_consr_int_setup_ix0_offset \ + (hal_tcl1_ring_consumer_int_setup_ix0 - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_consr_int_setup_ix1_offset \ + (hal_tcl1_ring_consumer_int_setup_ix1 - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_tp_addr_lsb_offset \ + (hal_tcl1_ring_tp_addr_lsb - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_tp_addr_msb_offset \ + (hal_tcl1_ring_tp_addr_msb - hal_tcl1_ring_base_lsb) +#define hal_tcl1_ring_misc_offset \ + (hal_tcl1_ring_misc - hal_tcl1_ring_base_lsb) + +/* sw2tcl(x) r2 ring pointers (head/tail) address */ +#define hal_tcl1_ring_hp 0x00002000 +#define hal_tcl1_ring_tp 0x00002004 +#define hal_tcl2_ring_hp 0x00002008 +#define hal_tcl_ring_hp 0x00002018 + +#define hal_tcl1_ring_tp_offset \ + (hal_tcl1_ring_tp - hal_tcl1_ring_hp) + +/* tcl status ring address */ +#define hal_tcl_status_ring_base_lsb 0x00000720 +#define hal_tcl_status_ring_hp 0x00002030 + +/* reo2sw(x) r0 ring configuration address */ +#define hal_reo1_gen_enable 0x00000000 +#define hal_reo1_dest_ring_ctrl_ix_2 0x0000000c +#define hal_reo1_dest_ring_ctrl_ix_3 0x00000010 +#define hal_reo1_ring_base_lsb 0x0000029c +#define hal_reo1_ring_base_msb 0x000002a0 +#define hal_reo1_ring_id 0x000002a4 +#define hal_reo1_ring_misc 0x000002ac +#define hal_reo1_ring_hp_addr_lsb 0x000002b0 +#define hal_reo1_ring_hp_addr_msb 0x000002b4 +#define hal_reo1_ring_producer_int_setup 0x000002c0 +#define hal_reo1_ring_msi1_base_lsb 0x000002e4 +#define 
hal_reo1_ring_msi1_base_msb 0x000002e8 +#define hal_reo1_ring_msi1_data 0x000002ec +#define hal_reo2_ring_base_lsb 0x000002f4 +#define hal_reo1_aging_thresh_ix_0 0x00000564 +#define hal_reo1_aging_thresh_ix_1 0x00000568 +#define hal_reo1_aging_thresh_ix_2 0x0000056c +#define hal_reo1_aging_thresh_ix_3 0x00000570 + +#define hal_reo1_ring_msi1_base_lsb_offset \ + (hal_reo1_ring_msi1_base_lsb - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_msi1_base_msb_offset \ + (hal_reo1_ring_msi1_base_msb - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_msi1_data_offset \ + (hal_reo1_ring_msi1_data - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_base_msb_offset \ + (hal_reo1_ring_base_msb - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_id_offset (hal_reo1_ring_id - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_producer_int_setup_offset \ + (hal_reo1_ring_producer_int_setup - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_hp_addr_lsb_offset \ + (hal_reo1_ring_hp_addr_lsb - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_hp_addr_msb_offset \ + (hal_reo1_ring_hp_addr_msb - hal_reo1_ring_base_lsb) +#define hal_reo1_ring_misc_offset (hal_reo1_ring_misc - hal_reo1_ring_base_lsb) + +/* reo2sw(x) r2 ring pointers (head/tail) address */ +#define hal_reo1_ring_hp 0x00003038 +#define hal_reo1_ring_tp 0x0000303c +#define hal_reo2_ring_hp 0x00003040 + +#define hal_reo1_ring_tp_offset (hal_reo1_ring_tp - hal_reo1_ring_hp) + +/* reo2tcl r0 ring configuration address */ +#define hal_reo_tcl_ring_base_lsb 0x000003fc + +/* reo2tcl r2 ring pointer (head/tail) address */ +#define hal_reo_tcl_ring_hp 0x00003058 + +/* reo cmd r0 address */ +#define hal_reo_cmd_ring_base_lsb 0x00000194 + +/* reo cmd r2 address */ +#define hal_reo_cmd_hp 0x00003020 + +/* sw2reo r0 address */ +#define hal_sw2reo_ring_base_lsb 0x000001ec + +/* sw2reo r2 address */ +#define hal_sw2reo_ring_hp 0x00003028 + +/* ce ring r0 address */ +#define hal_ce_dst_ring_base_lsb 0x00000000 +#define hal_ce_dst_status_ring_base_lsb 
0x00000058 +#define hal_ce_dst_ring_ctrl 0x000000b0 + +/* ce ring r2 address */ +#define hal_ce_dst_ring_hp 0x00000400 +#define hal_ce_dst_status_ring_hp 0x00000408 + +/* reo status address */ +#define hal_reo_status_ring_base_lsb 0x00000504 +#define hal_reo_status_hp 0x00003070 + +/* wbm idle r0 address */ +#define hal_wbm_idle_link_ring_base_lsb 0x00000860 +#define hal_wbm_idle_link_ring_misc_addr 0x00000870 +#define hal_wbm_r0_idle_list_control_addr 0x00000048 +#define hal_wbm_r0_idle_list_size_addr 0x0000004c +#define hal_wbm_scattered_ring_base_lsb 0x00000058 +#define hal_wbm_scattered_ring_base_msb 0x0000005c +#define hal_wbm_scattered_desc_ptr_head_info_ix0 0x00000068 +#define hal_wbm_scattered_desc_ptr_head_info_ix1 0x0000006c +#define hal_wbm_scattered_desc_ptr_tail_info_ix0 0x00000078 +#define hal_wbm_scattered_desc_ptr_tail_info_ix1 0x0000007c +#define hal_wbm_scattered_desc_ptr_hp_addr 0x00000084 + +/* wbm idle r2 address */ +#define hal_wbm_idle_link_ring_hp 0x000030b0 + +/* sw2wbm r0 release address */ +#define hal_wbm_release_ring_base_lsb 0x000001d8 + +/* sw2wbm r2 release address */ +#define hal_wbm_release_ring_hp 0x00003018 + +/* wbm2sw r0 release address */ +#define hal_wbm0_release_ring_base_lsb 0x00000910 +#define hal_wbm1_release_ring_base_lsb 0x00000968 + +/* wbm2sw r2 release address */ +#define hal_wbm0_release_ring_hp 0x000030c0 +#define hal_wbm1_release_ring_hp 0x000030c8 + +/* tcl ring feild mask and offset */ +#define hal_tcl1_ring_base_msb_ring_size genmask(27, 8) +#define hal_tcl1_ring_base_msb_ring_base_addr_msb genmask(7, 0) +#define hal_tcl1_ring_id_entry_size genmask(7, 0) +#define hal_tcl1_ring_misc_msi_loopcnt_disable bit(1) +#define hal_tcl1_ring_misc_msi_swap bit(3) +#define hal_tcl1_ring_misc_host_fw_swap bit(4) +#define hal_tcl1_ring_misc_data_tlv_swap bit(5) +#define hal_tcl1_ring_misc_srng_enable bit(6) +#define hal_tcl1_ring_consr_int_setup_ix0_intr_tmr_thold genmask(31, 16) +#define 
hal_tcl1_ring_consr_int_setup_ix0_batch_counter_thold genmask(14, 0) +#define hal_tcl1_ring_consr_int_setup_ix1_low_thold genmask(15, 0) +#define hal_tcl1_ring_msi1_base_msb_msi1_enable bit(8) +#define hal_tcl1_ring_msi1_base_msb_addr genmask(7, 0) +#define hal_tcl1_ring_cmn_ctrl_dscp_tid_map_prog_en bit(17) +#define hal_tcl1_ring_field_dscp_tid_map genmask(31, 0) +#define hal_tcl1_ring_field_dscp_tid_map0 genmask(2, 0) +#define hal_tcl1_ring_field_dscp_tid_map1 genmask(5, 3) +#define hal_tcl1_ring_field_dscp_tid_map2 genmask(8, 6) +#define hal_tcl1_ring_field_dscp_tid_map3 genmask(11, 9) +#define hal_tcl1_ring_field_dscp_tid_map4 genmask(14, 12) +#define hal_tcl1_ring_field_dscp_tid_map5 genmask(17, 15) +#define hal_tcl1_ring_field_dscp_tid_map6 genmask(20, 18) +#define hal_tcl1_ring_field_dscp_tid_map7 genmask(23, 21) + +/* reo ring feild mask and offset */ +#define hal_reo1_ring_base_msb_ring_size genmask(27, 8) +#define hal_reo1_ring_base_msb_ring_base_addr_msb genmask(7, 0) +#define hal_reo1_ring_id_ring_id genmask(15, 8) +#define hal_reo1_ring_id_entry_size genmask(7, 0) +#define hal_reo1_ring_misc_msi_swap bit(3) +#define hal_reo1_ring_misc_host_fw_swap bit(4) +#define hal_reo1_ring_misc_data_tlv_swap bit(5) +#define hal_reo1_ring_misc_srng_enable bit(6) +#define hal_reo1_ring_prdr_int_setup_intr_tmr_thold genmask(31, 16) +#define hal_reo1_ring_prdr_int_setup_batch_counter_thold genmask(14, 0) +#define hal_reo1_ring_msi1_base_msb_msi1_enable bit(8) +#define hal_reo1_ring_msi1_base_msb_addr genmask(7, 0) +#define hal_reo1_gen_enable_frag_dst_ring genmask(25, 23) +#define hal_reo1_gen_enable_aging_list_enable bit(2) +#define hal_reo1_gen_enable_aging_flush_enable bit(3) + +/* ce ring bit field mask and shift */ +#define hal_ce_dst_r0_dest_ctrl_max_len genmask(15, 0) + +#define hal_addr_lsb_reg_mask 0xffffffff + +#define hal_addr_msb_reg_shift 32 + +/* wbm ring bit field mask and shift */ +#define hal_wbm_link_desc_idle_list_mode bit(1) +#define 
hal_wbm_scatter_buffer_size genmask(10, 2) +#define hal_wbm_scatter_ring_size_of_idle_link_desc_list genmask(31, 16) +#define hal_wbm_scattered_desc_msb_base_addr_39_32 genmask(7, 0) +#define hal_wbm_scattered_desc_msb_base_addr_match_tag genmask(31, 8) + +#define hal_wbm_scattered_desc_head_p_offset_ix1 genmask(20, 8) +#define hal_wbm_scattered_desc_tail_p_offset_ix1 genmask(20, 8) + +#define base_addr_match_tag_val 0x5 + +#define hal_reo_reo2sw1_ring_base_msb_ring_size 0x000fffff +#define hal_reo_reo2tcl_ring_base_msb_ring_size 0x000fffff +#define hal_reo_sw2reo_ring_base_msb_ring_size 0x0000ffff +#define hal_reo_cmd_ring_base_msb_ring_size 0x0000ffff +#define hal_reo_status_ring_base_msb_ring_size 0x0000ffff +#define hal_sw2tcl1_ring_base_msb_ring_size 0x000fffff +#define hal_sw2tcl1_cmd_ring_base_msb_ring_size 0x000fffff +#define hal_tcl_status_ring_base_msb_ring_size 0x0000ffff +#define hal_ce_src_ring_base_msb_ring_size 0x0000ffff +#define hal_ce_dst_ring_base_msb_ring_size 0x0000ffff +#define hal_ce_dst_status_ring_base_msb_ring_size 0x0000ffff +#define hal_wbm_idle_link_ring_base_msb_ring_size 0x0000ffff +#define hal_sw2wbm_release_ring_base_msb_ring_size 0x0000ffff +#define hal_wbm2sw_release_ring_base_msb_ring_size 0x000fffff +#define hal_rxdma_ring_max_size 0x0000ffff + +#define hal_rx_desc_size (sizeof(struct hal_rx_desc)) + +/* add any other errors here and return them in + * ath11k_hal_rx_desc_get_err(). 
+ */ + +enum hal_srng_ring_id { + hal_srng_ring_id_reo2sw1 = 0, + hal_srng_ring_id_reo2sw2, + hal_srng_ring_id_reo2sw3, + hal_srng_ring_id_reo2sw4, + hal_srng_ring_id_reo2tcl, + hal_srng_ring_id_sw2reo, + + hal_srng_ring_id_reo_cmd = 8, + hal_srng_ring_id_reo_status, + + hal_srng_ring_id_sw2tcl1 = 16, + hal_srng_ring_id_sw2tcl2, + hal_srng_ring_id_sw2tcl3, + hal_srng_ring_id_sw2tcl4, + + hal_srng_ring_id_sw2tcl_cmd = 24, + hal_srng_ring_id_tcl_status, + + hal_srng_ring_id_ce0_src = 32, + hal_srng_ring_id_ce1_src, + hal_srng_ring_id_ce2_src, + hal_srng_ring_id_ce3_src, + hal_srng_ring_id_ce4_src, + hal_srng_ring_id_ce5_src, + hal_srng_ring_id_ce6_src, + hal_srng_ring_id_ce7_src, + hal_srng_ring_id_ce8_src, + hal_srng_ring_id_ce9_src, + hal_srng_ring_id_ce10_src, + hal_srng_ring_id_ce11_src, + + hal_srng_ring_id_ce0_dst = 56, + hal_srng_ring_id_ce1_dst, + hal_srng_ring_id_ce2_dst, + hal_srng_ring_id_ce3_dst, + hal_srng_ring_id_ce4_dst, + hal_srng_ring_id_ce5_dst, + hal_srng_ring_id_ce6_dst, + hal_srng_ring_id_ce7_dst, + hal_srng_ring_id_ce8_dst, + hal_srng_ring_id_ce9_dst, + hal_srng_ring_id_ce10_dst, + hal_srng_ring_id_ce11_dst, + + hal_srng_ring_id_ce0_dst_status = 80, + hal_srng_ring_id_ce1_dst_status, + hal_srng_ring_id_ce2_dst_status, + hal_srng_ring_id_ce3_dst_status, + hal_srng_ring_id_ce4_dst_status, + hal_srng_ring_id_ce5_dst_status, + hal_srng_ring_id_ce6_dst_status, + hal_srng_ring_id_ce7_dst_status, + hal_srng_ring_id_ce8_dst_status, + hal_srng_ring_id_ce9_dst_status, + hal_srng_ring_id_ce10_dst_status, + hal_srng_ring_id_ce11_dst_status, + + hal_srng_ring_id_wbm_idle_link = 104, + hal_srng_ring_id_wbm_sw_release, + hal_srng_ring_id_wbm2sw0_release, + hal_srng_ring_id_wbm2sw1_release, + hal_srng_ring_id_wbm2sw2_release, + hal_srng_ring_id_wbm2sw3_release, + + hal_srng_ring_id_umac_id_end = 127, + hal_srng_ring_id_lmac1_id_start, + + hal_srng_ring_id_wmac1_sw2rxdma0_buf = hal_srng_ring_id_lmac1_id_start, + hal_srng_ring_id_wmac1_sw2rxdma1_buf, + 
hal_srng_ring_id_wmac1_sw2rxdma2_buf, + hal_srng_ring_id_wmac1_sw2rxdma0_statbuf, + hal_srng_ring_id_wmac1_sw2rxdma1_statbuf, + hal_srng_ring_id_wmac1_rxdma2sw0, + hal_srng_ring_id_wmac1_rxdma2sw1, + hal_srng_ring_id_wmac1_sw2rxdma1_desc, + hal_srng_ring_id_rxdma_dir_buf, + + hal_srng_ring_id_lmac1_id_end = 143 +}; + +/* srng registers are split into two groups r0 and r2 */ +#define hal_srng_reg_grp_r0 0 +#define hal_srng_reg_grp_r2 1 +#define hal_srng_num_reg_grp 2 + +#define hal_srng_num_lmacs 3 +#define hal_srng_reo_exception hal_srng_ring_id_reo2sw1 +#define hal_srng_rings_per_lmac (hal_srng_ring_id_lmac1_id_end - \ + hal_srng_ring_id_lmac1_id_start) +#define hal_srng_num_lmac_rings (hal_srng_num_lmacs * hal_srng_rings_per_lmac) +#define hal_srng_ring_id_max (hal_srng_ring_id_umac_id_end + \ + hal_srng_num_lmac_rings) + +enum hal_ring_type { + hal_reo_dst, + hal_reo_exception, + hal_reo_reinject, + hal_reo_cmd, + hal_reo_status, + hal_tcl_data, + hal_tcl_cmd, + hal_tcl_status, + hal_ce_src, + hal_ce_dst, + hal_ce_dst_status, + hal_wbm_idle_link, + hal_sw2wbm_release, + hal_wbm2sw_release, + hal_rxdma_buf, + hal_rxdma_dst, + hal_rxdma_monitor_buf, + hal_rxdma_monitor_status, + hal_rxdma_monitor_dst, + hal_rxdma_monitor_desc, + hal_rxdma_dir_buf, + hal_max_ring_types, +}; + +#define hal_rx_max_ba_window 256 + +#define hal_default_reo_timeout_usec (40 * 1000) + +/** + * enum hal_reo_cmd_type: enum for reo command type + * @cmd_get_queue_stats: get reo queue status/stats + * @cmd_flush_queue: flush all frames in reo queue + * @cmd_flush_cache: flush descriptor entries in the cache + * @cmd_unblock_cache: unblock a descriptor's address that was blocked + * earlier with a 'reo_flush_cache' command + * @cmd_flush_timeout_list: flush buffers/descriptors from timeout list + * @cmd_update_rx_reo_queue: update reo queue settings + */ +enum hal_reo_cmd_type { + hal_reo_cmd_get_queue_stats = 0, + hal_reo_cmd_flush_queue = 1, + hal_reo_cmd_flush_cache = 2, + 
hal_reo_cmd_unblock_cache = 3, + hal_reo_cmd_flush_timeout_list = 4, + hal_reo_cmd_update_rx_queue = 5, +}; + +/** + * enum hal_reo_cmd_status: enum for execution status of reo command + * @hal_reo_cmd_success: command has successfully executed + * @hal_reo_cmd_blocked: command could not be executed as the queue + * or cache was blocked + * @hal_reo_cmd_failed: command execution failed, could be due to + * invalid queue desc + * @hal_reo_cmd_resource_blocked: + * @hal_reo_cmd_drain: + */ +enum hal_reo_cmd_status { + hal_reo_cmd_success = 0, + hal_reo_cmd_blocked = 1, + hal_reo_cmd_failed = 2, + hal_reo_cmd_resource_blocked = 3, + hal_reo_cmd_drain = 0xff, +}; + +struct hal_wbm_idle_scatter_list { + dma_addr_t paddr; + struct hal_wbm_link_desc *vaddr; +}; + +struct hal_srng_params { + dma_addr_t ring_base_paddr; + u32 *ring_base_vaddr; + int num_entries; + u32 intr_batch_cntr_thres_entries; + u32 intr_timer_thres_us; + u32 flags; + u32 max_buffer_len; + u32 low_threshold; + + /* add more params as needed */ +}; + +enum hal_srng_dir { + hal_srng_dir_src, + hal_srng_dir_dst +}; + +/* srng flags */ +#define hal_srng_flags_msi_swap 0x00000008 +#define hal_srng_flags_ring_ptr_swap 0x00000010 +#define hal_srng_flags_data_tlv_swap 0x00000020 +#define hal_srng_flags_low_thresh_intr_en 0x00010000 +#define hal_srng_flags_msi_intr 0x00020000 +#define hal_srng_flags_lmac_ring 0x80000000 + +#define hal_srng_tlv_hdr_tag genmask(9, 1) +#define hal_srng_tlv_hdr_len genmask(25, 10) + +/* common srng ring structure for source and destination rings */ +struct hal_srng { + /* unique srng ring id */ + u8 ring_id; + + /* ring initialization done */ + u8 initialized; + + /* interrupt/msi value assigned to this ring */ + int irq; + + /* physical base address of the ring */ + dma_addr_t ring_base_paddr; + + /* virtual base address of the ring */ + u32 *ring_base_vaddr; + + /* number of entries in ring */ + u32 num_entries; + + /* ring size */ + u32 ring_size; + + /* ring size mask */ + u32 
ring_size_mask; + + /* size of ring entry */ + u32 entry_size; + + /* interrupt timer threshold - in micro seconds */ + u32 intr_timer_thres_us; + + /* interrupt batch counter threshold - in number of ring entries */ + u32 intr_batch_cntr_thres_entries; + + /* msi address */ + dma_addr_t msi_addr; + + /* msi data */ + u32 msi_data; + + /* misc flags */ + u32 flags; + + /* lock for serializing ring index updates */ + spinlock_t lock; + + /* start offset of srng register groups for this ring + * tbd: see if this is required - register address can be derived + * from ring id + */ + u32 hwreg_base[hal_srng_num_reg_grp]; + + /* source or destination ring */ + enum hal_srng_dir ring_dir; + + union { + struct { + /* sw tail pointer */ + u32 tp; + + /* shadow head pointer location to be updated by hw */ + volatile u32 *hp_addr; + + /* cached head pointer */ + u32 cached_hp; + + /* tail pointer location to be updated by sw - this + * will be a register address and need not be + * accessed through sw structure + */ + u32 *tp_addr; + + /* current sw loop cnt */ + u32 loop_cnt; + + /* max transfer size */ + u16 max_buffer_length; + } dst_ring; + + struct { + /* sw head pointer */ + u32 hp; + + /* sw reap head pointer */ + u32 reap_hp; + + /* shadow tail pointer location to be updated by hw */ + u32 *tp_addr; + + /* cached tail pointer */ + u32 cached_tp; + + /* head pointer location to be updated by sw - this + * will be a register address and need not be accessed + * through sw structure + */ + u32 *hp_addr; + + /* low threshold - in number of ring entries */ + u32 low_threshold; + } src_ring; + } u; +}; + +/* interrupt mitigation - batch threshold in terms of numer of frames */ +#define hal_srng_int_batch_threshold_tx 256 +#define hal_srng_int_batch_threshold_rx 128 +#define hal_srng_int_batch_threshold_other 1 + +/* interrupt mitigation - timer threshold in us */ +#define hal_srng_int_timer_threshold_tx 1000 +#define hal_srng_int_timer_threshold_rx 500 +#define 
hal_srng_int_timer_threshold_other 1000 + +/* hw srng configuration table */ +struct hal_srng_config { + int start_ring_id; + u16 max_rings; + u16 entry_size; + u32 reg_start[hal_srng_num_reg_grp]; + u16 reg_size[hal_srng_num_reg_grp]; + u8 lmac_ring; + enum hal_srng_dir ring_dir; + u32 max_size; +}; + +/** + * enum hal_rx_buf_return_buf_manager + * + * @hal_rx_buf_rbm_wbm_idle_buf_list: buffer returned to wbm idle buffer list + * @hal_rx_buf_rbm_wbm_idle_desc_list: descriptor returned to wbm idle + * descriptor list. + * @hal_rx_buf_rbm_fw_bm: buffer returned to fw + * @hal_rx_buf_rbm_sw0_bm: for tx completion -- returned to host + * @hal_rx_buf_rbm_sw1_bm: for tx completion -- returned to host + * @hal_rx_buf_rbm_sw2_bm: for tx completion -- returned to host + * @hal_rx_buf_rbm_sw3_bm: for rx release -- returned to host + */ + +enum hal_rx_buf_return_buf_manager { + hal_rx_buf_rbm_wbm_idle_buf_list, + hal_rx_buf_rbm_wbm_idle_desc_list, + hal_rx_buf_rbm_fw_bm, + hal_rx_buf_rbm_sw0_bm, + hal_rx_buf_rbm_sw1_bm, + hal_rx_buf_rbm_sw2_bm, + hal_rx_buf_rbm_sw3_bm, +}; + +#define hal_srng_desc_loop_cnt 0xf0000000 + +#define hal_reo_cmd_flg_need_status bit(0) +#define hal_reo_cmd_flg_stats_clear bit(1) +#define hal_reo_cmd_flg_flush_block_later bit(2) +#define hal_reo_cmd_flg_flush_release_blocking bit(3) +#define hal_reo_cmd_flg_flush_no_inval bit(4) +#define hal_reo_cmd_flg_flush_fwd_all_mpdus bit(5) +#define hal_reo_cmd_flg_flush_all bit(6) +#define hal_reo_cmd_flg_unblk_resource bit(7) +#define hal_reo_cmd_flg_unblk_cache bit(8) + +/* should be matching with hal_reo_upd_rx_queue_info0_upd_* feilds */ +#define hal_reo_cmd_upd0_rx_queue_num bit(8) +#define hal_reo_cmd_upd0_vld bit(9) +#define hal_reo_cmd_upd0_aldc bit(10) +#define hal_reo_cmd_upd0_dis_dup_detection bit(11) +#define hal_reo_cmd_upd0_soft_reorder_en bit(12) +#define hal_reo_cmd_upd0_ac bit(13) +#define hal_reo_cmd_upd0_bar bit(14) +#define hal_reo_cmd_upd0_retry bit(15) +#define 
hal_reo_cmd_upd0_check_2k_mode bit(16) +#define hal_reo_cmd_upd0_oor_mode bit(17) +#define hal_reo_cmd_upd0_ba_window_size bit(18) +#define hal_reo_cmd_upd0_pn_check bit(19) +#define hal_reo_cmd_upd0_even_pn bit(20) +#define hal_reo_cmd_upd0_uneven_pn bit(21) +#define hal_reo_cmd_upd0_pn_handle_enable bit(22) +#define hal_reo_cmd_upd0_pn_size bit(23) +#define hal_reo_cmd_upd0_ignore_ampdu_flg bit(24) +#define hal_reo_cmd_upd0_svld bit(25) +#define hal_reo_cmd_upd0_ssn bit(26) +#define hal_reo_cmd_upd0_seq_2k_err bit(27) +#define hal_reo_cmd_upd0_pn_err bit(28) +#define hal_reo_cmd_upd0_pn_valid bit(29) +#define hal_reo_cmd_upd0_pn bit(30) + +/* should be matching with hal_reo_upd_rx_queue_info1_* feilds */ +#define hal_reo_cmd_upd1_vld bit(16) +#define hal_reo_cmd_upd1_aldc genmask(18, 17) +#define hal_reo_cmd_upd1_dis_dup_detection bit(19) +#define hal_reo_cmd_upd1_soft_reorder_en bit(20) +#define hal_reo_cmd_upd1_ac genmask(22, 21) +#define hal_reo_cmd_upd1_bar bit(23) +#define hal_reo_cmd_upd1_retry bit(24) +#define hal_reo_cmd_upd1_check_2k_mode bit(25) +#define hal_reo_cmd_upd1_oor_mode bit(26) +#define hal_reo_cmd_upd1_pn_check bit(27) +#define hal_reo_cmd_upd1_even_pn bit(28) +#define hal_reo_cmd_upd1_uneven_pn bit(29) +#define hal_reo_cmd_upd1_pn_handle_enable bit(30) +#define hal_reo_cmd_upd1_ignore_ampdu_flg bit(31) + +/* should be matching with hal_reo_upd_rx_queue_info2_* feilds */ +#define hal_reo_cmd_upd2_svld bit(10) +#define hal_reo_cmd_upd2_ssn genmask(22, 11) +#define hal_reo_cmd_upd2_seq_2k_err bit(23) +#define hal_reo_cmd_upd2_pn_err bit(24) + +#define hal_reo_dest_ring_ctrl_hash_ring_map genmask(31, 8) + +struct ath11k_hal_reo_cmd { + u32 addr_lo; + u32 flag; + u32 upd0; + u32 upd1; + u32 upd2; + u32 pn[4]; + u16 rx_queue_num; + u16 min_rel; + u16 min_fwd; + u8 addr_hi; + u8 ac_list; + u8 blocking_idx; + u16 ba_window_size; + u8 pn_size; +}; + +enum hal_pn_type { + hal_pn_type_none, + hal_pn_type_wpa, + hal_pn_type_wapi_even, + 
hal_pn_type_wapi_uneven, +}; + +enum hal_ce_desc { + hal_ce_desc_src, + hal_ce_desc_dst, + hal_ce_desc_dst_status, +}; + +struct hal_reo_status_header { + u16 cmd_num; + enum hal_reo_cmd_status cmd_status; + u16 cmd_exe_time; + u32 timestamp; +}; + +struct hal_reo_status_queue_stats { + u16 ssn; + u16 curr_idx; + u32 pn[4]; + u32 last_rx_queue_ts; + u32 last_rx_dequeue_ts; + u32 rx_bitmap[8]; /* bitmap from 0-255 */ + u32 curr_mpdu_cnt; + u32 curr_msdu_cnt; + u16 fwd_due_to_bar_cnt; + u16 dup_cnt; + u32 frames_in_order_cnt; + u32 num_mpdu_processed_cnt; + u32 num_msdu_processed_cnt; + u32 total_num_processed_byte_cnt; + u32 late_rx_mpdu_cnt; + u32 reorder_hole_cnt; + u8 timeout_cnt; + u8 bar_rx_cnt; + u8 num_window_2k_jump_cnt; +}; + +struct hal_reo_status_flush_queue { + bool err_detected; +}; + +enum hal_reo_status_flush_cache_err_code { + hal_reo_status_flush_cache_err_code_success, + hal_reo_status_flush_cache_err_code_in_use, + hal_reo_status_flush_cache_err_code_not_found, +}; + +struct hal_reo_status_flush_cache { + bool err_detected; + enum hal_reo_status_flush_cache_err_code err_code; + bool cache_controller_flush_status_hit; + u8 cache_controller_flush_status_desc_type; + u8 cache_controller_flush_status_client_id; + u8 cache_controller_flush_status_err; + u8 cache_controller_flush_status_cnt; +}; + +enum hal_reo_status_unblock_cache_type { + hal_reo_status_unblock_blocking_resource, + hal_reo_status_unblock_entire_cache_usage, +}; + +struct hal_reo_status_unblock_cache { + bool err_detected; + enum hal_reo_status_unblock_cache_type unblock_type; +}; + +struct hal_reo_status_flush_timeout_list { + bool err_detected; + bool list_empty; + u16 release_desc_cnt; + u16 fwd_buf_cnt; +}; + +enum hal_reo_threshold_idx { + hal_reo_threshold_idx_desc_counter0, + hal_reo_threshold_idx_desc_counter1, + hal_reo_threshold_idx_desc_counter2, + hal_reo_threshold_idx_desc_counter_sum, +}; + +struct hal_reo_status_desc_thresh_reached { + enum hal_reo_threshold_idx 
threshold_idx; + u32 link_desc_counter0; + u32 link_desc_counter1; + u32 link_desc_counter2; + u32 link_desc_counter_sum; +}; + +struct hal_reo_status { + struct hal_reo_status_header uniform_hdr; + u8 loop_cnt; + union { + struct hal_reo_status_queue_stats queue_stats; + struct hal_reo_status_flush_queue flush_queue; + struct hal_reo_status_flush_cache flush_cache; + struct hal_reo_status_unblock_cache unblock_cache; + struct hal_reo_status_flush_timeout_list timeout_list; + struct hal_reo_status_desc_thresh_reached desc_thresh_reached; + } u; +}; + +/** + * hal context to be used to access srng apis (currently used by data path + * and transport (ce) modules) + */ +struct ath11k_hal { + /* hal internal state for all srng rings. + */ + struct hal_srng srng_list[hal_srng_ring_id_max]; + + /* srng configuration table */ + const struct hal_srng_config *srng_config; + + /* remote pointer memory for hw/fw updates */ + struct { + u32 *vaddr; + dma_addr_t paddr; + } rdp; + + /* shared memory for ring pointer updates from host to fw */ + struct { + u32 *vaddr; + dma_addr_t paddr; + } wrp; + + /* available reo blocking resources bitmap */ + u8 avail_blk_resource; + + u8 current_blk_index; + + /* shadow register configuration */ + u32 shadow_reg_addr[shadow_num_registers]; + int num_shadow_reg_configured; +}; + +u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid); +void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size, + u32 start_seqtype); +void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab, + struct hal_srng *srng); +void ath11k_hal_reo_hw_setup(struct ath11k_base *ab); +void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab, + struct hal_wbm_idle_scatter_list *sbuf, + u32 nsbufs, u32 tot_link_desc, + u32 end_offset); + +dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab, + struct hal_srng *srng); +dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab, + struct hal_srng *srng); +void 
ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, + dma_addr_t paddr); +u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type); +void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id, + u8 byte_swap_data); +void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr); +u32 ath11k_hal_ce_dst_status_get_length(void *buf); +int ath11k_hal_srng_get_entrysize(u32 ring_type); +int ath11k_hal_srng_get_max_entries(u32 ring_type); +void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng, + struct hal_srng_params *params); +u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab, + struct hal_srng *srng); +u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng); +int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr); +u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng); +u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, + struct hal_srng *srng); +u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab, + struct hal_srng *srng); +u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab, + struct hal_srng *srng); +int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng, + bool sync_hw_ptr); +void ath11k_hal_srng_access_begin(struct ath11k_base *ab, + struct hal_srng *srng); +void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng); +int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type, + int ring_num, int mac_id, + struct hal_srng_params *params); +int ath11k_hal_srng_init(struct ath11k_base *ath11k); +void ath11k_hal_srng_deinit(struct ath11k_base *ath11k); + +#endif diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal_desc.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux 
foundation. all rights reserved. + */ +#ifndef ath11k_hal_desc_h +#define ath11k_hal_desc_h + +#define buffer_addr_info0_addr genmask(31, 0) + +#define buffer_addr_info1_addr genmask(7, 0) +#define buffer_addr_info1_ret_buf_mgr genmask(10, 8) +#define buffer_addr_info1_sw_cookie genmask(31, 11) + +struct ath11k_buffer_addr { + u32 info0; + u32 info1; +} __packed; + +/* ath11k_buffer_addr + * + * info0 + * address (lower 32 bits) of the msdu buffer or msdu extension + * descriptor or link descriptor + * + * addr + * address (upper 8 bits) of the msdu buffer or msdu extension + * descriptor or link descriptor + * + * return_buffer_manager (rbm) + * consumer: wbm + * producer: sw/fw + * indicates to which buffer manager the buffer or msdu_extension + * descriptor or link descriptor that is being pointed to shall be + * returned after the frame has been processed. it is used by wbm + * for routing purposes. + * + * values are defined in enum %hal_rx_buf_rbm_ + * + * sw_buffer_cookie + * cookie field exclusively used by sw. hw ignores the contents, + * accept that it passes the programmed value on to other + * descriptors together with the physical address. + * + * field can be used by sw to for example associate the buffers + * physical address with the virtual address. 
+ */ + +enum hal_tlv_tag { + hal_mactx_cbf_start = 0 /* 0x0 */, + hal_phyrx_data = 1 /* 0x1 */, + hal_phyrx_cbf_data_resp = 2 /* 0x2 */, + hal_phyrx_abort_request = 3 /* 0x3 */, + hal_phyrx_user_abort_notification = 4 /* 0x4 */, + hal_mactx_data_resp = 5 /* 0x5 */, + hal_mactx_cbf_data = 6 /* 0x6 */, + hal_mactx_cbf_done = 7 /* 0x7 */, + hal_macrx_cbf_read_request = 8 /* 0x8 */, + hal_macrx_cbf_data_request = 9 /* 0x9 */, + hal_macrx_expect_ndp_reception = 10 /* 0xa */, + hal_macrx_freeze_capture_channel = 11 /* 0xb */, + hal_macrx_ndp_timeout = 12 /* 0xc */, + hal_macrx_abort_ack = 13 /* 0xd */, + hal_macrx_req_implicit_fb = 14 /* 0xe */, + hal_macrx_chain_mask = 15 /* 0xf */, + hal_macrx_nap_user = 16 /* 0x10 */, + hal_macrx_abort_request = 17 /* 0x11 */, + hal_phytx_other_transmit_info16 = 18 /* 0x12 */, + hal_phytx_abort_ack = 19 /* 0x13 */, + hal_phytx_abort_request = 20 /* 0x14 */, + hal_phytx_pkt_end = 21 /* 0x15 */, + hal_phytx_ppdu_header_info_request = 22 /* 0x16 */, + hal_phytx_request_ctrl_info = 23 /* 0x17 */, + hal_phytx_data_request = 24 /* 0x18 */, + hal_phytx_bf_cv_loading_done = 25 /* 0x19 */, + hal_phytx_nap_ack = 26 /* 0x1a */, + hal_phytx_nap_done = 27 /* 0x1b */, + hal_phytx_off_ack = 28 /* 0x1c */, + hal_phytx_on_ack = 29 /* 0x1d */, + hal_phytx_synth_off_ack = 30 /* 0x1e */, + hal_phytx_debug16 = 31 /* 0x1f */, + hal_mactx_abort_request = 32 /* 0x20 */, + hal_mactx_abort_ack = 33 /* 0x21 */, + hal_mactx_pkt_end = 34 /* 0x22 */, + hal_mactx_pre_phy_desc = 35 /* 0x23 */, + hal_mactx_bf_params_common = 36 /* 0x24 */, + hal_mactx_bf_params_per_user = 37 /* 0x25 */, + hal_mactx_prefetch_cv = 38 /* 0x26 */, + hal_mactx_user_desc_common = 39 /* 0x27 */, + hal_mactx_user_desc_per_user = 40 /* 0x28 */, + hal_example_user_tlv_16 = 41 /* 0x29 */, + hal_example_tlv_16 = 42 /* 0x2a */, + hal_mactx_phy_off = 43 /* 0x2b */, + hal_mactx_phy_on = 44 /* 0x2c */, + hal_mactx_synth_off = 45 /* 0x2d */, + hal_mactx_expect_cbf_common = 46 /* 0x2e */, + 
hal_mactx_expect_cbf_per_user = 47 /* 0x2f */, + hal_mactx_phy_desc = 48 /* 0x30 */, + hal_mactx_l_sig_a = 49 /* 0x31 */, + hal_mactx_l_sig_b = 50 /* 0x32 */, + hal_mactx_ht_sig = 51 /* 0x33 */, + hal_mactx_vht_sig_a = 52 /* 0x34 */, + hal_mactx_vht_sig_b_su20 = 53 /* 0x35 */, + hal_mactx_vht_sig_b_su40 = 54 /* 0x36 */, + hal_mactx_vht_sig_b_su80 = 55 /* 0x37 */, + hal_mactx_vht_sig_b_su160 = 56 /* 0x38 */, + hal_mactx_vht_sig_b_mu20 = 57 /* 0x39 */, + hal_mactx_vht_sig_b_mu40 = 58 /* 0x3a */, + hal_mactx_vht_sig_b_mu80 = 59 /* 0x3b */, + hal_mactx_vht_sig_b_mu160 = 60 /* 0x3c */, + hal_mactx_service = 61 /* 0x3d */, + hal_mactx_he_sig_a_su = 62 /* 0x3e */, + hal_mactx_he_sig_a_mu_dl = 63 /* 0x3f */, + hal_mactx_he_sig_a_mu_ul = 64 /* 0x40 */, + hal_mactx_he_sig_b1_mu = 65 /* 0x41 */, + hal_mactx_he_sig_b2_mu = 66 /* 0x42 */, + hal_mactx_he_sig_b2_ofdma = 67 /* 0x43 */, + hal_mactx_delete_cv = 68 /* 0x44 */, + hal_mactx_mu_uplink_common = 69 /* 0x45 */, + hal_mactx_mu_uplink_user_setup = 70 /* 0x46 */, + hal_mactx_other_transmit_info = 71 /* 0x47 */, + hal_mactx_phy_nap = 72 /* 0x48 */, + hal_mactx_debug = 73 /* 0x49 */, + hal_phyrx_abort_ack = 74 /* 0x4a */, + hal_phyrx_generated_cbf_details = 75 /* 0x4b */, + hal_phyrx_rssi_legacy = 76 /* 0x4c */, + hal_phyrx_rssi_ht = 77 /* 0x4d */, + hal_phyrx_user_info = 78 /* 0x4e */, + hal_phyrx_pkt_end = 79 /* 0x4f */, + hal_phyrx_debug = 80 /* 0x50 */, + hal_phyrx_cbf_transfer_done = 81 /* 0x51 */, + hal_phyrx_cbf_transfer_abort = 82 /* 0x52 */, + hal_phyrx_l_sig_a = 83 /* 0x53 */, + hal_phyrx_l_sig_b = 84 /* 0x54 */, + hal_phyrx_ht_sig = 85 /* 0x55 */, + hal_phyrx_vht_sig_a = 86 /* 0x56 */, + hal_phyrx_vht_sig_b_su20 = 87 /* 0x57 */, + hal_phyrx_vht_sig_b_su40 = 88 /* 0x58 */, + hal_phyrx_vht_sig_b_su80 = 89 /* 0x59 */, + hal_phyrx_vht_sig_b_su160 = 90 /* 0x5a */, + hal_phyrx_vht_sig_b_mu20 = 91 /* 0x5b */, + hal_phyrx_vht_sig_b_mu40 = 92 /* 0x5c */, + hal_phyrx_vht_sig_b_mu80 = 93 /* 0x5d */, + hal_phyrx_vht_sig_b_mu160 
= 94 /* 0x5e */, + hal_phyrx_he_sig_a_su = 95 /* 0x5f */, + hal_phyrx_he_sig_a_mu_dl = 96 /* 0x60 */, + hal_phyrx_he_sig_a_mu_ul = 97 /* 0x61 */, + hal_phyrx_he_sig_b1_mu = 98 /* 0x62 */, + hal_phyrx_he_sig_b2_mu = 99 /* 0x63 */, + hal_phyrx_he_sig_b2_ofdma = 100 /* 0x64 */, + hal_phyrx_other_receive_info = 101 /* 0x65 */, + hal_phyrx_common_user_info = 102 /* 0x66 */, + hal_phyrx_data_done = 103 /* 0x67 */, + hal_receive_rssi_info = 104 /* 0x68 */, + hal_receive_user_info = 105 /* 0x69 */, + hal_mimo_control_info = 106 /* 0x6a */, + hal_rx_location_info = 107 /* 0x6b */, + hal_coex_tx_req = 108 /* 0x6c */, + hal_dummy = 109 /* 0x6d */, + hal_rx_timing_offset_info = 110 /* 0x6e */, + hal_example_tlv_32_name = 111 /* 0x6f */, + hal_mpdu_limit = 112 /* 0x70 */, + hal_na_length_end = 113 /* 0x71 */, + hal_ole_buf_status = 114 /* 0x72 */, + hal_pcu_ppdu_setup_done = 115 /* 0x73 */, + hal_pcu_ppdu_setup_end = 116 /* 0x74 */, + hal_pcu_ppdu_setup_init = 117 /* 0x75 */, + hal_pcu_ppdu_setup_start = 118 /* 0x76 */, + hal_pdg_fes_setup = 119 /* 0x77 */, + hal_pdg_response = 120 /* 0x78 */, + hal_pdg_tx_req = 121 /* 0x79 */, + hal_sch_wait_instr = 122 /* 0x7a */, + hal_scheduler_tlv = 123 /* 0x7b */, + hal_tqm_flow_empty_status = 124 /* 0x7c */, + hal_tqm_flow_not_empty_status = 125 /* 0x7d */, + hal_tqm_gen_mpdu_length_list = 126 /* 0x7e */, + hal_tqm_gen_mpdu_length_list_status = 127 /* 0x7f */, + hal_tqm_gen_mpdus = 128 /* 0x80 */, + hal_tqm_gen_mpdus_status = 129 /* 0x81 */, + hal_tqm_remove_mpdu = 130 /* 0x82 */, + hal_tqm_remove_mpdu_status = 131 /* 0x83 */, + hal_tqm_remove_msdu = 132 /* 0x84 */, + hal_tqm_remove_msdu_status = 133 /* 0x85 */, + hal_tqm_update_tx_mpdu_count = 134 /* 0x86 */, + hal_tqm_write_cmd = 135 /* 0x87 */, + hal_ofdma_trigger_details = 136 /* 0x88 */, + hal_tx_data = 137 /* 0x89 */, + hal_tx_fes_setup = 138 /* 0x8a */, + hal_rx_packet = 139 /* 0x8b */, + hal_expected_response = 140 /* 0x8c */, + hal_tx_mpdu_end = 141 /* 0x8d */, + 
hal_tx_mpdu_start = 142 /* 0x8e */, + hal_tx_msdu_end = 143 /* 0x8f */, + hal_tx_msdu_start = 144 /* 0x90 */, + hal_tx_sw_mode_setup = 145 /* 0x91 */, + hal_txpcu_buffer_status = 146 /* 0x92 */, + hal_txpcu_user_buffer_status = 147 /* 0x93 */, + hal_data_to_time_config = 148 /* 0x94 */, + hal_example_user_tlv_32 = 149 /* 0x95 */, + hal_mpdu_info = 150 /* 0x96 */, + hal_pdg_user_setup = 151 /* 0x97 */, + hal_tx_11ah_setup = 152 /* 0x98 */, + hal_reo_update_rx_reo_queue_status = 153 /* 0x99 */, + hal_tx_peer_entry = 154 /* 0x9a */, + hal_tx_raw_or_native_frame_setup = 155 /* 0x9b */, + hal_example_struct_name = 156 /* 0x9c */, + hal_pcu_ppdu_setup_end_info = 157 /* 0x9d */, + hal_ppdu_rate_setting = 158 /* 0x9e */, + hal_prot_rate_setting = 159 /* 0x9f */, + hal_rx_mpdu_details = 160 /* 0xa0 */, + hal_example_user_tlv_42 = 161 /* 0xa1 */, + hal_rx_msdu_link = 162 /* 0xa2 */, + hal_rx_reo_queue = 163 /* 0xa3 */, + hal_addr_search_entry = 164 /* 0xa4 */, + hal_scheduler_cmd = 165 /* 0xa5 */, + hal_tx_flush = 166 /* 0xa6 */, + hal_tqm_entrance_ring = 167 /* 0xa7 */, + hal_tx_data_word = 168 /* 0xa8 */, + hal_tx_mpdu_details = 169 /* 0xa9 */, + hal_tx_mpdu_link = 170 /* 0xaa */, + hal_tx_mpdu_link_ptr = 171 /* 0xab */, + hal_tx_mpdu_queue_head = 172 /* 0xac */, + hal_tx_mpdu_queue_ext = 173 /* 0xad */, + hal_tx_mpdu_queue_ext_ptr = 174 /* 0xae */, + hal_tx_msdu_details = 175 /* 0xaf */, + hal_tx_msdu_extension = 176 /* 0xb0 */, + hal_tx_msdu_flow = 177 /* 0xb1 */, + hal_tx_msdu_link = 178 /* 0xb2 */, + hal_tx_msdu_link_entry_ptr = 179 /* 0xb3 */, + hal_response_rate_setting = 180 /* 0xb4 */, + hal_txpcu_buffer_basics = 181 /* 0xb5 */, + hal_uniform_descriptor_header = 182 /* 0xb6 */, + hal_uniform_tqm_cmd_header = 183 /* 0xb7 */, + hal_uniform_tqm_status_header = 184 /* 0xb8 */, + hal_user_rate_setting = 185 /* 0xb9 */, + hal_wbm_buffer_ring = 186 /* 0xba */, + hal_wbm_link_descriptor_ring = 187 /* 0xbb */, + hal_wbm_release_ring = 188 /* 0xbc */, + hal_tx_flush_req = 
189 /* 0xbd */, + hal_rx_msdu_details = 190 /* 0xbe */, + hal_tqm_write_cmd_status = 191 /* 0xbf */, + hal_tqm_get_mpdu_queue_stats = 192 /* 0xc0 */, + hal_tqm_get_msdu_flow_stats = 193 /* 0xc1 */, + hal_example_user_ctlv_32 = 194 /* 0xc2 */, + hal_tx_fes_status_start = 195 /* 0xc3 */, + hal_tx_fes_status_user_ppdu = 196 /* 0xc4 */, + hal_tx_fes_status_user_response = 197 /* 0xc5 */, + hal_tx_fes_status_end = 198 /* 0xc6 */, + hal_rx_trig_info = 199 /* 0xc7 */, + hal_rxpcu_tx_setup_clear = 200 /* 0xc8 */, + hal_rx_frame_bitmap_req = 201 /* 0xc9 */, + hal_rx_frame_bitmap_ack = 202 /* 0xca */, + hal_coex_rx_status = 203 /* 0xcb */, + hal_rx_start_param = 204 /* 0xcc */, + hal_rx_ppdu_start = 205 /* 0xcd */, + hal_rx_ppdu_end = 206 /* 0xce */, + hal_rx_mpdu_start = 207 /* 0xcf */, + hal_rx_mpdu_end = 208 /* 0xd0 */, + hal_rx_msdu_start = 209 /* 0xd1 */, + hal_rx_msdu_end = 210 /* 0xd2 */, + hal_rx_attention = 211 /* 0xd3 */, + hal_received_response_info = 212 /* 0xd4 */, + hal_rx_phy_sleep = 213 /* 0xd5 */, + hal_rx_header = 214 /* 0xd6 */, + hal_rx_peer_entry = 215 /* 0xd7 */, + hal_rx_flush = 216 /* 0xd8 */, + hal_rx_response_required_info = 217 /* 0xd9 */, + hal_rx_frameless_bar_details = 218 /* 0xda */, + hal_tqm_get_mpdu_queue_stats_status = 219 /* 0xdb */, + hal_tqm_get_msdu_flow_stats_status = 220 /* 0xdc */, + hal_tx_cbf_info = 221 /* 0xdd */, + hal_pcu_ppdu_setup_user = 222 /* 0xde */, + hal_rx_mpdu_pcu_start = 223 /* 0xdf */, + hal_rx_pm_info = 224 /* 0xe0 */, + hal_rx_user_ppdu_end = 225 /* 0xe1 */, + hal_rx_pre_ppdu_start = 226 /* 0xe2 */, + hal_rx_preamble = 227 /* 0xe3 */, + hal_tx_fes_setup_complete = 228 /* 0xe4 */, + hal_tx_last_mpdu_fetched = 229 /* 0xe5 */, + hal_txdma_stop_request = 230 /* 0xe6 */, + hal_rxpcu_setup = 231 /* 0xe7 */, + hal_rxpcu_user_setup = 232 /* 0xe8 */, + hal_tx_fes_status_ack_or_ba = 233 /* 0xe9 */, + hal_tqm_acked_mpdu = 234 /* 0xea */, + hal_coex_tx_resp = 235 /* 0xeb */, + hal_coex_tx_status = 236 /* 0xec */, + 
hal_mactx_coex_phy_ctrl = 237 /* 0xed */, + hal_coex_status_broadcast = 238 /* 0xee */, + hal_response_start_status = 239 /* 0xef */, + hal_response_end_status = 240 /* 0xf0 */, + hal_crypto_status = 241 /* 0xf1 */, + hal_received_trigger_info = 242 /* 0xf2 */, + hal_reo_entrance_ring = 243 /* 0xf3 */, + hal_rx_mpdu_link = 244 /* 0xf4 */, + hal_coex_tx_stop_ctrl = 245 /* 0xf5 */, + hal_rx_ppdu_ack_report = 246 /* 0xf6 */, + hal_rx_ppdu_no_ack_report = 247 /* 0xf7 */, + hal_sch_coex_status = 248 /* 0xf8 */, + hal_scheduler_command_status = 249 /* 0xf9 */, + hal_scheduler_rx_ppdu_no_response_status = 250 /* 0xfa */, + hal_tx_fes_status_prot = 251 /* 0xfb */, + hal_tx_fes_status_start_ppdu = 252 /* 0xfc */, + hal_tx_fes_status_start_prot = 253 /* 0xfd */, + hal_txpcu_phytx_debug32 = 254 /* 0xfe */, + hal_txpcu_phytx_other_transmit_info32 = 255 /* 0xff */, + hal_tx_mpdu_count_transfer_end = 256 /* 0x100 */, + hal_who_anchor_offset = 257 /* 0x101 */, + hal_who_anchor_value = 258 /* 0x102 */, + hal_who_cce_info = 259 /* 0x103 */, + hal_who_commit = 260 /* 0x104 */, + hal_who_commit_done = 261 /* 0x105 */, + hal_who_flush = 262 /* 0x106 */, + hal_who_l2_llc = 263 /* 0x107 */, + hal_who_l2_payload = 264 /* 0x108 */, + hal_who_l3_checksum = 265 /* 0x109 */, + hal_who_l3_info = 266 /* 0x10a */, + hal_who_l4_checksum = 267 /* 0x10b */, + hal_who_l4_info = 268 /* 0x10c */, + hal_who_msdu = 269 /* 0x10d */, + hal_who_msdu_misc = 270 /* 0x10e */, + hal_who_packet_data = 271 /* 0x10f */, + hal_who_packet_hdr = 272 /* 0x110 */, + hal_who_ppdu_end = 273 /* 0x111 */, + hal_who_ppdu_start = 274 /* 0x112 */, + hal_who_tso = 275 /* 0x113 */, + hal_who_wmac_header_pv0 = 276 /* 0x114 */, + hal_who_wmac_header_pv1 = 277 /* 0x115 */, + hal_who_wmac_iv = 278 /* 0x116 */, + hal_mpdu_info_end = 279 /* 0x117 */, + hal_mpdu_info_bitmap = 280 /* 0x118 */, + hal_tx_queue_extension = 281 /* 0x119 */, + hal_rx_peer_entry_details = 282 /* 0x11a */, + hal_rx_reo_queue_reference = 283 /* 0x11b */, + 
hal_rx_reo_queue_ext = 284 /* 0x11c */, + hal_scheduler_selfgen_response_status = 285 /* 0x11d */, + hal_tqm_update_tx_mpdu_count_status = 286 /* 0x11e */, + hal_tqm_acked_mpdu_status = 287 /* 0x11f */, + hal_tqm_add_msdu_status = 288 /* 0x120 */, + hal_rx_mpdu_link_ptr = 289 /* 0x121 */, + hal_reo_destination_ring = 290 /* 0x122 */, + hal_tqm_list_gen_done = 291 /* 0x123 */, + hal_who_terminate = 292 /* 0x124 */, + hal_tx_last_mpdu_end = 293 /* 0x125 */, + hal_tx_cv_data = 294 /* 0x126 */, + hal_tcl_entrance_from_ppe_ring = 295 /* 0x127 */, + hal_ppdu_tx_end = 296 /* 0x128 */, + hal_prot_tx_end = 297 /* 0x129 */, + hal_pdg_response_rate_setting = 298 /* 0x12a */, + hal_mpdu_info_global_end = 299 /* 0x12b */, + hal_tqm_sch_instr_global_end = 300 /* 0x12c */, + hal_rx_ppdu_end_user_stats = 301 /* 0x12d */, + hal_rx_ppdu_end_user_stats_ext = 302 /* 0x12e */, + hal_no_ack_report = 303 /* 0x12f */, + hal_ack_report = 304 /* 0x130 */, + hal_uniform_reo_cmd_header = 305 /* 0x131 */, + hal_reo_get_queue_stats = 306 /* 0x132 */, + hal_reo_flush_queue = 307 /* 0x133 */, + hal_reo_flush_cache = 308 /* 0x134 */, + hal_reo_unblock_cache = 309 /* 0x135 */, + hal_uniform_reo_status_header = 310 /* 0x136 */, + hal_reo_get_queue_stats_status = 311 /* 0x137 */, + hal_reo_flush_queue_status = 312 /* 0x138 */, + hal_reo_flush_cache_status = 313 /* 0x139 */, + hal_reo_unblock_cache_status = 314 /* 0x13a */, + hal_tqm_flush_cache = 315 /* 0x13b */, + hal_tqm_unblock_cache = 316 /* 0x13c */, + hal_tqm_flush_cache_status = 317 /* 0x13d */, + hal_tqm_unblock_cache_status = 318 /* 0x13e */, + hal_rx_ppdu_end_status_done = 319 /* 0x13f */, + hal_rx_status_buffer_done = 320 /* 0x140 */, + hal_buffer_addr_info = 321 /* 0x141 */, + hal_rx_msdu_desc_info = 322 /* 0x142 */, + hal_rx_mpdu_desc_info = 323 /* 0x143 */, + hal_tcl_data_cmd = 324 /* 0x144 */, + hal_tcl_gse_cmd = 325 /* 0x145 */, + hal_tcl_exit_base = 326 /* 0x146 */, + hal_tcl_compact_exit_ring = 327 /* 0x147 */, + 
hal_tcl_regular_exit_ring = 328 /* 0x148 */, + hal_tcl_extended_exit_ring = 329 /* 0x149 */, + hal_uplink_common_info = 330 /* 0x14a */, + hal_uplink_user_setup_info = 331 /* 0x14b */, + hal_tx_data_sync = 332 /* 0x14c */, + hal_phyrx_cbf_read_request_ack = 333 /* 0x14d */, + hal_tcl_status_ring = 334 /* 0x14e */, + hal_tqm_get_mpdu_head_info = 335 /* 0x14f */, + hal_tqm_sync_cmd = 336 /* 0x150 */, + hal_tqm_get_mpdu_head_info_status = 337 /* 0x151 */, + hal_tqm_sync_cmd_status = 338 /* 0x152 */, + hal_tqm_threshold_drop_notification_status = 339 /* 0x153 */, + hal_tqm_descriptor_threshold_reached_status = 340 /* 0x154 */, + hal_reo_flush_timeout_list = 341 /* 0x155 */, + hal_reo_flush_timeout_list_status = 342 /* 0x156 */, + hal_reo_to_ppe_ring = 343 /* 0x157 */, + hal_rx_mpdu_info = 344 /* 0x158 */, + hal_reo_descriptor_threshold_reached_status = 345 /* 0x159 */, + hal_scheduler_rx_sifs_response_trigger_status = 346 /* 0x15a */, + hal_example_user_tlv_32_name = 347 /* 0x15b */, + hal_rx_ppdu_start_user_info = 348 /* 0x15c */, + hal_rx_rxpcu_classification_overview = 349 /* 0x15d */, + hal_rx_ring_mask = 350 /* 0x15e */, + hal_who_classify_info = 351 /* 0x15f */, + hal_txpt_classify_info = 352 /* 0x160 */, + hal_rxpt_classify_info = 353 /* 0x161 */, + hal_tx_flow_search_entry = 354 /* 0x162 */, + hal_rx_flow_search_entry = 355 /* 0x163 */, + hal_received_trigger_info_details = 356 /* 0x164 */, + hal_coex_mac_nap = 357 /* 0x165 */, + hal_macrx_abort_request_info = 358 /* 0x166 */, + hal_mactx_abort_request_info = 359 /* 0x167 */, + hal_phyrx_abort_request_info = 360 /* 0x168 */, + hal_phytx_abort_request_info = 361 /* 0x169 */, + hal_rxpcu_ppdu_end_info = 362 /* 0x16a */, + hal_who_mesh_control = 363 /* 0x16b */, + hal_l_sig_a_info = 364 /* 0x16c */, + hal_l_sig_b_info = 365 /* 0x16d */, + hal_ht_sig_info = 366 /* 0x16e */, + hal_vht_sig_a_info = 367 /* 0x16f */, + hal_vht_sig_b_su20_info = 368 /* 0x170 */, + hal_vht_sig_b_su40_info = 369 /* 0x171 */, + 
hal_vht_sig_b_su80_info = 370 /* 0x172 */, + hal_vht_sig_b_su160_info = 371 /* 0x173 */, + hal_vht_sig_b_mu20_info = 372 /* 0x174 */, + hal_vht_sig_b_mu40_info = 373 /* 0x175 */, + hal_vht_sig_b_mu80_info = 374 /* 0x176 */, + hal_vht_sig_b_mu160_info = 375 /* 0x177 */, + hal_service_info = 376 /* 0x178 */, + hal_he_sig_a_su_info = 377 /* 0x179 */, + hal_he_sig_a_mu_dl_info = 378 /* 0x17a */, + hal_he_sig_a_mu_ul_info = 379 /* 0x17b */, + hal_he_sig_b1_mu_info = 380 /* 0x17c */, + hal_he_sig_b2_mu_info = 381 /* 0x17d */, + hal_he_sig_b2_ofdma_info = 382 /* 0x17e */, + hal_pdg_sw_mode_bw_start = 383 /* 0x17f */, + hal_pdg_sw_mode_bw_end = 384 /* 0x180 */, + hal_pdg_wait_for_mac_request = 385 /* 0x181 */, + hal_pdg_wait_for_phy_request = 386 /* 0x182 */, + hal_scheduler_end = 387 /* 0x183 */, + hal_peer_table_entry = 388 /* 0x184 */, + hal_sw_peer_info = 389 /* 0x185 */, + hal_rxole_cce_classify_info = 390 /* 0x186 */, + hal_tcl_cce_classify_info = 391 /* 0x187 */, + hal_rxole_cce_info = 392 /* 0x188 */, + hal_tcl_cce_info = 393 /* 0x189 */, + hal_tcl_cce_superrule = 394 /* 0x18a */, + hal_cce_rule = 395 /* 0x18b */, + hal_rx_ppdu_start_dropped = 396 /* 0x18c */, + hal_rx_ppdu_end_dropped = 397 /* 0x18d */, + hal_rx_ppdu_end_status_done_dropped = 398 /* 0x18e */, + hal_rx_mpdu_start_dropped = 399 /* 0x18f */, + hal_rx_msdu_start_dropped = 400 /* 0x190 */, + hal_rx_msdu_end_dropped = 401 /* 0x191 */, + hal_rx_mpdu_end_dropped = 402 /* 0x192 */, + hal_rx_attention_dropped = 403 /* 0x193 */, + hal_txpcu_user_setup = 404 /* 0x194 */, + hal_rxpcu_user_setup_ext = 405 /* 0x195 */, + hal_ce_src_desc = 406 /* 0x196 */, + hal_ce_stat_desc = 407 /* 0x197 */, + hal_rxole_cce_superrule = 408 /* 0x198 */, + hal_tx_rate_stats_info = 409 /* 0x199 */, + hal_cmd_part_0_end = 410 /* 0x19a */, + hal_mactx_synth_on = 411 /* 0x19b */, + hal_sch_critical_tlv_reference = 412 /* 0x19c */, + hal_tqm_mpdu_global_start = 413 /* 0x19d */, + hal_example_tlv_32 = 414 /* 0x19e */, + 
hal_tqm_update_tx_msdu_flow = 415 /* 0x19f */, + hal_tqm_update_tx_mpdu_queue_head = 416 /* 0x1a0 */, + hal_tqm_update_tx_msdu_flow_status = 417 /* 0x1a1 */, + hal_tqm_update_tx_mpdu_queue_head_status = 418 /* 0x1a2 */, + hal_reo_update_rx_reo_queue = 419 /* 0x1a3 */, + hal_ce_dst_desc = 420 /* 0x1a4 */, + hal_tlv_base = 511 /* 0x1ff */, +}; + +#define hal_tlv_hdr_tag genmask(9, 1) +#define hal_tlv_hdr_len genmask(25, 10) + +#define hal_tlv_align 4 + +struct hal_tlv_hdr { + u32 tl; + u8 value[0]; +} __packed; + +#define rx_mpdu_desc_info0_msdu_count genmask(7, 0) +#define rx_mpdu_desc_info0_seq_num genmask(19, 8) +#define rx_mpdu_desc_info0_frag_flag bit(20) +#define rx_mpdu_desc_info0_mpdu_retry bit(21) +#define rx_mpdu_desc_info0_ampdu_flag bit(22) +#define rx_mpdu_desc_info0_bar_frame bit(23) +#define rx_mpdu_desc_info0_valid_pn bit(24) +#define rx_mpdu_desc_info0_valid_sa bit(25) +#define rx_mpdu_desc_info0_sa_idx_timeout bit(26) +#define rx_mpdu_desc_info0_valid_da bit(27) +#define rx_mpdu_desc_info0_da_mcbc bit(28) +#define rx_mpdu_desc_info0_da_idx_timeout bit(29) +#define rx_mpdu_desc_info0_raw_mpdu bit(30) + +struct rx_mpdu_desc { + u32 info0; /* %rx_mpdu_desc_info */ + u32 meta_data; +} __packed; + +/* rx_mpdu_desc + * producer: rxdma + * consumer: reo/sw/fw + * + * msdu_count + * the number of msdus within the mpdu + * + * mpdu_sequence_number + * the field can have two different meanings based on the setting + * of field 'bar_frame'. if 'bar_frame' is set, it means the mpdu + * start sequence number from the bar frame otherwise it means + * the mpdu sequence number of the received frame. + * + * fragment_flag + * when set, this mpdu is a fragment and reo should forward this + * fragment mpdu to the reo destination ring without any reorder + * checks, pn checks or bitmap update. this implies that reo is + * forwarding the pointer to the msdu link descriptor. 
+ * + * mpdu_retry_bit + * the retry bit setting from the mpdu header of the received frame + * + * ampdu_flag + * indicates the mpdu was received as part of an a-mpdu. + * + * bar_frame + * indicates the received frame is a bar frame. after processing, + * this frame shall be pushed to sw or deleted. + * + * valid_pn + * when not set, reo will not perform a pn sequence number check. + * + * valid_sa + * indicates ole found a valid sa entry for all msdus in this mpdu. + * + * sa_idx_timeout + * indicates, at least 1 msdu within the mpdu has an unsuccessful + * mac source address search due to the expiration of search timer. + * + * valid_da + * when set, ole found a valid da entry for all msdus in this mpdu. + * + * da_mcbc + * field only valid if valid_da is set. indicates at least one of + * the da addresses is a multicast or broadcast address. + * + * da_idx_timeout + * indicates, at least 1 msdu within the mpdu has an unsuccessful + * mac destination address search due to the expiration of search + * timer. + * + * raw_mpdu + * field only valid when first_msdu_in_mpdu_flag is set. indicates + * the contents in the msdu buffer contains a 'raw' mpdu. 
+ */ + +enum hal_rx_msdu_desc_reo_dest_ind { + hal_rx_msdu_desc_reo_dest_ind_tcl, + hal_rx_msdu_desc_reo_dest_ind_sw1, + hal_rx_msdu_desc_reo_dest_ind_sw2, + hal_rx_msdu_desc_reo_dest_ind_sw3, + hal_rx_msdu_desc_reo_dest_ind_sw4, + hal_rx_msdu_desc_reo_dest_ind_release, + hal_rx_msdu_desc_reo_dest_ind_fw, +}; + +#define rx_msdu_desc_info0_first_msdu_in_mpdu bit(0) +#define rx_msdu_desc_info0_last_msdu_in_mpdu bit(1) +#define rx_msdu_desc_info0_msdu_continuation bit(2) +#define rx_msdu_desc_info0_msdu_length genmask(16, 3) +#define rx_msdu_desc_info0_reo_dest_ind genmask(21, 17) +#define rx_msdu_desc_info0_msdu_drop bit(22) +#define rx_msdu_desc_info0_valid_sa bit(23) +#define rx_msdu_desc_info0_sa_idx_timeout bit(24) +#define rx_msdu_desc_info0_valid_da bit(25) +#define rx_msdu_desc_info0_da_mcbc bit(26) +#define rx_msdu_desc_info0_da_idx_timeout bit(27) + +#define hal_rx_msdu_pkt_length_get(val) \ + (field_get(rx_msdu_desc_info0_msdu_length, (val))) + +struct rx_msdu_desc { + u32 info0; + u32 rsvd0; +} __packed; + +/* rx_msdu_desc + * + * first_msdu_in_mpdu + * indicates first msdu in mpdu. + * + * last_msdu_in_mpdu + * indicates last msdu in mpdu. this flag can be true only when + * 'msdu_continuation' set to 0. this implies that when an msdu + * is spread out over multiple buffers and thus msdu_continuation + * is set, only for the very last buffer of the msdu, can the + * 'last_msdu_in_mpdu' be set. + * + * when both first_msdu_in_mpdu and last_msdu_in_mpdu are set, + * the mpdu that this msdu belongs to only contains a single msdu. + * + * msdu_continuation + * when set, this msdu buffer was not able to hold the entire msdu. + * the next buffer will therefor contain additional information + * related to this msdu. + * + * msdu_length + * field is only valid in combination with the 'first_msdu_in_mpdu' + * being set. full msdu length in bytes after decapsulation. this + * field is still valid for mpdu frames without a-msdu. 
it still + * represents msdu length after decapsulation or in case of raw + * mpdus, it indicates the length of the entire mpdu (without fcs + * field). + * + * reo_destination_indication + * the id of the reo exit ring where the msdu frame shall push + * after (mpdu level) reordering has finished. values are defined + * in enum %hal_rx_msdu_desc_reo_dest_ind_. + * + * msdu_drop + * indicates that reo shall drop this msdu and not forward it to + * any other ring. + * + * valid_sa + * indicates ole found a valid sa entry for this msdu. + * + * sa_idx_timeout + * indicates, an unsuccessful mac source address search due to + * the expiration of search timer for this msdu. + * + * valid_da + * when set, ole found a valid da entry for this msdu. + * + * da_mcbc + * field only valid if valid_da is set. indicates the da address + * is a multicast or broadcast address for this msdu. + * + * da_idx_timeout + * indicates, an unsuccessful mac destination address search due + * to the expiration of search timer fot this msdu. 
+ */ + +enum hal_reo_dest_ring_buffer_type { + hal_reo_dest_ring_buffer_type_msdu, + hal_reo_dest_ring_buffer_type_link_desc, +}; + +enum hal_reo_dest_ring_push_reason { + hal_reo_dest_ring_push_reason_err_detected, + hal_reo_dest_ring_push_reason_routing_instruction, +}; + +enum hal_reo_dest_ring_error_code { + hal_reo_dest_ring_error_code_desc_addr_zero, + hal_reo_dest_ring_error_code_desc_invalid, + hal_reo_dest_ring_error_code_ampdu_in_non_ba, + hal_reo_dest_ring_error_code_non_ba_duplicate, + hal_reo_dest_ring_error_code_ba_duplicate, + hal_reo_dest_ring_error_code_frame_2k_jump, + hal_reo_dest_ring_error_code_bar_2k_jump, + hal_reo_dest_ring_error_code_frame_oor, + hal_reo_dest_ring_error_code_bar_oor, + hal_reo_dest_ring_error_code_no_ba_session, + hal_reo_dest_ring_error_code_frame_sn_equals_ssn, + hal_reo_dest_ring_error_code_pn_check_failed, + hal_reo_dest_ring_error_code_2k_err_flag_set, + hal_reo_dest_ring_error_code_pn_err_flag_set, + hal_reo_dest_ring_error_code_desc_blocked, + hal_reo_dest_ring_error_code_max, +}; + +#define hal_reo_dest_ring_info0_queue_addr_hi genmask(7, 0) +#define hal_reo_dest_ring_info0_buffer_type bit(8) +#define hal_reo_dest_ring_info0_push_reason genmask(10, 9) +#define hal_reo_dest_ring_info0_error_code genmask(15, 11) +#define hal_reo_dest_ring_info0_rx_queue_num genmask(31, 16) + +#define hal_reo_dest_ring_info1_reorder_info_valid bit(0) +#define hal_reo_dest_ring_info1_reorder_opcode genmask(4, 1) +#define hal_reo_dest_ring_info1_reorder_slot_idx genmask(12, 5) + +#define hal_reo_dest_ring_info2_ring_id genmask(27, 20) +#define hal_reo_dest_ring_info2_looping_count genmask(31, 28) + +struct hal_reo_dest_ring { + struct ath11k_buffer_addr buf_addr_info; + struct rx_mpdu_desc rx_mpdu_info; + struct rx_msdu_desc rx_msdu_info; + u32 queue_addr_lo; + u32 info0; /* %hal_reo_dest_ring_info0_ */ + u32 info1; /* %hal_reo_dest_ring_info1_ */ + u32 rsvd0; + u32 rsvd1; + u32 rsvd2; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; + u32 info2; 
/* %hal_reo_dest_ring_info2_ */ +} __packed; + +/* hal_reo_dest_ring + * + * producer: rxdma + * consumer: reo/sw/fw + * + * buf_addr_info + * details of the physical address of a buffer or msdu + * link descriptor. + * + * rx_mpdu_info + * general information related to the mpdu that is passed + * on from reo entrance ring to the reo destination ring. + * + * rx_msdu_info + * general information related to the msdu that is passed + * on from rxdma all the way to to the reo destination ring. + * + * queue_addr_lo + * address (lower 32 bits) of the reo queue descriptor. + * + * queue_addr_hi + * address (upper 8 bits) of the reo queue descriptor. + * + * buffer_type + * indicates the type of address provided in the buf_addr_info. + * values are defined in enum %hal_reo_dest_ring_buffer_type_. + * + * push_reason + * reason for pushing this frame to this exit ring. values are + * defined in enum %hal_reo_dest_ring_push_reason_. + * + * error_code + * valid only when 'push_reason' is set. all error codes are + * defined in enum %hal_reo_dest_ring_error_code_. + * + * rx_queue_num + * indicates the reo mpdu reorder queue id from which this frame + * originated. + * + * reorder_info_valid + * when set, reo has been instructed to not perform the actual + * re-ordering of frames for this queue, but just to insert + * the reorder opcodes. + * + * reorder_opcode + * field is valid when 'reorder_info_valid' is set. this field is + * always valid for debug purpose as well. + * + * reorder_slot_idx + * valid only when 'reorder_info_valid' is set. + * + * ring_id + * the buffer pointer ring id. + * 0 - idle ring + * 1 - n refers to other rings. + * + * looping_count + * indicates the number of times the producer of entries into + * this ring has looped around the ring. 
+ */ + +enum hal_reo_entr_rxdma_ecode { + hal_reo_entr_ring_rxdma_ecode_overflow_err, + hal_reo_entr_ring_rxdma_ecode_mpdu_len_err, + hal_reo_entr_ring_rxdma_ecode_fcs_err, + hal_reo_entr_ring_rxdma_ecode_decrypt_err, + hal_reo_entr_ring_rxdma_ecode_tkip_mic_err, + hal_reo_entr_ring_rxdma_ecode_unecrypted_err, + hal_reo_entr_ring_rxdma_ecode_msdu_len_err, + hal_reo_entr_ring_rxdma_ecode_msdu_limit_err, + hal_reo_entr_ring_rxdma_ecode_wifi_parse_err, + hal_reo_entr_ring_rxdma_ecode_amsdu_parse_err, + hal_reo_entr_ring_rxdma_ecode_sa_timeout_err, + hal_reo_entr_ring_rxdma_ecode_da_timeout_err, + hal_reo_entr_ring_rxdma_ecode_flow_timeout_err, + hal_reo_entr_ring_rxdma_ecode_flush_request_err, + hal_reo_entr_ring_rxdma_ecode_max, +}; + +#define hal_reo_entr_ring_info0_queue_addr_hi genmask(7, 0) +#define hal_reo_entr_ring_info0_mpdu_byte_count genmask(21, 8) +#define hal_reo_entr_ring_info0_dest_ind genmask(26, 22) +#define hal_reo_entr_ring_info0_frameless_bar bit(27) + +#define hal_reo_entr_ring_info1_rxdma_push_reason genmask(1, 0) +#define hal_reo_entr_ring_info1_rxdma_error_code genmask(6, 2) + +struct hal_reo_entrance_ring { + struct ath11k_buffer_addr buf_addr_info; + struct rx_mpdu_desc rx_mpdu_info; + u32 queue_addr_lo; + u32 info0; /* %hal_reo_entr_ring_info0_ */ + u32 info1; /* %hal_reo_entr_ring_info1_ */ + u32 info2; /* %hal_reo_dest_ring_info2_ */ + +} __packed; + +/* hal_reo_entrance_ring + * + * producer: rxdma + * consumer: reo + * + * buf_addr_info + * details of the physical address of a buffer or msdu + * link descriptor. + * + * rx_mpdu_info + * general information related to the mpdu that is passed + * on from reo entrance ring to the reo destination ring. + * + * queue_addr_lo + * address (lower 32 bits) of the reo queue descriptor. + * + * queue_addr_hi + * address (upper 8 bits) of the reo queue descriptor. + * + * mpdu_byte_count + * an approximation of the number of bytes received in this mpdu. 
+ * used to keeps stats on the amount of data flowing + * through a queue. + * + * reo_destination_indication + * the id of the reo exit ring where the msdu frame shall push + * after (mpdu level) reordering has finished. values are defined + * in enum %hal_rx_msdu_desc_reo_dest_ind_. + * + * frameless_bar + * indicates that this reo entrance ring struct contains bar info + * from a multi tid bar frame. the original multi tid bar frame + * itself contained all the reo info for the first tid, but all + * the subsequent tid info and their linkage to the reo descriptors + * is passed down as 'frameless' bar info. + * + * the only fields valid in this descriptor when this bit is set + * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number, + * bar_frame and peer_meta_data. + * + * rxdma_push_reason + * reason for pushing this frame to this exit ring. values are + * defined in enum %hal_reo_dest_ring_push_reason_. + * + * rxdma_error_code + * valid only when 'push_reason' is set. all error codes are + * defined in enum %hal_reo_entr_ring_rxdma_ecode_. + * + * ring_id + * the buffer pointer ring id. + * 0 - idle ring + * 1 - n refers to other rings. + * + * looping_count + * indicates the number of times the producer of entries into + * this ring has looped around the ring. + */ + +#define hal_reo_cmd_hdr_info0_cmd_number genmask(15, 0) +#define hal_reo_cmd_hdr_info0_status_required bit(16) + +struct hal_reo_cmd_hdr { + u32 info0; +} __packed; + +#define hal_reo_get_queue_stats_info0_queue_addr_hi genmask(7, 0) +#define hal_reo_get_queue_stats_info0_clear_stats bit(8) + +struct hal_reo_get_queue_stats { + struct hal_reo_cmd_hdr cmd; + u32 queue_addr_lo; + u32 info0; + u32 rsvd0[6]; +} __packed; + +/* hal_reo_get_queue_stats + * producer: sw + * consumer: reo + * + * cmd + * details for command execution tracking purposes. + * + * queue_addr_lo + * address (lower 32 bits) of the reo queue descriptor. 
+ * + * queue_addr_hi + * address (upper 8 bits) of the reo queue descriptor. + * + * clear_stats + * clear stats settings. when set, clear the stats after + * generating the status. + * + * following stats will be cleared. + * timeout_count + * forward_due_to_bar_count + * duplicate_count + * frames_in_order_count + * bar_received_count + * mpdu_frames_processed_count + * msdu_frames_processed_count + * total_processed_byte_count + * late_receive_mpdu_count + * window_jump_2k + * hole_count + */ + +#define hal_reo_flush_queue_info0_desc_addr_hi genmask(7, 0) +#define hal_reo_flush_queue_info0_block_desc_addr bit(8) +#define hal_reo_flush_queue_info0_block_resrc_idx genmask(10, 9) + +struct hal_reo_flush_queue { + struct hal_reo_cmd_hdr cmd; + u32 desc_addr_lo; + u32 info0; + u32 rsvd0[6]; +} __packed; + +#define hal_reo_flush_cache_info0_cache_addr_hi genmask(7, 0) +#define hal_reo_flush_cache_info0_fwd_all_mpdus bit(8) +#define hal_reo_flush_cache_info0_release_block_idx bit(9) +#define hal_reo_flush_cache_info0_block_resrc_idx genmask(11, 10) +#define hal_reo_flush_cache_info0_flush_wo_invalidate bit(12) +#define hal_reo_flush_cache_info0_block_cache_usage bit(13) +#define hal_reo_flush_cache_info0_flush_all bit(14) + +struct hal_reo_flush_cache { + struct hal_reo_cmd_hdr cmd; + u32 cache_addr_lo; + u32 info0; + u32 rsvd0[6]; +} __packed; + +#define hal_tcl_data_cmd_info0_desc_type bit(0) +#define hal_tcl_data_cmd_info0_epd bit(1) +#define hal_tcl_data_cmd_info0_encap_type genmask(3, 2) +#define hal_tcl_data_cmd_info0_encrypt_type genmask(7, 4) +#define hal_tcl_data_cmd_info0_src_buf_swap bit(8) +#define hal_tcl_data_cmd_info0_lnk_meta_swap bit(9) +#define hal_tcl_data_cmd_info0_search_type bit(12) +#define hal_tcl_data_cmd_info0_addrx_en bit(14) +#define hal_tcl_data_cmd_info0_addry_en bit(15) +#define hal_tcl_data_cmd_info0_cmd_num genmask(31, 16) + +#define hal_tcl_data_cmd_info1_data_len genmask(15, 0) +#define hal_tcl_data_cmd_info1_ip4_cksum_en bit(16) 
+#define hal_tcl_data_cmd_info1_udp4_cksum_en bit(17) +#define hal_tcl_data_cmd_info1_udp6_cksum_en bit(18) +#define hal_tcl_data_cmd_info1_tcp4_cksum_en bit(19) +#define hal_tcl_data_cmd_info1_tcp6_cksum_en bit(20) +#define hal_tcl_data_cmd_info1_to_fw bit(21) +#define hal_tcl_data_cmd_info1_pkt_offset genmask(31, 23) + +#define hal_tcl_data_cmd_info2_buf_timestamp genmask(18, 0) +#define hal_tcl_data_cmd_info2_buf_t_valid bit(19) +#define hal_tcl_data_cmd_info2_mesh_enable bit(20) +#define hal_tcl_data_cmd_info2_tid_overwrite bit(21) +#define hal_tcl_data_cmd_info2_tid genmask(25, 22) +#define hal_tcl_data_cmd_info2_lmac_id genmask(27, 26) + +#define hal_tcl_data_cmd_info3_dscp_tid_table_idx genmask(5, 0) +#define hal_tcl_data_cmd_info3_search_index genmask(25, 6) +#define hal_tcl_data_cmd_info3_cache_set_num genmask(29, 26) + +#define hal_tcl_data_cmd_info4_ring_id genmask(27, 20) +#define hal_tcl_data_cmd_info4_looping_count genmask(31, 28) + +enum hal_encrypt_type { + hal_encrypt_type_wep_40, + hal_encrypt_type_wep_104, + hal_encrypt_type_tkip_no_mic, + hal_encrypt_type_wep_128, + hal_encrypt_type_tkip_mic, + hal_encrypt_type_wapi, + hal_encrypt_type_ccmp_128, + hal_encrypt_type_open, + hal_encrypt_type_ccmp_256, + hal_encrypt_type_gcmp_128, + hal_encrypt_type_aes_gcmp_256, + hal_encrypt_type_wapi_gcm_sm4, +}; + +enum hal_tcl_encap_type { + hal_tcl_encap_type_raw, + hal_tcl_encap_type_native_wifi, + hal_tcl_encap_type_ethernet, + hal_tcl_encap_type_802_3 = 3, +}; + +enum hal_tcl_desc_type { + hal_tcl_desc_type_buffer, + hal_tcl_desc_type_ext_desc, +}; + +enum hal_wbm_htt_tx_comp_status { + hal_wbm_rel_htt_tx_comp_status_ok, + hal_wbm_rel_htt_tx_comp_status_drop, + hal_wbm_rel_htt_tx_comp_status_ttl, + hal_wbm_rel_htt_tx_comp_status_reinj, + hal_wbm_rel_htt_tx_comp_status_inspect, + hal_wbm_rel_htt_tx_comp_status_mec_notify, +}; + +struct hal_tcl_data_cmd { + struct ath11k_buffer_addr buf_addr_info; + u32 info0; + u32 info1; + u32 info2; + u32 info3; + u32 
info4; +} __packed; + +/* hal_tcl_data_cmd + * + * buf_addr_info + * details of the physical address of a buffer or msdu + * link descriptor. + * + * desc_type + * indicates the type of address provided in the buf_addr_info. + * values are defined in enum %hal_reo_dest_ring_buffer_type_. + * + * epd + * when this bit is set then input packet is an epd type. + * + * encap_type + * indicates the encapsulation that hw will perform. values are + * defined in enum %hal_tcl_encap_type_. + * + * encrypt_type + * field only valid for encap_type: raw + * values are defined in enum %hal_encrypt_type_. + * + * src_buffer_swap + * treats source memory (packet buffer) organization as big-endian. + * 1'b0: source memory is little endian + * 1'b1: source memory is big endian + * + * link_meta_swap + * treats link descriptor and metadata as big-endian. + * 1'b0: memory is little endian + * 1'b1: memory is big endian + * + * search_type + * search type select + * 0 - normal search, 1 - index based address search, + * 2 - index based flow search + * + * addrx_en + * addry_en + * address x/y search enable in ase correspondingly. + * 1'b0: search disable + * 1'b1: search enable + * + * cmd_num + * this number can be used to match against status. + * + * data_length + * msdu length in case of direct descriptor. length of link + * extension descriptor in case of link extension descriptor. + * + * *_checksum_en + * enable checksum replacement for ipv4, udp_over_ipv4, ipv6, + * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6. + * + * to_fw + * forward packet to fw along with classification result. the + * packet will not be forward to tqm when this bit is set. + * 1'b0: use classification result to forward the packet. + * 1'b1: override classification result & forward packet only to fw + * + * packet_offset + * packet offset from metadata in case of direct buffer descriptor. + * + * buffer_timestamp + * buffer_timestamp_valid + * frame system entrance timestamp. 
it shall be filled by first + * module (sw, tcl or tqm) that sees the frames first. + * + * mesh_enable + * for raw wifi frames, this indicates transmission to a mesh sta, + * enabling the interpretation of the 'mesh control present' bit + * (bit 8) of qos control. + * for native wifi frames, this indicates that a 'mesh control' + * field is present between the header and the llc. + * + * hlos_tid_overwrite + * + * when set, tcl shall ignore the ip dscp and vlan pcp + * fields and use hlos_tid as the final tid. otherwise tcl + * shall consider the dscp and pcp fields as well as hlos_tid + * and choose a final tid based on the configured priority + * + * hlos_tid + * hlos msdu priority + * field is used when hlos_tid_overwrite is set. + * + * lmac_id + * tcl uses this lmac_id in address search, i.e, while + * finding matching entry for the packet in ast corresponding + * to given lmac_id + * + * if lmac id is all 1s (=> value 3), it indicates wildcard + * match for any mac + * + * dscp_tid_table_num + * dscp to tid mapping table number that need to be used + * for the msdu. + * + * search_index + * the index that will be used for index based address or + * flow search. the field is valid when 'search_type' is 1 or 2. + * + * cache_set_num + * + * cache set number that should be used to cache the index + * based search results, for address and flow search. this + * value should be equal to lsb four bits of the hash value of + * match data, in case of search index points to an entry which + * may be used in content based search also. the value can be + * anything when the entry pointed by search index will not be + * used for content based search. + * + * ring_id + * the buffer pointer ring id. + * 0 refers to the idle ring + * 1 - n refers to other rings + * + * looping_count + * + * a count value that indicates the number of times the + * producer of entries into the ring has looped around the + * ring. + * + * at initialization time, this value is set to 0. 
on the + * first loop, this value is set to 1. after the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * in case sw is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. this eliminates the need to + * check where the head pointer' of the ring is located once + * the sw starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * also note that sw if it wants only needs to look at the + * lsb bit of this count value. + */ + +#define hal_tcl_desc_len sizeof(struct hal_tcl_data_cmd) + +enum hal_tcl_gse_ctrl { + hal_tcl_gse_ctrl_rd_stat, + hal_tcl_gse_ctrl_srch_dis, + hal_tcl_gse_ctrl_wr_bk_single, + hal_tcl_gse_ctrl_wr_bk_all, + hal_tcl_gse_ctrl_inval_single, + hal_tcl_gse_ctrl_inval_all, + hal_tcl_gse_ctrl_wr_bk_inval_single, + hal_tcl_gse_ctrl_wr_bk_inval_all, + hal_tcl_gse_ctrl_clr_stat_single, +}; + +/* hal_tcl_gse_ctrl + * + * rd_stat + * report or read statistics + * srch_dis + * search disable. report only hash. 
+ * wr_bk_single + * write back single entry + * wr_bk_all + * write back entire cache entry + * inval_single + * invalidate single cache entry + * inval_all + * invalidate entire cache + * wr_bk_inval_single + * write back and invalidate single entry in cache + * wr_bk_inval_all + * write back and invalidate entire cache + * clr_stat_single + * clear statistics for single entry + */ + +#define hal_tcl_gse_cmd_info0_ctrl_buf_addr_hi genmask(7, 0) +#define hal_tcl_gse_cmd_info0_gse_ctrl genmask(11, 8) +#define hal_tcl_gse_cmd_info0_gse_sel bit(12) +#define hal_tcl_gse_cmd_info0_status_dest_ring_id bit(13) +#define hal_tcl_gse_cmd_info0_swap bit(14) + +#define hal_tcl_gse_cmd_info1_ring_id genmask(27, 20) +#define hal_tcl_gse_cmd_info1_looping_count genmask(31, 28) + +struct hal_tcl_gse_cmd { + u32 ctrl_buf_addr_lo; + u32 info0; + u32 meta_data[2]; + u32 rsvd0[2]; + u32 info1; +} __packed; + +/* hal_tcl_gse_cmd + * + * ctrl_buf_addr_lo, ctrl_buf_addr_hi + * address of a control buffer containing additional info needed + * for this command execution. + * + * gse_ctrl + * gse control operations. this includes cache operations and table + * entry statistics read/clear operation. values are defined in + * enum %hal_tcl_gse_ctrl. + * + * gse_sel + * to select the ase/fse to do the operation mention by gse_ctrl. + * 0: fse select 1: ase select + * + * status_destination_ring_id + * tcl status ring to which the gse status needs to be send. + * + * swap + * bit to enable byte swapping of contents of buffer. 
+ * + * meta_data + * meta data to be returned in the status descriptor + */ + +enum hal_tcl_cache_op_res { + hal_tcl_cache_op_res_done, + hal_tcl_cache_op_res_not_found, + hal_tcl_cache_op_res_timeout, +}; + +#define hal_tcl_status_ring_info0_gse_ctrl genmask(3, 0) +#define hal_tcl_status_ring_info0_gse_sel bit(4) +#define hal_tcl_status_ring_info0_cache_op_res genmask(6, 5) +#define hal_tcl_status_ring_info0_msdu_cnt genmask(31, 8) + +#define hal_tcl_status_ring_info1_hash_idx genmask(19, 0) + +#define hal_tcl_status_ring_info2_ring_id genmask(27, 20) +#define hal_tcl_status_ring_info2_looping_count genmask(31, 28) + +struct hal_tcl_status_ring { + u32 info0; + u32 msdu_byte_count; + u32 msdu_timestamp; + u32 meta_data[2]; + u32 info1; + u32 rsvd0; + u32 info2; +} __packed; + +/* hal_tcl_status_ring + * + * gse_ctrl + * gse control operations. this includes cache operations and table + * entry statistics read/clear operation. values are defined in + * enum %hal_tcl_gse_ctrl. + * + * gse_sel + * to select the ase/fse to do the operation mention by gse_ctrl. + * 0: fse select 1: ase select + * + * cache_op_res + * cache operation result. values are defined in enum + * %hal_tcl_cache_op_res_. + * + * msdu_cnt + * msdu_byte_count + * msdu count of entry and msdu byte count for entry 1. + * + * hash_indx + * hash value of the entry in case of search failed or disabled. 
+ */ + +#define hal_ce_src_desc_addr_info_addr_hi genmask(7, 0) +#define hal_ce_src_desc_addr_info_hash_en bit(8) +#define hal_ce_src_desc_addr_info_byte_swap bit(9) +#define hal_ce_src_desc_addr_info_dest_swap bit(10) +#define hal_ce_src_desc_addr_info_gather bit(11) +#define hal_ce_src_desc_addr_info_len genmask(31, 16) + +#define hal_ce_src_desc_meta_info_data genmask(15, 0) + +#define hal_ce_src_desc_flags_ring_id genmask(27, 20) +#define hal_ce_src_desc_flags_loop_cnt hal_srng_desc_loop_cnt + +struct hal_ce_srng_src_desc { + u32 buffer_addr_low; + u32 buffer_addr_info; /* %hal_ce_src_desc_addr_info_ */ + u32 meta_info; /* %hal_ce_src_desc_meta_info_ */ + u32 flags; /* %hal_ce_src_desc_flags_ */ +} __packed; + +/* + * hal_ce_srng_src_desc + * + * buffer_addr_lo + * lsb 32 bits of the 40 bit pointer to the source buffer + * + * buffer_addr_hi + * msb 8 bits of the 40 bit pointer to the source buffer + * + * toeplitz_en + * enable generation of 32-bit toeplitz-lfsr hash for + * data transfer. in case of gather field in first source + * ring entry of the gather copy cycle in taken into account. + * + * src_swap + * treats source memory organization as big-endian. for + * each dword read (4 bytes), the byte 0 is swapped with byte 3 + * and byte 1 is swapped with byte 2. + * in case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * dest_swap + * treats destination memory organization as big-endian. + * for each dword write (4 bytes), the byte 0 is swapped with + * byte 3 and byte 1 is swapped with byte 2. + * in case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * gather + * enables gather of multiple copy engine source + * descriptors to one destination. + * + * ce_res_0 + * reserved + * + * + * length + * length of the buffer in units of octets of the current + * descriptor + * + * fw_metadata + * meta data used by fw. 
+ * in case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * ce_res_1 + * reserved + * + * ce_res_2 + * reserved + * + * ring_id + * the buffer pointer ring id. + * 0 refers to the idle ring + * 1 - n refers to other rings + * helps with debugging when dumping ring contents. + * + * looping_count + * a count value that indicates the number of times the + * producer of entries into the ring has looped around the + * ring. + * + * at initialization time, this value is set to 0. on the + * first loop, this value is set to 1. after the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * in case sw is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. this eliminates the need to + * check where the head pointer' of the ring is located once + * the sw starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * also note that sw if it wants only needs to look at the + * lsb bit of this count value. + */ + +#define hal_ce_dest_desc_addr_info_addr_hi genmask(7, 0) +#define hal_ce_dest_desc_addr_info_ring_id genmask(27, 20) +#define hal_ce_dest_desc_addr_info_loop_cnt hal_srng_desc_loop_cnt + +struct hal_ce_srng_dest_desc { + u32 buffer_addr_low; + u32 buffer_addr_info; /* %hal_ce_dest_desc_addr_info_ */ +} __packed; + +/* hal_ce_srng_dest_desc + * + * dst_buffer_low + * lsb 32 bits of the 40 bit pointer to the destination + * buffer + * + * dst_buffer_high + * msb 8 bits of the 40 bit pointer to the destination + * buffer + * + * ce_res_4 + * reserved + * + * ring_id + * the buffer pointer ring id. + * 0 refers to the idle ring + * 1 - n refers to other rings + * helps with debugging when dumping ring contents. 
+ * + * looping_count + * a count value that indicates the number of times the + * producer of entries into the ring has looped around the + * ring. + * + * at initialization time, this value is set to 0. on the + * first loop, this value is set to 1. after the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * in case sw is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. this eliminates the need to + * check where the head pointer' of the ring is located once + * the sw starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * also note that sw if it wants only needs to look at the + * lsb bit of this count value. + */ + +#define hal_ce_dst_status_desc_flags_hash_en bit(8) +#define hal_ce_dst_status_desc_flags_byte_swap bit(9) +#define hal_ce_dst_status_desc_flags_dest_swap bit(10) +#define hal_ce_dst_status_desc_flags_gather bit(11) +#define hal_ce_dst_status_desc_flags_len genmask(31, 16) + +#define hal_ce_dst_status_desc_meta_info_data genmask(7, 0) +#define hal_ce_dst_status_desc_meta_info_ring_id genmask(27, 20) +#define hal_ce_dst_status_desc_meta_info_loop_cnt hal_srng_desc_loop_cnt + +struct hal_ce_srng_dst_status_desc { + u32 flags; /* %hal_ce_dst_status_desc_flags_ */ + u32 toeplitz_hash0; + u32 toeplitz_hash1; + u32 meta_info; /* hal_ce_dst_status_desc_meta_info_ */ +} __packed; + +/* hal_ce_srng_dst_status_desc + * + * ce_res_5 + * reserved + * + * toeplitz_en + * + * src_swap + * source memory buffer swapped + * + * dest_swap + * destination memory buffer swapped + * + * gather + * gather of multiple copy engine source descriptors to one + * destination enabled + * + * ce_res_6 + * reserved + * + * length + * sum of all the lengths of the source descriptor in the + * gather chain + * + * toeplitz_hash_0 + * 32 ls bits of 64 bit toeplitz lfsr hash 
result + * + * toeplitz_hash_1 + * 32 ms bits of 64 bit toeplitz lfsr hash result + * + * fw_metadata + * meta data used by fw + * in case of gather field in first source ring entry of + * the gather copy cycle in taken into account. + * + * ce_res_7 + * reserved + * + * ring_id + * the buffer pointer ring id. + * 0 refers to the idle ring + * 1 - n refers to other rings + * helps with debugging when dumping ring contents. + * + * looping_count + * a count value that indicates the number of times the + * producer of entries into the ring has looped around the + * ring. + * + * at initialization time, this value is set to 0. on the + * first loop, this value is set to 1. after the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * in case sw is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. this eliminates the need to + * check where the head pointer' of the ring is located once + * the sw starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * also note that sw if it wants only needs to look at the + * lsb bit of this count value. 
+ */ + +#define hal_tx_rate_stats_info0_valid bit(0) +#define hal_tx_rate_stats_info0_bw genmask(2, 1) +#define hal_tx_rate_stats_info0_pkt_type genmask(6, 3) +#define hal_tx_rate_stats_info0_stbc bit(7) +#define hal_tx_rate_stats_info0_ldpc bit(8) +#define hal_tx_rate_stats_info0_sgi genmask(10, 9) +#define hal_tx_rate_stats_info0_mcs genmask(14, 11) +#define hal_tx_rate_stats_info0_ofdma_tx bit(15) +#define hal_tx_rate_stats_info0_tones_in_ru genmask(27, 16) + +enum hal_tx_rate_stats_bw { + hal_tx_rate_stats_bw_20, + hal_tx_rate_stats_bw_40, + hal_tx_rate_stats_bw_80, + hal_tx_rate_stats_bw_160, +}; + +enum hal_tx_rate_stats_pkt_type { + hal_tx_rate_stats_pkt_type_11a, + hal_tx_rate_stats_pkt_type_11b, + hal_tx_rate_stats_pkt_type_11n, + hal_tx_rate_stats_pkt_type_11ac, + hal_tx_rate_stats_pkt_type_11ax, +}; + +enum hal_tx_rate_stats_sgi { + hal_tx_rate_stats_sgi_08us, + hal_tx_rate_stats_sgi_04us, + hal_tx_rate_stats_sgi_16us, + hal_tx_rate_stats_sgi_32us, +}; + +struct hal_tx_rate_stats { + u32 info0; + u32 tsf; +} __packed; + +struct hal_wbm_link_desc { + struct ath11k_buffer_addr buf_addr_info; +} __packed; + +/* hal_wbm_link_desc + * + * producer: wbm + * consumer: wbm + * + * buf_addr_info + * details of the physical address of a buffer or msdu + * link descriptor. 
+ */ + +enum hal_wbm_rel_src_module { + hal_wbm_rel_src_module_tqm, + hal_wbm_rel_src_module_rxdma, + hal_wbm_rel_src_module_reo, + hal_wbm_rel_src_module_fw, + hal_wbm_rel_src_module_sw, +}; + +enum hal_wbm_rel_desc_type { + hal_wbm_rel_desc_type_rel_msdu, + hal_wbm_rel_desc_type_msdu_link, + hal_wbm_rel_desc_type_mpdu_link, + hal_wbm_rel_desc_type_msdu_ext, + hal_wbm_rel_desc_type_queue_ext, +}; + +/* hal_wbm_rel_desc_type + * + * msdu_buffer + * the address points to an msdu buffer + * + * msdu_link_descriptor + * the address points to an tx msdu link descriptor + * + * mpdu_link_descriptor + * the address points to an mpdu link descriptor + * + * msdu_ext_descriptor + * the address points to an msdu extension descriptor + * + * queue_ext_descriptor + * the address points to an tqm queue extension descriptor. wbm should + * treat this is the same way as a link descriptor. + */ + +enum hal_wbm_rel_bm_act { + hal_wbm_rel_bm_act_put_in_idle, + hal_wbm_rel_bm_act_rel_msdu, +}; + +/* hal_wbm_rel_bm_act + * + * put_in_idle_list + * put the buffer or descriptor back in the idle list. in case of msdu or + * mdpu link descriptor, bm does not need to check to release any + * individual msdu buffers. + * + * release_msdu_list + * this bm action can only be used in combination with desc_type being + * msdu_link_descriptor. field first_msdu_index points out which msdu + * pointer in the msdu link descriptor is the first of an mpdu that is + * released. bm shall release all the msdu buffers linked to this first + * msdu buffer pointer. all related msdu buffer pointer entries shall be + * set to value 0, which represents the 'null' pointer. when all msdu + * buffer pointers in the msdu link descriptor are 'null', the msdu link + * descriptor itself shall also be released. 
+ */ + +#define hal_wbm_release_info0_rel_src_module genmask(2, 0) +#define hal_wbm_release_info0_bm_action genmask(5, 3) +#define hal_wbm_release_info0_desc_type genmask(8, 6) +#define hal_wbm_release_info0_first_msdu_idx genmask(12, 9) +#define hal_wbm_release_info0_tqm_release_reason genmask(16, 13) +#define hal_wbm_release_info0_rxdma_push_reason genmask(18, 17) +#define hal_wbm_release_info0_rxdma_error_code genmask(23, 19) +#define hal_wbm_release_info0_reo_push_reason genmask(25, 24) +#define hal_wbm_release_info0_reo_error_code genmask(30, 26) +#define hal_wbm_release_info0_wbm_internal_error bit(31) + +#define hal_wbm_release_info1_tqm_status_number genmask(23, 0) +#define hal_wbm_release_info1_transmit_count genmask(30, 24) + +#define hal_wbm_release_info2_ack_frame_rssi genmask(7, 0) +#define hal_wbm_release_info2_sw_rel_details_valid bit(8) +#define hal_wbm_release_info2_first_msdu bit(9) +#define hal_wbm_release_info2_last_msdu bit(10) +#define hal_wbm_release_info2_msdu_in_amsdu bit(11) +#define hal_wbm_release_info2_fw_tx_notif_frame bit(12) +#define hal_wbm_release_info2_buffer_timestamp genmask(31, 13) + +#define hal_wbm_release_info3_peer_id genmask(15, 0) +#define hal_wbm_release_info3_tid genmask(19, 16) +#define hal_wbm_release_info3_ring_id genmask(27, 20) +#define hal_wbm_release_info3_looping_count genmask(31, 28) + +#define hal_wbm_rel_htt_tx_comp_info0_status genmask(12, 9) +#define hal_wbm_rel_htt_tx_comp_info0_reinj_reason genmask(16, 13) +#define hal_wbm_rel_htt_tx_comp_info0_exp_frame bit(17) + +struct hal_wbm_release_ring { + struct ath11k_buffer_addr buf_addr_info; + u32 info0; + u32 info1; + u32 info2; + struct hal_tx_rate_stats rate_stats; + u32 info3; +} __packed; + +/* hal_wbm_release_ring + * + * producer: sw/tqm/rxdma/reo/switch + * consumer: wbm/sw/fw + * + * htt tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5 + * for software based completions. 
+ * + * buf_addr_info + * details of the physical address of the buffer or link descriptor. + * + * release_source_module + * indicates which module initiated the release of this buffer/descriptor. + * values are defined in enum %hal_wbm_rel_src_module_. + * + * bm_action + * field only valid when the field return_buffer_manager in + * released_buff_or_desc_addr_info indicates: + * wbm_idle_buf_list / wbm_idle_desc_list + * values are defined in enum %hal_wbm_rel_bm_act_. + * + * buffer_or_desc_type + * field only valid when wbm is marked as the return_buffer_manager in + * the released_buffer_address_info. indicates that type of buffer or + * descriptor is being released. values are in enum %hal_wbm_rel_desc_type. + * + * first_msdu_index + * field only valid for the bm_action release_msdu_list. the index of the + * first msdu in an msdu link descriptor all belonging to the same mpdu. + * + * tqm_release_reason + * field only valid when release_source_module is set to release_source_tqm + * release reasons are defined in enum %hal_wbm_tqm_rel_reason_. + * + * rxdma_push_reason + * reo_push_reason + * indicates why rxdma/reo pushed the frame to this ring and values are + * defined in enum %hal_reo_dest_ring_push_reason_. + * + * rxdma_error_code + * field only valid when 'rxdma_push_reason' set to 'error_detected'. + * values are defined in enum %hal_reo_entr_ring_rxdma_ecode_. + * + * reo_error_code + * field only valid when 'reo_push_reason' set to 'error_detected'. values + * are defined in enum %hal_reo_dest_ring_error_code_. + * + * wbm_internal_error + * is set when wbm got a buffer pointer but the action was to push it to + * the idle link descriptor ring or do link related activity or + * is set when wbm got a link buffer pointer but the action was to push it + * to the buffer descriptor ring. + * + * tqm_status_number + * the value in this field is equal to tqm_cmd_number in tqm command. it is + * used to correlate the statu with tqm commands. 
only valid when + * release_source_module is tqm. + * + * transmit_count + * the number of times the frame has been transmitted, valid only when + * release source in tqm. + * + * ack_frame_rssi + * this field is only valid when the source is tqm. if this frame is + * removed as the result of the reception of an ack or ba, this field + * indicates the rssi of the received ack or ba frame. + * + * sw_release_details_valid + * this is set when wmb got a 'release_msdu_list' command from tqm and + * return buffer manager is not wmb. wbm will then de-aggregate all msdus + * and pass them one at a time on to the 'buffer owner'. + * + * first_msdu + * field only valid when sw_release_details_valid is set. + * when set, this msdu is the first msdu pointed to in the + * 'release_msdu_list' command. + * + * last_msdu + * field only valid when sw_release_details_valid is set. + * when set, this msdu is the last msdu pointed to in the + * 'release_msdu_list' command. + * + * msdu_part_of_amsdu + * field only valid when sw_release_details_valid is set. + * when set, this msdu was part of an a-msdu in mpdu + * + * fw_tx_notify_frame + * field only valid when sw_release_details_valid is set. + * + * buffer_timestamp + * field only valid when sw_release_details_valid is set. + * this is the buffer_timestamp field from the + * timestamp in units of 1024 us + * + * struct hal_tx_rate_stats rate_stats + * details for command execution tracking purposes. + * + * sw_peer_id + * tid + * field only valid when release_source_module is set to + * release_source_tqm + * + * 1) release of msdu buffer due to drop_frame = 1. 
flow is + * not fetched and hence sw_peer_id and tid = 0 + * + * buffer_or_desc_type = e_num 0 + * msdu_rel_buffertqm_release_reason = e_num 1 + * tqm_rr_rem_cmd_rem + * + * 2) release of msdu buffer due to flow is not fetched and + * hence sw_peer_id and tid = 0 + * + * buffer_or_desc_type = e_num 0 + * msdu_rel_buffertqm_release_reason = e_num 1 + * tqm_rr_rem_cmd_rem + * + * 3) release of msdu link due to remove_mpdu or acked_mpdu + * command. + * + * buffer_or_desc_type = e_num1 + * msdu_link_descriptortqm_release_reason can be:e_num 1 + * tqm_rr_rem_cmd_reme_num 2 tqm_rr_rem_cmd_tx + * e_num 3 tqm_rr_rem_cmd_notxe_num 4 tqm_rr_rem_cmd_aged + * + * this field represents the tid from the tx_msdu_flow + * descriptor or tx_mpdu_queue descriptor + * + * rind_id + * for debugging. + * this field is filled in by the srng module. + * it help to identify the ring that is being looked + * + * looping_count + * a count value that indicates the number of times the + * producer of entries into the buffer manager ring has looped + * around the ring. + * + * at initialization time, this value is set to 0. on the + * first loop, this value is set to 1. after the max value is + * reached allowed by the number of bits for this field, the + * count value continues with 0 again. + * + * in case sw is the consumer of the ring entries, it can + * use this field to figure out up to where the producer of + * entries has created new entries. this eliminates the need to + * check where the head pointer' of the ring is located once + * the sw starts processing an interrupt indicating that new + * entries have been put into this ring... + * + * also note that sw if it wants only needs to look at the + * lsb bit of this count value. 
+ */ + +/** + * enum hal_wbm_tqm_rel_reason - tqm release reason code + * @hal_wbm_tqm_rel_reason_frame_acked: ack or back received for the frame + * @hal_wbm_tqm_rel_reason_cmd_remove_mpdu: command remove_mpdus initiated by sw + * @hal_wbm_tqm_rel_reason_cmd_remove_tx: command remove transmitted_mpdus + * initiated by sw. + * @hal_wbm_tqm_rel_reason_cmd_remove_notx: command remove untransmitted_mpdus + * initiated by sw. + * @hal_wbm_tqm_rel_reason_cmd_remove_aged_frames: command remove aged msdus or + * mpdus. + * @hal_wbm_tqm_rel_reason_cmd_remove_reseaon1: remove command initiated by + * fw with fw_reason1. + * @hal_wbm_tqm_rel_reason_cmd_remove_reseaon2: remove command initiated by + * fw with fw_reason2. + * @hal_wbm_tqm_rel_reason_cmd_remove_reseaon3: remove command initiated by + * fw with fw_reason3. + */ +enum hal_wbm_tqm_rel_reason { + hal_wbm_tqm_rel_reason_frame_acked, + hal_wbm_tqm_rel_reason_cmd_remove_mpdu, + hal_wbm_tqm_rel_reason_cmd_remove_tx, + hal_wbm_tqm_rel_reason_cmd_remove_notx, + hal_wbm_tqm_rel_reason_cmd_remove_aged_frames, + hal_wbm_tqm_rel_reason_cmd_remove_reseaon1, + hal_wbm_tqm_rel_reason_cmd_remove_reseaon2, + hal_wbm_tqm_rel_reason_cmd_remove_reseaon3, +}; + +struct hal_wbm_buffer_ring { + struct ath11k_buffer_addr buf_addr_info; +}; + +enum hal_desc_owner { + hal_desc_owner_wbm, + hal_desc_owner_sw, + hal_desc_owner_tqm, + hal_desc_owner_rxdma, + hal_desc_owner_reo, + hal_desc_owner_switch, +}; + +enum hal_desc_buf_type { + hal_desc_buf_type_tx_msdu_link, + hal_desc_buf_type_tx_mpdu_link, + hal_desc_buf_type_tx_mpdu_queue_head, + hal_desc_buf_type_tx_mpdu_queue_ext, + hal_desc_buf_type_tx_flow, + hal_desc_buf_type_tx_buffer, + hal_desc_buf_type_rx_msdu_link, + hal_desc_buf_type_rx_mpdu_link, + hal_desc_buf_type_rx_reo_queue, + hal_desc_buf_type_rx_reo_queue_ext, + hal_desc_buf_type_rx_buffer, + hal_desc_buf_type_idle_link, +}; + +#define hal_desc_reo_owned 4 +#define hal_desc_reo_queue_desc 8 +#define hal_desc_reo_queue_ext_desc 
9 +#define hal_desc_reo_non_qos_tid 16 + +#define hal_desc_hdr_info0_owner genmask(3, 0) +#define hal_desc_hdr_info0_buf_type genmask(7, 4) +#define hal_desc_hdr_info0_dbg_reserved genmask(31, 8) + +struct hal_desc_header { + u32 info0; +} __packed; + +struct hal_rx_mpdu_link_ptr { + struct ath11k_buffer_addr addr_info; +} __packed; + +struct hal_rx_msdu_details { + struct ath11k_buffer_addr buf_addr_info; + struct rx_msdu_desc rx_msdu_info; +} __packed; + +#define hal_rx_msdu_lnk_info0_rx_queue_number genmask(15, 0) +#define hal_rx_msdu_lnk_info0_first_msdu_lnk bit(16) + +struct hal_rx_msdu_link { + struct hal_desc_header desc_hdr; + struct ath11k_buffer_addr buf_addr_info; + u32 info0; + u32 pn[4]; + struct hal_rx_msdu_details msdu_link[6]; +} __packed; + +struct hal_rx_reo_queue_ext { + struct hal_desc_header desc_hdr; + u32 rsvd; + struct hal_rx_mpdu_link_ptr mpdu_link[15]; +} __packed; + +/* hal_rx_reo_queue_ext + * consumer: reo + * producer: reo + * + * descriptor_header + * details about which module owns this struct. + * + * mpdu_link + * pointer to the next mpdu_link descriptor in the mpdu queue. 
+ */ + +enum hal_rx_reo_queue_pn_size { + hal_rx_reo_queue_pn_size_24, + hal_rx_reo_queue_pn_size_48, + hal_rx_reo_queue_pn_size_128, +}; + +#define hal_rx_reo_queue_rx_queue_number genmask(15, 0) + +#define hal_rx_reo_queue_info0_vld bit(0) +#define hal_rx_reo_queue_info0_assoc_lnk_desc_counter genmask(2, 1) +#define hal_rx_reo_queue_info0_dis_dup_detection bit(3) +#define hal_rx_reo_queue_info0_soft_reorder_en bit(4) +#define hal_rx_reo_queue_info0_ac genmask(6, 5) +#define hal_rx_reo_queue_info0_bar bit(7) +#define hal_rx_reo_queue_info0_retry bit(8) +#define hal_rx_reo_queue_info0_check_2k_mode bit(9) +#define hal_rx_reo_queue_info0_oor_mode bit(10) +#define hal_rx_reo_queue_info0_ba_window_size genmask(18, 11) +#define hal_rx_reo_queue_info0_pn_check bit(19) +#define hal_rx_reo_queue_info0_even_pn bit(20) +#define hal_rx_reo_queue_info0_uneven_pn bit(21) +#define hal_rx_reo_queue_info0_pn_handle_enable bit(22) +#define hal_rx_reo_queue_info0_pn_size genmask(24, 23) +#define hal_rx_reo_queue_info0_ignore_ampdu_flg bit(25) + +#define hal_rx_reo_queue_info1_svld bit(0) +#define hal_rx_reo_queue_info1_ssn genmask(12, 1) +#define hal_rx_reo_queue_info1_current_idx genmask(20, 13) +#define hal_rx_reo_queue_info1_seq_2k_err bit(21) +#define hal_rx_reo_queue_info1_pn_err bit(22) +#define hal_rx_reo_queue_info1_pn_valid bit(31) + +#define hal_rx_reo_queue_info2_mpdu_count genmask(6, 0) +#define hal_rx_reo_queue_info2_msdu_count (31, 7) + +#define hal_rx_reo_queue_info3_timeout_count genmask(9, 4) +#define hal_rx_reo_queue_info3_fwd_due_to_bar_cnt genmask(15, 10) +#define hal_rx_reo_queue_info3_duplicate_count genmask(31, 10) + +#define hal_rx_reo_queue_info4_frame_in_ord_count genmask(23, 0) +#define hal_rx_reo_queue_info4_bar_recvd_count genmask(31, 24) + +#define hal_rx_reo_queue_info5_late_rx_mpdu_count genmask(11, 0) +#define hal_rx_reo_queue_info5_window_jump_2k genmask(15, 12) +#define hal_rx_reo_queue_info5_hole_count genmask(31, 16) + +struct hal_rx_reo_queue { 
+ struct hal_desc_header desc_hdr; + u32 rx_queue_num; + u32 info0; + u32 info1; + u32 pn[4]; + u32 last_rx_enqueue_timestamp; + u32 last_rx_dequeue_timestamp; + u32 next_aging_queue[2]; + u32 prev_aging_queue[2]; + u32 rx_bitmap[8]; + u32 info2; + u32 info3; + u32 info4; + u32 processed_mpdus; + u32 processed_msdus; + u32 processed_total_bytes; + u32 info5; + u32 rsvd[3]; + struct hal_rx_reo_queue_ext ext_desc[0]; +} __packed; + +/* hal_rx_reo_queue + * + * descriptor_header + * details about which module owns this struct. note that sub field + * buffer_type shall be set to receive_reo_queue_descriptor. + * + * receive_queue_number + * indicates the mpdu queue id to which this mpdu link descriptor belongs. + * + * vld + * valid bit indicating a session is established and the queue descriptor + * is valid. + * associated_link_descriptor_counter + * indicates which of the 3 link descriptor counters shall be incremented + * or decremented when link descriptors are added or removed from this + * flow queue. + * disable_duplicate_detection + * when set, do not perform any duplicate detection. + * soft_reorder_enable + * when set, reo has been instructed to not perform the actual re-ordering + * of frames for this queue, but just to insert the reorder opcodes. + * ac + * indicates the access category of the queue descriptor. + * bar + * indicates if bar has been received. + * retry + * retry bit is checked if this bit is set. + * chk_2k_mode + * indicates what type of operation is expected from reo when the received + * frame sn falls within the 2k window. + * oor_mode + * indicates what type of operation is expected when the received frame + * falls within the oor window. + * ba_window_size + * indicates the negotiated (window size + 1). max of 256 bits. + * + * a value 255 means 256 bitmap, 63 means 64 bitmap, 0 (means non-ba + * session, with window size of 0). the 3 values here are the main values + * validated, but other values should work as well. 
+ * + * a ba window size of 0 (=> one frame entry bitmat), means that there is + * no additional rx_reo_queue_ext desc. following rx_reo_queue in memory. + * a ba window size of 1 - 105, means that there is 1 rx_reo_queue_ext. + * a ba window size of 106 - 210, means that there are 2 rx_reo_queue_ext. + * a ba window size of 211 - 256, means that there are 3 rx_reo_queue_ext. + * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable, + * pn_size + * reo shall perform the pn increment check, even number check, uneven + * number check, pn error check and size of the pn field check. + * ignore_ampdu_flag + * reo shall ignore the ampdu_flag on entrance descriptor for this queue. + * + * svld + * sequence number in next field is valid one. + * ssn + * starting sequence number of the session. + * current_index + * points to last forwarded packet + * seq_2k_error_detected_flag + * reo has detected a 2k error jump in the sequence number and from that + * moment forward, all new frames are forwarded directly to fw, without + * duplicate detect, reordering, etc. + * pn_error_detected_flag + * reo has detected a pn error. 
+ */ + +#define hal_reo_upd_rx_queue_info0_queue_addr_hi genmask(7, 0) +#define hal_reo_upd_rx_queue_info0_upd_rx_queue_num bit(8) +#define hal_reo_upd_rx_queue_info0_upd_vld bit(9) +#define hal_reo_upd_rx_queue_info0_upd_assoc_lnk_desc_cnt bit(10) +#define hal_reo_upd_rx_queue_info0_upd_dis_dup_detection bit(11) +#define hal_reo_upd_rx_queue_info0_upd_soft_reorder_en bit(12) +#define hal_reo_upd_rx_queue_info0_upd_ac bit(13) +#define hal_reo_upd_rx_queue_info0_upd_bar bit(14) +#define hal_reo_upd_rx_queue_info0_upd_retry bit(15) +#define hal_reo_upd_rx_queue_info0_upd_check_2k_mode bit(16) +#define hal_reo_upd_rx_queue_info0_upd_oor_mode bit(17) +#define hal_reo_upd_rx_queue_info0_upd_ba_window_size bit(18) +#define hal_reo_upd_rx_queue_info0_upd_pn_check bit(19) +#define hal_reo_upd_rx_queue_info0_upd_even_pn bit(20) +#define hal_reo_upd_rx_queue_info0_upd_uneven_pn bit(21) +#define hal_reo_upd_rx_queue_info0_upd_pn_handle_enable bit(22) +#define hal_reo_upd_rx_queue_info0_upd_pn_size bit(23) +#define hal_reo_upd_rx_queue_info0_upd_ignore_ampdu_flg bit(24) +#define hal_reo_upd_rx_queue_info0_upd_svld bit(25) +#define hal_reo_upd_rx_queue_info0_upd_ssn bit(26) +#define hal_reo_upd_rx_queue_info0_upd_seq_2k_err bit(27) +#define hal_reo_upd_rx_queue_info0_upd_pn_err bit(28) +#define hal_reo_upd_rx_queue_info0_upd_pn_valid bit(29) +#define hal_reo_upd_rx_queue_info0_upd_pn bit(30) + +#define hal_reo_upd_rx_queue_info1_rx_queue_number genmask(15, 0) +#define hal_reo_upd_rx_queue_info1_vld bit(16) +#define hal_reo_upd_rx_queue_info1_assoc_lnk_desc_counter genmask(18, 17) +#define hal_reo_upd_rx_queue_info1_dis_dup_detection bit(19) +#define hal_reo_upd_rx_queue_info1_soft_reorder_en bit(20) +#define hal_reo_upd_rx_queue_info1_ac genmask(22, 21) +#define hal_reo_upd_rx_queue_info1_bar bit(23) +#define hal_reo_upd_rx_queue_info1_retry bit(24) +#define hal_reo_upd_rx_queue_info1_check_2k_mode bit(25) +#define hal_reo_upd_rx_queue_info1_oor_mode bit(26) +#define 
hal_reo_upd_rx_queue_info1_pn_check bit(27) +#define hal_reo_upd_rx_queue_info1_even_pn bit(28) +#define hal_reo_upd_rx_queue_info1_uneven_pn bit(29) +#define hal_reo_upd_rx_queue_info1_pn_handle_enable bit(30) +#define hal_reo_upd_rx_queue_info1_ignore_ampdu_flg bit(31) + +#define hal_reo_upd_rx_queue_info2_ba_window_size genmask(7, 0) +#define hal_reo_upd_rx_queue_info2_pn_size genmask(9, 8) +#define hal_reo_upd_rx_queue_info2_svld bit(10) +#define hal_reo_upd_rx_queue_info2_ssn genmask(22, 11) +#define hal_reo_upd_rx_queue_info2_seq_2k_err bit(23) +#define hal_reo_upd_rx_queue_info2_pn_err bit(24) +#define hal_reo_upd_rx_queue_info2_pn_valid bit(25) + +struct hal_reo_update_rx_queue { + struct hal_reo_cmd_hdr cmd; + u32 queue_addr_lo; + u32 info0; + u32 info1; + u32 info2; + u32 pn[4]; +} __packed; + +#define hal_reo_unblock_cache_info0_unblk_cache bit(0) +#define hal_reo_unblock_cache_info0_resource_idx genmask(2, 1) + +struct hal_reo_unblock_cache { + struct hal_reo_cmd_hdr cmd; + u32 info0; + u32 rsvd[7]; +} __packed; + +enum hal_reo_exec_status { + hal_reo_exec_status_success, + hal_reo_exec_status_blocked, + hal_reo_exec_status_failed, + hal_reo_exec_status_resource_blocked, +}; + +#define hal_reo_status_hdr_info0_status_num genmask(15, 0) +#define hal_reo_status_hdr_info0_exec_time genmask(25, 16) +#define hal_reo_status_hdr_info0_exec_status genmask(27, 26) + +struct hal_reo_status_hdr { + u32 info0; + u32 timestamp; +} __packed; + +/* hal_reo_status_hdr + * producer: reo + * consumer: sw + * + * status_num + * the value in this field is equal to value of the reo command + * number. this field helps to correlate the statuses with the reo + * commands. + * + * execution_time (in us) + * the amount of time reo took to excecute the command. note that + * this time does not include the duration of the command waiting + * in the command ring, before the execution started. + * + * execution_status + * execution status of the command. 
values are defined in + * enum %hal_reo_exec_status_. + */ +#define hal_reo_get_queue_stats_status_info0_ssn genmask(11, 0) +#define hal_reo_get_queue_stats_status_info0_cur_idx genmask(19, 12) + +#define hal_reo_get_queue_stats_status_info1_mpdu_count genmask(6, 0) +#define hal_reo_get_queue_stats_status_info1_msdu_count genmask(31, 7) + +#define hal_reo_get_queue_stats_status_info2_timeout_count genmask(9, 4) +#define hal_reo_get_queue_stats_status_info2_fdtb_count genmask(15, 10) +#define hal_reo_get_queue_stats_status_info2_duplicate_count genmask(31, 16) + +#define hal_reo_get_queue_stats_status_info3_fio_count genmask(23, 0) +#define hal_reo_get_queue_stats_status_info3_bar_rcvd_cnt genmask(31, 24) + +#define hal_reo_get_queue_stats_status_info4_late_rx_mpdu genmask(11, 0) +#define hal_reo_get_queue_stats_status_info4_window_jmp2k genmask(15, 12) +#define hal_reo_get_queue_stats_status_info4_hole_count genmask(31, 16) + +#define hal_reo_get_queue_stats_status_info5_looping_cnt genmask(31, 28) + +struct hal_reo_get_queue_stats_status { + struct hal_reo_status_hdr hdr; + u32 info0; + u32 pn[4]; + u32 last_rx_enqueue_timestamp; + u32 last_rx_dequeue_timestamp; + u32 rx_bitmap[8]; + u32 info1; + u32 info2; + u32 info3; + u32 num_mpdu_frames; + u32 num_msdu_frames; + u32 total_bytes; + u32 info4; + u32 info5; +} __packed; + +/* hal_reo_get_queue_stats_status + * producer: reo + * consumer: sw + * + * status_hdr + * details that can link this status with the original command. it + * also contains info on how long reo took to execute this command. + * + * ssn + * starting sequence number of the session, this changes whenever + * window moves (can be filled by sw then maintained by reo). + * + * current_index + * points to last forwarded packet. + * + * pn + * bits of the pn number. + * + * last_rx_enqueue_timestamp + * last_rx_dequeue_timestamp + * timestamp of arrival of the last mpdu for this queue and + * timestamp of forwarding an mpdu accordingly. 
+ * + * rx_bitmap + * when a bit is set, the corresponding frame is currently held + * in the re-order queue. the bitmap is fully managed by hw. + * + * current_mpdu_count + * current_msdu_count + * the number of mpdus and msdus in the queue. + * + * timeout_count + * the number of times reo started forwarding frames even though + * there is a hole in the bitmap. forwarding reason is timeout. + * + * forward_due_to_bar_count + * the number of times reo started forwarding frames even though + * there is a hole in the bitmap. fwd reason is reception of bar. + * + * duplicate_count + * the number of duplicate frames that have been detected. + * + * frames_in_order_count + * the number of frames that have been received in order (without + * a hole that prevented them from being forwarded immediately). + * + * bar_received_count + * the number of times a bar frame is received. + * + * mpdu_frames_processed_count + * msdu_frames_processed_count + * the total number of mpdu/msdu frames that have been processed. + * + * total_bytes + * an approximation of the number of bytes received for this queue. + * + * late_receive_mpdu_count + * the number of mpdus received after the window had already moved + * on. the 'late' sequence window is defined as + * (window ssn - 256) - (window ssn - 1). + * + * window_jump_2k + * the number of times the window moved more than 2k + * + * hole_count + * the number of times a hole was created in the receive bitmap. + * + * looping_count + * a count value that indicates the number of times the producer of + * entries into this ring has looped around the ring. 
+ */ + +#define hal_reo_status_loop_cnt genmask(31, 28) + +#define hal_reo_flush_queue_info0_err_detected bit(0) +#define hal_reo_flush_queue_info0_rsvd genmask(31, 1) +#define hal_reo_flush_queue_info1_rsvd genmask(27, 0) + +struct hal_reo_flush_queue_status { + struct hal_reo_status_hdr hdr; + u32 info0; + u32 rsvd0[21]; + u32 info1; +} __packed; + +/* hal_reo_flush_queue_status + * producer: reo + * consumer: sw + * + * status_hdr + * details that can link this status with the original command. it + * also contains info on how long reo took to execute this command. + * + * error_detected + * status of blocking resource + * + * 0 - no error has been detected while executing this command + * 1 - error detected. the resource to be used for blocking was + * already in use. + * + * looping_count + * a count value that indicates the number of times the producer of + * entries into this ring has looped around the ring. + */ + +#define hal_reo_flush_cache_status_info0_is_err bit(0) +#define hal_reo_flush_cache_status_info0_block_err_code genmask(2, 1) +#define hal_reo_flush_cache_status_info0_flush_status_hit bit(8) +#define hal_reo_flush_cache_status_info0_flush_desc_type genmask(11, 9) +#define hal_reo_flush_cache_status_info0_flush_client_id genmask(15, 12) +#define hal_reo_flush_cache_status_info0_flush_err genmask(17, 16) +#define hal_reo_flush_cache_status_info0_flush_count genmask(25, 18) + +struct hal_reo_flush_cache_status { + struct hal_reo_status_hdr hdr; + u32 info0; + u32 rsvd0[21]; + u32 info1; +} __packed; + +/* hal_reo_flush_cache_status + * producer: reo + * consumer: sw + * + * status_hdr + * details that can link this status with the original command. it + * also contains info on how long reo took to execute this command. 
+ * + * error_detected + * status for blocking resource handling + * + * 0 - no error has been detected while executing this command + * 1 - an error in the blocking resource management was detected + * + * block_error_details + * only valid when error_detected is set + * + * 0 - no blocking related errors found + * 1 - blocking resource is already in use + * 2 - resource requested to be unblocked, was not blocked + * + * cache_controller_flush_status_hit + * the status that the cache controller returned on executing the + * flush command. + * + * 0 - miss; 1 - hit + * + * cache_controller_flush_status_desc_type + * flush descriptor type + * + * cache_controller_flush_status_client_id + * module who made the flush request + * + * in reo, this is always 0 + * + * cache_controller_flush_status_error + * error condition + * + * 0 - no error found + * 1 - hw interface is still busy + * 2 - line currently locked. used for one line flush command + * 3 - at least one line is still locked. + * used for cache flush command. + * + * cache_controller_flush_count + * the number of lines that were actually flushed out + * + * looping_count + * a count value that indicates the number of times the producer of + * entries into this ring has looped around the ring. + */ + +#define hal_reo_unblock_cache_status_info0_is_err bit(0) +#define hal_reo_unblock_cache_status_info0_type bit(1) + +struct hal_reo_unblock_cache_status { + struct hal_reo_status_hdr hdr; + u32 info0; + u32 rsvd0[21]; + u32 info1; +} __packed; + +/* hal_reo_unblock_cache_status + * producer: reo + * consumer: sw + * + * status_hdr + * details that can link this status with the original command. it + * also contains info on how long reo took to execute this command. + * + * error_detected + * 0 - no error has been detected while executing this command + * 1 - the blocking resource was not in use, and therefore it could + * not be unblocked. 
+ * + * unblock_type + * reference to the type of unblock command + * 0 - unblock a blocking resource + * 1 - the entire cache usage is unblock + * + * looping_count + * a count value that indicates the number of times the producer of + * entries into this ring has looped around the ring. + */ + +#define hal_reo_flush_timeout_status_info0_is_err bit(0) +#define hal_reo_flush_timeout_status_info0_list_empty bit(1) + +#define hal_reo_flush_timeout_status_info1_rel_desc_count genmask(15, 0) +#define hal_reo_flush_timeout_status_info1_fwd_buf_count genmask(31, 16) + +struct hal_reo_flush_timeout_list_status { + struct hal_reo_status_hdr hdr; + u32 info0; + u32 info1; + u32 rsvd0[20]; + u32 info2; +} __packed; + +/* hal_reo_flush_timeout_list_status + * producer: reo + * consumer: sw + * + * status_hdr + * details that can link this status with the original command. it + * also contains info on how long reo took to execute this command. + * + * error_detected + * 0 - no error has been detected while executing this command + * 1 - command not properly executed and returned with error + * + * timeout_list_empty + * when set, reo has depleted the timeout list and all entries are + * gone. + * + * release_desc_count + * producer: sw; consumer: reo + * the number of link descriptor released + * + * forward_buf_count + * producer: sw; consumer: reo + * the number of buffers forwarded to the reo destination rings + * + * looping_count + * a count value that indicates the number of times the producer of + * entries into this ring has looped around the ring. 
+ */ + +#define hal_reo_desc_thresh_status_info0_thresh_index genmask(1, 0) +#define hal_reo_desc_thresh_status_info1_link_desc_counter0 genmask(23, 0) +#define hal_reo_desc_thresh_status_info2_link_desc_counter1 genmask(23, 0) +#define hal_reo_desc_thresh_status_info3_link_desc_counter2 genmask(23, 0) +#define hal_reo_desc_thresh_status_info4_link_desc_counter_sum genmask(23, 0) + +struct hal_reo_desc_thresh_reached_status { + struct hal_reo_status_hdr hdr; + u32 info0; + u32 info1; + u32 info2; + u32 info3; + u32 info4; + u32 rsvd0[17]; + u32 info5; +} __packed; + +/* hal_reo_desc_thresh_reached_status + * producer: reo + * consumer: sw + * + * status_hdr + * details that can link this status with the original command. it + * also contains info on how long reo took to execute this command. + * + * threshold_index + * the index of the threshold register whose value got reached + * + * link_descriptor_counter0 + * link_descriptor_counter1 + * link_descriptor_counter2 + * link_descriptor_counter_sum + * value of the respective counters at generation of this message + * + * looping_count + * a count value that indicates the number of times the producer of + * entries into this ring has looped around the ring. + */ + +#endif /* ath11k_hal_desc_h */ diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include "ahb.h" +#include "debug.h" +#include "hal.h" +#include "hal_tx.h" +#include "hal_rx.h" +#include "hal_desc.h" + +static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr, + u8 owner, u8 buffer_type, u32 magic) +{ + hdr->info0 = field_prep(hal_desc_hdr_info0_owner, owner) | + field_prep(hal_desc_hdr_info0_buf_type, buffer_type); + + /* magic pattern in reserved bits for debugging */ + hdr->info0 |= field_prep(hal_desc_hdr_info0_dbg_reserved, magic); +} + +static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv, + struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_reo_get_queue_stats *desc; + + tlv->tl = field_prep(hal_tlv_hdr_tag, hal_reo_get_queue_stats) | + field_prep(hal_tlv_hdr_len, sizeof(*desc)); + + desc = (struct hal_reo_get_queue_stats *)tlv->value; + memset(&desc->queue_addr_lo, 0, + (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr))); + + desc->cmd.info0 &= ~hal_reo_cmd_hdr_info0_status_required; + if (cmd->flag & hal_reo_cmd_flg_need_status) + desc->cmd.info0 |= hal_reo_cmd_hdr_info0_status_required; + + desc->queue_addr_lo = cmd->addr_lo; + desc->info0 = field_prep(hal_reo_get_queue_stats_info0_queue_addr_hi, + cmd->addr_hi); + if (cmd->flag & hal_reo_cmd_flg_stats_clear) + desc->info0 |= hal_reo_get_queue_stats_info0_clear_stats; + + return field_get(hal_reo_cmd_hdr_info0_cmd_number, desc->cmd.info0); +} + +static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv, + struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_reo_flush_cache *desc; + u8 avail_slot = ffz(hal->avail_blk_resource); + + if (cmd->flag & hal_reo_cmd_flg_flush_block_later) { + if (avail_slot >= hal_max_avail_blk_res) + return -enospc; + + hal->current_blk_index = avail_slot; + } + + tlv->tl = field_prep(hal_tlv_hdr_tag, hal_reo_flush_cache) | + field_prep(hal_tlv_hdr_len, sizeof(*desc)); + + desc = (struct hal_reo_flush_cache *)tlv->value; + memset(&desc->cache_addr_lo, 0, + (sizeof(*desc) - sizeof(struct 
hal_reo_cmd_hdr))); + + desc->cmd.info0 &= ~hal_reo_cmd_hdr_info0_status_required; + if (cmd->flag & hal_reo_cmd_flg_need_status) + desc->cmd.info0 |= hal_reo_cmd_hdr_info0_status_required; + + desc->cache_addr_lo = cmd->addr_lo; + desc->info0 = field_prep(hal_reo_flush_cache_info0_cache_addr_hi, + cmd->addr_hi); + + if (cmd->flag & hal_reo_cmd_flg_flush_fwd_all_mpdus) + desc->info0 |= hal_reo_flush_cache_info0_fwd_all_mpdus; + + if (cmd->flag & hal_reo_cmd_flg_flush_block_later) { + desc->info0 |= hal_reo_flush_cache_info0_block_cache_usage; + desc->info0 |= + field_prep(hal_reo_flush_cache_info0_block_resrc_idx, + avail_slot); + } + + if (cmd->flag & hal_reo_cmd_flg_flush_no_inval) + desc->info0 |= hal_reo_flush_cache_info0_flush_wo_invalidate; + + if (cmd->flag & hal_reo_cmd_flg_flush_all) + desc->info0 |= hal_reo_flush_cache_info0_flush_all; + + return field_get(hal_reo_cmd_hdr_info0_cmd_number, desc->cmd.info0); +} + +static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv, + struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_reo_update_rx_queue *desc; + + tlv->tl = field_prep(hal_tlv_hdr_tag, hal_reo_update_rx_reo_queue) | + field_prep(hal_tlv_hdr_len, sizeof(*desc)); + + desc = (struct hal_reo_update_rx_queue *)tlv->value; + memset(&desc->queue_addr_lo, 0, + (sizeof(*desc) - sizeof(struct hal_reo_cmd_hdr))); + + desc->cmd.info0 &= ~hal_reo_cmd_hdr_info0_status_required; + if (cmd->flag & hal_reo_cmd_flg_need_status) + desc->cmd.info0 |= hal_reo_cmd_hdr_info0_status_required; + + desc->queue_addr_lo = cmd->addr_lo; + desc->info0 = + field_prep(hal_reo_upd_rx_queue_info0_queue_addr_hi, + cmd->addr_hi) | + field_prep(hal_reo_upd_rx_queue_info0_upd_rx_queue_num, + !!(cmd->upd0 & hal_reo_cmd_upd0_rx_queue_num)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_vld, + !!(cmd->upd0 & hal_reo_cmd_upd0_vld)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_assoc_lnk_desc_cnt, + !!(cmd->upd0 & hal_reo_cmd_upd0_aldc)) | + 
field_prep(hal_reo_upd_rx_queue_info0_upd_dis_dup_detection, + !!(cmd->upd0 & hal_reo_cmd_upd0_dis_dup_detection)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_soft_reorder_en, + !!(cmd->upd0 & hal_reo_cmd_upd0_soft_reorder_en)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_ac, + !!(cmd->upd0 & hal_reo_cmd_upd0_ac)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_bar, + !!(cmd->upd0 & hal_reo_cmd_upd0_bar)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_retry, + !!(cmd->upd0 & hal_reo_cmd_upd0_retry)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_check_2k_mode, + !!(cmd->upd0 & hal_reo_cmd_upd0_check_2k_mode)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_oor_mode, + !!(cmd->upd0 & hal_reo_cmd_upd0_oor_mode)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_ba_window_size, + !!(cmd->upd0 & hal_reo_cmd_upd0_ba_window_size)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_pn_check, + !!(cmd->upd0 & hal_reo_cmd_upd0_pn_check)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_even_pn, + !!(cmd->upd0 & hal_reo_cmd_upd0_even_pn)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_uneven_pn, + !!(cmd->upd0 & hal_reo_cmd_upd0_uneven_pn)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_pn_handle_enable, + !!(cmd->upd0 & hal_reo_cmd_upd0_pn_handle_enable)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_pn_size, + !!(cmd->upd0 & hal_reo_cmd_upd0_pn_size)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_ignore_ampdu_flg, + !!(cmd->upd0 & hal_reo_cmd_upd0_ignore_ampdu_flg)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_svld, + !!(cmd->upd0 & hal_reo_cmd_upd0_svld)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_ssn, + !!(cmd->upd0 & hal_reo_cmd_upd0_ssn)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_seq_2k_err, + !!(cmd->upd0 & hal_reo_cmd_upd0_seq_2k_err)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_pn_valid, + !!(cmd->upd0 & hal_reo_cmd_upd0_pn_valid)) | + field_prep(hal_reo_upd_rx_queue_info0_upd_pn, + !!(cmd->upd0 & hal_reo_cmd_upd0_pn)); + + desc->info1 = + 
field_prep(hal_reo_upd_rx_queue_info1_rx_queue_number, + cmd->rx_queue_num) | + field_prep(hal_reo_upd_rx_queue_info1_vld, + !!(cmd->upd1 & hal_reo_cmd_upd1_vld)) | + field_prep(hal_reo_upd_rx_queue_info1_assoc_lnk_desc_counter, + field_get(hal_reo_cmd_upd1_aldc, cmd->upd1)) | + field_prep(hal_reo_upd_rx_queue_info1_dis_dup_detection, + !!(cmd->upd1 & hal_reo_cmd_upd1_dis_dup_detection)) | + field_prep(hal_reo_upd_rx_queue_info1_soft_reorder_en, + !!(cmd->upd1 & hal_reo_cmd_upd1_soft_reorder_en)) | + field_prep(hal_reo_upd_rx_queue_info1_ac, + field_get(hal_reo_cmd_upd1_ac, cmd->upd1)) | + field_prep(hal_reo_upd_rx_queue_info1_bar, + !!(cmd->upd1 & hal_reo_cmd_upd1_bar)) | + field_prep(hal_reo_upd_rx_queue_info1_check_2k_mode, + !!(cmd->upd1 & hal_reo_cmd_upd1_check_2k_mode)) | + field_prep(hal_reo_upd_rx_queue_info1_retry, + !!(cmd->upd1 & hal_reo_cmd_upd1_retry)) | + field_prep(hal_reo_upd_rx_queue_info1_oor_mode, + !!(cmd->upd1 & hal_reo_cmd_upd1_oor_mode)) | + field_prep(hal_reo_upd_rx_queue_info1_pn_check, + !!(cmd->upd1 & hal_reo_cmd_upd1_pn_check)) | + field_prep(hal_reo_upd_rx_queue_info1_even_pn, + !!(cmd->upd1 & hal_reo_cmd_upd1_even_pn)) | + field_prep(hal_reo_upd_rx_queue_info1_uneven_pn, + !!(cmd->upd1 & hal_reo_cmd_upd1_uneven_pn)) | + field_prep(hal_reo_upd_rx_queue_info1_pn_handle_enable, + !!(cmd->upd1 & hal_reo_cmd_upd1_pn_handle_enable)) | + field_prep(hal_reo_upd_rx_queue_info1_ignore_ampdu_flg, + !!(cmd->upd1 & hal_reo_cmd_upd1_ignore_ampdu_flg)); + + if (cmd->pn_size == 24) + cmd->pn_size = hal_rx_reo_queue_pn_size_24; + else if (cmd->pn_size == 48) + cmd->pn_size = hal_rx_reo_queue_pn_size_48; + else if (cmd->pn_size == 128) + cmd->pn_size = hal_rx_reo_queue_pn_size_128; + + if (cmd->ba_window_size < 1) + cmd->ba_window_size = 1; + + if (cmd->ba_window_size == 1) + cmd->ba_window_size++; + + desc->info2 = + field_prep(hal_reo_upd_rx_queue_info2_ba_window_size, + cmd->ba_window_size - 1) | + field_prep(hal_reo_upd_rx_queue_info2_pn_size, 
cmd->pn_size) | + field_prep(hal_reo_upd_rx_queue_info2_svld, + !!(cmd->upd2 & hal_reo_cmd_upd2_svld)) | + field_prep(hal_reo_upd_rx_queue_info2_ssn, + field_get(hal_reo_cmd_upd2_ssn, cmd->upd2)) | + field_prep(hal_reo_upd_rx_queue_info2_seq_2k_err, + !!(cmd->upd2 & hal_reo_cmd_upd2_seq_2k_err)) | + field_prep(hal_reo_upd_rx_queue_info2_pn_err, + !!(cmd->upd2 & hal_reo_cmd_upd2_pn_err)); + + return field_get(hal_reo_cmd_hdr_info0_cmd_number, desc->cmd.info0); +} + +int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng, + enum hal_reo_cmd_type type, + struct ath11k_hal_reo_cmd *cmd) +{ + struct hal_tlv_hdr *reo_desc; + int ret; + + spin_lock_bh(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng); + if (!reo_desc) { + ret = -enobufs; + goto out; + } + + switch (type) { + case hal_reo_cmd_get_queue_stats: + ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd); + break; + case hal_reo_cmd_flush_cache: + ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd); + break; + case hal_reo_cmd_update_rx_queue: + ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd); + break; + case hal_reo_cmd_flush_queue: + case hal_reo_cmd_unblock_cache: + case hal_reo_cmd_flush_timeout_list: + ath11k_warn(ab, "unsupported reo command %d ", type); + ret = -enotsupp; + break; + default: + ath11k_warn(ab, "unknown reo command %d ", type); + ret = -einval; + break; + } + +out: + ath11k_hal_srng_access_end(ab, srng); + spin_unlock_bh(&srng->lock); + + return ret; +} + +void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr, + u32 cookie, u8 manager) +{ + struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + u32 paddr_lo, paddr_hi; + + paddr_lo = lower_32_bits(paddr); + paddr_hi = upper_32_bits(paddr); + binfo->info0 = field_prep(buffer_addr_info0_addr, paddr_lo); + binfo->info1 = field_prep(buffer_addr_info1_addr, paddr_hi) | + 
field_prep(buffer_addr_info1_sw_cookie, cookie) | + field_prep(buffer_addr_info1_ret_buf_mgr, manager); +} + +void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr, + u32 *cookie, u8 *rbm) +{ + struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc; + + *paddr = + (((u64)field_get(buffer_addr_info1_addr, binfo->info1)) << 32) | + field_get(buffer_addr_info0_addr, binfo->info0); + *cookie = field_get(buffer_addr_info1_sw_cookie, binfo->info1); + *rbm = field_get(buffer_addr_info1_ret_buf_mgr, binfo->info1); +} + +void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus, + struct hal_rx_msdu_meta *meta, + enum hal_rx_buf_return_buf_manager *rbm) +{ + struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc; + struct hal_rx_msdu_details *msdu; + int i; + + *num_msdus = hal_num_rx_msdus_per_link_desc; + + msdu = &link->msdu_link[0]; + *rbm = field_get(buffer_addr_info1_ret_buf_mgr, + msdu->buf_addr_info.info1); + + for (i = 0; i < *num_msdus; i++) { + msdu = &link->msdu_link[i]; + + if (!field_get(buffer_addr_info0_addr, + msdu->buf_addr_info.info0)) { + *num_msdus = i; + break; + } + meta->msdu_len = field_get(rx_msdu_desc_info0_msdu_length, + msdu->rx_msdu_info.info0); + meta->first = !!(msdu->rx_msdu_info.info0 & + rx_msdu_desc_info0_first_msdu_in_mpdu); + meta->last = !!(msdu->rx_msdu_info.info0 & + rx_msdu_desc_info0_last_msdu_in_mpdu); + meta->continuation = !!(msdu->rx_msdu_info.info0 & + rx_msdu_desc_info0_msdu_continuation); + meta->cookie = field_get(buffer_addr_info1_sw_cookie, + msdu->buf_addr_info.info1); + meta++; + } +} + +int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc, + dma_addr_t *paddr, u32 *desc_bank) +{ + struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc; + enum hal_reo_dest_ring_push_reason push_reason; + enum hal_reo_dest_ring_error_code err_code; + + push_reason = field_get(hal_reo_dest_ring_info0_push_reason, + desc->info0); + err_code = 
field_get(hal_reo_dest_ring_info0_error_code, + desc->info0); + ab->soc_stats.reo_error[err_code]++; + + if (push_reason != hal_reo_dest_ring_push_reason_err_detected && + push_reason != hal_reo_dest_ring_push_reason_routing_instruction) { + ath11k_warn(ab, "expected error push reason code, received %d ", + push_reason); + return -einval; + } + + if (field_get(hal_reo_dest_ring_info0_buffer_type, desc->info0) != + hal_reo_dest_ring_buffer_type_link_desc) { + ath11k_warn(ab, "expected buffer type link_desc"); + return -einval; + } + + ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank); + + return 0; +} + +void ath11k_hal_rx_parse_dst_ring_desc(struct ath11k_base *ab, u32 *rx_desc, + struct hal_rx_meta_info *meta_info) +{ + struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc; + struct rx_mpdu_desc *mpdu = &desc->rx_mpdu_info; + struct rx_msdu_desc *msdu = &desc->rx_msdu_info; + struct hal_rx_mpdu_meta *meta_mpdu = &meta_info->mpdu_meta; + struct hal_rx_msdu_meta *meta_msdu = &meta_info->msdu_meta; + + meta_info->push_reason = field_get(hal_reo_dest_ring_info0_push_reason, + desc->info0); + + meta_mpdu->msdu_cnt = field_get(rx_mpdu_desc_info0_msdu_count, + mpdu->info0); + meta_mpdu->seq_num = field_get(rx_mpdu_desc_info0_seq_num, mpdu->info0); + meta_mpdu->frag = !!(mpdu->info0 & rx_mpdu_desc_info0_frag_flag); + meta_mpdu->retry = !!(mpdu->info0 & rx_mpdu_desc_info0_mpdu_retry); + meta_mpdu->ampdu = !!(mpdu->info0 & rx_mpdu_desc_info0_ampdu_flag); + meta_mpdu->raw = !!(mpdu->info0 & rx_mpdu_desc_info0_raw_mpdu); + meta_mpdu->peer_meta = mpdu->meta_data; + + meta_msdu->cookie = field_get(buffer_addr_info1_sw_cookie, + desc->buf_addr_info.info1); + meta_msdu->msdu_len = field_get(rx_msdu_desc_info0_msdu_length, + msdu->info0); + meta_msdu->first = + !!(msdu->info0 & rx_msdu_desc_info0_first_msdu_in_mpdu); + meta_msdu->last = + !!(msdu->info0 & rx_msdu_desc_info0_last_msdu_in_mpdu); + meta_msdu->continuation = + !!(msdu->info0 & 
rx_msdu_desc_info0_msdu_continuation); +} + +int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc, + struct hal_rx_wbm_rel_info *rel_info) +{ + struct hal_wbm_release_ring *wbm_desc = desc; + enum hal_wbm_rel_desc_type type; + enum hal_wbm_rel_src_module rel_src; + + type = field_get(hal_wbm_release_info0_desc_type, + wbm_desc->info0); + /* we expect only wbm_rel buffer type */ + if (type != hal_wbm_rel_desc_type_rel_msdu) { + warn_on(1); + return -einval; + } + + rel_src = field_get(hal_wbm_release_info0_rel_src_module, + wbm_desc->info0); + if (rel_src != hal_wbm_rel_src_module_rxdma && + rel_src != hal_wbm_rel_src_module_reo) + return -einval; + + if (field_get(buffer_addr_info1_ret_buf_mgr, + wbm_desc->buf_addr_info.info1) != hal_rx_buf_rbm_sw3_bm) { + ab->soc_stats.invalid_rbm++; + return -einval; + } + + rel_info->cookie = field_get(buffer_addr_info1_sw_cookie, + wbm_desc->buf_addr_info.info1); + rel_info->err_rel_src = rel_src; + if (rel_src == hal_wbm_rel_src_module_reo) { + rel_info->push_reason = + field_get(hal_wbm_release_info0_reo_push_reason, + wbm_desc->info0); + rel_info->err_code = + field_get(hal_wbm_release_info0_reo_error_code, + wbm_desc->info0); + } else { + rel_info->push_reason = + field_get(hal_wbm_release_info0_rxdma_push_reason, + wbm_desc->info0); + rel_info->err_code = + field_get(hal_wbm_release_info0_rxdma_error_code, + wbm_desc->info0); + } + + rel_info->first_msdu = field_get(hal_wbm_release_info2_first_msdu, + wbm_desc->info2); + rel_info->last_msdu = field_get(hal_wbm_release_info2_last_msdu, + wbm_desc->info2); + return 0; +} + +void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc, + dma_addr_t *paddr, u32 *desc_bank) +{ + struct ath11k_buffer_addr *buff_addr = desc; + + *paddr = ((u64)(field_get(buffer_addr_info1_addr, buff_addr->info1)) << 32) | + field_get(buffer_addr_info0_addr, buff_addr->info0); + + *desc_bank = field_get(buffer_addr_info1_sw_cookie, buff_addr->info1); +} + +void 
ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc, + void *link_desc, + enum hal_wbm_rel_bm_act action) +{ + struct hal_wbm_release_ring *dst_desc = desc; + struct hal_wbm_release_ring *src_desc = link_desc; + + dst_desc->buf_addr_info = src_desc->buf_addr_info; + dst_desc->info0 |= field_prep(hal_wbm_release_info0_rel_src_module, + hal_wbm_rel_src_module_sw) | + field_prep(hal_wbm_release_info0_bm_action, action) | + field_prep(hal_wbm_release_info0_desc_type, + hal_wbm_rel_desc_type_msdu_link); +} + +void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status) +{ + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_get_queue_stats_status *desc = + (struct hal_reo_get_queue_stats_status *)tlv->value; + + status->uniform_hdr.cmd_num = + field_get(hal_reo_status_hdr_info0_status_num, + desc->hdr.info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->hdr.info0); + + ath11k_dbg(ab, ath11k_dbg_hal, "queue stats status: "); + ath11k_dbg(ab, ath11k_dbg_hal, "header: cmd_num %d status %d ", + status->uniform_hdr.cmd_num, + status->uniform_hdr.cmd_status); + ath11k_dbg(ab, ath11k_dbg_hal, "ssn %ld cur_idx %ld ", + field_get(hal_reo_get_queue_stats_status_info0_ssn, + desc->info0), + field_get(hal_reo_get_queue_stats_status_info0_cur_idx, + desc->info0)); + ath11k_dbg(ab, ath11k_dbg_hal, "pn = [%08x, %08x, %08x, %08x] ", + desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]); + ath11k_dbg(ab, ath11k_dbg_hal, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x ", + desc->last_rx_enqueue_timestamp, + desc->last_rx_dequeue_timestamp); + ath11k_dbg(ab, ath11k_dbg_hal, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x] ", + desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2], + desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5], + desc->rx_bitmap[6], desc->rx_bitmap[7]); + ath11k_dbg(ab, ath11k_dbg_hal, "count: cur_mpdu %ld 
cur_msdu %ld ", + field_get(hal_reo_get_queue_stats_status_info1_mpdu_count, + desc->info1), + field_get(hal_reo_get_queue_stats_status_info1_msdu_count, + desc->info1)); + ath11k_dbg(ab, ath11k_dbg_hal, "fwd_timeout %ld fwd_bar %ld dup_count %ld ", + field_get(hal_reo_get_queue_stats_status_info2_timeout_count, + desc->info2), + field_get(hal_reo_get_queue_stats_status_info2_fdtb_count, + desc->info2), + field_get(hal_reo_get_queue_stats_status_info2_duplicate_count, + desc->info2)); + ath11k_dbg(ab, ath11k_dbg_hal, "frames_in_order %ld bar_rcvd %ld ", + field_get(hal_reo_get_queue_stats_status_info3_fio_count, + desc->info3), + field_get(hal_reo_get_queue_stats_status_info3_bar_rcvd_cnt, + desc->info3)); + ath11k_dbg(ab, ath11k_dbg_hal, "num_mpdus %d num_msdus %d total_bytes %d ", + desc->num_mpdu_frames, desc->num_msdu_frames, + desc->total_bytes); + ath11k_dbg(ab, ath11k_dbg_hal, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld ", + field_get(hal_reo_get_queue_stats_status_info4_late_rx_mpdu, + desc->info4), + field_get(hal_reo_get_queue_stats_status_info4_window_jmp2k, + desc->info4), + field_get(hal_reo_get_queue_stats_status_info4_hole_count, + desc->info4)); + ath11k_dbg(ab, ath11k_dbg_hal, "looping count %ld ", + field_get(hal_reo_get_queue_stats_status_info5_looping_cnt, + desc->info5)); +} + +int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status) +{ + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_status_hdr *hdr; + + hdr = (struct hal_reo_status_hdr *)tlv->value; + *status = field_get(hal_reo_status_hdr_info0_exec_status, hdr->info0); + + return field_get(hal_reo_status_hdr_info0_status_num, hdr->info0); +} + +void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status) +{ + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_flush_queue_status *desc = + (struct hal_reo_flush_queue_status *)tlv->value; + + status->uniform_hdr.cmd_num = + 
field_get(hal_reo_status_hdr_info0_status_num, + desc->hdr.info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->hdr.info0); + status->u.flush_queue.err_detected = + field_get(hal_reo_flush_queue_info0_err_detected, + desc->info0); +} + +void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status) +{ + struct ath11k_hal *hal = &ab->hal; + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_flush_cache_status *desc = + (struct hal_reo_flush_cache_status *)tlv->value; + + status->uniform_hdr.cmd_num = + field_get(hal_reo_status_hdr_info0_status_num, + desc->hdr.info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->hdr.info0); + + status->u.flush_cache.err_detected = + field_get(hal_reo_flush_cache_status_info0_is_err, + desc->info0); + status->u.flush_cache.err_code = + field_get(hal_reo_flush_cache_status_info0_block_err_code, + desc->info0); + if (!status->u.flush_cache.err_code) + hal->avail_blk_resource |= bit(hal->current_blk_index); + + status->u.flush_cache.cache_controller_flush_status_hit = + field_get(hal_reo_flush_cache_status_info0_flush_status_hit, + desc->info0); + + status->u.flush_cache.cache_controller_flush_status_desc_type = + field_get(hal_reo_flush_cache_status_info0_flush_desc_type, + desc->info0); + status->u.flush_cache.cache_controller_flush_status_client_id = + field_get(hal_reo_flush_cache_status_info0_flush_client_id, + desc->info0); + status->u.flush_cache.cache_controller_flush_status_err = + field_get(hal_reo_flush_cache_status_info0_flush_err, + desc->info0); + status->u.flush_cache.cache_controller_flush_status_cnt = + field_get(hal_reo_flush_cache_status_info0_flush_count, + desc->info0); +} + +void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status) +{ + struct ath11k_hal *hal = &ab->hal; + struct hal_tlv_hdr *tlv = 
(struct hal_tlv_hdr *)reo_desc; + struct hal_reo_unblock_cache_status *desc = + (struct hal_reo_unblock_cache_status *)tlv->value; + + status->uniform_hdr.cmd_num = + field_get(hal_reo_status_hdr_info0_status_num, + desc->hdr.info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->hdr.info0); + + status->u.unblock_cache.err_detected = + field_get(hal_reo_unblock_cache_status_info0_is_err, + desc->info0); + status->u.unblock_cache.unblock_type = + field_get(hal_reo_unblock_cache_status_info0_type, + desc->info0); + + if (!status->u.unblock_cache.err_detected && + status->u.unblock_cache.unblock_type == + hal_reo_status_unblock_blocking_resource) + hal->avail_blk_resource &= ~bit(hal->current_blk_index); +} + +void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab, + u32 *reo_desc, + struct hal_reo_status *status) +{ + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_flush_timeout_list_status *desc = + (struct hal_reo_flush_timeout_list_status *)tlv->value; + + status->uniform_hdr.cmd_num = + field_get(hal_reo_status_hdr_info0_status_num, + desc->hdr.info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->hdr.info0); + + status->u.timeout_list.err_detected = + field_get(hal_reo_flush_timeout_status_info0_is_err, + desc->info0); + status->u.timeout_list.list_empty = + field_get(hal_reo_flush_timeout_status_info0_list_empty, + desc->info0); + + status->u.timeout_list.release_desc_cnt = + field_get(hal_reo_flush_timeout_status_info1_rel_desc_count, + desc->info1); + status->u.timeout_list.fwd_buf_cnt = + field_get(hal_reo_flush_timeout_status_info1_fwd_buf_count, + desc->info1); +} + +void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab, + u32 *reo_desc, + struct hal_reo_status *status) +{ + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_desc_thresh_reached_status *desc = + (struct 
hal_reo_desc_thresh_reached_status *)tlv->value; + + status->uniform_hdr.cmd_num = + field_get(hal_reo_status_hdr_info0_status_num, + desc->hdr.info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->hdr.info0); + + status->u.desc_thresh_reached.threshold_idx = + field_get(hal_reo_desc_thresh_status_info0_thresh_index, + desc->info0); + + status->u.desc_thresh_reached.link_desc_counter0 = + field_get(hal_reo_desc_thresh_status_info1_link_desc_counter0, + desc->info1); + + status->u.desc_thresh_reached.link_desc_counter1 = + field_get(hal_reo_desc_thresh_status_info2_link_desc_counter1, + desc->info2); + + status->u.desc_thresh_reached.link_desc_counter2 = + field_get(hal_reo_desc_thresh_status_info3_link_desc_counter2, + desc->info3); + + status->u.desc_thresh_reached.link_desc_counter_sum = + field_get(hal_reo_desc_thresh_status_info4_link_desc_counter_sum, + desc->info4); +} + +void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab, + u32 *reo_desc, + struct hal_reo_status *status) +{ + struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc; + struct hal_reo_status_hdr *desc = + (struct hal_reo_status_hdr *)tlv->value; + + status->uniform_hdr.cmd_num = + field_get(hal_reo_status_hdr_info0_status_num, + desc->info0); + status->uniform_hdr.cmd_status = + field_get(hal_reo_status_hdr_info0_exec_status, + desc->info0); +} + +u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid) +{ + u32 num_ext_desc; + + if (ba_window_size <= 1) { + if (tid != hal_desc_reo_non_qos_tid) + num_ext_desc = 1; + else + num_ext_desc = 0; + } else if (ba_window_size <= 105) { + num_ext_desc = 1; + } else if (ba_window_size <= 210) { + num_ext_desc = 2; + } else { + num_ext_desc = 3; + } + + return sizeof(struct hal_rx_reo_queue) + + (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext)); +} + +void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size, + u32 start_seq) +{ + struct hal_rx_reo_queue *qdesc = (struct 
hal_rx_reo_queue *)vaddr; + struct hal_rx_reo_queue_ext *ext_desc; + + memset(qdesc, 0, sizeof(*qdesc)); + + ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, hal_desc_reo_owned, + hal_desc_reo_queue_desc, + reo_queue_desc_magic_debug_pattern_0); + + qdesc->rx_queue_num = field_prep(hal_rx_reo_queue_rx_queue_number, tid); + + qdesc->info0 = + field_prep(hal_rx_reo_queue_info0_vld, 1) | + field_prep(hal_rx_reo_queue_info0_assoc_lnk_desc_counter, 1) | + field_prep(hal_rx_reo_queue_info0_ac, ath11k_tid_to_ac(tid)); + + if (ba_window_size < 1) + ba_window_size = 1; + + if (ba_window_size == 1 && tid != hal_desc_reo_non_qos_tid) + ba_window_size++; + + if (ba_window_size == 1) + qdesc->info0 |= field_prep(hal_rx_reo_queue_info0_retry, 1); + + qdesc->info0 |= field_prep(hal_rx_reo_queue_info0_ba_window_size, + ba_window_size - 1); + + /* todo: set ignore ampdu flags based on ba window size and/or + * ampdu capabilities + */ + qdesc->info0 |= field_prep(hal_rx_reo_queue_info0_ignore_ampdu_flg, 1); + + qdesc->info1 |= field_prep(hal_rx_reo_queue_info1_svld, 0); + + if (start_seq <= 0xfff) + qdesc->info1 = field_prep(hal_rx_reo_queue_info1_ssn, + start_seq); + + if (tid == hal_desc_reo_non_qos_tid) + return; + + ext_desc = qdesc->ext_desc; + + /* todo: hw queue descriptors are currently allocated for max ba + * window size for all qos tids so that same descriptor can be used + * later when addba request is recevied. this should be changed to + * allocate hw queue descriptors based on ba window size being + * negotiated (0 for non ba cases), and reallocate when ba window + * size changes and also send wmi message to fw to change the reo + * queue descriptor in rx peer entry as part of dp_rx_tid_update. 
+ */ + memset(ext_desc, 0, 3 * sizeof(*ext_desc)); + ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, hal_desc_reo_owned, + hal_desc_reo_queue_ext_desc, + reo_queue_desc_magic_debug_pattern_1); + ext_desc++; + ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, hal_desc_reo_owned, + hal_desc_reo_queue_ext_desc, + reo_queue_desc_magic_debug_pattern_2); + ext_desc++; + ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, hal_desc_reo_owned, + hal_desc_reo_queue_ext_desc, + reo_queue_desc_magic_debug_pattern_3); +} + +void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab, + struct hal_srng *srng) +{ + struct hal_srng_params params; + struct hal_tlv_hdr *tlv; + struct hal_reo_get_queue_stats *desc; + int i, cmd_num = 1; + int entry_size; + u8 *entry; + + memset(¶ms, 0, sizeof(params)); + + entry_size = ath11k_hal_srng_get_entrysize(hal_reo_cmd); + ath11k_hal_srng_get_params(ab, srng, ¶ms); + entry = (u8 *)params.ring_base_vaddr; + + for (i = 0; i < params.num_entries; i++) { + tlv = (struct hal_tlv_hdr *)entry; + desc = (struct hal_reo_get_queue_stats *)tlv->value; + desc->cmd.info0 = + field_prep(hal_reo_cmd_hdr_info0_cmd_number, cmd_num++); + entry += entry_size; + } +} + +void ath11k_hal_reo_hw_setup(struct ath11k_base *ab) +{ + u32 reo_base = hal_seq_wcss_umac_reo_reg; + u32 val; + + val = ath11k_ahb_read32(ab, reo_base + hal_reo1_gen_enable); + + val &= ~hal_reo1_gen_enable_frag_dst_ring; + val |= field_prep(hal_reo1_gen_enable_frag_dst_ring, + hal_srng_ring_id_reo2sw1) | + field_prep(hal_reo1_gen_enable_aging_list_enable, 1) | + field_prep(hal_reo1_gen_enable_aging_flush_enable, 1); + ath11k_ahb_write32(ab, reo_base + hal_reo1_gen_enable, val); + + ath11k_ahb_write32(ab, reo_base + hal_reo1_aging_thresh_ix_0, + hal_default_reo_timeout_usec); + ath11k_ahb_write32(ab, reo_base + hal_reo1_aging_thresh_ix_1, + hal_default_reo_timeout_usec); + ath11k_ahb_write32(ab, reo_base + hal_reo1_aging_thresh_ix_2, + hal_default_reo_timeout_usec); + ath11k_ahb_write32(ab, reo_base + 
hal_reo1_aging_thresh_ix_3, + hal_default_reo_timeout_usec); +} + +static enum hal_rx_mon_status +ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab, + struct hal_rx_mon_ppdu_info *ppdu_info, + u32 tlv_tag, u8 *tlv_data) +{ + u32 info0, info1; + + switch (tlv_tag) { + case hal_rx_ppdu_start: { + struct hal_rx_ppdu_start *ppdu_start = + (struct hal_rx_ppdu_start *)tlv_data; + + ppdu_info->ppdu_id = + field_get(hal_rx_ppdu_start_info0_ppdu_id, + __le32_to_cpu(ppdu_start->info0)); + ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num); + ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts); + break; + } + case hal_rx_ppdu_end_user_stats: { + struct hal_rx_ppdu_end_user_stats *eu_stats = + (struct hal_rx_ppdu_end_user_stats *)tlv_data; + + info0 = __le32_to_cpu(eu_stats->info0); + info1 = __le32_to_cpu(eu_stats->info1); + + ppdu_info->tid = + ffs(field_get(hal_rx_ppdu_end_user_stats_info6_tid_bitmap, + __le32_to_cpu(eu_stats->info6))) - 1; + ppdu_info->tcp_msdu_count = + field_get(hal_rx_ppdu_end_user_stats_info4_tcp_msdu_cnt, + __le32_to_cpu(eu_stats->info4)); + ppdu_info->udp_msdu_count = + field_get(hal_rx_ppdu_end_user_stats_info4_udp_msdu_cnt, + __le32_to_cpu(eu_stats->info4)); + ppdu_info->other_msdu_count = + field_get(hal_rx_ppdu_end_user_stats_info5_other_msdu_cnt, + __le32_to_cpu(eu_stats->info5)); + ppdu_info->tcp_ack_msdu_count = + field_get(hal_rx_ppdu_end_user_stats_info5_tcp_ack_msdu_cnt, + __le32_to_cpu(eu_stats->info5)); + ppdu_info->preamble_type = + field_get(hal_rx_ppdu_end_user_stats_info1_pkt_type, info1); + ppdu_info->num_mpdu_fcs_ok = + field_get(hal_rx_ppdu_end_user_stats_info1_mpdu_cnt_fcs_ok, + info1); + ppdu_info->num_mpdu_fcs_err = + field_get(hal_rx_ppdu_end_user_stats_info0_mpdu_cnt_fcs_err, + info0); + break; + } + case hal_phyrx_ht_sig: { + struct hal_rx_ht_sig_info *ht_sig = + (struct hal_rx_ht_sig_info *)tlv_data; + + info0 = __le32_to_cpu(ht_sig->info0); + info1 = __le32_to_cpu(ht_sig->info1); + + ppdu_info->mcs 
= field_get(hal_rx_ht_sig_info_info0_mcs, info0); + ppdu_info->bw = field_get(hal_rx_ht_sig_info_info0_bw, info0); + ppdu_info->is_stbc = field_get(hal_rx_ht_sig_info_info1_stbc, + info1); + ppdu_info->ldpc = field_get(hal_rx_ht_sig_info_info1_fec_coding, info1); + ppdu_info->gi = info1 & hal_rx_ht_sig_info_info1_gi; + + switch (ppdu_info->mcs) { + case 0 ... 7: + ppdu_info->nss = 1; + break; + case 8 ... 15: + ppdu_info->nss = 2; + break; + case 16 ... 23: + ppdu_info->nss = 3; + break; + case 24 ... 31: + ppdu_info->nss = 4; + break; + } + + if (ppdu_info->nss > 1) + ppdu_info->mcs = ppdu_info->mcs % 8; + + ppdu_info->reception_type = hal_rx_reception_type_su; + break; + } + case hal_phyrx_l_sig_b: { + struct hal_rx_lsig_b_info *lsigb = + (struct hal_rx_lsig_b_info *)tlv_data; + + ppdu_info->rate = field_get(hal_rx_lsig_b_info_info0_rate, + __le32_to_cpu(lsigb->info0)); + ppdu_info->reception_type = hal_rx_reception_type_su; + break; + } + case hal_phyrx_l_sig_a: { + struct hal_rx_lsig_a_info *lsiga = + (struct hal_rx_lsig_a_info *)tlv_data; + + ppdu_info->rate = field_get(hal_rx_lsig_a_info_info0_rate, + __le32_to_cpu(lsiga->info0)); + ppdu_info->reception_type = hal_rx_reception_type_su; + break; + } + case hal_phyrx_vht_sig_a: { + struct hal_rx_vht_sig_a_info *vht_sig = + (struct hal_rx_vht_sig_a_info *)tlv_data; + u32 nsts; + u32 group_id; + + info0 = __le32_to_cpu(vht_sig->info0); + info1 = __le32_to_cpu(vht_sig->info1); + + ppdu_info->ldpc = field_get(hal_rx_vht_sig_a_info_info1_su_mu_coding, + info0); + ppdu_info->mcs = field_get(hal_rx_vht_sig_a_info_info1_mcs, + info1); + ppdu_info->gi = + field_get(hal_rx_vht_sig_a_info_info1_gi_setting, + info1); + ppdu_info->is_stbc = info0 & hal_rx_vht_sig_a_info_info0_stbc; + nsts = field_get(hal_rx_vht_sig_a_info_info0_nsts, info0); + if (ppdu_info->is_stbc && nsts > 0) + nsts = ((nsts + 1) >> 1) - 1; + + ppdu_info->nss = (nsts & vht_sig_su_nss_mask) + 1; + ppdu_info->bw = field_get(hal_rx_vht_sig_a_info_info0_bw, 
+ info0); + ppdu_info->beamformed = info1 & + hal_rx_vht_sig_a_info_info1_beamformed; + group_id = field_get(hal_rx_vht_sig_a_info_info0_group_id, + info0); + if (group_id == 0 || group_id == 63) + ppdu_info->reception_type = hal_rx_reception_type_su; + else + ppdu_info->reception_type = + hal_rx_reception_type_mu_mimo; + break; + } + case hal_phyrx_he_sig_a_su: { + struct hal_rx_he_sig_a_su_info *he_sig_a = + (struct hal_rx_he_sig_a_su_info *)tlv_data; + u32 nsts, cp_ltf, dcm; + + info0 = __le32_to_cpu(he_sig_a->info0); + info1 = __le32_to_cpu(he_sig_a->info1); + + ppdu_info->mcs = + field_get(hal_rx_he_sig_a_su_info_info0_transmit_mcs, + info0); + ppdu_info->bw = + field_get(hal_rx_he_sig_a_su_info_info0_transmit_bw, + info0); + ppdu_info->ldpc = field_get(hal_rx_he_sig_a_su_info_info1_coding, info0); + ppdu_info->is_stbc = info1 & + hal_rx_he_sig_a_su_info_info1_stbc; + ppdu_info->beamformed = info1 & + hal_rx_he_sig_a_su_info_info1_txbf; + dcm = info0 & hal_rx_he_sig_a_su_info_info0_dcm; + cp_ltf = field_get(hal_rx_he_sig_a_su_info_info0_cp_ltf_size, + info0); + nsts = field_get(hal_rx_he_sig_a_su_info_info0_nsts, info0); + + switch (cp_ltf) { + case 0: + case 1: + ppdu_info->gi = hal_rx_gi_0_8_us; + break; + case 2: + ppdu_info->gi = hal_rx_gi_1_6_us; + break; + case 3: + if (dcm && ppdu_info->is_stbc) + ppdu_info->gi = hal_rx_gi_0_8_us; + else + ppdu_info->gi = hal_rx_gi_3_2_us; + break; + } + + ppdu_info->nss = nsts + 1; + ppdu_info->reception_type = hal_rx_reception_type_su; + break; + } + case hal_phyrx_he_sig_a_mu_dl: { + struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl = + (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data; + + u32 cp_ltf; + + info0 = __le32_to_cpu(he_sig_a_mu_dl->info0); + info1 = __le32_to_cpu(he_sig_a_mu_dl->info1); + + ppdu_info->bw = + field_get(hal_rx_he_sig_a_mu_dl_info_info0_transmit_bw, + info0); + cp_ltf = field_get(hal_rx_he_sig_a_mu_dl_info_info0_cp_ltf_size, + info0); + + switch (cp_ltf) { + case 0: + case 1: + ppdu_info->gi = 
hal_rx_gi_0_8_us; + break; + case 2: + ppdu_info->gi = hal_rx_gi_1_6_us; + break; + case 3: + ppdu_info->gi = hal_rx_gi_3_2_us; + break; + } + + ppdu_info->is_stbc = info1 & + hal_rx_he_sig_a_mu_dl_info_info1_stbc; + ppdu_info->reception_type = hal_rx_reception_type_mu_mimo; + break; + } + case hal_phyrx_he_sig_b1_mu: { + /* todo: check if resource unit(ru) allocation stats + * are required + */ + ppdu_info->reception_type = hal_rx_reception_type_mu_mimo; + break; + } + case hal_phyrx_he_sig_b2_mu: { + struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu = + (struct hal_rx_he_sig_b2_mu_info *)tlv_data; + + info0 = __le32_to_cpu(he_sig_b2_mu->info0); + + ppdu_info->mcs = + field_get(hal_rx_he_sig_b2_mu_info_info0_sta_mcs, + info0); + ppdu_info->nss = + field_get(hal_rx_he_sig_b2_mu_info_info0_sta_nsts, + info0) + 1; + ppdu_info->ldpc = field_get(hal_rx_he_sig_b2_mu_info_info0_sta_coding, + info0); + break; + } + case hal_phyrx_he_sig_b2_ofdma: { + struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma = + (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data; + + info0 = __le32_to_cpu(he_sig_b2_ofdma->info0); + + ppdu_info->mcs = + field_get(hal_rx_he_sig_b2_ofdma_info_info0_sta_mcs, + info0); + ppdu_info->nss = + field_get(hal_rx_he_sig_b2_ofdma_info_info0_sta_nsts, + info0) + 1; + ppdu_info->beamformed = + info0 & + hal_rx_he_sig_b2_ofdma_info_info0_sta_txbf; + ppdu_info->ldpc = field_get(hal_rx_he_sig_b2_ofdma_info_info0_sta_coding, + info0); + ppdu_info->reception_type = hal_rx_reception_type_mu_ofdma; + break; + } + case hal_phyrx_rssi_legacy: { + struct hal_rx_phyrx_rssi_legacy_info *rssi = + (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data; + + /* todo: please note that the combined rssi will not be accurate + * in mu case. rssi in mu needs to be retrieved from + * phyrx_other_receive_info tlv. 
+ */ + ppdu_info->rssi_comb = + field_get(hal_rx_phyrx_rssi_legacy_info_info1_rssi_comb, + __le32_to_cpu(rssi->info0)); + break; + } + case hal_rx_mpdu_start: { + struct hal_rx_mpdu_info *mpdu_info = + (struct hal_rx_mpdu_info *)tlv_data; + u16 peer_id; + + peer_id = field_get(hal_rx_mpdu_info_info0_peerid, + __le32_to_cpu(mpdu_info->info0)); + if (peer_id) + ppdu_info->peer_id = peer_id; + break; + } + case hal_rxpcu_ppdu_end_info: { + struct hal_rx_ppdu_end_duration *ppdu_rx_duration = + (struct hal_rx_ppdu_end_duration *)tlv_data; + ppdu_info->rx_duration = + field_get(hal_rx_ppdu_end_duration, + __le32_to_cpu(ppdu_rx_duration->info0)); + break; + } + case hal_dummy: + return hal_rx_mon_status_buf_done; + case hal_rx_ppdu_end_status_done: + case 0: + return hal_rx_mon_status_ppdu_done; + default: + break; + } + + return hal_rx_mon_status_ppdu_not_done; +} + +enum hal_rx_mon_status +ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct sk_buff *skb) +{ + struct hal_tlv_hdr *tlv; + enum hal_rx_mon_status hal_status = hal_rx_mon_status_buf_done; + u16 tlv_tag; + u16 tlv_len; + u8 *ptr = skb->data; + + do { + tlv = (struct hal_tlv_hdr *)ptr; + tlv_tag = field_get(hal_tlv_hdr_tag, tlv->tl); + tlv_len = field_get(hal_tlv_hdr_len, tlv->tl); + ptr += sizeof(*tlv); + + /* the actual length of ppdu_end is the combined length of many phy + * tlvs that follow. skip the tlv header and + * rx_rxpcu_classification_overview that follows the header to get to + * next tlv. 
+ */ + if (tlv_tag == hal_rx_ppdu_end) + tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview); + + hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info, + tlv_tag, ptr); + ptr += tlv_len; + ptr = ptr_align(ptr, hal_tlv_align); + + if ((ptr - skb->data) >= dp_rx_buffer_size) + break; + } while (hal_status == hal_rx_mon_status_ppdu_not_done); + + return hal_status; +} + +void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr, + u32 *sw_cookie, void **pp_buf_addr, + u32 *msdu_cnt) +{ + struct hal_reo_entrance_ring *reo_ent_ring = + (struct hal_reo_entrance_ring *)rx_desc; + struct ath11k_buffer_addr *buf_addr_info; + struct rx_mpdu_desc *rx_mpdu_desc_info_details; + + rx_mpdu_desc_info_details = + (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info; + + *msdu_cnt = field_get(rx_mpdu_desc_info0_msdu_count, + rx_mpdu_desc_info_details->info0); + + buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info; + + *paddr = (((u64)field_get(buffer_addr_info1_addr, + buf_addr_info->info1)) << 32) | + field_get(buffer_addr_info0_addr, + buf_addr_info->info0); + + *sw_cookie = field_get(buffer_addr_info1_sw_cookie, + buf_addr_info->info1); + + *pp_buf_addr = (void *)buf_addr_info; +} diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal_rx.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_hal_rx_h +#define ath11k_hal_rx_h + +struct hal_rx_mpdu_meta { + u32 peer_meta; + u16 msdu_cnt; + u16 seq_num; + bool frag; + bool retry; + bool ampdu; + bool raw; +}; + +struct hal_rx_msdu_meta { + u32 cookie; + u32 msdu_len; + bool first; + bool last; + bool continuation; +}; + +struct hal_rx_meta_info { + enum hal_reo_dest_ring_push_reason push_reason; + struct hal_rx_mpdu_meta mpdu_meta; + struct hal_rx_msdu_meta msdu_meta; +}; + +struct hal_rx_wbm_rel_info { + u32 cookie; + enum hal_wbm_rel_src_module err_rel_src; + enum hal_reo_dest_ring_push_reason push_reason; + u32 err_code; + bool first_msdu; + bool last_msdu; +}; + +#define hal_invalid_peerid 0xffff +#define vht_sig_su_nss_mask 0x7 + +#define hal_rx_max_mcs 12 +#define hal_rx_max_nss 8 + +struct hal_rx_mon_status_tlv_hdr { + u32 hdr; + u8 value[0]; +}; + +enum hal_rx_su_mu_coding { + hal_rx_su_mu_coding_bcc, + hal_rx_su_mu_coding_ldpc, + hal_rx_su_mu_coding_max, +}; + +enum hal_rx_gi { + hal_rx_gi_0_8_us, + hal_rx_gi_0_4_us, + hal_rx_gi_1_6_us, + hal_rx_gi_3_2_us, + hal_rx_gi_max, +}; + +enum hal_rx_bw { + hal_rx_bw_20mhz, + hal_rx_bw_40mhz, + hal_rx_bw_80mhz, + hal_rx_bw_160mhz, + hal_rx_bw_max, +}; + +enum hal_rx_preamble { + hal_rx_preamble_11a, + hal_rx_preamble_11b, + hal_rx_preamble_11n, + hal_rx_preamble_11ac, + hal_rx_preamble_11ax, + hal_rx_preamble_max, +}; + +enum hal_rx_reception_type { + hal_rx_reception_type_su, + hal_rx_reception_type_mu_mimo, + hal_rx_reception_type_mu_ofdma, + hal_rx_reception_type_mu_ofdma_mimo, + hal_rx_reception_type_max, +}; + +#define hal_tlv_status_ppdu_not_done 0 +#define hal_tlv_status_ppdu_done 1 +#define hal_tlv_status_buf_done 2 +#define hal_tlv_status_ppdu_non_std_done 3 +#define hal_rx_fcs_len 4 + +enum hal_rx_mon_status { + hal_rx_mon_status_ppdu_not_done, + hal_rx_mon_status_ppdu_done, + hal_rx_mon_status_buf_done, +}; + +struct hal_rx_mon_ppdu_info { + u32 ppdu_id; + u32 ppdu_ts; + u32 num_mpdu_fcs_ok; + u32 num_mpdu_fcs_err; + u32 
preamble_type; + u16 chan_num; + u16 tcp_msdu_count; + u16 tcp_ack_msdu_count; + u16 udp_msdu_count; + u16 other_msdu_count; + u16 peer_id; + u8 rate; + u8 mcs; + u8 nss; + u8 bw; + u8 is_stbc; + u8 gi; + u8 ldpc; + u8 beamformed; + u8 rssi_comb; + u8 tid; + u8 reception_type; + u64 rx_duration; +}; + +#define hal_rx_ppdu_start_info0_ppdu_id genmask(15, 0) + +struct hal_rx_ppdu_start { + __le32 info0; + __le32 chan_num; + __le32 ppdu_start_ts; +} __packed; + +#define hal_rx_ppdu_end_user_stats_info0_mpdu_cnt_fcs_err genmask(25, 16) + +#define hal_rx_ppdu_end_user_stats_info1_mpdu_cnt_fcs_ok genmask(8, 0) +#define hal_rx_ppdu_end_user_stats_info1_fc_valid bit(9) +#define hal_rx_ppdu_end_user_stats_info1_qos_ctrl_valid bit(10) +#define hal_rx_ppdu_end_user_stats_info1_ht_ctrl_valid bit(11) +#define hal_rx_ppdu_end_user_stats_info1_pkt_type genmask(23, 20) + +#define hal_rx_ppdu_end_user_stats_info2_ast_index genmask(15, 0) +#define hal_rx_ppdu_end_user_stats_info2_frame_ctrl genmask(31, 16) + +#define hal_rx_ppdu_end_user_stats_info3_qos_ctrl genmask(31, 16) + +#define hal_rx_ppdu_end_user_stats_info4_udp_msdu_cnt genmask(15, 0) +#define hal_rx_ppdu_end_user_stats_info4_tcp_msdu_cnt genmask(31, 16) + +#define hal_rx_ppdu_end_user_stats_info5_other_msdu_cnt genmask(15, 0) +#define hal_rx_ppdu_end_user_stats_info5_tcp_ack_msdu_cnt genmask(31, 16) + +#define hal_rx_ppdu_end_user_stats_info6_tid_bitmap genmask(15, 0) +#define hal_rx_ppdu_end_user_stats_info6_tid_eosp_bitmap genmask(31, 16) + +struct hal_rx_ppdu_end_user_stats { + __le32 rsvd0[2]; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 ht_ctrl; + __le32 rsvd1[2]; + __le32 info4; + __le32 info5; + __le32 info6; + __le32 rsvd2[11]; +} __packed; + +#define hal_rx_ht_sig_info_info0_mcs genmask(6, 0) +#define hal_rx_ht_sig_info_info0_bw bit(7) + +#define hal_rx_ht_sig_info_info1_stbc genmask(5, 4) +#define hal_rx_ht_sig_info_info1_fec_coding bit(6) +#define hal_rx_ht_sig_info_info1_gi bit(7) + 
+struct hal_rx_ht_sig_info { + __le32 info0; + __le32 info1; +} __packed; + +#define hal_rx_lsig_b_info_info0_rate genmask(3, 0) +#define hal_rx_lsig_b_info_info0_len genmask(15, 4) + +struct hal_rx_lsig_b_info { + __le32 info0; +} __packed; + +#define hal_rx_lsig_a_info_info0_rate genmask(3, 0) +#define hal_rx_lsig_a_info_info0_len genmask(16, 5) +#define hal_rx_lsig_a_info_info0_pkt_type genmask(27, 24) + +struct hal_rx_lsig_a_info { + __le32 info0; +} __packed; + +#define hal_rx_vht_sig_a_info_info0_bw genmask(1, 0) +#define hal_rx_vht_sig_a_info_info0_stbc bit(3) +#define hal_rx_vht_sig_a_info_info0_group_id genmask(9, 4) +#define hal_rx_vht_sig_a_info_info0_nsts genmask(21, 10) + +#define hal_rx_vht_sig_a_info_info1_gi_setting genmask(1, 0) +#define hal_rx_vht_sig_a_info_info1_su_mu_coding bit(2) +#define hal_rx_vht_sig_a_info_info1_mcs genmask(7, 4) +#define hal_rx_vht_sig_a_info_info1_beamformed bit(8) + +struct hal_rx_vht_sig_a_info { + __le32 info0; + __le32 info1; +} __packed; + +#define hal_rx_he_sig_a_su_info_info0_transmit_mcs genmask(6, 3) +#define hal_rx_he_sig_a_su_info_info0_dcm bit(7) +#define hal_rx_he_sig_a_su_info_info0_transmit_bw genmask(20, 19) +#define hal_rx_he_sig_a_su_info_info0_cp_ltf_size genmask(22, 21) +#define hal_rx_he_sig_a_su_info_info0_nsts genmask(25, 23) + +#define hal_rx_he_sig_a_su_info_info1_coding bit(7) +#define hal_rx_he_sig_a_su_info_info1_stbc bit(9) +#define hal_rx_he_sig_a_su_info_info1_txbf bit(10) + +struct hal_rx_he_sig_a_su_info { + __le32 info0; + __le32 info1; +} __packed; + +#define hal_rx_he_sig_a_mu_dl_info_info0_transmit_bw genmask(17, 15) +#define hal_rx_he_sig_a_mu_dl_info_info0_cp_ltf_size genmask(24, 23) + +#define hal_rx_he_sig_a_mu_dl_info_info1_stbc bit(12) + +struct hal_rx_he_sig_a_mu_dl_info { + __le32 info0; + __le32 info1; +} __packed; + +#define hal_rx_he_sig_b1_mu_info_info0_ru_allocation genmask(7, 0) + +struct hal_rx_he_sig_b1_mu_info { + __le32 info0; +} __packed; + +#define 
hal_rx_he_sig_b2_mu_info_info0_sta_mcs genmask(18, 15) +#define hal_rx_he_sig_b2_mu_info_info0_sta_coding bit(20) +#define hal_rx_he_sig_b2_mu_info_info0_sta_nsts genmask(31, 29) + +struct hal_rx_he_sig_b2_mu_info { + __le32 info0; +} __packed; + +#define hal_rx_he_sig_b2_ofdma_info_info0_sta_nsts genmask(13, 11) +#define hal_rx_he_sig_b2_ofdma_info_info0_sta_txbf bit(19) +#define hal_rx_he_sig_b2_ofdma_info_info0_sta_mcs genmask(18, 15) +#define hal_rx_he_sig_b2_ofdma_info_info0_sta_dcm bit(19) +#define hal_rx_he_sig_b2_ofdma_info_info0_sta_coding bit(20) + +struct hal_rx_he_sig_b2_ofdma_info { + __le32 info0; +} __packed; + +#define hal_rx_phyrx_rssi_legacy_info_info1_rssi_comb genmask(15, 8) + +struct hal_rx_phyrx_rssi_legacy_info { + __le32 rsvd[35]; + __le32 info0; +} __packed; + +#define hal_rx_mpdu_info_info0_peerid genmask(31, 16) +struct hal_rx_mpdu_info { + __le32 rsvd0; + __le32 info0; + __le32 rsvd1[21]; +} __packed; + +#define hal_rx_ppdu_end_duration genmask(23, 0) +struct hal_rx_ppdu_end_duration { + __le32 rsvd0[9]; + __le32 info0; + __le32 rsvd1[4]; +} __packed; + +struct hal_rx_rxpcu_classification_overview { + u32 rsvd0; +} __packed; + +struct hal_rx_msdu_desc_info { + u32 msdu_flags; + u16 msdu_len; /* 14 bits for length */ +}; + +#define hal_rx_num_msdu_desc 6 +struct hal_rx_msdu_list { + struct hal_rx_msdu_desc_info msdu_info[hal_rx_num_msdu_desc]; + u32 sw_cookie[hal_rx_num_msdu_desc]; + u8 rbm[hal_rx_num_msdu_desc]; +}; + +void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 
*reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab, + u32 *reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab, + u32 *reo_desc, + struct hal_reo_status *status); +void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab, + u32 *reo_desc, + struct hal_reo_status *status); +int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status); +void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus, + struct hal_rx_msdu_meta *meta, + enum hal_rx_buf_return_buf_manager *rbm); +void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc, + void *link_desc, + enum hal_wbm_rel_bm_act action); +void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr, + u32 cookie, u8 manager); +void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr, + u32 *cookie, u8 *rbm); +int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc, + dma_addr_t *paddr, u32 *desc_bank); +void ath11k_hal_rx_parse_dst_ring_desc(struct ath11k_base *ab, u32 *rx_desc, + struct hal_rx_meta_info *meta_info); +int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc, + struct hal_rx_wbm_rel_info *rel_info); +void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc, + dma_addr_t *paddr, u32 *desc_bank); +void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, + dma_addr_t *paddr, u32 *sw_cookie, + void **pp_buf_addr_info, + u32 *msdu_cnt); +enum hal_rx_mon_status +ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab, + struct hal_rx_mon_ppdu_info *ppdu_info, + struct sk_buff *skb); +#define reo_queue_desc_magic_debug_pattern_0 0xddbeef +#define reo_queue_desc_magic_debug_pattern_1 0xadbeef +#define reo_queue_desc_magic_debug_pattern_2 0xbdbeef +#define reo_queue_desc_magic_debug_pattern_3 0xcdbeef +#endif diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c 
b/drivers/net/wireless/ath/ath11k/hal_tx.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal_tx.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include "ahb.h" +#include "hal.h" +#include "hal_tx.h" + +#define dscp_tid_map_tbl_entry_size 64 + +/* dscp_tid_map - default dscp-tid mapping + * + * dscp tid + * 000000 0 + * 001000 1 + * 010000 2 + * 011000 3 + * 100000 4 + * 101000 5 + * 110000 6 + * 111000 7 + */ +static const u8 dscp_tid_map[dscp_tid_map_tbl_entry_size] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, + 5, 5, 5, 5, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 6, 6, + 7, 7, 7, 7, 7, 7, 7, 7, +}; + +void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd, + struct hal_tx_info *ti) +{ + struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd; + + tcl_cmd->buf_addr_info.info0 = + field_prep(buffer_addr_info0_addr, ti->paddr); + tcl_cmd->buf_addr_info.info1 = + field_prep(buffer_addr_info1_addr, + ((uint64_t)ti->paddr >> hal_addr_msb_reg_shift)); + tcl_cmd->buf_addr_info.info1 |= + field_prep(buffer_addr_info1_ret_buf_mgr, + (ti->ring_id + hal_rx_buf_rbm_sw0_bm)) | + field_prep(buffer_addr_info1_sw_cookie, ti->desc_id); + + tcl_cmd->info0 = + field_prep(hal_tcl_data_cmd_info0_desc_type, ti->type) | + field_prep(hal_tcl_data_cmd_info0_encap_type, ti->encap_type) | + field_prep(hal_tcl_data_cmd_info0_encrypt_type, + ti->encrypt_type) | + field_prep(hal_tcl_data_cmd_info0_addrx_en, + ti->addr_search_flags) | + field_prep(hal_tcl_data_cmd_info0_addry_en, + ti->addr_search_flags) | + field_prep(hal_tcl_data_cmd_info0_cmd_num, + ti->meta_data_flags); + + tcl_cmd->info1 = ti->flags0 | + field_prep(hal_tcl_data_cmd_info1_data_len, ti->data_len) | + field_prep(hal_tcl_data_cmd_info1_pkt_offset, ti->pkt_offset); + + tcl_cmd->info2 = ti->flags1 | + 
field_prep(hal_tcl_data_cmd_info2_tid, ti->tid) | + field_prep(hal_tcl_data_cmd_info2_lmac_id, ti->lmac_id); + + tcl_cmd->info3 = field_prep(hal_tcl_data_cmd_info3_dscp_tid_table_idx, + ti->dscp_tid_tbl_idx) | + field_prep(hal_tcl_data_cmd_info3_search_index, + ti->bss_ast_hash); +} + +/* commit the descriptor to hardware */ +void ath11k_hal_tx_desc_sync(void *tx_desc_cached, void *hw_desc) +{ + memcpy(hw_desc + sizeof(struct hal_tlv_hdr), tx_desc_cached, + sizeof(struct hal_tcl_data_cmd)); +} + +/* get the descriptor status from hardware */ +void ath11k_hal_tx_status_desc_sync(void *hw_desc, void *local_desc) +{ + memcpy(local_desc, hw_desc, hal_tx_status_desc_len); +} + +void ath11k_hal_tx_status_parse(struct ath11k_base *ab, + struct hal_wbm_release_ring *desc, + struct hal_tx_status *ts) +{ + ts->buf_rel_source = + field_get(hal_wbm_release_info0_rel_src_module, desc->info0); + if (ts->buf_rel_source != hal_wbm_rel_src_module_fw && + ts->buf_rel_source != hal_wbm_rel_src_module_tqm) + return; + + ts->desc_id = field_get(buffer_addr_info1_sw_cookie, + desc->buf_addr_info.info1); + + if (ts->buf_rel_source == hal_wbm_rel_src_module_fw) + return; + + ts->status = field_get(hal_wbm_release_info0_tqm_release_reason, + desc->info0); + ts->ppdu_id = field_get(hal_wbm_release_info1_tqm_status_number, + desc->info1); + ts->try_cnt = field_get(hal_wbm_release_info1_transmit_count, + desc->info1); + + ts->ack_rssi = field_get(hal_wbm_release_info2_ack_frame_rssi, + desc->info2); + if (desc->info2 & hal_wbm_release_info2_first_msdu) + ts->flags |= hal_tx_status_flags_first_msdu; + + if (desc->info2 & hal_wbm_release_info2_last_msdu) + ts->flags |= hal_tx_status_flags_last_msdu; + + if (desc->info2 & hal_wbm_release_info2_msdu_in_amsdu) + ts->flags |= hal_tx_status_flags_msdu_in_amsdu; + + ts->peer_id = field_get(hal_wbm_release_info3_peer_id, desc->info3); + ts->tid = field_get(hal_wbm_release_info3_tid, desc->info3); + + if (!(desc->rate_stats.info0 & 
hal_tx_rate_stats_info0_valid)) + return; + + ts->flags |= hal_tx_status_flags_rate_stats_valid; + ts->tsf = desc->rate_stats.tsf; + ts->bw = field_get(hal_tx_rate_stats_info0_bw, desc->rate_stats.info0); + ts->pkt_type = field_get(hal_tx_rate_stats_info0_pkt_type, + desc->rate_stats.info0); + if (desc->rate_stats.info0 & hal_tx_rate_stats_info0_stbc) + ts->flags |= hal_tx_status_flags_rate_stbc; + if (desc->rate_stats.info0 & hal_tx_rate_stats_info0_ldpc) + ts->flags |= hal_tx_status_flags_rate_ldpc; + if (desc->rate_stats.info0 & hal_tx_rate_stats_info0_ofdma_tx) + ts->flags |= hal_tx_status_flags_ofdma; + + ts->sgi = field_get(hal_tx_rate_stats_info0_sgi, + desc->rate_stats.info0); + ts->mcs = field_get(hal_tx_rate_stats_info0_mcs, + desc->rate_stats.info0); + ts->num_tones_in_ru = field_get(hal_tx_rate_stats_info0_tones_in_ru, + desc->rate_stats.info0); +} + +void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id) +{ + u32 ctrl_reg_val; + u32 addr; + u8 hw_map_val[hal_dscp_tid_tbl_size]; + int i; + u32 value; + int cnt = 0; + + ctrl_reg_val = ath11k_ahb_read32(ab, hal_seq_wcss_umac_tcl_reg + + hal_tcl1_ring_cmn_ctrl_reg); + /* enable read/write access */ + ctrl_reg_val |= hal_tcl1_ring_cmn_ctrl_dscp_tid_map_prog_en; + ath11k_ahb_write32(ab, hal_seq_wcss_umac_tcl_reg + + hal_tcl1_ring_cmn_ctrl_reg, ctrl_reg_val); + + addr = hal_seq_wcss_umac_tcl_reg + hal_tcl1_ring_dscp_tid_map + + (4 * id * (hal_dscp_tid_tbl_size / 4)); + + /* configure each dscp-tid mapping in three bits there by configure + * three bytes in an iteration. 
+ */ + for (i = 0; i < dscp_tid_map_tbl_entry_size; i += 8) { + value = field_prep(hal_tcl1_ring_field_dscp_tid_map0, + dscp_tid_map[i]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map1, + dscp_tid_map[i + 1]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map2, + dscp_tid_map[i + 2]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map3, + dscp_tid_map[i + 3]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map4, + dscp_tid_map[i + 4]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map5, + dscp_tid_map[i + 5]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map6, + dscp_tid_map[i + 6]) | + field_prep(hal_tcl1_ring_field_dscp_tid_map7, + dscp_tid_map[i + 7]); + memcpy(&hw_map_val[cnt], (u8 *)&value, 3); + cnt += 3; + } + + for (i = 0; i < hal_dscp_tid_tbl_size; i += 4) { + ath11k_ahb_write32(ab, addr, *(u32 *)&hw_map_val[i]); + addr += 4; + } + + /* disable read/write access */ + ctrl_reg_val = ath11k_ahb_read32(ab, hal_seq_wcss_umac_tcl_reg + + hal_tcl1_ring_cmn_ctrl_reg); + ctrl_reg_val &= ~hal_tcl1_ring_cmn_ctrl_dscp_tid_map_prog_en; + ath11k_ahb_write32(ab, hal_seq_wcss_umac_tcl_reg + + hal_tcl1_ring_cmn_ctrl_reg, + ctrl_reg_val); +} + +void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng) +{ + struct hal_srng_params params; + struct hal_tlv_hdr *tlv; + int i, entry_size; + u8 *desc; + + memset(¶ms, 0, sizeof(params)); + + entry_size = ath11k_hal_srng_get_entrysize(hal_tcl_data); + ath11k_hal_srng_get_params(ab, srng, ¶ms); + desc = (u8 *)params.ring_base_vaddr; + + for (i = 0; i < params.num_entries; i++) { + tlv = (struct hal_tlv_hdr *)desc; + tlv->tl = field_prep(hal_tlv_hdr_tag, hal_tcl_data_cmd) | + field_prep(hal_tlv_hdr_len, + sizeof(struct hal_tcl_data_cmd)); + desc += entry_size; + } +} diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hal_tx.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. 
all rights reserved. + */ + +#ifndef ath11k_hal_tx_h +#define ath11k_hal_tx_h + +#include "hal_desc.h" + +#define hal_tx_addrx_en 1 +#define hal_tx_addry_en 2 + +#define hal_tx_addr_search_index 0 +#define hal_tx_addr_search_default 1 + +struct hal_tx_info { + u16 meta_data_flags; /* %hal_tcl_data_cmd_info0_meta_ */ + u8 ring_id; + u32 desc_id; + enum hal_tcl_desc_type type; + enum hal_tcl_encap_type encap_type; + dma_addr_t paddr; + u32 data_len; + u32 pkt_offset; + enum hal_encrypt_type encrypt_type; + u32 flags0; /* %hal_tcl_data_cmd_info1_ */ + u32 flags1; /* %hal_tcl_data_cmd_info2_ */ + u16 addr_search_flags; /* %hal_tcl_data_cmd_info0_addr(x/y)_ */ + u16 bss_ast_hash; + u8 tid; + u8 search_type; /* %hal_tx_addr_search_ */ + u8 lmac_id; + u8 dscp_tid_tbl_idx; +}; + +/* todo: check if the actual desc macros can be used instead */ +#define hal_tx_status_flags_first_msdu bit(0) +#define hal_tx_status_flags_last_msdu bit(1) +#define hal_tx_status_flags_msdu_in_amsdu bit(2) +#define hal_tx_status_flags_rate_stats_valid bit(3) +#define hal_tx_status_flags_rate_ldpc bit(4) +#define hal_tx_status_flags_rate_stbc bit(5) +#define hal_tx_status_flags_ofdma bit(6) + +#define hal_tx_status_desc_len sizeof(struct hal_wbm_release_ring) + +/* tx status parsed from srng desc */ +struct hal_tx_status { + enum hal_wbm_rel_src_module buf_rel_source; + u32 desc_id; + enum hal_wbm_tqm_rel_reason status; + u8 ack_rssi; + enum hal_tx_rate_stats_bw bw; + enum hal_tx_rate_stats_pkt_type pkt_type; + enum hal_tx_rate_stats_sgi sgi; + u8 mcs; + u16 num_tones_in_ru; + u32 flags; /* %hal_tx_status_flags_ */ + u32 tsf; + u32 ppdu_id; + u8 try_cnt; + u8 tid; + u16 peer_id; +}; + +void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd, + struct hal_tx_info *ti); +void ath11k_hal_tx_desc_sync(void *tx_desc_cached, void *hw_desc); +void ath11k_hal_tx_status_parse(struct ath11k_base *ab, + struct hal_wbm_release_ring *desc, + struct hal_tx_status *ts); +void 
ath11k_hal_tx_status_desc_sync(void *hw_desc, void *local_desc); +void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id); +int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng, + enum hal_reo_cmd_type type, + struct ath11k_hal_reo_cmd *cmd); +void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, + struct hal_srng *srng); +#endif diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/htc.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ +#include <linux/skbuff.h> +#include <linux/ctype.h> + +#include "ahb.h" +#include "debug.h" + +struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size) +{ + struct sk_buff *skb; + + skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr)); + if (!skb) + return null; + + skb_reserve(skb, sizeof(struct ath11k_htc_hdr)); + + /* fw/htc requires 4-byte aligned streams */ + if (!is_aligned((unsigned long)skb->data, 4)) + ath11k_warn(ab, "unaligned htc tx skb "); + + return skb; +} + +static void ath11k_htc_control_tx_complete(struct ath11k_base *ab, + struct sk_buff *skb) +{ + kfree_skb(skb); +} + +static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab) +{ + struct sk_buff *skb; + struct ath11k_skb_cb *skb_cb; + + skb = dev_alloc_skb(ath11k_htc_control_buffer_size); + if (!skb) + return null; + + skb_reserve(skb, sizeof(struct ath11k_htc_hdr)); + warn_on_once(!is_aligned((unsigned long)skb->data, 4)); + + skb_cb = ath11k_skb_cb(skb); + memset(skb_cb, 0, sizeof(*skb_cb)); + + ath11k_dbg(ab, ath11k_dbg_htc, "%s: skb %pk ", __func__, skb); + return skb; +} + +static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc, + struct sk_buff *skb) +{ + struct ath11k_skb_cb *skb_cb = ath11k_skb_cb(skb); + + dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, dma_to_device); + skb_pull(skb, sizeof(struct 
ath11k_htc_hdr)); +} + +static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep, + struct sk_buff *skb) +{ + struct ath11k_htc_hdr *hdr; + + hdr = (struct ath11k_htc_hdr *)skb->data; + + memset(hdr, 0, sizeof(*hdr)); + hdr->htc_info = field_prep(htc_hdr_endpointid, ep->eid) | + field_prep(htc_hdr_payloadlen, + (skb->len - sizeof(*hdr))) | + field_prep(htc_hdr_flags, + ath11k_htc_flag_need_credit_update); + + spin_lock_bh(&ep->htc->tx_lock); + hdr->ctrl_info = field_prep(htc_hdr_controlbytes1, ep->seq_no++); + spin_unlock_bh(&ep->htc->tx_lock); +} + +int ath11k_htc_send(struct ath11k_htc *htc, + enum ath11k_htc_ep_id eid, + struct sk_buff *skb) +{ + struct ath11k_htc_ep *ep = &htc->endpoint[eid]; + struct ath11k_skb_cb *skb_cb = ath11k_skb_cb(skb); + struct device *dev = htc->ab->dev; + struct ath11k_base *ab = htc->ab; + int credits = 0; + int ret; + + if (eid >= ath11k_htc_ep_count) { + ath11k_warn(ab, "invalid endpoint id: %d ", eid); + return -enoent; + } + + skb_push(skb, sizeof(struct ath11k_htc_hdr)); + + if (ep->tx_credit_flow_enabled) { + credits = div_round_up(skb->len, htc->target_credit_size); + spin_lock_bh(&htc->tx_lock); + if (ep->tx_credits < credits) { + ath11k_dbg(ab, ath11k_dbg_htc, + "htc insufficient credits ep %d required %d available %d ", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); + ret = -eagain; + goto err_pull; + } + ep->tx_credits -= credits; + ath11k_dbg(ab, ath11k_dbg_htc, + "htc ep %d consumed %d credits (total %d) ", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); + } + + ath11k_htc_prepare_tx_skb(ep, skb); + + skb_cb->eid = eid; + skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, dma_to_device); + ret = dma_mapping_error(dev, skb_cb->paddr); + if (ret) { + ret = -eio; + goto err_credits; + } + + ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid); + if (ret) + goto err_unmap; + + return 0; + +err_unmap: + dma_unmap_single(dev, skb_cb->paddr, skb->len, dma_to_device); 
+err_credits: + if (ep->tx_credit_flow_enabled) { + spin_lock_bh(&htc->tx_lock); + ep->tx_credits += credits; + ath11k_dbg(ab, ath11k_dbg_htc, + "htc ep %d reverted %d credits back (total %d) ", + eid, credits, ep->tx_credits); + spin_unlock_bh(&htc->tx_lock); + + if (ep->ep_ops.ep_tx_credits) + ep->ep_ops.ep_tx_credits(htc->ab); + } +err_pull: + skb_pull(skb, sizeof(struct ath11k_htc_hdr)); + return ret; +} + +static void +ath11k_htc_process_credit_report(struct ath11k_htc *htc, + const struct ath11k_htc_credit_report *report, + int len, + enum ath11k_htc_ep_id eid) +{ + struct ath11k_base *ab = htc->ab; + struct ath11k_htc_ep *ep; + int i, n_reports; + + if (len % sizeof(*report)) + ath11k_warn(ab, "uneven credit report len %d", len); + + n_reports = len / sizeof(*report); + + spin_lock_bh(&htc->tx_lock); + for (i = 0; i < n_reports; i++, report++) { + if (report->eid >= ath11k_htc_ep_count) + break; + + ep = &htc->endpoint[report->eid]; + ep->tx_credits += report->credits; + + ath11k_dbg(ab, ath11k_dbg_htc, "htc ep %d got %d credits (total %d) ", + report->eid, report->credits, ep->tx_credits); + + if (ep->ep_ops.ep_tx_credits) { + spin_unlock_bh(&htc->tx_lock); + ep->ep_ops.ep_tx_credits(htc->ab); + spin_lock_bh(&htc->tx_lock); + } + } + spin_unlock_bh(&htc->tx_lock); +} + +static int ath11k_htc_process_trailer(struct ath11k_htc *htc, + u8 *buffer, + int length, + enum ath11k_htc_ep_id src_eid) +{ + struct ath11k_base *ab = htc->ab; + int status = 0; + struct ath11k_htc_record *record; + size_t len; + + while (length > 0) { + record = (struct ath11k_htc_record *)buffer; + + if (length < sizeof(record->hdr)) { + status = -einval; + break; + } + + if (record->hdr.len > length) { + /* no room left in buffer for record */ + ath11k_warn(ab, "invalid record length: %d ", + record->hdr.len); + status = -einval; + break; + } + + switch (record->hdr.id) { + case ath11k_htc_record_credits: + len = sizeof(struct ath11k_htc_credit_report); + if (record->hdr.len < len) { + 
ath11k_warn(ab, "credit report too long "); + status = -einval; + break; + } + ath11k_htc_process_credit_report(htc, + record->credit_report, + record->hdr.len, + src_eid); + break; + default: + ath11k_warn(ab, "unhandled record: id:%d length:%d ", + record->hdr.id, record->hdr.len); + break; + } + + if (status) + break; + + /* multiple records may be present in a trailer */ + buffer += sizeof(record->hdr) + record->hdr.len; + length -= sizeof(record->hdr) + record->hdr.len; + } + + return status; +} + +void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, + struct sk_buff *skb) +{ + int status = 0; + struct ath11k_htc *htc = &ab->htc; + struct ath11k_htc_hdr *hdr; + struct ath11k_htc_ep *ep; + u16 payload_len; + u32 trailer_len = 0; + size_t min_len; + u8 eid; + bool trailer_present; + + hdr = (struct ath11k_htc_hdr *)skb->data; + skb_pull(skb, sizeof(*hdr)); + + eid = field_get(htc_hdr_endpointid, hdr->htc_info); + + if (eid >= ath11k_htc_ep_count) { + ath11k_warn(ab, "htc rx: invalid eid %d ", eid); + goto out; + } + + ep = &htc->endpoint[eid]; + + payload_len = field_get(htc_hdr_payloadlen, hdr->htc_info); + + if (payload_len + sizeof(*hdr) > ath11k_htc_max_len) { + ath11k_warn(ab, "htc rx frame too long, len: %zu ", + payload_len + sizeof(*hdr)); + goto out; + } + + if (skb->len < payload_len) { + ath11k_warn(ab, "htc rx: insufficient length, got %d, expected %d ", + skb->len, payload_len); + goto out; + } + + /* get flags to check for trailer */ + trailer_present = (field_get(htc_hdr_flags, hdr->htc_info)) & + ath11k_htc_flag_trailer_present; + + if (trailer_present) { + u8 *trailer; + + trailer_len = field_get(htc_hdr_controlbytes0, hdr->ctrl_info); + min_len = sizeof(struct ath11k_htc_record_hdr); + + if ((trailer_len < min_len) || + (trailer_len > payload_len)) { + ath11k_warn(ab, "invalid trailer length: %d ", + trailer_len); + goto out; + } + + trailer = (u8 *)hdr; + trailer += sizeof(*hdr); + trailer += payload_len; + trailer -= trailer_len; + 
status = ath11k_htc_process_trailer(htc, trailer, + trailer_len, eid); + if (status) + goto out; + + skb_trim(skb, skb->len - trailer_len); + } + + if (trailer_len >= payload_len) + /* zero length packet with trailer data, just drop these */ + goto out; + + if (eid == ath11k_htc_ep_0) { + struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data; + + switch (field_get(htc_msg_messageid, msg->msg_svc_id)) { + case ath11k_htc_msg_ready_id: + case ath11k_htc_msg_connect_service_resp_id: + /* handle htc control message */ + if (completion_done(&htc->ctl_resp)) { + /* this is a fatal error, target should not be + * sending unsolicited messages on the ep 0 + */ + ath11k_warn(ab, "htc rx ctrl still processing "); + complete(&htc->ctl_resp); + goto out; + } + + htc->control_resp_len = + min_t(int, skb->len, + ath11k_htc_max_ctrl_msg_len); + + memcpy(htc->control_resp_buffer, skb->data, + htc->control_resp_len); + + complete(&htc->ctl_resp); + break; + default: + ath11k_warn(ab, "ignoring unsolicited htc ep0 event "); + break; + } + goto out; + } + + ath11k_dbg(ab, ath11k_dbg_htc, "htc rx completion ep %d skb %pk ", + eid, skb); + ep->ep_ops.ep_rx_complete(ab, skb); + + /* poll tx completion for interrupt disabled ce's */ + ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id); + + /* skb is now owned by the rx completion handler */ + skb = null; +out: + kfree_skb(skb); +} + +static void ath11k_htc_control_rx_complete(struct ath11k_base *ab, + struct sk_buff *skb) +{ + /* this is unexpected. fw is not supposed to send regular rx on this + * endpoint. 
+ */ + ath11k_warn(ab, "unexpected htc rx "); + kfree_skb(skb); +} + +static const char *htc_service_name(enum ath11k_htc_svc_id id) +{ + switch (id) { + case ath11k_htc_svc_id_reserved: + return "reserved"; + case ath11k_htc_svc_id_rsvd_ctrl: + return "control"; + case ath11k_htc_svc_id_wmi_control: + return "wmi"; + case ath11k_htc_svc_id_wmi_data_be: + return "data be"; + case ath11k_htc_svc_id_wmi_data_bk: + return "data bk"; + case ath11k_htc_svc_id_wmi_data_vi: + return "data vi"; + case ath11k_htc_svc_id_wmi_data_vo: + return "data vo"; + case ath11k_htc_svc_id_wmi_control_mac1: + return "wmi mac1"; + case ath11k_htc_svc_id_wmi_control_mac2: + return "wmi mac2"; + case ath11k_htc_svc_id_nmi_control: + return "nmi control"; + case ath11k_htc_svc_id_nmi_data: + return "nmi data"; + case ath11k_htc_svc_id_htt_data_msg: + return "htt data"; + case ath11k_htc_svc_id_test_raw_streams: + return "raw"; + case ath11k_htc_svc_id_ipa_tx: + return "ipa tx"; + case ath11k_htc_svc_id_pkt_log: + return "pkt log"; + } + + return "unknown"; +} + +static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc) +{ + struct ath11k_htc_ep *ep; + int i; + + for (i = ath11k_htc_ep_0; i < ath11k_htc_ep_count; i++) { + ep = &htc->endpoint[i]; + ep->service_id = ath11k_htc_svc_id_unused; + ep->max_ep_message_len = 0; + ep->max_tx_queue_depth = 0; + ep->eid = i; + ep->htc = htc; + ep->tx_credit_flow_enabled = true; + } +} + +static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc, + u16 service_id) +{ + u8 i, allocation = 0; + + for (i = 0; i < ath11k_htc_max_service_alloc_entries; i++) { + if (htc->service_alloc_table[i].service_id == service_id) { + allocation = + htc->service_alloc_table[i].credit_allocation; + } + } + + return allocation; +} + +static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc) +{ + struct ath11k_htc_svc_tx_credits *serv_entry; + u32 svc_id[] = { + ath11k_htc_svc_id_wmi_control, + ath11k_htc_svc_id_wmi_control_mac1, + 
ath11k_htc_svc_id_wmi_control_mac2, + }; + int i, credits; + + credits = htc->total_transmit_credits; + serv_entry = htc->service_alloc_table; + + if ((htc->wmi_ep_count == 0) || + (htc->wmi_ep_count > array_size(svc_id))) + return -einval; + + /* divide credits among number of endpoints for wmi */ + credits = credits / htc->wmi_ep_count; + for (i = 0; i < htc->wmi_ep_count; i++) { + serv_entry[i].service_id = svc_id[i]; + serv_entry[i].credit_allocation = credits; + } + + return 0; +} + +int ath11k_htc_wait_target(struct ath11k_htc *htc) +{ + int i, status = 0; + struct ath11k_base *ab = htc->ab; + unsigned long time_left; + struct ath11k_htc_ready *ready; + u16 message_id; + u16 credit_count; + u16 credit_size; + + time_left = wait_for_completion_timeout(&htc->ctl_resp, + ath11k_htc_wait_timeout_hz); + if (!time_left) { + ath11k_warn(ab, "failed to receive control response completion, polling.. "); + + for (i = 0; i < ce_count; i++) + ath11k_ce_per_engine_service(htc->ab, i); + + time_left = + wait_for_completion_timeout(&htc->ctl_resp, + ath11k_htc_wait_timeout_hz); + + if (!time_left) + status = -etimedout; + } + + if (status < 0) { + ath11k_warn(ab, "ctl_resp never came in (%d) ", status); + return status; + } + + if (htc->control_resp_len < sizeof(*ready)) { + ath11k_warn(ab, "invalid htc ready msg len:%d ", + htc->control_resp_len); + return -ecomm; + } + + ready = (struct ath11k_htc_ready *)htc->control_resp_buffer; + message_id = field_get(htc_msg_messageid, ready->id_credit_count); + credit_count = field_get(htc_ready_msg_creditcount, + ready->id_credit_count); + credit_size = field_get(htc_ready_msg_creditsize, ready->size_ep); + + if (message_id != ath11k_htc_msg_ready_id) { + ath11k_warn(ab, "invalid htc ready msg: 0x%x ", message_id); + return -ecomm; + } + + htc->total_transmit_credits = credit_count; + htc->target_credit_size = credit_size; + + ath11k_dbg(ab, ath11k_dbg_htc, + "target ready! 
transmit resources: %d size:%d ", + htc->total_transmit_credits, htc->target_credit_size); + + if ((htc->total_transmit_credits == 0) || + (htc->target_credit_size == 0)) { + ath11k_warn(ab, "invalid credit size received "); + return -ecomm; + } + + ath11k_htc_setup_target_buffer_assignments(htc); + + return 0; +} + +int ath11k_htc_connect_service(struct ath11k_htc *htc, + struct ath11k_htc_svc_conn_req *conn_req, + struct ath11k_htc_svc_conn_resp *conn_resp) +{ + struct ath11k_base *ab = htc->ab; + struct ath11k_htc_conn_svc *req_msg; + struct ath11k_htc_conn_svc_resp resp_msg_dummy; + struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy; + enum ath11k_htc_ep_id assigned_eid = ath11k_htc_ep_count; + struct ath11k_htc_ep *ep; + struct sk_buff *skb; + unsigned int max_msg_size = 0; + int length, status; + unsigned long time_left; + bool disable_credit_flow_ctrl = false; + u16 message_id, service_id, flags = 0; + u8 tx_alloc = 0; + + /* special case for htc pseudo control service */ + if (conn_req->service_id == ath11k_htc_svc_id_rsvd_ctrl) { + disable_credit_flow_ctrl = true; + assigned_eid = ath11k_htc_ep_0; + max_msg_size = ath11k_htc_max_ctrl_msg_len; + memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy)); + goto setup; + } + + tx_alloc = ath11k_htc_get_credit_allocation(htc, + conn_req->service_id); + if (!tx_alloc) + ath11k_dbg(ab, ath11k_dbg_boot, + "boot htc service %s does not allocate target credits ", + htc_service_name(conn_req->service_id)); + + skb = ath11k_htc_build_tx_ctrl_skb(htc->ab); + if (!skb) { + ath11k_warn(ab, "failed to allocate htc packet "); + return -enomem; + } + + length = sizeof(*req_msg); + skb_put(skb, length); + memset(skb->data, 0, length); + + req_msg = (struct ath11k_htc_conn_svc *)skb->data; + req_msg->msg_svc_id = field_prep(htc_msg_messageid, + ath11k_htc_msg_connect_service_id); + + flags |= field_prep(ath11k_htc_conn_flags_recv_alloc, tx_alloc); + + /* only enable credit flow control for wmi ctrl service */ + if 
(!(conn_req->service_id == ath11k_htc_svc_id_wmi_control || + conn_req->service_id == ath11k_htc_svc_id_wmi_control_mac1 || + conn_req->service_id == ath11k_htc_svc_id_wmi_control_mac2)) { + flags |= ath11k_htc_conn_flags_disable_credit_flow_ctrl; + disable_credit_flow_ctrl = true; + } + + req_msg->flags_len = field_prep(htc_svc_msg_connectionflags, flags); + req_msg->msg_svc_id |= field_prep(htc_svc_msg_service_id, + conn_req->service_id); + + reinit_completion(&htc->ctl_resp); + + status = ath11k_htc_send(htc, ath11k_htc_ep_0, skb); + if (status) { + kfree_skb(skb); + return status; + } + + /* wait for response */ + time_left = wait_for_completion_timeout(&htc->ctl_resp, + ath11k_htc_conn_svc_timeout_hz); + if (!time_left) { + ath11k_err(ab, "service connect timeout "); + return -etimedout; + } + + /* we controlled the buffer creation, it's aligned */ + resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer; + message_id = field_get(htc_msg_messageid, resp_msg->msg_svc_id); + service_id = field_get(htc_svc_resp_msg_serviceid, + resp_msg->msg_svc_id); + + if ((message_id != ath11k_htc_msg_connect_service_resp_id) || + (htc->control_resp_len < sizeof(*resp_msg))) { + ath11k_err(ab, "invalid resp message id 0x%x", message_id); + return -eproto; + } + + ath11k_dbg(ab, ath11k_dbg_htc, + "htc service %s connect response: status: 0x%lx, assigned ep: 0x%lx ", + htc_service_name(service_id), + field_get(htc_svc_resp_msg_status, resp_msg->flags_len), + field_get(htc_svc_resp_msg_endpointid, resp_msg->flags_len)); + + conn_resp->connect_resp_code = field_get(htc_svc_resp_msg_status, + resp_msg->flags_len); + + /* check response status */ + if (conn_resp->connect_resp_code != ath11k_htc_conn_svc_status_success) { + ath11k_err(ab, "htc service %s connect request failed: 0x%x) ", + htc_service_name(service_id), + conn_resp->connect_resp_code); + return -eproto; + } + + assigned_eid = (enum ath11k_htc_ep_id)field_get( + htc_svc_resp_msg_endpointid, + 
resp_msg->flags_len); + + max_msg_size = field_get(htc_svc_resp_msg_maxmsgsize, + resp_msg->flags_len); + +setup: + + if (assigned_eid >= ath11k_htc_ep_count) + return -eproto; + + if (max_msg_size == 0) + return -eproto; + + ep = &htc->endpoint[assigned_eid]; + ep->eid = assigned_eid; + + if (ep->service_id != ath11k_htc_svc_id_unused) + return -eproto; + + /* return assigned endpoint to caller */ + conn_resp->eid = assigned_eid; + conn_resp->max_msg_len = field_get(htc_svc_resp_msg_maxmsgsize, + resp_msg->flags_len); + + /* setup the endpoint */ + ep->service_id = conn_req->service_id; + ep->max_tx_queue_depth = conn_req->max_send_queue_depth; + ep->max_ep_message_len = field_get(htc_svc_resp_msg_maxmsgsize, + resp_msg->flags_len); + ep->tx_credits = tx_alloc; + + /* copy all the callbacks */ + ep->ep_ops = conn_req->ep_ops; + + status = ath11k_ahb_map_service_to_pipe(htc->ab, + ep->service_id, + &ep->ul_pipe_id, + &ep->dl_pipe_id); + if (status) + return status; + + ath11k_dbg(ab, ath11k_dbg_boot, + "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready ", + htc_service_name(ep->service_id), ep->ul_pipe_id, + ep->dl_pipe_id, ep->eid); + + if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { + ep->tx_credit_flow_enabled = false; + ath11k_dbg(ab, ath11k_dbg_boot, + "boot htc service '%s' eid %d tx flow control disabled ", + htc_service_name(ep->service_id), assigned_eid); + } + + return status; +} + +int ath11k_htc_start(struct ath11k_htc *htc) +{ + struct sk_buff *skb; + int status = 0; + struct ath11k_base *ab = htc->ab; + struct ath11k_htc_setup_complete_extended *msg; + + skb = ath11k_htc_build_tx_ctrl_skb(htc->ab); + if (!skb) + return -enomem; + + skb_put(skb, sizeof(*msg)); + memset(skb->data, 0, skb->len); + + msg = (struct ath11k_htc_setup_complete_extended *)skb->data; + msg->msg_id = field_prep(htc_msg_messageid, + ath11k_htc_msg_setup_complete_ex_id); + + ath11k_dbg(ab, ath11k_dbg_htc, "htc is using tx credit flow control "); + + status = 
ath11k_htc_send(htc, ath11k_htc_ep_0, skb); + if (status) { + kfree_skb(skb); + return status; + } + + return 0; +} + +int ath11k_htc_init(struct ath11k_base *ab) +{ + struct ath11k_htc *htc = &ab->htc; + struct ath11k_htc_svc_conn_req conn_req; + struct ath11k_htc_svc_conn_resp conn_resp; + int ret; + + spin_lock_init(&htc->tx_lock); + + ath11k_htc_reset_endpoint_states(htc); + + htc->ab = ab; + + switch (ab->wmi_sc.preferred_hw_mode) { + case wmi_host_hw_mode_single: + htc->wmi_ep_count = 1; + break; + case wmi_host_hw_mode_dbs: + case wmi_host_hw_mode_dbs_or_sbs: + htc->wmi_ep_count = 2; + break; + case wmi_host_hw_mode_dbs_sbs: + htc->wmi_ep_count = 3; + break; + default: + htc->wmi_ep_count = 3; + break; + } + + /* setup our pseudo htc control endpoint connection */ + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, sizeof(conn_resp)); + conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete; + conn_req.max_send_queue_depth = ath11k_num_control_tx_buffers; + conn_req.service_id = ath11k_htc_svc_id_rsvd_ctrl; + + /* connect fake service */ + ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp); + if (ret) { + ath11k_err(ab, "could not connect to htc service (%d) ", ret); + return ret; + } + + init_completion(&htc->ctl_resp); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/htc.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_htc_h +#define ath11k_htc_h + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/bug.h> +#include <linux/skbuff.h> +#include <linux/timer.h> + +struct ath11k_base; + +#define htc_hdr_endpointid genmask(7, 0) +#define htc_hdr_flags genmask(15, 8) +#define htc_hdr_payloadlen genmask(31, 16) +#define htc_hdr_controlbytes0 genmask(7, 0) +#define htc_hdr_controlbytes1 genmask(15, 8) +#define htc_hdr_reserved genmask(31, 16) + +#define htc_svc_msg_service_id genmask(31, 16) +#define htc_svc_msg_connectionflags genmask(15, 0) +#define htc_svc_msg_servicemetalength genmask(23, 16) +#define htc_ready_msg_creditcount genmask(31, 16) +#define htc_ready_msg_creditsize genmask(15, 0) +#define htc_ready_msg_maxendpoints genmask(23, 16) + +#define htc_ready_ex_msg_htcversion genmask(7, 0) +#define htc_ready_ex_msg_maxmsgsperhtcbundle genmask(15, 8) + +#define htc_svc_resp_msg_serviceid genmask(31, 16) +#define htc_svc_resp_msg_status genmask(7, 0) +#define htc_svc_resp_msg_endpointid genmask(15, 8) +#define htc_svc_resp_msg_maxmsgsize genmask(31, 16) +#define htc_svc_resp_msg_servicemetalength genmask(7, 0) + +#define htc_msg_messageid genmask(15, 0) +#define htc_setup_complete_ex_msg_setupflags genmask(31, 0) +#define htc_setup_complete_ex_msg_maxmsgsperbundledrecv genmask(7, 0) +#define htc_setup_complete_ex_msg_rsvd0 genmask(15, 8) +#define htc_setup_complete_ex_msg_rsvd1 genmask(23, 16) +#define htc_setup_complete_ex_msg_rsvd2 genmask(31, 24) + +enum ath11k_htc_tx_flags { + ath11k_htc_flag_need_credit_update = 0x01, + ath11k_htc_flag_send_bundle = 0x02 +}; + +enum ath11k_htc_rx_flags { + ath11k_htc_flag_trailer_present = 0x02, + ath11k_htc_flag_bundle_mask = 0xf0 +}; + +struct ath11k_htc_hdr { + u32 htc_info; + u32 ctrl_info; +} __packed __aligned(4); + +enum ath11k_htc_msg_id { + ath11k_htc_msg_ready_id = 1, + ath11k_htc_msg_connect_service_id = 2, + ath11k_htc_msg_connect_service_resp_id = 3, + ath11k_htc_msg_setup_complete_id = 4, + 
ath11k_htc_msg_setup_complete_ex_id = 5, + ath11k_htc_msg_send_suspend_complete = 6 +}; + +enum ath11k_htc_version { + ath11k_htc_version_2p0 = 0x00, /* 2.0 */ + ath11k_htc_version_2p1 = 0x01, /* 2.1 */ +}; + +#define ath11k_htc_conn_flags_threshold_level_mask genmask(1, 0) +#define ath11k_htc_conn_flags_recv_alloc genmask(15, 8) + +enum ath11k_htc_conn_flags { + ath11k_htc_conn_flags_threshold_level_one_fourth = 0x0, + ath11k_htc_conn_flags_threshold_level_one_half = 0x1, + ath11k_htc_conn_flags_threshold_level_three_fourths = 0x2, + ath11k_htc_conn_flags_threshold_level_unity = 0x3, + ath11k_htc_conn_flags_reduce_credit_dribble = 1 << 2, + ath11k_htc_conn_flags_disable_credit_flow_ctrl = 1 << 3 +}; + +enum ath11k_htc_conn_svc_status { + ath11k_htc_conn_svc_status_success = 0, + ath11k_htc_conn_svc_status_not_found = 1, + ath11k_htc_conn_svc_status_failed = 2, + ath11k_htc_conn_svc_status_no_resources = 3, + ath11k_htc_conn_svc_status_no_more_ep = 4 +}; + +struct ath11k_htc_ready { + u32 id_credit_count; + u32 size_ep; +} __packed; + +struct ath11k_htc_ready_extended { + struct ath11k_htc_ready base; + u32 ver_bundle; +} __packed; + +struct ath11k_htc_conn_svc { + u32 msg_svc_id; + u32 flags_len; +} __packed; + +struct ath11k_htc_conn_svc_resp { + u32 msg_svc_id; + u32 flags_len; + u32 svc_meta_pad; +} __packed; + +struct ath11k_htc_setup_complete_extended { + u32 msg_id; + u32 flags; + u32 max_msgs_per_bundled_recv; +} __packed; + +struct ath11k_htc_msg { + u32 msg_svc_id; + u32 flags_len; +} __packed __aligned(4); + +enum ath11k_htc_record_id { + ath11k_htc_record_null = 0, + ath11k_htc_record_credits = 1 +}; + +struct ath11k_htc_record_hdr { + u8 id; /* @enum ath11k_htc_record_id */ + u8 len; + u8 pad0; + u8 pad1; +} __packed; + +struct ath11k_htc_credit_report { + u8 eid; /* @enum ath11k_htc_ep_id */ + u8 credits; + u8 pad0; + u8 pad1; +} __packed; + +struct ath11k_htc_record { + struct ath11k_htc_record_hdr hdr; + union { + struct ath11k_htc_credit_report 
credit_report[0]; + u8 pauload[0]; + }; +} __packed __aligned(4); + +/* note: the trailer offset is dynamic depending + * on payload length. this is only a struct layout draft + */ +struct ath11k_htc_frame { + struct ath11k_htc_hdr hdr; + union { + struct ath11k_htc_msg msg; + u8 payload[0]; + }; + struct ath11k_htc_record trailer[0]; +} __packed __aligned(4); + +enum ath11k_htc_svc_gid { + ath11k_htc_svc_grp_rsvd = 0, + ath11k_htc_svc_grp_wmi = 1, + ath11k_htc_svc_grp_nmi = 2, + ath11k_htc_svc_grp_htt = 3, + ath11k_htc_svc_grp_cfg = 4, + ath11k_htc_svc_grp_ipa = 5, + ath11k_htc_svc_grp_pktlog = 6, + + ath11k_htc_svc_grp_test = 254, + ath11k_htc_svc_grp_last = 255, +}; + +#define svc(group, idx) \ + (int)(((int)(group) << 8) | (int)(idx)) + +enum ath11k_htc_svc_id { + /* note: service id of 0x0000 is reserved and should never be used */ + ath11k_htc_svc_id_reserved = 0x0000, + ath11k_htc_svc_id_unused = ath11k_htc_svc_id_reserved, + + ath11k_htc_svc_id_rsvd_ctrl = svc(ath11k_htc_svc_grp_rsvd, 1), + ath11k_htc_svc_id_wmi_control = svc(ath11k_htc_svc_grp_wmi, 0), + ath11k_htc_svc_id_wmi_data_be = svc(ath11k_htc_svc_grp_wmi, 1), + ath11k_htc_svc_id_wmi_data_bk = svc(ath11k_htc_svc_grp_wmi, 2), + ath11k_htc_svc_id_wmi_data_vi = svc(ath11k_htc_svc_grp_wmi, 3), + ath11k_htc_svc_id_wmi_data_vo = svc(ath11k_htc_svc_grp_wmi, 4), + ath11k_htc_svc_id_wmi_control_mac1 = svc(ath11k_htc_svc_grp_wmi, 5), + ath11k_htc_svc_id_wmi_control_mac2 = svc(ath11k_htc_svc_grp_wmi, 6), + + ath11k_htc_svc_id_nmi_control = svc(ath11k_htc_svc_grp_nmi, 0), + ath11k_htc_svc_id_nmi_data = svc(ath11k_htc_svc_grp_nmi, 1), + + ath11k_htc_svc_id_htt_data_msg = svc(ath11k_htc_svc_grp_htt, 0), + + /* raw stream service (i.e. 
flash, tcmd, calibration apps) */ + ath11k_htc_svc_id_test_raw_streams = svc(ath11k_htc_svc_grp_test, 0), + ath11k_htc_svc_id_ipa_tx = svc(ath11k_htc_svc_grp_ipa, 0), + ath11k_htc_svc_id_pkt_log = svc(ath11k_htc_svc_grp_pktlog, 0), +}; + +#undef svc + +enum ath11k_htc_ep_id { + ath11k_htc_ep_unused = -1, + ath11k_htc_ep_0 = 0, + ath11k_htc_ep_1 = 1, + ath11k_htc_ep_2, + ath11k_htc_ep_3, + ath11k_htc_ep_4, + ath11k_htc_ep_5, + ath11k_htc_ep_6, + ath11k_htc_ep_7, + ath11k_htc_ep_8, + ath11k_htc_ep_count, +}; + +struct ath11k_htc_ops { + void (*target_send_suspend_complete)(struct ath11k_base *ar); +}; + +struct ath11k_htc_ep_ops { + void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *); + void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *); + void (*ep_tx_credits)(struct ath11k_base *); +}; + +/* service connection information */ +struct ath11k_htc_svc_conn_req { + u16 service_id; + struct ath11k_htc_ep_ops ep_ops; + int max_send_queue_depth; +}; + +/* service connection response information */ +struct ath11k_htc_svc_conn_resp { + u8 buffer_len; + u8 actual_len; + enum ath11k_htc_ep_id eid; + unsigned int max_msg_len; + u8 connect_resp_code; +}; + +#define ath11k_num_control_tx_buffers 2 +#define ath11k_htc_max_len 4096 +#define ath11k_htc_max_ctrl_msg_len 256 +#define ath11k_htc_wait_timeout_hz (1 * hz) +#define ath11k_htc_control_buffer_size (ath11k_htc_max_ctrl_msg_len + \ + sizeof(struct ath11k_htc_hdr)) +#define ath11k_htc_conn_svc_timeout_hz (1 * hz) +#define ath11k_htc_max_service_alloc_entries 8 + +struct ath11k_htc_ep { + struct ath11k_htc *htc; + enum ath11k_htc_ep_id eid; + enum ath11k_htc_svc_id service_id; + struct ath11k_htc_ep_ops ep_ops; + + int max_tx_queue_depth; + int max_ep_message_len; + u8 ul_pipe_id; + u8 dl_pipe_id; + + u8 seq_no; /* for debugging */ + int tx_credits; + bool tx_credit_flow_enabled; +}; + +struct ath11k_htc_svc_tx_credits { + u16 service_id; + u8 credit_allocation; +}; + +struct ath11k_htc { + struct 
ath11k_base *ab; + struct ath11k_htc_ep endpoint[ath11k_htc_ep_count]; + + /* protects endpoints */ + spinlock_t tx_lock; + + struct ath11k_htc_ops htc_ops; + + u8 control_resp_buffer[ath11k_htc_max_ctrl_msg_len]; + int control_resp_len; + + struct completion ctl_resp; + + int total_transmit_credits; + struct ath11k_htc_svc_tx_credits + service_alloc_table[ath11k_htc_max_service_alloc_entries]; + int target_credit_size; + u8 wmi_ep_count; +}; + +int ath11k_htc_init(struct ath11k_base *ar); +int ath11k_htc_wait_target(struct ath11k_htc *htc); +int ath11k_htc_start(struct ath11k_htc *htc); +int ath11k_htc_connect_service(struct ath11k_htc *htc, + struct ath11k_htc_svc_conn_req *conn_req, + struct ath11k_htc_svc_conn_resp *conn_resp); +int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid, + struct sk_buff *packet); +struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ar, int size); +void ath11k_htc_rx_completion_handler(struct ath11k_base *ar, + struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/hw.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_hw_h +#define ath11k_hw_h + +/* target configuration defines */ + +/* num vdevs per radio */ +#define target_num_vdevs (16 + 1) + +#define target_num_peers_pdev (512 + target_num_vdevs) + +/* num of peers for single radio mode */ +#define target_num_peers_single (target_num_peers_pdev) + +/* num of peers for dbs */ +#define target_num_peers_dbs (2 * target_num_peers_pdev) + +/* num of peers for dbs_sbs */ +#define target_num_peers_dbs_sbs (3 * target_num_peers_pdev) + +/* max num of stations (per radio) */ +#define target_num_stations 512 + +#define target_num_peers(x) target_num_peers_##x +#define target_num_peer_keys 2 +#define target_num_tids(x) (2 * target_num_peers(x) + \ + 4 * target_num_vdevs + 8) + +#define target_ast_skid_limit 16 +#define target_num_offld_peers 4 +#define target_num_offld_reorder_buffs 4 + +#define target_tx_chain_mask (bit(0) | bit(1) | bit(2) | bit(4)) +#define target_rx_chain_mask (bit(0) | bit(1) | bit(2) | bit(4)) +#define target_rx_timeout_lo_pri 100 +#define target_rx_timeout_hi_pri 40 + +#define target_decap_mode_raw 0 +#define target_decap_mode_native_wifi 1 +#define target_decap_mode_eth 2 + +#define target_scan_max_pending_reqs 4 +#define target_bmiss_offload_max_vdev 3 +#define target_roam_offload_max_vdev 3 +#define target_roam_offload_max_ap_profiles 8 +#define target_gtk_offload_max_vdev 3 +#define target_num_mcast_groups 12 +#define target_num_mcast_table_elems 64 +#define target_mcast2ucast_mode 2 +#define target_tx_dbg_log_size 1024 +#define target_rx_skip_defrag_timeout_dup_detection_check 1 +#define target_vow_config 0 +#define target_num_msdu_desc (2500) +#define target_max_frag_entries 6 +#define target_max_bcn_offld 16 +#define target_num_wds_entries 32 +#define target_dma_burst_size 1 +#define target_rx_batchmode 1 + +#define ath11k_hw_max_queues 4 + +#define ath11k_hw_ratecode_cck_short_pream_mask 0x4 + +#define ath11k_fw_dir "ath11k" + +/* ipq8074 definitions */ +#define ipq8074_fw_dir 
"ipq8074" +#define ipq8074_max_board_data_sz (256 * 1024) +#define ipq8074_max_cal_data_sz ipq8074_max_board_data_sz + +#define ath11k_board_magic "qca-ath11k-board" +#define ath11k_board_api2_file "board-2.bin" +#define ath11k_default_board_file "bdwlan.bin" +#define ath11k_default_cal_file "caldata.bin" + +enum ath11k_hw_rate_cck { + ath11k_hw_rate_cck_lp_11m = 0, + ath11k_hw_rate_cck_lp_5_5m, + ath11k_hw_rate_cck_lp_2m, + ath11k_hw_rate_cck_lp_1m, + ath11k_hw_rate_cck_sp_11m, + ath11k_hw_rate_cck_sp_5_5m, + ath11k_hw_rate_cck_sp_2m, +}; + +enum ath11k_hw_rate_ofdm { + ath11k_hw_rate_ofdm_48m = 0, + ath11k_hw_rate_ofdm_24m, + ath11k_hw_rate_ofdm_12m, + ath11k_hw_rate_ofdm_6m, + ath11k_hw_rate_ofdm_54m, + ath11k_hw_rate_ofdm_36m, + ath11k_hw_rate_ofdm_18m, + ath11k_hw_rate_ofdm_9m, +}; + +struct ath11k_hw_params { + const char *name; + struct { + const char *dir; + size_t board_size; + size_t cal_size; + } fw; +}; + +struct ath11k_fw_ie { + __le32 id; + __le32 len; + u8 data[0]; +}; + +enum ath11k_bd_ie_board_type { + ath11k_bd_ie_board_name = 0, + ath11k_bd_ie_board_data = 1, +}; + +enum ath11k_bd_ie_type { + /* contains sub ies of enum ath11k_bd_ie_board_type */ + ath11k_bd_ie_board = 0, + ath11k_bd_ie_board_ext = 1, +}; + +#endif diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/mac.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include "mac.h" +#include "core.h" +#include "debug.h" +#include "wmi.h" +#include "hw.h" +#include "dp_tx.h" +#include "dp_rx.h" +#include "testmode.h" +#include "peer.h" + +#define chan2g(_channel, _freq, _flags) { \ + .band = nl80211_band_2ghz, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define chan5g(_channel, _freq, _flags) { \ + .band = nl80211_band_5ghz, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static const struct ieee80211_channel ath11k_2ghz_channels[] = { + chan2g(1, 2412, 0), + chan2g(2, 2417, 0), + chan2g(3, 2422, 0), + chan2g(4, 2427, 0), + chan2g(5, 2432, 0), + chan2g(6, 2437, 0), + chan2g(7, 2442, 0), + chan2g(8, 2447, 0), + chan2g(9, 2452, 0), + chan2g(10, 2457, 0), + chan2g(11, 2462, 0), + chan2g(12, 2467, 0), + chan2g(13, 2472, 0), + chan2g(14, 2484, 0), +}; + +static const struct ieee80211_channel ath11k_5ghz_channels[] = { + chan5g(36, 5180, 0), + chan5g(40, 5200, 0), + chan5g(44, 5220, 0), + chan5g(48, 5240, 0), + chan5g(52, 5260, 0), + chan5g(56, 5280, 0), + chan5g(60, 5300, 0), + chan5g(64, 5320, 0), + chan5g(100, 5500, 0), + chan5g(104, 5520, 0), + chan5g(108, 5540, 0), + chan5g(112, 5560, 0), + chan5g(116, 5580, 0), + chan5g(120, 5600, 0), + chan5g(124, 5620, 0), + chan5g(128, 5640, 0), + chan5g(132, 5660, 0), + chan5g(136, 5680, 0), + chan5g(140, 5700, 0), + chan5g(144, 5720, 0), + chan5g(149, 5745, 0), + chan5g(153, 5765, 0), + chan5g(157, 5785, 0), + chan5g(161, 5805, 0), + chan5g(165, 5825, 0), + chan5g(169, 5845, 0), + chan5g(173, 5865, 0), +}; + +static struct ieee80211_rate ath11k_legacy_rates[] = { + { .bitrate = 10, + .hw_value = ath11k_hw_rate_cck_lp_1m }, + { .bitrate = 20, + .hw_value = ath11k_hw_rate_cck_lp_2m, + .hw_value_short = ath11k_hw_rate_cck_sp_2m, + .flags = 
ieee80211_rate_short_preamble }, + { .bitrate = 55, + .hw_value = ath11k_hw_rate_cck_lp_5_5m, + .hw_value_short = ath11k_hw_rate_cck_sp_5_5m, + .flags = ieee80211_rate_short_preamble }, + { .bitrate = 110, + .hw_value = ath11k_hw_rate_cck_lp_11m, + .hw_value_short = ath11k_hw_rate_cck_sp_11m, + .flags = ieee80211_rate_short_preamble }, + + { .bitrate = 60, .hw_value = ath11k_hw_rate_ofdm_6m }, + { .bitrate = 90, .hw_value = ath11k_hw_rate_ofdm_9m }, + { .bitrate = 120, .hw_value = ath11k_hw_rate_ofdm_12m }, + { .bitrate = 180, .hw_value = ath11k_hw_rate_ofdm_18m }, + { .bitrate = 240, .hw_value = ath11k_hw_rate_ofdm_24m }, + { .bitrate = 360, .hw_value = ath11k_hw_rate_ofdm_36m }, + { .bitrate = 480, .hw_value = ath11k_hw_rate_ofdm_48m }, + { .bitrate = 540, .hw_value = ath11k_hw_rate_ofdm_54m }, +}; + +static const int +ath11k_phymodes[num_nl80211_bands][ath11k_chan_width_num] = { + [nl80211_band_2ghz] = { + [nl80211_chan_width_5] = mode_unknown, + [nl80211_chan_width_10] = mode_unknown, + [nl80211_chan_width_20_noht] = mode_11ax_he20_2g, + [nl80211_chan_width_20] = mode_11ax_he20_2g, + [nl80211_chan_width_40] = mode_11ax_he40_2g, + [nl80211_chan_width_80] = mode_11ax_he80_2g, + [nl80211_chan_width_80p80] = mode_unknown, + [nl80211_chan_width_160] = mode_unknown, + }, + [nl80211_band_5ghz] = { + [nl80211_chan_width_5] = mode_unknown, + [nl80211_chan_width_10] = mode_unknown, + [nl80211_chan_width_20_noht] = mode_11ax_he20, + [nl80211_chan_width_20] = mode_11ax_he20, + [nl80211_chan_width_40] = mode_11ax_he40, + [nl80211_chan_width_80] = mode_11ax_he80, + [nl80211_chan_width_160] = mode_11ax_he160, + [nl80211_chan_width_80p80] = mode_11ax_he80_80, + }, +}; + +const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = { + .rx_filter = htt_rx_filter_tlv_flags_mpdu_start | + htt_rx_filter_tlv_flags_ppdu_end | + htt_rx_filter_tlv_flags_ppdu_end_status_done, + .pkt_filter_flags0 = htt_rx_fp_mgmt_filter_flags0, + .pkt_filter_flags1 = 
htt_rx_fp_mgmt_filter_flags1, + .pkt_filter_flags2 = htt_rx_fp_ctrl_filter_flasg2, + .pkt_filter_flags3 = htt_rx_fp_data_filter_flasg3 | + htt_rx_fp_ctrl_filter_flasg3 +}; + +#define ath11k_mac_first_ofdm_rate_idx 4 +#define ath11k_g_rates ath11k_legacy_rates +#define ath11k_g_rates_size (array_size(ath11k_legacy_rates)) +#define ath11k_a_rates (ath11k_legacy_rates + 4) +#define ath11k_a_rates_size (array_size(ath11k_legacy_rates) - 4) + +#define ath11k_mac_scan_timeout_msecs 200 /* in msecs */ + +static const u32 ath11k_smps_map[] = { + [wlan_ht_cap_sm_ps_static] = wmi_peer_smps_static, + [wlan_ht_cap_sm_ps_dynamic] = wmi_peer_smps_dynamic, + [wlan_ht_cap_sm_ps_invalid] = wmi_peer_smps_ps_none, + [wlan_ht_cap_sm_ps_disabled] = wmi_peer_smps_ps_none, +}; + +int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, + u16 *rate) +{ + /* as default, it is ofdm rates */ + int i = ath11k_mac_first_ofdm_rate_idx; + int max_rates_idx = ath11k_g_rates_size; + + if (preamble == wmi_rate_preamble_cck) { + hw_rc &= ~ath11k_hw_ratecode_cck_short_pream_mask; + i = 0; + max_rates_idx = ath11k_mac_first_ofdm_rate_idx; + } + + while (i < max_rates_idx) { + if (hw_rc == ath11k_legacy_rates[i].hw_value) { + *rateidx = i; + *rate = ath11k_legacy_rates[i].bitrate; + return 0; + } + i++; + } + + return -einval; +} + +static int get_num_chains(u32 mask) +{ + int num_chains = 0; + + while (mask) { + if (mask & bit(0)) + num_chains++; + mask >>= 1; + } + + return num_chains; +} + +u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, + u32 bitrate) +{ + int i; + + for (i = 0; i < sband->n_bitrates; i++) + if (sband->bitrates[i].bitrate == bitrate) + return i; + + return 0; +} + +static u32 +ath11k_mac_max_ht_nss(const u8 ht_mcs_mask[ieee80211_ht_mcs_mask_len]) +{ + int nss; + + for (nss = ieee80211_ht_mcs_mask_len - 1; nss >= 0; nss--) + if (ht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +static u32 +ath11k_mac_max_vht_nss(const u16 
vht_mcs_mask[nl80211_vht_nss_max]) +{ + int nss; + + for (nss = nl80211_vht_nss_max - 1; nss >= 0; nss--) + if (vht_mcs_mask[nss]) + return nss + 1; + + return 1; +} + +static u8 ath11k_parse_mpdudensity(u8 mpdudensity) +{ +/* 802.11n d2.0 defined values for "minimum mpdu start spacing": + * 0 for no restriction + * 1 for 1/4 us + * 2 for 1/2 us + * 3 for 1 us + * 4 for 2 us + * 5 for 4 us + * 6 for 8 us + * 7 for 16 us + */ + switch (mpdudensity) { + case 0: + return 0; + case 1: + case 2: + case 3: + /* our lower layer calculations limit our precision to + * 1 microsecond + */ + return 1; + case 4: + return 2; + case 5: + return 4; + case 6: + return 8; + case 7: + return 16; + default: + return 0; + } +} + +static int ath11k_mac_vif_chan(struct ieee80211_vif *vif, + struct cfg80211_chan_def *def) +{ + struct ieee80211_chanctx_conf *conf; + + rcu_read_lock(); + conf = rcu_dereference(vif->chanctx_conf); + if (!conf) { + rcu_read_unlock(); + return -enoent; + } + + *def = conf->def; + rcu_read_unlock(); + + return 0; +} + +static bool ath11k_mac_bitrate_is_cck(int bitrate) +{ + switch (bitrate) { + case 10: + case 20: + case 55: + case 110: + return true; + } + + return false; +} + +u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, + u8 hw_rate, bool cck) +{ + const struct ieee80211_rate *rate; + int i; + + for (i = 0; i < sband->n_bitrates; i++) { + rate = &sband->bitrates[i]; + + if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck) + continue; + + if (rate->hw_value == hw_rate) + return i; + else if (rate->flags & ieee80211_rate_short_preamble && + rate->hw_value_short == hw_rate) + return i; + } + + return 0; +} + +static u8 ath11k_mac_bitrate_to_rate(int bitrate) +{ + return div_round_up(bitrate, 5) | + (ath11k_mac_bitrate_is_cck(bitrate) ? 
bit(7) : 0); +} + +static void ath11k_get_arvif_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath11k_vif_iter *arvif_iter = data; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + + if (arvif->vdev_id == arvif_iter->vdev_id) + arvif_iter->arvif = arvif; +} + +struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id) +{ + struct ath11k_vif_iter arvif_iter; + u32 flags; + + memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter)); + arvif_iter.vdev_id = vdev_id; + + flags = ieee80211_iface_iter_resume_all; + ieee80211_iterate_active_interfaces_atomic(ar->hw, + flags, + ath11k_get_arvif_iter, + &arvif_iter); + if (!arvif_iter.arvif) + return null; + + return arvif_iter.arvif; +} + +struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, + u32 vdev_id) +{ + int i; + struct ath11k_pdev *pdev; + struct ath11k_vif *arvif; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id); + if (arvif) + return arvif; + } + } + + return null; +} + +struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id) +{ + int i; + struct ath11k_pdev *pdev; + struct ath11k_vif *arvif; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id); + if (arvif) + return arvif->ar; + } + } + + return null; +} + +struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id) +{ + int i; + struct ath11k_pdev *pdev; + + if (warn_on(pdev_id > ab->num_radios)) + return null; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + + if (pdev && pdev->pdev_id == pdev_id) + return (pdev->ar ? 
pdev->ar : null); + } + + return null; +} + +struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab, + u32 vdev_id) +{ + int i; + struct ath11k_pdev *pdev; + struct ath11k *ar; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + ar = pdev->ar; + + spin_lock_bh(&ar->data_lock); + if (ar->vdev_stop_status.stop_in_progress && + ar->vdev_stop_status.vdev_id == vdev_id) { + ar->vdev_stop_status.stop_in_progress = false; + spin_unlock_bh(&ar->data_lock); + return ar; + } + spin_unlock_bh(&ar->data_lock); + } + } + return null; +} + +static void ath11k_pdev_caps_update(struct ath11k *ar) +{ + struct ath11k_base *ab = ar->ab; + + ar->max_tx_power = ab->target_caps.hw_max_tx_power; + + /* fixme set min_tx_power to ab->target_caps.hw_min_tx_power. + * but since the received value in svcrdy is same as hw_max_tx_power, + * we can set ar->min_tx_power to 0 currently until + * this is fixed in firmware + */ + ar->min_tx_power = 0; + + ar->txpower_limit_2g = ar->max_tx_power; + ar->txpower_limit_5g = ar->max_tx_power; + ar->txpower_scale = wmi_host_tp_scale_max; +} + +static int ath11k_mac_txpower_recalc(struct ath11k *ar) +{ + struct ath11k_pdev *pdev = ar->pdev; + struct ath11k_vif *arvif; + int ret, txpower = -1; + u32 param; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->txpower <= 0) + continue; + + if (txpower == -1) + txpower = arvif->txpower; + else + txpower = min(txpower, arvif->txpower); + } + + if (txpower == -1) + return 0; + + /* txpwr is set as 2 units per dbm in fw*/ + txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower), + ar->max_tx_power) * 2; + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "txpower to set in hw %d ", + txpower / 2); + + if ((pdev->cap.supported_bands & wmi_host_wlan_2g_cap) && + ar->txpower_limit_2g != txpower) { + param = wmi_pdev_param_txpower_limit2g; + ret = ath11k_wmi_pdev_set_param(ar, param, + 
txpower, ar->pdev->pdev_id); + if (ret) + goto fail; + ar->txpower_limit_2g = txpower; + } + + if ((pdev->cap.supported_bands & wmi_host_wlan_5g_cap) && + ar->txpower_limit_5g != txpower) { + param = wmi_pdev_param_txpower_limit5g; + ret = ath11k_wmi_pdev_set_param(ar, param, + txpower, ar->pdev->pdev_id); + if (ret) + goto fail; + ar->txpower_limit_5g = txpower; + } + + return 0; + +fail: + ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d ", + txpower / 2, param, ret); + return ret; +} + +static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + u32 vdev_param, rts_cts = 0; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + vdev_param = wmi_vdev_param_enable_rtscts; + + /* enable rts/cts protection for sw retries (when legacy stations + * are in bss) or by default only for second rate series. + * todo: check if we need to enable cts 2 self in any case + */ + rts_cts = wmi_use_rts_cts; + + if (arvif->num_legacy_stations > 0) + rts_cts |= wmi_rtscts_across_sw_retries << 4; + else + rts_cts |= wmi_rtscts_for_second_rateseries << 4; + + /* need not send duplicate param value to firmware */ + if (arvif->rtscts_prot_mode == rts_cts) + return 0; + + arvif->rtscts_prot_mode = rts_cts; + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac vdev %d recalc rts/cts prot %d ", + arvif->vdev_id, rts_cts); + + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rts_cts); + if (ret) + ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d ", + arvif->vdev_id, ret); + + return ret; +} + +static int ath11k_mac_set_kickout(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + u32 param; + int ret; + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_sta_kickout_th, + ath11k_kickout_threshold, + ar->pdev->pdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d ", + arvif->vdev_id, ret); + return ret; + } + + param = 
wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + ath11k_keepalive_min_idle); + if (ret) { + ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d ", + arvif->vdev_id, ret); + return ret; + } + + param = wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + ath11k_keepalive_max_idle); + if (ret) { + ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d ", + arvif->vdev_id, ret); + return ret; + } + + param = wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, + ath11k_keepalive_max_unresponsive); + if (ret) { + ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d ", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} + +void ath11k_mac_peer_cleanup_all(struct ath11k *ar) +{ + struct ath11k_peer *peer, *tmp; + struct ath11k_base *ab = ar->ab; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ab->base_lock); + list_for_each_entry_safe(peer, tmp, &ab->peers, list) { + ath11k_peer_rx_tid_cleanup(ar, peer); + list_del(&peer->list); + kfree(peer); + } + spin_unlock_bh(&ab->base_lock); + + ar->num_peers = 0; + ar->num_stations = 0; +} + +static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id) +{ + int ret = 0; + + ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + if (ret) { + ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d ", + vdev_id, ret); + return ret; + } + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac monitor vdev %i started ", + vdev_id); + return 0; +} + +static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed) +{ + struct ath11k *ar = hw->priv; + int ret = 0; + + /* mac80211 requires this op to be present and that's why + * there's an empty function, this can be extended when + * required. 
+ */ + + mutex_lock(&ar->conf_mutex); + + /* todo: handle configuration changes as appropriate */ + + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + struct ath11k_base *ab = ar->ab; + struct ieee80211_hw *hw = ar->hw; + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_mutable_offsets offs = {}; + struct sk_buff *bcn; + int ret; + + if (arvif->vdev_type != wmi_vdev_type_ap) + return 0; + + bcn = ieee80211_beacon_get_template(hw, vif, &offs); + if (!bcn) { + ath11k_warn(ab, "failed to get beacon template from mac80211 "); + return -eperm; + } + + ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); + + kfree_skb(bcn); + + if (ret) + ath11k_warn(ab, "failed to submit beacon template command: %d ", + ret); + + return ret; +} + +static void ath11k_control_beaconing(struct ath11k_vif *arvif, + struct ieee80211_bss_conf *info) +{ + struct ath11k *ar = arvif->ar; + int ret = 0; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + if (!info->enable_beacon) { + ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath11k_warn(ar->ab, "failed to down vdev_id %i: %d ", + arvif->vdev_id, ret); + + arvif->is_up = false; + return; + } + + /* install the beacon template to the fw */ + ret = ath11k_mac_setup_bcn_tmpl(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d ", + ret); + return; + } + + arvif->tx_seq_no = 0x1000; + + arvif->aid = 0; + + ether_addr_copy(arvif->bssid, info->bssid); + + ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath11k_warn(ar->ab, "failed to bring up vdev %d: %i ", + arvif->vdev_id, ret); + return; + } + + arvif->is_up = true; + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac vdev %d up ", arvif->vdev_id); +} + +static void ath11k_peer_assoc_h_basic(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct 
peer_assoc_params *arg) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + u32 aid; + + lockdep_assert_held(&ar->conf_mutex); + + if (vif->type == nl80211_iftype_station) + aid = vif->bss_conf.aid; + else + aid = sta->aid; + + ether_addr_copy(arg->peer_mac, sta->addr); + arg->vdev_id = arvif->vdev_id; + arg->peer_associd = aid; + arg->auth_flag = true; + /* todo: sta war in ath10k for listen interval required? */ + arg->peer_listen_intval = ar->hw->conf.listen_interval; + arg->peer_nss = 1; + arg->peer_caps = vif->bss_conf.assoc_capability; +} + +static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + struct ieee80211_bss_conf *info = &vif->bss_conf; + struct cfg80211_chan_def def; + struct cfg80211_bss *bss; + const u8 *rsnie = null; + const u8 *wpaie = null; + + lockdep_assert_held(&ar->conf_mutex); + + if (warn_on(ath11k_mac_vif_chan(vif, &def))) + return; + + bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, null, 0, + ieee80211_bss_type_any, ieee80211_privacy_any); + if (bss) { + const struct cfg80211_bss_ies *ies; + + rcu_read_lock(); + rsnie = ieee80211_bss_get_ie(bss, wlan_eid_rsn); + + ies = rcu_dereference(bss->ies); + + wpaie = cfg80211_find_vendor_ie(wlan_oui_microsoft, + wlan_oui_type_microsoft_wpa, + ies->data, + ies->len); + rcu_read_unlock(); + cfg80211_put_bss(ar->hw->wiphy, bss); + } + + /* fixme: base on rsn ie/wpa ie is a correct idea? */ + if (rsnie || wpaie) { + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "%s: rsn ie found ", __func__); + arg->need_ptk_4_way = true; + } + + if (wpaie) { + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "%s: wpa ie found ", __func__); + arg->need_gtk_2_way = true; + } + + if (sta->mfp) { + /* todo: need to check if fw supports pmf? */ + arg->is_pmf_enabled = true; + } + + /* todo: safe_mode_enabled (bypass 4-way handshake) flag req? 
*/ +} + +static void ath11k_peer_assoc_h_rates(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; + struct cfg80211_chan_def def; + const struct ieee80211_supported_band *sband; + const struct ieee80211_rate *rates; + enum nl80211_band band; + u32 ratemask; + u8 rate; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + if (warn_on(ath11k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + sband = ar->hw->wiphy->bands[band]; + ratemask = sta->supp_rates[band]; + ratemask &= arvif->bitrate_mask.control[band].legacy; + rates = sband->bitrates; + + rateset->num_rates = 0; + + for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { + if (!(ratemask & 1)) + continue; + + rate = ath11k_mac_bitrate_to_rate(rates->bitrate); + rateset->rates[rateset->num_rates] = rate; + rateset->num_rates++; + } +} + +static bool +ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[ieee80211_ht_mcs_mask_len]) +{ + int nss; + + for (nss = 0; nss < ieee80211_ht_mcs_mask_len; nss++) + if (ht_mcs_mask[nss]) + return false; + + return true; +} + +static bool +ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[nl80211_vht_nss_max]) +{ + int nss; + + for (nss = 0; nss < nl80211_vht_nss_max; nss++) + if (vht_mcs_mask[nss]) + return false; + + return true; +} + +static void ath11k_peer_assoc_h_ht(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + int i, n; + u8 max_nss; + u32 stbc; + + lockdep_assert_held(&ar->conf_mutex); + + if (warn_on(ath11k_mac_vif_chan(vif, &def))) + return; + + if (!ht_cap->ht_supported) + return; + + band = def.chan->band; + 
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + + if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) + return; + + arg->ht_flag = true; + + arg->peer_max_mpdu = (1 << (ieee80211_ht_max_ampdu_factor + + ht_cap->ampdu_factor)) - 1; + + arg->peer_mpdu_density = + ath11k_parse_mpdudensity(ht_cap->ampdu_density); + + arg->peer_ht_caps = ht_cap->cap; + arg->peer_rate_caps |= wmi_host_rc_ht_flag; + + if (ht_cap->cap & ieee80211_ht_cap_ldpc_coding) + arg->ldpc_flag = true; + + if (sta->bandwidth >= ieee80211_sta_rx_bw_40) { + arg->bw_40 = true; + arg->peer_rate_caps |= wmi_host_rc_cw40_flag; + } + + if (arvif->bitrate_mask.control[band].gi != nl80211_txrate_force_lgi) { + if (ht_cap->cap & (ieee80211_ht_cap_sgi_20 | + ieee80211_ht_cap_sgi_40)) + arg->peer_rate_caps |= wmi_host_rc_sgi_flag; + } + + if (ht_cap->cap & ieee80211_ht_cap_tx_stbc) { + arg->peer_rate_caps |= wmi_host_rc_tx_stbc_flag; + arg->stbc_flag = true; + } + + if (ht_cap->cap & ieee80211_ht_cap_rx_stbc) { + stbc = ht_cap->cap & ieee80211_ht_cap_rx_stbc; + stbc = stbc >> ieee80211_ht_cap_rx_stbc_shift; + stbc = stbc << wmi_host_rc_rx_stbc_flag_s; + arg->peer_rate_caps |= stbc; + arg->stbc_flag = true; + } + + if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) + arg->peer_rate_caps |= wmi_host_rc_ts_flag; + else if (ht_cap->mcs.rx_mask[1]) + arg->peer_rate_caps |= wmi_host_rc_ds_flag; + + for (i = 0, n = 0, max_nss = 0; i < ieee80211_ht_mcs_mask_len * 8; i++) + if ((ht_cap->mcs.rx_mask[i / 8] & bit(i % 8)) && + (ht_mcs_mask[i / 8] & bit(i % 8))) { + max_nss = (i / 8) + 1; + arg->peer_ht_rates.rates[n++] = i; + } + + /* this is a workaround for ht-enabled stas which break the spec + * and have no ht capabilities rx mask (no ht rx mcs map). + * + * as per spec, in section 20.3.5 modulation and coding scheme (mcs), + * mcs 0 through 7 are mandatory in 20mhz with 800 ns gi at all stas. + * + * firmware asserts if such situation occurs. 
+ */ + if (n == 0) { + arg->peer_ht_rates.num_rates = 8; + for (i = 0; i < arg->peer_ht_rates.num_rates; i++) + arg->peer_ht_rates.rates[i] = i; + } else { + arg->peer_ht_rates.num_rates = n; + arg->peer_nss = min(sta->rx_nss, max_nss); + } + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac ht peer %pm mcs cnt %d nss %d ", + arg->peer_mac, + arg->peer_ht_rates.num_rates, + arg->peer_nss); +} + +static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) +{ + switch ((mcs_map >> (2 * nss)) & 0x3) { + case ieee80211_vht_mcs_support_0_7: return bit(8) - 1; + case ieee80211_vht_mcs_support_0_8: return bit(9) - 1; + case ieee80211_vht_mcs_support_0_9: return bit(10) - 1; + } + return 0; +} + +static u16 +ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set, + const u16 vht_mcs_limit[nl80211_vht_nss_max]) +{ + int idx_limit; + int nss; + u16 mcs_map; + u16 mcs; + + for (nss = 0; nss < nl80211_vht_nss_max; nss++) { + mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & + vht_mcs_limit[nss]; + + if (mcs_map) + idx_limit = fls(mcs_map) - 1; + else + idx_limit = -1; + + switch (idx_limit) { + case 0: /* fall through */ + case 1: /* fall through */ + case 2: /* fall through */ + case 3: /* fall through */ + case 4: /* fall through */ + case 5: /* fall through */ + case 6: /* fall through */ + case 7: + mcs = ieee80211_vht_mcs_support_0_7; + break; + case 8: + mcs = ieee80211_vht_mcs_support_0_8; + break; + case 9: + mcs = ieee80211_vht_mcs_support_0_9; + break; + default: + warn_on(1); + /* fall through */ + case -1: + mcs = ieee80211_vht_mcs_not_supported; + break; + } + + tx_mcs_set &= ~(0x3 << (nss * 2)); + tx_mcs_set |= mcs << (nss * 2); + } + + return tx_mcs_set; +} + +static void ath11k_peer_assoc_h_vht(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum 
nl80211_band band; + const u16 *vht_mcs_mask; + u8 ampdu_factor; + u8 max_nss, vht_mcs; + int i; + + if (warn_on(ath11k_mac_vif_chan(vif, &def))) + return; + + if (!vht_cap->vht_supported) + return; + + band = def.chan->band; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) + return; + + arg->vht_flag = true; + + /* todo: similar flags required? */ + arg->vht_capable = true; + + if (def.chan->band == nl80211_band_2ghz) + arg->vht_ng_flag = true; + + arg->peer_vht_caps = vht_cap->cap; + + ampdu_factor = (vht_cap->cap & + ieee80211_vht_cap_max_a_mpdu_length_exponent_mask) >> + ieee80211_vht_cap_max_a_mpdu_length_exponent_shift; + + /* workaround: some netgear/linksys 11ac aps set rx a-mpdu factor to + * zero in vht ie. using it would result in degraded throughput. + * arg->peer_max_mpdu at this point contains ht max_mpdu so keep + * it if vht max_mpdu is smaller. + */ + arg->peer_max_mpdu = max(arg->peer_max_mpdu, + (1u << (ieee80211_ht_max_ampdu_factor + + ampdu_factor)) - 1); + + if (sta->bandwidth == ieee80211_sta_rx_bw_80) + arg->bw_80 = true; + + if (sta->bandwidth == ieee80211_sta_rx_bw_160) + arg->bw_160 = true; + + /* calculate peer nss capability from vht capabilities if sta + * supports vht. + */ + for (i = 0, max_nss = 0, vht_mcs = 0; i < nl80211_vht_nss_max; i++) { + vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> + (2 * i) & 3; + + if (vht_mcs != ieee80211_vht_mcs_not_supported && + vht_mcs_mask[i]) + max_nss = i + 1; + } + arg->peer_nss = min(sta->rx_nss, max_nss); + arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest); + arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); + arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest); + arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit( + __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); + + /* in ipq8074 platform, vht mcs rate 10 and 11 is enabled by default. 
+ * vht mcs rate 10 and 11 is not suppoerted in 11ac standard. + * so explicitly disable the vht mcs rate 10 and 11 in 11ac mode. + */ + arg->tx_mcs_set &= ~ieee80211_vht_mcs_support_0_11_mask; + arg->tx_mcs_set |= ieee80211_disable_vht_mcs_support_0_11; + + /* todo: check */ + arg->tx_max_mcs_nss = 0xff; + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac vht peer %pm max_mpdu %d flags 0x%x ", + sta->addr, arg->peer_max_mpdu, arg->peer_flags); + + /* todo: rxnss_override */ +} + +static void ath11k_peer_assoc_h_he(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + /* todo: implementation */ +} + +static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int smps; + + if (!ht_cap->ht_supported) + return; + + smps = ht_cap->cap & ieee80211_ht_cap_sm_ps; + smps >>= ieee80211_ht_cap_sm_ps_shift; + + switch (smps) { + case wlan_ht_cap_sm_ps_static: + arg->static_mimops_flag = true; + break; + case wlan_ht_cap_sm_ps_dynamic: + arg->dynamic_mimops_flag = true; + break; + case wlan_ht_cap_sm_ps_disabled: + arg->spatial_mux_flag = true; + break; + default: + break; + } +} + +static void ath11k_peer_assoc_h_qos(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + + switch (arvif->vdev_type) { + case wmi_vdev_type_ap: + if (sta->wme) { + /* todo: check wme vs qos */ + arg->is_wme_set = true; + arg->qos_flag = true; + } + + if (sta->wme && sta->uapsd_queues) { + /* todo: check wme vs qos */ + arg->is_wme_set = true; + arg->apsd_flag = true; + arg->peer_rate_caps |= wmi_host_rc_uapsd_flag; + } + break; + case wmi_vdev_type_sta: + if (sta->wme) { + arg->is_wme_set = true; + arg->qos_flag = true; + } + break; + default: + break; + } + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac peer %pm qos %d ", + sta->addr, 
arg->qos_flag); +} + +static int ath11k_peer_assoc_qos_ap(struct ath11k *ar, + struct ath11k_vif *arvif, + struct ieee80211_sta *sta) +{ + struct ap_ps_params params; + u32 max_sp; + u32 uapsd; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + params.vdev_id = arvif->vdev_id; + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac uapsd_queues 0x%x max_sp %d ", + sta->uapsd_queues, sta->max_sp); + + uapsd = 0; + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_vo) + uapsd |= wmi_ap_ps_uapsd_ac3_delivery_en | + wmi_ap_ps_uapsd_ac3_trigger_en; + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_vi) + uapsd |= wmi_ap_ps_uapsd_ac2_delivery_en | + wmi_ap_ps_uapsd_ac2_trigger_en; + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_bk) + uapsd |= wmi_ap_ps_uapsd_ac1_delivery_en | + wmi_ap_ps_uapsd_ac1_trigger_en; + if (sta->uapsd_queues & ieee80211_wmm_ie_sta_qosinfo_ac_be) + uapsd |= wmi_ap_ps_uapsd_ac0_delivery_en | + wmi_ap_ps_uapsd_ac0_trigger_en; + + max_sp = 0; + if (sta->max_sp < max_wmi_ap_ps_peer_param_max_sp) + max_sp = sta->max_sp; + + params.param = wmi_ap_ps_peer_param_uapsd; + params.value = uapsd; + ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); + if (ret) + goto err; + + params.param = wmi_ap_ps_peer_param_max_sp; + params.value = max_sp; + ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); + if (ret) + goto err; + + /* todo revisit during testing */ + params.param = wmi_ap_ps_peer_param_sifs_resp_frmtype; + params.value = disable_sifs_response_trigger; + ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); + if (ret) + goto err; + + params.param = wmi_ap_ps_peer_param_sifs_resp_uapsd; + params.value = disable_sifs_response_trigger; + ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, ¶ms); + if (ret) + goto err; + + return 0; + +err: + ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d ", + params.param, arvif->vdev_id, ret); + return ret; +} + +static bool 
ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) +{ + return sta->supp_rates[nl80211_band_2ghz] >> + ath11k_mac_first_ofdm_rate_idx; +} + +static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar, + struct ieee80211_sta *sta) +{ + if (sta->bandwidth == ieee80211_sta_rx_bw_160) { + switch (sta->vht_cap.cap & + ieee80211_vht_cap_supp_chan_width_mask) { + case ieee80211_vht_cap_supp_chan_width_160mhz: + return mode_11ac_vht160; + case ieee80211_vht_cap_supp_chan_width_160_80plus80mhz: + return mode_11ac_vht80_80; + default: + /* not sure if this is a valid case? */ + return mode_11ac_vht160; + } + } + + if (sta->bandwidth == ieee80211_sta_rx_bw_80) + return mode_11ac_vht80; + + if (sta->bandwidth == ieee80211_sta_rx_bw_40) + return mode_11ac_vht40; + + if (sta->bandwidth == ieee80211_sta_rx_bw_20) + return mode_11ac_vht20; + + return mode_unknown; +} + +static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + enum wmi_phy_mode phymode = mode_unknown; + + if (warn_on(ath11k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + switch (band) { + case nl80211_band_2ghz: + if (sta->vht_cap.vht_supported && + !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + if (sta->bandwidth == ieee80211_sta_rx_bw_40) + phymode = mode_11ac_vht40; + else + phymode = mode_11ac_vht20; + } else if (sta->ht_cap.ht_supported && + !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { + if (sta->bandwidth == ieee80211_sta_rx_bw_40) + phymode = mode_11ng_ht40; + else + phymode = mode_11ng_ht20; + } else if (ath11k_mac_sta_has_ofdm_only(sta)) { + phymode = mode_11g; + } else { + phymode = mode_11b; + 
} + /* todo: he */ + + break; + case nl80211_band_5ghz: + /* check vht first */ + if (sta->vht_cap.vht_supported && + !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) { + phymode = ath11k_mac_get_phymode_vht(ar, sta); + } else if (sta->ht_cap.ht_supported && + !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) { + if (sta->bandwidth >= ieee80211_sta_rx_bw_40) + phymode = mode_11na_ht40; + else + phymode = mode_11na_ht20; + } else { + phymode = mode_11a; + } + /* todo: he phymode */ + break; + default: + break; + } + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac peer %pm phymode %s ", + sta->addr, ath11k_wmi_phymode_str(phymode)); + + arg->peer_phymode = phymode; + warn_on(phymode == mode_unknown); +} + +static void ath11k_peer_assoc_prepare(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct peer_assoc_params *arg, + bool reassoc) +{ + lockdep_assert_held(&ar->conf_mutex); + + memset(arg, 0, sizeof(*arg)); + + reinit_completion(&ar->peer_assoc_done); + + arg->peer_new_assoc = !reassoc; + ath11k_peer_assoc_h_basic(ar, vif, sta, arg); + ath11k_peer_assoc_h_crypto(ar, vif, sta, arg); + ath11k_peer_assoc_h_rates(ar, vif, sta, arg); + ath11k_peer_assoc_h_ht(ar, vif, sta, arg); + ath11k_peer_assoc_h_vht(ar, vif, sta, arg); + ath11k_peer_assoc_h_he(ar, vif, sta, arg); + ath11k_peer_assoc_h_qos(ar, vif, sta, arg); + ath11k_peer_assoc_h_phymode(ar, vif, sta, arg); + ath11k_peer_assoc_h_smps(sta, arg); + + /* todo: amsdu_disable req? 
*/ +} + +static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif, + const u8 *addr, + const struct ieee80211_sta_ht_cap *ht_cap) +{ + int smps; + + if (!ht_cap->ht_supported) + return 0; + + smps = ht_cap->cap & ieee80211_ht_cap_sm_ps; + smps >>= ieee80211_ht_cap_sm_ps_shift; + + if (smps >= array_size(ath11k_smps_map)) + return -einval; + + return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id, + wmi_peer_mimo_ps_state, + ath11k_smps_map[smps]); +} + +static void ath11k_bss_assoc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct peer_assoc_params peer_arg; + struct ieee80211_sta *ap_sta; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac vdev %i assoc bssid %pm aid %d ", + arvif->vdev_id, arvif->bssid, arvif->aid); + + rcu_read_lock(); + + ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (!ap_sta) { + ath11k_warn(ar->ab, "failed to find station entry for bss %pm vdev %i ", + bss_conf->bssid, arvif->vdev_id); + rcu_read_unlock(); + return; + } + + ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); + + rcu_read_unlock(); + + ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (ret) { + ath11k_warn(ar->ab, "failed to run peer assoc for %pm vdev %i: %d ", + bss_conf->bssid, arvif->vdev_id, ret); + return; + } + + if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * hz)) { + ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pm vdev %i ", + bss_conf->bssid, arvif->vdev_id); + return; + } + + ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid, + &ap_sta->ht_cap); + if (ret) { + ath11k_warn(ar->ab, "failed to setup peer smps for vdev %d: %d ", + arvif->vdev_id, ret); + return; + } + + warn_on(arvif->is_up); + + arvif->aid = bss_conf->aid; + ether_addr_copy(arvif->bssid, bss_conf->bssid); + + ret = 
ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); + if (ret) { + ath11k_warn(ar->ab, "failed to set vdev %d up: %d ", + arvif->vdev_id, ret); + return; + } + + arvif->is_up = true; + + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "mac vdev %d up (associated) bssid %pm aid %d ", + arvif->vdev_id, bss_conf->bssid, bss_conf->aid); + + /* authorize bss peer */ + ret = ath11k_wmi_set_peer_param(ar, arvif->bssid, + arvif->vdev_id, + wmi_peer_authorize, + 1); + if (ret) + ath11k_warn(ar->ab, "unable to authorize bss peer: %d ", ret); +} + +static void ath11k_bss_disassoc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac vdev %i disassoc bssid %pm ", + arvif->vdev_id, arvif->bssid); + + ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) + ath11k_warn(ar->ab, "failed to down vdev %i: %d ", + arvif->vdev_id, ret); + + arvif->is_up = false; + + /* todo: cancel connection_loss_work */ +} + +static u32 ath11k_mac_get_rate_hw_value(int bitrate) +{ + u32 preamble; + u16 hw_value; + int rate; + size_t i; + + if (ath11k_mac_bitrate_is_cck(bitrate)) + preamble = wmi_rate_preamble_cck; + else + preamble = wmi_rate_preamble_ofdm; + + for (i = 0; i < array_size(ath11k_legacy_rates); i++) { + if (ath11k_legacy_rates[i].bitrate != bitrate) + continue; + + hw_value = ath11k_legacy_rates[i].hw_value; + rate = ath11k_hw_rate_code(hw_value, 0, preamble); + + return rate; + } + + return -einval; +} + +static void ath11k_recalculate_mgmt_rate(struct ath11k *ar, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *def) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + const struct ieee80211_supported_band *sband; + u8 basic_rate_idx; + int hw_rate_code; + u32 vdev_param; + u16 bitrate; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + sband = 
ar->hw->wiphy->bands[def->chan->band]; + basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; + bitrate = sband->bitrates[basic_rate_idx].bitrate; + + hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate); + if (hw_rate_code < 0) { + ath11k_warn(ar->ab, "bitrate not supported %d ", bitrate); + return; + } + + vdev_param = wmi_vdev_param_mgmt_rate; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, + hw_rate_code); + if (ret) + ath11k_warn(ar->ab, "failed to set mgmt tx rate %d ", ret); + + vdev_param = wmi_vdev_param_beacon_rate; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, + hw_rate_code); + if (ret) + ath11k_warn(ar->ab, "failed to set beacon tx rate %d ", ret); +} + +static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u32 changed) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct cfg80211_chan_def def; + u32 param_id, param_value; + enum nl80211_band band; + u32 vdev_param; + int mcast_rate; + u32 preamble; + u16 hw_value; + u16 bitrate; + int ret = 0; + u8 rateidx; + u32 rate; + + mutex_lock(&ar->conf_mutex); + + if (changed & bss_changed_beacon_int) { + arvif->beacon_interval = info->beacon_int; + + param_id = wmi_vdev_param_beacon_interval; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + arvif->beacon_interval); + if (ret) + ath11k_warn(ar->ab, "failed to set beacon interval for vdev: %d ", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "beacon interval: %d set for vdev: %d ", + arvif->beacon_interval, arvif->vdev_id); + } + + if (changed & bss_changed_beacon) { + param_id = wmi_pdev_param_beacon_tx_mode; + param_value = wmi_beacon_staggered_mode; + ret = ath11k_wmi_pdev_set_param(ar, param_id, + param_value, ar->pdev->pdev_id); + if (ret) + ath11k_warn(ar->ab, "failed to set beacon mode for vdev: %d ", + arvif->vdev_id); + else + 
ath11k_dbg(ar->ab, ath11k_dbg_mac, + "set staggered beacon mode for vdev: %d ", + arvif->vdev_id); + + ret = ath11k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath11k_warn(ar->ab, "failed to update bcn template: %d ", + ret); + } + + if (changed & (bss_changed_beacon_info | bss_changed_beacon)) { + arvif->dtim_period = info->dtim_period; + + param_id = wmi_vdev_param_dtim_period; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, + arvif->dtim_period); + + if (ret) + ath11k_warn(ar->ab, "failed to set dtim period for vdev %d: %i ", + arvif->vdev_id, ret); + else + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "dtim period: %d set for vdev: %d ", + arvif->dtim_period, arvif->vdev_id); + } + + if (changed & bss_changed_ssid && + vif->type == nl80211_iftype_ap) { + arvif->u.ap.ssid_len = info->ssid_len; + if (info->ssid_len) + memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); + arvif->u.ap.hidden_ssid = info->hidden_ssid; + } + + if (changed & bss_changed_bssid && !is_zero_ether_addr(info->bssid)) + ether_addr_copy(arvif->bssid, info->bssid); + + if (changed & bss_changed_beacon_enabled) + ath11k_control_beaconing(arvif, info); + + if (changed & bss_changed_erp_cts_prot) { + u32 cts_prot; + + cts_prot = !!(info->use_cts_prot); + param_id = wmi_vdev_param_protection_mode; + + if (arvif->is_started) { + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, cts_prot); + if (ret) + ath11k_warn(ar->ab, "failed to set cts prot for vdev: %d ", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ath11k_dbg_mac, "set cts prot: %d for vdev: %d ", + cts_prot, arvif->vdev_id); + } else { + ath11k_dbg(ar->ab, ath11k_dbg_mac, "defer protection mode setup, vdev is not ready yet "); + } + } + + if (changed & bss_changed_erp_slot) { + u32 slottime; + + if (info->use_short_slot) + slottime = wmi_vdev_slot_time_short; /* 9us */ + + else + slottime = wmi_vdev_slot_time_long; /* 20us */ + + param_id = wmi_vdev_param_slot_time; + ret = ath11k_wmi_vdev_set_param_cmd(ar, 
arvif->vdev_id, + param_id, slottime); + if (ret) + ath11k_warn(ar->ab, "failed to set erp slot for vdev: %d ", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "set slottime: %d for vdev: %d ", + slottime, arvif->vdev_id); + } + + if (changed & bss_changed_erp_preamble) { + u32 preamble; + + if (info->use_short_preamble) + preamble = wmi_vdev_preamble_short; + else + preamble = wmi_vdev_preamble_long; + + param_id = wmi_vdev_param_preamble; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, preamble); + if (ret) + ath11k_warn(ar->ab, "failed to set preamble for vdev: %d ", + arvif->vdev_id); + else + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "set preamble: %d for vdev: %d ", + preamble, arvif->vdev_id); + } + + if (changed & bss_changed_assoc) { + if (info->assoc) + ath11k_bss_assoc(hw, vif, info); + else + ath11k_bss_disassoc(hw, vif); + } + + if (changed & bss_changed_txpower) { + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac vdev_id %i txpower %d ", + arvif->vdev_id, info->txpower); + + arvif->txpower = info->txpower; + ath11k_mac_txpower_recalc(ar); + } + + if (changed & bss_changed_mcast_rate && + !ath11k_mac_vif_chan(arvif->vif, &def)) { + band = def.chan->band; + mcast_rate = vif->bss_conf.mcast_rate[band]; + + if (mcast_rate > 0) + rateidx = mcast_rate - 1; + else + rateidx = ffs(vif->bss_conf.basic_rates) - 1; + + if (ar->pdev->cap.supported_bands & wmi_host_wlan_5g_cap) + rateidx += ath11k_mac_first_ofdm_rate_idx; + + bitrate = ath11k_legacy_rates[rateidx].bitrate; + hw_value = ath11k_legacy_rates[rateidx].hw_value; + + if (ath11k_mac_bitrate_is_cck(bitrate)) + preamble = wmi_rate_preamble_cck; + else + preamble = wmi_rate_preamble_ofdm; + + rate = ath11k_hw_rate_code(hw_value, 0, preamble); + + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "mac vdev %d mcast_rate %x ", + arvif->vdev_id, rate); + + vdev_param = wmi_vdev_param_mcast_data_rate; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + 
ath11k_warn(ar->ab, + "failed to set mcast rate on vdev %i: %d ", + arvif->vdev_id, ret); + + vdev_param = wmi_vdev_param_bcast_data_rate; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, rate); + if (ret) + ath11k_warn(ar->ab, + "failed to set bcast rate on vdev %i: %d ", + arvif->vdev_id, ret); + } + + if (changed & bss_changed_basic_rates && + !ath11k_mac_vif_chan(arvif->vif, &def)) + ath11k_recalculate_mgmt_rate(ar, vif, &def); + + mutex_unlock(&ar->conf_mutex); +} + +void __ath11k_mac_scan_finish(struct ath11k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ath11k_scan_idle: + break; + case ath11k_scan_running: + case ath11k_scan_aborting: + if (!ar->scan.is_roc) { + struct cfg80211_scan_info info = { + .aborted = (ar->scan.state == + ath11k_scan_aborting), + }; + + ieee80211_scan_completed(ar->hw, &info); + } else if (ar->scan.roc_notify) { + ieee80211_remain_on_channel_expired(ar->hw); + } + /* fall through */ + case ath11k_scan_starting: + ar->scan.state = ath11k_scan_idle; + ar->scan_channel = null; + ar->scan.roc_freq = 0; + cancel_delayed_work(&ar->scan.timeout); + complete(&ar->scan.completed); + break; + } +} + +void ath11k_mac_scan_finish(struct ath11k *ar) +{ + spin_lock_bh(&ar->data_lock); + __ath11k_mac_scan_finish(ar); + spin_unlock_bh(&ar->data_lock); +} + +static int ath11k_scan_stop(struct ath11k *ar) +{ + struct scan_cancel_param arg = { + .req_type = wlan_scan_cancel_single, + .scan_id = ath11k_scan_id, + }; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + /* todo: fill other stop params */ + arg.pdev_id = ar->pdev->pdev_id; + + ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg); + if (ret) { + ath11k_warn(ar->ab, "failed to stop wmi scan: %d ", ret); + goto out; + } + + ret = wait_for_completion_timeout(&ar->scan.completed, 3 * hz); + if (ret == 0) { + ath11k_warn(ar->ab, + "failed to receive scan abort comple: timed out "); + ret = -etimedout; + } else if (ret > 0) { + ret = 0; + 
} + +out: + /* scan state should be updated upon scan completion but in case + * firmware fails to deliver the event (for whatever reason) it is + * desired to clean up scan state anyway. firmware may have just + * dropped the scan completion event delivery due to transport pipe + * being overflown with data and/or it can recover on its own before + * next scan request is submitted. + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state != ath11k_scan_idle) + __ath11k_mac_scan_finish(ar); + spin_unlock_bh(&ar->data_lock); + + return ret; +} + +static void ath11k_scan_abort(struct ath11k *ar) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + + switch (ar->scan.state) { + case ath11k_scan_idle: + /* this can happen if timeout worker kicked in and called + * abortion while scan completion was being processed. + */ + break; + case ath11k_scan_starting: + case ath11k_scan_aborting: + ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d ", + ar->scan.state); + break; + case ath11k_scan_running: + ar->scan.state = ath11k_scan_aborting; + spin_unlock_bh(&ar->data_lock); + + ret = ath11k_scan_stop(ar); + if (ret) + ath11k_warn(ar->ab, "failed to abort scan: %d ", ret); + + spin_lock_bh(&ar->data_lock); + break; + } + + spin_unlock_bh(&ar->data_lock); +} + +static void ath11k_scan_timeout_work(struct work_struct *work) +{ + struct ath11k *ar = container_of(work, struct ath11k, + scan.timeout.work); + + mutex_lock(&ar->conf_mutex); + ath11k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); +} + +static int ath11k_start_scan(struct ath11k *ar, + struct scan_req_params *arg) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath11k_wmi_send_scan_start_cmd(ar, arg); + if (ret) + return ret; + + ret = wait_for_completion_timeout(&ar->scan.started, 1 * hz); + if (ret == 0) { + ret = ath11k_scan_stop(ar); + if (ret) + ath11k_warn(ar->ab, "failed to stop scan: %d ", ret); + + return -etimedout; + } + + 
/* if we failed to start the scan, return error code at + * this point. this is probably due to some issue in the + * firmware, but no need to wedge the driver due to that... + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state == ath11k_scan_idle) { + spin_unlock_bh(&ar->data_lock); + return -einval; + } + spin_unlock_bh(&ar->data_lock); + + return 0; +} + +static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct cfg80211_scan_request *req = &hw_req->req; + struct scan_req_params arg; + int ret = 0; + int i; + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ath11k_scan_idle: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + ar->scan.state = ath11k_scan_starting; + ar->scan.is_roc = false; + ar->scan.vdev_id = arvif->vdev_id; + ret = 0; + break; + case ath11k_scan_starting: + case ath11k_scan_running: + case ath11k_scan_aborting: + ret = -ebusy; + break; + } + spin_unlock_bh(&ar->data_lock); + + if (ret) + goto exit; + + memset(&arg, 0, sizeof(arg)); + ath11k_wmi_start_scan_init(ar, &arg); + arg.vdev_id = arvif->vdev_id; + arg.scan_id = ath11k_scan_id; + + if (req->ie_len) { + arg.extraie.len = req->ie_len; + arg.extraie.ptr = kzalloc(req->ie_len, gfp_kernel); + memcpy(arg.extraie.ptr, req->ie, req->ie_len); + } + + if (req->n_ssids) { + arg.num_ssids = req->n_ssids; + for (i = 0; i < arg.num_ssids; i++) { + arg.ssid[i].length = req->ssids[i].ssid_len; + memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid, + req->ssids[i].ssid_len); + } + } else { + arg.scan_flags |= wmi_scan_flag_passive; + } + + if (req->n_channels) { + arg.num_chan = req->n_channels; + for (i = 0; i < arg.num_chan; i++) + arg.chan_list[i] = req->channels[i]->center_freq; + } + + ret = ath11k_start_scan(ar, &arg); + if (ret) { + 
ath11k_warn(ar->ab, "failed to start hw scan: %d ", ret); + spin_lock_bh(&ar->data_lock); + ar->scan.state = ath11k_scan_idle; + spin_unlock_bh(&ar->data_lock); + } + + /* add a 200ms margin to account for event/command processing */ + ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, + msecs_to_jiffies(arg.max_scan_time + + ath11k_mac_scan_timeout_msecs)); + +exit: + if (req->ie_len) + kfree(arg.extraie.ptr); + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath11k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + ath11k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); +} + +static int ath11k_install_key(struct ath11k_vif *arvif, + struct ieee80211_key_conf *key, + enum set_key_cmd cmd, + const u8 *macaddr, u32 flags) +{ + int ret; + struct ath11k *ar = arvif->ar; + struct wmi_vdev_install_key_arg arg = { + .vdev_id = arvif->vdev_id, + .key_idx = key->keyidx, + .key_len = key->keylen, + .key_data = key->key, + .key_flags = flags, + .macaddr = macaddr, + }; + + lockdep_assert_held(&arvif->ar->conf_mutex); + + reinit_completion(&ar->install_key_done); + + if (cmd == disable_key) { + /* todo: check if fw expects value other than none for del */ + /* arg.key_cipher = wmi_cipher_none; */ + arg.key_len = 0; + arg.key_data = null; + goto install; + } + + switch (key->cipher) { + case wlan_cipher_suite_ccmp: + arg.key_cipher = wmi_cipher_aes_ccm; + /* todo: re-check if flag is valid */ + key->flags |= ieee80211_key_flag_generate_iv_mgmt; + break; + case wlan_cipher_suite_tkip: + arg.key_cipher = wmi_cipher_tkip; + arg.key_txmic_len = 8; + arg.key_rxmic_len = 8; + break; + case wlan_cipher_suite_ccmp_256: + arg.key_cipher = wmi_cipher_aes_ccm; + break; + case wlan_cipher_suite_gcmp: + case wlan_cipher_suite_gcmp_256: + arg.key_cipher = wmi_cipher_aes_gcm; + break; + default: + ath11k_warn(ar->ab, "cipher %d 
is not supported ", key->cipher); + return -eopnotsupp; + } + +install: + ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg); + if (ret) + return ret; + + if (!wait_for_completion_timeout(&ar->install_key_done, 1 * hz)) + return -etimedout; + + return ar->install_key_status ? -einval : 0; +} + +static int ath11k_clear_peer_keys(struct ath11k_vif *arvif, + const u8 *addr) +{ + struct ath11k *ar = arvif->ar; + struct ath11k_base *ab = ar->ab; + struct ath11k_peer *peer; + int first_errno = 0; + int ret; + int i; + u32 flags = 0; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find(ab, arvif->vdev_id, addr); + spin_unlock_bh(&ab->base_lock); + + if (!peer) + return -enoent; + + for (i = 0; i < array_size(peer->keys); i++) { + if (!peer->keys[i]) + continue; + + /* key flags are not required to delete the key */ + ret = ath11k_install_key(arvif, peer->keys[i], + disable_key, addr, flags); + if (ret < 0 && first_errno == 0) + first_errno = ret; + + if (ret < 0) + ath11k_warn(ab, "failed to remove peer key %d: %d ", + i, ret); + + spin_lock_bh(&ab->base_lock); + peer->keys[i] = null; + spin_unlock_bh(&ab->base_lock); + } + + return first_errno; +} + +static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k_peer *peer; + const u8 *peer_addr; + int ret = 0; + u32 flags = 0; + + /* bip needs to be done in software */ + if (key->cipher == wlan_cipher_suite_aes_cmac || + key->cipher == wlan_cipher_suite_bip_gmac_128 || + key->cipher == wlan_cipher_suite_bip_gmac_256 || + key->cipher == wlan_cipher_suite_bip_cmac_256) + return 1; + + if (key->keyidx > wmi_max_key_index) + return -enospc; + + mutex_lock(&ar->conf_mutex); + + if (sta) + peer_addr = sta->addr; + else if 
(arvif->vdev_type == wmi_vdev_type_sta) + peer_addr = vif->bss_conf.bssid; + else + peer_addr = vif->addr; + + key->hw_key_idx = key->keyidx; + + /* the peer should not disappear in mid-way (unless fw goes awry) since + * we already hold conf_mutex. we just make sure its there now. + */ + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); + spin_unlock_bh(&ab->base_lock); + + if (!peer) { + if (cmd == set_key) { + ath11k_warn(ab, "cannot install key for non-existent peer %pm ", + peer_addr); + ret = -eopnotsupp; + goto exit; + } else { + /* if the peer doesn't exist there is no key to disable + * anymore + */ + goto exit; + } + } + + if (key->flags & ieee80211_key_flag_pairwise) + flags |= wmi_key_pairwise; + else + flags |= wmi_key_group; + + ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags); + if (ret) { + ath11k_warn(ab, "ath11k_install_key failed (%d) ", ret); + goto exit; + } + + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr); + if (peer && cmd == set_key) + peer->keys[key->keyidx] = key; + else if (peer && cmd == disable_key) + peer->keys[key->keyidx] = null; + else if (!peer) + /* impossible unless fw goes crazy */ + ath11k_warn(ab, "peer %pm disappeared! 
", peer_addr); + spin_unlock_bh(&ab->base_lock); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static int +ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + int i; + + for (i = 0; i < array_size(mask->control[band].vht_mcs); i++) + num_rates += hweight16(mask->control[band].vht_mcs[i]); + + return num_rates; +} + +static int +ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif, + struct ieee80211_sta *sta, + const struct cfg80211_bitrate_mask *mask, + enum nl80211_band band) +{ + struct ath11k *ar = arvif->ar; + u8 vht_rate, nss; + u32 rate_code; + int ret, i; + + lockdep_assert_held(&ar->conf_mutex); + + nss = 0; + + for (i = 0; i < array_size(mask->control[band].vht_mcs); i++) { + if (hweight16(mask->control[band].vht_mcs[i]) == 1) { + nss = i + 1; + vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1; + } + } + + if (!nss) { + ath11k_warn(ar->ab, "no single vht fixed rate found to set for %pm", + sta->addr); + return -einval; + } + + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "setting fixed vht rate for peer %pm. 
device will not switch to any other selected rates", + sta->addr); + + rate_code = ath11k_hw_rate_code(vht_rate, nss - 1, + wmi_rate_preamble_vht); + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + wmi_peer_param_fixed_rate, + rate_code); + if (ret) + ath11k_warn(ar->ab, + "failed to update sta %pm fixed rate %d: %d ", + sta->addr, rate_code, ret); + + return ret; +} + +static int ath11k_station_assoc(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + bool reassoc) +{ + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct peer_assoc_params peer_arg; + int ret = 0; + struct cfg80211_chan_def def; + enum nl80211_band band; + struct cfg80211_bitrate_mask *mask; + u8 num_vht_rates; + + lockdep_assert_held(&ar->conf_mutex); + + if (warn_on(ath11k_mac_vif_chan(vif, &def))) + return -eperm; + + band = def.chan->band; + mask = &arvif->bitrate_mask; + + ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + + ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (ret) { + ath11k_warn(ar->ab, "failed to run peer assoc for sta %pm vdev %i: %d ", + sta->addr, arvif->vdev_id, ret); + return ret; + } + + if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * hz)) { + ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pm vdev %i ", + sta->addr, arvif->vdev_id); + return -etimedout; + } + + num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask); + + /* if single vht rate is configured (by set_bitrate_mask()), + * peer_assoc will disable vht. this is now enabled by a peer specific + * fixed param. + * note that all other rates and nss will be disabled for this peer. + */ + if (sta->vht_cap.vht_supported && num_vht_rates == 1) { + ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + band); + if (ret) + return ret; + } + + /* re-assoc is run only to update supported rates for given station. it + * doesn't make much sense to reconfigure the peer completely. 
+ */ + if (reassoc) + return 0; + + ret = ath11k_setup_peer_smps(ar, arvif, sta->addr, + &sta->ht_cap); + if (ret) { + ath11k_warn(ar->ab, "failed to setup peer smps for vdev %d: %d ", + arvif->vdev_id, ret); + return ret; + } + + if (!sta->wme) { + arvif->num_legacy_stations++; + ret = ath11k_recalc_rtscts_prot(arvif); + if (ret) + return ret; + } + + if (sta->wme && sta->uapsd_queues) { + ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta); + if (ret) { + ath11k_warn(ar->ab, "failed to set qos params for sta %pm for vdev %i: %d ", + sta->addr, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath11k_station_disassoc(struct ath11k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + if (!sta->wme) { + arvif->num_legacy_stations--; + ret = ath11k_recalc_rtscts_prot(arvif); + if (ret) + return ret; + } + + ret = ath11k_clear_peer_keys(arvif, sta->addr); + if (ret) { + ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d ", + arvif->vdev_id, ret); + return ret; + } + return 0; +} + +static void ath11k_sta_rc_update_wk(struct work_struct *wk) +{ + struct ath11k *ar; + struct ath11k_vif *arvif; + struct ath11k_sta *arsta; + struct ieee80211_sta *sta; + struct cfg80211_chan_def def; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + u32 changed, bw, nss, smps; + int err, num_vht_rates; + const struct cfg80211_bitrate_mask *mask; + struct peer_assoc_params peer_arg; + + arsta = container_of(wk, struct ath11k_sta, update_wk); + sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); + arvif = arsta->arvif; + ar = arvif->ar; + + if (warn_on(ath11k_mac_vif_chan(arvif->vif, &def))) + return; + + band = def.chan->band; + ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; + vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; + + spin_lock_bh(&ar->data_lock); + + changed 
= arsta->changed; + arsta->changed = 0; + + bw = arsta->bw; + nss = arsta->nss; + smps = arsta->smps; + + spin_unlock_bh(&ar->data_lock); + + mutex_lock(&ar->conf_mutex); + + nss = max_t(u32, 1, nss); + nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask), + ath11k_mac_max_vht_nss(vht_mcs_mask))); + + if (changed & ieee80211_rc_bw_changed) { + err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + wmi_peer_chwidth, bw); + if (err) + ath11k_warn(ar->ab, "failed to update sta %pm peer bw %d: %d ", + sta->addr, bw, err); + } + + if (changed & ieee80211_rc_nss_changed) { + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac update sta %pm nss %d ", + sta->addr, nss); + + err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + wmi_peer_nss, nss); + if (err) + ath11k_warn(ar->ab, "failed to update sta %pm nss %d: %d ", + sta->addr, nss, err); + } + + if (changed & ieee80211_rc_smps_changed) { + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac update sta %pm smps %d ", + sta->addr, smps); + + err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + wmi_peer_mimo_ps_state, smps); + if (err) + ath11k_warn(ar->ab, "failed to update sta %pm smps %d: %d ", + sta->addr, smps, err); + } + + if (changed & ieee80211_rc_supp_rates_changed) { + mask = &arvif->bitrate_mask; + num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, + mask); + + /* peer_assoc_prepare will reject vht rates in + * bitrate_mask if its not available in range format and + * sets vht tx_rateset as unsupported. so multiple vht mcs + * setting(eg. mcs 4,5,6) per peer is not supported here. + * but, single rate in vht mask can be set as per-peer + * fixed rate. but even if any ht rates are configured in + * the bitrate mask, device will not switch to those rates + * when per-peer fixed rate is set. + * todo: check ratemask_cmdid to support auto rates selection + * across ht/vht and for multiple vht mcs support. 
+ */ + if (sta->vht_cap.vht_supported && num_vht_rates == 1) { + ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + band); + } else { + /* if the peer is non-vht or no fixed vht rate + * is provided in the new bitrate mask we set the + * other rates using peer_assoc command. + */ + ath11k_peer_assoc_prepare(ar, arvif->vif, sta, + &peer_arg, true); + + err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg); + if (err) + ath11k_warn(ar->ab, "failed to run peer assoc for sta %pm vdev %i: %d ", + sta->addr, arvif->vdev_id, err); + + if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * hz)) + ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pm vdev %i ", + sta->addr, arvif->vdev_id); + } + } + + mutex_unlock(&ar->conf_mutex); +} + +static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif, + struct ieee80211_sta *sta) +{ + struct ath11k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type == wmi_vdev_type_sta && !sta->tdls) + return 0; + + if (ar->num_stations >= ar->max_num_stations) + return -enobufs; + + ar->num_stations++; + + return 0; +} + +static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif, + struct ieee80211_sta *sta) +{ + struct ath11k *ar = arvif->ar; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type == wmi_vdev_type_sta && !sta->tdls) + return; + + ar->num_stations--; +} + +static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct peer_create_params peer_param; + int ret = 0; + + /* cancel must be done outside the mutex to avoid deadlock */ + if ((old_state == ieee80211_sta_none && + new_state == ieee80211_sta_notexist)) + cancel_work_sync(&arsta->update_wk); + + 
mutex_lock(&ar->conf_mutex); + + if (old_state == ieee80211_sta_notexist && + new_state == ieee80211_sta_none) { + memset(arsta, 0, sizeof(*arsta)); + arsta->arvif = arvif; + init_work(&arsta->update_wk, ath11k_sta_rc_update_wk); + + ret = ath11k_mac_inc_num_stations(arvif, sta); + if (ret) { + ath11k_warn(ar->ab, "refusing to associate station: too many connected already (%d) ", + ar->max_num_stations); + goto exit; + } + + arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), gfp_kernel); + if (!arsta->rx_stats) { + ret = -enomem; + goto exit; + } + + peer_param.vdev_id = arvif->vdev_id; + peer_param.peer_addr = sta->addr; + peer_param.peer_type = wmi_peer_type_default; + ret = ath11k_peer_create(ar, arvif, sta, &peer_param); + if (ret) { + ath11k_warn(ar->ab, "failed to add peer: %pm for vdev: %d ", + sta->addr, arvif->vdev_id); + ath11k_mac_dec_num_stations(arvif, sta); + goto exit; + } + + ath11k_info(ar->ab, "added peer: %pm for vdev: %d ", + sta->addr, arvif->vdev_id); + + if (ath11k_debug_is_extd_tx_stats_enabled(ar)) { + arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), + gfp_kernel); + if (!arsta->tx_stats) { + ret = -enomem; + goto exit; + } + } + + if (ieee80211_vif_is_mesh(vif)) { + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + wmi_peer_use_4addr, 1); + if (ret) { + ath11k_warn(ar->ab, "failed to sta %pm 4addr capability: %d ", + sta->addr, ret); + goto exit; + } + } + + ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); + if (ret) { + ath11k_warn(ar->ab, "failed to setup dp for peer %pm on vdev %i (%d) ", + sta->addr, arvif->vdev_id, ret); + ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); + ath11k_mac_dec_num_stations(arvif, sta); + } + } else if ((old_state == ieee80211_sta_none && + new_state == ieee80211_sta_notexist)) { + ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); + + ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr); + if (ret) + ath11k_warn(ar->ab, "failed to delete peer: %pm for vdev: %d ", + 
sta->addr, arvif->vdev_id); + else + ath11k_info(ar->ab, + "removed peer: %pm for vdev: %d ", + sta->addr, arvif->vdev_id); + + ath11k_mac_dec_num_stations(arvif, sta); + + kfree(arsta->tx_stats); + arsta->tx_stats = null; + + kfree(arsta->rx_stats); + arsta->rx_stats = null; + } else if (old_state == ieee80211_sta_auth && + new_state == ieee80211_sta_assoc && + (vif->type == nl80211_iftype_ap || + vif->type == nl80211_iftype_mesh_point || + vif->type == nl80211_iftype_adhoc)) { + ret = ath11k_station_assoc(ar, vif, sta, false); + if (ret) + ath11k_warn(ar->ab, "failed to associate station: %pm ", + sta->addr); + else + ath11k_info(ar->ab, + "station %pm moved to assoc state ", + sta->addr); + } else if (old_state == ieee80211_sta_assoc && + new_state == ieee80211_sta_auth && + (vif->type == nl80211_iftype_ap || + vif->type == nl80211_iftype_mesh_point || + vif->type == nl80211_iftype_adhoc)) { + ret = ath11k_station_disassoc(ar, vif, sta); + if (ret) + ath11k_warn(ar->ab, "failed to disassociate station: %pm ", + sta->addr); + else + ath11k_info(ar->ab, + "station %pm moved to disassociated state ", + sta->addr); + } + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed) +{ + struct ath11k *ar = hw->priv; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct ath11k_peer *peer; + u32 bw, smps; + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); + if (!peer) { + spin_unlock_bh(&ar->ab->base_lock); + ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pm on vdev %i ", + sta->addr, arvif->vdev_id); + return; + } + + spin_unlock_bh(&ar->ab->base_lock); + + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "mac sta rc update for %pm changed %08x bw %d nss %d smps %d ", + sta->addr, changed, sta->bandwidth, 
sta->rx_nss, + sta->smps_mode); + + spin_lock_bh(&ar->data_lock); + + if (changed & ieee80211_rc_bw_changed) { + bw = wmi_peer_chwidth_20mhz; + + switch (sta->bandwidth) { + case ieee80211_sta_rx_bw_20: + bw = wmi_peer_chwidth_20mhz; + break; + case ieee80211_sta_rx_bw_40: + bw = wmi_peer_chwidth_40mhz; + break; + case ieee80211_sta_rx_bw_80: + bw = wmi_peer_chwidth_80mhz; + break; + case ieee80211_sta_rx_bw_160: + bw = wmi_peer_chwidth_160mhz; + break; + default: + ath11k_warn(ar->ab, "invalid bandwidth %d in rc update for %pm ", + sta->bandwidth, sta->addr); + bw = wmi_peer_chwidth_20mhz; + break; + } + + arsta->bw = bw; + } + + if (changed & ieee80211_rc_nss_changed) + arsta->nss = sta->rx_nss; + + if (changed & ieee80211_rc_smps_changed) { + smps = wmi_peer_smps_ps_none; + + switch (sta->smps_mode) { + case ieee80211_smps_automatic: + case ieee80211_smps_off: + smps = wmi_peer_smps_ps_none; + break; + case ieee80211_smps_static: + smps = wmi_peer_smps_static; + break; + case ieee80211_smps_dynamic: + smps = wmi_peer_smps_dynamic; + break; + default: + ath11k_warn(ar->ab, "invalid smps %d in sta rc update for %pm ", + sta->smps_mode, sta->addr); + smps = wmi_peer_smps_ps_none; + break; + } + + arsta->smps = smps; + } + + arsta->changed |= changed; + + spin_unlock_bh(&ar->data_lock); + + ieee80211_queue_work(hw, &arsta->update_wk); +} + +static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif, + u16 ac, bool enable) +{ + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + u32 value = 0; + int ret = 0; + + if (arvif->vdev_type != wmi_vdev_type_sta) + return 0; + + switch (ac) { + case ieee80211_ac_vo: + value = wmi_sta_ps_uapsd_ac3_delivery_en | + wmi_sta_ps_uapsd_ac3_trigger_en; + break; + case ieee80211_ac_vi: + value = wmi_sta_ps_uapsd_ac2_delivery_en | + wmi_sta_ps_uapsd_ac2_trigger_en; + break; + case ieee80211_ac_be: + value = wmi_sta_ps_uapsd_ac1_delivery_en | + wmi_sta_ps_uapsd_ac1_trigger_en; + break; + case ieee80211_ac_bk: + 
value = wmi_sta_ps_uapsd_ac0_delivery_en | + wmi_sta_ps_uapsd_ac0_trigger_en; + break; + } + + if (enable) + arvif->u.sta.uapsd |= value; + else + arvif->u.sta.uapsd &= ~value; + + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + wmi_sta_ps_param_uapsd, + arvif->u.sta.uapsd); + if (ret) { + ath11k_warn(ar->ab, "could not set uapsd params %d ", ret); + goto exit; + } + + if (arvif->u.sta.uapsd) + value = wmi_sta_ps_rx_wake_policy_poll_uapsd; + else + value = wmi_sta_ps_rx_wake_policy_wake; + + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + wmi_sta_ps_param_rx_wake_policy, + value); + if (ret) + ath11k_warn(ar->ab, "could not set rx wake param %d ", ret); + +exit: + return ret; +} + +static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct wmi_wmm_params_arg *p = null; + int ret; + + mutex_lock(&ar->conf_mutex); + + switch (ac) { + case ieee80211_ac_vo: + p = &arvif->wmm_params.ac_vo; + break; + case ieee80211_ac_vi: + p = &arvif->wmm_params.ac_vi; + break; + case ieee80211_ac_be: + p = &arvif->wmm_params.ac_be; + break; + case ieee80211_ac_bk: + p = &arvif->wmm_params.ac_bk; + break; + } + + if (warn_on(!p)) { + ret = -einval; + goto exit; + } + + p->cwmin = params->cw_min; + p->cwmax = params->cw_max; + p->aifs = params->aifs; + + /* the channel time duration programmed in the hw is in absolute + * microseconds, while mac80211 gives the txop in units of + * 32 microseconds. 
+ */ + p->txop = params->txop * 32; + + ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, + &arvif->wmm_params); + if (ret) { + ath11k_warn(ar->ab, "failed to set wmm params: %d ", ret); + goto exit; + } + + ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd); + + if (ret) + ath11k_warn(ar->ab, "failed to set sta uapsd: %d ", ret); + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static struct ieee80211_sta_ht_cap +ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask) +{ + int i; + struct ieee80211_sta_ht_cap ht_cap = {0}; + u32 ar_vht_cap = ar->pdev->cap.vht_cap; + + if (!(ar_ht_cap & wmi_ht_cap_enabled)) + return ht_cap; + + ht_cap.ht_supported = 1; + ht_cap.ampdu_factor = ieee80211_ht_max_ampdu_64k; + ht_cap.ampdu_density = ieee80211_ht_mpdu_density_8; + ht_cap.cap |= ieee80211_ht_cap_sup_width_20_40; + ht_cap.cap |= ieee80211_ht_cap_dssscck40; + ht_cap.cap |= wlan_ht_cap_sm_ps_static << ieee80211_ht_cap_sm_ps_shift; + + if (ar_ht_cap & wmi_ht_cap_ht20_sgi) + ht_cap.cap |= ieee80211_ht_cap_sgi_20; + + if (ar_ht_cap & wmi_ht_cap_ht40_sgi) + ht_cap.cap |= ieee80211_ht_cap_sgi_40; + + if (ar_ht_cap & wmi_ht_cap_dynamic_smps) { + u32 smps; + + smps = wlan_ht_cap_sm_ps_dynamic; + smps <<= ieee80211_ht_cap_sm_ps_shift; + + ht_cap.cap |= smps; + } + + if (ar_ht_cap & wmi_ht_cap_tx_stbc) + ht_cap.cap |= ieee80211_ht_cap_tx_stbc; + + if (ar_ht_cap & wmi_ht_cap_rx_stbc) { + u32 stbc; + + stbc = ar_ht_cap; + stbc &= wmi_ht_cap_rx_stbc; + stbc >>= wmi_ht_cap_rx_stbc_mask_shift; + stbc <<= ieee80211_ht_cap_rx_stbc_shift; + stbc &= ieee80211_ht_cap_rx_stbc; + + ht_cap.cap |= stbc; + } + + if (ar_ht_cap & wmi_ht_cap_rx_ldpc) + ht_cap.cap |= ieee80211_ht_cap_ldpc_coding; + + if (ar_ht_cap & wmi_ht_cap_l_sig_txop_prot) + ht_cap.cap |= ieee80211_ht_cap_lsig_txop_prot; + + if (ar_vht_cap & wmi_vht_cap_max_mpdu_len_mask) + ht_cap.cap |= ieee80211_ht_cap_max_amsdu; + + for (i = 0; i < ar->num_rx_chains; i++) { + if 
(rate_cap_rx_chainmask & bit(i)) + ht_cap.mcs.rx_mask[i] = 0xff; + } + + ht_cap.mcs.tx_params |= ieee80211_ht_mcs_tx_defined; + + return ht_cap; +} + +static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) +{ + u32 value = 0; + struct ath11k *ar = arvif->ar; + int nsts; + int sound_dim; + u32 vht_cap = ar->pdev->cap.vht_cap; + u32 vdev_param = wmi_vdev_param_txbf; + + if (vht_cap & (ieee80211_vht_cap_su_beamformee_capable)) { + nsts = vht_cap & ieee80211_vht_cap_beamformee_sts_mask; + nsts >>= ieee80211_vht_cap_beamformee_sts_shift; + value |= sm(nsts, wmi_txbf_sts_cap_offset); + } + + if (vht_cap & (ieee80211_vht_cap_su_beamformer_capable)) { + sound_dim = vht_cap & + ieee80211_vht_cap_sounding_dimensions_mask; + sound_dim >>= ieee80211_vht_cap_sounding_dimensions_shift; + if (sound_dim > (ar->num_tx_chains - 1)) + sound_dim = ar->num_tx_chains - 1; + value |= sm(sound_dim, wmi_bf_sound_dim_offset); + } + + if (!value) + return 0; + + if (vht_cap & ieee80211_vht_cap_su_beamformer_capable) { + value |= wmi_vdev_param_txbf_su_tx_bfer; + + if ((vht_cap & ieee80211_vht_cap_mu_beamformer_capable) && + arvif->vdev_type == wmi_vdev_type_ap) + value |= wmi_vdev_param_txbf_mu_tx_bfer; + } + + /* todo: subfee not validated in hk, disable here until validated? 
*/ + + if (vht_cap & ieee80211_vht_cap_su_beamformee_capable) { + value |= wmi_vdev_param_txbf_su_tx_bfee; + + if ((vht_cap & ieee80211_vht_cap_mu_beamformee_capable) && + arvif->vdev_type == wmi_vdev_type_sta) + value |= wmi_vdev_param_txbf_mu_tx_bfee; + } + + return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, value); +} + +static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) +{ + bool subfer, subfee; + int sound_dim = 0; + + subfer = !!(*vht_cap & (ieee80211_vht_cap_su_beamformer_capable)); + subfee = !!(*vht_cap & (ieee80211_vht_cap_su_beamformee_capable)); + + if (ar->num_tx_chains < 2) { + *vht_cap &= ~(ieee80211_vht_cap_su_beamformer_capable); + subfer = false; + } + + /* if su beaformer is not set, then disable mu beamformer capability */ + if (!subfer) + *vht_cap &= ~(ieee80211_vht_cap_mu_beamformer_capable); + + /* if su beaformee is not set, then disable mu beamformee capability */ + if (!subfee) + *vht_cap &= ~(ieee80211_vht_cap_mu_beamformee_capable); + + sound_dim = (*vht_cap & ieee80211_vht_cap_sounding_dimensions_mask); + sound_dim >>= ieee80211_vht_cap_sounding_dimensions_shift; + *vht_cap &= ~ieee80211_vht_cap_sounding_dimensions_mask; + + /* todo: need to check invalid sts and sound_dim values set by fw? 
*/ + + /* enable sounding dimension field only if su bf is enabled */ + if (subfer) { + if (sound_dim > (ar->num_tx_chains - 1)) + sound_dim = ar->num_tx_chains - 1; + + sound_dim <<= ieee80211_vht_cap_sounding_dimensions_shift; + sound_dim &= ieee80211_vht_cap_sounding_dimensions_mask; + *vht_cap |= sound_dim; + } + + /* use the sts advertised by fw unless su beamformee is not supported*/ + if (!subfee) + *vht_cap &= ~(ieee80211_vht_cap_beamformee_sts_mask); +} + +static struct ieee80211_sta_vht_cap +ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask, + u32 rate_cap_rx_chainmask) +{ + struct ieee80211_sta_vht_cap vht_cap = {0}; + u16 txmcs_map, rxmcs_map; + int i; + + vht_cap.vht_supported = 1; + vht_cap.cap = ar->pdev->cap.vht_cap; + + ath11k_set_vht_txbf_cap(ar, &vht_cap.cap); + + /* todo: enable back vht160 mode once association issues are fixed */ + /* disabling vht160 and vht80+80 modes */ + vht_cap.cap &= ~ieee80211_vht_cap_supp_chan_width_mask; + vht_cap.cap &= ~ieee80211_vht_cap_short_gi_160; + + rxmcs_map = 0; + txmcs_map = 0; + for (i = 0; i < 8; i++) { + if (i < ar->num_tx_chains && rate_cap_tx_chainmask & bit(i)) + txmcs_map |= ieee80211_vht_mcs_support_0_9 << (i * 2); + else + txmcs_map |= ieee80211_vht_mcs_not_supported << (i * 2); + + if (i < ar->num_rx_chains && rate_cap_rx_chainmask & bit(i)) + rxmcs_map |= ieee80211_vht_mcs_support_0_9 << (i * 2); + else + rxmcs_map |= ieee80211_vht_mcs_not_supported << (i * 2); + } + + if (rate_cap_tx_chainmask <= 1) + vht_cap.cap &= ~ieee80211_vht_cap_txstbc; + + vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map); + vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map); + + return vht_cap; +} + +static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar, + struct ath11k_pdev_cap *cap, + u32 *ht_cap_info) +{ + struct ieee80211_supported_band *band; + u32 rate_cap_tx_chainmask; + u32 rate_cap_rx_chainmask; + u32 ht_cap; + + rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift; + 
rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift; + + if (cap->supported_bands & wmi_host_wlan_2g_cap) { + band = &ar->mac.sbands[nl80211_band_2ghz]; + ht_cap = cap->band[nl80211_band_2ghz].ht_cap_info; + if (ht_cap_info) + *ht_cap_info = ht_cap; + band->ht_cap = ath11k_create_ht_cap(ar, ht_cap, + rate_cap_rx_chainmask); + } + + if (cap->supported_bands & wmi_host_wlan_5g_cap) { + band = &ar->mac.sbands[nl80211_band_5ghz]; + ht_cap = cap->band[nl80211_band_5ghz].ht_cap_info; + if (ht_cap_info) + *ht_cap_info = ht_cap; + band->ht_cap = ath11k_create_ht_cap(ar, ht_cap, + rate_cap_rx_chainmask); + band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask, + rate_cap_rx_chainmask); + } +} + +static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant) +{ + /* todo: check the request chainmask against the supported + * chainmask table which is advertised in extented_service_ready event + */ + + return 0; +} + +static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ath11k_check_chain_mask(ar, tx_ant, true)) + return -einval; + + if (ath11k_check_chain_mask(ar, rx_ant, false)) + return -einval; + + ar->cfg_tx_chainmask = tx_ant; + ar->cfg_rx_chainmask = rx_ant; + + if (ar->state != ath11k_state_on && + ar->state != ath11k_state_restarted) + return 0; + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_tx_chain_mask, + tx_ant, ar->pdev->pdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x ", + ret, tx_ant); + return ret; + } + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_rx_chain_mask, + rx_ant, ar->pdev->pdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x ", + ret, rx_ant); + return ret; + } + + /* reload ht/vht capability */ + ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, null); + + return 0; +} + +int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void 
*ctx) +{ + struct ath11k *ar = ctx; + struct ath11k_base *ab = ar->ab; + struct sk_buff *msdu = skb; + struct ieee80211_tx_info *info; + + spin_lock_bh(&ar->txmgmt_idr_lock); + idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + dma_unmap_single(ab->dev, ath11k_skb_cb(msdu)->paddr, msdu->len, + dma_to_device); + + info = ieee80211_skb_cb(msdu); + memset(&info->status, 0, sizeof(info->status)); + + ieee80211_free_txskb(ar->hw, msdu); + + return 0; +} + +static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) +{ + struct ieee80211_vif *vif = ctx; + struct ath11k_skb_cb *skb_cb = ath11k_skb_cb((struct sk_buff *)skb); + struct sk_buff *msdu = skb; + struct ath11k *ar = skb_cb->ar; + struct ath11k_base *ab = ar->ab; + + if (skb_cb->vif == vif) { + spin_lock_bh(&ar->txmgmt_idr_lock); + idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, + dma_to_device); + } + + return 0; +} + +static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, + struct sk_buff *skb) +{ + struct ath11k_base *ab = ar->ab; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + dma_addr_t paddr; + int buf_id; + int ret; + + spin_lock_bh(&ar->txmgmt_idr_lock); + buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, + ath11k_tx_mgmt_num_pending_max, gfp_atomic); + spin_unlock_bh(&ar->txmgmt_idr_lock); + if (buf_id < 0) + return -enospc; + + if ((ieee80211_is_action(hdr->frame_control) || + ieee80211_is_deauth(hdr->frame_control) || + ieee80211_is_disassoc(hdr->frame_control)) && + ieee80211_has_protected(hdr->frame_control)) { + skb_put(skb, ieee80211_ccmp_mic_len); + } + + paddr = dma_map_single(ab->dev, skb->data, skb->len, dma_to_device); + if (dma_mapping_error(ab->dev, paddr)) { + ath11k_warn(ab, "failed to dma map mgmt tx buffer "); + ret = -eio; + goto err_free_idr; + } + + ath11k_skb_cb(skb)->paddr = paddr; + + ret = ath11k_wmi_mgmt_send(ar, 
arvif->vdev_id, buf_id, skb); + if (ret) { + ath11k_warn(ar->ab, "failed to send mgmt frame: %d ", ret); + goto err_unmap_buf; + } + + return 0; + +err_unmap_buf: + dma_unmap_single(ab->dev, ath11k_skb_cb(skb)->paddr, + skb->len, dma_to_device); +err_free_idr: + spin_lock_bh(&ar->txmgmt_idr_lock); + idr_remove(&ar->txmgmt_idr, buf_id); + spin_unlock_bh(&ar->txmgmt_idr_lock); + + return ret; +} + +static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != null) + ieee80211_free_txskb(ar->hw, skb); +} + +static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work) +{ + struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work); + struct ieee80211_tx_info *info; + struct ath11k_vif *arvif; + struct sk_buff *skb; + int ret; + + while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != null) { + info = ieee80211_skb_cb(skb); + arvif = ath11k_vif_to_arvif(info->control.vif); + + ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb); + if (ret) { + ath11k_warn(ar->ab, "failed to transmit management frame %d ", + ret); + ieee80211_free_txskb(ar->hw, skb); + } else { + atomic_inc(&ar->num_pending_mgmt_tx); + } + } +} + +static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb, + bool is_prb_rsp) +{ + struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; + + if (test_bit(ath11k_flag_crash_flush, &ar->ab->dev_flags)) + return -eshutdown; + + /* drop probe response packets when the pending management tx + * count has reached a certain threshold, so as to prioritize + * other mgmt packets like auth and assoc to be sent on time + * for establishing successful connections. 
+ */ + if (is_prb_rsp && + atomic_read(&ar->num_pending_mgmt_tx) > ath11k_prb_rsp_drop_threshold) { + ath11k_warn(ar->ab, + "dropping probe response as pending queue is almost full "); + return -enospc; + } + + if (skb_queue_len(q) == ath11k_tx_mgmt_num_pending_max) { + ath11k_warn(ar->ab, "mgmt tx queue is full "); + return -enospc; + } + + skb_queue_tail(q, skb); + ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); + + return 0; +} + +static void ath11k_mac_op_tx(struct ieee80211_hw *hw, + struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ath11k *ar = hw->priv; + struct ieee80211_tx_info *info = ieee80211_skb_cb(skb); + struct ieee80211_vif *vif = info->control.vif; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + bool is_prb_rsp; + int ret; + + if (ieee80211_is_mgmt(hdr->frame_control)) { + is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); + ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp); + if (ret) { + ath11k_warn(ar->ab, "failed to queue management frame %d ", + ret); + ieee80211_free_txskb(ar->hw, skb); + } + return; + } + + ret = ath11k_dp_tx(ar, arvif, skb); + if (ret) { + ath11k_warn(ar->ab, "failed to transmit frame %d ", ret); + ieee80211_free_txskb(ar->hw, skb); + } +} + +void ath11k_mac_drain_tx(struct ath11k *ar) +{ + /* make sure rcu-protected mac80211 tx path itself is drained */ + synchronize_net(); + + cancel_work_sync(&ar->wmi_mgmt_tx_work); + ath11k_mgmt_over_wmi_tx_purge(ar); +} + +static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) +{ + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + u32 ring_id; + + if (enable) + tlv_filter = ath11k_mac_mon_status_filter_default; + + ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id; + + return ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, + hal_rxdma_monitor_status, + dp_rx_buffer_size, &tlv_filter); +} + +static int 
ath11k_mac_op_start(struct ieee80211_hw *hw) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + struct ath11k_pdev *pdev = ar->pdev; + int ret; + + ath11k_mac_drain_tx(ar); + mutex_lock(&ar->conf_mutex); + + switch (ar->state) { + case ath11k_state_off: + ar->state = ath11k_state_on; + break; + case ath11k_state_restarting: + ar->state = ath11k_state_restarted; + break; + case ath11k_state_restarted: + case ath11k_state_wedged: + case ath11k_state_on: + warn_on(1); + ret = -einval; + goto err; + } + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_pmf_qos, + 1, pdev->pdev_id); + + if (ret) { + ath11k_err(ar->ab, "failed to enable pmf qos: (%d ", ret); + goto err; + } + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_dynamic_bw, 1, + pdev->pdev_id); + if (ret) { + ath11k_err(ar->ab, "failed to enable dynamic bw: %d ", ret); + goto err; + } + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_arp_ac_override, + 0, pdev->pdev_id); + if (ret) { + ath11k_err(ab, "failed to set ac override for arp: %d ", + ret); + goto err; + } + + ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id); + if (ret) { + ath11k_err(ab, "failed to offload radar detection: %d ", + ret); + goto err; + } + + ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar, + htt_ppdu_stats_tag_default); + if (ret) { + ath11k_err(ab, "failed to req ppdu stats: %d ", ret); + goto err; + } + + ret = ath11k_wmi_pdev_set_param(ar, wmi_pdev_param_mesh_mcast_enable, + 1, pdev->pdev_id); + + if (ret) { + ath11k_err(ar->ab, "failed to enable mesh mcast enable: (%d ", ret); + goto err; + } + + __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); + + /* todo: do we need to enable ani? */ + + ath11k_reg_update_chan_list(ar); + + ar->num_started_vdevs = 0; + ar->num_created_vdevs = 0; + ar->num_peers = 0; + + /* configure monitor status ring with default rx_filter to get rx status + * such as rssi, rx_duration. 
+ */ + ret = ath11k_mac_config_mon_status_default(ar, true); + if (ret) { + ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d) ", + ret); + goto err; + } + + mutex_unlock(&ar->conf_mutex); + + rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], + &ab->pdevs[ar->pdev_idx]); + + return 0; + +err: + ar->state = ath11k_state_off; + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static void ath11k_mac_op_stop(struct ieee80211_hw *hw) +{ + struct ath11k *ar = hw->priv; + struct htt_ppdu_stats_info *ppdu_stats, *tmp; + int ret; + + ath11k_mac_drain_tx(ar); + + mutex_lock(&ar->conf_mutex); + ret = ath11k_mac_config_mon_status_default(ar, false); + if (ret) + ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d) ", + ret); + + clear_bit(ath11k_cac_running, &ar->dev_flags); + ar->state = ath11k_state_off; + mutex_unlock(&ar->conf_mutex); + + cancel_delayed_work_sync(&ar->scan.timeout); + cancel_work_sync(&ar->regd_update_work); + + spin_lock_bh(&ar->data_lock); + list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { + list_del(&ppdu_stats->list); + kfree(ppdu_stats); + } + spin_unlock_bh(&ar->data_lock); + + rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], null); + + synchronize_rcu(); + + atomic_set(&ar->num_pending_mgmt_tx, 0); +} + +static void +ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif, + struct vdev_create_params *params) +{ + struct ath11k *ar = arvif->ar; + struct ath11k_pdev *pdev = ar->pdev; + + params->if_id = arvif->vdev_id; + params->type = arvif->vdev_type; + params->subtype = arvif->vdev_subtype; + params->pdev_id = pdev->pdev_id; + + if (pdev->cap.supported_bands & wmi_host_wlan_2g_cap) { + params->chains[nl80211_band_2ghz].tx = ar->num_tx_chains; + params->chains[nl80211_band_2ghz].rx = ar->num_rx_chains; + } + if (pdev->cap.supported_bands & wmi_host_wlan_5g_cap) { + params->chains[nl80211_band_5ghz].tx = ar->num_tx_chains; + 
params->chains[nl80211_band_5ghz].rx = ar->num_rx_chains; + } +} + +static u32 +ath11k_mac_prepare_he_mode(struct ath11k_pdev *pdev, u32 viftype) +{ + struct ath11k_pdev_cap *pdev_cap = &pdev->cap; + struct ath11k_band_cap *cap_band = null; + u32 *hecap_phy_ptr = null; + u32 hemode = 0; + + if (pdev->cap.supported_bands & wmi_host_wlan_2g_cap) + cap_band = &pdev_cap->band[nl80211_band_2ghz]; + else + cap_band = &pdev_cap->band[nl80211_band_5ghz]; + + hecap_phy_ptr = &cap_band->he_cap_phy_info[0]; + + hemode = field_prep(he_mode_su_tx_bfee, he_su_bfee_enable) | + field_prep(he_mode_su_tx_bfer, hecap_phy_subfmr_get(hecap_phy_ptr)) | + field_prep(he_mode_ul_mumimo, hecap_phy_ulmumimo_get(hecap_phy_ptr)); + + /* todo wds and other modes */ + if (viftype == nl80211_iftype_ap) { + hemode |= field_prep(he_mode_mu_tx_bfer, + hecap_phy_mubfmr_get(hecap_phy_ptr)) | + field_prep(he_mode_dl_ofdma, he_dl_muofdma_enable) | + field_prep(he_mode_ul_ofdma, he_ul_muofdma_enable); + } else { + hemode |= field_prep(he_mode_mu_tx_bfee, he_mu_bfee_enable); + } + + return hemode; +} + +static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar, + struct ath11k_vif *arvif) +{ + u32 param_id, param_value; + struct ath11k_base *ab = ar->ab; + int ret = 0; + + param_id = wmi_vdev_param_set_hemu_mode; + param_value = ath11k_mac_prepare_he_mode(ar->pdev, arvif->vif->type); + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ab, "failed to set vdev %d he mu mode: %d param_value %x ", + arvif->vdev_id, ret, param_value); + return ret; + } + param_id = wmi_vdev_param_set_he_sounding_mode; + param_value = + field_prep(he_vht_sounding_mode, he_vht_sounding_mode_enable) | + field_prep(he_trig_nontrig_sounding_mode, + he_trig_nontrig_sounding_mode_enable); + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ab, "failed to set vdev %d he mu mode: %d ", + arvif->vdev_id, ret); + return 
ret; + } + return ret; +} + +static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct vdev_create_params vdev_param = {0}; + struct peer_create_params peer_param; + u32 param_id, param_value; + u16 nss; + int i; + int ret; + int bit; + + vif->driver_flags |= ieee80211_vif_supports_uapsd; + + mutex_lock(&ar->conf_mutex); + + if (vif->type == nl80211_iftype_ap && + ar->num_peers > (ar->max_num_peers - 1)) { + ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware "); + ret = -enobufs; + goto err; + } + + if (ar->num_created_vdevs > (target_num_vdevs - 1)) { + ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d ", + target_num_vdevs); + ret = -ebusy; + goto err; + } + + memset(arvif, 0, sizeof(*arvif)); + + arvif->ar = ar; + arvif->vif = vif; + + init_list_head(&arvif->list); + + /* should we initialize any worker to handle connection loss indication + * from firmware in sta mode? 
+ */ + + for (i = 0; i < array_size(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + bit = __ffs64(ab->free_vdev_map); + + arvif->vdev_id = bit; + arvif->vdev_subtype = wmi_vdev_subtype_none; + + switch (vif->type) { + case nl80211_iftype_unspecified: + case nl80211_iftype_station: + arvif->vdev_type = wmi_vdev_type_sta; + break; + case nl80211_iftype_mesh_point: + arvif->vdev_subtype = wmi_vdev_subtype_mesh_11s; + /* fall through */ + case nl80211_iftype_ap: + arvif->vdev_type = wmi_vdev_type_ap; + break; + case nl80211_iftype_monitor: + arvif->vdev_type = wmi_vdev_type_monitor; + break; + default: + warn_on(1); + break; + } + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac add interface id %d type %d subtype %d map %llx ", + arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + ab->free_vdev_map); + + vif->cab_queue = arvif->vdev_id % (ath11k_hw_max_queues - 1); + for (i = 0; i < array_size(vif->hw_queue); i++) + vif->hw_queue[i] = i % (ath11k_hw_max_queues - 1); + + ath11k_mac_setup_vdev_create_params(arvif, &vdev_param); + + ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param); + if (ret) { + ath11k_warn(ab, "failed to create wmi vdev %d: %d ", + arvif->vdev_id, ret); + goto err; + } + + ar->num_created_vdevs++; + + ab->free_vdev_map &= ~(1ll << arvif->vdev_id); + spin_lock_bh(&ar->data_lock); + list_add(&arvif->list, &ar->arvifs); + spin_unlock_bh(&ar->data_lock); + + param_id = wmi_vdev_param_tx_encap_type; + param_value = ath11k_hw_txrx_native_wifi; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d ", + arvif->vdev_id, ret); + goto err_vdev_del; + } + + nss = get_num_chains(ar->cfg_tx_chainmask) ? 
: 1; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + wmi_vdev_param_nss, nss); + if (ret) { + ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d ", + arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret); + goto err_vdev_del; + } + + switch (arvif->vdev_type) { + case wmi_vdev_type_ap: + peer_param.vdev_id = arvif->vdev_id; + peer_param.peer_addr = vif->addr; + peer_param.peer_type = wmi_peer_type_default; + ret = ath11k_peer_create(ar, arvif, null, &peer_param); + if (ret) { + ath11k_warn(ab, "failed to vdev %d create peer for ap: %d ", + arvif->vdev_id, ret); + goto err_vdev_del; + } + + ret = ath11k_mac_set_kickout(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d ", + arvif->vdev_id, ret); + goto err_peer_del; + } + break; + case wmi_vdev_type_sta: + param_id = wmi_sta_ps_param_rx_wake_policy; + param_value = wmi_sta_ps_rx_wake_policy_wake; + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ar->ab, "failed to set vdev %d rx wake policy: %d ", + arvif->vdev_id, ret); + goto err_peer_del; + } + + param_id = wmi_sta_ps_param_tx_wake_threshold; + param_value = wmi_sta_ps_tx_wake_threshold_always; + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ar->ab, "failed to set vdev %d tx wake threshold: %d ", + arvif->vdev_id, ret); + goto err_peer_del; + } + + param_id = wmi_sta_ps_param_pspoll_count; + param_value = wmi_sta_ps_pspoll_count_no_max; + ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d ", + arvif->vdev_id, ret); + goto err_peer_del; + } + break; + default: + break; + } + + arvif->txpower = vif->bss_conf.txpower; + ret = ath11k_mac_txpower_recalc(ar); + if (ret) + goto err_peer_del; + + param_id = wmi_vdev_param_rts_threshold; + param_value = ar->hw->wiphy->rts_threshold; + ret = 
ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param_id, param_value); + if (ret) { + ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d ", + arvif->vdev_id, ret); + } + + ret = ath11k_mac_set_txbf_conf(arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to set txbf conf for vdev %d: %d ", + arvif->vdev_id, ret); + } + + ath11k_dp_vdev_tx_attach(ar, arvif); + + mutex_unlock(&ar->conf_mutex); + + return 0; + +err_peer_del: + if (arvif->vdev_type == wmi_vdev_type_ap) { + ar->num_peers--; + ath11k_wmi_send_peer_delete_cmd(ar, vif->addr, arvif->vdev_id); + } + +err_vdev_del: + ath11k_wmi_vdev_delete(ar, arvif->vdev_id); + ar->num_created_vdevs--; + ab->free_vdev_map |= 1ll << arvif->vdev_id; + spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); + spin_unlock_bh(&ar->data_lock); + +err: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx) +{ + struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx; + struct ath11k_skb_cb *skb_cb = ath11k_skb_cb((struct sk_buff *)skb); + + if (skb_cb->vif == vif) + skb_cb->vif = null; + + return 0; +} + +static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k_base *ab = ar->ab; + int ret; + int i; + + mutex_lock(&ar->conf_mutex); + + ath11k_dbg(ab, ath11k_dbg_mac, "mac remove interface (vdev %d) ", + arvif->vdev_id); + + ab->free_vdev_map |= 1ll << (arvif->vdev_id); + spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); + spin_unlock_bh(&ar->data_lock); + + if (arvif->vdev_type == wmi_vdev_type_ap) { + ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr); + if (ret) + ath11k_warn(ab, "failed to submit ap self-peer removal on vdev %d: %d ", + arvif->vdev_id, ret); + } + + ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id); + if (ret) + ath11k_warn(ab, "failed to delete wmi vdev %d: %d ", + 
arvif->vdev_id, ret); + + ar->num_created_vdevs--; + + ath11k_peer_cleanup(ar, arvif->vdev_id); + + idr_for_each(&ar->txmgmt_idr, + ath11k_mac_vif_txmgmt_idr_remove, vif); + + for (i = 0; i < dp_tcl_num_ring_max; i++) { + spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock); + idr_for_each(&ab->dp.tx_ring[i].txbuf_idr, + ath11k_mac_vif_unref, vif); + spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock); + } + + /* recalc txpower for remaining vdev */ + ath11k_mac_txpower_recalc(ar); + clear_bit(ath11k_flag_monitor_enabled, &ar->monitor_flags); + + /* todo: recal traffic pause state based on the available vdevs */ + + mutex_unlock(&ar->conf_mutex); +} + +/* fixme: has to be verified. */ +#define supported_filters \ + (fif_allmulti | \ + fif_control | \ + fif_pspoll | \ + fif_other_bss | \ + fif_bcn_prbresp_promisc | \ + fif_probe_req | \ + fif_fcsfail) + +static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct ath11k *ar = hw->priv; + bool reset_flag = false; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + + changed_flags &= supported_filters; + *total_flags &= supported_filters; + ar->filter_flags = *total_flags; + + /* for monitor mode */ + reset_flag = !(ar->filter_flags & fif_bcn_prbresp_promisc); + + ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); + if (!ret) { + if (!reset_flag) + set_bit(ath11k_flag_monitor_enabled, &ar->monitor_flags); + else + clear_bit(ath11k_flag_monitor_enabled, &ar->monitor_flags); + } else { + ath11k_warn(ar->ab, + "fail to set monitor filter: %d ", ret); + } + mutex_unlock(&ar->conf_mutex); +} + +static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) +{ + struct ath11k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + + *tx_ant = ar->cfg_tx_chainmask; + *rx_ant = ar->cfg_rx_chainmask; + + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static int ath11k_mac_op_set_antenna(struct ieee80211_hw 
*hw, u32 tx_ant, u32 rx_ant) +{ + struct ath11k *ar = hw->priv; + int ret; + + mutex_lock(&ar->conf_mutex); + ret = __ath11k_set_antenna(ar, tx_ant, rx_ant); + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct ath11k *ar = hw->priv; + int ret = -einval; + + mutex_lock(&ar->conf_mutex); + + switch (params->action) { + case ieee80211_ampdu_rx_start: + ret = ath11k_dp_rx_ampdu_start(ar, params); + break; + case ieee80211_ampdu_rx_stop: + ret = ath11k_dp_rx_ampdu_stop(ar, params); + break; + case ieee80211_ampdu_tx_start: + case ieee80211_ampdu_tx_stop_cont: + case ieee80211_ampdu_tx_stop_flush: + case ieee80211_ampdu_tx_stop_flush_cont: + case ieee80211_ampdu_tx_operational: + /* tx a-mpdu aggregation offloaded to hw/fw so deny mac80211 + * tx aggregation requests. + */ + ret = -eopnotsupp; + break; + } + + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac chanctx add freq %hu width %d ptr %pk ", + ctx->def.chan->center_freq, ctx->def.width, ctx); + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + /* todo: in case of multiple channel context, populate rx_channel from + * rx ppdu desc information. 
+ */ + ar->rx_channel = ctx->def.chan; + spin_unlock_bh(&ar->data_lock); + + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac chanctx remove freq %hu width %d ptr %pk ", + ctx->def.chan->center_freq, ctx->def.width, ctx); + + mutex_lock(&ar->conf_mutex); + + spin_lock_bh(&ar->data_lock); + /* todo: in case of there is one more channel context left, populate + * rx_channel with the channel of that remaining channel context. + */ + ar->rx_channel = null; + spin_unlock_bh(&ar->data_lock); + + mutex_unlock(&ar->conf_mutex); +} + +static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar) +{ + lockdep_assert_held(&ar->conf_mutex); + + if (test_bit(ath11k_flag_crash_flush, &ar->ab->dev_flags)) + return -eshutdown; + + if (!wait_for_completion_timeout(&ar->vdev_setup_done, + ath11k_vdev_setup_timeout_hz)) + return -etimedout; + + return ar->last_wmi_vdev_start_status ? 
-einval : 0; +} + +static int +ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif, + const struct cfg80211_chan_def *chandef, + bool restart) +{ + struct ath11k *ar = arvif->ar; + struct ath11k_base *ab = ar->ab; + struct wmi_vdev_start_req_arg arg = {}; + int he_support = arvif->vif->bss_conf.he_support; + int ret = 0; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->vdev_setup_done); + + arg.vdev_id = arvif->vdev_id; + arg.dtim_period = arvif->dtim_period; + arg.bcn_intval = arvif->beacon_interval; + + arg.channel.freq = chandef->chan->center_freq; + arg.channel.band_center_freq1 = chandef->center_freq1; + arg.channel.band_center_freq2 = chandef->center_freq2; + arg.channel.mode = + ath11k_phymodes[chandef->chan->band][chandef->width]; + + arg.channel.min_power = 0; + arg.channel.max_power = chandef->chan->max_power * 2; + arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; + arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; + + arg.pref_tx_streams = ar->num_tx_chains; + arg.pref_rx_streams = ar->num_rx_chains; + + if (arvif->vdev_type == wmi_vdev_type_ap) { + arg.ssid = arvif->u.ap.ssid; + arg.ssid_len = arvif->u.ap.ssid_len; + arg.hidden_ssid = arvif->u.ap.hidden_ssid; + + /* for now allow dfs for ap mode */ + arg.channel.chan_radar = + !!(chandef->chan->flags & ieee80211_chan_radar); + + arg.channel.passive = arg.channel.chan_radar; + + spin_lock_bh(&ab->base_lock); + arg.regdomain = ar->ab->dfs_region; + spin_unlock_bh(&ab->base_lock); + + /* todo: notify if secondary 80mhz also needs radar detection */ + if (he_support) { + ret = ath11k_set_he_mu_sounding_mode(ar, arvif); + if (ret) { + ath11k_warn(ar->ab, "failed to set he mode vdev %i ", + arg.vdev_id); + return ret; + } + } + } + + arg.channel.passive |= !!(chandef->chan->flags & ieee80211_chan_no_ir); + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac vdev %d start center_freq %d phymode %s ", + arg.vdev_id, arg.channel.freq, + 
ath11k_wmi_phymode_str(arg.channel.mode)); + + ret = ath11k_wmi_vdev_start(ar, &arg, restart); + if (ret) { + ath11k_warn(ar->ab, "failed to %s wmi vdev %i ", + restart ? "restart" : "start", arg.vdev_id); + return ret; + } + + ret = ath11k_mac_vdev_setup_sync(ar); + if (ret) { + ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d ", + arg.vdev_id, restart ? "restart" : "start", ret); + return ret; + } + + ar->num_started_vdevs++; + + /* enable cac flag in the driver by checking the channel dfs cac time, + * i.e dfs_cac_ms value which will be valid only for radar channels + * and state as nl80211_dfs_usable which indicates cac needs to be + * done before channel usage. this flags is used to drop rx packets. + * during cac. + */ + /* todo set the flag for other interface types as required */ + if (arvif->vdev_type == wmi_vdev_type_ap && + chandef->chan->dfs_cac_ms && + chandef->chan->dfs_state == nl80211_dfs_usable) { + set_bit(ath11k_cac_running, &ar->dev_flags); + ath11k_dbg(ab, ath11k_dbg_mac, + "cac started in chan_freq %d for vdev %d ", + arg.channel.freq, arg.vdev_id); + } + + return 0; +} + +static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif) +{ + struct ath11k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + reinit_completion(&ar->vdev_setup_done); + + spin_lock_bh(&ar->data_lock); + + ar->vdev_stop_status.stop_in_progress = true; + ar->vdev_stop_status.vdev_id = arvif->vdev_id; + + spin_unlock_bh(&ar->data_lock); + + ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id); + if (ret) { + ath11k_warn(ar->ab, "failed to stop wmi vdev %i: %d ", + arvif->vdev_id, ret); + goto err; + } + + ret = ath11k_mac_vdev_setup_sync(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d ", + arvif->vdev_id, ret); + goto err; + } + + warn_on(ar->num_started_vdevs == 0); + + ar->num_started_vdevs--; + + if (test_bit(ath11k_cac_running, &ar->dev_flags)) { + clear_bit(ath11k_cac_running, &ar->dev_flags); + 
ath11k_dbg(ar->ab, ath11k_dbg_mac, "cac stopped for vdev %d ", + arvif->vdev_id); + } + + return 0; +err: + spin_lock_bh(&ar->data_lock); + ar->vdev_stop_status.stop_in_progress = false; + spin_unlock_bh(&ar->data_lock); + + return ret; +} + +static int ath11k_mac_vdev_start(struct ath11k_vif *arvif, + const struct cfg80211_chan_def *chandef) +{ + return ath11k_mac_vdev_start_restart(arvif, chandef, false); +} + +static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif, + const struct cfg80211_chan_def *chandef) +{ + return ath11k_mac_vdev_start_restart(arvif, chandef, true); +} + +struct ath11k_mac_change_chanctx_arg { + struct ieee80211_chanctx_conf *ctx; + struct ieee80211_vif_chanctx_switch *vifs; + int n_vifs; + int next_vif; +}; + +static void +ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath11k_mac_change_chanctx_arg *arg = data; + + if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) + return; + + arg->n_vifs++; +} + +static void +ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ath11k_mac_change_chanctx_arg *arg = data; + struct ieee80211_chanctx_conf *ctx; + + ctx = rcu_access_pointer(vif->chanctx_conf); + if (ctx != arg->ctx) + return; + + if (warn_on(arg->next_vif == arg->n_vifs)) + return; + + arg->vifs[arg->next_vif].vif = vif; + arg->vifs[arg->next_vif].old_ctx = ctx; + arg->vifs[arg->next_vif].new_ctx = ctx; + arg->next_vif++; +} + +static void +ath11k_mac_update_vif_chan(struct ath11k *ar, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_vif *arvif; + int ret; + int i; + + lockdep_assert_held(&ar->conf_mutex); + + for (i = 0; i < n_vifs; i++) { + arvif = (void *)vifs[i].vif->drv_priv; + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d ", + arvif->vdev_id, + vifs[i].old_ctx->def.chan->center_freq, + vifs[i].new_ctx->def.chan->center_freq, 
+ vifs[i].old_ctx->def.width, + vifs[i].new_ctx->def.width); + + if (warn_on(!arvif->is_started)) + continue; + + if (warn_on(!arvif->is_up)) + continue; + + ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id); + if (ret) { + ath11k_warn(ab, "failed to down vdev %d: %d ", + arvif->vdev_id, ret); + continue; + } + } + + /* all relevant vdevs are downed and associated channel resources + * should be available for the channel switch now. + */ + + /* todo: update ar->rx_channel */ + + for (i = 0; i < n_vifs; i++) { + arvif = (void *)vifs[i].vif->drv_priv; + + if (warn_on(!arvif->is_started)) + continue; + + if (warn_on(!arvif->is_up)) + continue; + + ret = ath11k_mac_setup_bcn_tmpl(arvif); + if (ret) + ath11k_warn(ab, "failed to update bcn tmpl during csa: %d ", + ret); + + ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def); + if (ret) { + ath11k_warn(ab, "failed to restart vdev %d: %d ", + arvif->vdev_id, ret); + continue; + } + + ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, + arvif->bssid); + if (ret) { + ath11k_warn(ab, "failed to bring vdev up %d: %d ", + arvif->vdev_id, ret); + continue; + } + } +} + +static void +ath11k_mac_update_active_vif_chan(struct ath11k *ar, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx }; + + lockdep_assert_held(&ar->conf_mutex); + + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iface_iter_normal, + ath11k_mac_change_chanctx_cnt_iter, + &arg); + if (arg.n_vifs == 0) + return; + + arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), gfp_kernel); + if (!arg.vifs) + return; + + ieee80211_iterate_active_interfaces_atomic(ar->hw, + ieee80211_iface_iter_normal, + ath11k_mac_change_chanctx_fill_iter, + &arg); + + ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); + + kfree(arg.vifs); +} + +static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + u32 changed) +{ + struct ath11k *ar = hw->priv; + struct 
ath11k_base *ab = ar->ab; + + mutex_lock(&ar->conf_mutex); + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac chanctx change freq %hu width %d ptr %pk changed %x ", + ctx->def.chan->center_freq, ctx->def.width, ctx, changed); + + /* this shouldn't really happen because channel switching should use + * switch_vif_chanctx(). + */ + if (warn_on(changed & ieee80211_chanctx_change_channel)) + goto unlock; + + if (changed & ieee80211_chanctx_change_width) + ath11k_mac_update_active_vif_chan(ar, ctx); + + /* todo: recalc radar detection */ + +unlock: + mutex_unlock(&ar->conf_mutex); +} + +static int +ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac chanctx assign ptr %pk vdev_id %i ", + ctx, arvif->vdev_id); + + if (warn_on(arvif->is_started)) { + mutex_unlock(&ar->conf_mutex); + return -ebusy; + } + + ret = ath11k_mac_vdev_start(arvif, &ctx->def); + if (ret) { + ath11k_warn(ab, "failed to start vdev %i addr %pm on freq %d: %d ", + arvif->vdev_id, vif->addr, + ctx->def.chan->center_freq, ret); + goto err; + } + if (arvif->vdev_type == wmi_vdev_type_monitor) { + ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id); + if (ret) + goto err; + } + + arvif->is_started = true; + + /* todo: setup ps and cts/rts protection */ + + mutex_unlock(&ar->conf_mutex); + + return 0; + +err: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static void +ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + struct ath11k *ar = hw->priv; + struct ath11k_base *ab = ar->ab; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + int ret; + + mutex_lock(&ar->conf_mutex); + + ath11k_dbg(ab, ath11k_dbg_mac, + "mac chanctx unassign ptr %pk vdev_id %i ", 
+ ctx, arvif->vdev_id); + + warn_on(!arvif->is_started); + + ret = ath11k_mac_vdev_stop(arvif); + if (ret) + ath11k_warn(ab, "failed to stop vdev %i: %d ", + arvif->vdev_id, ret); + + arvif->is_started = false; + + mutex_unlock(&ar->conf_mutex); +} + +static int +ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + struct ath11k *ar = hw->priv; + + mutex_lock(&ar->conf_mutex); + + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "mac chanctx switch n_vifs %d mode %d ", + n_vifs, mode); + ath11k_mac_update_vif_chan(ar, vifs, n_vifs); + + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static int +ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value) +{ + struct ath11k_vif *arvif; + int ret = 0; + + mutex_lock(&ar->conf_mutex); + list_for_each_entry(arvif, &ar->arvifs, list) { + ath11k_dbg(ar->ab, ath11k_dbg_mac, "setting mac vdev %d param %d value %d ", + param, arvif->vdev_id, value); + + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + param, value); + if (ret) { + ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d ", + param, arvif->vdev_id, ret); + break; + } + } + mutex_unlock(&ar->conf_mutex); + return ret; +} + +/* mac80211 stores device specific rts/fragmentation threshold value, + * this is set interface specific to firmware from ath11k driver + */ +static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + struct ath11k *ar = hw->priv; + int param_id = wmi_vdev_param_rts_threshold; + + return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value); +} + +static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) +{ + /* even though there's a wmi vdev param for fragmentation threshold no + * known firmware actually implements it. 
moreover it is not possible to + * rely frame fragmentation to mac80211 because firmware clears the + * "more fragments" bit in frame control making it impossible for remote + * devices to reassemble frames. + * + * hence implement a dummy callback just to say fragmentation isn't + * supported. this effectively prevents mac80211 from doing frame + * fragmentation in software. + */ + return -eopnotsupp; +} + +static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct ath11k *ar = hw->priv; + long time_left; + + if (drop) + return; + + time_left = wait_event_timeout(ar->dp.tx_empty_waitq, + (atomic_read(&ar->dp.num_tx_pending) == 0), + ath11k_flush_timeout); + if (time_left == 0) + ath11k_warn(ar->ab, "failed to flush transmit queue %ld ", time_left); +} + +static int +ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + int i; + + for (i = 0; i < array_size(mask->control[band].ht_mcs); i++) + num_rates += hweight16(mask->control[band].ht_mcs[i]); + + return num_rates; +} + +static bool +ath11k_mac_has_single_legacy_rate(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int num_rates = 0; + + num_rates = hweight32(mask->control[band].legacy); + + if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask)) + return false; + + if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) + return false; + + return num_rates == 1; +} + +static bool +ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + int *nss) +{ + struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; + u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); + u8 ht_nss_mask = 0; + u8 vht_nss_mask = 0; + int i; + + /* no need to consider legacy here. 
basic rates are always present + * in bitrate mask + */ + + for (i = 0; i < array_size(mask->control[band].ht_mcs); i++) { + if (mask->control[band].ht_mcs[i] == 0) + continue; + else if (mask->control[band].ht_mcs[i] == + sband->ht_cap.mcs.rx_mask[i]) + ht_nss_mask |= bit(i); + else + return false; + } + + for (i = 0; i < array_size(mask->control[band].vht_mcs); i++) { + if (mask->control[band].vht_mcs[i] == 0) + continue; + else if (mask->control[band].vht_mcs[i] == + ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) + vht_nss_mask |= bit(i); + else + return false; + } + + if (ht_nss_mask != vht_nss_mask) + return false; + + if (ht_nss_mask == 0) + return false; + + if (bit(fls(ht_nss_mask)) - 1 != ht_nss_mask) + return false; + + *nss = fls(ht_nss_mask); + + return true; +} + +static int +ath11k_mac_get_single_legacy_rate(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask, + u32 *rate, u8 *nss) +{ + int rate_idx; + u16 bitrate; + u8 preamble; + u8 hw_rate; + + if (hweight32(mask->control[band].legacy) != 1) + return -einval; + + rate_idx = ffs(mask->control[band].legacy) - 1; + + if (band == nl80211_band_5ghz) + rate_idx += ath11k_mac_first_ofdm_rate_idx; + + hw_rate = ath11k_legacy_rates[rate_idx].hw_value; + bitrate = ath11k_legacy_rates[rate_idx].bitrate; + + if (ath11k_mac_bitrate_is_cck(bitrate)) + preamble = wmi_rate_preamble_cck; + else + preamble = wmi_rate_preamble_ofdm; + + *nss = 1; + *rate = ath11k_hw_rate_code(hw_rate, 0, preamble); + + return 0; +} + +static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif, + u32 rate, u8 nss, u8 sgi, u8 ldpc) +{ + struct ath11k *ar = arvif->ar; + u32 vdev_param; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ath11k_dbg(ar->ab, ath11k_dbg_mac, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu ", + arvif->vdev_id, rate, nss, sgi); + + vdev_param = wmi_vdev_param_fixed_rate; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + 
vdev_param, rate); + if (ret) { + ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d ", + rate, ret); + return ret; + } + + vdev_param = wmi_vdev_param_nss; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, nss); + if (ret) { + ath11k_warn(ar->ab, "failed to set nss param %d: %d ", + nss, ret); + return ret; + } + + vdev_param = wmi_vdev_param_sgi; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, sgi); + if (ret) { + ath11k_warn(ar->ab, "failed to set sgi param %d: %d ", + sgi, ret); + return ret; + } + + vdev_param = wmi_vdev_param_ldpc; + ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, + vdev_param, ldpc); + if (ret) { + ath11k_warn(ar->ab, "failed to set ldpc param %d: %d ", + ldpc, ret); + return ret; + } + + return 0; +} + +static bool +ath11k_mac_vht_mcs_range_present(struct ath11k *ar, + enum nl80211_band band, + const struct cfg80211_bitrate_mask *mask) +{ + int i; + u16 vht_mcs; + + for (i = 0; i < nl80211_vht_nss_max; i++) { + vht_mcs = mask->control[band].vht_mcs[i]; + + switch (vht_mcs) { + case 0: + case bit(8) - 1: + case bit(9) - 1: + case bit(10) - 1: + break; + default: + return false; + } + } + + return true; +} + +static void ath11k_mac_set_bitrate_mask_iter(void *data, + struct ieee80211_sta *sta) +{ + struct ath11k_vif *arvif = data; + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + struct ath11k *ar = arvif->ar; + + spin_lock_bh(&ar->data_lock); + arsta->changed |= ieee80211_rc_supp_rates_changed; + spin_unlock_bh(&ar->data_lock); + + ieee80211_queue_work(ar->hw, &arsta->update_wk); +} + +static void ath11k_mac_disable_peer_fixed_rate(void *data, + struct ieee80211_sta *sta) +{ + struct ath11k_vif *arvif = data; + struct ath11k *ar = arvif->ar; + int ret; + + ret = ath11k_wmi_set_peer_param(ar, sta->addr, + arvif->vdev_id, + wmi_peer_param_fixed_rate, + wmi_fixed_rate_none); + if (ret) + ath11k_warn(ar->ab, + "failed to disable peer fixed rate for sta %pm ret %d 
", + sta->addr, ret); +} + +static int +ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + const struct cfg80211_bitrate_mask *mask) +{ + struct ath11k_vif *arvif = (void *)vif->drv_priv; + struct cfg80211_chan_def def; + struct ath11k *ar = arvif->ar; + enum nl80211_band band; + const u8 *ht_mcs_mask; + const u16 *vht_mcs_mask; + u32 rate; + u8 nss; + u8 sgi; + u8 ldpc; + int single_nss; + int ret; + int num_rates; + + if (ath11k_mac_vif_chan(vif, &def)) + return -eperm; + + band = def.chan->band; + ht_mcs_mask = mask->control[band].ht_mcs; + vht_mcs_mask = mask->control[band].vht_mcs; + ldpc = !!(ar->ht_cap_info & wmi_ht_cap_ldpc); + + sgi = mask->control[band].gi; + if (sgi == nl80211_txrate_force_lgi) + return -einval; + + /* mac80211 doesn't support sending a fixed ht/vht mcs alone, rather it + * requires passing atleast one of used basic rates along with them. + * fixed rate setting across different preambles(legacy, ht, vht) is + * not supported by the fw. hence use of fixed_rate vdev param is not + * suitable for setting single ht/vht rates. + * but, there could be a single basic rate passed from userspace which + * can be done through the fixed_rate param. 
+ */ + if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) { + ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate, + &nss); + if (ret) { + ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d ", + arvif->vdev_id, ret); + return ret; + } + ieee80211_iterate_stations_atomic(ar->hw, + ath11k_mac_disable_peer_fixed_rate, + arvif); + } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask, + &single_nss)) { + rate = wmi_fixed_rate_none; + nss = single_nss; + } else { + rate = wmi_fixed_rate_none; + nss = min_t(u32, ar->num_tx_chains, + max(ath11k_mac_max_ht_nss(ht_mcs_mask), + ath11k_mac_max_vht_nss(vht_mcs_mask))); + + /* if multiple rates across different preambles are given + * we can reconfigure this info with all peers using peer_assoc + * command with the below exception cases. + * - single vht rate : peer_assoc command accommodates only mcs + * range values i.e 0-7, 0-8, 0-9 for vht. though mac80211 + * mandates passing basic rates along with ht/vht rates, fw + * doesn't allow switching from vht to legacy. hence instead of + * setting legacy and vht rates using ratemask_cmd vdev cmd, + * we could set this vht rate as peer fixed rate param, which + * will override fixed rate and fw rate control algorithm. + * if single vht rate is passed along with ht rates, we select + * the vht rate as fixed rate for vht peers. + * - multiple vht rates : when multiple vht rates are given,this + * can be set using ratemask cmd which uses fw rate-ctl alg. + * todo: setting multiple vht mcs and replacing peer_assoc with + * ratemask_cmdid can cover all use cases of setting rates + * across multiple preambles and rates within same type. + * but requires more validation of the command at this point. 
+ */ + + num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, + mask); + + if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) && + num_rates > 1) { + /* todo: handle multiple vht mcs values setting using + * ratemask cmd + */ + ath11k_warn(ar->ab, + "setting more than one mcs value in bitrate mask not supported "); + return -einval; + } + + ieee80211_iterate_stations_atomic(ar->hw, + ath11k_mac_disable_peer_fixed_rate, + arvif); + + mutex_lock(&ar->conf_mutex); + + arvif->bitrate_mask = *mask; + ieee80211_iterate_stations_atomic(ar->hw, + ath11k_mac_set_bitrate_mask_iter, + arvif); + + mutex_unlock(&ar->conf_mutex); + } + + mutex_lock(&ar->conf_mutex); + + ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); + if (ret) { + ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d ", + arvif->vdev_id, ret); + } + + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static void +ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, + enum ieee80211_reconfig_type reconfig_type) +{ + struct ath11k *ar = hw->priv; + + if (reconfig_type != ieee80211_reconfig_type_restart) + return; + + mutex_lock(&ar->conf_mutex); + + if (ar->state == ath11k_state_restarted) { + ath11k_warn(ar->ab, "pdev %d successfully recovered ", + ar->pdev->pdev_id); + ar->state = ath11k_state_on; + ieee80211_wake_queues(ar->hw); + } + + mutex_unlock(&ar->conf_mutex); +} + +static void +ath11k_mac_update_bss_chan_survey(struct ath11k *ar, + struct ieee80211_channel *channel) +{ + int ret; + enum wmi_bss_chan_info_req_type type = wmi_bss_survey_req_type_read; + + lockdep_assert_held(&ar->conf_mutex); + + if (!test_bit(wmi_tlv_service_bss_channel_info_64, ar->ab->wmi_sc.svc_map) || + ar->rx_channel != channel) + return; + + if (ar->scan.state != ath11k_scan_idle) { + ath11k_dbg(ar->ab, ath11k_dbg_mac, + "ignoring bss chan info req while scanning.. 
"); + return; + } + + reinit_completion(&ar->bss_survey_done); + + ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type); + if (ret) { + ath11k_warn(ar->ab, "failed to send pdev bss chan info request "); + return; + } + + ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * hz); + if (ret == 0) + ath11k_warn(ar->ab, "bss channel survey timed out "); +} + +static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, + struct survey_info *survey) +{ + struct ath11k *ar = hw->priv; + struct ieee80211_supported_band *sband; + struct survey_info *ar_survey; + int ret = 0; + + if (idx >= ath11k_num_chans) + return -enoent; + + ar_survey = &ar->survey[idx]; + + mutex_lock(&ar->conf_mutex); + + sband = hw->wiphy->bands[nl80211_band_2ghz]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = null; + } + + if (!sband) + sband = hw->wiphy->bands[nl80211_band_5ghz]; + + if (!sband || idx >= sband->n_channels) { + ret = -enoent; + goto exit; + } + + ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); + + spin_lock_bh(&ar->data_lock); + memcpy(survey, ar_survey, sizeof(*survey)); + spin_unlock_bh(&ar->data_lock); + + survey->channel = &sband->channels[idx]; + + if (ar->rx_channel == survey->channel) + survey->filled |= survey_info_in_use; + +exit: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv; + + sinfo->rx_duration = arsta->rx_duration; + sinfo->filled |= bit_ull(nl80211_sta_info_rx_duration); + + if (!arsta->txrate.legacy && !arsta->txrate.nss) + return; + + if (arsta->txrate.legacy) { + sinfo->txrate.legacy = arsta->txrate.legacy; + } else { + sinfo->txrate.mcs = arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + sinfo->txrate.he_gi = 
arsta->txrate.he_gi; + sinfo->txrate.he_dcm = arsta->txrate.he_dcm; + sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= bit_ull(nl80211_sta_info_tx_bitrate); + + /* todo: use real nf instead of default one. */ + sinfo->signal = arsta->rssi_comb + ath11k_default_noise_floor; +} + +static const struct ieee80211_ops ath11k_ops = { + .tx = ath11k_mac_op_tx, + .start = ath11k_mac_op_start, + .stop = ath11k_mac_op_stop, + .reconfig_complete = ath11k_mac_op_reconfig_complete, + .add_interface = ath11k_mac_op_add_interface, + .remove_interface = ath11k_mac_op_remove_interface, + .config = ath11k_mac_op_config, + .bss_info_changed = ath11k_mac_op_bss_info_changed, + .configure_filter = ath11k_mac_op_configure_filter, + .hw_scan = ath11k_mac_op_hw_scan, + .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan, + .set_key = ath11k_mac_op_set_key, + .sta_state = ath11k_mac_op_sta_state, + .sta_rc_update = ath11k_mac_op_sta_rc_update, + .conf_tx = ath11k_mac_op_conf_tx, + .set_antenna = ath11k_mac_op_set_antenna, + .get_antenna = ath11k_mac_op_get_antenna, + .ampdu_action = ath11k_mac_op_ampdu_action, + .add_chanctx = ath11k_mac_op_add_chanctx, + .remove_chanctx = ath11k_mac_op_remove_chanctx, + .change_chanctx = ath11k_mac_op_change_chanctx, + .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx, + .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx, + .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx, + .set_rts_threshold = ath11k_mac_op_set_rts_threshold, + .set_frag_threshold = ath11k_mac_op_set_frag_threshold, + .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask, + .get_survey = ath11k_mac_op_get_survey, + .flush = ath11k_mac_op_flush, + .sta_statistics = ath11k_mac_op_sta_statistics, + cfg80211_testmode_cmd(ath11k_tm_cmd) +#ifdef config_mac80211_debugfs + .sta_add_debugfs = ath11k_sta_add_debugfs, +#endif +}; + +static const struct ieee80211_iface_limit ath11k_if_limits[] = { + { + .max = 1, + 
.types = bit(nl80211_iftype_station), + }, + { + .max = 16, + .types = bit(nl80211_iftype_ap) +#ifdef config_mac80211_mesh + | bit(nl80211_iftype_mesh_point) +#endif + }, +}; + +static const struct ieee80211_iface_combination ath11k_if_comb[] = { + { + .limits = ath11k_if_limits, + .n_limits = array_size(ath11k_if_limits), + .max_interfaces = 16, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .beacon_int_min_gcd = 100, + .radar_detect_widths = bit(nl80211_chan_width_20_noht) | + bit(nl80211_chan_width_20) | + bit(nl80211_chan_width_40) | + bit(nl80211_chan_width_80), + }, +}; + +static void ath11k_mac_update_ch_list(struct ath11k *ar, + struct ieee80211_supported_band *band, + u32 freq_low, u32 freq_high) +{ + int i; + + if (!(freq_low && freq_high)) + return; + + for (i = 0; i < band->n_channels; i++) { + if (band->channels[i].center_freq < freq_low || + band->channels[i].center_freq > freq_high) + band->channels[i].flags |= ieee80211_chan_disabled; + } +} + +static int ath11k_mac_setup_channels_rates(struct ath11k *ar, + u32 supported_bands) +{ + struct ieee80211_supported_band *band; + struct ath11k_hal_reg_capabilities_ext *reg_cap; + void *channels; + + build_bug_on((array_size(ath11k_2ghz_channels) + + array_size(ath11k_5ghz_channels)) != + ath11k_num_chans); + + reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; + + if (supported_bands & wmi_host_wlan_2g_cap) { + channels = kmemdup(ath11k_2ghz_channels, + sizeof(ath11k_2ghz_channels), + gfp_kernel); + if (!channels) + return -enomem; + + band = &ar->mac.sbands[nl80211_band_2ghz]; + band->n_channels = array_size(ath11k_2ghz_channels); + band->channels = channels; + band->n_bitrates = ath11k_g_rates_size; + band->bitrates = ath11k_g_rates; + ar->hw->wiphy->bands[nl80211_band_2ghz] = band; + ath11k_mac_update_ch_list(ar, band, + reg_cap->low_2ghz_chan, + reg_cap->high_2ghz_chan); + } + + if (supported_bands & wmi_host_wlan_5g_cap) { + channels = kmemdup(ath11k_5ghz_channels, + 
sizeof(ath11k_5ghz_channels), + gfp_kernel); + if (!channels) { + kfree(ar->mac.sbands[nl80211_band_2ghz].channels); + return -enomem; + } + + band = &ar->mac.sbands[nl80211_band_5ghz]; + band->n_channels = array_size(ath11k_5ghz_channels); + band->channels = channels; + band->n_bitrates = ath11k_a_rates_size; + band->bitrates = ath11k_a_rates; + ar->hw->wiphy->bands[nl80211_band_5ghz] = band; + ath11k_mac_update_ch_list(ar, band, + reg_cap->low_5ghz_chan, + reg_cap->high_5ghz_chan); + } + + return 0; +} + +static const u8 ath11k_if_types_ext_capa[] = { + [0] = wlan_ext_capa1_ext_channel_switching, + [7] = wlan_ext_capa8_opmode_notif, +}; + +static const u8 ath11k_if_types_ext_capa_sta[] = { + [0] = wlan_ext_capa1_ext_channel_switching, + [7] = wlan_ext_capa8_opmode_notif, + [9] = wlan_ext_capa10_twt_requester_support, +}; + +static const u8 ath11k_if_types_ext_capa_ap[] = { + [0] = wlan_ext_capa1_ext_channel_switching, + [7] = wlan_ext_capa8_opmode_notif, + [9] = wlan_ext_capa10_twt_responder_support, +}; + +static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = { + { + .extended_capabilities = ath11k_if_types_ext_capa, + .extended_capabilities_mask = ath11k_if_types_ext_capa, + .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa), + }, { + .iftype = nl80211_iftype_station, + .extended_capabilities = ath11k_if_types_ext_capa_sta, + .extended_capabilities_mask = ath11k_if_types_ext_capa_sta, + .extended_capabilities_len = + sizeof(ath11k_if_types_ext_capa_sta), + }, { + .iftype = nl80211_iftype_ap, + .extended_capabilities = ath11k_if_types_ext_capa_ap, + .extended_capabilities_mask = ath11k_if_types_ext_capa_ap, + .extended_capabilities_len = + sizeof(ath11k_if_types_ext_capa_ap), + }, +}; + +static int ath11k_mac_register(struct ath11k *ar) +{ + struct ath11k_base *ab = ar->ab; + struct ath11k_pdev_cap *cap = &ar->pdev->cap; + static const u32 cipher_suites[] = { + wlan_cipher_suite_tkip, + wlan_cipher_suite_ccmp, + 
wlan_cipher_suite_aes_cmac, + wlan_cipher_suite_bip_cmac_256, + wlan_cipher_suite_bip_gmac_128, + wlan_cipher_suite_bip_gmac_256, + wlan_cipher_suite_gcmp, + wlan_cipher_suite_gcmp_256, + wlan_cipher_suite_ccmp_256, + }; + int ret; + u32 ht_cap = 0; + + ath11k_pdev_caps_update(ar); + + set_ieee80211_perm_addr(ar->hw, ar->mac_addr); + + set_ieee80211_dev(ar->hw, ab->dev); + + ret = ath11k_mac_setup_channels_rates(ar, + cap->supported_bands); + if (ret) + goto err_free; + + ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap); + + ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask; + ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask; + + ar->hw->wiphy->interface_modes = bit(nl80211_iftype_station) | + bit(nl80211_iftype_ap) | + bit(nl80211_iftype_mesh_point); + + ieee80211_hw_set(ar->hw, signal_dbm); + ieee80211_hw_set(ar->hw, supports_ps); + ieee80211_hw_set(ar->hw, supports_dynamic_ps); + ieee80211_hw_set(ar->hw, mfp_capable); + ieee80211_hw_set(ar->hw, reports_tx_ack_status); + ieee80211_hw_set(ar->hw, has_rate_control); + ieee80211_hw_set(ar->hw, ap_link_ps); + ieee80211_hw_set(ar->hw, spectrum_mgmt); + ieee80211_hw_set(ar->hw, support_fast_xmit); + ieee80211_hw_set(ar->hw, connection_monitor); + ieee80211_hw_set(ar->hw, supports_per_sta_gtk); + ieee80211_hw_set(ar->hw, want_monitor_vif); + ieee80211_hw_set(ar->hw, chanctx_sta_csa); + ieee80211_hw_set(ar->hw, queue_control); + ieee80211_hw_set(ar->hw, supports_tx_frag); + ieee80211_hw_set(ar->hw, reports_low_ack); + if (ht_cap & wmi_ht_cap_enabled) { + ieee80211_hw_set(ar->hw, ampdu_aggregation); + ieee80211_hw_set(ar->hw, tx_ampdu_setup_in_hw); + ieee80211_hw_set(ar->hw, supports_reordering_buffer); + ieee80211_hw_set(ar->hw, supports_amsdu_in_ampdu); + } + + ar->hw->wiphy->features |= nl80211_feature_static_smps; + ar->hw->wiphy->flags |= wiphy_flag_ibss_rsn; + + /* todo: check if ht capability advertised from firmware is different + * for each band for a dual band capable radio. 
it will be tricky to + * handle it when the ht capability different for each band. + */ + if (ht_cap & wmi_ht_cap_dynamic_smps) + ar->hw->wiphy->features |= nl80211_feature_dynamic_smps; + + ar->hw->wiphy->max_scan_ssids = wlan_scan_params_max_ssid; + ar->hw->wiphy->max_scan_ie_len = wlan_scan_params_max_ie_len; + + ar->hw->max_listen_interval = ath11k_max_hw_listen_interval; + + ar->hw->wiphy->flags |= wiphy_flag_has_remain_on_channel; + ar->hw->wiphy->flags |= wiphy_flag_has_channel_switch; + ar->hw->wiphy->max_remain_on_channel_duration = 5000; + + ar->hw->wiphy->flags |= wiphy_flag_ap_uapsd; + ar->hw->wiphy->features |= nl80211_feature_ap_mode_chan_width_change | + nl80211_feature_ap_scan; + + ar->max_num_stations = target_num_stations; + ar->max_num_peers = target_num_peers_pdev; + + ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; + + ar->hw->queues = ath11k_hw_max_queues; + ar->hw->offchannel_tx_hw_queue = ath11k_hw_max_queues - 1; + ar->hw->max_rx_aggregation_subframes = ieee80211_max_ampdu_buf; + + ar->hw->vif_data_size = sizeof(struct ath11k_vif); + ar->hw->sta_data_size = sizeof(struct ath11k_sta); + + ar->hw->wiphy->iface_combinations = ath11k_if_comb; + ar->hw->wiphy->n_iface_combinations = array_size(ath11k_if_comb); + + wiphy_ext_feature_set(ar->hw->wiphy, nl80211_ext_feature_cqm_rssi_list); + + ar->hw->wiphy->cipher_suites = cipher_suites; + ar->hw->wiphy->n_cipher_suites = array_size(cipher_suites); + + ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa; + ar->hw->wiphy->num_iftype_ext_capab = + array_size(ath11k_iftypes_ext_capa); + + ath11k_reg_init(ar); + + /* advertise hw checksum offload capabilities */ + ar->hw->netdev_features = netif_f_hw_csum; + + ret = ieee80211_register_hw(ar->hw); + if (ret) { + ath11k_err(ar->ab, "ieee80211 registration failed: %d ", ret); + goto err_free; + } + + /* apply the regd received during initialization */ + ret = ath11k_regd_update(ar, true); + if (ret) { + ath11k_err(ar->ab, "ath11k regd update 
failed: %d ", ret); + goto err_free; + } + + ret = ath11k_debug_register(ar); + if (ret) { + ath11k_err(ar->ab, "debugfs registration failed: %d ", ret); + goto err_free; + } + + return 0; + +err_free: + kfree(ar->mac.sbands[nl80211_band_2ghz].channels); + kfree(ar->mac.sbands[nl80211_band_5ghz].channels); + + set_ieee80211_dev(ar->hw, null); + return ret; +} + +void ath11k_mac_unregister(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev *pdev; + int i; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (!ar) + continue; + cancel_work_sync(&ar->regd_update_work); + + ieee80211_unregister_hw(ar->hw); + + idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar); + idr_destroy(&ar->txmgmt_idr); + + kfree(ar->mac.sbands[nl80211_band_2ghz].channels); + kfree(ar->mac.sbands[nl80211_band_5ghz].channels); + + set_ieee80211_dev(ar->hw, null); + } +} + +int ath11k_mac_create(struct ath11k_base *ab) +{ + struct ieee80211_hw *hw; + struct ath11k *ar; + struct ath11k_pdev *pdev; + int ret; + int i; + + if (test_bit(ath11k_flag_registered, &ab->dev_flags)) + return 0; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops); + if (!hw) { + ath11k_warn(ab, "failed to allocate mac80211 hw device "); + ret = -enomem; + goto err_destroy_mac; + } + + ar = hw->priv; + ar->hw = hw; + ar->ab = ab; + ar->pdev = pdev; + ar->pdev_idx = i; + ar->lmac_id = ath11k_core_get_hw_mac_id(ab, i); + + ar->wmi = &ab->wmi_sc.wmi[i]; + /* fixme wmi[0] is already initialized during attach, + * should we do this again? 
+ */ + ath11k_wmi_pdev_attach(ab, i); + + ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; + ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; + ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask); + ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask); + + if (ab->pdevs_macaddr_valid) { + ether_addr_copy(ar->mac_addr, pdev->mac_addr); + } else { + ether_addr_copy(ar->mac_addr, ab->mac_addr); + ar->mac_addr[4] += i; + } + + pdev->ar = ar; + spin_lock_init(&ar->data_lock); + init_list_head(&ar->arvifs); + init_list_head(&ar->ppdu_stats_info); + mutex_init(&ar->conf_mutex); + init_completion(&ar->vdev_setup_done); + init_completion(&ar->peer_assoc_done); + init_completion(&ar->install_key_done); + init_completion(&ar->bss_survey_done); + init_completion(&ar->scan.started); + init_completion(&ar->scan.completed); + init_delayed_work(&ar->scan.timeout, ath11k_scan_timeout_work); + init_work(&ar->regd_update_work, ath11k_regd_update_work); + + init_work(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work); + skb_queue_head_init(&ar->wmi_mgmt_tx_queue); + clear_bit(ath11k_flag_monitor_enabled, &ar->monitor_flags); + + ret = ath11k_mac_register(ar); + if (ret) { + ath11k_warn(ab, "failed to register hw device "); + pdev->ar = null; + ieee80211_free_hw(hw); + goto err_destroy_mac; + } + + idr_init(&ar->txmgmt_idr); + spin_lock_init(&ar->txmgmt_idr_lock); + } + + /* initialize channel counters frequency value in hertz */ + ab->cc_freq_hz = ipq8074_cc_freq_hertz; + ab->free_vdev_map = (1ll << (ab->num_radios * target_num_vdevs)) - 1; + + return 0; + +err_destroy_mac: + ath11k_mac_destroy(ab); + + return ret; +} + +void ath11k_mac_destroy(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_pdev *pdev; + int i; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ar = pdev->ar; + if (!ar) + continue; + + ieee80211_free_hw(ar->hw); + pdev->ar = null; + } +} diff --git a/drivers/net/wireless/ath/ath11k/mac.h 
b/drivers/net/wireless/ath/ath11k/mac.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/mac.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#ifndef ath11k_mac_h +#define ath11k_mac_h + +#include <net/mac80211.h> +#include <net/cfg80211.h> + +struct ath11k; +struct ath11k_base; + +struct ath11k_generic_iter { + struct ath11k *ar; + int ret; +}; + +/* number of failed packets (20 packets with 16 sw reties each) */ +#define ath11k_kickout_threshold (20 * 16) + +/* use insanely high numbers to make sure that the firmware implementation + * won't start, we have the same functionality already in hostapd. unit + * is seconds. + */ +#define ath11k_keepalive_min_idle 3747 +#define ath11k_keepalive_max_idle 3895 +#define ath11k_keepalive_max_unresponsive 3900 + +#define wmi_host_rc_ds_flag 0x01 +#define wmi_host_rc_cw40_flag 0x02 +#define wmi_host_rc_sgi_flag 0x04 +#define wmi_host_rc_ht_flag 0x08 +#define wmi_host_rc_rtscts_flag 0x10 +#define wmi_host_rc_tx_stbc_flag 0x20 +#define wmi_host_rc_rx_stbc_flag 0xc0 +#define wmi_host_rc_rx_stbc_flag_s 6 +#define wmi_host_rc_wep_tkip_flag 0x100 +#define wmi_host_rc_ts_flag 0x200 +#define wmi_host_rc_uapsd_flag 0x400 + +#define wmi_ht_cap_enabled 0x0001 +#define wmi_ht_cap_ht20_sgi 0x0002 +#define wmi_ht_cap_dynamic_smps 0x0004 +#define wmi_ht_cap_tx_stbc 0x0008 +#define wmi_ht_cap_tx_stbc_mask_shift 3 +#define wmi_ht_cap_rx_stbc 0x0030 +#define wmi_ht_cap_rx_stbc_mask_shift 4 +#define wmi_ht_cap_ldpc 0x0040 +#define wmi_ht_cap_l_sig_txop_prot 0x0080 +#define wmi_ht_cap_mpdu_density 0x0700 +#define wmi_ht_cap_mpdu_density_mask_shift 8 +#define wmi_ht_cap_ht40_sgi 0x0800 +#define wmi_ht_cap_rx_ldpc 0x1000 +#define wmi_ht_cap_tx_ldpc 0x2000 +#define wmi_ht_cap_ibf_bfer 0x4000 + +/* these macros should be used when we wish to advertise stbc support for + * only 1ss or 2ss or 3ss. 
+ */ +#define wmi_ht_cap_rx_stbc_1ss 0x0010 +#define wmi_ht_cap_rx_stbc_2ss 0x0020 +#define wmi_ht_cap_rx_stbc_3ss 0x0030 + +#define wmi_ht_cap_default_all (wmi_ht_cap_enabled | \ + wmi_ht_cap_ht20_sgi | \ + wmi_ht_cap_ht40_sgi | \ + wmi_ht_cap_tx_stbc | \ + wmi_ht_cap_rx_stbc | \ + wmi_ht_cap_ldpc) + +#define wmi_vht_cap_max_mpdu_len_mask 0x00000003 +#define wmi_vht_cap_rx_ldpc 0x00000010 +#define wmi_vht_cap_sgi_80mhz 0x00000020 +#define wmi_vht_cap_sgi_160mhz 0x00000040 +#define wmi_vht_cap_tx_stbc 0x00000080 +#define wmi_vht_cap_rx_stbc_mask 0x00000300 +#define wmi_vht_cap_rx_stbc_mask_shift 8 +#define wmi_vht_cap_su_bfer 0x00000800 +#define wmi_vht_cap_su_bfee 0x00001000 +#define wmi_vht_cap_max_cs_ant_mask 0x0000e000 +#define wmi_vht_cap_max_cs_ant_mask_shift 13 +#define wmi_vht_cap_max_snd_dim_mask 0x00070000 +#define wmi_vht_cap_max_snd_dim_mask_shift 16 +#define wmi_vht_cap_mu_bfer 0x00080000 +#define wmi_vht_cap_mu_bfee 0x00100000 +#define wmi_vht_cap_max_ampdu_len_exp 0x03800000 +#define wmi_vht_cap_max_ampdu_len_exp_shit 23 +#define wmi_vht_cap_rx_fixed_ant 0x10000000 +#define wmi_vht_cap_tx_fixed_ant 0x20000000 + +#define wmi_vht_cap_max_mpdu_len_11454 0x00000002 + +/* these macros should be used when we wish to advertise stbc support for + * only 1ss or 2ss or 3ss. + */ +#define wmi_vht_cap_rx_stbc_1ss 0x00000100 +#define wmi_vht_cap_rx_stbc_2ss 0x00000200 +#define wmi_vht_cap_rx_stbc_3ss 0x00000300 + +#define wmi_vht_cap_default_all (wmi_vht_cap_max_mpdu_len_11454 | \ + wmi_vht_cap_sgi_80mhz | \ + wmi_vht_cap_tx_stbc | \ + wmi_vht_cap_rx_stbc_mask | \ + wmi_vht_cap_rx_ldpc | \ + wmi_vht_cap_max_ampdu_len_exp | \ + wmi_vht_cap_rx_fixed_ant | \ + wmi_vht_cap_tx_fixed_ant) + +/* fixme: should these be in ieee80211.h? 
*/ +#define ieee80211_vht_mcs_support_0_11_mask genmask(23, 16) +#define ieee80211_disable_vht_mcs_support_0_11 bit(24) + +#define wmi_max_spatial_stream 3 + +#define ath11k_chan_width_num 8 + +extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default; + +int ath11k_mac_create(struct ath11k_base *ab); +void ath11k_mac_destroy(struct ath11k_base *ab); +void ath11k_mac_unregister(struct ath11k_base *ab); +int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, + u16 *rate); +u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, + u32 bitrate); +u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, + u8 hw_rate, bool cck); + +void __ath11k_mac_scan_finish(struct ath11k *ar); +void ath11k_mac_scan_finish(struct ath11k *ar); + +struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id); +struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab, + u32 vdev_id); +struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id); +struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id); +struct ath11k *ath11k_mac_get_ar_vdev_stop_status(struct ath11k_base *ab, + u32 vdev_id); + +void ath11k_mac_drain_tx(struct ath11k *ar); +void ath11k_mac_peer_cleanup_all(struct ath11k *ar); +int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx); +#endif diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/peer.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include "core.h" +#include "peer.h" +#include "debug.h" + +struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, + const u8 *addr) +{ + struct ath11k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) { + if (peer->vdev_id != vdev_id) + continue; + if (memcmp(peer->addr, addr, eth_alen)) + continue; + + return peer; + } + + return null; +} + +struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab, + const u8 *addr) +{ + struct ath11k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) { + if (memcmp(peer->addr, addr, eth_alen)) + continue; + + return peer; + } + + return null; +} + +struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, + int peer_id) +{ + struct ath11k_peer *peer; + + lockdep_assert_held(&ab->base_lock); + + list_for_each_entry(peer, &ab->peers, list) + if (peer_id == peer->peer_id) + return peer; + + return null; +} + +void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id) +{ + struct ath11k_peer *peer; + + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find_by_id(ab, peer_id); + if (!peer) { + ath11k_warn(ab, "peer-unmap-event: unknown peer id %d ", + peer_id); + goto exit; + } + + ath11k_dbg(ab, ath11k_dbg_dp_htt, "htt peer unmap vdev %d peer %pm id %d ", + peer->vdev_id, peer->addr, peer_id); + + list_del(&peer->list); + kfree(peer); + wake_up(&ab->peer_mapping_wq); + +exit: + spin_unlock_bh(&ab->base_lock); +} + +void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id, + u8 *mac_addr, u16 ast_hash) +{ + struct ath11k_peer *peer; + + spin_lock_bh(&ab->base_lock); + peer = ath11k_peer_find(ab, vdev_id, mac_addr); + if (!peer) { + peer = kzalloc(sizeof(*peer), gfp_atomic); + if (!peer) + goto exit; + + peer->vdev_id = vdev_id; + peer->peer_id = peer_id; + peer->ast_hash = ast_hash; + ether_addr_copy(peer->addr, mac_addr); + list_add(&peer->list, &ab->peers); + 
wake_up(&ab->peer_mapping_wq); + } + + ath11k_dbg(ab, ath11k_dbg_dp_htt, "htt peer map vdev %d peer %pm id %d ", + vdev_id, mac_addr, peer_id); + +exit: + spin_unlock_bh(&ab->base_lock); +} + +static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id, + const u8 *addr, bool expect_mapped) +{ + int ret; + + ret = wait_event_timeout(ab->peer_mapping_wq, ({ + bool mapped; + + spin_lock_bh(&ab->base_lock); + mapped = !!ath11k_peer_find(ab, vdev_id, addr); + spin_unlock_bh(&ab->base_lock); + + (mapped == expect_mapped || + test_bit(ath11k_flag_crash_flush, &ab->dev_flags)); + }), 3 * hz); + + if (ret <= 0) + return -etimedout; + + return 0; +} + +void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) +{ + struct ath11k_peer *peer, *tmp; + struct ath11k_base *ab = ar->ab; + + lockdep_assert_held(&ar->conf_mutex); + + spin_lock_bh(&ab->base_lock); + list_for_each_entry_safe(peer, tmp, &ab->peers, list) { + if (peer->vdev_id != vdev_id) + continue; + + ath11k_warn(ab, "removing stale peer %pm from vdev_id %d ", + peer->addr, vdev_id); + + list_del(&peer->list); + kfree(peer); + ar->num_peers--; + } + + spin_unlock_bh(&ab->base_lock); +} + +static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr) +{ + return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false); +} + +int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) +{ + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); + if (ret) { + ath11k_warn(ar->ab, + "failed to delete peer vdev_id %d addr %pm ret %d ", + vdev_id, addr, ret); + return ret; + } + + ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr); + if (ret) + return ret; + + ar->num_peers--; + + return 0; +} + +static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr) +{ + return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true); +} + +int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif 
*arvif, + struct ieee80211_sta *sta, struct peer_create_params *param) +{ + struct ath11k_peer *peer; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (ar->num_peers > (ar->max_num_peers - 1)) { + ath11k_warn(ar->ab, + "failed to create peer due to insufficient peer entry resource in firmware "); + return -enobufs; + } + + ret = ath11k_wmi_send_peer_create_cmd(ar, param); + if (ret) { + ath11k_warn(ar->ab, + "failed to send peer create vdev_id %d ret %d ", + param->vdev_id, ret); + return ret; + } + + ret = ath11k_wait_for_peer_created(ar, param->vdev_id, + param->peer_addr); + if (ret) + return ret; + + spin_lock_bh(&ar->ab->base_lock); + + peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr); + if (!peer) { + spin_unlock_bh(&ar->ab->base_lock); + ath11k_warn(ar->ab, "failed to find peer %pm on vdev %i after creation ", + param->peer_addr, param->vdev_id); + ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr, + param->vdev_id); + return -enoent; + } + + peer->sta = sta; + arvif->ast_hash = peer->ast_hash; + + ar->num_peers++; + + spin_unlock_bh(&ar->ab->base_lock); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/peer.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_peer_h +#define ath11k_peer_h + +struct ath11k_peer { + struct list_head list; + struct ieee80211_sta *sta; + int vdev_id; + u8 addr[eth_alen]; + int peer_id; + u16 ast_hash; + + /* protected by ab->data_lock */ + struct ieee80211_key_conf *keys[wmi_max_key_index + 1]; + struct dp_rx_tid rx_tid[ieee80211_num_tids + 1]; +}; + +void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id); +void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id, + u8 *mac_addr, u16 ast_hash); +struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, + const u8 *addr); +struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab, + const u8 *addr); +struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id); +void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id); +int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr); +int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, + struct ieee80211_sta *sta, struct peer_create_params *param); + +#endif /* _peer_h_ */ diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/qmi.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include "qmi.h" +#include "core.h" +#include "debug.h" +#include <linux/of.h> +#include <linux/firmware.h> + +static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = { + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + num_clients_valid), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + num_clients), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + wake_msi_valid), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + wake_msi), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + gpios_len), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = qmi_wlfw_max_num_gpio_v01, + .elem_size = sizeof(u32), + .array_type = var_len_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + gpios), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + nm_modem_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + 
.tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + nm_modem), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_support_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_support), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_cache_support_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + bdf_cache_support), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_support_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_support), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_cache_support_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + m3_cache_support), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + 
cal_filesys_support_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_filesys_support), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_cache_support_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_cache_support), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1a, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_done_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1a, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1b, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x1b, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1c, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1c, + .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, + { + .data_type = qmi_eoti, + .array_type = 
no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = { + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_ready_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_ready_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_download_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_download_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_update_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + initiate_cal_update_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = 
sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + msa_ready_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + msa_ready_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + pin_connect_result_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + pin_connect_result_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + client_id_valid), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + client_id), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + request_mem_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x16, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + request_mem_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_mem_ready_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + 
.elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x17, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_mem_ready_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_init_done_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x18, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + fw_init_done_enable), + }, + + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + rejuvenate_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x19, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + rejuvenate_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1a, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + xo_cal_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1a, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + xo_cal_enable), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1b, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + cal_done_enable_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x1b, + .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01, + cal_done_enable), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = 
qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, + fw_status_valid), + }, + { + .data_type = qmi_unsigned_8_byte, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01, + fw_status), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = { + { + .data_type = qmi_unsigned_8_byte, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, + size), + }, + { + .data_type = qmi_signed_4_byte_enum, + .elem_len = 1, + .elem_size = 
sizeof(enum qmi_wlanfw_mem_type_enum_v01), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len), + }, + { + .data_type = qmi_struct, + .elem_len = qmi_wlanfw_max_num_mem_cfg_v01, + .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01), + .array_type = var_len_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg), + .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = { + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, + mem_seg_len), + }, + { + .data_type = qmi_struct, + .elem_len = ath11k_qmi_wlanfw_max_num_mem_seg_v01, + .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01), + .array_type = var_len_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01, + mem_seg), + .ei_array = qmi_wlanfw_mem_seg_s_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = { + { + .data_type = qmi_unsigned_8_byte, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size), + }, + { + .data_type = qmi_signed_4_byte_enum, + .elem_len = 1, + .elem_size = sizeof(enum 
qmi_wlanfw_mem_type_enum_v01), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = { + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, + mem_seg_len), + }, + { + .data_type = qmi_struct, + .elem_len = ath11k_qmi_wlanfw_max_num_mem_seg_v01, + .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01), + .array_type = var_len_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01, + mem_seg), + .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = { + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct 
qmi_wlanfw_rf_chip_info_s_v01, + chip_id), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01, + chip_family), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01, + board_id), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, + fw_version), + }, + { + .data_type = qmi_string, + .elem_len = ath11k_qmi_wlanfw_max_timestamp_len_v01 + 1, + .elem_size = sizeof(char), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01, + fw_build_timestamp), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, 
resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + chip_info_valid), + }, + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + chip_info), + .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + board_info_valid), + }, + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + board_info), + .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + soc_info_valid), + }, + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + soc_info), + .ei_array = qmi_wlanfw_soc_info_s_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_version_info_valid), + }, + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_version_info), + .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei, + }, 
+ { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_build_id_valid), + }, + { + .data_type = qmi_string, + .elem_len = ath11k_qmi_wlanfw_max_build_id_len_v01 + 1, + .elem_size = sizeof(char), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + fw_build_id), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + num_macs_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, + num_macs), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = { + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + valid), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = qmi_signed_4_byte_enum, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + file_id), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = 
sizeof(u32), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + total_size), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + seg_id), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + data_valid), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + data_len), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = qmi_wlanfw_max_data_size_v01, + .elem_size = sizeof(u8), + .array_type = var_len_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + data), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + end_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + end), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + bdf_type_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x15, + .offset = 
offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01, + bdf_type), + }, + + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = { + { + .data_type = qmi_unsigned_8_byte, + .elem_len = 1, + .elem_size = sizeof(u64), + .array_type = no_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = qmi_signed_4_byte_enum, + .elem_len = 1, + .elem_size = sizeof(enum 
qmi_wlanfw_pipedir_enum_v01), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + nentries), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + nbytes_max), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01, + flags), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, + service_id), + }, + { + .data_type = qmi_signed_4_byte_enum, + .elem_len = 1, + .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = { + { + .data_type = qmi_unsigned_2_byte, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id), + }, + { + .data_type = 
qmi_unsigned_2_byte, + .elem_len = 1, + .elem_size = sizeof(u16), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, + offset), + }, + { + .data_type = qmi_eoti, + .array_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0, + .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01, + addr), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = { + { + .data_type = qmi_unsigned_4_byte, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = no_array, + .tlv_type = 0x01, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, + mode), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, + hw_debug_valid), + }, + { + .data_type = qmi_unsigned_1_byte, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01, + hw_debug), + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01, + resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = { + { + .data_type = qmi_opt_flag, + .elem_len = 1, + 
.elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + host_version_valid), + }, + { + .data_type = qmi_string, + .elem_len = qmi_wlanfw_max_str_len_v01 + 1, + .elem_size = sizeof(char), + .array_type = no_array, + .tlv_type = 0x10, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + host_version), + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + tgt_cfg_valid), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + tgt_cfg_len), + }, + { + .data_type = qmi_struct, + .elem_len = qmi_wlanfw_max_num_ce_v01, + .elem_size = sizeof( + struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01), + .array_type = var_len_array, + .tlv_type = 0x11, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + tgt_cfg), + .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + svc_cfg_valid), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + svc_cfg_len), + }, + { + .data_type = qmi_struct, + .elem_len = qmi_wlanfw_max_num_svc_v01, + .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01), + .array_type = var_len_array, + .tlv_type = 0x12, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + svc_cfg), + .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = 
offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_valid), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_len), + }, + { + .data_type = qmi_struct, + .elem_len = qmi_wlanfw_max_num_shadow_reg_v01, + .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01), + .array_type = var_len_array, + .tlv_type = 0x13, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg), + .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei, + }, + { + .data_type = qmi_opt_flag, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_valid), + }, + { + .data_type = qmi_data_len, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = no_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_len), + }, + { + .data_type = qmi_struct, + .elem_len = qmi_wlanfw_max_num_shadow_reg_v2_v01, + .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01), + .array_type = var_len_array, + .tlv_type = 0x14, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01, + shadow_reg_v2), + .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = { + { + .data_type = qmi_struct, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .array_type = no_array, + .tlv_type = 0x02, + .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp), + .ei_array = qmi_response_type_v01_ei, + }, + { + .data_type = qmi_eoti, + .array_type = no_array, + .tlv_type = qmi_common_tlv_type, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = { + { + .data_type = 
qmi_eoti, + .array_type = no_array, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = { + { + .data_type = qmi_eoti, + .array_type = no_array, + }, +}; + +static struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = { + { + .data_type = qmi_eoti, + .array_type = no_array, + }, +}; + +static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) +{ + struct qmi_wlanfw_host_cap_req_msg_v01 req; + struct qmi_wlanfw_host_cap_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.num_clients_valid = 1; + req.num_clients = 1; + req.mem_cfg_mode = ab->qmi.target_mem_mode; + req.mem_cfg_mode_valid = 1; + req.bdf_support_valid = 1; + req.bdf_support = 1; + + req.m3_support_valid = 0; + req.m3_support = 0; + + req.m3_cache_support_valid = 0; + req.m3_cache_support = 0; + + req.cal_done_valid = 1; + req.cal_done = ab->qmi.cal_done; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_host_cap_req_v01, + qmi_wlanfw_host_cap_req_msg_v01_max_len, + qmi_wlanfw_host_cap_req_msg_v01_ei, &req); + if (ret < 0) { + ath11k_warn(ab, "failed to send host capability request,err = %d ", ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) + goto out; + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "host capability request failed, result: %d, err: %d ", + resp.resp.result, resp.resp.error); + ret = -einval; + goto out; + } + +out: + return ret; +} + +static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab) +{ + struct qmi_wlanfw_ind_register_req_msg_v01 *req; + struct qmi_wlanfw_ind_register_resp_msg_v01 *resp; + struct qmi_handle *handle = &ab->qmi.handle; + struct qmi_txn txn; + int ret = 0; + + req = kzalloc(sizeof(*req), gfp_kernel); + 
if (!req) + return -enomem; + + resp = kzalloc(sizeof(*resp), gfp_kernel); + if (!resp) + goto resp_out; + + req->client_id_valid = 1; + req->client_id = qmi_wlanfw_client_id; + req->fw_ready_enable_valid = 1; + req->fw_ready_enable = 1; + req->request_mem_enable_valid = 1; + req->request_mem_enable = 1; + req->fw_mem_ready_enable_valid = 1; + req->fw_mem_ready_enable = 1; + req->cal_done_enable_valid = 1; + req->cal_done_enable = 1; + req->fw_init_done_enable_valid = 1; + req->fw_init_done_enable = 1; + + req->pin_connect_result_enable_valid = 0; + req->pin_connect_result_enable = 0; + + ret = qmi_txn_init(handle, &txn, + qmi_wlanfw_ind_register_resp_msg_v01_ei, resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_ind_register_req_v01, + qmi_wlanfw_ind_register_req_msg_v01_max_len, + qmi_wlanfw_ind_register_req_msg_v01_ei, req); + if (ret < 0) { + ath11k_warn(ab, "failed to send indication register request, err = %d ", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) { + ath11k_warn(ab, "failed to register fw indication %d ", ret); + goto out; + } + + if (resp->resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "fw ind register request failed, result: %d, err: %d ", + resp->resp.result, resp->resp.error); + ret = -einval; + goto out; + } + +out: + kfree(resp); +resp_out: + kfree(req); + return ret; +} + +static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab) +{ + struct qmi_wlanfw_respond_mem_req_msg_v01 *req; + struct qmi_wlanfw_respond_mem_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0, i; + + req = kzalloc(sizeof(*req), gfp_kernel); + if (!req) + return -enomem; + + memset(&resp, 0, sizeof(resp)); + + req->mem_seg_len = ab->qmi.mem_seg_count; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + for (i = 0; i < req->mem_seg_len ; i++) { + 
req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr; + req->mem_seg[i].size = ab->qmi.target_mem[i].size; + req->mem_seg[i].type = ab->qmi.target_mem[i].type; + } + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_respond_mem_req_v01, + qmi_wlanfw_respond_mem_req_msg_v01_max_len, + qmi_wlanfw_respond_mem_req_msg_v01_ei, req); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to respond memory request, err = %d ", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) { + ath11k_warn(ab, "qmi failed memory request, err = %d ", ret); + goto out; + } + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "respond mem req failed, result: %d, err: %d ", + resp.resp.result, resp.resp.error); + ret = -einval; + goto out; + } +out: + kfree(req); + return ret; +} + +static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab) +{ + int i, idx; + + for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) { + switch (ab->qmi.target_mem[i].type) { + case bdf_mem_region_type: + ab->qmi.target_mem[idx].paddr = ath11k_qmi_bdf_address; + ab->qmi.target_mem[idx].vaddr = ath11k_qmi_bdf_address; + ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; + ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; + idx++; + break; + case caldb_mem_region_type: + if (ab->qmi.target_mem[i].size > ath11k_qmi_caldb_size) { + ath11k_warn(ab, "qmi mem size is low to load caldata "); + return -einval; + } + /* todo ath11k does not support cold boot calibration */ + ab->qmi.target_mem[idx].paddr = 0; + ab->qmi.target_mem[idx].vaddr = 0; + ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; + ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; + idx++; + break; + default: + ath11k_warn(ab, "qmi ignore invalid mem req type %d ", + ab->qmi.target_mem[i].type); + break; + } + } + ab->qmi.mem_seg_count = idx; + + return 0; +} + +static int ath11k_qmi_request_target_cap(struct 
ath11k_base *ab) +{ + struct qmi_wlanfw_cap_req_msg_v01 req; + struct qmi_wlanfw_cap_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_cap_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_cap_req_v01, + qmi_wlanfw_cap_req_msg_v01_max_len, + qmi_wlanfw_cap_req_msg_v01_ei, &req); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send target cap request, err = %d ", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) { + ath11k_warn(ab, "qmi failed target cap request %d ", ret); + goto out; + } + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "qmi targetcap req failed, result: %d, err: %d ", + resp.resp.result, resp.resp.error); + ret = -einval; + goto out; + } + + if (resp.chip_info_valid) { + ab->qmi.target.chip_id = resp.chip_info.chip_id; + ab->qmi.target.chip_family = resp.chip_info.chip_family; + } + + if (resp.board_info_valid) + ab->qmi.target.board_id = resp.board_info.board_id; + else + ab->qmi.target.board_id = 0xff; + + if (resp.soc_info_valid) + ab->qmi.target.soc_id = resp.soc_info.soc_id; + + if (resp.fw_version_info_valid) { + ab->qmi.target.fw_version = resp.fw_version_info.fw_version; + strlcpy(ab->qmi.target.fw_build_timestamp, + resp.fw_version_info.fw_build_timestamp, + sizeof(ab->qmi.target.fw_build_timestamp)); + } + + if (resp.fw_build_id_valid) + strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id, + sizeof(ab->qmi.target.fw_build_id)); + + ath11k_info(ab, "qmi target: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x ", + ab->qmi.target.chip_id, ab->qmi.target.chip_family, + ab->qmi.target.board_id, ab->qmi.target.soc_id); + + ath11k_info(ab, "qmi fw_version: 0x%x fw_build_timestamp: %s fw_build_id: %s", + ab->qmi.target.fw_version, + 
ab->qmi.target.fw_build_timestamp, + ab->qmi.target.fw_build_id); + +out: + return ret; +} + +static int +ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type, + struct qmi_wlanfw_bdf_download_req_msg_v01 *req, + void __iomem *bdf_addr) +{ + struct device *dev = ab->dev; + char filename[ath11k_qmi_max_bdf_file_name_size]; + const struct firmware *fw_entry; + struct ath11k_board_data bd; + u32 fw_size; + int ret = 0; + + memset(&bd, 0, sizeof(bd)); + + switch (type) { + case ath11k_qmi_file_type_bdf_golden: + ret = ath11k_core_fetch_bdf(ab, &bd); + if (ret) { + ath11k_warn(ab, "qmi failed to load bdf "); + goto out; + } + + fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len); + memcpy_toio(bdf_addr, bd.data, fw_size); + ath11k_core_free_bdf(ab, &bd); + break; + case ath11k_qmi_file_type_caldata: + snprintf(filename, sizeof(filename), + "%s/%s", ab->hw_params.fw.dir, ath11k_qmi_default_cal_file_name); + ret = request_firmware(&fw_entry, filename, dev); + if (ret) { + ath11k_warn(ab, "qmi failed to load cal: %s ", filename); + goto out; + } + + fw_size = min_t(u32, ab->hw_params.fw.board_size, + fw_entry->size); + + memcpy_toio(bdf_addr + ath11k_qmi_caldata_offset, + fw_entry->data, fw_size); + ath11k_info(ab, "qmi downloading bdf: %s, size: %zu ", + filename, fw_entry->size); + + release_firmware(fw_entry); + break; + default: + ret = -einval; + goto out; + } + + req->total_size = fw_size; + +out: + return ret; +} + +static int ath11k_qmi_load_bdf(struct ath11k_base *ab) +{ + struct qmi_wlanfw_bdf_download_req_msg_v01 *req; + struct qmi_wlanfw_bdf_download_resp_msg_v01 resp; + struct qmi_txn txn = {}; + void __iomem *bdf_addr = null; + int type, ret; + + req = kzalloc(sizeof(*req), gfp_kernel); + if (!req) + return -enomem; + memset(&resp, 0, sizeof(resp)); + + bdf_addr = ioremap(ath11k_qmi_bdf_address, ath11k_qmi_bdf_max_size); + if (!bdf_addr) { + ath11k_warn(ab, "qmi ioremap error for bdf "); + ret = -eio; + goto out; + } + + for (type = 0; type 
< ath11k_qmi_max_file_type; type++) { + req->valid = 1; + req->file_id_valid = 1; + req->file_id = ab->qmi.target.board_id; + req->total_size_valid = 1; + req->seg_id_valid = 1; + req->seg_id = type; + req->data_valid = 0; + req->data_len = ath11k_qmi_max_bdf_file_name_size; + req->bdf_type = 0; + req->bdf_type_valid = 0; + req->end_valid = 1; + req->end = 1; + + ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr); + if (ret < 0) + goto out_qmi_bdf; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_bdf_download_resp_msg_v01_ei, + &resp); + if (ret < 0) + goto out_qmi_bdf; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_bdf_download_req_v01, + qmi_wlanfw_bdf_download_req_msg_v01_max_len, + qmi_wlanfw_bdf_download_req_msg_v01_ei, req); + if (ret < 0) { + qmi_txn_cancel(&txn); + goto out_qmi_bdf; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) + goto out_qmi_bdf; + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "qmi bdf download failed, result: %d, err: %d ", + resp.resp.result, resp.resp.error); + ret = -einval; + goto out_qmi_bdf; + } + } + ath11k_info(ab, "qmi bdf downloaded "); + +out_qmi_bdf: + iounmap(bdf_addr); +out: + kfree(req); + return ret; +} + +static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab) +{ + struct qmi_wlanfw_m3_info_req_msg_v01 req; + struct qmi_wlanfw_m3_info_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + req.addr = 0; + req.size = 0; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_m3_info_req_v01, + qmi_wlanfw_m3_info_req_msg_v01_max_msg_len, + qmi_wlanfw_m3_info_req_msg_v01_ei, &req); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send m3 information request, err = %d ", + ret); + goto out; + } 
+ + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) { + ath11k_warn(ab, "qmi failed m3 information request %d ", ret); + goto out; + } + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "qmi m3 info request failed, result: %d, err: %d ", + resp.resp.result, resp.resp.error); + ret = -einval; + goto out; + } +out: + return ret; +} + +static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab, + u32 mode) +{ + struct qmi_wlanfw_wlan_mode_req_msg_v01 req; + struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp; + struct qmi_txn txn = {}; + int ret = 0; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.mode = mode; + req.hw_debug_valid = 1; + req.hw_debug = 0; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_wlan_mode_req_v01, + qmi_wlanfw_wlan_mode_req_msg_v01_max_len, + qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d ", + mode, ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) { + if (mode == ath11k_firmware_mode_off && ret == -enetreset) { + ath11k_warn(ab, "wlfw service is dis-connected "); + return 0; + } + ath11k_warn(ab, "qmi failed set mode request, mode: %d, err = %d ", + mode, ret); + goto out; + } + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "mode request failed, mode: %d, result: %d err: %d ", + mode, resp.resp.result, resp.resp.error); + ret = -einval; + goto out; + } + +out: + return ret; +} + +static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab) +{ + struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req; + struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp; + struct ce_pipe_config *ce_cfg; + struct service_to_pipe *svc_cfg; + struct qmi_txn txn = {}; + int ret = 
0; + + ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce; + svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map; + + req = kzalloc(sizeof(*req), gfp_kernel); + if (!req) + return -enomem; + + memset(&resp, 0, sizeof(resp)); + + req->host_version_valid = 1; + strlcpy(req->host_version, ath11k_host_version_string, + sizeof(req->host_version)); + + req->tgt_cfg_valid = 1; + /* this is number of ce configs */ + req->tgt_cfg_len = ((ab->qmi.ce_cfg.tgt_ce_len) / + (sizeof(struct ce_pipe_config))) - 1; + for (ret = 0; ret <= req->tgt_cfg_len ; ret++) { + req->tgt_cfg[ret].pipe_num = ce_cfg[ret].pipenum; + req->tgt_cfg[ret].pipe_dir = ce_cfg[ret].pipedir; + req->tgt_cfg[ret].nentries = ce_cfg[ret].nentries; + req->tgt_cfg[ret].nbytes_max = ce_cfg[ret].nbytes_max; + req->tgt_cfg[ret].flags = ce_cfg[ret].flags; + } + + req->svc_cfg_valid = 1; + /* this is number of service/ce configs */ + req->svc_cfg_len = (ab->qmi.ce_cfg.svc_to_ce_map_len) / + (sizeof(struct service_to_pipe)); + for (ret = 0; ret < req->svc_cfg_len; ret++) { + req->svc_cfg[ret].service_id = svc_cfg[ret].service_id; + req->svc_cfg[ret].pipe_dir = svc_cfg[ret].pipedir; + req->svc_cfg[ret].pipe_num = svc_cfg[ret].pipenum; + } + req->shadow_reg_valid = 0; + req->shadow_reg_v2_valid = 0; + + ret = qmi_txn_init(&ab->qmi.handle, &txn, + qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp); + if (ret < 0) + goto out; + + ret = qmi_send_request(&ab->qmi.handle, null, &txn, + qmi_wlanfw_wlan_cfg_req_v01, + qmi_wlanfw_wlan_cfg_req_msg_v01_max_len, + qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan config request, err = %d ", + ret); + goto out; + } + + ret = qmi_txn_wait(&txn, msecs_to_jiffies(ath11k_qmi_wlanfw_timeout_ms)); + if (ret < 0) { + ath11k_warn(ab, "qmi failed wlan config request, err = %d ", ret); + goto out; + } + + if (resp.resp.result != qmi_result_success_v01) { + ath11k_warn(ab, "qmi wlan config request failed, result: %d, err: %d ", + 
resp.resp.result, resp.resp.error); + ret = -einval; + goto out; + } + +out: + kfree(req); + return ret; +} + +void ath11k_qmi_firmware_stop(struct ath11k_base *ab) +{ + int ret; + + ret = ath11k_qmi_wlanfw_mode_send(ab, ath11k_firmware_mode_off); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan mode off "); + return; + } +} + +int ath11k_qmi_firmware_start(struct ath11k_base *ab, + u32 mode) +{ + int ret; + + ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan cfg:%d ", ret); + return ret; + } + + ret = ath11k_qmi_wlanfw_mode_send(ab, mode); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send wlan fw mode:%d ", ret); + return ret; + } + + return 0; +} + +static int +ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi, + enum ath11k_qmi_event_type type, + void *data) +{ + struct ath11k_qmi_driver_event *event; + + event = kzalloc(sizeof(*event), gfp_atomic); + if (!event) + return -enomem; + + event->type = type; + event->data = data; + + spin_lock(&qmi->event_lock); + list_add_tail(&event->list, &qmi->event_list); + spin_unlock(&qmi->event_lock); + + queue_work(qmi->event_wq, &qmi->event_work); + + return 0; +} + +static void ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi) +{ + struct ath11k_base *ab = qmi->ab; + int ret; + + ret = ath11k_qmi_fw_ind_register_send(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send fw indication qmi:%d ", ret); + return; + } + + ret = ath11k_qmi_host_cap_send(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send host cap qmi:%d ", ret); + return; + } +} + +static void ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi) +{ + struct ath11k_base *ab = qmi->ab; + int ret; + + ret = ath11k_qmi_respond_fw_mem_request(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to respond fw mem req:%d ", ret); + return; + } +} + +static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi) +{ + struct ath11k_base *ab = qmi->ab; + int ret; + + ret = 
ath11k_qmi_request_target_cap(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to req target capabilities:%d ", ret); + return; + } + + ret = ath11k_qmi_load_bdf(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to load board data file:%d ", ret); + return; + } + + ret = ath11k_qmi_wlanfw_m3_info_send(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to send m3 info req:%d ", ret); + return; + } +} + +static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *data) +{ + struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); + struct ath11k_base *ab = qmi->ab; + const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data; + int i, ret; + + ath11k_dbg(ab, ath11k_dbg_qmi, "qmi firmware request memory request "); + + if (msg->mem_seg_len == 0 || + msg->mem_seg_len > ath11k_qmi_wlanfw_max_num_mem_seg_v01) + ath11k_warn(ab, "invalid memory segment length: %u ", + msg->mem_seg_len); + + ab->qmi.mem_seg_count = msg->mem_seg_len; + + for (i = 0; i < qmi->mem_seg_count ; i++) { + ab->qmi.target_mem[i].type = msg->mem_seg[i].type; + ab->qmi.target_mem[i].size = msg->mem_seg[i].size; + ath11k_dbg(ab, ath11k_dbg_qmi, "qmi mem seg type %d size %d ", + msg->mem_seg[i].type, msg->mem_seg[i].size); + } + + ret = ath11k_qmi_alloc_target_mem_chunk(ab); + if (ret < 0) { + ath11k_warn(ab, "qmi failed to alloc target memory:%d ", ret); + return; + } + + ath11k_qmi_driver_event_post(qmi, ath11k_qmi_event_request_mem, null); +} + +static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); + struct ath11k_base *ab = qmi->ab; + + ath11k_dbg(ab, ath11k_dbg_qmi, "qmi firmware memory ready indication "); + ath11k_qmi_driver_event_post(qmi, ath11k_qmi_event_fw_mem_ready, null); +} + +static void ath11k_qmi_msg_fw_ready_cb(struct 
qmi_handle *qmi_hdl, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ + struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); + struct ath11k_base *ab = qmi->ab; + + ath11k_dbg(ab, ath11k_dbg_qmi, "qmi firmware ready "); + ath11k_qmi_driver_event_post(qmi, ath11k_qmi_event_fw_ready, null); +} + +static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi, + struct sockaddr_qrtr *sq, + struct qmi_txn *txn, + const void *decoded) +{ +} + +static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = { + { + .type = qmi_indication, + .msg_id = qmi_wlfw_request_mem_ind_v01, + .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei, + .decoded_size = sizeof(qmi_wlanfw_request_mem_ind_msg_v01_ei), + .fn = ath11k_qmi_msg_mem_request_cb, + }, + { + .type = qmi_indication, + .msg_id = qmi_wlfw_fw_mem_ready_ind_v01, + .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei, + .decoded_size = sizeof(qmi_wlanfw_mem_ready_ind_msg_v01_ei), + .fn = ath11k_qmi_msg_mem_ready_cb, + }, + { + .type = qmi_indication, + .msg_id = qmi_wlfw_fw_ready_ind_v01, + .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei, + .decoded_size = sizeof(qmi_wlanfw_fw_ready_ind_msg_v01_ei), + .fn = ath11k_qmi_msg_fw_ready_cb, + }, + { + .type = qmi_indication, + .msg_id = qmi_wlfw_cold_boot_cal_done_ind_v01, + .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei, + .decoded_size = + sizeof(qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei), + .fn = ath11k_qmi_msg_cold_boot_cal_done_cb, + }, +}; + +static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl, + struct qmi_service *service) +{ + struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); + struct ath11k_base *ab = qmi->ab; + struct sockaddr_qrtr *sq = &qmi->sq; + int ret; + + sq->sq_family = af_qipcrtr; + sq->sq_node = service->node; + sq->sq_port = service->port; + + ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq, + sizeof(*sq), 0); + if (ret) { + ath11k_warn(ab, "qmi failed to connect to 
remote service %d ", ret); + return ret; + } + + ath11k_dbg(ab, ath11k_dbg_qmi, "qmi wifi fw qmi service connected "); + ath11k_qmi_driver_event_post(qmi, ath11k_qmi_event_server_arrive, null); + + return 0; +} + +static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl, + struct qmi_service *service) +{ + struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle); + struct ath11k_base *ab = qmi->ab; + + ath11k_dbg(ab, ath11k_dbg_qmi, "qmi wifi fw del server "); + ath11k_qmi_driver_event_post(qmi, ath11k_qmi_event_server_exit, null); +} + +static const struct qmi_ops ath11k_qmi_ops = { + .new_server = ath11k_qmi_ops_new_server, + .del_server = ath11k_qmi_ops_del_server, +}; + +static void ath11k_qmi_driver_event_work(struct work_struct *work) +{ + struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi, + event_work); + struct ath11k_qmi_driver_event *event; + struct ath11k_base *ab = qmi->ab; + + spin_lock(&qmi->event_lock); + while (!list_empty(&qmi->event_list)) { + event = list_first_entry(&qmi->event_list, + struct ath11k_qmi_driver_event, list); + list_del(&event->list); + spin_unlock(&qmi->event_lock); + + if (test_bit(ath11k_flag_unregistering, &ab->dev_flags)) + return; + + switch (event->type) { + case ath11k_qmi_event_server_arrive: + ath11k_qmi_event_server_arrive(qmi); + break; + case ath11k_qmi_event_server_exit: + set_bit(ath11k_flag_crash_flush, &ab->dev_flags); + set_bit(ath11k_flag_recovery, &ab->dev_flags); + break; + case ath11k_qmi_event_request_mem: + ath11k_qmi_event_mem_request(qmi); + break; + case ath11k_qmi_event_fw_mem_ready: + ath11k_qmi_event_load_bdf(qmi); + break; + case ath11k_qmi_event_fw_ready: + if (test_bit(ath11k_flag_registered, &ab->dev_flags)) { + queue_work(ab->workqueue, &ab->restart_work); + break; + } + + ath11k_core_qmi_firmware_ready(ab); + ab->qmi.cal_done = 1; + set_bit(ath11k_flag_registered, &ab->dev_flags); + + break; + case ath11k_qmi_event_cold_boot_cal_done: + break; + default: + 
ath11k_warn(ab, "invalid event type: %d", event->type); + break; + } + kfree(event); + spin_lock(&qmi->event_lock); + } + spin_unlock(&qmi->event_lock); +} + +int ath11k_qmi_init_service(struct ath11k_base *ab) +{ + int ret; + + memset(&ab->qmi.target, 0, sizeof(struct target_info)); + memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk)); + ab->qmi.ab = ab; + + ab->qmi.target_mem_mode = ath11k_qmi_target_mem_mode_default; + ret = qmi_handle_init(&ab->qmi.handle, ath11k_qmi_resp_len_max, + &ath11k_qmi_ops, ath11k_qmi_msg_handlers); + if (ret < 0) { + ath11k_warn(ab, "failed to initialize qmi handle "); + return ret; + } + + ab->qmi.event_wq = alloc_workqueue("ath11k_qmi_driver_event", + wq_unbound, 1); + if (!ab->qmi.event_wq) { + ath11k_err(ab, "failed to allocate workqueue "); + return -efault; + } + + init_list_head(&ab->qmi.event_list); + spin_lock_init(&ab->qmi.event_lock); + init_work(&ab->qmi.event_work, ath11k_qmi_driver_event_work); + + ret = qmi_add_lookup(&ab->qmi.handle, ath11k_qmi_wlfw_service_id_v01, + ath11k_qmi_wlfw_service_vers_v01, + ath11k_qmi_wlfw_service_ins_id_v01); + if (ret < 0) { + ath11k_warn(ab, "failed to add qmi lookup "); + return ret; + } + + return ret; +} + +void ath11k_qmi_deinit_service(struct ath11k_base *ab) +{ + qmi_handle_release(&ab->qmi.handle); + cancel_work_sync(&ab->qmi.event_work); + destroy_workqueue(ab->qmi.event_wq); +} + diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/qmi.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_qmi_h +#define ath11k_qmi_h + +#include <linux/mutex.h> +#include <linux/soc/qcom/qmi.h> + +#define ath11k_host_version_string "win" +#define ath11k_qmi_wlanfw_timeout_ms 5000 +#define ath11k_qmi_max_bdf_file_name_size 64 +#define ath11k_qmi_bdf_address 0x4b0c0000 +#define ath11k_qmi_bdf_max_size (256 * 1024) +#define ath11k_qmi_caldata_offset (128 * 1024) +#define ath11k_qmi_wlanfw_max_build_id_len_v01 128 +#define ath11k_qmi_wlfw_service_id_v01 0x45 +#define ath11k_qmi_wlfw_service_vers_v01 0x01 +#define ath11k_qmi_wlfw_service_ins_id_v01 0x02 +#define ath11k_qmi_wlanfw_max_timestamp_len_v01 32 +#define ath11k_qmi_resp_len_max 8192 +#define ath11k_qmi_wlanfw_max_num_mem_seg_v01 32 +#define ath11k_qmi_caldb_size 0x480000 +#define ath11k_qmi_default_cal_file_name "caldata.bin" + +#define qmi_wlfw_request_mem_ind_v01 0x0035 +#define qmi_wlfw_fw_mem_ready_ind_v01 0x0037 +#define qmi_wlfw_cold_boot_cal_done_ind_v01 0x0021 +#define qmi_wlfw_fw_ready_ind_v01 0x0038 + +#define qmi_wlanfw_max_data_size_v01 6144 +#define ath11k_firmware_mode_off 4 +#define ath11k_qmi_target_mem_mode_default 0 + +struct ath11k_base; + +enum ath11k_qmi_file_type { + ath11k_qmi_file_type_bdf_golden, + ath11k_qmi_file_type_caldata, + ath11k_qmi_max_file_type, +}; + +enum ath11k_qmi_event_type { + ath11k_qmi_event_server_arrive, + ath11k_qmi_event_server_exit, + ath11k_qmi_event_request_mem, + ath11k_qmi_event_fw_mem_ready, + ath11k_qmi_event_fw_ready, + ath11k_qmi_event_cold_boot_cal_start, + ath11k_qmi_event_cold_boot_cal_done, + ath11k_qmi_event_register_driver, + ath11k_qmi_event_unregister_driver, + ath11k_qmi_event_recovery, + ath11k_qmi_event_force_fw_assert, + ath11k_qmi_event_power_up, + ath11k_qmi_event_power_down, + ath11k_qmi_event_max, +}; + +struct ath11k_qmi_driver_event { + struct list_head list; + enum ath11k_qmi_event_type type; + void *data; +}; + +struct ath11k_qmi_ce_cfg { + const u8 *tgt_ce; + int tgt_ce_len; + const u8 *svc_to_ce_map; + int 
svc_to_ce_map_len; + const u8 *shadow_reg; + int shadow_reg_len; + u8 *shadow_reg_v2; + int shadow_reg_v2_len; +}; + +struct ath11k_qmi_event_msg { + struct list_head list; + enum ath11k_qmi_event_type type; +}; + +struct target_mem_chunk { + u32 size; + u32 type; + dma_addr_t paddr; + u32 vaddr; +}; + +struct target_info { + u32 chip_id; + u32 chip_family; + u32 board_id; + u32 soc_id; + u32 fw_version; + char fw_build_timestamp[ath11k_qmi_wlanfw_max_timestamp_len_v01 + 1]; + char fw_build_id[ath11k_qmi_wlanfw_max_build_id_len_v01 + 1]; +}; + +struct ath11k_qmi { + struct ath11k_base *ab; + struct qmi_handle handle; + struct sockaddr_qrtr sq; + struct work_struct event_work; + struct workqueue_struct *event_wq; + struct list_head event_list; + spinlock_t event_lock; /* spinlock for qmi event list */ + struct ath11k_qmi_ce_cfg ce_cfg; + struct target_mem_chunk target_mem[ath11k_qmi_wlanfw_max_num_mem_seg_v01]; + u32 mem_seg_count; + u32 target_mem_mode; + u8 cal_done; + struct target_info target; +}; + +#define qmi_wlanfw_host_cap_req_msg_v01_max_len 189 +#define qmi_wlanfw_host_cap_req_v01 0x0034 +#define qmi_wlanfw_host_cap_resp_msg_v01_max_len 7 +#define qmi_wlfw_host_cap_resp_v01 0x0034 +#define qmi_wlfw_max_num_gpio_v01 32 +#define qmi_ipq8074_fw_mem_mode 0xff +#define host_ddr_region_type 0x1 +#define bdf_mem_region_type 0x2 +#define caldb_mem_region_type 0x4 + +struct qmi_wlanfw_host_cap_req_msg_v01 { + u8 num_clients_valid; + u32 num_clients; + u8 wake_msi_valid; + u32 wake_msi; + u8 gpios_valid; + u32 gpios_len; + u32 gpios[qmi_wlfw_max_num_gpio_v01]; + u8 nm_modem_valid; + u8 nm_modem; + u8 bdf_support_valid; + u8 bdf_support; + u8 bdf_cache_support_valid; + u8 bdf_cache_support; + u8 m3_support_valid; + u8 m3_support; + u8 m3_cache_support_valid; + u8 m3_cache_support; + u8 cal_filesys_support_valid; + u8 cal_filesys_support; + u8 cal_cache_support_valid; + u8 cal_cache_support; + u8 cal_done_valid; + u8 cal_done; + u8 mem_bucket_valid; + u32 mem_bucket; 
+ u8 mem_cfg_mode_valid; + u8 mem_cfg_mode; +}; + +struct qmi_wlanfw_host_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define qmi_wlanfw_ind_register_req_msg_v01_max_len 54 +#define qmi_wlanfw_ind_register_req_v01 0x0020 +#define qmi_wlanfw_ind_register_resp_msg_v01_max_len 18 +#define qmi_wlanfw_ind_register_resp_v01 0x0020 +#define qmi_wlanfw_client_id 0x4b4e454c + +struct qmi_wlanfw_ind_register_req_msg_v01 { + u8 fw_ready_enable_valid; + u8 fw_ready_enable; + u8 initiate_cal_download_enable_valid; + u8 initiate_cal_download_enable; + u8 initiate_cal_update_enable_valid; + u8 initiate_cal_update_enable; + u8 msa_ready_enable_valid; + u8 msa_ready_enable; + u8 pin_connect_result_enable_valid; + u8 pin_connect_result_enable; + u8 client_id_valid; + u32 client_id; + u8 request_mem_enable_valid; + u8 request_mem_enable; + u8 fw_mem_ready_enable_valid; + u8 fw_mem_ready_enable; + u8 fw_init_done_enable_valid; + u8 fw_init_done_enable; + u8 rejuvenate_enable_valid; + u32 rejuvenate_enable; + u8 xo_cal_enable_valid; + u8 xo_cal_enable; + u8 cal_done_enable_valid; + u8 cal_done_enable; +}; + +struct qmi_wlanfw_ind_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 fw_status_valid; + u64 fw_status; +}; + +#define qmi_wlanfw_request_mem_ind_msg_v01_max_len 1124 +#define qmi_wlanfw_respond_mem_req_msg_v01_max_len 548 +#define qmi_wlanfw_respond_mem_resp_msg_v01_max_len 7 +#define qmi_wlanfw_request_mem_ind_v01 0x0035 +#define qmi_wlanfw_respond_mem_req_v01 0x0036 +#define qmi_wlanfw_respond_mem_resp_v01 0x0036 +#define qmi_wlanfw_max_num_mem_cfg_v01 2 + +struct qmi_wlanfw_mem_cfg_s_v01 { + u64 offset; + u32 size; + u8 secure_flag; +}; + +enum qmi_wlanfw_mem_type_enum_v01 { + wlanfw_mem_type_enum_min_val_v01 = int_min, + qmi_wlanfw_mem_type_msa_v01 = 0, + qmi_wlanfw_mem_type_ddr_v01 = 1, + qmi_wlanfw_mem_bdf_v01 = 2, + qmi_wlanfw_mem_m3_v01 = 3, + qmi_wlanfw_mem_cal_v01 = 4, + qmi_wlanfw_mem_dpd_v01 = 5, + wlanfw_mem_type_enum_max_val_v01 = 
int_max, +}; + +struct qmi_wlanfw_mem_seg_s_v01 { + u32 size; + enum qmi_wlanfw_mem_type_enum_v01 type; + u32 mem_cfg_len; + struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[qmi_wlanfw_max_num_mem_cfg_v01]; +}; + +struct qmi_wlanfw_request_mem_ind_msg_v01 { + u32 mem_seg_len; + struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ath11k_qmi_wlanfw_max_num_mem_seg_v01]; +}; + +struct qmi_wlanfw_mem_seg_resp_s_v01 { + u64 addr; + u32 size; + enum qmi_wlanfw_mem_type_enum_v01 type; + u8 restore; +}; + +struct qmi_wlanfw_respond_mem_req_msg_v01 { + u32 mem_seg_len; + struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ath11k_qmi_wlanfw_max_num_mem_seg_v01]; +}; + +struct qmi_wlanfw_respond_mem_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 { + char placeholder; +}; + +#define qmi_wlanfw_cap_req_msg_v01_max_len 0 +#define qmi_wlanfw_cap_resp_msg_v01_max_len 207 +#define qmi_wlanfw_cap_req_v01 0x0024 +#define qmi_wlanfw_cap_resp_v01 0x0024 + +enum qmi_wlanfw_pipedir_enum_v01 { + qmi_wlfw_pipedir_none_v01 = 0, + qmi_wlfw_pipedir_in_v01 = 1, + qmi_wlfw_pipedir_out_v01 = 2, + qmi_wlfw_pipedir_inout_v01 = 3, +}; + +struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 { + __le32 pipe_num; + __le32 pipe_dir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; +}; + +struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 { + __le32 service_id; + __le32 pipe_dir; + __le32 pipe_num; +}; + +struct qmi_wlanfw_shadow_reg_cfg_s_v01 { + u16 id; + u16 offset; +}; + +struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 { + u32 addr; +}; + +struct qmi_wlanfw_memory_region_info_s_v01 { + u64 region_addr; + u32 size; + u8 secure_flag; +}; + +struct qmi_wlanfw_rf_chip_info_s_v01 { + u32 chip_id; + u32 chip_family; +}; + +struct qmi_wlanfw_rf_board_info_s_v01 { + u32 board_id; +}; + +struct qmi_wlanfw_soc_info_s_v01 { + u32 soc_id; +}; + +struct qmi_wlanfw_fw_version_info_s_v01 { + u32 fw_version; + char fw_build_timestamp[ath11k_qmi_wlanfw_max_timestamp_len_v01 + 1]; +}; + +enum 
qmi_wlanfw_cal_temp_id_enum_v01 { + qmi_wlanfw_cal_temp_idx_0_v01 = 0, + qmi_wlanfw_cal_temp_idx_1_v01 = 1, + qmi_wlanfw_cal_temp_idx_2_v01 = 2, + qmi_wlanfw_cal_temp_idx_3_v01 = 3, + qmi_wlanfw_cal_temp_idx_4_v01 = 4, + qmi_wlanfw_cal_temp_id_max_v01 = 0xff, +}; + +struct qmi_wlanfw_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 chip_info_valid; + struct qmi_wlanfw_rf_chip_info_s_v01 chip_info; + u8 board_info_valid; + struct qmi_wlanfw_rf_board_info_s_v01 board_info; + u8 soc_info_valid; + struct qmi_wlanfw_soc_info_s_v01 soc_info; + u8 fw_version_info_valid; + struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info; + u8 fw_build_id_valid; + char fw_build_id[ath11k_qmi_wlanfw_max_build_id_len_v01 + 1]; + u8 num_macs_valid; + u8 num_macs; +}; + +struct qmi_wlanfw_cap_req_msg_v01 { + char placeholder; +}; + +#define qmi_wlanfw_bdf_download_req_msg_v01_max_len 6182 +#define qmi_wlanfw_bdf_download_resp_msg_v01_max_len 7 +#define qmi_wlanfw_bdf_download_resp_v01 0x0025 +#define qmi_wlanfw_bdf_download_req_v01 0x0025 +/* todo: need to check with mcl and fw team that data can be pointer and + * can be last element in structure + */ +struct qmi_wlanfw_bdf_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum qmi_wlanfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[qmi_wlanfw_max_data_size_v01]; + u8 end_valid; + u8 end; + u8 bdf_type_valid; + u8 bdf_type; + +}; + +struct qmi_wlanfw_bdf_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define qmi_wlanfw_m3_info_req_msg_v01_max_msg_len 18 +#define qmi_wlanfw_m3_info_resp_msg_v01_max_msg_len 7 +#define qmi_wlanfw_m3_info_resp_v01 0x003c +#define qmi_wlanfw_m3_info_req_v01 0x003c + +struct qmi_wlanfw_m3_info_req_msg_v01 { + u64 addr; + u32 size; +}; + +struct qmi_wlanfw_m3_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define qmi_wlanfw_wlan_mode_req_msg_v01_max_len 
11 +#define qmi_wlanfw_wlan_mode_resp_msg_v01_max_len 7 +#define qmi_wlanfw_wlan_cfg_req_msg_v01_max_len 803 +#define qmi_wlanfw_wlan_cfg_resp_msg_v01_max_len 7 +#define qmi_wlanfw_wlan_mode_req_v01 0x0022 +#define qmi_wlanfw_wlan_mode_resp_v01 0x0022 +#define qmi_wlanfw_wlan_cfg_req_v01 0x0023 +#define qmi_wlanfw_wlan_cfg_resp_v01 0x0023 +#define qmi_wlanfw_max_str_len_v01 16 +#define qmi_wlanfw_max_num_ce_v01 12 +#define qmi_wlanfw_max_num_svc_v01 24 +#define qmi_wlanfw_max_num_shadow_reg_v01 24 +#define qmi_wlanfw_max_num_shadow_reg_v2_v01 36 + +struct qmi_wlanfw_wlan_mode_req_msg_v01 { + u32 mode; + u8 hw_debug_valid; + u8 hw_debug; +}; + +struct qmi_wlanfw_wlan_mode_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +struct qmi_wlanfw_wlan_cfg_req_msg_v01 { + u8 host_version_valid; + char host_version[qmi_wlanfw_max_str_len_v01 + 1]; + u8 tgt_cfg_valid; + u32 tgt_cfg_len; + struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 + tgt_cfg[qmi_wlanfw_max_num_ce_v01]; + u8 svc_cfg_valid; + u32 svc_cfg_len; + struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 + svc_cfg[qmi_wlanfw_max_num_svc_v01]; + u8 shadow_reg_valid; + u32 shadow_reg_len; + struct qmi_wlanfw_shadow_reg_cfg_s_v01 + shadow_reg[qmi_wlanfw_max_num_shadow_reg_v01]; + u8 shadow_reg_v2_valid; + u32 shadow_reg_v2_len; + struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 + shadow_reg_v2[qmi_wlanfw_max_num_shadow_reg_v2_v01]; +}; + +struct qmi_wlanfw_wlan_cfg_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +int ath11k_qmi_firmware_start(struct ath11k_base *ab, + u32 mode); +void ath11k_qmi_firmware_stop(struct ath11k_base *ab); +void ath11k_qmi_event_work(struct work_struct *work); +void ath11k_qmi_msg_recv_work(struct work_struct *work); +void ath11k_qmi_deinit_service(struct ath11k_base *ab); +int ath11k_qmi_init_service(struct ath11k_base *ab); + +#endif diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/reg.c +// 
spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ +#include "core.h" +#include "debug.h" + +/* world regdom to be used in case default regd from fw is unavailable */ +#define ath11k_2ghz_ch01_11 reg_rule(2412 - 10, 2462 + 10, 40, 0, 20, 0) +#define ath11k_5ghz_5150_5350 reg_rule(5150 - 10, 5350 + 10, 80, 0, 30,\ + nl80211_rrf_no_ir) +#define ath11k_5ghz_5725_5850 reg_rule(5725 - 10, 5850 + 10, 80, 0, 30,\ + nl80211_rrf_no_ir) + +#define etsi_weather_radar_band_low 5590 +#define etsi_weather_radar_band_high 5650 +#define etsi_weather_radar_band_cac_timeout 600000 + +static const struct ieee80211_regdomain ath11k_world_regd = { + .n_reg_rules = 3, + .alpha2 = "00", + .reg_rules = { + ath11k_2ghz_ch01_11, + ath11k_5ghz_5150_5350, + ath11k_5ghz_5725_5850, + } +}; + +static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2) +{ + const struct ieee80211_regdomain *regd; + + regd = rcu_dereference_rtnl(ar->hw->wiphy->regd); + /* this can happen during wiphy registration where the previous + * user request is received before we update the regd received + * from firmware. + */ + if (!regd) + return true; + + return memcmp(regd->alpha2, alpha2, 2) != 0; +} + +static void +ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct wmi_init_country_params init_country_param; + struct ath11k *ar = hw->priv; + int ret; + + ath11k_dbg(ar->ab, ath11k_dbg_reg, + "regulatory notification received for %s ", wiphy_name(wiphy)); + + /* currently supporting only general user hints. cell base user + * hints to be handled later. 
+ * hints from other sources like core, beacons are not expected for + * self managed wiphy's + */ + if (!(request->initiator == nl80211_regdom_set_by_user && + request->user_reg_hint_type == nl80211_user_reg_hint_user)) { + ath11k_warn(ar->ab, "unexpected regulatory event for this wiphy "); + return; + } + + if (!is_enabled(config_ath_reg_dynamic_user_reg_hints)) { + ath11k_dbg(ar->ab, ath11k_dbg_reg, + "country setting is not allowed "); + return; + } + + if (!ath11k_regdom_changes(ar, request->alpha2)) { + ath11k_dbg(ar->ab, ath11k_dbg_reg, "country is already set "); + return; + } + + /* set the country code to the firmware and wait for + * the wmi_reg_chan_list_cc event for updating the + * reg info + */ + init_country_param.flags = alpha_is_set; + memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2); + + ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param); + if (ret) + ath11k_warn(ar->ab, + "init country code set to fw failed : %d ", ret); +} + +int ath11k_reg_update_chan_list(struct ath11k *ar) +{ + struct ieee80211_supported_band **bands; + struct scan_chan_list_params *params; + struct ieee80211_channel *channel; + struct ieee80211_hw *hw = ar->hw; + struct channel_param *ch; + enum nl80211_band band; + int num_channels = 0; + int params_len; + int i, ret; + + bands = hw->wiphy->bands; + for (band = 0; band < num_nl80211_bands; band++) { + if (!bands[band]) + continue; + + for (i = 0; i < bands[band]->n_channels; i++) { + if (bands[band]->channels[i].flags & + ieee80211_chan_disabled) + continue; + + num_channels++; + } + } + + if (warn_on(!num_channels)) + return -einval; + + params_len = sizeof(struct scan_chan_list_params) + + num_channels * sizeof(struct channel_param); + params = kzalloc(params_len, gfp_kernel); + + if (!params) + return -enomem; + + params->pdev_id = ar->pdev->pdev_id; + params->nallchans = num_channels; + + ch = params->ch_param; + + for (band = 0; band < num_nl80211_bands; band++) { + if (!bands[band]) + 
continue; + + for (i = 0; i < bands[band]->n_channels; i++) { + channel = &bands[band]->channels[i]; + + if (channel->flags & ieee80211_chan_disabled) + continue; + + /* todo: set to true/false based on some condition? */ + ch->allow_ht = true; + ch->allow_vht = true; + + ch->dfs_set = + !!(channel->flags & ieee80211_chan_radar); + ch->is_chan_passive = !!(channel->flags & + ieee80211_chan_no_ir); + ch->is_chan_passive |= ch->dfs_set; + ch->mhz = channel->center_freq; + ch->cfreq1 = channel->center_freq; + ch->minpower = 0; + ch->maxpower = channel->max_power * 2; + ch->maxregpower = channel->max_reg_power * 2; + ch->antennamax = channel->max_antenna_gain * 2; + + /* todo: use appropriate phymodes */ + if (channel->band == nl80211_band_2ghz) + ch->phy_mode = mode_11g; + else + ch->phy_mode = mode_11a; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d ", + i, params->nallchans, + ch->mhz, ch->maxpower, ch->maxregpower, + ch->antennamax, ch->phy_mode); + + ch++; + /* todo: use quarrter/half rate, cfreq12, dfs_cfreq2 + * set_agile, reg_class_idx + */ + } + } + + ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params); + kfree(params); + + return ret; +} + +static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig, + struct ieee80211_regdomain *regd_copy) +{ + u8 i; + + /* the caller should have checked error conditions */ + memcpy(regd_copy, regd_orig, sizeof(*regd_orig)); + + for (i = 0; i < regd_orig->n_reg_rules; i++) + memcpy(®d_copy->reg_rules[i], ®d_orig->reg_rules[i], + sizeof(struct ieee80211_reg_rule)); +} + +int ath11k_regd_update(struct ath11k *ar, bool init) +{ + struct ieee80211_regdomain *regd, *regd_copy = null; + int ret, regd_len, pdev_id; + struct ath11k_base *ab; + + ab = ar->ab; + pdev_id = ar->pdev_idx; + + spin_lock(&ab->base_lock); + + if (init) { + /* apply the regd received during init through + * wmi_reg_chan_list_cc event. 
in case of failure to + * receive the regd, initialize with a default world + * regulatory. + */ + if (ab->default_regd[pdev_id]) { + regd = ab->default_regd[pdev_id]; + } else { + ath11k_warn(ab, + "failed to receive default regd during init "); + regd = (struct ieee80211_regdomain *)&ath11k_world_regd; + } + } else { + regd = ab->new_regd[pdev_id]; + } + + if (!regd) { + ret = -einval; + spin_unlock(&ab->base_lock); + goto err; + } + + regd_len = sizeof(*regd) + (regd->n_reg_rules * + sizeof(struct ieee80211_reg_rule)); + + regd_copy = kzalloc(regd_len, gfp_atomic); + if (regd_copy) + ath11k_copy_regd(regd, regd_copy); + + spin_unlock(&ab->base_lock); + + if (!regd_copy) { + ret = -enomem; + goto err; + } + + rtnl_lock(); + ret = regulatory_set_wiphy_regd_sync_rtnl(ar->hw->wiphy, regd_copy); + rtnl_unlock(); + + kfree(regd_copy); + + if (ret) + goto err; + + if (ar->state == ath11k_state_on) { + ret = ath11k_reg_update_chan_list(ar); + if (ret) + goto err; + } + + return 0; +err: + ath11k_warn(ab, "failed to perform regd update : %d ", ret); + return ret; +} + +static enum nl80211_dfs_regions +ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region) +{ + switch (dfs_region) { + case ath11k_dfs_reg_fcc: + case ath11k_dfs_reg_cn: + return nl80211_dfs_fcc; + case ath11k_dfs_reg_etsi: + case ath11k_dfs_reg_kr: + return nl80211_dfs_etsi; + case ath11k_dfs_reg_mkk: + return nl80211_dfs_jp; + default: + return nl80211_dfs_unset; + } +} + +static u32 ath11k_map_fw_reg_flags(u16 reg_flags) +{ + u32 flags = 0; + + if (reg_flags & regulatory_chan_no_ir) + flags = nl80211_rrf_no_ir; + + if (reg_flags & regulatory_chan_radar) + flags |= nl80211_rrf_dfs; + + if (reg_flags & regulatory_chan_no_ofdm) + flags |= nl80211_rrf_no_ofdm; + + if (reg_flags & regulatory_chan_indoor_only) + flags |= nl80211_rrf_no_outdoor; + + if (reg_flags & regulatory_chan_no_ht40) + flags |= nl80211_rrf_no_ht40; + + if (reg_flags & regulatory_chan_no_80mhz) + flags |= nl80211_rrf_no_80mhz; + + if 
(reg_flags & regulatory_chan_no_160mhz) + flags |= nl80211_rrf_no_160mhz; + + return flags; +} + +static bool +ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1, + struct ieee80211_reg_rule *rule2) +{ + u32 start_freq1, end_freq1; + u32 start_freq2, end_freq2; + + start_freq1 = rule1->freq_range.start_freq_khz; + start_freq2 = rule2->freq_range.start_freq_khz; + + end_freq1 = rule1->freq_range.end_freq_khz; + end_freq2 = rule2->freq_range.end_freq_khz; + + if ((start_freq1 >= start_freq2 && + start_freq1 < end_freq2) || + (start_freq2 > start_freq1 && + start_freq2 < end_freq1)) + return true; + + /* todo: should we restrict intersection feasibility + * based on min bandwidth of the intersected region also, + * say the intersected rule should have a min bandwidth + * of 20mhz? + */ + + return false; +} + +static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1, + struct ieee80211_reg_rule *rule2, + struct ieee80211_reg_rule *new_rule) +{ + u32 start_freq1, end_freq1; + u32 start_freq2, end_freq2; + u32 freq_diff, max_bw; + + start_freq1 = rule1->freq_range.start_freq_khz; + start_freq2 = rule2->freq_range.start_freq_khz; + + end_freq1 = rule1->freq_range.end_freq_khz; + end_freq2 = rule2->freq_range.end_freq_khz; + + new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1, + start_freq2); + new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2); + + freq_diff = new_rule->freq_range.end_freq_khz - + new_rule->freq_range.start_freq_khz; + max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz, + rule2->freq_range.max_bandwidth_khz); + new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff); + + new_rule->power_rule.max_antenna_gain = + min_t(u32, rule1->power_rule.max_antenna_gain, + rule2->power_rule.max_antenna_gain); + + new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp, + rule2->power_rule.max_eirp); + + /* use the flags of both the rules */ + new_rule->flags = rule1->flags | 
rule2->flags; + + /* to be safe, lts use the max cac timeout of both rules */ + new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms, + rule2->dfs_cac_ms); +} + +static struct ieee80211_regdomain * +ath11k_regd_intersect(struct ieee80211_regdomain *default_regd, + struct ieee80211_regdomain *curr_regd) +{ + u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules; + struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule; + struct ieee80211_regdomain *new_regd = null; + u8 i, j, k; + + num_old_regd_rules = default_regd->n_reg_rules; + num_curr_regd_rules = curr_regd->n_reg_rules; + num_new_regd_rules = 0; + + /* find the number of intersecting rules to allocate new regd memory */ + for (i = 0; i < num_old_regd_rules; i++) { + old_rule = default_regd->reg_rules + i; + for (j = 0; j < num_curr_regd_rules; j++) { + curr_rule = curr_regd->reg_rules + j; + + if (ath11k_reg_can_intersect(old_rule, curr_rule)) + num_new_regd_rules++; + } + } + + if (!num_new_regd_rules) + return null; + + new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules * + sizeof(struct ieee80211_reg_rule)), + gfp_atomic); + + if (!new_regd) + return null; + + /* we set the new country and dfs region directly and only trim + * the freq, power, antenna gain by intersecting with the + * default regdomain. also max of the dfs cac timeout is selected. 
+ */ + new_regd->n_reg_rules = num_new_regd_rules; + memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2)); + new_regd->dfs_region = curr_regd->dfs_region; + new_rule = new_regd->reg_rules; + + for (i = 0, k = 0; i < num_old_regd_rules; i++) { + old_rule = default_regd->reg_rules + i; + for (j = 0; j < num_curr_regd_rules; j++) { + curr_rule = curr_regd->reg_rules + j; + + if (ath11k_reg_can_intersect(old_rule, curr_rule)) + ath11k_reg_intersect_rules(old_rule, curr_rule, + (new_rule + k++)); + } + } + return new_regd; +} + +static const char * +ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region) +{ + switch (dfs_region) { + case nl80211_dfs_fcc: + return "fcc"; + case nl80211_dfs_etsi: + return "etsi"; + case nl80211_dfs_jp: + return "jp"; + default: + return "unset"; + } +} + +static u16 +ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw) +{ + u16 bw; + + bw = end_freq - start_freq; + bw = min_t(u16, bw, max_bw); + + if (bw >= 80 && bw < 160) + bw = 80; + else if (bw >= 40 && bw < 80) + bw = 40; + else if (bw < 40) + bw = 20; + + return bw; +} + +static void +ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq, + u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr, + u32 reg_flags) +{ + reg_rule->freq_range.start_freq_khz = mhz_to_khz(start_freq); + reg_rule->freq_range.end_freq_khz = mhz_to_khz(end_freq); + reg_rule->freq_range.max_bandwidth_khz = mhz_to_khz(bw); + reg_rule->power_rule.max_antenna_gain = dbi_to_mbi(ant_gain); + reg_rule->power_rule.max_eirp = dbm_to_mbm(reg_pwr); + reg_rule->flags = reg_flags; +} + +static void +ath11k_reg_update_weather_radar_band(struct ath11k_base *ab, + struct ieee80211_regdomain *regd, + struct cur_reg_rule *reg_rule, + u8 *rule_idx, u32 flags, u16 max_bw) +{ + u32 end_freq; + u16 bw; + u8 i; + + i = *rule_idx; + + bw = ath11k_reg_adjust_bw(reg_rule->start_freq, + etsi_weather_radar_band_low, max_bw); + + ath11k_reg_update_rule(regd->reg_rules + i, 
reg_rule->start_freq, + etsi_weather_radar_band_low, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + ath11k_dbg(ab, ath11k_dbg_reg, + " %d. (%d - %d @ %d) (%d, %d) (%d ms) (flags %d) ", + i + 1, reg_rule->start_freq, etsi_weather_radar_band_low, + bw, reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, + flags); + + if (reg_rule->end_freq > etsi_weather_radar_band_high) + end_freq = etsi_weather_radar_band_high; + else + end_freq = reg_rule->end_freq; + + bw = ath11k_reg_adjust_bw(etsi_weather_radar_band_low, end_freq, + max_bw); + + i++; + + ath11k_reg_update_rule(regd->reg_rules + i, + etsi_weather_radar_band_low, end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + regd->reg_rules[i].dfs_cac_ms = etsi_weather_radar_band_cac_timeout; + + ath11k_dbg(ab, ath11k_dbg_reg, + " %d. (%d - %d @ %d) (%d, %d) (%d ms) (flags %d) ", + i + 1, etsi_weather_radar_band_low, end_freq, + bw, reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, + flags); + + if (end_freq == reg_rule->end_freq) { + regd->n_reg_rules--; + *rule_idx = i; + return; + } + + bw = ath11k_reg_adjust_bw(etsi_weather_radar_band_high, + reg_rule->end_freq, max_bw); + + i++; + + ath11k_reg_update_rule(regd->reg_rules + i, etsi_weather_radar_band_high, + reg_rule->end_freq, bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + ath11k_dbg(ab, ath11k_dbg_reg, + " %d. 
(%d - %d @ %d) (%d, %d) (%d ms) (flags %d) ", + i + 1, etsi_weather_radar_band_high, reg_rule->end_freq, + bw, reg_rule->ant_gain, reg_rule->reg_power, + regd->reg_rules[i].dfs_cac_ms, + flags); + + *rule_idx = i; +} + +struct ieee80211_regdomain * +ath11k_reg_build_regd(struct ath11k_base *ab, + struct cur_regulatory_info *reg_info, bool intersect) +{ + struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = null; + struct cur_reg_rule *reg_rule; + u8 i = 0, j = 0; + u8 num_rules; + u16 max_bw; + u32 flags; + char alpha2[3]; + + num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules; + + if (!num_rules) + goto ret; + + /* add max additional rules to accommodate weather radar band */ + if (reg_info->dfs_region == ath11k_dfs_reg_etsi) + num_rules += 2; + + tmp_regd = kzalloc(sizeof(*tmp_regd) + + (num_rules * sizeof(struct ieee80211_reg_rule)), + gfp_atomic); + if (!tmp_regd) + goto ret; + + tmp_regd->n_reg_rules = num_rules; + memcpy(tmp_regd->alpha2, reg_info->alpha2, reg_alpha2_len + 1); + memcpy(alpha2, reg_info->alpha2, reg_alpha2_len + 1); + alpha2[2] = ''; + tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region); + + ath11k_dbg(ab, ath11k_dbg_reg, + " country %s, cfg regdomain %s fw regdomain %d, num_reg_rules %d ", + alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region), + reg_info->dfs_region, num_rules); + /* update reg_rules[] below. 
firmware is expected to + * send these rules in order(2g rules first and then 5g) + */ + for (; i < tmp_regd->n_reg_rules; i++) { + if (reg_info->num_2g_reg_rules && + (i < reg_info->num_2g_reg_rules)) { + reg_rule = reg_info->reg_rules_2g_ptr + i; + max_bw = min_t(u16, reg_rule->max_bw, + reg_info->max_bw_2g); + flags = 0; + } else if (reg_info->num_5g_reg_rules && + (j < reg_info->num_5g_reg_rules)) { + reg_rule = reg_info->reg_rules_5g_ptr + j++; + max_bw = min_t(u16, reg_rule->max_bw, + reg_info->max_bw_5g); + + /* fw doesn't pass nl80211_rrf_auto_bw flag for + * bw auto correction, we can enable this by default + * for all 5g rules here. the regulatory core performs + * bw correction if required and applies flags as + * per other bw rule flags we pass from here + */ + flags = nl80211_rrf_auto_bw; + } else { + break; + } + + flags |= ath11k_map_fw_reg_flags(reg_rule->flags); + + ath11k_reg_update_rule(tmp_regd->reg_rules + i, + reg_rule->start_freq, + reg_rule->end_freq, max_bw, + reg_rule->ant_gain, reg_rule->reg_power, + flags); + + /* update dfs cac timeout if the dfs domain is etsi and the + * new rule covers weather radar band. + * default value of '0' corresponds to 60s timeout, so no + * need to update that for other rules. + */ + if (flags & nl80211_rrf_dfs && + reg_info->dfs_region == ath11k_dfs_reg_etsi && + (reg_rule->end_freq > etsi_weather_radar_band_low && + reg_rule->start_freq < etsi_weather_radar_band_high)){ + ath11k_reg_update_weather_radar_band(ab, tmp_regd, + reg_rule, &i, + flags, max_bw); + continue; + } + + ath11k_dbg(ab, ath11k_dbg_reg, + " %d. (%d - %d @ %d) (%d, %d) (%d ms) (flags %d) ", + i + 1, reg_rule->start_freq, reg_rule->end_freq, + max_bw, reg_rule->ant_gain, reg_rule->reg_power, + tmp_regd->reg_rules[i].dfs_cac_ms, + flags); + } + + if (intersect) { + default_regd = ab->default_regd[reg_info->phy_id]; + + /* get a new regd by intersecting the received regd with + * our default regd. 
+ */ + new_regd = ath11k_regd_intersect(default_regd, tmp_regd); + kfree(tmp_regd); + if (!new_regd) { + ath11k_warn(ab, "unable to create intersected regdomain "); + goto ret; + } + } else { + new_regd = tmp_regd; + } + +ret: + return new_regd; +} + +void ath11k_regd_update_work(struct work_struct *work) +{ + struct ath11k *ar = container_of(work, struct ath11k, + regd_update_work); + int ret; + + ret = ath11k_regd_update(ar, false); + if (ret) { + /* firmware has already moved to the new regd. we need + * to maintain channel consistency across fw, host driver + * and userspace. hence as a fallback mechanism we can set + * the prev or default country code to the firmware. + */ + /* todo: implement fallback mechanism */ + } +} + +void ath11k_reg_init(struct ath11k *ar) +{ + ar->hw->wiphy->regulatory_flags = regulatory_wiphy_self_managed; + ar->hw->wiphy->reg_notifier = ath11k_reg_notifier; +} + +void ath11k_reg_free(struct ath11k_base *ab) +{ + int i; + + for (i = 0; i < max_radios; i++) { + kfree(ab->default_regd[i]); + kfree(ab->new_regd[i]); + } +} diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/reg.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2019 the linux foundation. all rights reserved. 
+ */ + +#ifndef ath11k_reg_h +#define ath11k_reg_h + +#include <linux/kernel.h> +#include <net/regulatory.h> + +struct ath11k_base; +struct ath11k; + +/* dfs regdomains supported by firmware */ +enum ath11k_dfs_region { + ath11k_dfs_reg_unset, + ath11k_dfs_reg_fcc, + ath11k_dfs_reg_etsi, + ath11k_dfs_reg_mkk, + ath11k_dfs_reg_cn, + ath11k_dfs_reg_kr, + ath11k_dfs_reg_undef, +}; + +/* ath11k regulatory api's */ +void ath11k_reg_init(struct ath11k *ar); +void ath11k_reg_free(struct ath11k_base *ab); +void ath11k_regd_update_work(struct work_struct *work); +struct ieee80211_regdomain * +ath11k_reg_build_regd(struct ath11k_base *ab, + struct cur_regulatory_info *reg_info, bool intersect); +int ath11k_regd_update(struct ath11k *ar, bool init); +int ath11k_reg_update_chan_list(struct ath11k *ar); +#endif diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/rx_desc.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ +#ifndef ath11k_rx_desc_h +#define ath11k_rx_desc_h + +enum rx_desc_rxpcu_filter { + rx_desc_rxpcu_filter_pass, + rx_desc_rxpcu_filter_monitor_client, + rx_desc_rxpcu_filter_monitor_other, +}; + +/* rxpcu_filter_pass + * this mpdu passed the normal frame filter programming of rxpcu. + * + * rxpcu_filter_monitor_client + * this mpdu did not pass the regular frame filter and would + * have been dropped, were it not for the frame fitting into the + * 'monitor_client' category. + * + * rxpcu_filter_monitor_other + * this mpdu did not pass the regular frame filter and also did + * not pass the rxpcu_monitor_client filter. it would have been + * dropped accept that it did pass the 'monitor_other' category. 
+ */ + +#define rx_desc_info0_rxpcu_mpdu_fitler genmask(1, 0) +#define rx_desc_info0_sw_frame_grp_id genmask(8, 2) + +enum rx_desc_sw_frame_grp_id { + rx_desc_sw_frame_grp_id_ndp_frame, + rx_desc_sw_frame_grp_id_mcast_data, + rx_desc_sw_frame_grp_id_ucast_data, + rx_desc_sw_frame_grp_id_null_data, + rx_desc_sw_frame_grp_id_mgmt_0000, + rx_desc_sw_frame_grp_id_mgmt_0001, + rx_desc_sw_frame_grp_id_mgmt_0010, + rx_desc_sw_frame_grp_id_mgmt_0011, + rx_desc_sw_frame_grp_id_mgmt_0100, + rx_desc_sw_frame_grp_id_mgmt_0101, + rx_desc_sw_frame_grp_id_mgmt_0110, + rx_desc_sw_frame_grp_id_mgmt_0111, + rx_desc_sw_frame_grp_id_mgmt_1000, + rx_desc_sw_frame_grp_id_mgmt_1001, + rx_desc_sw_frame_grp_id_mgmt_1010, + rx_desc_sw_frame_grp_id_mgmt_1011, + rx_desc_sw_frame_grp_id_mgmt_1100, + rx_desc_sw_frame_grp_id_mgmt_1101, + rx_desc_sw_frame_grp_id_mgmt_1110, + rx_desc_sw_frame_grp_id_mgmt_1111, + rx_desc_sw_frame_grp_id_ctrl_0000, + rx_desc_sw_frame_grp_id_ctrl_0001, + rx_desc_sw_frame_grp_id_ctrl_0010, + rx_desc_sw_frame_grp_id_ctrl_0011, + rx_desc_sw_frame_grp_id_ctrl_0100, + rx_desc_sw_frame_grp_id_ctrl_0101, + rx_desc_sw_frame_grp_id_ctrl_0110, + rx_desc_sw_frame_grp_id_ctrl_0111, + rx_desc_sw_frame_grp_id_ctrl_1000, + rx_desc_sw_frame_grp_id_ctrl_1001, + rx_desc_sw_frame_grp_id_ctrl_1010, + rx_desc_sw_frame_grp_id_ctrl_1011, + rx_desc_sw_frame_grp_id_ctrl_1100, + rx_desc_sw_frame_grp_id_ctrl_1101, + rx_desc_sw_frame_grp_id_ctrl_1110, + rx_desc_sw_frame_grp_id_ctrl_1111, + rx_desc_sw_frame_grp_id_unsupported, + rx_desc_sw_frame_grp_id_phy_err, +}; + +enum rx_desc_decap_type { + rx_desc_decap_type_raw, + rx_desc_decap_type_native_wifi, + rx_desc_decap_type_ethernet2_dix, + rx_desc_decap_type_8023, +}; + +enum rx_desc_decrypt_status_code { + rx_desc_decrypt_status_code_ok, + rx_desc_decrypt_status_code_unprotected_frame, + rx_desc_decrypt_status_code_data_err, + rx_desc_decrypt_status_code_key_invalid, + rx_desc_decrypt_status_code_peer_entry_invalid, + 
rx_desc_decrypt_status_code_other, +}; + +#define rx_attention_info1_first_mpdu bit(0) +#define rx_attention_info1_rsvd_1a bit(1) +#define rx_attention_info1_mcast_bcast bit(2) +#define rx_attention_info1_ast_idx_not_found bit(3) +#define rx_attention_info1_ast_idx_timedout bit(4) +#define rx_attention_info1_power_mgmt bit(5) +#define rx_attention_info1_non_qos bit(6) +#define rx_attention_info1_null_data bit(7) +#define rx_attention_info1_mgmt_type bit(8) +#define rx_attention_info1_ctrl_type bit(9) +#define rx_attention_info1_more_data bit(10) +#define rx_attention_info1_eosp bit(11) +#define rx_attention_info1_a_msdu_error bit(12) +#define rx_attention_info1_fragment bit(13) +#define rx_attention_info1_order bit(14) +#define rx_attention_info1_cce_match bit(15) +#define rx_attention_info1_overflow_err bit(16) +#define rx_attention_info1_msdu_len_err bit(17) +#define rx_attention_info1_tcp_udp_cksum_fail bit(18) +#define rx_attention_info1_ip_cksum_fail bit(19) +#define rx_attention_info1_sa_idx_invalid bit(20) +#define rx_attention_info1_da_idx_invalid bit(21) +#define rx_attention_info1_rsvd_1b bit(22) +#define rx_attention_info1_rx_in_tx_decrypt_byp bit(23) +#define rx_attention_info1_encrypt_required bit(24) +#define rx_attention_info1_directed bit(25) +#define rx_attention_info1_buffer_fragment bit(26) +#define rx_attention_info1_mpdu_len_err bit(27) +#define rx_attention_info1_tkip_mic_err bit(28) +#define rx_attention_info1_decrypt_err bit(29) +#define rx_attention_info1_undecrypt_frame_err bit(30) +#define rx_attention_info1_fcs_err bit(31) + +#define rx_attention_info2_flow_idx_timeout bit(0) +#define rx_attention_info2_flow_idx_invalid bit(1) +#define rx_attention_info2_wifi_parser_err bit(2) +#define rx_attention_info2_amsdu_parser_err bit(3) +#define rx_attention_info2_sa_idx_timeout bit(4) +#define rx_attention_info2_da_idx_timeout bit(5) +#define rx_attention_info2_msdu_limit_err bit(6) +#define rx_attention_info2_da_is_valid bit(7) +#define 
rx_attention_info2_da_is_mcbc bit(8) +#define rx_attention_info2_sa_is_valid bit(9) +#define rx_attention_info2_dcrypt_status_code genmask(12, 10) +#define rx_attention_info2_rx_bitmap_not_upded bit(13) +#define rx_attention_info2_msdu_done bit(31) + +struct rx_attention { + __le16 info0; + __le16 phy_ppdu_id; + __le32 info1; + __le32 info2; +} __packed; + +/* rx_attention + * + * rxpcu_mpdu_filter_in_category + * field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. values + * are defined in enum %rx_desc_rxpcu_filter_*. + * + * sw_frame_group_id + * sw processes frames based on certain classifications. values + * are defined in enum %rx_desc_sw_frame_grp_id_*. + * + * phy_ppdu_id + * a ppdu counter value that phy increments for every ppdu + * received. the counter value wraps around. + * + * first_mpdu + * indicates the first msdu of the ppdu. if both first_mpdu + * and last_mpdu are set in the msdu then this is a not an + * a-mpdu frame but a stand alone mpdu. interior mpdu in an + * a-mpdu shall have both first_mpdu and last_mpdu bits set to + * 0. the ppdu start status will only be valid when this bit + * is set. + * + * mcast_bcast + * multicast / broadcast indicator. only set when the mac + * address 1 bit 0 is set indicating mcast/bcast and the bssid + * matches one of the 4 bssid registers. only set when + * first_msdu is set. + * + * ast_index_not_found + * only valid when first_msdu is set. indicates no ast matching + * entries within the the max search count. + * + * ast_index_timeout + * only valid when first_msdu is set. indicates an unsuccessful + * search in the address search table due to timeout. + * + * power_mgmt + * power management bit set in the 802.11 header. only set + * when first_msdu is set. + * + * non_qos + * set if packet is not a non-qos data frame. only set when + * first_msdu is set. 
+ * + * null_data + * set if frame type indicates either null data or qos null + * data format. only set when first_msdu is set. + * + * mgmt_type + * set if packet is a management packet. only set when + * first_msdu is set. + * + * ctrl_type + * set if packet is a control packet. only set when first_msdu + * is set. + * + * more_data + * set if more bit in frame control is set. only set when + * first_msdu is set. + * + * eosp + * set if the eosp (end of service period) bit in the qos + * control field is set. only set when first_msdu is set. + * + * a_msdu_error + * set if number of msdus in a-msdu is above a threshold or if the + * size of the msdu is invalid. this receive buffer will contain + * all of the remainder of msdus in this mpdu w/o decapsulation. + * + * fragment + * indicates that this is an 802.11 fragment frame. this is + * set when either the more_frag bit is set in the frame + * control or the fragment number is not zero. only set when + * first_msdu is set. + * + * order + * set if the order bit in the frame control is set. only set + * when first_msdu is set. + * + * cce_match + * indicates that this status has a corresponding msdu that + * requires fw processing. the ole will have classification + * ring mask registers which will indicate the ring(s) for + * packets and descriptors which need fw attention. + * + * overflow_err + * pcu receive fifo does not have enough space to store the + * full receive packet. enough space is reserved in the + * receive fifo for the status is written. this mpdu remaining + * packets in the ppdu will be filtered and no ack response + * will be transmitted. + * + * msdu_length_err + * indicates that the msdu length from the 802.3 encapsulated + * length field extends beyond the mpdu boundary. + * + * tcp_udp_chksum_fail + * indicates that the computed checksum (tcp_udp_chksum) did + * not match the checksum in the tcp/udp header. 
+ * + * ip_chksum_fail + * indicates that the computed checksum did not match the + * checksum in the ip header. + * + * sa_idx_invalid + * indicates no matching entry was found in the address search + * table for the source mac address. + * + * da_idx_invalid + * indicates no matching entry was found in the address search + * table for the destination mac address. + * + * rx_in_tx_decrypt_byp + * indicates that rx packet is not decrypted as crypto is busy + * with tx packet processing. + * + * encrypt_required + * indicates that this data type frame is not encrypted even if + * the policy for this mpdu requires encryption as indicated in + * the peer table key type. + * + * directed + * mpdu is a directed packet which means that the ra matched + * our sta addresses. in proxysta it means that the ta matched + * an entry in our address search table with the corresponding + * 'no_ack' bit is the address search entry cleared. + * + * buffer_fragment + * indicates that at least one of the rx buffers has been + * fragmented. if set the fw should look at the rx_frag_info + * descriptor described below. + * + * mpdu_length_err + * indicates that the mpdu was pre-maturely terminated + * resulting in a truncated mpdu. don't trust the mpdu length + * field. + * + * tkip_mic_err + * indicates that the mpdu michael integrity check failed + * + * decrypt_err + * indicates that the mpdu decrypt integrity check failed + * + * fcs_err + * indicates that the mpdu fcs check failed + * + * flow_idx_timeout + * indicates an unsuccessful flow search due to the expiring of + * the search timer. + * + * flow_idx_invalid + * flow id is not valid. + * + * amsdu_parser_error + * a-msdu could not be properly de-agregated. + * + * sa_idx_timeout + * indicates an unsuccessful search for the source mac address + * due to the expiring of the search timer. + * + * da_idx_timeout + * indicates an unsuccessful search for the destination mac + * address due to the expiring of the search timer. 
+ * + * msdu_limit_error + * indicates that the msdu threshold was exceeded and thus + * all the rest of the msdus will not be scattered and will not + * be decasulated but will be dma'ed in raw format as a single + * msdu buffer. + * + * da_is_valid + * indicates that ole found a valid da entry. + * + * da_is_mcbc + * field only valid if da_is_valid is set. indicates the da address + * was a multicast or broadcast address. + * + * sa_is_valid + * indicates that ole found a valid sa entry. + * + * decrypt_status_code + * field provides insight into the decryption performed. values are + * defined in enum %rx_desc_decrypt_status_code*. + * + * rx_bitmap_not_updated + * frame is received, but rxpcu could not update the receive bitmap + * due to (temporary) fifo constraints. + * + * msdu_done + * if set indicates that the rx packet data, rx header data, rx + * ppdu start descriptor, rx mpdu start/end descriptor, rx msdu + * start/end descriptors and rx attention descriptor are all + * valid. this bit must be in the last octet of the + * descriptor. 
+ */ + +#define rx_mpdu_start_info0_ndp_frame bit(9) +#define rx_mpdu_start_info0_phy_err bit(10) +#define rx_mpdu_start_info0_phy_err_mpdu_hdr bit(11) +#define rx_mpdu_start_info0_proto_ver_err bit(12) +#define rx_mpdu_start_info0_ast_lookup_valid bit(13) + +#define rx_mpdu_start_info1_mpdu_ctrl_valid bit(0) +#define rx_mpdu_start_info1_mpdu_dur_valid bit(1) +#define rx_mpdu_start_info1_mac_addr1_valid bit(2) +#define rx_mpdu_start_info1_mac_addr2_valid bit(3) +#define rx_mpdu_start_info1_mac_addr3_valid bit(4) +#define rx_mpdu_start_info1_mac_addr4_valid bit(5) +#define rx_mpdu_start_info1_mpdu_seq_ctrl_valid bit(6) +#define rx_mpdu_start_info1_mpdu_qos_ctrl_valid bit(7) +#define rx_mpdu_start_info1_mpdu_ht_ctrl_valid bit(8) +#define rx_mpdu_start_info1_encrypt_info_valid bit(9) +#define rx_mpdu_start_info1_mpdu_frag_number genmask(13, 10) +#define rx_mpdu_start_info1_more_frag_flag bit(14) +#define rx_mpdu_start_info1_from_ds bit(16) +#define rx_mpdu_start_info1_to_ds bit(17) +#define rx_mpdu_start_info1_encrypted bit(18) +#define rx_mpdu_start_info1_mpdu_retry bit(19) +#define rx_mpdu_start_info1_mpdu_seq_num genmask(31, 20) + +#define rx_mpdu_start_info2_epd_en bit(0) +#define rx_mpdu_start_info2_all_frame_encpd bit(1) +#define rx_mpdu_start_info2_enc_type genmask(5, 2) +#define rx_mpdu_start_info2_var_wep_key_width genmask(7, 6) +#define rx_mpdu_start_info2_mesh_sta bit(8) +#define rx_mpdu_start_info2_bssid_hit bit(9) +#define rx_mpdu_start_info2_bssid_num genmask(13, 10) +#define rx_mpdu_start_info2_tid genmask(17, 14) + +#define rx_mpdu_start_info3_reo_dest_ind genmask(4, 0) +#define rx_mpdu_start_info3_flow_id_toeplitz bit(7) +#define rx_mpdu_start_info3_pkt_sel_fp_ucast_data bit(8) +#define rx_mpdu_start_info3_pkt_sel_fp_mcast_data bit(9) +#define rx_mpdu_start_info3_pkt_sel_fp_ctrl_bar bit(10) +#define rx_mpdu_start_info3_rxdma0_src_ring_sel genmask(12, 11) +#define rx_mpdu_start_info3_rxdma0_dst_ring_sel genmask(14, 13) + +#define 
rx_mpdu_start_info4_reo_queue_desc_hi genmask(7, 0) +#define rx_mpdu_start_info4_recv_queue_num genmask(23, 8) +#define rx_mpdu_start_info4_pre_delim_err_warn bit(24) +#define rx_mpdu_start_info4_first_delim_err bit(25) + +#define rx_mpdu_start_info5_key_id genmask(7, 0) +#define rx_mpdu_start_info5_new_peer_entry bit(8) +#define rx_mpdu_start_info5_decrypt_needed bit(9) +#define rx_mpdu_start_info5_decap_type genmask(11, 10) +#define rx_mpdu_start_info5_vlan_tag_c_padding bit(12) +#define rx_mpdu_start_info5_vlan_tag_s_padding bit(13) +#define rx_mpdu_start_info5_strip_vlan_tag_c bit(14) +#define rx_mpdu_start_info5_strip_vlan_tag_s bit(15) +#define rx_mpdu_start_info5_pre_delim_count genmask(27, 16) +#define rx_mpdu_start_info5_ampdu_flag bit(28) +#define rx_mpdu_start_info5_bar_frame bit(29) + +#define rx_mpdu_start_info6_mpdu_len genmask(13, 0) +#define rx_mpdu_start_info6_first_mpdu bit(14) +#define rx_mpdu_start_info6_mcast_bcast bit(15) +#define rx_mpdu_start_info6_ast_idx_not_found bit(16) +#define rx_mpdu_start_info6_ast_idx_timeout bit(17) +#define rx_mpdu_start_info6_power_mgmt bit(18) +#define rx_mpdu_start_info6_non_qos bit(19) +#define rx_mpdu_start_info6_null_data bit(20) +#define rx_mpdu_start_info6_mgmt_type bit(21) +#define rx_mpdu_start_info6_ctrl_type bit(22) +#define rx_mpdu_start_info6_more_data bit(23) +#define rx_mpdu_start_info6_eosp bit(24) +#define rx_mpdu_start_info6_fragment bit(25) +#define rx_mpdu_start_info6_order bit(26) +#define rx_mpdu_start_info6_uapsd_trigger bit(27) +#define rx_mpdu_start_info6_encrypt_required bit(28) +#define rx_mpdu_start_info6_directed bit(29) + +#define rx_mpdu_start_raw_mpdu bit(0) + +struct rx_mpdu_start { + __le16 info0; + __le16 phy_ppdu_id; + __le16 ast_index; + __le16 sw_peer_id; + __le32 info1; + __le32 info2; + __le32 pn[4]; + __le32 peer_meta_data; + __le32 info3; + __le32 reo_queue_desc_lo; + __le32 info4; + __le32 info5; + __le32 info6; + __le16 frame_ctrl; + __le16 duration; + u8 
addr1[eth_alen]; + u8 addr2[eth_alen]; + u8 addr3[eth_alen]; + __le16 seq_ctrl; + u8 addr4[eth_alen]; + __le16 qos_ctrl; + __le32 ht_ctrl; + __le32 raw; +} __packed; + +/* rx_mpdu_start + * + * rxpcu_mpdu_filter_in_category + * field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. values + * are defined in enum %rx_desc_rxpcu_filter_*. + * note: for ndp frame, if it was expected because the preceding + * ndpa was filter_pass, the setting rxpcu_filter_pass will be + * used. this setting will also be used for every ndp frame in + * case promiscuous mode is enabled. + * + * sw_frame_group_id + * sw processes frames based on certain classifications. values + * are defined in enum %rx_desc_sw_frame_grp_id_*. + * + * ndp_frame + * indicates that the received frame was an ndp frame. + * + * phy_err + * indicates that phy error was received before mac received data. + * + * phy_err_during_mpdu_header + * phy error was received before mac received the complete mpdu + * header which was needed for proper decoding. + * + * protocol_version_err + * rxpcu detected a version error in the frame control field. + * + * ast_based_lookup_valid + * ast based lookup for this frame has found a valid result. + * + * phy_ppdu_id + * a ppdu counter value that phy increments for every ppdu + * received. the counter value wraps around. + * + * ast_index + * this field indicates the index of the ast entry corresponding + * to this mpdu. it is provided by the gse module instantiated in + * rxpcu. a value of 0xffff indicates an invalid ast index. + * + * sw_peer_id + * this field indicates a unique peer identifier. it is set equal + * to field 'sw_peer_id' from the ast entry. + * + * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid, + * mpdu_ht_control_valid, frame_encryption_info_valid + * indicates that each fields have valid entries. 
+ * + * mac_addr_adx_valid + * corresponding mac_addr_adx_{lo/hi} has valid entries. + * + * from_ds, to_ds + * valid only when mpdu_frame_control_valid is set. indicates that + * frame is received from ds and sent to ds. + * + * encrypted + * protected bit from the frame control. + * + * mpdu_retry + * retry bit from frame control. only valid when first_msdu is set. + * + * mpdu_sequence_number + * the sequence number from the 802.11 header. + * + * epd_en + * if set, use epd instead of lpd. + * + * all_frames_shall_be_encrypted + * if set, all frames (data only?) shall be encrypted. if not, + * rx crypto shall set an error flag. + * + * encrypt_type + * values are defined in enum %hal_encrypt_type_. + * + * mesh_sta + * indicates a mesh (11s) sta. + * + * bssid_hit + * bssid of the incoming frame matched one of the 8 bssid + * register values. + * + * bssid_number + * this number indicates which one out of the 8 bssid register + * values matched the incoming frame. + * + * tid + * tid field in the qos control field + * + * pn + * the pn number. + * + * peer_meta_data + * meta data that sw has programmed in the peer table entry + * of the transmitting sta. + * + * rx_reo_queue_desc_addr_lo + * address (lower 32 bits) of the reo queue descriptor. + * + * rx_reo_queue_desc_addr_hi + * address (upper 8 bits) of the reo queue descriptor. + * + * receive_queue_number + * indicates the mpdu queue id to which this mpdu link + * descriptor belongs. + * + * pre_delim_err_warning + * indicates that a delimiter fcs error was found in between the + * previous mpdu and this mpdu. note that this is just a warning, + * and does not mean that this mpdu is corrupted in any way. if + * it is, there will be other errors indicated such as fcs or + * decrypt errors. + * + * first_delim_err + * indicates that the first delimiter had a fcs failure. + * + * key_id + * the key id octet from the iv. + * + * new_peer_entry + * set if new rx_peer_entry tlv follows. 
if clear, rx_peer_entry + * doesn't follow so rx decryption module either uses old peer + * entry or not decrypt. + * + * decrypt_needed + * when rxpcu sets bit 'ast_index_not_found or ast_index_timeout', + * rxpcu will also ensure that this bit is not set. crypto for that + * reason only needs to evaluate this bit and non of the other ones + * + * decap_type + * used by the ole during decapsulation. values are defined in + * enum %mpdu_start_decap_type_*. + * + * rx_insert_vlan_c_tag_padding + * rx_insert_vlan_s_tag_padding + * insert 4 byte of all zeros as vlan tag or double vlan tag if + * the rx payload does not have vlan. + * + * strip_vlan_c_tag_decap + * strip_vlan_s_tag_decap + * strip vlan or double vlan during decapsulation. + * + * pre_delim_count + * the number of delimiters before this mpdu. note that this + * number is cleared at ppdu start. if this mpdu is the first + * received mpdu in the ppdu and this mpdu gets filtered-in, + * this field will indicate the number of delimiters located + * after the last mpdu in the previous ppdu. + * + * if this mpdu is located after the first received mpdu in + * an ppdu, this field will indicate the number of delimiters + * located between the previous mpdu and this mpdu. + * + * ampdu_flag + * received frame was part of an a-mpdu. + * + * bar_frame + * received frame is a bar frame + * + * mpdu_length + * mpdu length before decapsulation. 
+ * + * first_mpdu..directed + * see definition in rx attention descriptor + * + */ + +enum rx_msdu_start_pkt_type { + rx_msdu_start_pkt_type_11a, + rx_msdu_start_pkt_type_11b, + rx_msdu_start_pkt_type_11n, + rx_msdu_start_pkt_type_11ac, + rx_msdu_start_pkt_type_11ax, +}; + +enum rx_msdu_start_sgi { + rx_msdu_start_sgi_0_8_us, + rx_msdu_start_sgi_0_4_us, + rx_msdu_start_sgi_1_6_us, + rx_msdu_start_sgi_3_2_us, +}; + +enum rx_msdu_start_recv_bw { + rx_msdu_start_recv_bw_20mhz, + rx_msdu_start_recv_bw_40mhz, + rx_msdu_start_recv_bw_80mhz, + rx_msdu_start_recv_bw_160mhz, +}; + +enum rx_msdu_start_reception_type { + rx_msdu_start_reception_type_su, + rx_msdu_start_reception_type_dl_mu_mimo, + rx_msdu_start_reception_type_dl_mu_ofdma, + rx_msdu_start_reception_type_dl_mu_ofdma_mimo, + rx_msdu_start_reception_type_ul_mu_mimo, + rx_msdu_start_reception_type_ul_mu_ofdma, + rx_msdu_start_reception_type_ul_mu_ofdma_mimo, +}; + +#define rx_msdu_start_info1_msdu_length genmask(13, 0) +#define rx_msdu_start_info1_rsvd_1a bit(14) +#define rx_msdu_start_info1_ipsec_esp bit(15) +#define rx_msdu_start_info1_l3_offset genmask(22, 16) +#define rx_msdu_start_info1_ipsec_ah bit(23) +#define rx_msdu_start_info1_l4_offset genmask(31, 24) + +#define rx_msdu_start_info2_msdu_number genmask(7, 0) +#define rx_msdu_start_info2_decap_type genmask(9, 8) +#define rx_msdu_start_info2_ipv4 bit(10) +#define rx_msdu_start_info2_ipv6 bit(11) +#define rx_msdu_start_info2_tcp bit(12) +#define rx_msdu_start_info2_udp bit(13) +#define rx_msdu_start_info2_ip_frag bit(14) +#define rx_msdu_start_info2_tcp_only_ack bit(15) +#define rx_msdu_start_info2_da_is_bcast_mcast bit(16) +#define rx_msdu_start_info2_selected_toeplitz_hash genmask(18, 17) +#define rx_msdu_start_info2_ip_fixed_hdr_valid bit(19) +#define rx_msdu_start_info2_ip_extn_hdr_valid bit(20) +#define rx_msdu_start_info2_ip_tcp_udp_hdr_valid bit(21) +#define rx_msdu_start_info2_mesh_ctrl_present bit(22) +#define rx_msdu_start_info2_ldpc bit(23) 
+#define rx_msdu_start_info2_ip4_ip6_nxt_hdr genmask(31, 24) +#define rx_msdu_start_info2_decap_format genmask(9, 8) + +#define rx_msdu_start_info3_user_rssi genmask(7, 0) +#define rx_msdu_start_info3_pkt_type genmask(11, 8) +#define rx_msdu_start_info3_stbc bit(12) +#define rx_msdu_start_info3_sgi genmask(14, 13) +#define rx_msdu_start_info3_rate_mcs genmask(18, 15) +#define rx_msdu_start_info3_recv_bw genmask(20, 19) +#define rx_msdu_start_info3_reception_type genmask(23, 21) +#define rx_msdu_start_info3_mimo_ss_bitmap genmask(31, 24) + +struct rx_msdu_start { + __le16 info0; + __le16 phy_ppdu_id; + __le32 info1; + __le32 info2; + __le32 toeplitz_hash; + __le32 flow_id_toeplitz; + __le32 info3; + __le32 ppdu_start_timestamp; + __le32 phy_meta_data; +} __packed; + +/* rx_msdu_start + * + * rxpcu_mpdu_filter_in_category + * field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. values + * are defined in enum %rx_desc_rxpcu_filter_*. + * + * sw_frame_group_id + * sw processes frames based on certain classifications. values + * are defined in enum %rx_desc_sw_frame_grp_id_*. + * + * phy_ppdu_id + * a ppdu counter value that phy increments for every ppdu + * received. the counter value wraps around. + * + * msdu_length + * msdu length in bytes after decapsulation. + * + * ipsec_esp + * set if ipv4/v6 packet is using ipsec esp. + * + * l3_offset + * depending upon mode bit, this field either indicates the + * l3 offset in bytes from the start of the rx_header or the ip + * offset in bytes from the start of the packet after + * decapsulation. the latter is only valid if ipv4_proto or + * ipv6_proto is set. 
+ * + * ipsec_ah + * set if ipv4/v6 packet is using ipsec ah + * + * l4_offset + * depending upon mode bit, this field either indicates the + * l4 offset nin bytes from the start of rx_header (only valid + * if either ipv4_proto or ipv6_proto is set to 1) or indicates + * the offset in bytes to the start of tcp or udp header from + * the start of the ip header after decapsulation (only valid if + * tcp_proto or udp_proto is set). the value 0 indicates that + * the offset is longer than 127 bytes. + * + * msdu_number + * indicates the msdu number within a mpdu. this value is + * reset to zero at the start of each mpdu. if the number of + * msdu exceeds 255 this number will wrap using modulo 256. + * + * decap_type + * indicates the format after decapsulation. values are defined in + * enum %mpdu_start_decap_type_*. + * + * ipv4_proto + * set if l2 layer indicates ipv4 protocol. + * + * ipv6_proto + * set if l2 layer indicates ipv6 protocol. + * + * tcp_proto + * set if the ipv4_proto or ipv6_proto are set and the ip protocol + * indicates tcp. + * + * udp_proto + * set if the ipv4_proto or ipv6_proto are set and the ip protocol + * indicates udp. + * + * ip_frag + * indicates that either the ip more frag bit is set or ip frag + * number is non-zero. if set indicates that this is a fragmented + * ip packet. + * + * tcp_only_ack + * set if only the tcp ack bit is set in the tcp flags and if + * the tcp payload is 0. + * + * da_is_bcast_mcast + * the destination address is broadcast or multicast. + * + * toeplitz_hash + * actual chosen hash. 
+ * 0 - toeplitz hash of 2-tuple (ip source address, ip + * destination address) + * 1 - toeplitz hash of 4-tuple (ip source address, + * ip destination address, l4 (tcp/udp) source port, + * l4 (tcp/udp) destination port) + * 2 - toeplitz of flow_id + * 3 - zero is used + * + * ip_fixed_header_valid + * fixed 20-byte ipv4 header or 40-byte ipv6 header parsed + * fully within first 256 bytes of the packet + * + * ip_extn_header_valid + * ipv6/ipv6 header, including ipv4 options and + * recognizable extension headers parsed fully within first 256 + * bytes of the packet + * + * tcp_udp_header_valid + * fixed 20-byte tcp (excluding tcp options) or 8-byte udp + * header parsed fully within first 256 bytes of the packet + * + * mesh_control_present + * when set, this msdu includes the 'mesh control' field + * + * ldpc + * + * ip4_protocol_ip6_next_header + * for ipv4, this is the 8 bit protocol field set). for ipv6 this + * is the 8 bit next_header field. + * + * toeplitz_hash_2_or_4 + * controlled by rxole register - if register bit set to 0, + * toeplitz hash is computed over 2-tuple ipv4 or ipv6 src/dest + * addresses; otherwise, toeplitz hash is computed over 4-tuple + * ipv4 or ipv6 src/dest addresses and src/dest ports. + * + * flow_id_toeplitz + * toeplitz hash of 5-tuple + * {ip source address, ip destination address, ip source port, ip + * destination port, l4 protocol} in case of non-ipsec. + * + * in case of ipsec - toeplitz hash of 4-tuple + * {ip source address, ip destination address, spi, l4 protocol} + * + * the relevant toeplitz key registers are provided in rxole's + * instance of common parser module. these registers are separate + * from the toeplitz keys used by ase/fse modules inside rxole. + * the actual value will be passed on from common parser module + * to rxole in one of the who_* tlvs. + * + * user_rssi + * rssi for this user + * + * pkt_type + * values are defined in enum %rx_msdu_start_pkt_type_*. 
+ * + * stbc + * when set, use stbc transmission rates. + * + * sgi + * field only valid when pkt type is ht, vht or he. values are + * defined in enum %rx_msdu_start_sgi_*. + * + * rate_mcs + * mcs rate used. + * + * receive_bandwidth + * full receive bandwidth. values are defined in enum + * %rx_msdu_start_recv_*. + * + * reception_type + * indicates what type of reception this is and defined in enum + * %rx_msdu_start_reception_type_*. + * + * mimo_ss_bitmap + * field only valid when + * reception_type is rx_msdu_start_reception_type_dl_mu_mimo or + * rx_msdu_start_reception_type_dl_mu_ofdma_mimo. + * + * bitmap, with each bit indicating if the related spatial + * stream is used for this sta + * + * lsb related to ss 0 + * + * 0 - spatial stream not used for this reception + * 1 - spatial stream used for this reception + * + * ppdu_start_timestamp + * timestamp that indicates when the ppdu that contained this mpdu + * started on the medium. + * + * phy_meta_data + * sw programmed meta data provided by the phy. can be used for sw + * to indicate the channel the device is on. 
+ */ + +#define rx_msdu_end_info0_rxpcu_mpdu_fitler genmask(1, 0) +#define rx_msdu_end_info0_sw_frame_grp_id genmask(8, 2) + +#define rx_msdu_end_info1_key_id genmask(7, 0) +#define rx_msdu_end_info1_cce_super_rule genmask(13, 8) +#define rx_msdu_end_info1_ccnd_truncate bit(14) +#define rx_msdu_end_info1_ccnd_cce_dis bit(15) +#define rx_msdu_end_info1_ext_wapi_pn genmask(31, 16) + +#define rx_msdu_end_info2_reported_mpdu_len genmask(13, 0) +#define rx_msdu_end_info2_first_msdu bit(14) +#define rx_msdu_end_info2_last_msdu bit(15) +#define rx_msdu_end_info2_sa_idx_timeout bit(16) +#define rx_msdu_end_info2_da_idx_timeout bit(17) +#define rx_msdu_end_info2_msdu_limit_err bit(18) +#define rx_msdu_end_info2_flow_idx_timeout bit(19) +#define rx_msdu_end_info2_flow_idx_invalid bit(20) +#define rx_msdu_end_info2_wifi_parser_err bit(21) +#define rx_msdu_end_info2_amsdu_parset_err bit(22) +#define rx_msdu_end_info2_sa_is_valid bit(23) +#define rx_msdu_end_info2_da_is_valid bit(24) +#define rx_msdu_end_info2_da_is_mcbc bit(25) +#define rx_msdu_end_info2_l3_hdr_padding genmask(27, 26) + +#define rx_msdu_end_info3_tcp_flag genmask(8, 0) +#define rx_msdu_end_info3_lro_eligible bit(9) + +#define rx_msdu_end_info4_da_offset genmask(5, 0) +#define rx_msdu_end_info4_sa_offset genmask(11, 6) +#define rx_msdu_end_info4_da_offset_valid bit(12) +#define rx_msdu_end_info4_sa_offset_valid bit(13) +#define rx_msdu_end_info4_l3_type genmask(31, 16) + +#define rx_msdu_end_info5_msdu_drop bit(0) +#define rx_msdu_end_info5_reo_dest_ind genmask(5, 1) +#define rx_msdu_end_info5_flow_idx genmask(25, 6) + +struct rx_msdu_end { + __le16 info0; + __le16 phy_ppdu_id; + __le16 ip_hdr_cksum; + __le16 tcp_udp_cksum; + __le32 info1; + __le32 ext_wapi_pn[2]; + __le32 info2; + __le32 ipv6_options_crc; + __le32 tcp_seq_num; + __le32 tcp_ack_num; + __le16 info3; + __le16 window_size; + __le32 info4; + __le32 rule_indication[2]; + __le16 sa_idx; + __le16 da_idx; + __le32 info5; + __le32 fse_metadata; + __le16 
cce_metadata; + __le16 sa_sw_peer_id; +} __packed; + +/* rx_msdu_end + * + * rxpcu_mpdu_filter_in_category + * field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. values + * are defined in enum %rx_desc_rxpcu_filter_*. + * + * sw_frame_group_id + * sw processes frames based on certain classifications. values + * are defined in enum %rx_desc_sw_frame_grp_id_*. + * + * phy_ppdu_id + * a ppdu counter value that phy increments for every ppdu + * received. the counter value wraps around. + * + * ip_hdr_cksum + * this can include the ip header checksum or the pseudo + * header checksum used by tcp/udp checksum. + * + * tcp_udp_chksum + * the value of the computed tcp/udp checksum. a mode bit + * selects whether this checksum is the full checksum or the + * partial checksum which does not include the pseudo header. + * + * key_id + * the key id octet from the iv. only valid when first_msdu is set. + * + * cce_super_rule + * indicates the super filter rule. + * + * cce_classify_not_done_truncate + * classification failed due to truncated frame. + * + * cce_classify_not_done_cce_dis + * classification failed due to cce global disable + * + * ext_wapi_pn* + * extension pn (packet number) which is only used by wapi. + * + * reported_mpdu_length + * mpdu length before decapsulation. only valid when first_msdu is + * set. this field is taken directly from the length field of the + * a-mpdu delimiter or the preamble length field for non-a-mpdu + * frames. + * + * first_msdu + * indicates the first msdu of a-msdu. if both first_msdu and + * last_msdu are set in the msdu then this is a non-aggregated msdu + * frame: normal mpdu. interior msdu in an a-msdu shall have both + * first_mpdu and last_mpdu bits set to 0. + * + * last_msdu + * indicates the last msdu of the a-msdu. mpdu end status is only + * valid when last_msdu is set. 
+ * + * sa_idx_timeout + * indicates an unsuccessful mac source address search due to the + * expiring of the search timer. + * + * da_idx_timeout + * indicates an unsuccessful mac destination address search due to + * the expiring of the search timer. + * + * msdu_limit_error + * indicates that the msdu threshold was exceeded and thus all the + * rest of the msdus will not be scattered and will not be + * decapsulated but will be dma'ed in raw format as a single msdu. + * + * flow_idx_timeout + * indicates an unsuccessful flow search due to the expiring of + * the search timer. + * + * flow_idx_invalid + * flow id is not valid. + * + * amsdu_parser_error + * a-msdu could not be properly de-agregated. + * + * sa_is_valid + * indicates that ole found a valid sa entry. + * + * da_is_valid + * indicates that ole found a valid da entry. + * + * da_is_mcbc + * field only valid if da_is_valid is set. indicates the da address + * was a multicast of broadcast address. + * + * l3_header_padding + * number of bytes padded to make sure that the l3 header will + * always start of a dword boundary. + * + * ipv6_options_crc + * 32 bit crc computed out of ip v6 extension headers. + * + * tcp_seq_number + * tcp sequence number. + * + * tcp_ack_number + * tcp acknowledge number. + * + * tcp_flag + * tcp flags {ns, cwr, ece, urg, ack, psh, rst, syn, fin}. + * + * lro_eligible + * computed out of tcp and ip fields to indicate that this + * msdu is eligible for lro. + * + * window_size + * tcp receive window size. + * + * da_offset + * offset into msdu buffer for da. + * + * sa_offset + * offset into msdu buffer for sa. + * + * da_offset_valid + * da_offset field is valid. this will be set to 0 in case + * of a dynamic a-msdu when da is compressed. + * + * sa_offset_valid + * sa_offset field is valid. this will be set to 0 in case + * of a dynamic a-msdu when sa is compressed. 
+ * + * l3_type + * the 16-bit type value indicating the type of l3 later + * extracted from llc/snap, set to zero if snap is not + * available. + * + * rule_indication + * bitmap indicating which of rules have matched. + * + * sa_idx + * the offset in the address table which matches mac source address + * + * da_idx + * the offset in the address table which matches mac destination + * address. + * + * msdu_drop + * reo shall drop this msdu and not forward it to any other ring. + * + * reo_destination_indication + * the id of the reo exit ring where the msdu frame shall push + * after (mpdu level) reordering has finished. values are defined + * in enum %hal_rx_msdu_desc_reo_dest_ind_. + * + * flow_idx + * flow table index. + * + * fse_metadata + * fse related meta data. + * + * cce_metadata + * cce related meta data. + * + * sa_sw_peer_id + * sw_peer_id from the address search entry corresponding to the + * source address of the msdu. + */ + +enum rx_mpdu_end_rxdma_dest_ring { + rx_mpdu_end_rxdma_dest_ring_release, + rx_mpdu_end_rxdma_dest_ring_fw, + rx_mpdu_end_rxdma_dest_ring_sw, + rx_mpdu_end_rxdma_dest_ring_reo, +}; + +#define rx_mpdu_end_info1_unsup_ktype_short_frame bit(11) +#define rx_mpdu_end_info1_rx_in_tx_decrypt_byt bit(12) +#define rx_mpdu_end_info1_overflow_err bit(13) +#define rx_mpdu_end_info1_mpdu_len_err bit(14) +#define rx_mpdu_end_info1_tkip_mic_err bit(15) +#define rx_mpdu_end_info1_decrypt_err bit(16) +#define rx_mpdu_end_info1_unencrypted_frame_err bit(17) +#define rx_mpdu_end_info1_pn_fields_valid bit(18) +#define rx_mpdu_end_info1_fcs_err bit(19) +#define rx_mpdu_end_info1_msdu_len_err bit(20) +#define rx_mpdu_end_info1_rxdma0_dest_ring genmask(22, 21) +#define rx_mpdu_end_info1_rxdma1_dest_ring genmask(24, 23) +#define rx_mpdu_end_info1_decrypt_status_code genmask(27, 25) +#define rx_mpdu_end_info1_rx_bitmap_not_upd bit(28) + +struct rx_mpdu_end { + __le16 info0; + __le16 phy_ppdu_id; + __le32 info1; +} __packed; + +/* rx_mpdu_end + * + * 
rxpcu_mpdu_filter_in_category + * field indicates what the reason was that this mpdu frame + * was allowed to come into the receive path by rxpcu. values + * are defined in enum %rx_desc_rxpcu_filter_*. + * + * sw_frame_group_id + * sw processes frames based on certain classifications. values + * are defined in enum %rx_desc_sw_frame_grp_id_*. + * + * phy_ppdu_id + * a ppdu counter value that phy increments for every ppdu + * received. the counter value wraps around. + * + * unsup_ktype_short_frame + * this bit will be '1' when wep or tkip or wapi key type is + * received for 11ah short frame. crypto will bypass the received + * packet without decryption to rxole after setting this bit. + * + * rx_in_tx_decrypt_byp + * indicates that rx packet is not decrypted as crypto is + * busy with tx packet processing. + * + * overflow_err + * rxpcu receive fifo ran out of space to receive the full mpdu. + * therefore this mpdu is terminated early and is thus corrupted. + * + * this mpdu will not be acked. + * + * rxpcu might still be able to correctly receive the following + * mpdus in the ppdu if enough fifo space became available in time. + * + * mpdu_length_err + * set by rxpcu if the expected mpdu length does not correspond + * with the actually received number of bytes in the mpdu. + * + * tkip_mic_err + * set by rx crypto when crypto detected a tkip mic error for + * this mpdu. + * + * decrypt_err + * set by rx crypto when crypto detected a decrypt error for this + * mpdu or crypto received an encrypted frame, but did not get a + * valid corresponding key id in the peer entry. + * + * unencrypted_frame_err + * set by rx crypto when crypto detected an unencrypted frame while + * in the peer entry field 'all_frames_shall_be_encrypted' is set. + * + * pn_fields_contain_valid_info + * set by rx crypto to indicate that there is a valid pn field + * present in this mpdu. + * + * fcs_err + * set by rxpcu when there is an fcs error detected for this mpdu. 
+ * + * msdu_length_err + * set by rxole when there is an msdu length error detected + * in at least 1 of the msdus embedded within the mpdu. + * + * rxdma0_destination_ring + * rxdma1_destination_ring + * the ring to which rxdma0/1 shall push the frame, assuming + * no mpdu level errors are detected. in case of mpdu level + * errors, rxdma0/1 might change the rxdma0/1 destination. values + * are defined in %enum rx_mpdu_end_rxdma_dest_ring_*. + * + * decrypt_status_code + * field provides insight into the decryption performed. values + * are defined in enum %rx_desc_decrypt_status_code_*. + * + * rx_bitmap_not_updated + * frame is received, but rxpcu could not update the receive bitmap + * due to (temporary) fifo constraints. + */ + +/* padding bytes to avoid tlv's spanning across 128 byte boundary */ +#define hal_rx_desc_padding0_bytes 4 +#define hal_rx_desc_padding1_bytes 16 + +#define hal_rx_desc_hdr_status_len 120 + +struct hal_rx_desc { + __le32 msdu_end_tag; + struct rx_msdu_end msdu_end; + __le32 rx_attn_tag; + struct rx_attention attention; + __le32 msdu_start_tag; + struct rx_msdu_start msdu_start; + u8 rx_padding0[hal_rx_desc_padding0_bytes]; + __le32 mpdu_start_tag; + struct rx_mpdu_start mpdu_start; + __le32 mpdu_end_tag; + struct rx_mpdu_end mpdu_end; + u8 rx_padding1[hal_rx_desc_padding1_bytes]; + __le32 hdr_status_tag; + __le32 phy_ppdu_id; + u8 hdr_status[hal_rx_desc_hdr_status_len]; + u8 msdu_payload[0]; +} __packed; + +#endif /* ath11k_rx_desc_h */ diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/testmode.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. 
+ */ + +#include "testmode.h" +#include <net/netlink.h> +#include "debug.h" +#include "wmi.h" +#include "hw.h" +#include "core.h" +#include "testmode_i.h" + +static const struct nla_policy ath11k_tm_policy[ath11k_tm_attr_max + 1] = { + [ath11k_tm_attr_cmd] = { .type = nla_u32 }, + [ath11k_tm_attr_data] = { .type = nla_binary, + .len = ath11k_tm_data_max_len }, + [ath11k_tm_attr_wmi_cmdid] = { .type = nla_u32 }, + [ath11k_tm_attr_version_major] = { .type = nla_u32 }, + [ath11k_tm_attr_version_minor] = { .type = nla_u32 }, +}; + +/* returns true if callee consumes the skb and the skb should be discarded. + * returns false if skb is not used. does not sleep. + */ +bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb) +{ + struct sk_buff *nl_skb; + bool consumed; + int ret; + + ath11k_dbg(ar->ab, ath11k_dbg_testmode, + "testmode event wmi cmd_id %d skb %pk skb->len %d ", + cmd_id, skb, skb->len); + + ath11k_dbg_dump(ar->ab, ath11k_dbg_testmode, null, "", skb->data, skb->len); + + spin_lock_bh(&ar->data_lock); + + consumed = true; + + nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy, + 2 * sizeof(u32) + skb->len, + gfp_atomic); + if (!nl_skb) { + ath11k_warn(ar->ab, + "failed to allocate skb for testmode wmi event "); + goto out; + } + + ret = nla_put_u32(nl_skb, ath11k_tm_attr_cmd, ath11k_tm_cmd_wmi); + if (ret) { + ath11k_warn(ar->ab, + "failed to to put testmode wmi event cmd attribute: %d ", + ret); + kfree_skb(nl_skb); + goto out; + } + + ret = nla_put_u32(nl_skb, ath11k_tm_attr_wmi_cmdid, cmd_id); + if (ret) { + ath11k_warn(ar->ab, + "failed to to put testmode wmi even cmd_id: %d ", + ret); + kfree_skb(nl_skb); + goto out; + } + + ret = nla_put(nl_skb, ath11k_tm_attr_data, skb->len, skb->data); + if (ret) { + ath11k_warn(ar->ab, + "failed to copy skb to testmode wmi event: %d ", + ret); + kfree_skb(nl_skb); + goto out; + } + + cfg80211_testmode_event(nl_skb, gfp_atomic); + +out: + spin_unlock_bh(&ar->data_lock); + + return consumed; 
+} + +static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[]) +{ + struct sk_buff *skb; + int ret; + + ath11k_dbg(ar->ab, ath11k_dbg_testmode, + "testmode cmd get version_major %d version_minor %d ", + ath11k_testmode_version_major, + ath11k_testmode_version_minor); + + skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy, + nla_total_size(sizeof(u32))); + if (!skb) + return -enomem; + + ret = nla_put_u32(skb, ath11k_tm_attr_version_major, + ath11k_testmode_version_major); + if (ret) { + kfree_skb(skb); + return ret; + } + + ret = nla_put_u32(skb, ath11k_tm_attr_version_minor, + ath11k_testmode_version_minor); + if (ret) { + kfree_skb(skb); + return ret; + } + + return cfg80211_testmode_reply(skb); +} + +static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[]) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct sk_buff *skb; + u32 cmd_id, buf_len; + int ret; + void *buf; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ath11k_state_on) { + ret = -enetdown; + goto out; + } + + if (!tb[ath11k_tm_attr_data]) { + ret = -einval; + goto out; + } + + if (!tb[ath11k_tm_attr_wmi_cmdid]) { + ret = -einval; + goto out; + } + + buf = nla_data(tb[ath11k_tm_attr_data]); + buf_len = nla_len(tb[ath11k_tm_attr_data]); + cmd_id = nla_get_u32(tb[ath11k_tm_attr_wmi_cmdid]); + + ath11k_dbg(ar->ab, ath11k_dbg_testmode, + "testmode cmd wmi cmd_id %d buf %pk buf_len %d ", + cmd_id, buf, buf_len); + + ath11k_dbg_dump(ar->ab, ath11k_dbg_testmode, null, "", buf, buf_len); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, buf_len); + if (!skb) { + ret = -enomem; + goto out; + } + + memcpy(skb->data, buf, buf_len); + + ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id); + if (ret) { + dev_kfree_skb(skb); + ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d ", + ret); + goto out; + } + + ret = 0; + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + 
struct ath11k *ar = hw->priv; + struct nlattr *tb[ath11k_tm_attr_max + 1]; + int ret; + + ret = nla_parse(tb, ath11k_tm_attr_max, data, len, ath11k_tm_policy, + null); + if (ret) + return ret; + + if (!tb[ath11k_tm_attr_cmd]) + return -einval; + + switch (nla_get_u32(tb[ath11k_tm_attr_cmd])) { + case ath11k_tm_cmd_get_version: + return ath11k_tm_cmd_get_version(ar, tb); + case ath11k_tm_cmd_wmi: + return ath11k_tm_cmd_wmi(ar, tb); + default: + return -eopnotsupp; + } +} diff --git a/drivers/net/wireless/ath/ath11k/testmode.h b/drivers/net/wireless/ath/ath11k/testmode.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/testmode.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#include "core.h" + +#ifdef config_nl80211_testmode + +bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, struct sk_buff *skb); +int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#else + +static inline bool ath11k_tm_event_wmi(struct ath11k *ar, u32 cmd_id, + struct sk_buff *skb) +{ + return false; +} + +static inline int ath11k_tm_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/ath11k/testmode_i.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/testmode_i.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +/* "api" level of the ath11k testmode interface. bump it after every + * incompatible interface change. + */ +#define ath11k_testmode_version_major 1 + +/* bump this after every _compatible_ interface change, for example + * addition of a new command or an attribute. 
+ */ +#define ath11k_testmode_version_minor 0 + +#define ath11k_tm_data_max_len 5000 + +enum ath11k_tm_attr { + __ath11k_tm_attr_invalid = 0, + ath11k_tm_attr_cmd = 1, + ath11k_tm_attr_data = 2, + ath11k_tm_attr_wmi_cmdid = 3, + ath11k_tm_attr_version_major = 4, + ath11k_tm_attr_version_minor = 5, + ath11k_tm_attr_wmi_op_version = 6, + + /* keep last */ + __ath11k_tm_attr_after_last, + ath11k_tm_attr_max = __ath11k_tm_attr_after_last - 1, +}; + +/* all ath11k testmode interface commands specified in + * ath11k_tm_attr_cmd + */ +enum ath11k_tm_cmd { + /* returns the supported ath11k testmode interface version in + * ath11k_tm_attr_version. always guaranteed to work. user space + * uses this to verify it's using the correct version of the + * testmode interface + */ + ath11k_tm_cmd_get_version = 0, + + /* the command used to transmit a wmi command to the firmware and + * the event to receive wmi events from the firmware. without + * struct wmi_cmd_hdr header, only the wmi payload. command id is + * provided with ath11k_tm_attr_wmi_cmdid and payload in + * ath11k_tm_attr_data. + */ + ath11k_tm_cmd_wmi = 1, +}; diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/trace.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2019 the linux foundation. all rights reserved. + */ + +#include <linux/module.h> + +#define create_trace_points +#include "trace.h" diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/trace.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2019 the linux foundation. all rights reserved. 
+ */ + +#if !defined(_trace_h_) || defined(trace_header_multi_read) + +#include <linux/tracepoint.h> +#include "core.h" + +#define _trace_h_ + +/* create empty functions when tracing is disabled */ +#if !defined(config_ath11k_tracing) +#undef trace_event +#define trace_event(name, proto, ...) \ +static inline void trace_ ## name(proto) {} +#endif /* !config_ath11k_tracing || __checker__ */ + +trace_event(ath11k_htt_pktlog, + tp_proto(struct ath11k *ar, const void *buf, u16 buf_len), + + tp_args(ar, buf, buf_len), + + tp_struct__entry( + __string(device, dev_name(ar->ab->dev)) + __string(driver, dev_driver_string(ar->ab->dev)) + __field(u16, buf_len) + __dynamic_array(u8, pktlog, buf_len) + ), + + tp_fast_assign( + __assign_str(device, dev_name(ar->ab->dev)); + __assign_str(driver, dev_driver_string(ar->ab->dev)); + __entry->buf_len = buf_len; + memcpy(__get_dynamic_array(pktlog), buf, buf_len); + ), + + tp_printk( + "%s %s size %hu", + __get_str(driver), + __get_str(device), + __entry->buf_len + ) +); + +trace_event(ath11k_htt_ppdu_stats, + tp_proto(struct ath11k *ar, const void *data, size_t len), + + tp_args(ar, data, len), + + tp_struct__entry( + __string(device, dev_name(ar->ab->dev)) + __string(driver, dev_driver_string(ar->ab->dev)) + __field(u16, len) + __dynamic_array(u8, ppdu, len) + ), + + tp_fast_assign( + __assign_str(device, dev_name(ar->ab->dev)); + __assign_str(driver, dev_driver_string(ar->ab->dev)); + __entry->len = len; + memcpy(__get_dynamic_array(ppdu), data, len); + ), + + tp_printk( + "%s %s ppdu len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +trace_event(ath11k_htt_rxdesc, + tp_proto(struct ath11k *ar, const void *data, size_t len), + + tp_args(ar, data, len), + + tp_struct__entry( + __string(device, dev_name(ar->ab->dev)) + __string(driver, dev_driver_string(ar->ab->dev)) + __field(u16, len) + __dynamic_array(u8, rxdesc, len) + ), + + tp_fast_assign( + __assign_str(device, dev_name(ar->ab->dev)); + 
__assign_str(driver, dev_driver_string(ar->ab->dev)); + __entry->len = len; + memcpy(__get_dynamic_array(rxdesc), data, len); + ), + + tp_printk( + "%s %s rxdesc len %d", + __get_str(driver), + __get_str(device), + __entry->len + ) +); + +#endif /* _trace_h_ || trace_header_multi_read*/ + +/* we don't want to use include/trace/events */ +#undef trace_include_path +#define trace_include_path . +#undef trace_include_file +#define trace_include_file trace + +/* this part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/wmi.c +// spdx-license-identifier: bsd-3-clause-clear +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/mac80211.h> +#include <net/cfg80211.h> +#include <linux/completion.h> +#include <linux/if_ether.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/uuid.h> +#include <linux/time.h> +#include <linux/of.h> +#include "core.h" +#include "debug.h" +#include "mac.h" +#include "hw.h" +#include "peer.h" + +struct wmi_tlv_policy { + size_t min_len; +}; + +struct wmi_tlv_svc_ready_parse { + bool wmi_svc_bitmap_done; +}; + +struct wmi_tlv_svc_rdy_ext_parse { + struct ath11k_service_ext_param param; + struct wmi_soc_mac_phy_hw_mode_caps *hw_caps; + struct wmi_hw_mode_capabilities *hw_mode_caps; + u32 n_hw_mode_caps; + u32 tot_phy_id; + struct wmi_hw_mode_capabilities pref_hw_mode_caps; + struct wmi_mac_phy_capabilities *mac_phy_caps; + u32 n_mac_phy_caps; + struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps; + struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps; + u32 n_ext_hal_reg_caps; + bool hw_mode_done; + bool mac_phy_done; + bool ext_hal_reg_done; +}; + +struct wmi_tlv_rdy_parse { + u32 num_extra_mac_addr; +}; + +static const struct wmi_tlv_policy wmi_tlv_policies[] = { + 
[wmi_tag_array_byte] + = { .min_len = 0 }, + [wmi_tag_array_uint32] + = { .min_len = 0 }, + [wmi_tag_service_ready_event] + = { .min_len = sizeof(struct wmi_service_ready_event) }, + [wmi_tag_service_ready_ext_event] + = { .min_len = sizeof(struct wmi_service_ready_ext_event) }, + [wmi_tag_soc_mac_phy_hw_mode_caps] + = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) }, + [wmi_tag_soc_hal_reg_capabilities] + = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) }, + [wmi_tag_vdev_start_response_event] + = { .min_len = sizeof(struct wmi_vdev_start_resp_event) }, + [wmi_tag_peer_delete_resp_event] + = { .min_len = sizeof(struct wmi_peer_delete_resp_event) }, + [wmi_tag_offload_bcn_tx_status_event] + = { .min_len = sizeof(struct wmi_bcn_tx_status_event) }, + [wmi_tag_vdev_stopped_event] + = { .min_len = sizeof(struct wmi_vdev_stopped_event) }, + [wmi_tag_reg_chan_list_cc_event] + = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) }, + [wmi_tag_mgmt_rx_hdr] + = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) }, + [wmi_tag_mgmt_tx_compl_event] + = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) }, + [wmi_tag_scan_event] + = { .min_len = sizeof(struct wmi_scan_event) }, + [wmi_tag_peer_sta_kickout_event] + = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, + [wmi_tag_roam_event] + = { .min_len = sizeof(struct wmi_roam_event) }, + [wmi_tag_chan_info_event] + = { .min_len = sizeof(struct wmi_chan_info_event) }, + [wmi_tag_pdev_bss_chan_info_event] + = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) }, + [wmi_tag_vdev_install_key_complete_event] + = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) }, + [wmi_tag_ready_event] + = {.min_len = sizeof(struct wmi_ready_event) }, + [wmi_tag_service_available_event] + = {.min_len = sizeof(struct wmi_service_available_event) }, + [wmi_tag_peer_assoc_conf_event] + = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) }, + [wmi_tag_stats_event] + = { .min_len = sizeof(struct 
wmi_stats_event) }, + [wmi_tag_pdev_ctl_failsafe_check_event] + = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) }, +}; + +#define primap(_hw_mode_) \ + [_hw_mode_] = _hw_mode_##_pri + +static const int ath11k_hw_mode_pri_map[] = { + primap(wmi_host_hw_mode_single), + primap(wmi_host_hw_mode_dbs), + primap(wmi_host_hw_mode_sbs_passive), + primap(wmi_host_hw_mode_sbs), + primap(wmi_host_hw_mode_dbs_sbs), + primap(wmi_host_hw_mode_dbs_or_sbs), + /* keep last */ + primap(wmi_host_hw_mode_max), +}; + +static int +ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath11k_base *ab, u16 tag, u16 len, + const void *ptr, void *data), + void *data) +{ + const void *begin = ptr; + const struct wmi_tlv *tlv; + u16 tlv_tag, tlv_len; + int ret; + + while (len > 0) { + if (len < sizeof(*tlv)) { + ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected) ", + ptr - begin, len, sizeof(*tlv)); + return -einval; + } + + tlv = ptr; + tlv_tag = field_get(wmi_tlv_tag, tlv->header); + tlv_len = field_get(wmi_tlv_len, tlv->header); + ptr += sizeof(*tlv); + len -= sizeof(*tlv); + + if (tlv_len > len) { + ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected) ", + tlv_tag, ptr - begin, len, tlv_len); + return -einval; + } + + if (tlv_tag < array_size(wmi_tlv_policies) && + wmi_tlv_policies[tlv_tag].min_len && + wmi_tlv_policies[tlv_tag].min_len > tlv_len) { + ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu) ", + tlv_tag, ptr - begin, tlv_len, + wmi_tlv_policies[tlv_tag].min_len); + return -einval; + } + + ret = iter(ab, tlv_tag, tlv_len, ptr, data); + if (ret) + return ret; + + ptr += tlv_len; + len -= tlv_len; + } + + return 0; +} + +static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len, + const void *ptr, void *data) +{ + const void **tb = data; + + if (tag < wmi_tag_max) + tb[tag] = ptr; + + 
return 0; +} + +static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb, + const void *ptr, size_t len) +{ + return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse, + (void *)tb); +} + +static const void ** +ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr, + size_t len, gfp_t gfp) +{ + const void **tb; + int ret; + + tb = kcalloc(wmi_tag_max, sizeof(*tb), gfp); + if (!tb) + return err_ptr(-enomem); + + ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len); + if (ret) { + kfree(tb); + return err_ptr(ret); + } + + return tb; +} + +static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, + u32 cmd_id) +{ + struct ath11k_skb_cb *skb_cb = ath11k_skb_cb(skb); + struct ath11k_base *ab = wmi->wmi_sc->ab; + struct wmi_cmd_hdr *cmd_hdr; + int ret; + u32 cmd = 0; + + if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == null) + return -enomem; + + cmd |= field_prep(wmi_cmd_hdr_cmd_id, cmd_id); + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + cmd_hdr->cmd_id = cmd; + + memset(skb_cb, 0, sizeof(*skb_cb)); + ret = ath11k_htc_send(&ab->htc, wmi->eid, skb); + + if (ret) + goto err_pull; + + return 0; + +err_pull: + skb_pull(skb, sizeof(struct wmi_cmd_hdr)); + return ret; +} + +int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, + u32 cmd_id) +{ + struct ath11k_wmi_base *wmi_sc = wmi->wmi_sc; + int ret = -eopnotsupp; + + might_sleep(); + + wait_event_timeout(wmi_sc->tx_credits_wq, ({ + ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id); + + if (ret && test_bit(ath11k_flag_crash_flush, &wmi_sc->ab->dev_flags)) + ret = -eshutdown; + + (ret != -eagain); + }), wmi_send_timeout_hz); + + if (ret == -eagain) + ath11k_warn(wmi_sc->ab, "wmi command %d timeout ", cmd_id); + + return ret; +} + +static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, + const void *ptr, + struct ath11k_service_ext_param *param) +{ + const struct wmi_service_ready_ext_event *ev = ptr; + + if (!ev) + return 
-einval; + + /* move this to host based bitmap */ + param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits; + param->default_fw_config_bits = ev->default_fw_config_bits; + param->he_cap_info = ev->he_cap_info; + param->mpdu_density = ev->mpdu_density; + param->max_bssid_rx_filters = ev->max_bssid_rx_filters; + memcpy(¶m->ppet, &ev->ppet, sizeof(param->ppet)); + + return 0; +} + +static int +ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, + struct wmi_soc_mac_phy_hw_mode_caps *hw_caps, + struct wmi_hw_mode_capabilities *wmi_hw_mode_caps, + struct wmi_soc_hal_reg_capabilities *hal_reg_caps, + struct wmi_mac_phy_capabilities *wmi_mac_phy_caps, + u8 hw_mode_id, u8 phy_id, + struct ath11k_pdev *pdev) +{ + struct wmi_mac_phy_capabilities *mac_phy_caps; + struct ath11k_band_cap *cap_band; + struct ath11k_pdev_cap *pdev_cap = &pdev->cap; + u32 phy_map; + u32 hw_idx, phy_idx = 0; + + if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps) + return -einval; + + for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) { + if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id) + break; + + phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map; + while (phy_map) { + phy_map >>= 1; + phy_idx++; + } + } + + if (hw_idx == hw_caps->num_hw_modes) + return -einval; + + phy_idx += phy_id; + if (phy_id >= hal_reg_caps->num_phy) + return -einval; + + mac_phy_caps = &wmi_mac_phy_caps[phy_idx]; + + pdev->pdev_id = mac_phy_caps->pdev_id; + pdev_cap->supported_bands = mac_phy_caps->supported_bands; + pdev_cap->ampdu_density = mac_phy_caps->ampdu_density; + + /* take non-zero tx/rx chainmask. if tx/rx chainmask differs from + * band to band for a single radio, need to see how this should be + * handled. 
+ */ + if (mac_phy_caps->supported_bands & wmi_host_wlan_2g_cap) { + pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g; + pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g; + } else if (mac_phy_caps->supported_bands & wmi_host_wlan_5g_cap) { + pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g; + pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g; + pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g; + pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g; + pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g; + } else { + return -einval; + } + + /* tx/rx chainmask reported from fw depends on the actual hw chains used, + * for example, for 4x4 capable macphys, first 4 chains can be used for first + * mac and the remaing 4 chains can be used for the second mac or vice-versa. + * in this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0 + * will be advertised for second mac or vice-versa. compute the shift value for + * for tx/rx chainmask which will be used to advertise supported ht/vht rates to + * mac80211. 
+ */ + pdev_cap->tx_chain_mask_shift = + find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32); + pdev_cap->rx_chain_mask_shift = + find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32); + + cap_band = &pdev_cap->band[nl80211_band_2ghz]; + cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g; + cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g; + cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g; + cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext; + cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g; + memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g, + sizeof(u32) * psoc_host_max_phy_size); + memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g, + sizeof(struct ath11k_ppe_threshold)); + + cap_band = &pdev_cap->band[nl80211_band_5ghz]; + cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; + cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; + cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; + cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; + cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; + memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, + sizeof(u32) * psoc_host_max_phy_size); + memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, + sizeof(struct ath11k_ppe_threshold)); + + return 0; +} + +static int +ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle, + struct wmi_soc_hal_reg_capabilities *reg_caps, + struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap, + u8 phy_idx, + struct ath11k_hal_reg_capabilities_ext *param) +{ + struct wmi_hal_reg_capabilities_ext *ext_reg_cap; + + if (!reg_caps || !wmi_ext_reg_cap) + return -einval; + + if (phy_idx >= reg_caps->num_phy) + return -einval; + + ext_reg_cap = &wmi_ext_reg_cap[phy_idx]; + + param->phy_id = ext_reg_cap->phy_id; + param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain; + param->eeprom_reg_domain_ext = + ext_reg_cap->eeprom_reg_domain_ext; + param->regcap1 = 
ext_reg_cap->regcap1; + param->regcap2 = ext_reg_cap->regcap2; + /* check if param->wireless_mode is needed */ + param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan; + param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan; + param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan; + param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan; + + return 0; +} + +static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab, + const void *evt_buf, + struct ath11k_targ_cap *cap) +{ + const struct wmi_service_ready_event *ev = evt_buf; + + if (!ev) { + ath11k_err(ab, "%s: failed by null param ", + __func__); + return -einval; + } + + cap->phy_capability = ev->phy_capability; + cap->max_frag_entry = ev->max_frag_entry; + cap->num_rf_chains = ev->num_rf_chains; + cap->ht_cap_info = ev->ht_cap_info; + cap->vht_cap_info = ev->vht_cap_info; + cap->vht_supp_mcs = ev->vht_supp_mcs; + cap->hw_min_tx_power = ev->hw_min_tx_power; + cap->hw_max_tx_power = ev->hw_max_tx_power; + cap->sys_cap_info = ev->sys_cap_info; + cap->min_pkt_size_enable = ev->min_pkt_size_enable; + cap->max_bcn_ie_size = ev->max_bcn_ie_size; + cap->max_num_scan_channels = ev->max_num_scan_channels; + cap->max_supported_macs = ev->max_supported_macs; + cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps; + cap->txrx_chainmask = ev->txrx_chainmask; + cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index; + cap->num_msdu_desc = ev->num_msdu_desc; + + return 0; +} + +/* save the wmi_service_bitmap into a linear bitmap. the wmi_services in + * wmi_service ready event are advertised in b0-b3 (lsb 4-bits) of each + * 4-byte word. 
+ */ +static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi, + const u32 *wmi_svc_bm) +{ + int i, j; + + for (i = 0, j = 0; i < wmi_service_bm_size && j < wmi_max_service; i++) { + do { + if (wmi_svc_bm[i] & bit(j % wmi_service_bits_in_size32)) + set_bit(j, wmi->wmi_sc->svc_map); + } while (++j % wmi_service_bits_in_size32); + } +} + +static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_ready_parse *svc_ready = data; + struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_sc.wmi[0]; + u16 expect_len; + + switch (tag) { + case wmi_tag_service_ready_event: + if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps)) + return -einval; + break; + + case wmi_tag_array_uint32: + if (!svc_ready->wmi_svc_bitmap_done) { + expect_len = wmi_service_bm_size * sizeof(u32); + if (len < expect_len) { + ath11k_warn(ab, "invalid len %d for the tag 0x%x ", + len, tag); + return -einval; + } + + ath11k_wmi_service_bitmap_copy(wmi_handle, ptr); + + svc_ready->wmi_svc_bitmap_done = true; + } + break; + default: + break; + } + + return 0; +} + +static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_tlv_svc_ready_parse svc_ready = { }; + int ret; + + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_svc_rdy_parse, + &svc_ready); + if (ret) { + ath11k_warn(ab, "failed to parse tlv %d ", ret); + return ret; + } + + return 0; +} + +struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len) +{ + struct sk_buff *skb; + struct ath11k_base *ab = wmi_sc->ab; + u32 round_len = roundup(len, 4); + + skb = ath11k_htc_alloc_skb(ab, wmi_skb_headroom + round_len); + if (!skb) + return null; + + skb_reserve(skb, wmi_skb_headroom); + if (!is_aligned((unsigned long)skb->data, 4)) + ath11k_warn(ab, "unaligned wmi skb data "); + + skb_put(skb, round_len); + memset(skb->data, 0, round_len); + + return skb; +} + +int 
ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, + struct sk_buff *frame) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_mgmt_send_cmd *cmd; + struct wmi_tlv *frame_tlv; + struct sk_buff *skb; + u32 buf_len; + int ret, len; + + buf_len = frame->len < wmi_mgmt_send_downld_len ? + frame->len : wmi_mgmt_send_downld_len; + + len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_mgmt_send_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_mgmt_tx_send_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = vdev_id; + cmd->desc_id = buf_id; + cmd->chanfreq = 0; + cmd->paddr_lo = lower_32_bits(ath11k_skb_cb(frame)->paddr); + cmd->paddr_hi = upper_32_bits(ath11k_skb_cb(frame)->paddr); + cmd->frame_len = frame->len; + cmd->buf_len = buf_len; + cmd->tx_params_valid = 0; + + frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); + frame_tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_byte) | + field_prep(wmi_tlv_len, buf_len); + + memcpy(frame_tlv->value, frame->data, buf_len); + + ath11k_ce_byte_swap(frame_tlv->value, buf_len); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_mgmt_tx_send_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to submit wmi_mgmt_tx_send_cmdid cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, + struct vdev_create_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_create_cmd *cmd; + struct sk_buff *skb; + struct wmi_vdev_txrx_streams *txrx_streams; + struct wmi_tlv *tlv; + int ret, len; + void *ptr; + + /* it can be optimized my sending tx/rx chain configuration + * only for supported bands instead of always sending it for + * both the bands. 
+ */ + len = sizeof(*cmd) + tlv_hdr_size + + (wmi_num_supported_band_max * sizeof(*txrx_streams)); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_create_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_create_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = param->if_id; + cmd->vdev_type = param->type; + cmd->vdev_subtype = param->subtype; + cmd->num_cfg_txrx_streams = wmi_num_supported_band_max; + cmd->pdev_id = param->pdev_id; + ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); + + ptr = skb->data + sizeof(*cmd); + len = wmi_num_supported_band_max * sizeof(*txrx_streams); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, len); + + ptr += tlv_hdr_size; + txrx_streams = ptr; + len = sizeof(*txrx_streams); + txrx_streams->tlv_header = + field_prep(wmi_tlv_tag, wmi_tag_vdev_txrx_streams) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + txrx_streams->band = wmi_tpc_chainmask_config_band_2g; + txrx_streams->supported_tx_streams = + param->chains[nl80211_band_2ghz].tx; + txrx_streams->supported_rx_streams = + param->chains[nl80211_band_2ghz].rx; + + txrx_streams++; + txrx_streams->tlv_header = + field_prep(wmi_tlv_tag, wmi_tag_vdev_txrx_streams) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + txrx_streams->band = wmi_tpc_chainmask_config_band_5g; + txrx_streams->supported_tx_streams = + param->chains[nl80211_band_5ghz].tx; + txrx_streams->supported_rx_streams = + param->chains[nl80211_band_5ghz].rx; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_create_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to submit wmi_vdev_create_cmdid "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi vdev create: id %d type %d subtype %d macaddr %pm pdevid %d ", + param->if_id, param->type, param->subtype, + macaddr, param->pdev_id); + + return ret; +} + +int 
ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_delete_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_delete_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_delete_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = vdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_delete_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to submit wmi_vdev_delete_cmdid "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, "wmi vdev delete id %d ", vdev_id); + + return ret; +} + +int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_stop_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_stop_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_stop_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = vdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_stop_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to submit wmi_vdev_stop cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, "wmi vdev stop id 0x%x ", vdev_id); + + return ret; +} + +int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_down_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_down_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_down_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = vdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_down_cmdid); + if (ret) { + ath11k_warn(ar->ab, 
"failed to submit wmi_vdev_down cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, "wmi vdev down id 0x%x ", vdev_id); + + return ret; +} + +static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan, + struct wmi_vdev_start_req_arg *arg) +{ + memset(chan, 0, sizeof(*chan)); + + chan->mhz = arg->channel.freq; + chan->band_center_freq1 = arg->channel.band_center_freq1; + if (arg->channel.mode == mode_11ac_vht80_80) + chan->band_center_freq2 = arg->channel.band_center_freq2; + else + chan->band_center_freq2 = 0; + + chan->info |= field_prep(wmi_chan_info_mode, arg->channel.mode); + if (arg->channel.passive) + chan->info |= wmi_chan_info_passive; + if (arg->channel.allow_ibss) + chan->info |= wmi_chan_info_adhoc_allowed; + if (arg->channel.allow_ht) + chan->info |= wmi_chan_info_allow_ht; + if (arg->channel.allow_vht) + chan->info |= wmi_chan_info_allow_vht; + if (arg->channel.ht40plus) + chan->info |= wmi_chan_info_ht40_plus; + if (arg->channel.chan_radar) + chan->info |= wmi_chan_info_dfs; + if (arg->channel.freq2_radar) + chan->info |= wmi_chan_info_dfs_freq2; + + chan->reg_info_1 = field_prep(wmi_chan_reg_info1_max_pwr, + arg->channel.max_power) | + field_prep(wmi_chan_reg_info1_max_reg_pwr, + arg->channel.max_reg_power); + + chan->reg_info_2 = field_prep(wmi_chan_reg_info2_ant_max, + arg->channel.max_antenna_gain) | + field_prep(wmi_chan_reg_info2_max_tx_pwr, + arg->channel.max_power); +} + +int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, + bool restart) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_start_request_cmd *cmd; + struct sk_buff *skb; + struct wmi_channel *chan; + struct wmi_tlv *tlv; + void *ptr; + int ret, len; + + if (warn_on(arg->ssid_len > sizeof(cmd->ssid.ssid))) + return -einval; + + len = sizeof(*cmd) + sizeof(*chan) + tlv_hdr_size; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_start_request_cmd *)skb->data; + 
cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_vdev_start_request_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = arg->vdev_id; + cmd->beacon_interval = arg->bcn_intval; + cmd->bcn_tx_rate = arg->bcn_tx_rate; + cmd->dtim_period = arg->dtim_period; + cmd->num_noa_descriptors = arg->num_noa_descriptors; + cmd->preferred_rx_streams = arg->pref_rx_streams; + cmd->preferred_tx_streams = arg->pref_tx_streams; + cmd->cac_duration_ms = arg->cac_duration_ms; + cmd->regdomain = arg->regdomain; + cmd->he_ops = arg->he_ops; + + if (!restart) { + if (arg->ssid) { + cmd->ssid.ssid_len = arg->ssid_len; + memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len); + } + if (arg->hidden_ssid) + cmd->flags |= wmi_vdev_start_hidden_ssid; + if (arg->pmf_enabled) + cmd->flags |= wmi_vdev_start_pmf_enabled; + } + + cmd->flags |= wmi_vdev_start_ldpc_rx_enabled; + + ptr = skb->data + sizeof(*cmd); + chan = ptr; + + ath11k_wmi_put_wmi_channel(chan, arg); + + chan->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_channel) | + field_prep(wmi_tlv_len, + sizeof(*chan) - tlv_hdr_size); + ptr += sizeof(*chan); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, 0); + + /* note: this is a nested tlv containing: + * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv].. + */ + + ptr += sizeof(*tlv); + + if (restart) + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_vdev_restart_request_cmdid); + else + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_vdev_start_request_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to submit vdev_%s cmd ", + restart ? "restart" : "start"); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, "vdev %s id 0x%x freq 0x%x mode 0x%x ", + restart ? 
"restart" : "start", arg->vdev_id, + arg->channel.freq, arg->channel.mode); + + return ret; +} + +int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_up_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_up_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_up_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = vdev_id; + cmd->vdev_assoc_id = aid; + + ether_addr_copy(cmd->vdev_bssid.addr, bssid); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_up_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to submit wmi_vdev_up cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi mgmt vdev up id 0x%x assoc id %d bssid %pm ", + vdev_id, aid, bssid); + + return ret; +} + +int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar, + struct peer_create_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_peer_create_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_peer_create_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_peer_create_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr); + cmd->peer_type = param->peer_type; + cmd->vdev_id = param->vdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_peer_create_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to submit wmi_peer_create cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi peer create vdev_id %d peer_addr %pm ", + param->vdev_id, param->peer_addr); + + return ret; +} + +int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar, + const u8 *peer_addr, u8 vdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; 
+ struct wmi_peer_delete_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_peer_delete_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_peer_delete_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->vdev_id = vdev_id; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi peer delete vdev_id %d peer_addr %pm ", + vdev_id, peer_addr); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_peer_delete_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_peer_delete cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, + struct pdev_set_regdomain_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_set_regdomain_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_pdev_set_regdomain_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->reg_domain = param->current_rd_in_use; + cmd->reg_domain_2g = param->current_rd_2g; + cmd->reg_domain_5g = param->current_rd_5g; + cmd->conformance_test_limit_2g = param->ctl_2g; + cmd->conformance_test_limit_5g = param->ctl_5g; + cmd->dfs_domain = param->dfs_domain; + cmd->pdev_id = param->pdev_id; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d ", + param->current_rd_in_use, param->current_rd_2g, + param->current_rd_5g, param->dfs_domain, param->pdev_id); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_pdev_set_regdomain_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_pdev_set_regdomain cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_set_peer_param(struct ath11k *ar, const 
u8 *peer_addr, + u32 vdev_id, u32 param_id, u32 param_val) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_peer_set_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_peer_set_param_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_peer_set_param_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->vdev_id = vdev_id; + cmd->param_id = param_id; + cmd->param_value = param_val; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_peer_set_param_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_peer_set_param cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi vdev %d peer 0x%pm set param %d value %d ", + vdev_id, peer_addr, param_id, param_val); + + return ret; +} + +int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar, + u8 peer_addr[eth_alen], + struct peer_flush_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_peer_flush_tids_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_peer_flush_tids_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_peer_flush_tids_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->peer_tid_bitmap = param->peer_tid_bitmap; + cmd->vdev_id = param->vdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_peer_flush_tids_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_peer_flush_tids cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi peer flush vdev_id %d peer_addr %pm tids %08x ", + param->vdev_id, peer_addr, param->peer_tid_bitmap); + + return ret; +} + +int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, + int vdev_id, const u8 
*addr, + dma_addr_t paddr, u8 tid, + u8 ba_window_size_valid, + u32 ba_window_size) +{ + struct wmi_peer_reorder_queue_setup_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_reorder_queue_setup_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ether_addr_copy(cmd->peer_macaddr.addr, addr); + cmd->vdev_id = vdev_id; + cmd->tid = tid; + cmd->queue_ptr_lo = lower_32_bits(paddr); + cmd->queue_ptr_hi = upper_32_bits(paddr); + cmd->queue_no = tid; + cmd->ba_window_size_valid = ba_window_size_valid; + cmd->ba_window_size = ba_window_size; + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, + wmi_peer_reorder_queue_setup_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_peer_reorder_queue_setup "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi rx reorder queue setup addr %pm vdev_id %d tid %d ", + addr, vdev_id, tid); + + return ret; +} + +int +ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, + struct rx_reorder_queue_remove_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_peer_reorder_queue_remove_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_reorder_queue_remove_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr); + cmd->vdev_id = param->vdev_id; + cmd->tid_mask = param->peer_tid_bitmap; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "%s: peer_macaddr %pm vdev_id %d, tid_map %d", __func__, + param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap); + + ret = ath11k_wmi_cmd_send(wmi, skb, + 
wmi_peer_reorder_queue_remove_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_peer_reorder_queue_remove_cmdid"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, + u32 param_value, u8 pdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_set_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pdev_set_param_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_set_param_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->pdev_id = pdev_id; + cmd->param_id = param_id; + cmd->param_value = param_value; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_pdev_set_param_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_pdev_set_param cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi pdev set param %d pdev id %d value %d ", + param_id, pdev_id, param_value); + + return ret; +} + +int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, + u32 pdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_suspend_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pdev_suspend_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_suspend_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->suspend_opt = suspend_opt; + cmd->pdev_id = pdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_pdev_suspend_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_pdev_suspend cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi pdev suspend pdev_id %d ", pdev_id); + + return ret; +} + +int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_resume_cmd *cmd; 
+ struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pdev_resume_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_resume_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->pdev_id = pdev_id; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi pdev resume pdev id %d ", pdev_id); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_pdev_resume_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_pdev_resume cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +/* todo fw support for the cmd is not available yet. + * can be tested once the command and corresponding + * event is implemented in fw + */ +int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, + enum wmi_bss_chan_info_req_type type) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_bss_chan_info_req_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_pdev_bss_chan_info_request) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->req_type = type; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi bss chan info req type %d ", type); + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_pdev_bss_chan_info_request_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_pdev_bss_chan_info_request cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr, + struct ap_ps_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_ap_ps_peer_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_ap_ps_peer_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, 
wmi_tag_ap_ps_peer_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = param->vdev_id; + ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->param = param->param; + cmd->value = param->value; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_ap_ps_peer_param_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_ap_ps_peer_param_cmdid "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi set ap ps vdev id %d peer %pm param %d value %d ", + param->vdev_id, peer_addr, param->param, param->value); + + return ret; +} + +int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id, + u32 param, u32 param_value) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_sta_powersave_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_sta_powersave_param_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_sta_powersave_param_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = vdev_id; + cmd->param = param; + cmd->value = param_value; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi set sta ps vdev_id %d param %d value %d ", + vdev_id, param, param_value); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_sta_powersave_param_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_sta_powersave_param_cmdid"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_force_fw_hang_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_force_fw_hang_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_force_fw_hang_cmd) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + + cmd->type = type; + 
cmd->delay_time_ms = delay_time_ms; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_force_fw_hang_cmdid); + + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_force_fw_hang_cmdid"); + dev_kfree_skb(skb); + } + return ret; +} + +int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id, + u32 param_id, u32 param_value) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_set_param_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_set_param_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_set_param_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = vdev_id; + cmd->param_id = param_id; + cmd->param_value = param_value; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_set_param_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_vdev_set_param_cmdid "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi vdev id 0x%x set param %d value %d ", + vdev_id, param_id, param_value); + + return ret; +} + +int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar, + struct stats_request_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_request_stats_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_request_stats_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_request_stats_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->stats_id = param->stats_id; + cmd->vdev_id = param->vdev_id; + cmd->pdev_id = param->pdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_request_stats_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_request_stats cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi request stats 0x%x vdev id %d pdev id %d ", + param->stats_id, param->vdev_id, 
param->pdev_id); + + return ret; +} + +int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, + u32 vdev_id, u32 bcn_ctrl_op) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_bcn_offload_ctrl_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_bcn_offload_ctrl_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = vdev_id; + cmd->bcn_ctrl_op = bcn_ctrl_op; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi bcn ctrl offload vdev id %d ctrl_op %d ", + vdev_id, bcn_ctrl_op); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_bcn_offload_ctrl_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_bcn_offload_ctrl_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, + struct ieee80211_mutable_offsets *offs, + struct sk_buff *bcn) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_bcn_tmpl_cmd *cmd; + struct wmi_bcn_prb_info *bcn_prb_info; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + int ret, len; + size_t aligned_len = roundup(bcn->len, 4); + + len = sizeof(*cmd) + sizeof(*bcn_prb_info) + tlv_hdr_size + aligned_len; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_bcn_tmpl_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_bcn_tmpl_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = vdev_id; + cmd->tim_ie_offset = offs->tim_offset; + cmd->csa_switch_count_offset = offs->csa_counter_offs[0]; + cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1]; + cmd->buf_len = bcn->len; + + ptr = skb->data + sizeof(*cmd); + + bcn_prb_info = ptr; + len = sizeof(*bcn_prb_info); + bcn_prb_info->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_bcn_prb_info) | + 
field_prep(wmi_tlv_len, len - tlv_hdr_size); + bcn_prb_info->caps = 0; + bcn_prb_info->erp = 0; + + ptr += sizeof(*bcn_prb_info); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_byte) | + field_prep(wmi_tlv_len, aligned_len); + memcpy(tlv->value, bcn->data, bcn->len); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_bcn_tmpl_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_bcn_tmpl_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_vdev_install_key(struct ath11k *ar, + struct wmi_vdev_install_key_arg *arg) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_install_key_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + int ret, len; + int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t)); + + len = sizeof(*cmd) + tlv_hdr_size + key_len_aligned; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_install_key_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vdev_install_key_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + cmd->vdev_id = arg->vdev_id; + ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); + cmd->key_idx = arg->key_idx; + cmd->key_flags = arg->key_flags; + cmd->key_cipher = arg->key_cipher; + cmd->key_len = arg->key_len; + cmd->key_txmic_len = arg->key_txmic_len; + cmd->key_rxmic_len = arg->key_rxmic_len; + + if (arg->key_rsc_counter) + memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter, + sizeof(struct wmi_key_seq_counter)); + + tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd)); + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_byte) | + field_prep(wmi_tlv_len, key_len_aligned); + memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned); + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_vdev_install_key_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_vdev_install_key cmd "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi vdev install key idx 
%d cipher %d len %d ", + arg->key_idx, arg->key_cipher, arg->key_len); + + return ret; +} + +static inline void +ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, + struct peer_assoc_params *param) +{ + cmd->peer_flags = 0; + + if (param->is_wme_set) { + if (param->qos_flag) + cmd->peer_flags |= wmi_peer_qos; + if (param->apsd_flag) + cmd->peer_flags |= wmi_peer_apsd; + if (param->ht_flag) + cmd->peer_flags |= wmi_peer_ht; + if (param->bw_40) + cmd->peer_flags |= wmi_peer_40mhz; + if (param->bw_80) + cmd->peer_flags |= wmi_peer_80mhz; + if (param->bw_160) + cmd->peer_flags |= wmi_peer_160mhz; + + /* typically if stbc is enabled for vht it should be enabled + * for ht as well + **/ + if (param->stbc_flag) + cmd->peer_flags |= wmi_peer_stbc; + + /* typically if ldpc is enabled for vht it should be enabled + * for ht as well + **/ + if (param->ldpc_flag) + cmd->peer_flags |= wmi_peer_ldpc; + + if (param->static_mimops_flag) + cmd->peer_flags |= wmi_peer_static_mimops; + if (param->dynamic_mimops_flag) + cmd->peer_flags |= wmi_peer_dyn_mimops; + if (param->spatial_mux_flag) + cmd->peer_flags |= wmi_peer_spatial_mux; + if (param->vht_flag) + cmd->peer_flags |= wmi_peer_vht; + if (param->he_flag) + cmd->peer_flags |= wmi_peer_he; + } + + /* suppress authorization for all auth modes that need 4-way handshake + * (during re-association). + * authorization will be done for these modes on key installation. 
+ */ + if (param->auth_flag) + cmd->peer_flags |= wmi_peer_auth; + if (param->need_ptk_4_way) + cmd->peer_flags |= wmi_peer_need_ptk_4_way; + else + cmd->peer_flags &= ~wmi_peer_need_ptk_4_way; + if (param->need_gtk_2_way) + cmd->peer_flags |= wmi_peer_need_gtk_2_way; + /* safe mode bypass the 4-way handshake */ + if (param->safe_mode_enabled) + cmd->peer_flags &= ~(wmi_peer_need_ptk_4_way | + wmi_peer_need_gtk_2_way); + + if (param->is_pmf_enabled) + cmd->peer_flags |= wmi_peer_pmf; + + /* disable amsdu for station transmit, if user configures it */ + /* disable amsdu for ap transmit to 11n stations, if user configures + * it + * if (param->amsdu_disable) add after fw support + **/ + + /* target asserts if node is marked ht and all mcs is set to 0. + * mark the node as non-ht if all the mcs rates are disabled through + * iwpriv + **/ + if (param->peer_ht_rates.num_rates == 0) + cmd->peer_flags &= ~wmi_peer_ht; +} + +int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, + struct peer_assoc_params *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_peer_assoc_complete_cmd *cmd; + struct wmi_vht_rate_set *mcs; + struct wmi_he_rate_set *he_mcs; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + u32 peer_legacy_rates_align; + u32 peer_ht_rates_align; + int i, ret, len; + + peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates, + sizeof(u32)); + peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates, + sizeof(u32)); + + len = sizeof(*cmd) + + tlv_hdr_size + (peer_legacy_rates_align * sizeof(u8)) + + tlv_hdr_size + (peer_ht_rates_align * sizeof(u8)) + + sizeof(*mcs) + tlv_hdr_size + + (sizeof(*he_mcs) * param->peer_he_mcs_count); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + ptr = skb->data; + + cmd = ptr; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_peer_assoc_complete_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = param->vdev_id; + + 
cmd->peer_new_assoc = param->peer_new_assoc; + cmd->peer_associd = param->peer_associd; + + ath11k_wmi_copy_peer_flags(cmd, param); + + ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac); + + cmd->peer_rate_caps = param->peer_rate_caps; + cmd->peer_caps = param->peer_caps; + cmd->peer_listen_intval = param->peer_listen_intval; + cmd->peer_ht_caps = param->peer_ht_caps; + cmd->peer_max_mpdu = param->peer_max_mpdu; + cmd->peer_mpdu_density = param->peer_mpdu_density; + cmd->peer_vht_caps = param->peer_vht_caps; + cmd->peer_phymode = param->peer_phymode; + + /* update 11ax capabilities */ + cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0]; + cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1]; + cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal; + cmd->peer_he_ops = param->peer_he_ops; + memcpy(&cmd->peer_he_cap_phy, ¶m->peer_he_cap_phyinfo, + sizeof(param->peer_he_cap_phyinfo)); + memcpy(&cmd->peer_ppet, ¶m->peer_ppet, + sizeof(param->peer_ppet)); + + /* update peer legacy rate information */ + ptr += sizeof(*cmd); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_byte) | + field_prep(wmi_tlv_len, peer_legacy_rates_align); + + ptr += tlv_hdr_size; + + cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates; + memcpy(ptr, param->peer_legacy_rates.rates, + param->peer_legacy_rates.num_rates); + + /* update peer ht rate information */ + ptr += peer_legacy_rates_align; + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_byte) | + field_prep(wmi_tlv_len, peer_ht_rates_align); + ptr += tlv_hdr_size; + cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates; + memcpy(ptr, param->peer_ht_rates.rates, + param->peer_ht_rates.num_rates); + + /* vht rates */ + ptr += peer_ht_rates_align; + + mcs = ptr; + + mcs->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_vht_rate_set) | + field_prep(wmi_tlv_len, sizeof(*mcs) - tlv_hdr_size); + + cmd->peer_nss = param->peer_nss; + + /* update bandwidth-nss mapping */ 
+ cmd->peer_bw_rxnss_override = 0; + cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override; + + if (param->vht_capable) { + mcs->rx_max_rate = param->rx_max_rate; + mcs->rx_mcs_set = param->rx_mcs_set; + mcs->tx_max_rate = param->tx_max_rate; + mcs->tx_mcs_set = param->tx_mcs_set; + } + + /* he rates */ + cmd->peer_he_mcs = param->peer_he_mcs_count; + + ptr += sizeof(*mcs); + + len = param->peer_he_mcs_count * sizeof(*he_mcs); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, len); + ptr += tlv_hdr_size; + + /* loop through the he rate set */ + for (i = 0; i < param->peer_he_mcs_count; i++) { + he_mcs = ptr; + he_mcs->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_he_rate_set) | + field_prep(wmi_tlv_len, + sizeof(*he_mcs) - tlv_hdr_size); + + he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i]; + he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i]; + ptr += sizeof(*he_mcs); + } + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_peer_assoc_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_peer_assoc_cmdid "); + dev_kfree_skb(skb); + } + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi peer assoc vdev id %d assoc id %d peer mac %pm ", + param->vdev_id, param->peer_associd, param->peer_mac); + + return ret; +} + +void ath11k_wmi_start_scan_init(struct ath11k *ar, + struct scan_req_params *arg) +{ + /* setup commonly used values */ + arg->scan_req_id = 1; + arg->scan_priority = wmi_scan_priority_low; + arg->dwell_time_active = 50; + arg->dwell_time_active_2g = 0; + arg->dwell_time_passive = 150; + arg->min_rest_time = 50; + arg->max_rest_time = 500; + arg->repeat_probe_time = 0; + arg->probe_spacing_time = 0; + arg->idle_time = 0; + arg->max_scan_time = 20000; + arg->probe_delay = 5; + arg->notify_scan_events = wmi_scan_event_started | + wmi_scan_event_completed | + wmi_scan_event_bss_channel | + wmi_scan_event_foreign_chan | + wmi_scan_event_dequeued; + arg->scan_flags |= wmi_scan_chan_stat_event; + 
arg->num_bssid = 1; +} + +static inline void +ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd, + struct scan_req_params *param) +{ + /* scan events subscription */ + if (param->scan_ev_started) + cmd->notify_scan_events |= wmi_scan_event_started; + if (param->scan_ev_completed) + cmd->notify_scan_events |= wmi_scan_event_completed; + if (param->scan_ev_bss_chan) + cmd->notify_scan_events |= wmi_scan_event_bss_channel; + if (param->scan_ev_foreign_chan) + cmd->notify_scan_events |= wmi_scan_event_foreign_chan; + if (param->scan_ev_dequeued) + cmd->notify_scan_events |= wmi_scan_event_dequeued; + if (param->scan_ev_preempted) + cmd->notify_scan_events |= wmi_scan_event_preempted; + if (param->scan_ev_start_failed) + cmd->notify_scan_events |= wmi_scan_event_start_failed; + if (param->scan_ev_restarted) + cmd->notify_scan_events |= wmi_scan_event_restarted; + if (param->scan_ev_foreign_chn_exit) + cmd->notify_scan_events |= wmi_scan_event_foreign_chan_exit; + if (param->scan_ev_suspended) + cmd->notify_scan_events |= wmi_scan_event_suspended; + if (param->scan_ev_resumed) + cmd->notify_scan_events |= wmi_scan_event_resumed; + + /** set scan control flags */ + cmd->scan_ctrl_flags = 0; + if (param->scan_f_passive) + cmd->scan_ctrl_flags |= wmi_scan_flag_passive; + if (param->scan_f_strict_passive_pch) + cmd->scan_ctrl_flags |= wmi_scan_flag_strict_passive_on_pchn; + if (param->scan_f_promisc_mode) + cmd->scan_ctrl_flags |= wmi_scan_filter_promiscuos; + if (param->scan_f_capture_phy_err) + cmd->scan_ctrl_flags |= wmi_scan_capture_phy_error; + if (param->scan_f_half_rate) + cmd->scan_ctrl_flags |= wmi_scan_flag_half_rate_support; + if (param->scan_f_quarter_rate) + cmd->scan_ctrl_flags |= wmi_scan_flag_quarter_rate_support; + if (param->scan_f_cck_rates) + cmd->scan_ctrl_flags |= wmi_scan_add_cck_rates; + if (param->scan_f_ofdm_rates) + cmd->scan_ctrl_flags |= wmi_scan_add_ofdm_rates; + if (param->scan_f_chan_stat_evnt) + cmd->scan_ctrl_flags |= 
wmi_scan_chan_stat_event; + if (param->scan_f_filter_prb_req) + cmd->scan_ctrl_flags |= wmi_scan_filter_probe_req; + if (param->scan_f_bcast_probe) + cmd->scan_ctrl_flags |= wmi_scan_add_bcast_probe_req; + if (param->scan_f_offchan_mgmt_tx) + cmd->scan_ctrl_flags |= wmi_scan_offchan_mgmt_tx; + if (param->scan_f_offchan_data_tx) + cmd->scan_ctrl_flags |= wmi_scan_offchan_data_tx; + if (param->scan_f_force_active_dfs_chn) + cmd->scan_ctrl_flags |= wmi_scan_flag_force_active_on_dfs; + if (param->scan_f_add_tpc_ie_in_probe) + cmd->scan_ctrl_flags |= wmi_scan_add_tpc_ie_in_probe_req; + if (param->scan_f_add_ds_ie_in_probe) + cmd->scan_ctrl_flags |= wmi_scan_add_ds_ie_in_probe_req; + if (param->scan_f_add_spoofed_mac_in_probe) + cmd->scan_ctrl_flags |= wmi_scan_add_spoof_mac_in_probe_req; + if (param->scan_f_add_rand_seq_in_probe) + cmd->scan_ctrl_flags |= wmi_scan_random_seq_no_in_probe_req; + if (param->scan_f_en_ie_whitelist_in_probe) + cmd->scan_ctrl_flags |= + wmi_scan_enable_ie_whtelist_in_probe_req; + + /* for adaptive scan mode using 3 bits (21 - 23 bits) */ + wmi_scan_set_dwell_mode(cmd->scan_ctrl_flags, + param->adaptive_dwell_time_mode); +} + +int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, + struct scan_req_params *params) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_start_scan_cmd *cmd; + struct wmi_ssid *ssid = null; + struct wmi_mac_addr *bssid; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + int i, ret, len; + u32 *tmp_ptr; + u8 extraie_len_with_pad = 0; + + len = sizeof(*cmd); + + len += tlv_hdr_size; + if (params->num_chan) + len += params->num_chan * sizeof(u32); + + len += tlv_hdr_size; + if (params->num_ssids) + len += params->num_ssids * sizeof(*ssid); + + len += tlv_hdr_size; + if (params->num_bssid) + len += sizeof(*bssid) * params->num_bssid; + + len += tlv_hdr_size; + if (params->extraie.len) + extraie_len_with_pad = + roundup(params->extraie.len, sizeof(u32)); + len += extraie_len_with_pad; + + skb = 
ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + ptr = skb->data; + + cmd = ptr; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_start_scan_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->scan_id = params->scan_id; + cmd->scan_req_id = params->scan_req_id; + cmd->vdev_id = params->vdev_id; + cmd->scan_priority = params->scan_priority; + cmd->notify_scan_events = params->notify_scan_events; + + ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params); + + cmd->dwell_time_active = params->dwell_time_active; + cmd->dwell_time_active_2g = params->dwell_time_active_2g; + cmd->dwell_time_passive = params->dwell_time_passive; + cmd->min_rest_time = params->min_rest_time; + cmd->max_rest_time = params->max_rest_time; + cmd->repeat_probe_time = params->repeat_probe_time; + cmd->probe_spacing_time = params->probe_spacing_time; + cmd->idle_time = params->idle_time; + cmd->max_scan_time = params->max_scan_time; + cmd->probe_delay = params->probe_delay; + cmd->burst_duration = params->burst_duration; + cmd->num_chan = params->num_chan; + cmd->num_bssid = params->num_bssid; + cmd->num_ssids = params->num_ssids; + cmd->ie_len = params->extraie.len; + cmd->n_probes = params->n_probes; + + ptr += sizeof(*cmd); + + len = params->num_chan * sizeof(u32); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_uint32) | + field_prep(wmi_tlv_len, len); + ptr += tlv_hdr_size; + tmp_ptr = (u32 *)ptr; + + for (i = 0; i < params->num_chan; ++i) + tmp_ptr[i] = params->chan_list[i]; + + ptr += len; + + len = params->num_ssids * sizeof(*ssid); + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_fixed_struct) | + field_prep(wmi_tlv_len, len); + + ptr += tlv_hdr_size; + + if (params->num_ssids) { + ssid = ptr; + for (i = 0; i < params->num_ssids; ++i) { + ssid->ssid_len = params->ssid[i].length; + memcpy(ssid->ssid, params->ssid[i].ssid, + params->ssid[i].length); + ssid++; + } + } + + ptr += (params->num_ssids * 
sizeof(*ssid)); + len = params->num_bssid * sizeof(*bssid); + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_fixed_struct) | + field_prep(wmi_tlv_len, len); + + ptr += tlv_hdr_size; + bssid = ptr; + + if (params->num_bssid) { + for (i = 0; i < params->num_bssid; ++i) { + ether_addr_copy(bssid->addr, + params->bssid_list[i].addr); + bssid++; + } + } + + ptr += params->num_bssid * sizeof(*bssid); + + len = extraie_len_with_pad; + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_byte) | + field_prep(wmi_tlv_len, len); + ptr += tlv_hdr_size; + + if (params->extraie.len) + memcpy(ptr, params->extraie.ptr, + params->extraie.len); + + ptr += extraie_len_with_pad; + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_start_scan_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_start_scan_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, + struct scan_cancel_param *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_stop_scan_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_stop_scan_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_stop_scan_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = param->vdev_id; + cmd->requestor = param->requester; + cmd->scan_id = param->scan_id; + cmd->pdev_id = param->pdev_id; + /* stop the scan with the corresponding scan_id */ + if (param->req_type == wlan_scan_cancel_pdev_all) { + /* cancelling all scans */ + cmd->req_type = wmi_scan_stop_all; + } else if (param->req_type == wlan_scan_cancel_vdev_all) { + /* cancelling vap scans */ + cmd->req_type = wmi_scn_stop_vap_all; + } else if (param->req_type == wlan_scan_cancel_single) { + /* cancelling specific scan */ + cmd->req_type = wmi_scan_stop_one; + } else { + ath11k_warn(ar->ab, "invalid scan cancel param %d", + 
param->req_type); + dev_kfree_skb(skb); + return -einval; + } + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_stop_scan_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_stop_scan_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, + struct scan_chan_list_params *chan_list) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_scan_chan_list_cmd *cmd; + struct sk_buff *skb; + struct wmi_channel *chan_info; + struct channel_param *tchan_info; + struct wmi_tlv *tlv; + void *ptr; + int i, ret, len; + u32 *reg1, *reg2; + + len = sizeof(*cmd) + tlv_hdr_size + + sizeof(*chan_info) * chan_list->nallchans; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_scan_chan_list_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_scan_chan_list_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi no.of chan = %d len = %d ", chan_list->nallchans, len); + cmd->pdev_id = chan_list->pdev_id; + cmd->num_scan_chans = chan_list->nallchans; + + ptr = skb->data + sizeof(*cmd); + + len = sizeof(*chan_info) * chan_list->nallchans; + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + ptr += tlv_hdr_size; + + tchan_info = &chan_list->ch_param[0]; + + for (i = 0; i < chan_list->nallchans; ++i) { + chan_info = ptr; + memset(chan_info, 0, sizeof(*chan_info)); + len = sizeof(*chan_info); + chan_info->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_channel) | + field_prep(wmi_tlv_len, + len - tlv_hdr_size); + + reg1 = &chan_info->reg_info_1; + reg2 = &chan_info->reg_info_2; + chan_info->mhz = tchan_info->mhz; + chan_info->band_center_freq1 = tchan_info->cfreq1; + chan_info->band_center_freq2 = tchan_info->cfreq2; + + if (tchan_info->is_chan_passive) + chan_info->info |= wmi_chan_info_passive; + if (tchan_info->allow_vht) + 
chan_info->info |= wmi_chan_info_allow_vht; + else if (tchan_info->allow_ht) + chan_info->info |= wmi_chan_info_allow_ht; + if (tchan_info->half_rate) + chan_info->info |= wmi_chan_info_half_rate; + if (tchan_info->quarter_rate) + chan_info->info |= wmi_chan_info_quarter_rate; + + chan_info->info |= field_prep(wmi_chan_info_mode, + tchan_info->phy_mode); + *reg1 |= field_prep(wmi_chan_reg_info1_min_pwr, + tchan_info->minpower); + *reg1 |= field_prep(wmi_chan_reg_info1_max_pwr, + tchan_info->maxpower); + *reg1 |= field_prep(wmi_chan_reg_info1_max_reg_pwr, + tchan_info->maxregpower); + *reg1 |= field_prep(wmi_chan_reg_info1_reg_cls, + tchan_info->reg_class_id); + *reg2 |= field_prep(wmi_chan_reg_info2_ant_max, + tchan_info->antennamax); + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi chan scan list chan[%d] = %u ", + i, chan_info->mhz); + + ptr += sizeof(*chan_info); + + tchan_info++; + } + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_scan_chan_list_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_scan_chan_list cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, + struct wmi_wmm_params_all_arg *param) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_vdev_set_wmm_params_cmd *cmd; + struct wmi_wmm_params *wmm_param; + struct wmi_wmm_params_arg *wmi_wmm_arg; + struct sk_buff *skb; + int ret, ac; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_vdev_set_wmm_params_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->vdev_id = vdev_id; + cmd->wmm_param_type = 0; + + for (ac = 0; ac < wme_num_ac; ac++) { + switch (ac) { + case wme_ac_be: + wmi_wmm_arg = ¶m->ac_be; + break; + case wme_ac_bk: + wmi_wmm_arg = ¶m->ac_bk; + break; + case wme_ac_vi: + wmi_wmm_arg = ¶m->ac_vi; + break; + case wme_ac_vo: + wmi_wmm_arg = 
¶m->ac_vo; + break; + } + + wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac]; + wmm_param->tlv_header = + field_prep(wmi_tlv_tag, + wmi_tag_vdev_set_wmm_params_cmd) | + field_prep(wmi_tlv_len, + sizeof(*wmm_param) - tlv_hdr_size); + + wmm_param->aifs = wmi_wmm_arg->aifs; + wmm_param->cwmin = wmi_wmm_arg->cwmin; + wmm_param->cwmax = wmi_wmm_arg->cwmax; + wmm_param->txoplimit = wmi_wmm_arg->txop; + wmm_param->acm = wmi_wmm_arg->acm; + wmm_param->no_ack = wmi_wmm_arg->no_ack; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d ", + ac, wmm_param->aifs, wmm_param->cwmin, + wmm_param->cwmax, wmm_param->txoplimit, + wmm_param->acm, wmm_param->no_ack); + } + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_vdev_set_wmm_params_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_vdev_set_wmm_params_cmdid"); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar, + u32 pdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_dfs_phyerr_offload_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data; + cmd->tlv_header = + field_prep(wmi_tlv_tag, + wmi_tag_pdev_dfs_phyerr_offload_enable_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->pdev_id = pdev_id; + + ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi dfs phy err offload enable pdev id %d ", pdev_id); + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_pdev_dfs_phyerr_offload_enable_cmdid); + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_pdev_dfs_phyerr_offload_enable cmd "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pdev_pktlog_filter_cmd *cmd; + struct wmi_pdev_pktlog_filter_info *info; + 
struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + int ret, len; + + len = sizeof(*cmd) + sizeof(*info) + tlv_hdr_size; + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_peer_pktlog_filter_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->pdev_id = ar->pdev->pdev_id; + cmd->num_mac = 1; + cmd->enable = enable; + + ptr = skb->data + sizeof(*cmd); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, 0); + + ptr += tlv_hdr_size; + info = ptr; + + ether_addr_copy(info->peer_macaddr.addr, addr); + info->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_peer_pktlog_filter_info) | + field_prep(wmi_tlv_len, + sizeof(*info) - tlv_hdr_size); + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_pdev_pktlog_filter_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_pdev_pktlog_enable_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int +ath11k_wmi_send_init_country_cmd(struct ath11k *ar, + struct wmi_init_country_params init_cc_params) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_init_country_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_init_country_cmd *)skb->data; + cmd->tlv_header = + field_prep(wmi_tlv_tag, + wmi_tag_set_init_country_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->pdev_id = ar->pdev->pdev_id; + + switch (init_cc_params.flags) { + case alpha_is_set: + cmd->init_cc_type = wmi_country_info_type_alpha; + memcpy((u8 *)&cmd->cc_info.alpha2, + init_cc_params.cc_info.alpha2, 3); + break; + case cc_is_set: + cmd->init_cc_type = wmi_country_info_type_country_code; + cmd->cc_info.country_code = init_cc_params.cc_info.country_code; + break; + case regdmn_is_set: + cmd->init_cc_type = 
wmi_country_info_type_regdomain; + cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id; + break; + default: + ret = -einval; + goto out; + } + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_set_init_country_cmdid); + +out: + if (ret) { + ath11k_warn(ar->ab, + "failed to send wmi_set_init_country cmd :%d ", + ret); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pktlog_enable_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pktlog_enable_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_pktlog_enable_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->pdev_id = ar->pdev->pdev_id; + cmd->evlist = pktlog_filter; + cmd->enable = ath11k_wmi_pktlog_enable_force; + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_pdev_pktlog_enable_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_pdev_pktlog_enable_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_pktlog_disable_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, sizeof(*cmd)); + if (!skb) + return -enomem; + + cmd = (struct wmi_pktlog_disable_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_pdev_pktlog_disable_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + cmd->pdev_id = ar->pdev->pdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_pdev_pktlog_disable_cmdid); + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_pdev_pktlog_enable_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +static void +ath11k_fill_band_to_mac_param(struct ath11k_base *soc, + struct wmi_host_pdev_band_to_mac *band_to_mac) +{ + u8 i; + struct 
ath11k_hal_reg_capabilities_ext *hal_reg_cap; + struct ath11k_pdev *pdev; + + for (i = 0; i < soc->num_radios; i++) { + pdev = &soc->pdevs[i]; + hal_reg_cap = &soc->hal_reg_cap[i]; + band_to_mac[i].pdev_id = pdev->pdev_id; + + switch (pdev->cap.supported_bands) { + case wmi_host_wlan_2g_5g_cap: + band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; + band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; + break; + case wmi_host_wlan_2g_cap: + band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan; + band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan; + break; + case wmi_host_wlan_5g_cap: + band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan; + band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan; + break; + default: + break; + } + } +} + +static void +ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg, + struct target_resource_config *tg_cfg) +{ + wmi_cfg->num_vdevs = tg_cfg->num_vdevs; + wmi_cfg->num_peers = tg_cfg->num_peers; + wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers; + wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs; + wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys; + wmi_cfg->num_tids = tg_cfg->num_tids; + wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit; + wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask; + wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask; + wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0]; + wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1]; + wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2]; + wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3]; + wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode; + wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req; + wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev; + wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev; + wmi_cfg->roam_offload_max_ap_profiles = + tg_cfg->roam_offload_max_ap_profiles; + wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups; + wmi_cfg->num_mcast_table_elems = 
tg_cfg->num_mcast_table_elems; + wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode; + wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size; + wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries; + wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size; + wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim; + wmi_cfg->rx_skip_defrag_timeout_dup_detection_check = + tg_cfg->rx_skip_defrag_timeout_dup_detection_check; + wmi_cfg->vow_config = tg_cfg->vow_config; + wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev; + wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc; + wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries; + wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs; + wmi_cfg->num_tdls_conn_table_entries = + tg_cfg->num_tdls_conn_table_entries; + wmi_cfg->beacon_tx_offload_max_vdev = + tg_cfg->beacon_tx_offload_max_vdev; + wmi_cfg->num_multicast_filter_entries = + tg_cfg->num_multicast_filter_entries; + wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters; + wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern; + wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size; + wmi_cfg->max_tdls_concurrent_sleep_sta = + tg_cfg->max_tdls_concurrent_sleep_sta; + wmi_cfg->max_tdls_concurrent_buffer_sta = + tg_cfg->max_tdls_concurrent_buffer_sta; + wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate; + wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs; + wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels; + wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules; + wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size; + wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters; + wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id; + wmi_cfg->flag1 = tg_cfg->atf_config; + wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support; +} + +static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi, + struct wmi_init_cmd_param *param) +{ + struct ath11k_base *ab = wmi->wmi_sc->ab; + struct sk_buff *skb; + struct wmi_init_cmd *cmd; + struct 
wmi_resource_config *cfg; + struct wmi_pdev_set_hw_mode_cmd_param *hw_mode; + struct wmi_pdev_band_to_mac *band_to_mac; + struct wlan_host_mem_chunk *host_mem_chunks; + struct wmi_tlv *tlv; + size_t ret, len; + void *ptr; + u32 hw_mode_len = 0; + u16 idx; + + if (param->hw_mode_id != wmi_host_hw_mode_max) + hw_mode_len = sizeof(*hw_mode) + tlv_hdr_size + + (param->num_band_to_mac * sizeof(*band_to_mac)); + + len = sizeof(*cmd) + tlv_hdr_size + sizeof(*cfg) + hw_mode_len + + (sizeof(*host_mem_chunks) * wmi_max_mem_reqs); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (struct wmi_init_cmd *)skb->data; + + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_init_cmd) | + field_prep(wmi_tlv_len, sizeof(*cmd) - tlv_hdr_size); + + ptr = skb->data + sizeof(*cmd); + cfg = ptr; + + ath11k_wmi_copy_resource_config(cfg, param->res_cfg); + + cfg->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_resource_config) | + field_prep(wmi_tlv_len, sizeof(*cfg) - tlv_hdr_size); + + ptr += sizeof(*cfg); + host_mem_chunks = ptr + tlv_hdr_size; + len = sizeof(struct wlan_host_mem_chunk); + + for (idx = 0; idx < param->num_mem_chunks; ++idx) { + host_mem_chunks[idx].tlv_header = + field_prep(wmi_tlv_tag, + wmi_tag_wlan_host_memory_chunk) | + field_prep(wmi_tlv_len, len); + + host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr; + host_mem_chunks[idx].size = param->mem_chunks[idx].len; + host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id; + + ath11k_dbg(ab, ath11k_dbg_wmi, + "wmi host mem chunk req_id %d paddr 0x%llx len %d ", + param->mem_chunks[idx].req_id, + (u64)param->mem_chunks[idx].paddr, + param->mem_chunks[idx].len); + } + cmd->num_host_mem_chunks = param->num_mem_chunks; + len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks; + + /* num_mem_chunks is zero */ + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, len); + ptr += tlv_hdr_size + len; + + if (param->hw_mode_id != 
wmi_host_hw_mode_max) { + hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr; + hw_mode->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_pdev_set_hw_mode_cmd) | + field_prep(wmi_tlv_len, + sizeof(*hw_mode) - tlv_hdr_size); + + hw_mode->hw_mode_index = param->hw_mode_id; + hw_mode->num_band_to_mac = param->num_band_to_mac; + + ptr += sizeof(*hw_mode); + + len = param->num_band_to_mac * sizeof(*band_to_mac); + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_struct) | + field_prep(wmi_tlv_len, len); + + ptr += tlv_hdr_size; + len = sizeof(*band_to_mac); + + for (idx = 0; idx < param->num_band_to_mac; idx++) { + band_to_mac = (void *)ptr; + + band_to_mac->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_pdev_band_to_mac) | + field_prep(wmi_tlv_len, + len - tlv_hdr_size); + band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id; + band_to_mac->start_freq = + param->band_to_mac[idx].start_freq; + band_to_mac->end_freq = + param->band_to_mac[idx].end_freq; + ptr += sizeof(*band_to_mac); + } + } + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_init_cmdid); + if (ret) { + ath11k_warn(ab, "failed to send wmi_init_cmdid "); + dev_kfree_skb(skb); + } + + return ret; +} + +int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab) +{ + unsigned long time_left; + + time_left = wait_for_completion_timeout(&ab->wmi_sc.service_ready, + wmi_service_ready_timeout_hz); + if (!time_left) + return -etimedout; + + return 0; +} + +int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab) +{ + unsigned long time_left; + + time_left = wait_for_completion_timeout(&ab->wmi_sc.unified_ready, + wmi_service_ready_timeout_hz); + if (!time_left) + return -etimedout; + + return 0; +} + +int ath11k_wmi_cmd_init(struct ath11k_base *ab) +{ + struct ath11k_wmi_base *wmi_sc = &ab->wmi_sc; + struct wmi_init_cmd_param init_param; + struct target_resource_config config; + + memset(&init_param, 0, sizeof(init_param)); + memset(&config, 0, sizeof(config)); + + config.num_vdevs = 
ab->num_radios * target_num_vdevs; + + if (ab->num_radios == 2) { + config.num_peers = target_num_peers(dbs); + config.num_tids = target_num_tids(dbs); + } else if (ab->num_radios == 3) { + config.num_peers = target_num_peers(dbs_sbs); + config.num_tids = target_num_tids(dbs_sbs); + } else { + /* control should not reach here */ + config.num_peers = target_num_peers(single); + config.num_tids = target_num_tids(single); + } + config.num_offload_peers = target_num_offld_peers; + config.num_offload_reorder_buffs = target_num_offld_reorder_buffs; + config.num_peer_keys = target_num_peer_keys; + config.ast_skid_limit = target_ast_skid_limit; + config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; + config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1; + config.rx_timeout_pri[0] = target_rx_timeout_lo_pri; + config.rx_timeout_pri[1] = target_rx_timeout_lo_pri; + config.rx_timeout_pri[2] = target_rx_timeout_lo_pri; + config.rx_timeout_pri[3] = target_rx_timeout_hi_pri; + config.rx_decap_mode = target_decap_mode_native_wifi; + config.scan_max_pending_req = target_scan_max_pending_reqs; + config.bmiss_offload_max_vdev = target_bmiss_offload_max_vdev; + config.roam_offload_max_vdev = target_roam_offload_max_vdev; + config.roam_offload_max_ap_profiles = target_roam_offload_max_ap_profiles; + config.num_mcast_groups = target_num_mcast_groups; + config.num_mcast_table_elems = target_num_mcast_table_elems; + config.mcast2ucast_mode = target_mcast2ucast_mode; + config.tx_dbg_log_size = target_tx_dbg_log_size; + config.num_wds_entries = target_num_wds_entries; + config.dma_burst_size = target_dma_burst_size; + config.rx_skip_defrag_timeout_dup_detection_check = + target_rx_skip_defrag_timeout_dup_detection_check; + config.vow_config = target_vow_config; + config.gtk_offload_max_vdev = target_gtk_offload_max_vdev; + config.num_msdu_desc = target_num_msdu_desc; + config.beacon_tx_offload_max_vdev = ab->num_radios * target_max_bcn_offld; + config.rx_batchmode = 
target_rx_batchmode; + config.peer_map_unmap_v2_support = 1; + + memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config)); + + init_param.res_cfg = &wmi_sc->wlan_resource_config; + init_param.num_mem_chunks = wmi_sc->num_mem_chunks; + init_param.hw_mode_id = wmi_sc->preferred_hw_mode; + init_param.mem_chunks = wmi_sc->mem_chunks; + + if (wmi_sc->preferred_hw_mode == wmi_host_hw_mode_single) + init_param.hw_mode_id = wmi_host_hw_mode_max; + + init_param.num_band_to_mac = ab->num_radios; + + ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac); + + return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); +} + +static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + struct wmi_hw_mode_capabilities *hw_mode_cap; + u32 phy_map = 0; + + if (tag != wmi_tag_hw_mode_capabilities) + return -eproto; + + if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes) + return -enobufs; + + hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities, + hw_mode_id); + svc_rdy_ext->n_hw_mode_caps++; + + phy_map = hw_mode_cap->phy_id_map; + while (phy_map) { + svc_rdy_ext->tot_phy_id++; + phy_map = phy_map >> 1; + } + + return 0; +} + +static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc, + u16 len, const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + struct wmi_hw_mode_capabilities *hw_mode_caps; + enum wmi_host_hw_mode_config_type mode, pref; + u32 i; + int ret; + + svc_rdy_ext->n_hw_mode_caps = 0; + svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr; + + ret = ath11k_wmi_tlv_iter(soc, ptr, len, + ath11k_wmi_tlv_hw_mode_caps_parse, + svc_rdy_ext); + if (ret) { + ath11k_warn(soc, "failed to parse tlv %d ", ret); + return ret; + } + + i = 0; + while (i < svc_rdy_ext->n_hw_mode_caps) { + hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i]; + mode = hw_mode_caps->hw_mode_id; + pref = 
soc->wmi_sc.preferred_hw_mode; + + if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) { + svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps; + soc->wmi_sc.preferred_hw_mode = mode; + } + i++; + } + + if (soc->wmi_sc.preferred_hw_mode == wmi_host_hw_mode_max) + return -einval; + + return 0; +} + +static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + + if (tag != wmi_tag_mac_phy_capabilities) + return -eproto; + + if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id) + return -enobufs; + + svc_rdy_ext->n_mac_phy_caps++; + return 0; +} + +static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + + if (tag != wmi_tag_hal_reg_capabilities_ext) + return -eproto; + + if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy) + return -enobufs; + + svc_rdy_ext->n_ext_hal_reg_caps++; + return 0; +} + +static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc, + u16 len, const void *ptr, void *data) +{ + struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_sc.wmi[0]; + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + struct ath11k_hal_reg_capabilities_ext reg_cap; + int ret; + u32 i; + + svc_rdy_ext->n_ext_hal_reg_caps = 0; + svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr; + ret = ath11k_wmi_tlv_iter(soc, ptr, len, + ath11k_wmi_tlv_ext_hal_reg_caps_parse, + svc_rdy_ext); + if (ret) { + ath11k_warn(soc, "failed to parse tlv %d ", ret); + return ret; + } + + for (i = 0; i < svc_rdy_ext->param.num_phy; i++) { + ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle, + svc_rdy_ext->soc_hal_reg_caps, + svc_rdy_ext->ext_hal_reg_caps, i, + ®_cap); + if (ret) { + ath11k_warn(soc, "failed to extract reg cap %d ", i); + return ret; + } + + 
memcpy(&soc->hal_reg_cap[reg_cap.phy_id], + ®_cap, sizeof(reg_cap)); + } + return 0; +} + +static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, + u16 len, const void *ptr, + void *data) +{ + struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_sc.wmi[0]; + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id; + u32 phy_id_map; + int ret; + + svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr; + svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy; + + soc->num_radios = 0; + phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map; + + while (phy_id_map && soc->num_radios < max_radios) { + ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle, + svc_rdy_ext->hw_caps, + svc_rdy_ext->hw_mode_caps, + svc_rdy_ext->soc_hal_reg_caps, + svc_rdy_ext->mac_phy_caps, + hw_mode_id, soc->num_radios, + &soc->pdevs[soc->num_radios]); + if (ret) { + ath11k_warn(soc, "failed to extract mac caps, idx :%d ", + soc->num_radios); + return ret; + } + + soc->num_radios++; + + /* todo: mac_phy_cap prints */ + phy_id_map >>= 1; + } + return 0; +} + +static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_sc.wmi[0]; + struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data; + int ret; + + switch (tag) { + case wmi_tag_service_ready_ext_event: + ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr, + &svc_rdy_ext->param); + if (ret) { + ath11k_warn(ab, "unable to extract ext params "); + return ret; + } + break; + + case wmi_tag_soc_mac_phy_hw_mode_caps: + svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr; + svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes; + break; + + case wmi_tag_soc_hal_reg_capabilities: + ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr, + svc_rdy_ext); + if (ret) + return ret; + break; + + case 
wmi_tag_array_struct: + if (!svc_rdy_ext->hw_mode_done) { + ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr, + svc_rdy_ext); + if (ret) + return ret; + + svc_rdy_ext->hw_mode_done = true; + } else if (!svc_rdy_ext->mac_phy_done) { + svc_rdy_ext->n_mac_phy_caps = 0; + svc_rdy_ext->mac_phy_caps = + (struct wmi_mac_phy_capabilities *)ptr; + ret = ath11k_wmi_tlv_iter(ab, ptr, len, + ath11k_wmi_tlv_mac_phy_caps_parse, + svc_rdy_ext); + if (ret) { + ath11k_warn(ab, "failed to parse tlv %d ", ret); + return ret; + } + + svc_rdy_ext->mac_phy_done = true; + } else if (!svc_rdy_ext->ext_hal_reg_done) { + ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr, + svc_rdy_ext); + if (ret) + return ret; + + svc_rdy_ext->ext_hal_reg_done = true; + complete(&ab->wmi_sc.service_ready); + } + break; + + default: + break; + } + return 0; +} + +static int ath11k_service_ready_ext_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { }; + int ret; + + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_svc_rdy_ext_parse, + &svc_rdy_ext); + if (ret) { + ath11k_warn(ab, "failed to parse tlv %d ", ret); + return ret; + } + + return 0; +} + +static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb, + struct wmi_vdev_start_resp_event *vdev_rsp) +{ + const void **tb; + const struct wmi_vdev_start_resp_event *ev; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic); + if (is_err(tb)) { + ret = ptr_err(tb); + ath11k_warn(ab, "failed to parse tlv: %d ", ret); + return ret; + } + + ev = tb[wmi_tag_vdev_start_response_event]; + if (!ev) { + ath11k_warn(ab, "failed to fetch vdev start resp ev"); + kfree(tb); + return -eproto; + } + + memset(vdev_rsp, 0, sizeof(*vdev_rsp)); + + vdev_rsp->vdev_id = ev->vdev_id; + vdev_rsp->requestor_id = ev->requestor_id; + vdev_rsp->resp_type = ev->resp_type; + vdev_rsp->status = ev->status; + vdev_rsp->chain_mask = ev->chain_mask; + 
vdev_rsp->smps_mode = ev->smps_mode; + vdev_rsp->mac_id = ev->mac_id; + vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams; + vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams; + + kfree(tb); + return 0; +} + +static struct cur_reg_rule +*create_reg_rules_from_wmi(u32 num_reg_rules, + struct wmi_regulatory_rule_struct *wmi_reg_rule) +{ + struct cur_reg_rule *reg_rule_ptr; + u32 count; + + reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)), + gfp_atomic); + + if (!reg_rule_ptr) + return null; + + for (count = 0; count < num_reg_rules; count++) { + reg_rule_ptr[count].start_freq = + field_get(reg_rule_start_freq, + wmi_reg_rule[count].freq_info); + reg_rule_ptr[count].end_freq = + field_get(reg_rule_end_freq, + wmi_reg_rule[count].freq_info); + reg_rule_ptr[count].max_bw = + field_get(reg_rule_max_bw, + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].reg_power = + field_get(reg_rule_reg_pwr, + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].ant_gain = + field_get(reg_rule_ant_gain, + wmi_reg_rule[count].bw_pwr_info); + reg_rule_ptr[count].flags = + field_get(reg_rule_flags, + wmi_reg_rule[count].flag_info); + } + + return reg_rule_ptr; +} + +static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab, + struct sk_buff *skb, + struct cur_regulatory_info *reg_info) +{ + const void **tb; + const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr; + struct wmi_regulatory_rule_struct *wmi_reg_rule; + u32 num_2g_reg_rules, num_5g_reg_rules; + int ret; + + ath11k_dbg(ab, ath11k_dbg_wmi, "processing regulatory channel list "); + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic); + if (is_err(tb)) { + ret = ptr_err(tb); + ath11k_warn(ab, "failed to parse tlv: %d ", ret); + return ret; + } + + chan_list_event_hdr = tb[wmi_tag_reg_chan_list_cc_event]; + if (!chan_list_event_hdr) { + ath11k_warn(ab, "failed to fetch reg chan list update ev "); + kfree(tb); + return -eproto; + } + + reg_info->num_2g_reg_rules = 
	    chan_list_event_hdr->num_2g_reg_rules;
	reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;

	if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
		ath11k_warn(ab, "no regulatory rules available in the event info ");
		kfree(tb);
		return -einval;
	}

	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
	       reg_alpha2_len);
	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
	reg_info->num_phy = chan_list_event_hdr->num_phy;
	reg_info->phy_id = chan_list_event_hdr->phy_id;
	reg_info->ctry_code = chan_list_event_hdr->country_id;
	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
	/* translate the firmware status code into the driver-local enum */
	if (chan_list_event_hdr->status_code == wmi_reg_set_cc_status_pass)
		reg_info->status_code = reg_set_cc_status_pass;
	else if (chan_list_event_hdr->status_code == wmi_reg_current_alpha2_not_found)
		reg_info->status_code = reg_current_alpha2_not_found;
	else if (chan_list_event_hdr->status_code == wmi_reg_init_alpha2_not_found)
		reg_info->status_code = reg_init_alpha2_not_found;
	else if (chan_list_event_hdr->status_code == wmi_reg_set_cc_change_not_allowed)
		reg_info->status_code = reg_set_cc_change_not_allowed;
	else if (chan_list_event_hdr->status_code == wmi_reg_set_cc_status_no_memory)
		reg_info->status_code = reg_set_cc_status_no_memory;
	else if (chan_list_event_hdr->status_code == wmi_reg_set_cc_status_fail)
		reg_info->status_code = reg_set_cc_status_fail;

	reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
	reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
	reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
	reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;

	num_2g_reg_rules = reg_info->num_2g_reg_rules;
	num_5g_reg_rules = reg_info->num_5g_reg_rules;

	ath11k_dbg(ab, ath11k_dbg_wmi,
		   "%s:cc %s dsf %d bw: min_2g %d max_2g %d min_5g %d max_5g %d",
		   __func__, reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2g, reg_info->max_bw_2g,
		   reg_info->min_bw_5g, reg_info->max_bw_5g);

	ath11k_dbg(ab, ath11k_dbg_wmi,
		   "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
		   num_2g_reg_rules, num_5g_reg_rules);

	/* the rule TLVs follow the fixed header; 2 GHz rules come first */
	wmi_reg_rule =
		(struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
						      + sizeof(*chan_list_event_hdr)
						      + sizeof(struct wmi_tlv));

	if (num_2g_reg_rules) {
		reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
								       wmi_reg_rule);
		if (!reg_info->reg_rules_2g_ptr) {
			kfree(tb);
			ath11k_warn(ab, "unable to allocate memory for 2g rules ");
			return -enomem;
		}
	}

	if (num_5g_reg_rules) {
		/* 5 GHz rules start right after the 2 GHz rules */
		wmi_reg_rule += num_2g_reg_rules;
		reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
								       wmi_reg_rule);
		if (!reg_info->reg_rules_5g_ptr) {
			kfree(tb);
			ath11k_warn(ab, "unable to allocate memory for 5g rules ");
			return -enomem;
		}
	}

	ath11k_dbg(ab, ath11k_dbg_wmi, "processed regulatory channel list ");

	kfree(tb);
	return 0;
}

/* Parse a WMI peer delete response event into @peer_del_resp. */
static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
					struct wmi_peer_delete_resp_event *peer_del_resp)
{
	const void **tb;
	const struct wmi_peer_delete_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_peer_delete_resp_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer delete resp ev");
		kfree(tb);
		return -eproto;
	}

	memset(peer_del_resp, 0, sizeof(*peer_del_resp));

	peer_del_resp->vdev_id = ev->vdev_id;
	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
			ev->peer_macaddr.addr);

	kfree(tb);
	return 0;
}

/* Extract vdev id and tx status from an offloaded beacon tx status event. */
static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
					u32 len, u32 *vdev_id,
					u32 *tx_status)
{
	const void **tb;
	const struct wmi_bcn_tx_status_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_offload_bcn_tx_status_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch bcn tx status ev");
		kfree(tb);
		return -eproto;
	}

	*vdev_id = ev->vdev_id;
	*tx_status = ev->tx_status;

	kfree(tb);
	return 0;
}

/* Extract the vdev id from a vdev stopped event. */
static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
					      u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_stopped_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_vdev_stopped_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev stop ev");
		kfree(tb);
		return -eproto;
	}

	*vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* Parse a WMI management rx event: fill @hdr from the fixed header TLV and
 * reshape @skb so it contains only the 802.11 frame payload.
 */
static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
					  struct sk_buff *skb,
					  struct mgmt_rx_event_params *hdr)
{
	const void **tb;
	const struct wmi_mgmt_rx_hdr *ev;
	const u8 *frame;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_mgmt_rx_hdr];
	frame = tb[wmi_tag_array_byte];

	if (!ev || !frame) {
		ath11k_warn(ab, "failed to fetch mgmt rx hdr");
		kfree(tb);
		return -eproto;
	}

	hdr->pdev_id = ev->pdev_id;
	hdr->channel = ev->channel;
	hdr->snr = ev->snr;
	hdr->rate = ev->rate;
	hdr->phy_mode = ev->phy_mode;
	hdr->buf_len = ev->buf_len;
	hdr->status = ev->status;
	hdr->flags = ev->flags;
	hdr->rssi = ev->rssi;
	hdr->tsf_delta = ev->tsf_delta;
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	/* the reported frame length must fit inside the received skb */
	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
		kfree(tb);
		return -eproto;
	}

	/* shift the sk_buff to point to 'frame' */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);

	ath11k_ce_byte_swap(skb->data, hdr->buf_len);

	kfree(tb);
	return 0;
}

/* Complete a pending management frame once fw reports its tx status:
 * look it up by descriptor id, unmap it and hand the status to mac80211.
 */
static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
				    u32 status)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;

	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, desc_id);

	if (!msdu) {
		ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d ",
			    desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -enoent;
	}

	idr_remove(&ar->txmgmt_idr, desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ath11k_skb_cb(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, dma_to_device);

	info = ieee80211_skb_cb(msdu);
	/* a zero status with ack expected means the frame was acked */
	if ((!(info->flags & ieee80211_tx_ctl_no_ack)) && !status)
		info->flags |= ieee80211_tx_stat_ack;

	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	warn_on_once(atomic_read(&ar->num_pending_mgmt_tx) == 0);
	atomic_dec(&ar->num_pending_mgmt_tx);

	return 0;
}

/* Parse a WMI management tx completion event into @param. */
static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
					       struct sk_buff *skb,
					       struct wmi_mgmt_tx_compl_event *param)
{
	const void **tb;
	const struct wmi_mgmt_tx_compl_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_mgmt_tx_compl_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
		kfree(tb);
		return -eproto;
	}

	param->pdev_id = ev->pdev_id;
	param->desc_id = ev->desc_id;
	param->status = ev->status;

	kfree(tb);
	return 0;
}

/* Advance the scan state machine on a "scan started" event. */
static void ath11k_wmi_event_scan_started(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);
	switch (ar->scan.state) {
	case ath11k_scan_idle:
	case ath11k_scan_running:
	case ath11k_scan_aborting:
		ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d) ",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ath11k_scan_starting:
		ar->scan.state = ath11k_scan_running;
		complete(&ar->scan.started);
		break;
	}
}

/* Handle a "scan start failed" event: wake the waiter and finish the scan. */
static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ath11k_scan_idle:
	case ath11k_scan_running:
	case ath11k_scan_aborting:
		ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d) ",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ath11k_scan_starting:
		complete(&ar->scan.started);
		__ath11k_mac_scan_finish(ar);
		break;
	}
}

/* Handle a "scan completed" event and tear down the running scan. */
static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ath11k_scan_idle:
	case ath11k_scan_starting:
		/* one suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. this has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. in such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d) ",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ath11k_scan_running:
	case ath11k_scan_aborting:
		__ath11k_mac_scan_finish(ar);
		break;
	}
}

/* Firmware moved back to the BSS channel: forget the foreign channel. */
static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ath11k_scan_idle:
	case ath11k_scan_starting:
		ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d) ",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ath11k_scan_running:
	case ath11k_scan_aborting:
		ar->scan_channel = null;
		break;
	}
}

/* Firmware tuned to a foreign channel @freq during the scan. */
static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ath11k_scan_idle:
	case ath11k_scan_starting:
		ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d) ",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ath11k_scan_running:
	case ath11k_scan_aborting:
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		break;
	}
}

/* Map a scan event type (and, for completions, the reason) to a string
 * for debug logging.
 */
static const char *
ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
			       enum wmi_scan_completion_reason reason)
{
	switch (type) {
	case wmi_scan_event_started:
		return "started";
	case wmi_scan_event_completed:
		switch (reason) {
		case wmi_scan_reason_completed:
			return "completed";
		case wmi_scan_reason_cancelled:
			return "completed [cancelled]";
		case wmi_scan_reason_preempted:
			return "completed [preempted]";
		case wmi_scan_reason_timedout:
			return "completed [timedout]";
		case wmi_scan_reason_internal_failure:
			return "completed [internal err]";
		case wmi_scan_reason_max:
			break;
		}
		return "completed [unknown]";
	case wmi_scan_event_bss_channel:
		return "bss channel";
	case wmi_scan_event_foreign_chan:
		return "foreign channel";
	case wmi_scan_event_dequeued:
		return "dequeued";
	case wmi_scan_event_preempted:
		return "preempted";
	case wmi_scan_event_start_failed:
		return "start failed";
	case wmi_scan_event_restarted:
		return "restarted";
	case wmi_scan_event_foreign_chan_exit:
		return "foreign channel exit";
	default:
		return "unknown";
	}
}

/* Parse a WMI scan event into @scan_evt_param. */
static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
			       struct wmi_scan_event *scan_evt_param)
{
	const void **tb;
	const struct wmi_scan_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_scan_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch scan ev");
		kfree(tb);
		return -eproto;
	}

	scan_evt_param->event_type = ev->event_type;
	scan_evt_param->reason = ev->reason;
	scan_evt_param->channel_freq = ev->channel_freq;
	scan_evt_param->scan_req_id = ev->scan_req_id;
	scan_evt_param->scan_id = ev->scan_id;
	scan_evt_param->vdev_id = ev->vdev_id;
	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;

	kfree(tb);
	return 0;
}

/* Parse a peer sta kickout event.
 * note: @arg->mac_addr aliases data inside @skb, not a copy.
 */
static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
					   struct wmi_peer_sta_kickout_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_peer_sta_kickout_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer sta kickout ev");
		kfree(tb);
		return -eproto;
	}

	arg->mac_addr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Parse a WMI roam event into @roam_ev. */
static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
			       struct wmi_roam_event *roam_ev)
{
	const void **tb;
	const struct wmi_roam_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_roam_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch roam ev");
		kfree(tb);
		return -eproto;
	}

	roam_ev->vdev_id = ev->vdev_id;
	roam_ev->reason = ev->reason;
	roam_ev->rssi = ev->rssi;

	kfree(tb);
	return 0;
}

/* Map a center frequency to its flat channel index across all registered
 * bands; returns the running index of the first match (or the total count
 * when no channel matches).
 */
static int freq_to_idx(struct ath11k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = nl80211_band_2ghz; band < num_nl80211_bands; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}

/* Parse a WMI channel info event into @ch_info_ev. */
static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
				    u32 len, struct wmi_chan_info_event *ch_info_ev)
{
	const void **tb;
	const struct wmi_chan_info_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_chan_info_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch chan info ev");
		kfree(tb);
		return -eproto;
	}

	ch_info_ev->err_code = ev->err_code;
	ch_info_ev->freq = ev->freq;
	ch_info_ev->cmd_flags = ev->cmd_flags;
	ch_info_ev->noise_floor = ev->noise_floor;
	ch_info_ev->rx_clear_count = ev->rx_clear_count;
	ch_info_ev->cycle_count = ev->cycle_count;
	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
	ch_info_ev->rx_frame_count = ev->rx_frame_count;
	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
	ch_info_ev->vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* Parse a pdev BSS channel info event into @bss_ch_info_ev. */
static int
ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
	const void **tb;
	const struct wmi_pdev_bss_chan_info_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_pdev_bss_chan_info_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
		kfree(tb);
		return -eproto;
	}

	bss_ch_info_ev->pdev_id = ev->pdev_id;
	bss_ch_info_ev->freq = ev->freq;
	bss_ch_info_ev->noise_floor = ev->noise_floor;
	/* 64-bit counters arrive split into low/high 32-bit halves */
	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;

	kfree(tb);
	return 0;
}

/* Parse a vdev install-key completion event.
 * note: @arg->macaddr aliases data inside @skb, not a copy.
 */
static int
ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
				      struct wmi_vdev_install_key_complete_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_install_key_compl_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_vdev_install_key_complete_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev install key compl ev");
		kfree(tb);
		return -eproto;
	}

	arg->vdev_id = ev->vdev_id;
	arg->macaddr = ev->peer_macaddr.addr;
	arg->key_idx = ev->key_idx;
	arg->key_flags = ev->key_flags;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}

/* Parse a peer assoc confirmation event.
 * note: @peer_assoc_conf->macaddr aliases data inside @skb, not a copy.
 */
static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
	const void **tb;
	const struct wmi_peer_assoc_conf_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_peer_assoc_conf_event];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer assoc conf ev");
		kfree(tb);
		return -eproto;
	}

	peer_assoc_conf->vdev_id = ev->vdev_id;
	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Copy the base pdev counters from the WMI layout into the fw-stats struct. */
static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
					    struct ath11k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = src->chan_nf;
	dst->tx_frame_count = src->tx_frame_count;
	dst->rx_frame_count = src->rx_frame_count;
	dst->rx_clear_count = src->rx_clear_count;
	dst->cycle_count = src->cycle_count;
	dst->phy_err_count = src->phy_err_count;
	dst->chan_tx_power = src->chan_tx_pwr;
}

/* Copy the pdev tx-path counters from the WMI layout. */
static void
ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
			      struct ath11k_fw_stats_pdev *dst)
{
	dst->comp_queued = src->comp_queued;
	dst->comp_delivered = src->comp_delivered;
	dst->msdu_enqued = src->msdu_enqued;
	dst->mpdu_enqued = src->mpdu_enqued;
	dst->wmm_drop = src->wmm_drop;
	dst->local_enqued = src->local_enqued;
	dst->local_freed = src->local_freed;
	dst->hw_queued = src->hw_queued;
	dst->hw_reaped = src->hw_reaped;
	dst->underrun = src->underrun;
	dst->tx_abort = src->tx_abort;
	dst->mpdus_requed = src->mpdus_requed;
	dst->tx_ko = src->tx_ko;
	dst->data_rc = src->data_rc;
	dst->self_triggers = src->self_triggers;
	dst->sw_retry_failure = src->sw_retry_failure;
	dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
	dst->pdev_cont_xretry =
		src->pdev_cont_xretry;
	dst->pdev_tx_timeout = src->pdev_tx_timeout;
	dst->pdev_resets = src->pdev_resets;
	dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
	dst->phy_underrun = src->phy_underrun;
	dst->txop_ovf = src->txop_ovf;
}

/* Copy the pdev rx-path counters from the WMI layout. */
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
					  struct ath11k_fw_stats_pdev *dst)
{
	dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
	dst->status_rcvd = src->status_rcvd;
	dst->r0_frags = src->r0_frags;
	dst->r1_frags = src->r1_frags;
	dst->r2_frags = src->r2_frags;
	dst->r3_frags = src->r3_frags;
	dst->htt_msdus = src->htt_msdus;
	dst->htt_mpdus = src->htt_mpdus;
	dst->loc_msdus = src->loc_msdus;
	dst->loc_mpdus = src->loc_mpdus;
	dst->oversize_amsdu = src->oversize_amsdu;
	dst->phy_errs = src->phy_errs;
	dst->phy_err_drop = src->phy_err_drop;
	dst->mpdu_errs = src->mpdu_errs;
}

/* Copy per-vdev counters (including the per-rate history arrays) from the
 * WMI layout into the fw-stats struct.
 */
static void
ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
			   struct ath11k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = src->vdev_id;
	dst->beacon_snr = src->beacon_snr;
	dst->data_snr = src->data_snr;
	dst->num_rx_frames = src->num_rx_frames;
	dst->num_rts_fail = src->num_rts_fail;
	dst->num_rts_success = src->num_rts_success;
	dst->num_rx_err = src->num_rx_err;
	dst->num_rx_discard = src->num_rx_discard;
	dst->num_tx_not_acked = src->num_tx_not_acked;

	for (i = 0; i < array_size(src->num_tx_frames); i++)
		dst->num_tx_frames[i] = src->num_tx_frames[i];

	for (i = 0; i < array_size(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];

	for (i = 0; i < array_size(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];

	for (i = 0; i < array_size(src->tx_rate_history); i++)
		dst->tx_rate_history[i] = src->tx_rate_history[i];

	for (i = 0; i < array_size(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
}

/* Copy per-vdev beacon counters from the WMI layout. */
static void
ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
			  struct ath11k_fw_stats_bcn *dst)
{
	dst->vdev_id = src->vdev_id;
	dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
	dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}

/* Parse a WMI stats event and append pdev/vdev/bcn entries to @stats.
 * entries that fail allocation are skipped; truncated events yield -eproto.
 */
int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
			     struct ath11k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_stats_event *ev;
	const void *data;
	int i, ret;
	u32 len = skb->len;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, gfp_atomic);
	if (is_err(tb)) {
		ret = ptr_err(tb);
		ath11k_warn(ab, "failed to parse tlv: %d ", ret);
		return ret;
	}

	ev = tb[wmi_tag_stats_event];
	data = tb[wmi_tag_array_byte];
	if (!ev || !data) {
		ath11k_warn(ab, "failed to fetch update stats ev");
		kfree(tb);
		return -eproto;
	}

	ath11k_dbg(ab, ath11k_dbg_wmi,
		   "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i ",
		   ev->pdev_id,
		   ev->num_pdev_stats, ev->num_vdev_stats,
		   ev->num_bcn_stats);

	stats->pdev_id = ev->pdev_id;
	stats->stats_id = 0;

	for (i = 0; i < ev->num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath11k_fw_stats_pdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -eproto;
		}

		stats->stats_id = wmi_request_pdev_stat;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), gfp_atomic);
		if (!dst)
			continue;

		ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < ev->num_vdev_stats; i++) {
		const struct wmi_vdev_stats *src;
		struct ath11k_fw_stats_vdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -eproto;
		}

		stats->stats_id = wmi_request_vdev_stat;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), gfp_atomic);
		if (!dst)
			continue;

		ath11k_wmi_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	for (i = 0; i < ev->num_bcn_stats; i++) {
		const struct wmi_bcn_stats *src;
		struct ath11k_fw_stats_bcn *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -eproto;
		}

		stats->stats_id = wmi_request_bcn_stat;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), gfp_atomic);
		if (!dst)
			continue;

		ath11k_wmi_pull_bcn_stats(src, dst);
		list_add_tail(&dst->list, &stats->bcn);
	}

	kfree(tb);
	return 0;
}

/* Count the vdev stats entries linked on @head. */
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
{
	struct ath11k_fw_stats_vdev *i;
	size_t num = 0;

	list_for_each_entry(i, head, list)
		++num;

	return num;
}

/* Count the beacon stats entries linked on @head. */
static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
{
	struct ath11k_fw_stats_bcn *i;
	size_t num = 0;

	list_for_each_entry(i, head, list)
		++num;

	return num;
}

/* Append the base pdev stats to @buf; @length is the running offset,
 * updated on return.
 */
static void
ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				   char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ath11k_fw_stats_buf_size;

	len += scnprintf(buf + len, buf_len - len, " ");
	len += scnprintf(buf + len, buf_len - len, "%30s ",
			 "ath11k pdev stats");
	len += scnprintf(buf + len, buf_len - len, "%30s ",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "channel noise floor", pdev->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "channel tx power", pdev->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "tx frame count", pdev->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "rx frame count", pdev->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "rx clear count", pdev->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "cycle count", pdev->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "phy error count", pdev->phy_err_count);

	*length = len;
}

/* Append the pdev tx-path stats to @buf; @length updated on return. */
static void
ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ath11k_fw_stats_buf_size;

	len += scnprintf(buf + len, buf_len - len, " %30s ",
			 "ath11k pdev tx stats");
	len += scnprintf(buf + len, buf_len - len, "%30s ",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "htt cookies queued", pdev->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "htt cookies disp.", pdev->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "msdu queued", pdev->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "mpdu queued", pdev->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "msdus dropped", pdev->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "local enqued", pdev->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "local freed", pdev->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "hw queued", pdev->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "ppdus reaped", pdev->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "num underruns", pdev->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "ppdus cleaned", pdev->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "mpdus requed", pdev->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "excessive retries", pdev->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "hw rate", pdev->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "sched self triggers", pdev->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "dropped due to sw retries",
			 pdev->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "illegal rate phy errors",
			 pdev->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "pdev continuous xretry", pdev->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "tx timeout", pdev->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "pdev resets", pdev->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "stateless tids alloc failures",
			 pdev->stateless_tid_alloc_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "phy underrun", pdev->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u ",
			 "mpdu is more than txop limit", pdev->txop_ovf);
	*length = len;
}

/* Append the pdev rx-path stats to @buf; @length updated on return. */
static void
ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ath11k_fw_stats_buf_size;

	len += scnprintf(buf + len, buf_len - len, " %30s ",
			 "ath11k pdev rx stats");
	len += scnprintf(buf + len, buf_len - len, "%30s ",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "mid ppdu route change",
			 pdev->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "tot. number of statuses", pdev->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "extra frags on rings 0", pdev->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "extra frags on rings 1", pdev->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "extra frags on rings 2", pdev->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "extra frags on rings 3", pdev->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "msdus delivered to htt", pdev->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "mpdus delivered to htt", pdev->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "msdus delivered to stack", pdev->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "mpdus delivered to stack", pdev->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "oversized amsus", pdev->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "phy errors", pdev->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "phy errors drops", pdev->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d ",
			 "mpdu errors (fcs, mic, enc)", pdev->mpdu_errs);
	*length = len;
}

/* Append one vdev's stats to @buf; silently skipped when the vdev does not
 * belong to the requested pdev. @length updated on return.
 */
static void
ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
			      const struct ath11k_fw_stats_vdev *vdev,
			      char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ath11k_fw_stats_buf_size;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
	u8 *vif_macaddr;
	int i;

	/* vdev stats has all the active vdevs of other pdevs as well,
	 * ignoring those not part of requested pdev
	 */
	if (!arvif)
		return;

	vif_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "vdev id", vdev->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pm ",
			 "vdev mac address", vif_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "beacon snr", vdev->beacon_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "data snr", vdev->data_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num rx frames", vdev->num_rx_frames);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num rts fail", vdev->num_rts_fail);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num rts success", vdev->num_rts_success);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num rx err", vdev->num_rx_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num rx discard", vdev->num_rx_discard);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num tx not acked", vdev->num_tx_not_acked);

	for (i = 0 ; i < array_size(vdev->num_tx_frames); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u ",
				 "num tx frames", i,
				 vdev->num_tx_frames[i]);

	for (i = 0 ; i < array_size(vdev->num_tx_frames_retries); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u ",
				 "num tx frames retries", i,
				 vdev->num_tx_frames_retries[i]);

	for (i = 0 ; i < array_size(vdev->num_tx_frames_failures); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u ",
				 "num tx frames failures", i,
				 vdev->num_tx_frames_failures[i]);

	for (i = 0 ; i < array_size(vdev->tx_rate_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] 0x%08x ",
				 "tx rate history", i,
				 vdev->tx_rate_history[i]);

	for (i = 0 ; i < array_size(vdev->beacon_rssi_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u ",
				 "beacon rssi history", i,
				 vdev->beacon_rssi_history[i]);

	len += scnprintf(buf + len, buf_len - len, " ");
	*length = len;
}

/* Append one vdev's beacon stats to @buf; @length updated on return. */
static void
ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
			     const struct ath11k_fw_stats_bcn *bcn,
			     char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ath11k_fw_stats_buf_size;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
	u8 *vdev_macaddr;

	if (!arvif) {
		ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
			    bcn->vdev_id);
		return;
	}

	vdev_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "vdev id", bcn->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pm ",
			 "vdev mac address", vdev_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s ",
			 "================");
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num of beacon tx success", bcn->tx_bcn_succ_cnt);
	len += scnprintf(buf + len, buf_len - len, "%30s %u ",
			 "num of beacon tx failures", bcn->tx_bcn_outage_cnt);

	len += scnprintf(buf + len, buf_len - len, " ");
	*length = len;
}

/* Render the collected fw stats selected by @stats_id into @buf
 * (ath11k_fw_stats_buf_size bytes) under ar->data_lock.
 */
void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
			      struct ath11k_fw_stats *fw_stats,
			      u32 stats_id, char *buf)
{
	u32 len = 0;
	u32 buf_len = ath11k_fw_stats_buf_size;
	const struct ath11k_fw_stats_pdev *pdev;
	const struct ath11k_fw_stats_vdev *vdev;
	const struct ath11k_fw_stats_bcn *bcn;
	size_t num_bcn;

	spin_lock_bh(&ar->data_lock);

	if (stats_id == wmi_request_pdev_stat) {
		pdev = list_first_entry_or_null(&fw_stats->pdevs,
						struct ath11k_fw_stats_pdev, list);
		if (!pdev) {
			ath11k_warn(ar->ab, "failed to get pdev stats ");
			goto unlock;
		}

		ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
	}

	if (stats_id == wmi_request_vdev_stat) {
		len += scnprintf(buf + len, buf_len - len, " ");
		len += scnprintf(buf + len, buf_len - len, "%30s ",
				 "ath11k vdev stats");
		len += scnprintf(buf + len, buf_len - len, "%30s ",
				 "=================");

		list_for_each_entry(vdev, &fw_stats->vdevs, list)
			ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
	}

	if (stats_id == wmi_request_bcn_stat) {
		num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);

		len += scnprintf(buf + len,
buf_len - len, " "); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu) ", + "ath11k beacon stats", num_bcn); + len += scnprintf(buf + len, buf_len - len, "%30s ", + "==================="); + + list_for_each_entry(bcn, &fw_stats->bcn, list) + ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len); + } + +unlock: + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; +} + +static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) +{ + /* try to send pending beacons first. they take priority */ + wake_up(&ab->wmi_sc.tx_credits_wq); +} + +static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab, + struct sk_buff *skb) +{ + dev_kfree_skb(skb); +} + +static bool ath11k_reg_is_world_alpha(char *alpha) +{ + return alpha[0] == '0' && alpha[1] == '0'; +} + +static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct cur_regulatory_info *reg_info = null; + struct ieee80211_regdomain *regd = null; + bool intersect = false; + int ret = 0, pdev_idx; + struct ath11k *ar; + + reg_info = kzalloc(sizeof(*reg_info), gfp_atomic); + if (!reg_info) { + ret = -enomem; + goto fallback; + } + + ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info); + if (ret) { + ath11k_warn(ab, "failed to extract regulatory info from received event "); + goto fallback; + } + + if (reg_info->status_code != reg_set_cc_status_pass) { + /* in case of failure to set the requested ctry, + * fw retains the current regd. we print a failure info + * and return from here. + */ + ath11k_warn(ab, "failed to set the requested country regulatory setting "); + goto mem_free; + } + + pdev_idx = reg_info->phy_id; + + if (pdev_idx >= ab->num_radios) + goto fallback; + + /* avoid multiple overwrites to default regd, during core + * stop-start after mac registration. 
+ */ + if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] && + !memcmp((char *)ab->default_regd[pdev_idx]->alpha2, + (char *)reg_info->alpha2, 2)) + return 0; + + /* intersect new rules with default regd if a new country setting was + * requested, i.e a default regd was already set during initialization + * and the regd coming from this event has a valid country info. + */ + if (ab->default_regd[pdev_idx] && + !ath11k_reg_is_world_alpha((char *) + ab->default_regd[pdev_idx]->alpha2) && + !ath11k_reg_is_world_alpha((char *)reg_info->alpha2)) + intersect = true; + + regd = ath11k_reg_build_regd(ab, reg_info, intersect); + if (!regd) { + ath11k_warn(ab, "failed to build regd from reg_info "); + goto fallback; + } + + spin_lock(&ab->base_lock); + if (test_bit(ath11k_flag_registered, &ab->dev_flags)) { + /* once mac is registered, ar is valid and all cc events from + * fw is considered to be received due to user requests + * currently. + * free previously built regd before assigning the newly + * generated regd to ar. null pointer handling will be + * taken care by kfree itself. + */ + ar = ab->pdevs[pdev_idx].ar; + kfree(ab->new_regd[pdev_idx]); + ab->new_regd[pdev_idx] = regd; + ieee80211_queue_work(ar->hw, &ar->regd_update_work); + } else { + /* multiple events for the same *ar is not expected. but we + * can still clear any previously stored default_regd if we + * are receiving this event for the same radio by mistake. + * null pointer handling will be taken care by kfree itself. + */ + kfree(ab->default_regd[pdev_idx]); + /* this regd would be applied during mac registration */ + ab->default_regd[pdev_idx] = regd; + } + ab->dfs_region = reg_info->dfs_region; + spin_unlock(&ab->base_lock); + + goto mem_free; + +fallback: + /* fallback to older reg (by sending previous country setting + * again if fw has succeded and we failed to process here. + * the regdomain should be uniform across driver and fw. 
since the + * fw has processed the command and sent a success status, we expect + * this function to succeed as well. if it doesn't, ctry needs to be + * reverted at the fw and the old scan_chan_list cmd needs to be sent. + */ + /* todo: this is rare, but still should also be handled */ + warn_on(1); +mem_free: + if (reg_info) { + kfree(reg_info->reg_rules_2g_ptr); + kfree(reg_info->reg_rules_5g_ptr); + kfree(reg_info); + } + return ret; +} + +static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_rdy_parse *rdy_parse = data; + struct wmi_ready_event *fixed_param; + struct wmi_mac_addr *addr_list; + struct ath11k_pdev *pdev; + u32 num_mac_addr; + int i; + + switch (tag) { + case wmi_tag_ready_event: + fixed_param = (struct wmi_ready_event *)ptr; + ab->wlan_init_status = fixed_param->status; + rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr; + + ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr); + ab->wmi_ready = true; + break; + case wmi_tag_array_fixed_struct: + addr_list = (struct wmi_mac_addr *)ptr; + num_mac_addr = rdy_parse->num_extra_mac_addr; + + if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios)) + break; + + for (i = 0; i < ab->num_radios; i++) { + pdev = &ab->pdevs[i]; + ether_addr_copy(pdev->mac_addr, addr_list[i].addr); + } + ab->pdevs_macaddr_valid = true; + break; + default: + break; + } + + return 0; +} + +static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_tlv_rdy_parse rdy_parse = { }; + int ret; + + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_rdy_parse, &rdy_parse); + if (ret) { + ath11k_warn(ab, "failed to parse tlv %d ", ret); + return ret; + } + + complete(&ab->wmi_sc.unified_ready); + return 0; +} + +static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_peer_delete_resp_event peer_del_resp; + + if 
(ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) { + ath11k_warn(ab, "failed to extract peer delete resp"); + return; + } + + /* todo: do we need to validate whether ath11k_peer_find() return null + * why this is needed when there is htt event for peer delete + */ +} + +static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status) +{ + switch (vdev_resp_status) { + case wmi_vdev_start_response_invalid_vdevid: + return "invalid vdev id"; + case wmi_vdev_start_response_not_supported: + return "not supported"; + case wmi_vdev_start_response_dfs_violation: + return "dfs violation"; + case wmi_vdev_start_response_invalid_regdomain: + return "invalid regdomain"; + default: + return "unknown"; + } +} + +static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_vdev_start_resp_event vdev_start_resp; + struct ath11k *ar; + u32 status; + + if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) { + ath11k_warn(ab, "failed to extract vdev start resp"); + return; + } + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d", + vdev_start_resp.vdev_id); + rcu_read_unlock(); + return; + } + + ar->last_wmi_vdev_start_status = 0; + + status = vdev_start_resp.status; + + if (warn_on_once(status)) { + ath11k_warn(ab, "vdev start resp error status %d (%s) ", + status, ath11k_wmi_vdev_resp_print(status)); + ar->last_wmi_vdev_start_status = status; + } + + complete(&ar->vdev_setup_done); + + rcu_read_unlock(); + + ath11k_dbg(ab, ath11k_dbg_wmi, "vdev start resp for vdev id %d", + vdev_start_resp.vdev_id); +} + +static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + u32 vdev_id, tx_status; + + if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len, + &vdev_id, &tx_status) != 0) { + ath11k_warn(ab, "failed to extract bcn tx status"); + return; + } +} + +static void 
ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct ath11k *ar; + u32 vdev_id = 0; + + if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) { + ath11k_warn(ab, "failed to extract vdev stopped event"); + return; + } + + rcu_read_lock(); + ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d", + vdev_id); + rcu_read_unlock(); + return; + } + + complete(&ar->vdev_setup_done); + + rcu_read_unlock(); + + ath11k_dbg(ab, ath11k_dbg_wmi, "vdev stopped for vdev id %d", vdev_id); +} + +static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct mgmt_rx_event_params rx_ev = {0}; + struct ath11k *ar; + struct ieee80211_rx_status *status = ieee80211_skb_rxcb(skb); + struct ieee80211_hdr *hdr; + u16 fc; + struct ieee80211_supported_band *sband; + + if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) { + ath11k_warn(ab, "failed to extract mgmt rx event"); + dev_kfree_skb(skb); + return; + } + + memset(status, 0, sizeof(*status)); + + ath11k_dbg(ab, ath11k_dbg_mgmt, "mgmt rx event status %08x ", + rx_ev.status); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id); + + if (!ar) { + ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event ", + rx_ev.pdev_id); + dev_kfree_skb(skb); + goto exit; + } + + if ((test_bit(ath11k_cac_running, &ar->dev_flags)) || + (rx_ev.status & (wmi_rx_status_err_decrypt | + wmi_rx_status_err_key_cache_miss | wmi_rx_status_err_crc))) { + dev_kfree_skb(skb); + goto exit; + } + + if (rx_ev.status & wmi_rx_status_err_mic) + status->flag |= rx_flag_mmic_error; + + if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { + status->band = nl80211_band_2ghz; + } else if (rx_ev.channel >= 36 && rx_ev.channel <= ath11k_max_5g_chan) { + status->band = nl80211_band_5ghz; + } else { + /* shouldn't happen unless list of advertised channels to + * mac80211 has been changed. 
+ */ + warn_on_once(1); + dev_kfree_skb(skb); + goto exit; + } + + if (rx_ev.phy_mode == mode_11b && status->band == nl80211_band_5ghz) + ath11k_dbg(ab, ath11k_dbg_wmi, + "wmi mgmt rx 11b (cck) on 5ghz "); + + sband = &ar->mac.sbands[status->band]; + + status->freq = ieee80211_channel_to_frequency(rx_ev.channel, + status->band); + status->signal = rx_ev.snr + ath11k_default_noise_floor; + status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); + + hdr = (struct ieee80211_hdr *)skb->data; + fc = le16_to_cpu(hdr->frame_control); + + /* firmware is guaranteed to report all essential management frames via + * wmi while it can deliver some extra via htt. since there can be + * duplicates split the reporting wrt monitor/sniffing. + */ + status->flag |= rx_flag_skip_monitor; + + /* in case of pmf, fw delivers decrypted frames with protected bit set. + * don't clear that. also, fw delivers broadcast management frames + * (ex: group privacy action frames in mesh) as encrypted payload. 
+ */ + if (ieee80211_has_protected(hdr->frame_control) && + !is_multicast_ether_addr(ieee80211_get_da(hdr))) { + status->flag |= rx_flag_decrypted; + + if (!ieee80211_is_robust_mgmt_frame(skb)) { + status->flag |= rx_flag_iv_stripped | + rx_flag_mmic_stripped; + hdr->frame_control = __cpu_to_le16(fc & + ~ieee80211_fctl_protected); + } + } + + /* todo: pending handle beacon implementation + *if (ieee80211_is_beacon(hdr->frame_control)) + * ath11k_mac_handle_beacon(ar, skb); + */ + + ath11k_dbg(ab, ath11k_dbg_mgmt, + "event mgmt rx skb %pk len %d ftype %02x stype %02x ", + skb, skb->len, + fc & ieee80211_fctl_ftype, fc & ieee80211_fctl_stype); + + ath11k_dbg(ab, ath11k_dbg_mgmt, + "event mgmt rx freq %d band %d snr %d, rate_idx %d ", + status->freq, status->band, status->signal, + status->rate_idx); + + ieee80211_rx_ni(ar->hw, skb); + +exit: + rcu_read_unlock(); +} + +static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_mgmt_tx_compl_event tx_compl_param = {0}; + struct ath11k *ar; + + if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) { + ath11k_warn(ab, "failed to extract mgmt tx compl event"); + return; + } + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id); + if (!ar) { + ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event ", + tx_compl_param.pdev_id); + goto exit; + } + + wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id, + tx_compl_param.status); + + ath11k_dbg(ab, ath11k_dbg_mgmt, + "mgmt tx compl ev pdev_id %d, desc_id %d, status %d", + tx_compl_param.pdev_id, tx_compl_param.desc_id, + tx_compl_param.status); + +exit: + rcu_read_unlock(); +} + +static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab, + u32 vdev_id) +{ + int i; + struct ath11k_pdev *pdev; + struct ath11k *ar; + + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) { + ar = pdev->ar; + + 
spin_lock_bh(&ar->data_lock); + if (ar->scan.state == ath11k_scan_aborting && + ar->scan.vdev_id == vdev_id) { + spin_unlock_bh(&ar->data_lock); + return ar; + } + spin_unlock_bh(&ar->data_lock); + } + } + return null; +} + +static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct ath11k *ar; + struct wmi_scan_event scan_ev = {0}; + + if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) { + ath11k_warn(ab, "failed to extract scan event"); + return; + } + + rcu_read_lock(); + + /* in case the scan was cancelled, ex. during interface teardown, + * the interface will not be found in active interfaces. + * rather, in such scenarios, iterate over the active pdev's to + * search 'ar' if the corresponding 'ar' scan is aborting and the + * aborting scan's vdev id matches this event info. + */ + if (scan_ev.event_type == wmi_scan_event_completed && + scan_ev.reason == wmi_scan_reason_cancelled) + ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id); + else + ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id); + + if (!ar) { + ath11k_warn(ab, "received scan event for unknown vdev"); + rcu_read_unlock(); + return; + } + + spin_lock_bh(&ar->data_lock); + + ath11k_dbg(ab, ath11k_dbg_wmi, + "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d) ", + ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason), + scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq, + scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id, + ath11k_scan_state_str(ar->scan.state), ar->scan.state); + + switch (scan_ev.event_type) { + case wmi_scan_event_started: + ath11k_wmi_event_scan_started(ar); + break; + case wmi_scan_event_completed: + ath11k_wmi_event_scan_completed(ar); + break; + case wmi_scan_event_bss_channel: + ath11k_wmi_event_scan_bss_chan(ar); + break; + case wmi_scan_event_foreign_chan: + ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq); + break; + case wmi_scan_event_start_failed: + ath11k_warn(ab, 
"received scan start failure event "); + ath11k_wmi_event_scan_start_failed(ar); + break; + case wmi_scan_event_dequeued: + case wmi_scan_event_preempted: + case wmi_scan_event_restarted: + case wmi_scan_event_foreign_chan_exit: + default: + break; + } + + spin_unlock_bh(&ar->data_lock); + + rcu_read_unlock(); +} + +static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_peer_sta_kickout_arg arg = {}; + struct ieee80211_sta *sta; + struct ath11k_peer *peer; + struct ath11k *ar; + + if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) { + ath11k_warn(ab, "failed to extract peer sta kickout event"); + return; + } + + rcu_read_lock(); + + spin_lock_bh(&ab->base_lock); + + peer = ath11k_peer_find_by_addr(ab, arg.mac_addr); + + if (!peer) { + ath11k_warn(ab, "peer not found %pm ", + arg.mac_addr); + goto exit; + } + + ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d", + peer->vdev_id); + goto exit; + } + + sta = ieee80211_find_sta_by_ifaddr(ar->hw, + arg.mac_addr, null); + if (!sta) { + ath11k_warn(ab, "spurious quick kickout for sta %pm ", + arg.mac_addr); + goto exit; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, "peer sta kickout event %pm", + arg.mac_addr); + + ieee80211_report_low_ack(sta, 10); + +exit: + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); +} + +static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_roam_event roam_ev = {}; + struct ath11k *ar; + + if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) { + ath11k_warn(ab, "failed to extract roam event"); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "wmi roam event vdev %u reason 0x%08x rssi %d ", + roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in roam ev %d", + roam_ev.vdev_id); + rcu_read_unlock(); + return; + } + 
+ if (roam_ev.reason >= wmi_roam_reason_max) + ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i ", + roam_ev.reason, roam_ev.vdev_id); + + switch (roam_ev.reason) { + case wmi_roam_reason_beacon_miss: + /* todo: pending beacon miss and connection_loss_work + * implementation + * ath11k_mac_handle_beacon_miss(ar, vdev_id); + */ + break; + case wmi_roam_reason_better_ap: + case wmi_roam_reason_low_rssi: + case wmi_roam_reason_suitable_ap_found: + case wmi_roam_reason_ho_failed: + ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i ", + roam_ev.reason, roam_ev.vdev_id); + break; + } + + rcu_read_unlock(); +} + +static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_chan_info_event ch_info_ev = {0}; + struct ath11k *ar; + struct survey_info *survey; + int idx; + /* hw channel counters frequency value in hertz */ + u32 cc_freq_hz = ab->cc_freq_hz; + + if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) { + ath11k_warn(ab, "failed to extract chan info event"); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d ", + ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq, + ch_info_ev.cmd_flags, ch_info_ev.noise_floor, + ch_info_ev.rx_clear_count, ch_info_ev.cycle_count, + ch_info_ev.mac_clk_mhz); + + if (ch_info_ev.cmd_flags == wmi_chan_info_end_resp) { + ath11k_dbg(ab, ath11k_dbg_wmi, "chan info report completed "); + return; + } + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in chan info ev %d", + ch_info_ev.vdev_id); + rcu_read_unlock(); + return; + } + spin_lock_bh(&ar->data_lock); + + switch (ar->scan.state) { + case ath11k_scan_idle: + case ath11k_scan_starting: + ath11k_warn(ab, "received chan info event without a scan request, ignoring "); + goto exit; + case 
ath11k_scan_running: + case ath11k_scan_aborting: + break; + } + + idx = freq_to_idx(ar, ch_info_ev.freq); + if (idx >= array_size(ar->survey)) { + ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds) ", + ch_info_ev.freq, idx); + goto exit; + } + + /* if fw provides mac clock frequency in mhz, overriding the initialized + * hw channel counters frequency value + */ + if (ch_info_ev.mac_clk_mhz) + cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000); + + if (ch_info_ev.cmd_flags == wmi_chan_info_start_resp) { + survey = &ar->survey[idx]; + memset(survey, 0, sizeof(*survey)); + survey->noise = ch_info_ev.noise_floor; + survey->filled = survey_info_noise_dbm | survey_info_time | + survey_info_time_busy; + survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz); + survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz); + } +exit: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} + +static void +ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {}; + struct survey_info *survey; + struct ath11k *ar; + u32 cc_freq_hz = ab->cc_freq_hz; + u64 busy, total, tx, rx, rx_bss; + int idx; + + if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) { + ath11k_warn(ab, "failed to extract pdev bss chan info event"); + return; + } + + busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 | + bss_ch_info_ev.rx_clear_count_low; + + total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 | + bss_ch_info_ev.cycle_count_low; + + tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 | + bss_ch_info_ev.tx_cycle_count_low; + + rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 | + bss_ch_info_ev.rx_cycle_count_low; + + rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 | + bss_ch_info_ev.rx_bss_cycle_count_low; + + ath11k_dbg(ab, ath11k_dbg_wmi, + "pdev bss chan info: pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu 
", + bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq, + bss_ch_info_ev.noise_floor, busy, total, + tx, rx, rx_bss); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id); + + if (!ar) { + ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event ", + bss_ch_info_ev.pdev_id); + rcu_read_unlock(); + return; + } + + spin_lock_bh(&ar->data_lock); + idx = freq_to_idx(ar, bss_ch_info_ev.freq); + if (idx >= array_size(ar->survey)) { + ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds) ", + bss_ch_info_ev.freq, idx); + goto exit; + } + + survey = &ar->survey[idx]; + + survey->noise = bss_ch_info_ev.noise_floor; + survey->time = div_u64(total, cc_freq_hz); + survey->time_busy = div_u64(busy, cc_freq_hz); + survey->time_rx = div_u64(rx_bss, cc_freq_hz); + survey->time_tx = div_u64(tx, cc_freq_hz); + survey->filled |= (survey_info_noise_dbm | + survey_info_time | + survey_info_time_busy | + survey_info_time_rx | + survey_info_time_tx); +exit: + spin_unlock_bh(&ar->data_lock); + complete(&ar->bss_survey_done); + + rcu_read_unlock(); +} + +static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct wmi_vdev_install_key_complete_arg install_key_compl = {0}; + struct ath11k *ar; + + if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) { + ath11k_warn(ab, "failed to extract install key compl event"); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "vdev install key ev idx %d flags %08x macaddr %pm status %d ", + install_key_compl.key_idx, install_key_compl.key_flags, + install_key_compl.macaddr, install_key_compl.status); + + rcu_read_lock(); + ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id); + if (!ar) { + ath11k_warn(ab, "invalid vdev id in install key compl ev %d", + install_key_compl.vdev_id); + rcu_read_unlock(); + return; + } + + ar->install_key_status = 0; + + if (install_key_compl.status != 
wmi_vdev_install_key_compl_status_success) { + ath11k_warn(ab, "install key failed for %pm status %d ", + install_key_compl.macaddr, install_key_compl.status); + ar->install_key_status = install_key_compl.status; + } + + complete(&ar->install_key_done); + rcu_read_unlock(); +} + +static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + const void **tb; + const struct wmi_service_available_event *ev; + int ret; + int i, j; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic); + if (is_err(tb)) { + ret = ptr_err(tb); + ath11k_warn(ab, "failed to parse tlv: %d ", ret); + return; + } + + ev = tb[wmi_tag_service_available_event]; + if (!ev) { + ath11k_warn(ab, "failed to fetch svc available ev"); + kfree(tb); + return; + } + + /* todo: use wmi_service_segment_offset information to get the service + * especially when more services are advertised in multiple sevice + * available events. + */ + for (i = 0, j = wmi_max_service; + i < wmi_service_segment_bm_size32 && j < wmi_max_ext_service; + i++) { + do { + if (ev->wmi_service_segment_bitmap[i] & + bit(j % wmi_avail_service_bits_in_size32)) + set_bit(j, ab->wmi_sc.svc_map); + } while (++j % wmi_avail_service_bits_in_size32); + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x", + ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1], + ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]); + + kfree(tb); +} + +static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0}; + struct ath11k *ar; + + if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) { + ath11k_warn(ab, "failed to extract peer assoc conf event"); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "peer assoc conf ev vdev id %d macaddr %pm ", + peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr); + + ar = ath11k_mac_get_ar_by_vdev_id(ab, 
peer_assoc_conf.vdev_id); + + if (!ar) { + ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d", + peer_assoc_conf.vdev_id); + return; + } + + complete(&ar->peer_assoc_done); +} + +static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + ath11k_debug_fw_stats_process(ab, skb); +} + +/* pdev_ctl_failsafe_check_event is received from fw when the frequency scanned + * is not part of bdf ctl(conformance test limits) table entries. + */ +static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_pdev_ctl_failsafe_chk_event *ev; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic); + if (is_err(tb)) { + ret = ptr_err(tb); + ath11k_warn(ab, "failed to parse tlv: %d ", ret); + return; + } + + ev = tb[wmi_tag_pdev_ctl_failsafe_check_event]; + if (!ev) { + ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev"); + kfree(tb); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "pdev ctl failsafe check ev status %d ", + ev->ctl_failsafe_status); + + /* if ctl_failsafe_status is set to 1 fw will max out the transmit power + * to 10 dbm else the ctl power entry in the bdf would be picked up. 
+ */ + if (ev->ctl_failsafe_status != 0) + ath11k_warn(ab, "pdev ctl failsafe failure status %d", + ev->ctl_failsafe_status); + + kfree(tb); +} + +static void +ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab, + const struct wmi_pdev_csa_switch_ev *ev, + const u32 *vdev_ids) +{ + int i; + struct ath11k_vif *arvif; + + /* finish csa once the switch count becomes null */ + if (ev->current_switch_count) + return; + + rcu_read_lock(); + for (i = 0; i < ev->num_vdevs; i++) { + arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]); + + if (!arvif) { + ath11k_warn(ab, "recvd csa status for unknown vdev %d", + vdev_ids[i]); + continue; + } + + if (arvif->is_up && arvif->vif->csa_active) + ieee80211_csa_finish(arvif->vif); + } + rcu_read_unlock(); +} + +static void +ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + const void **tb; + const struct wmi_pdev_csa_switch_ev *ev; + const u32 *vdev_ids; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic); + if (is_err(tb)) { + ret = ptr_err(tb); + ath11k_warn(ab, "failed to parse tlv: %d ", ret); + return; + } + + ev = tb[wmi_tag_pdev_csa_switch_count_status_event]; + vdev_ids = tb[wmi_tag_array_uint32]; + + if (!ev || !vdev_ids) { + ath11k_warn(ab, "failed to fetch pdev csa switch count ev"); + kfree(tb); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "pdev csa switch count %d for pdev %d, num_vdevs %d", + ev->current_switch_count, ev->pdev_id, + ev->num_vdevs); + + ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids); + + kfree(tb); +} + +static void +ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb) +{ + const void **tb; + const struct wmi_pdev_radar_ev *ev; + struct ath11k *ar; + int ret; + + tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, gfp_atomic); + if (is_err(tb)) { + ret = ptr_err(tb); + ath11k_warn(ab, "failed to parse tlv: %d ", ret); + return; + } + + ev = 
tb[wmi_tag_pdev_dfs_radar_detection_event]; + + if (!ev) { + ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev"); + kfree(tb); + return; + } + + ath11k_dbg(ab, ath11k_dbg_wmi, + "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d", + ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width, + ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp, + ev->freq_offset, ev->sidx); + + ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id); + + if (!ar) { + ath11k_warn(ab, "radar detected in invalid pdev %d ", + ev->pdev_id); + goto exit; + } + + ath11k_dbg(ar->ab, ath11k_dbg_reg, "dfs radar detected in pdev %d ", + ev->pdev_id); + + if (ar->dfs_block_radar_events) + ath11k_info(ab, "dfs radar detected, but ignored as requested "); + else + ieee80211_radar_detected(ar->hw); + +exit: + kfree(tb); +} + +static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_tlv_event_id id; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = field_get(wmi_cmd_hdr_cmd_id, (cmd_hdr->cmd_id)); + + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == null) + goto out; + + switch (id) { + /* process all the wmi events here */ + case wmi_service_ready_eventid: + ath11k_service_ready_event(ab, skb); + break; + case wmi_service_ready_ext_eventid: + ath11k_service_ready_ext_event(ab, skb); + break; + case wmi_reg_chan_list_cc_eventid: + ath11k_reg_chan_list_event(ab, skb); + break; + case wmi_ready_eventid: + ath11k_ready_event(ab, skb); + break; + case wmi_peer_delete_resp_eventid: + ath11k_peer_delete_resp_event(ab, skb); + break; + case wmi_vdev_start_resp_eventid: + ath11k_vdev_start_resp_event(ab, skb); + break; + case wmi_offload_bcn_tx_status_eventid: + ath11k_bcn_tx_status_event(ab, skb); + break; + case wmi_vdev_stopped_eventid: + ath11k_vdev_stopped_event(ab, skb); + break; + case 
wmi_mgmt_rx_eventid: + ath11k_mgmt_rx_event(ab, skb); + /* mgmt_rx_event() owns the skb now! */ + return; + case wmi_mgmt_tx_completion_eventid: + ath11k_mgmt_tx_compl_event(ab, skb); + break; + case wmi_scan_eventid: + ath11k_scan_event(ab, skb); + break; + case wmi_peer_sta_kickout_eventid: + ath11k_peer_sta_kickout_event(ab, skb); + break; + case wmi_roam_eventid: + ath11k_roam_event(ab, skb); + break; + case wmi_chan_info_eventid: + ath11k_chan_info_event(ab, skb); + break; + case wmi_pdev_bss_chan_info_eventid: + ath11k_pdev_bss_chan_info_event(ab, skb); + break; + case wmi_vdev_install_key_complete_eventid: + ath11k_vdev_install_key_compl_event(ab, skb); + break; + case wmi_service_available_eventid: + ath11k_service_available_event(ab, skb); + break; + case wmi_peer_assoc_conf_eventid: + ath11k_peer_assoc_conf_event(ab, skb); + break; + case wmi_update_stats_eventid: + ath11k_update_stats_event(ab, skb); + break; + case wmi_pdev_ctl_failsafe_check_eventid: + ath11k_pdev_ctl_failsafe_check_event(ab, skb); + break; + case wmi_pdev_csa_switch_count_status_eventid: + ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb); + break; + /* add unsupported events here */ + case wmi_tbttoffset_ext_update_eventid: + case wmi_vdev_delete_resp_eventid: + ath11k_dbg(ab, ath11k_dbg_wmi, + "ignoring unsupported event 0x%x ", id); + break; + case wmi_pdev_dfs_radar_detection_eventid: + ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb); + break; + /* todo: add remaining events */ + default: + ath11k_warn(ab, "unknown eventid: 0x%x ", id); + break; + } + +out: + dev_kfree_skb(skb); +} + +static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab, + u32 pdev_idx) +{ + int status; + u32 svc_id[] = { ath11k_htc_svc_id_wmi_control, + ath11k_htc_svc_id_wmi_control_mac1, + ath11k_htc_svc_id_wmi_control_mac2 }; + + struct ath11k_htc_svc_conn_req conn_req; + struct ath11k_htc_svc_conn_resp conn_resp; + + memset(&conn_req, 0, sizeof(conn_req)); + memset(&conn_resp, 0, 
sizeof(conn_resp)); + + /* these fields are the same for all service endpoints */ + conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete; + conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx; + conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits; + + /* connect to control service */ + conn_req.service_id = svc_id[pdev_idx]; + + status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp); + if (status) { + ath11k_warn(ab, "failed to connect to wmi control service status: %d ", + status); + return status; + } + + ab->wmi_sc.wmi_endpoint_id[pdev_idx] = conn_resp.eid; + ab->wmi_sc.wmi[pdev_idx].eid = conn_resp.eid; + ab->wmi_sc.max_msg_len[pdev_idx] = conn_resp.max_msg_len; + + return 0; +} + +static int +ath11k_wmi_send_unit_test_cmd(struct ath11k *ar, + struct wmi_unit_test_cmd ut_cmd, + u32 *test_args) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct wmi_unit_test_cmd *cmd; + struct sk_buff *skb; + struct wmi_tlv *tlv; + void *ptr; + u32 *ut_cmd_args; + int buf_len, arg_len; + int ret; + int i; + + arg_len = sizeof(u32) * ut_cmd.num_args; + buf_len = sizeof(ut_cmd) + arg_len + tlv_hdr_size; + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, buf_len); + if (!skb) + return -enomem; + + cmd = (struct wmi_unit_test_cmd *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_unit_test_cmd) | + field_prep(wmi_tlv_len, sizeof(ut_cmd) - tlv_hdr_size); + + cmd->vdev_id = ut_cmd.vdev_id; + cmd->module_id = ut_cmd.module_id; + cmd->num_args = ut_cmd.num_args; + cmd->diag_token = ut_cmd.diag_token; + + ptr = skb->data + sizeof(ut_cmd); + + tlv = ptr; + tlv->header = field_prep(wmi_tlv_tag, wmi_tag_array_uint32) | + field_prep(wmi_tlv_len, arg_len); + + ptr += tlv_hdr_size; + + ut_cmd_args = ptr; + for (i = 0; i < ut_cmd.num_args; i++) + ut_cmd_args[i] = test_args[i]; + + ret = ath11k_wmi_cmd_send(wmi, skb, wmi_unit_test_cmdid); + + if (ret) { + ath11k_warn(ar->ab, "failed to send wmi_unit_test cmd :%d ", + ret); + dev_kfree_skb(skb); + } + + 
ath11k_dbg(ar->ab, ath11k_dbg_wmi, + "wmi unit test : module %d vdev %d n_args %d token %d ", + cmd->module_id, cmd->vdev_id, cmd->num_args, + cmd->diag_token); + + return ret; +} + +int ath11k_wmi_simulate_radar(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + u32 dfs_args[dfs_max_test_args]; + struct wmi_unit_test_cmd wmi_ut; + bool arvif_found = false; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_started && arvif->vdev_type == wmi_vdev_type_ap) { + arvif_found = true; + break; + } + } + + if (!arvif_found) + return -einval; + + dfs_args[dfs_test_cmdid] = 0; + dfs_args[dfs_test_pdev_id] = ar->pdev->pdev_id; + /* currently we could pass segment_id(b0 - b1), chirp(b2) + * freq offset (b3 - b10) to unit test. for simulation + * purpose this can be set to 0 which is valid. + */ + dfs_args[dfs_test_radar_param] = 0; + + wmi_ut.vdev_id = arvif->vdev_id; + wmi_ut.module_id = dfs_unit_test_module; + wmi_ut.num_args = dfs_max_test_args; + wmi_ut.diag_token = dfs_unit_test_token; + + ath11k_dbg(ar->ab, ath11k_dbg_reg, "triggering radar simulation "); + + return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); +} + +int ath11k_wmi_connect(struct ath11k_base *ab) +{ + u32 i; + u8 wmi_ep_count; + + wmi_ep_count = ab->htc.wmi_ep_count; + if (wmi_ep_count > max_radios) + return -1; + + for (i = 0; i < wmi_ep_count; i++) + ath11k_connect_pdev_htc_service(ab, i); + + return 0; +} + +static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id) +{ + if (warn_on(pdev_id >= max_radios)) + return; + + /* todo: deinit any pdev specific wmi resource */ +} + +int ath11k_wmi_pdev_attach(struct ath11k_base *ab, + u8 pdev_id) +{ + struct ath11k_pdev_wmi *wmi_handle; + + if (pdev_id >= max_radios) + return -einval; + + wmi_handle = &ab->wmi_sc.wmi[pdev_id]; + + wmi_handle->wmi_sc = &ab->wmi_sc; + + ab->wmi_sc.ab = ab; + /* todo: init remaining resource specific to pdev */ + + return 0; +} + +int ath11k_wmi_attach(struct ath11k_base *ab) +{ + int ret; + 
+ ret = ath11k_wmi_pdev_attach(ab, 0); + if (ret) + return ret; + + ab->wmi_sc.ab = ab; + ab->wmi_sc.preferred_hw_mode = wmi_host_hw_mode_max; + + /* todo: init remaining wmi soc resources required */ + init_completion(&ab->wmi_sc.service_ready); + init_completion(&ab->wmi_sc.unified_ready); + + return 0; +} + +void ath11k_wmi_detach(struct ath11k_base *ab) +{ + int i; + + /* todo: deinit wmi resource specific to soc as required */ + + for (i = 0; i < ab->htc.wmi_ep_count; i++) + ath11k_wmi_pdev_detach(ab, i); +} diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/wmi.h +/* spdx-license-identifier: bsd-3-clause-clear */ +/* + * copyright (c) 2018-2019 the linux foundation. all rights reserved. + */ + +#ifndef ath11k_wmi_h +#define ath11k_wmi_h + +#include <net/mac80211.h> +#include "htc.h" + +struct ath11k_base; +struct ath11k; +struct ath11k_fw_stats; + +#define psoc_host_max_num_ss (8) + +/* defines to set packet extension values whic can be 0 us, 8 usec or 16 usec */ +#define max_he_nss 8 +#define max_he_modulation 8 +#define max_he_ru 4 +#define he_modulation_none 7 +#define he_pet_0_usec 0 +#define he_pet_8_usec 1 +#define he_pet_16_usec 2 + +#define wmi_max_num_ss max_he_nss +#define wmi_max_num_ru max_he_ru + +#define wmi_tlv_cmd(grp_id) (((grp_id) << 12) | 0x1) +#define wmi_tlv_ev(grp_id) (((grp_id) << 12) | 0x1) +#define wmi_tlv_cmd_unsupported 0 +#define wmi_tlv_pdev_param_unsupported 0 +#define wmi_tlv_vdev_param_unsupported 0 + +struct wmi_cmd_hdr { + u32 cmd_id; +} __packed; + +struct wmi_tlv { + u32 header; + u8 value[0]; +} __packed; + +#define wmi_tlv_len genmask(15, 0) +#define wmi_tlv_tag genmask(31, 16) +#define tlv_hdr_size field_sizeof(struct wmi_tlv, header) + +#define wmi_cmd_hdr_cmd_id genmask(23, 0) +#define wmi_max_mem_reqs 32 +#define ath11k_max_hw_listen_interval 5 + +#define wlan_scan_params_max_ssid 16 +#define wlan_scan_params_max_bssid 4 
+#define wlan_scan_params_max_ie_len 256 + +/* + * hw mode config type replicated from fw header + * @wmi_host_hw_mode_single: only one phy is active. + * @wmi_host_hw_mode_dbs: both phys are active in different bands, + * one in 2g and another in 5g. + * @wmi_host_hw_mode_sbs_passive: both phys are in passive mode (only rx) in + * same band; no tx allowed. + * @wmi_host_hw_mode_sbs: both phys are active in the same band. + * support for both phys within one band is planned + * for 5g only(as indicated in wmi_mac_phy_capabilities), + * but could be extended to other bands in the future. + * the separation of the band between the two phys needs + * to be communicated separately. + * @wmi_host_hw_mode_dbs_sbs: 3 phys, with 2 on the same band doing sbs + * as in wmi_hw_mode_sbs, and 3rd on the other band + * @wmi_host_hw_mode_dbs_or_sbs: two phy with one phy capabale of both 2g and + * 5g. it can support sbs (5g + 5g) or dbs (5g + 2g). + * @wmi_host_hw_mode_max: max hw_mode_id. used to indicate invalid mode. + */ +enum wmi_host_hw_mode_config_type { + wmi_host_hw_mode_single = 0, + wmi_host_hw_mode_dbs = 1, + wmi_host_hw_mode_sbs_passive = 2, + wmi_host_hw_mode_sbs = 3, + wmi_host_hw_mode_dbs_sbs = 4, + wmi_host_hw_mode_dbs_or_sbs = 5, + + /* keep last */ + wmi_host_hw_mode_max +}; + +/* hw mode priority values used to detect the preferred hw mode + * on the available modes. + */ +enum wmi_host_hw_mode_priority { + wmi_host_hw_mode_dbs_sbs_pri, + wmi_host_hw_mode_dbs_pri, + wmi_host_hw_mode_dbs_or_sbs_pri, + wmi_host_hw_mode_sbs_pri, + wmi_host_hw_mode_sbs_passive_pri, + wmi_host_hw_mode_single_pri, + + /* keep last the lowest priority */ + wmi_host_hw_mode_max_pri +}; + +enum { + wmi_host_wlan_2g_cap = 0x1, + wmi_host_wlan_5g_cap = 0x2, + wmi_host_wlan_2g_5g_cap = 0x3, +}; + +/* + * wmi command groups. 
+ */ +enum wmi_cmd_group { + /* 0 to 2 are reserved */ + wmi_grp_start = 0x3, + wmi_grp_scan = wmi_grp_start, + wmi_grp_pdev = 0x4, + wmi_grp_vdev = 0x5, + wmi_grp_peer = 0x6, + wmi_grp_mgmt = 0x7, + wmi_grp_ba_neg = 0x8, + wmi_grp_sta_ps = 0x9, + wmi_grp_dfs = 0xa, + wmi_grp_roam = 0xb, + wmi_grp_ofl_scan = 0xc, + wmi_grp_p2p = 0xd, + wmi_grp_ap_ps = 0xe, + wmi_grp_rate_ctrl = 0xf, + wmi_grp_profile = 0x10, + wmi_grp_suspend = 0x11, + wmi_grp_bcn_filter = 0x12, + wmi_grp_wow = 0x13, + wmi_grp_rtt = 0x14, + wmi_grp_spectral = 0x15, + wmi_grp_stats = 0x16, + wmi_grp_arp_ns_ofl = 0x17, + wmi_grp_nlo_ofl = 0x18, + wmi_grp_gtk_ofl = 0x19, + wmi_grp_csa_ofl = 0x1a, + wmi_grp_chatter = 0x1b, + wmi_grp_tid_addba = 0x1c, + wmi_grp_misc = 0x1d, + wmi_grp_gpio = 0x1e, + wmi_grp_fwtest = 0x1f, + wmi_grp_tdls = 0x20, + wmi_grp_resmgr = 0x21, + wmi_grp_sta_smps = 0x22, + wmi_grp_wlan_hb = 0x23, + wmi_grp_rmc = 0x24, + wmi_grp_mhf_ofl = 0x25, + wmi_grp_location_scan = 0x26, + wmi_grp_oem = 0x27, + wmi_grp_nan = 0x28, + wmi_grp_coex = 0x29, + wmi_grp_obss_ofl = 0x2a, + wmi_grp_lpi = 0x2b, + wmi_grp_extscan = 0x2c, + wmi_grp_dhcp_ofl = 0x2d, + wmi_grp_ipa = 0x2e, + wmi_grp_mdns_ofl = 0x2f, + wmi_grp_sap_ofl = 0x30, + wmi_grp_ocb = 0x31, + wmi_grp_soc = 0x32, + wmi_grp_pkt_filter = 0x33, + wmi_grp_mawc = 0x34, + wmi_grp_pmf_offload = 0x35, + wmi_grp_bpf_offload = 0x36, + wmi_grp_nan_data = 0x37, + wmi_grp_prototype = 0x38, + wmi_grp_monitor = 0x39, + wmi_grp_regulatory = 0x3a, + wmi_grp_hw_data_filter = 0x3b, +}; + +#define wmi_cmd_grp(grp_id) (((grp_id) << 12) | 0x1) +#define wmi_evt_grp_start_id(grp_id) (((grp_id) << 12) | 0x1) + +#define wmi_cmd_unsupported 0 + +enum wmi_tlv_cmd_id { + wmi_init_cmdid = 0x1, + wmi_start_scan_cmdid = wmi_tlv_cmd(wmi_grp_scan), + wmi_stop_scan_cmdid, + wmi_scan_chan_list_cmdid, + wmi_scan_sch_prio_tbl_cmdid, + wmi_scan_update_request_cmdid, + wmi_scan_prob_req_oui_cmdid, + wmi_scan_adaptive_dwell_config_cmdid, + wmi_pdev_set_regdomain_cmdid = 
wmi_tlv_cmd(wmi_grp_pdev), + wmi_pdev_set_channel_cmdid, + wmi_pdev_set_param_cmdid, + wmi_pdev_pktlog_enable_cmdid, + wmi_pdev_pktlog_disable_cmdid, + wmi_pdev_set_wmm_params_cmdid, + wmi_pdev_set_ht_cap_ie_cmdid, + wmi_pdev_set_vht_cap_ie_cmdid, + wmi_pdev_set_dscp_tid_map_cmdid, + wmi_pdev_set_quiet_mode_cmdid, + wmi_pdev_green_ap_ps_enable_cmdid, + wmi_pdev_get_tpc_config_cmdid, + wmi_pdev_set_base_macaddr_cmdid, + wmi_pdev_dump_cmdid, + wmi_pdev_set_led_config_cmdid, + wmi_pdev_get_temperature_cmdid, + wmi_pdev_set_led_flashing_cmdid, + wmi_pdev_smart_ant_enable_cmdid, + wmi_pdev_smart_ant_set_rx_antenna_cmdid, + wmi_pdev_set_antenna_switch_table_cmdid, + wmi_pdev_set_ctl_table_cmdid, + wmi_pdev_set_mimogain_table_cmdid, + wmi_pdev_fips_cmdid, + wmi_pdev_get_ani_cck_config_cmdid, + wmi_pdev_get_ani_ofdm_config_cmdid, + wmi_pdev_get_nfcal_power_cmdid, + wmi_pdev_get_tpc_cmdid, + wmi_mib_stats_enable_cmdid, + wmi_pdev_set_pcl_cmdid, + wmi_pdev_set_hw_mode_cmdid, + wmi_pdev_set_mac_config_cmdid, + wmi_pdev_set_antenna_mode_cmdid, + wmi_set_periodic_channel_stats_config_cmdid, + wmi_pdev_wal_power_debug_cmdid, + wmi_pdev_set_reorder_timeout_val_cmdid, + wmi_pdev_set_wakeup_config_cmdid, + wmi_pdev_get_antdiv_status_cmdid, + wmi_pdev_get_chip_power_stats_cmdid, + wmi_pdev_set_stats_threshold_cmdid, + wmi_pdev_multiple_vdev_restart_request_cmdid, + wmi_pdev_update_pkt_routing_cmdid, + wmi_pdev_check_cal_version_cmdid, + wmi_pdev_set_diversity_gain_cmdid, + wmi_pdev_div_get_rssi_antid_cmdid, + wmi_pdev_bss_chan_info_request_cmdid, + wmi_pdev_update_pmk_cache_cmdid, + wmi_pdev_update_fils_hlp_pkt_cmdid, + wmi_pdev_update_ctltable_request_cmdid, + wmi_pdev_config_vendor_oui_action_cmdid, + wmi_pdev_set_ac_tx_queue_optimized_cmdid, + wmi_pdev_set_rx_filter_promiscuous_cmdid, + wmi_pdev_dma_ring_cfg_req_cmdid, + wmi_pdev_he_tb_action_frm_cmdid, + wmi_pdev_pktlog_filter_cmdid, + wmi_vdev_create_cmdid = wmi_tlv_cmd(wmi_grp_vdev), + wmi_vdev_delete_cmdid, + 
wmi_vdev_start_request_cmdid, + wmi_vdev_restart_request_cmdid, + wmi_vdev_up_cmdid, + wmi_vdev_stop_cmdid, + wmi_vdev_down_cmdid, + wmi_vdev_set_param_cmdid, + wmi_vdev_install_key_cmdid, + wmi_vdev_wnm_sleepmode_cmdid, + wmi_vdev_wmm_addts_cmdid, + wmi_vdev_wmm_delts_cmdid, + wmi_vdev_set_wmm_params_cmdid, + wmi_vdev_set_gtx_params_cmdid, + wmi_vdev_ipsec_natkeepalive_filter_cmdid, + wmi_vdev_plmreq_start_cmdid, + wmi_vdev_plmreq_stop_cmdid, + wmi_vdev_tsf_tstamp_action_cmdid, + wmi_vdev_set_ie_cmdid, + wmi_vdev_ratemask_cmdid, + wmi_vdev_atf_request_cmdid, + wmi_vdev_set_dscp_tid_map_cmdid, + wmi_vdev_filter_neighbor_rx_packets_cmdid, + wmi_vdev_set_quiet_mode_cmdid, + wmi_vdev_set_custom_aggr_size_cmdid, + wmi_vdev_encrypt_decrypt_data_req_cmdid, + wmi_vdev_add_mac_addr_to_rx_filter_cmdid, + wmi_peer_create_cmdid = wmi_tlv_cmd(wmi_grp_peer), + wmi_peer_delete_cmdid, + wmi_peer_flush_tids_cmdid, + wmi_peer_set_param_cmdid, + wmi_peer_assoc_cmdid, + wmi_peer_add_wds_entry_cmdid, + wmi_peer_remove_wds_entry_cmdid, + wmi_peer_mcast_group_cmdid, + wmi_peer_info_req_cmdid, + wmi_peer_get_estimated_linkspeed_cmdid, + wmi_peer_set_rate_report_condition_cmdid, + wmi_peer_update_wds_entry_cmdid, + wmi_peer_add_proxy_sta_entry_cmdid, + wmi_peer_smart_ant_set_tx_antenna_cmdid, + wmi_peer_smart_ant_set_train_info_cmdid, + wmi_peer_smart_ant_set_node_config_ops_cmdid, + wmi_peer_atf_request_cmdid, + wmi_peer_bwf_request_cmdid, + wmi_peer_reorder_queue_setup_cmdid, + wmi_peer_reorder_queue_remove_cmdid, + wmi_peer_set_rx_blocksize_cmdid, + wmi_peer_antdiv_info_req_cmdid, + wmi_peer_oper_mode_change_eventid, + wmi_bcn_tx_cmdid = wmi_tlv_cmd(wmi_grp_mgmt), + wmi_pdev_send_bcn_cmdid, + wmi_bcn_tmpl_cmdid, + wmi_bcn_filter_rx_cmdid, + wmi_prb_req_filter_rx_cmdid, + wmi_mgmt_tx_cmdid, + wmi_prb_tmpl_cmdid, + wmi_mgmt_tx_send_cmdid, + wmi_offchan_data_tx_send_cmdid, + wmi_pdev_send_fd_cmdid, + wmi_bcn_offload_ctrl_cmdid, + wmi_bss_color_change_enable_cmdid, + 
wmi_vdev_bcn_offload_quiet_config_cmdid, + wmi_addba_clear_resp_cmdid = wmi_tlv_cmd(wmi_grp_ba_neg), + wmi_addba_send_cmdid, + wmi_addba_status_cmdid, + wmi_delba_send_cmdid, + wmi_addba_set_resp_cmdid, + wmi_send_singleamsdu_cmdid, + wmi_sta_powersave_mode_cmdid = wmi_tlv_cmd(wmi_grp_sta_ps), + wmi_sta_powersave_param_cmdid, + wmi_sta_mimo_ps_mode_cmdid, + wmi_pdev_dfs_enable_cmdid = wmi_tlv_cmd(wmi_grp_dfs), + wmi_pdev_dfs_disable_cmdid, + wmi_dfs_phyerr_filter_ena_cmdid, + wmi_dfs_phyerr_filter_dis_cmdid, + wmi_pdev_dfs_phyerr_offload_enable_cmdid, + wmi_pdev_dfs_phyerr_offload_disable_cmdid, + wmi_vdev_adfs_ch_cfg_cmdid, + wmi_vdev_adfs_ocac_abort_cmdid, + wmi_roam_scan_mode = wmi_tlv_cmd(wmi_grp_roam), + wmi_roam_scan_rssi_threshold, + wmi_roam_scan_period, + wmi_roam_scan_rssi_change_threshold, + wmi_roam_ap_profile, + wmi_roam_chan_list, + wmi_roam_scan_cmd, + wmi_roam_synch_complete, + wmi_roam_set_ric_request_cmdid, + wmi_roam_invoke_cmdid, + wmi_roam_filter_cmdid, + wmi_roam_subnet_change_config_cmdid, + wmi_roam_configure_mawc_cmdid, + wmi_roam_set_mbo_param_cmdid, + wmi_roam_per_config_cmdid, + wmi_ofl_scan_add_ap_profile = wmi_tlv_cmd(wmi_grp_ofl_scan), + wmi_ofl_scan_remove_ap_profile, + wmi_ofl_scan_period, + wmi_p2p_dev_set_device_info = wmi_tlv_cmd(wmi_grp_p2p), + wmi_p2p_dev_set_discoverability, + wmi_p2p_go_set_beacon_ie, + wmi_p2p_go_set_probe_resp_ie, + wmi_p2p_set_vendor_ie_data_cmdid, + wmi_p2p_disc_offload_config_cmdid, + wmi_p2p_disc_offload_appie_cmdid, + wmi_p2p_disc_offload_pattern_cmdid, + wmi_p2p_set_oppps_param_cmdid, + wmi_p2p_listen_offload_start_cmdid, + wmi_p2p_listen_offload_stop_cmdid, + wmi_ap_ps_peer_param_cmdid = wmi_tlv_cmd(wmi_grp_ap_ps), + wmi_ap_ps_peer_uapsd_coex_cmdid, + wmi_ap_ps_egap_param_cmdid, + wmi_peer_rate_retry_sched_cmdid = wmi_tlv_cmd(wmi_grp_rate_ctrl), + wmi_wlan_profile_trigger_cmdid = wmi_tlv_cmd(wmi_grp_profile), + wmi_wlan_profile_set_hist_intvl_cmdid, + wmi_wlan_profile_get_profile_data_cmdid, + 
wmi_wlan_profile_enable_profile_id_cmdid, + wmi_wlan_profile_list_profile_id_cmdid, + wmi_pdev_suspend_cmdid = wmi_tlv_cmd(wmi_grp_suspend), + wmi_pdev_resume_cmdid, + wmi_add_bcn_filter_cmdid = wmi_tlv_cmd(wmi_grp_bcn_filter), + wmi_rmv_bcn_filter_cmdid, + wmi_wow_add_wake_pattern_cmdid = wmi_tlv_cmd(wmi_grp_wow), + wmi_wow_del_wake_pattern_cmdid, + wmi_wow_enable_disable_wake_event_cmdid, + wmi_wow_enable_cmdid, + wmi_wow_hostwakeup_from_sleep_cmdid, + wmi_wow_ioac_add_keepalive_cmdid, + wmi_wow_ioac_del_keepalive_cmdid, + wmi_wow_ioac_add_wake_pattern_cmdid, + wmi_wow_ioac_del_wake_pattern_cmdid, + wmi_d0_wow_enable_disable_cmdid, + wmi_extwow_enable_cmdid, + wmi_extwow_set_app_type1_params_cmdid, + wmi_extwow_set_app_type2_params_cmdid, + wmi_wow_enable_icmpv6_na_flt_cmdid, + wmi_wow_udp_svc_ofld_cmdid, + wmi_wow_hostwakeup_gpio_pin_pattern_config_cmdid, + wmi_wow_set_action_wake_up_cmdid, + wmi_rtt_measreq_cmdid = wmi_tlv_cmd(wmi_grp_rtt), + wmi_rtt_tsf_cmdid, + wmi_vdev_spectral_scan_configure_cmdid = wmi_tlv_cmd(wmi_grp_spectral), + wmi_vdev_spectral_scan_enable_cmdid, + wmi_request_stats_cmdid = wmi_tlv_cmd(wmi_grp_stats), + wmi_mcc_sched_traffic_stats_cmdid, + wmi_request_stats_ext_cmdid, + wmi_request_link_stats_cmdid, + wmi_start_link_stats_cmdid, + wmi_clear_link_stats_cmdid, + wmi_get_fw_mem_dump_cmdid, + wmi_debug_mesg_flush_cmdid, + wmi_diag_event_log_config_cmdid, + wmi_request_wlan_stats_cmdid, + wmi_request_rcpi_cmdid, + wmi_request_peer_stats_info_cmdid, + wmi_request_radio_chan_stats_cmdid, + wmi_set_arp_ns_offload_cmdid = wmi_tlv_cmd(wmi_grp_arp_ns_ofl), + wmi_add_proactive_arp_rsp_pattern_cmdid, + wmi_del_proactive_arp_rsp_pattern_cmdid, + wmi_network_list_offload_config_cmdid = wmi_tlv_cmd(wmi_grp_nlo_ofl), + wmi_apfind_cmdid, + wmi_passpoint_list_config_cmdid, + wmi_nlo_configure_mawc_cmdid, + wmi_gtk_offload_cmdid = wmi_tlv_cmd(wmi_grp_gtk_ofl), + wmi_csa_offload_enable_cmdid = wmi_tlv_cmd(wmi_grp_csa_ofl), + 
wmi_csa_offload_chanswitch_cmdid, + wmi_chatter_set_mode_cmdid = wmi_tlv_cmd(wmi_grp_chatter), + wmi_chatter_add_coalescing_filter_cmdid, + wmi_chatter_delete_coalescing_filter_cmdid, + wmi_chatter_coalescing_query_cmdid, + wmi_peer_tid_addba_cmdid = wmi_tlv_cmd(wmi_grp_tid_addba), + wmi_peer_tid_delba_cmdid, + wmi_sta_dtim_ps_method_cmdid, + wmi_sta_uapsd_auto_trig_cmdid, + wmi_sta_keepalive_cmdid, + wmi_ba_req_ssn_cmdid, + wmi_echo_cmdid = wmi_tlv_cmd(wmi_grp_misc), + wmi_pdev_utf_cmdid, + wmi_dbglog_cfg_cmdid, + wmi_pdev_qvit_cmdid, + wmi_pdev_ftm_intg_cmdid, + wmi_vdev_set_keepalive_cmdid, + wmi_vdev_get_keepalive_cmdid, + wmi_force_fw_hang_cmdid, + wmi_set_mcastbcast_filter_cmdid, + wmi_thermal_mgmt_cmdid, + wmi_host_auto_shutdown_cfg_cmdid, + wmi_tpc_chainmask_config_cmdid, + wmi_set_antenna_diversity_cmdid, + wmi_ocb_set_sched_cmdid, + wmi_rssi_breach_monitor_config_cmdid, + wmi_lro_config_cmdid, + wmi_transfer_data_to_flash_cmdid, + wmi_config_enhanced_mcast_filter_cmdid, + wmi_vdev_wisa_cmdid, + wmi_dbglog_time_stamp_sync_cmdid, + wmi_set_multiple_mcast_filter_cmdid, + wmi_read_data_from_flash_cmdid, + wmi_gpio_config_cmdid = wmi_tlv_cmd(wmi_grp_gpio), + wmi_gpio_output_cmdid, + wmi_txbf_cmdid, + wmi_fwtest_vdev_mcc_set_tbtt_mode_cmdid = wmi_tlv_cmd(wmi_grp_fwtest), + wmi_fwtest_p2p_set_noa_param_cmdid, + wmi_unit_test_cmdid, + wmi_fwtest_cmdid, + wmi_qboost_cfg_cmdid, + wmi_tdls_set_state_cmdid = wmi_tlv_cmd(wmi_grp_tdls), + wmi_tdls_peer_update_cmdid, + wmi_tdls_set_offchan_mode_cmdid, + wmi_resmgr_adaptive_ocs_en_dis_cmdid = wmi_tlv_cmd(wmi_grp_resmgr), + wmi_resmgr_set_chan_time_quota_cmdid, + wmi_resmgr_set_chan_latency_cmdid, + wmi_sta_smps_force_mode_cmdid = wmi_tlv_cmd(wmi_grp_sta_smps), + wmi_sta_smps_param_cmdid, + wmi_hb_set_enable_cmdid = wmi_tlv_cmd(wmi_grp_wlan_hb), + wmi_hb_set_tcp_params_cmdid, + wmi_hb_set_tcp_pkt_filter_cmdid, + wmi_hb_set_udp_params_cmdid, + wmi_hb_set_udp_pkt_filter_cmdid, + wmi_rmc_set_mode_cmdid = 
wmi_tlv_cmd(wmi_grp_rmc), + wmi_rmc_set_action_period_cmdid, + wmi_rmc_config_cmdid, + wmi_rmc_set_manual_leader_cmdid, + wmi_mhf_offload_set_mode_cmdid = wmi_tlv_cmd(wmi_grp_mhf_ofl), + wmi_mhf_offload_plumb_routing_tbl_cmdid, + wmi_batch_scan_enable_cmdid = wmi_tlv_cmd(wmi_grp_location_scan), + wmi_batch_scan_disable_cmdid, + wmi_batch_scan_trigger_result_cmdid, + wmi_oem_req_cmdid = wmi_tlv_cmd(wmi_grp_oem), + wmi_oem_request_cmdid, + wmi_lpi_oem_req_cmdid, + wmi_nan_cmdid = wmi_tlv_cmd(wmi_grp_nan), + wmi_modem_power_state_cmdid = wmi_tlv_cmd(wmi_grp_coex), + wmi_chan_avoid_update_cmdid, + wmi_coex_config_cmdid, + wmi_chan_avoid_rpt_allow_cmdid, + wmi_coex_get_antenna_isolation_cmdid, + wmi_sar_limits_cmdid, + wmi_obss_scan_enable_cmdid = wmi_tlv_cmd(wmi_grp_obss_ofl), + wmi_obss_scan_disable_cmdid, + wmi_lpi_mgmt_snooping_config_cmdid = wmi_tlv_cmd(wmi_grp_lpi), + wmi_lpi_start_scan_cmdid, + wmi_lpi_stop_scan_cmdid, + wmi_extscan_start_cmdid = wmi_tlv_cmd(wmi_grp_extscan), + wmi_extscan_stop_cmdid, + wmi_extscan_configure_wlan_change_monitor_cmdid, + wmi_extscan_configure_hotlist_monitor_cmdid, + wmi_extscan_get_cached_results_cmdid, + wmi_extscan_get_wlan_change_results_cmdid, + wmi_extscan_set_capabilities_cmdid, + wmi_extscan_get_capabilities_cmdid, + wmi_extscan_configure_hotlist_ssid_monitor_cmdid, + wmi_extscan_configure_mawc_cmdid, + wmi_set_dhcp_server_offload_cmdid = wmi_tlv_cmd(wmi_grp_dhcp_ofl), + wmi_ipa_offload_enable_disable_cmdid = wmi_tlv_cmd(wmi_grp_ipa), + wmi_mdns_offload_enable_cmdid = wmi_tlv_cmd(wmi_grp_mdns_ofl), + wmi_mdns_set_fqdn_cmdid, + wmi_mdns_set_response_cmdid, + wmi_mdns_get_stats_cmdid, + wmi_sap_ofl_enable_cmdid = wmi_tlv_cmd(wmi_grp_sap_ofl), + wmi_sap_set_blacklist_param_cmdid, + wmi_ocb_set_config_cmdid = wmi_tlv_cmd(wmi_grp_ocb), + wmi_ocb_set_utc_time_cmdid, + wmi_ocb_start_timing_advert_cmdid, + wmi_ocb_stop_timing_advert_cmdid, + wmi_ocb_get_tsf_timer_cmdid, + wmi_dcc_get_stats_cmdid, + wmi_dcc_clear_stats_cmdid, + 
wmi_dcc_update_ndl_cmdid, + wmi_soc_set_pcl_cmdid = wmi_tlv_cmd(wmi_grp_soc), + wmi_soc_set_hw_mode_cmdid, + wmi_soc_set_dual_mac_config_cmdid, + wmi_soc_set_antenna_mode_cmdid, + wmi_packet_filter_config_cmdid = wmi_tlv_cmd(wmi_grp_pkt_filter), + wmi_packet_filter_enable_cmdid, + wmi_mawc_sensor_report_ind_cmdid = wmi_tlv_cmd(wmi_grp_mawc), + wmi_pmf_offload_set_sa_query_cmdid = wmi_tlv_cmd(wmi_grp_pmf_offload), + wmi_bpf_get_capability_cmdid = wmi_tlv_cmd(wmi_grp_bpf_offload), + wmi_bpf_get_vdev_stats_cmdid, + wmi_bpf_set_vdev_instructions_cmdid, + wmi_bpf_del_vdev_instructions_cmdid, + wmi_bpf_set_vdev_active_mode_cmdid, + wmi_mnt_filter_cmdid = wmi_tlv_cmd(wmi_grp_monitor), + wmi_set_current_country_cmdid = wmi_tlv_cmd(wmi_grp_regulatory), + wmi_11d_scan_start_cmdid, + wmi_11d_scan_stop_cmdid, + wmi_set_init_country_cmdid, + wmi_ndi_get_cap_req_cmdid = wmi_tlv_cmd(wmi_grp_prototype), + wmi_ndp_initiator_req_cmdid, + wmi_ndp_responder_req_cmdid, + wmi_ndp_end_req_cmdid, + wmi_hw_data_filter_cmdid = wmi_tlv_cmd(wmi_grp_hw_data_filter), +}; + +enum wmi_tlv_event_id { + wmi_service_ready_eventid = 0x1, + wmi_ready_eventid, + wmi_service_available_eventid, + wmi_scan_eventid = wmi_evt_grp_start_id(wmi_grp_scan), + wmi_pdev_tpc_config_eventid = wmi_tlv_cmd(wmi_grp_pdev), + wmi_chan_info_eventid, + wmi_phyerr_eventid, + wmi_pdev_dump_eventid, + wmi_tx_pause_eventid, + wmi_dfs_radar_eventid, + wmi_pdev_l1ss_track_eventid, + wmi_pdev_temperature_eventid, + wmi_service_ready_ext_eventid, + wmi_pdev_fips_eventid, + wmi_pdev_channel_hopping_eventid, + wmi_pdev_ani_cck_level_eventid, + wmi_pdev_ani_ofdm_level_eventid, + wmi_pdev_tpc_eventid, + wmi_pdev_nfcal_power_all_channels_eventid, + wmi_pdev_set_hw_mode_resp_eventid, + wmi_pdev_hw_mode_transition_eventid, + wmi_pdev_set_mac_config_resp_eventid, + wmi_pdev_antdiv_status_eventid, + wmi_pdev_chip_power_stats_eventid, + wmi_pdev_chip_power_save_failure_detected_eventid, + wmi_pdev_csa_switch_count_status_eventid, + 
wmi_pdev_check_cal_version_eventid, + wmi_pdev_div_rssi_antid_eventid, + wmi_pdev_bss_chan_info_eventid, + wmi_pdev_update_ctltable_eventid, + wmi_pdev_dma_ring_cfg_rsp_eventid, + wmi_pdev_dma_ring_buf_release_eventid, + wmi_pdev_ctl_failsafe_check_eventid, + wmi_vdev_start_resp_eventid = wmi_tlv_cmd(wmi_grp_vdev), + wmi_vdev_stopped_eventid, + wmi_vdev_install_key_complete_eventid, + wmi_vdev_mcc_bcn_interval_change_req_eventid, + wmi_vdev_tsf_report_eventid, + wmi_vdev_delete_resp_eventid, + wmi_vdev_encrypt_decrypt_data_resp_eventid, + wmi_vdev_add_mac_addr_to_rx_filter_status_eventid, + wmi_peer_sta_kickout_eventid = wmi_tlv_cmd(wmi_grp_peer), + wmi_peer_info_eventid, + wmi_peer_tx_fail_cnt_thr_eventid, + wmi_peer_estimated_linkspeed_eventid, + wmi_peer_state_eventid, + wmi_peer_assoc_conf_eventid, + wmi_peer_delete_resp_eventid, + wmi_peer_ratecode_list_eventid, + wmi_wds_peer_eventid, + wmi_peer_sta_ps_statechg_eventid, + wmi_peer_antdiv_info_eventid, + wmi_mgmt_rx_eventid = wmi_tlv_cmd(wmi_grp_mgmt), + wmi_host_swba_eventid, + wmi_tbttoffset_update_eventid, + wmi_offload_bcn_tx_status_eventid, + wmi_offload_prob_resp_tx_status_eventid, + wmi_mgmt_tx_completion_eventid, + wmi_mgmt_tx_bundle_completion_eventid, + wmi_tbttoffset_ext_update_eventid, + wmi_tx_delba_complete_eventid = wmi_tlv_cmd(wmi_grp_ba_neg), + wmi_tx_addba_complete_eventid, + wmi_ba_rsp_ssn_eventid, + wmi_aggr_state_trig_eventid, + wmi_roam_eventid = wmi_tlv_cmd(wmi_grp_roam), + wmi_profile_match, + wmi_roam_synch_eventid, + wmi_p2p_disc_eventid = wmi_tlv_cmd(wmi_grp_p2p), + wmi_p2p_noa_eventid, + wmi_p2p_listen_offload_stopped_eventid, + wmi_ap_ps_egap_info_eventid = wmi_tlv_cmd(wmi_grp_ap_ps), + wmi_pdev_resume_eventid = wmi_tlv_cmd(wmi_grp_suspend), + wmi_wow_wakeup_host_eventid = wmi_tlv_cmd(wmi_grp_wow), + wmi_d0_wow_disable_ack_eventid, + wmi_wow_initial_wakeup_eventid, + wmi_rtt_measurement_report_eventid = wmi_tlv_cmd(wmi_grp_rtt), + wmi_tsf_measurement_report_eventid, + 
wmi_rtt_error_report_eventid, + wmi_stats_ext_eventid = wmi_tlv_cmd(wmi_grp_stats), + wmi_iface_link_stats_eventid, + wmi_peer_link_stats_eventid, + wmi_radio_link_stats_eventid, + wmi_update_fw_mem_dump_eventid, + wmi_diag_event_log_supported_eventid, + wmi_inst_rssi_stats_eventid, + wmi_radio_tx_power_level_stats_eventid, + wmi_report_stats_eventid, + wmi_update_rcpi_eventid, + wmi_peer_stats_info_eventid, + wmi_radio_chan_stats_eventid, + wmi_nlo_match_eventid = wmi_tlv_cmd(wmi_grp_nlo_ofl), + wmi_nlo_scan_complete_eventid, + wmi_apfind_eventid, + wmi_passpoint_match_eventid, + wmi_gtk_offload_status_eventid = wmi_tlv_cmd(wmi_grp_gtk_ofl), + wmi_gtk_rekey_fail_eventid, + wmi_csa_handling_eventid = wmi_tlv_cmd(wmi_grp_csa_ofl), + wmi_chatter_pc_query_eventid = wmi_tlv_cmd(wmi_grp_chatter), + wmi_pdev_dfs_radar_detection_eventid = wmi_tlv_cmd(wmi_grp_dfs), + wmi_vdev_dfs_cac_complete_eventid, + wmi_vdev_adfs_ocac_complete_eventid, + wmi_echo_eventid = wmi_tlv_cmd(wmi_grp_misc), + wmi_pdev_utf_eventid, + wmi_debug_mesg_eventid, + wmi_update_stats_eventid, + wmi_debug_print_eventid, + wmi_dcs_interference_eventid, + wmi_pdev_qvit_eventid, + wmi_wlan_profile_data_eventid, + wmi_pdev_ftm_intg_eventid, + wmi_wlan_freq_avoid_eventid, + wmi_vdev_get_keepalive_eventid, + wmi_thermal_mgmt_eventid, + wmi_diag_data_container_eventid, + wmi_host_auto_shutdown_eventid, + wmi_update_whal_mib_stats_eventid, + wmi_update_vdev_rate_stats_eventid, + wmi_diag_eventid, + wmi_ocb_set_sched_eventid, + wmi_debug_mesg_flush_complete_eventid, + wmi_rssi_breach_eventid, + wmi_transfer_data_to_flash_complete_eventid, + wmi_pdev_utf_scpc_eventid, + wmi_read_data_from_flash_eventid, + wmi_report_rx_aggr_failure_eventid, + wmi_pkgid_eventid, + wmi_gpio_input_eventid = wmi_tlv_cmd(wmi_grp_gpio), + wmi_uploadh_eventid, + wmi_captureh_eventid, + wmi_rfkill_state_change_eventid, + wmi_tdls_peer_eventid = wmi_tlv_cmd(wmi_grp_tdls), + wmi_sta_smps_force_mode_compl_eventid = 
wmi_tlv_cmd(wmi_grp_sta_smps), + wmi_batch_scan_enabled_eventid = wmi_tlv_cmd(wmi_grp_location_scan), + wmi_batch_scan_result_eventid, + wmi_oem_capability_eventid = wmi_tlv_cmd(wmi_grp_oem), + wmi_oem_measurement_report_eventid, + wmi_oem_error_report_eventid, + wmi_oem_response_eventid, + wmi_nan_eventid = wmi_tlv_cmd(wmi_grp_nan), + wmi_nan_disc_iface_created_eventid, + wmi_nan_disc_iface_deleted_eventid, + wmi_nan_started_cluster_eventid, + wmi_nan_joined_cluster_eventid, + wmi_coex_report_antenna_isolation_eventid = wmi_tlv_cmd(wmi_grp_coex), + wmi_lpi_result_eventid = wmi_tlv_cmd(wmi_grp_lpi), + wmi_lpi_status_eventid, + wmi_lpi_handoff_eventid, + wmi_extscan_start_stop_eventid = wmi_tlv_cmd(wmi_grp_extscan), + wmi_extscan_operation_eventid, + wmi_extscan_table_usage_eventid, + wmi_extscan_cached_results_eventid, + wmi_extscan_wlan_change_results_eventid, + wmi_extscan_hotlist_match_eventid, + wmi_extscan_capabilities_eventid, + wmi_extscan_hotlist_ssid_match_eventid, + wmi_mdns_stats_eventid = wmi_tlv_cmd(wmi_grp_mdns_ofl), + wmi_sap_ofl_add_sta_eventid = wmi_tlv_cmd(wmi_grp_sap_ofl), + wmi_sap_ofl_del_sta_eventid, + wmi_ocb_set_config_resp_eventid = wmi_tlv_cmd(wmi_grp_ocb), + wmi_ocb_get_tsf_timer_resp_eventid, + wmi_dcc_get_stats_resp_eventid, + wmi_dcc_update_ndl_resp_eventid, + wmi_dcc_stats_eventid, + wmi_soc_set_hw_mode_resp_eventid = wmi_tlv_cmd(wmi_grp_soc), + wmi_soc_hw_mode_transition_eventid, + wmi_soc_set_dual_mac_config_resp_eventid, + wmi_mawc_enable_sensor_eventid = wmi_tlv_cmd(wmi_grp_mawc), + wmi_bpf_capabiliy_info_eventid = wmi_tlv_cmd(wmi_grp_bpf_offload), + wmi_bpf_vdev_stats_info_eventid, + wmi_rmc_new_leader_eventid = wmi_tlv_cmd(wmi_grp_rmc), + wmi_reg_chan_list_cc_eventid = wmi_tlv_cmd(wmi_grp_regulatory), + wmi_11d_new_country_eventid, + wmi_ndi_cap_rsp_eventid = wmi_tlv_cmd(wmi_grp_prototype), + wmi_ndp_initiator_rsp_eventid, + wmi_ndp_responder_rsp_eventid, + wmi_ndp_end_rsp_eventid, + wmi_ndp_indication_eventid, + 
wmi_ndp_confirm_eventid, + wmi_ndp_end_indication_eventid, +}; + +enum wmi_tlv_pdev_param { + wmi_pdev_param_tx_chain_mask = 0x1, + wmi_pdev_param_rx_chain_mask, + wmi_pdev_param_txpower_limit2g, + wmi_pdev_param_txpower_limit5g, + wmi_pdev_param_txpower_scale, + wmi_pdev_param_beacon_gen_mode, + wmi_pdev_param_beacon_tx_mode, + wmi_pdev_param_resmgr_offchan_mode, + wmi_pdev_param_protection_mode, + wmi_pdev_param_dynamic_bw, + wmi_pdev_param_non_agg_sw_retry_th, + wmi_pdev_param_agg_sw_retry_th, + wmi_pdev_param_sta_kickout_th, + wmi_pdev_param_ac_aggrsize_scaling, + wmi_pdev_param_ltr_enable, + wmi_pdev_param_ltr_ac_latency_be, + wmi_pdev_param_ltr_ac_latency_bk, + wmi_pdev_param_ltr_ac_latency_vi, + wmi_pdev_param_ltr_ac_latency_vo, + wmi_pdev_param_ltr_ac_latency_timeout, + wmi_pdev_param_ltr_sleep_override, + wmi_pdev_param_ltr_rx_override, + wmi_pdev_param_ltr_tx_activity_timeout, + wmi_pdev_param_l1ss_enable, + wmi_pdev_param_dsleep_enable, + wmi_pdev_param_pcielp_txbuf_flush, + wmi_pdev_param_pcielp_txbuf_watermark, + wmi_pdev_param_pcielp_txbuf_tmo_en, + wmi_pdev_param_pcielp_txbuf_tmo_value, + wmi_pdev_param_pdev_stats_update_period, + wmi_pdev_param_vdev_stats_update_period, + wmi_pdev_param_peer_stats_update_period, + wmi_pdev_param_bcnflt_stats_update_period, + wmi_pdev_param_pmf_qos, + wmi_pdev_param_arp_ac_override, + wmi_pdev_param_dcs, + wmi_pdev_param_ani_enable, + wmi_pdev_param_ani_poll_period, + wmi_pdev_param_ani_listen_period, + wmi_pdev_param_ani_ofdm_level, + wmi_pdev_param_ani_cck_level, + wmi_pdev_param_dyntxchain, + wmi_pdev_param_proxy_sta, + wmi_pdev_param_idle_ps_config, + wmi_pdev_param_power_gating_sleep, + wmi_pdev_param_rfkill_enable, + wmi_pdev_param_burst_dur, + wmi_pdev_param_burst_enable, + wmi_pdev_param_hw_rfkill_config, + wmi_pdev_param_low_power_rf_enable, + wmi_pdev_param_l1ss_track, + wmi_pdev_param_hyst_en, + wmi_pdev_param_power_collapse_enable, + wmi_pdev_param_led_sys_state, + wmi_pdev_param_led_enable, + 
wmi_pdev_param_audio_over_wlan_latency, + wmi_pdev_param_audio_over_wlan_enable, + wmi_pdev_param_whal_mib_stats_update_enable, + wmi_pdev_param_vdev_rate_stats_update_period, + wmi_pdev_param_cts_cbw, + wmi_pdev_param_wnts_config, + wmi_pdev_param_adaptive_early_rx_enable, + wmi_pdev_param_adaptive_early_rx_min_sleep_slop, + wmi_pdev_param_adaptive_early_rx_inc_dec_step, + wmi_pdev_param_early_rx_fix_sleep_slop, + wmi_pdev_param_bmiss_based_adaptive_bto_enable, + wmi_pdev_param_bmiss_bto_min_bcn_timeout, + wmi_pdev_param_bmiss_bto_inc_dec_step, + wmi_pdev_param_bto_fix_bcn_timeout, + wmi_pdev_param_ce_based_adaptive_bto_enable, + wmi_pdev_param_ce_bto_combo_ce_value, + wmi_pdev_param_tx_chain_mask_2g, + wmi_pdev_param_rx_chain_mask_2g, + wmi_pdev_param_tx_chain_mask_5g, + wmi_pdev_param_rx_chain_mask_5g, + wmi_pdev_param_tx_chain_mask_cck, + wmi_pdev_param_tx_chain_mask_1ss, + wmi_pdev_param_cts2self_for_p2p_go_config, + wmi_pdev_param_txpower_decr_db, + wmi_pdev_param_aggr_burst, + wmi_pdev_param_rx_decap_mode, + wmi_pdev_param_fast_channel_reset, + wmi_pdev_param_smart_antenna_default_antenna, + wmi_pdev_param_antenna_gain, + wmi_pdev_param_rx_filter, + wmi_pdev_set_mcast_to_ucast_tid, + wmi_pdev_param_proxy_sta_mode, + wmi_pdev_param_set_mcast2ucast_mode, + wmi_pdev_param_set_mcast2ucast_buffer, + wmi_pdev_param_remove_mcast2ucast_buffer, + wmi_pdev_peer_sta_ps_statechg_enable, + wmi_pdev_param_igmpmld_ac_override, + wmi_pdev_param_block_interbss, + wmi_pdev_param_set_disable_reset_cmdid, + wmi_pdev_param_set_msdu_ttl_cmdid, + wmi_pdev_param_set_ppdu_duration_cmdid, + wmi_pdev_param_txbf_sound_period_cmdid, + wmi_pdev_param_set_promisc_mode_cmdid, + wmi_pdev_param_set_burst_mode_cmdid, + wmi_pdev_param_en_stats, + wmi_pdev_param_mu_group_policy, + wmi_pdev_param_noise_detection, + wmi_pdev_param_noise_threshold, + wmi_pdev_param_dpd_enable, + wmi_pdev_param_set_mcast_bcast_echo, + wmi_pdev_param_atf_strict_sch, + wmi_pdev_param_atf_sched_duration, + 
wmi_pdev_param_ant_plzn, + wmi_pdev_param_mgmt_retry_limit, + wmi_pdev_param_sensitivity_level, + wmi_pdev_param_signed_txpower_2g, + wmi_pdev_param_signed_txpower_5g, + wmi_pdev_param_enable_per_tid_amsdu, + wmi_pdev_param_enable_per_tid_ampdu, + wmi_pdev_param_cca_threshold, + wmi_pdev_param_rts_fixed_rate, + wmi_pdev_param_pdev_reset, + wmi_pdev_param_wapi_mbssid_offset, + wmi_pdev_param_arp_dbg_srcaddr, + wmi_pdev_param_arp_dbg_dstaddr, + wmi_pdev_param_atf_obss_noise_sch, + wmi_pdev_param_atf_obss_noise_scaling_factor, + wmi_pdev_param_cust_txpower_scale, + wmi_pdev_param_atf_dynamic_enable, + wmi_pdev_param_ctrl_retry_limit, + wmi_pdev_param_propagation_delay, + wmi_pdev_param_ena_ant_div, + wmi_pdev_param_force_chain_ant, + wmi_pdev_param_ant_div_selftest, + wmi_pdev_param_ant_div_selftest_intvl, + wmi_pdev_param_stats_observation_period, + wmi_pdev_param_tx_ppdu_delay_bin_size_ms, + wmi_pdev_param_tx_ppdu_delay_array_len, + wmi_pdev_param_tx_mpdu_aggr_array_len, + wmi_pdev_param_rx_mpdu_aggr_array_len, + wmi_pdev_param_tx_sch_delay, + wmi_pdev_param_enable_rts_sifs_bursting, + wmi_pdev_param_max_mpdus_in_ampdu, + wmi_pdev_param_peer_stats_info_enable, + wmi_pdev_param_fast_pwr_transition, + wmi_pdev_param_radio_chan_stats_enable, + wmi_pdev_param_radio_diagnosis_enable, + wmi_pdev_param_mesh_mcast_enable, +}; + +enum wmi_tlv_vdev_param { + wmi_vdev_param_rts_threshold = 0x1, + wmi_vdev_param_fragmentation_threshold, + wmi_vdev_param_beacon_interval, + wmi_vdev_param_listen_interval, + wmi_vdev_param_multicast_rate, + wmi_vdev_param_mgmt_tx_rate, + wmi_vdev_param_slot_time, + wmi_vdev_param_preamble, + wmi_vdev_param_swba_time, + wmi_vdev_stats_update_period, + wmi_vdev_pwrsave_ageout_time, + wmi_vdev_host_swba_interval, + wmi_vdev_param_dtim_period, + wmi_vdev_oc_scheduler_air_time_limit, + wmi_vdev_param_wds, + wmi_vdev_param_atim_window, + wmi_vdev_param_bmiss_count_max, + wmi_vdev_param_bmiss_first_bcnt, + wmi_vdev_param_bmiss_final_bcnt, + 
wmi_vdev_param_feature_wmm, + wmi_vdev_param_chwidth, + wmi_vdev_param_chextoffset, + wmi_vdev_param_disable_htprotection, + wmi_vdev_param_sta_quickkickout, + wmi_vdev_param_mgmt_rate, + wmi_vdev_param_protection_mode, + wmi_vdev_param_fixed_rate, + wmi_vdev_param_sgi, + wmi_vdev_param_ldpc, + wmi_vdev_param_tx_stbc, + wmi_vdev_param_rx_stbc, + wmi_vdev_param_intra_bss_fwd, + wmi_vdev_param_def_keyid, + wmi_vdev_param_nss, + wmi_vdev_param_bcast_data_rate, + wmi_vdev_param_mcast_data_rate, + wmi_vdev_param_mcast_indicate, + wmi_vdev_param_dhcp_indicate, + wmi_vdev_param_unknown_dest_indicate, + wmi_vdev_param_ap_keepalive_min_idle_inactive_time_secs, + wmi_vdev_param_ap_keepalive_max_idle_inactive_time_secs, + wmi_vdev_param_ap_keepalive_max_unresponsive_time_secs, + wmi_vdev_param_ap_enable_nawds, + wmi_vdev_param_enable_rtscts, + wmi_vdev_param_txbf, + wmi_vdev_param_packet_powersave, + wmi_vdev_param_drop_unencry, + wmi_vdev_param_tx_encap_type, + wmi_vdev_param_ap_detect_out_of_sync_sleeping_sta_time_secs, + wmi_vdev_param_early_rx_adjust_enable, + wmi_vdev_param_early_rx_tgt_bmiss_num, + wmi_vdev_param_early_rx_bmiss_sample_cycle, + wmi_vdev_param_early_rx_slop_step, + wmi_vdev_param_early_rx_init_slop, + wmi_vdev_param_early_rx_adjust_pause, + wmi_vdev_param_tx_pwrlimit, + wmi_vdev_param_snr_num_for_cal, + wmi_vdev_param_roam_fw_offload, + wmi_vdev_param_enable_rmc, + wmi_vdev_param_ibss_max_bcn_lost_ms, + wmi_vdev_param_max_rate, + wmi_vdev_param_early_rx_drift_sample, + wmi_vdev_param_set_ibss_tx_fail_cnt_thr, + wmi_vdev_param_ebt_resync_timeout, + wmi_vdev_param_aggr_trig_event_enable, + wmi_vdev_param_is_ibss_power_save_allowed, + wmi_vdev_param_is_power_collapse_allowed, + wmi_vdev_param_is_awake_on_txrx_enabled, + wmi_vdev_param_inactivity_cnt, + wmi_vdev_param_txsp_end_inactivity_time_ms, + wmi_vdev_param_dtim_policy, + wmi_vdev_param_ibss_ps_warmup_time_secs, + wmi_vdev_param_ibss_ps_1rx_chain_in_atim_window_enable, + wmi_vdev_param_rx_leak_window, + 
wmi_vdev_param_stats_avg_factor, + wmi_vdev_param_disconnect_th, + wmi_vdev_param_rtscts_rate, + wmi_vdev_param_mcc_rtscts_protection_enable, + wmi_vdev_param_mcc_broadcast_probe_enable, + wmi_vdev_param_txpower_scale, + wmi_vdev_param_txpower_scale_decr_db, + wmi_vdev_param_mcast2ucast_set, + wmi_vdev_param_rc_num_retries, + wmi_vdev_param_cabq_maxdur, + wmi_vdev_param_mfptest_set, + wmi_vdev_param_rts_fixed_rate, + wmi_vdev_param_vht_sgimask, + wmi_vdev_param_vht80_ratemask, + wmi_vdev_param_proxy_sta, + wmi_vdev_param_virtual_cell_mode, + wmi_vdev_param_rx_decap_type, + wmi_vdev_param_bw_nss_ratemask, + wmi_vdev_param_sensor_ap, + wmi_vdev_param_beacon_rate, + wmi_vdev_param_dtim_enable_cts, + wmi_vdev_param_sta_kickout, + wmi_vdev_param_capabilities, + wmi_vdev_param_tsf_increment, + wmi_vdev_param_ampdu_per_ac, + wmi_vdev_param_rx_filter, + wmi_vdev_param_mgmt_tx_power, + wmi_vdev_param_non_agg_sw_retry_th, + wmi_vdev_param_agg_sw_retry_th, + wmi_vdev_param_disable_dyn_bw_rts, + wmi_vdev_param_atf_ssid_sched_policy, + wmi_vdev_param_he_dcm, + wmi_vdev_param_he_range_ext, + wmi_vdev_param_enable_bcast_probe_response, + wmi_vdev_param_fils_max_channel_guard_time, + wmi_vdev_param_set_he_sounding_mode = 0x87, + wmi_vdev_param_prototype = 0x8000, + wmi_vdev_param_bss_color, + wmi_vdev_param_set_hemu_mode, + wmi_vdev_param_tx_ofdma_cplen, +}; + +enum wmi_tlv_peer_flags { + wmi_tlv_peer_auth = 0x00000001, + wmi_tlv_peer_qos = 0x00000002, + wmi_tlv_peer_need_ptk_4_way = 0x00000004, + wmi_tlv_peer_need_gtk_2_way = 0x00000010, + wmi_tlv_peer_apsd = 0x00000800, + wmi_tlv_peer_ht = 0x00001000, + wmi_tlv_peer_40mhz = 0x00002000, + wmi_tlv_peer_stbc = 0x00008000, + wmi_tlv_peer_ldpc = 0x00010000, + wmi_tlv_peer_dyn_mimops = 0x00020000, + wmi_tlv_peer_static_mimops = 0x00040000, + wmi_tlv_peer_spatial_mux = 0x00200000, + wmi_tlv_peer_vht = 0x02000000, + wmi_tlv_peer_80mhz = 0x04000000, + wmi_tlv_peer_pmf = 0x08000000, + wmi_peer_is_p2p_capable = 0x20000000, + 
wmi_peer_160mhz = 0x40000000, + wmi_peer_safemode_en = 0x80000000, + +}; + +/** enum list of tlv tags for each parameter structure type. */ +enum wmi_tlv_tag { + wmi_tag_last_reserved = 15, + wmi_tag_first_array_enum, + wmi_tag_array_uint32 = wmi_tag_first_array_enum, + wmi_tag_array_byte, + wmi_tag_array_struct, + wmi_tag_array_fixed_struct, + wmi_tag_last_array_enum = 31, + wmi_tag_service_ready_event, + wmi_tag_hal_reg_capabilities, + wmi_tag_wlan_host_mem_req, + wmi_tag_ready_event, + wmi_tag_scan_event, + wmi_tag_pdev_tpc_config_event, + wmi_tag_chan_info_event, + wmi_tag_comb_phyerr_rx_hdr, + wmi_tag_vdev_start_response_event, + wmi_tag_vdev_stopped_event, + wmi_tag_vdev_install_key_complete_event, + wmi_tag_peer_sta_kickout_event, + wmi_tag_mgmt_rx_hdr, + wmi_tag_tbtt_offset_event, + wmi_tag_tx_delba_complete_event, + wmi_tag_tx_addba_complete_event, + wmi_tag_roam_event, + wmi_tag_wow_event_info, + wmi_tag_wow_event_info_section_bitmap, + wmi_tag_rtt_event_header, + wmi_tag_rtt_error_report_event, + wmi_tag_rtt_meas_event, + wmi_tag_echo_event, + wmi_tag_ftm_intg_event, + wmi_tag_vdev_get_keepalive_event, + wmi_tag_gpio_input_event, + wmi_tag_csa_event, + wmi_tag_gtk_offload_status_event, + wmi_tag_igtk_info, + wmi_tag_dcs_interference_event, + wmi_tag_ath_dcs_cw_int, + wmi_tag_wlan_dcs_cw_int = /* alias */ + wmi_tag_ath_dcs_cw_int, + wmi_tag_ath_dcs_wlan_int_stat, + wmi_tag_wlan_dcs_im_tgt_stats_t = /* alias */ + wmi_tag_ath_dcs_wlan_int_stat, + wmi_tag_wlan_profile_ctx_t, + wmi_tag_wlan_profile_t, + wmi_tag_pdev_qvit_event, + wmi_tag_host_swba_event, + wmi_tag_tim_info, + wmi_tag_p2p_noa_info, + wmi_tag_stats_event, + wmi_tag_avoid_freq_ranges_event, + wmi_tag_avoid_freq_range_desc, + wmi_tag_gtk_rekey_fail_event, + wmi_tag_init_cmd, + wmi_tag_resource_config, + wmi_tag_wlan_host_memory_chunk, + wmi_tag_start_scan_cmd, + wmi_tag_stop_scan_cmd, + wmi_tag_scan_chan_list_cmd, + wmi_tag_channel, + wmi_tag_pdev_set_regdomain_cmd, + wmi_tag_pdev_set_param_cmd, 
+ wmi_tag_pdev_set_wmm_params_cmd, + wmi_tag_wmm_params, + wmi_tag_pdev_set_quiet_cmd, + wmi_tag_vdev_create_cmd, + wmi_tag_vdev_delete_cmd, + wmi_tag_vdev_start_request_cmd, + wmi_tag_p2p_noa_descriptor, + wmi_tag_p2p_go_set_beacon_ie, + wmi_tag_gtk_offload_cmd, + wmi_tag_vdev_up_cmd, + wmi_tag_vdev_stop_cmd, + wmi_tag_vdev_down_cmd, + wmi_tag_vdev_set_param_cmd, + wmi_tag_vdev_install_key_cmd, + wmi_tag_peer_create_cmd, + wmi_tag_peer_delete_cmd, + wmi_tag_peer_flush_tids_cmd, + wmi_tag_peer_set_param_cmd, + wmi_tag_peer_assoc_complete_cmd, + wmi_tag_vht_rate_set, + wmi_tag_bcn_tmpl_cmd, + wmi_tag_prb_tmpl_cmd, + wmi_tag_bcn_prb_info, + wmi_tag_peer_tid_addba_cmd, + wmi_tag_peer_tid_delba_cmd, + wmi_tag_sta_powersave_mode_cmd, + wmi_tag_sta_powersave_param_cmd, + wmi_tag_sta_dtim_ps_method_cmd, + wmi_tag_roam_scan_mode, + wmi_tag_roam_scan_rssi_threshold, + wmi_tag_roam_scan_period, + wmi_tag_roam_scan_rssi_change_threshold, + wmi_tag_pdev_suspend_cmd, + wmi_tag_pdev_resume_cmd, + wmi_tag_add_bcn_filter_cmd, + wmi_tag_rmv_bcn_filter_cmd, + wmi_tag_wow_enable_cmd, + wmi_tag_wow_hostwakeup_from_sleep_cmd, + wmi_tag_sta_uapsd_auto_trig_cmd, + wmi_tag_sta_uapsd_auto_trig_param, + wmi_tag_set_arp_ns_offload_cmd, + wmi_tag_arp_offload_tuple, + wmi_tag_ns_offload_tuple, + wmi_tag_ftm_intg_cmd, + wmi_tag_sta_keepalive_cmd, + wmi_tag_sta_keepalve_arp_response, + wmi_tag_p2p_set_vendor_ie_data_cmd, + wmi_tag_ap_ps_peer_cmd, + wmi_tag_peer_rate_retry_sched_cmd, + wmi_tag_wlan_profile_trigger_cmd, + wmi_tag_wlan_profile_set_hist_intvl_cmd, + wmi_tag_wlan_profile_get_prof_data_cmd, + wmi_tag_wlan_profile_enable_profile_id_cmd, + wmi_tag_wow_del_pattern_cmd, + wmi_tag_wow_add_del_evt_cmd, + wmi_tag_rtt_measreq_head, + wmi_tag_rtt_measreq_body, + wmi_tag_rtt_tsf_cmd, + wmi_tag_vdev_spectral_configure_cmd, + wmi_tag_vdev_spectral_enable_cmd, + wmi_tag_request_stats_cmd, + wmi_tag_nlo_config_cmd, + wmi_tag_nlo_configured_parameters, + wmi_tag_csa_offload_enable_cmd, + 
wmi_tag_csa_offload_chanswitch_cmd, + wmi_tag_chatter_set_mode_cmd, + wmi_tag_echo_cmd, + wmi_tag_vdev_set_keepalive_cmd, + wmi_tag_vdev_get_keepalive_cmd, + wmi_tag_force_fw_hang_cmd, + wmi_tag_gpio_config_cmd, + wmi_tag_gpio_output_cmd, + wmi_tag_peer_add_wds_entry_cmd, + wmi_tag_peer_remove_wds_entry_cmd, + wmi_tag_bcn_tx_hdr, + wmi_tag_bcn_send_from_host_cmd, + wmi_tag_mgmt_tx_hdr, + wmi_tag_addba_clear_resp_cmd, + wmi_tag_addba_send_cmd, + wmi_tag_delba_send_cmd, + wmi_tag_addba_setresponse_cmd, + wmi_tag_send_singleamsdu_cmd, + wmi_tag_pdev_pktlog_enable_cmd, + wmi_tag_pdev_pktlog_disable_cmd, + wmi_tag_pdev_set_ht_ie_cmd, + wmi_tag_pdev_set_vht_ie_cmd, + wmi_tag_pdev_set_dscp_tid_map_cmd, + wmi_tag_pdev_green_ap_ps_enable_cmd, + wmi_tag_pdev_get_tpc_config_cmd, + wmi_tag_pdev_set_base_macaddr_cmd, + wmi_tag_peer_mcast_group_cmd, + wmi_tag_roam_ap_profile, + wmi_tag_ap_profile, + wmi_tag_scan_sch_priority_table_cmd, + wmi_tag_pdev_dfs_enable_cmd, + wmi_tag_pdev_dfs_disable_cmd, + wmi_tag_wow_add_pattern_cmd, + wmi_tag_wow_bitmap_pattern_t, + wmi_tag_wow_ipv4_sync_pattern_t, + wmi_tag_wow_ipv6_sync_pattern_t, + wmi_tag_wow_magic_pattern_cmd, + wmi_tag_scan_update_request_cmd, + wmi_tag_chatter_pkt_coalescing_filter, + wmi_tag_chatter_coalescing_add_filter_cmd, + wmi_tag_chatter_coalescing_delete_filter_cmd, + wmi_tag_chatter_coalescing_query_cmd, + wmi_tag_txbf_cmd, + wmi_tag_debug_log_config_cmd, + wmi_tag_nlo_event, + wmi_tag_chatter_query_reply_event, + wmi_tag_upload_h_hdr, + wmi_tag_capture_h_event_hdr, + wmi_tag_vdev_wnm_sleepmode_cmd, + wmi_tag_vdev_ipsec_natkeepalive_filter_cmd, + wmi_tag_vdev_wmm_addts_cmd, + wmi_tag_vdev_wmm_delts_cmd, + wmi_tag_vdev_set_wmm_params_cmd, + wmi_tag_tdls_set_state_cmd, + wmi_tag_tdls_peer_update_cmd, + wmi_tag_tdls_peer_event, + wmi_tag_tdls_peer_capabilities, + wmi_tag_vdev_mcc_set_tbtt_mode_cmd, + wmi_tag_roam_chan_list, + wmi_tag_vdev_mcc_bcn_intvl_change_event, + wmi_tag_resmgr_adaptive_ocs_enable_disable_cmd, + 
wmi_tag_resmgr_set_chan_time_quota_cmd, + wmi_tag_resmgr_set_chan_latency_cmd, + wmi_tag_ba_req_ssn_cmd, + wmi_tag_ba_rsp_ssn_event, + wmi_tag_sta_smps_force_mode_cmd, + wmi_tag_set_mcastbcast_filter_cmd, + wmi_tag_p2p_set_oppps_cmd, + wmi_tag_p2p_set_noa_cmd, + wmi_tag_ba_req_ssn_cmd_sub_struct_param, + wmi_tag_ba_req_ssn_event_sub_struct_param, + wmi_tag_sta_smps_param_cmd, + wmi_tag_vdev_set_gtx_params_cmd, + wmi_tag_mcc_sched_traffic_stats_cmd, + wmi_tag_mcc_sched_sta_traffic_stats, + wmi_tag_offload_bcn_tx_status_event, + wmi_tag_p2p_noa_event, + wmi_tag_hb_set_enable_cmd, + wmi_tag_hb_set_tcp_params_cmd, + wmi_tag_hb_set_tcp_pkt_filter_cmd, + wmi_tag_hb_set_udp_params_cmd, + wmi_tag_hb_set_udp_pkt_filter_cmd, + wmi_tag_hb_ind_event, + wmi_tag_tx_pause_event, + wmi_tag_rfkill_event, + wmi_tag_dfs_radar_event, + wmi_tag_dfs_phyerr_filter_ena_cmd, + wmi_tag_dfs_phyerr_filter_dis_cmd, + wmi_tag_batch_scan_result_scan_list, + wmi_tag_batch_scan_result_network_info, + wmi_tag_batch_scan_enable_cmd, + wmi_tag_batch_scan_disable_cmd, + wmi_tag_batch_scan_trigger_result_cmd, + wmi_tag_batch_scan_enabled_event, + wmi_tag_batch_scan_result_event, + wmi_tag_vdev_plmreq_start_cmd, + wmi_tag_vdev_plmreq_stop_cmd, + wmi_tag_thermal_mgmt_cmd, + wmi_tag_thermal_mgmt_event, + wmi_tag_peer_info_req_cmd, + wmi_tag_peer_info_event, + wmi_tag_peer_info, + wmi_tag_peer_tx_fail_cnt_thr_event, + wmi_tag_rmc_set_mode_cmd, + wmi_tag_rmc_set_action_period_cmd, + wmi_tag_rmc_config_cmd, + wmi_tag_mhf_offload_set_mode_cmd, + wmi_tag_mhf_offload_plumb_routing_table_cmd, + wmi_tag_add_proactive_arp_rsp_pattern_cmd, + wmi_tag_del_proactive_arp_rsp_pattern_cmd, + wmi_tag_nan_cmd_param, + wmi_tag_nan_event_hdr, + wmi_tag_pdev_l1ss_track_event, + wmi_tag_diag_data_container_event, + wmi_tag_modem_power_state_cmd_param, + wmi_tag_peer_get_estimated_linkspeed_cmd, + wmi_tag_peer_estimated_linkspeed_event, + wmi_tag_aggr_state_trig_event, + wmi_tag_mhf_offload_routing_table_entry, + 
wmi_tag_roam_scan_cmd, + wmi_tag_req_stats_ext_cmd, + wmi_tag_stats_ext_event, + wmi_tag_obss_scan_enable_cmd, + wmi_tag_obss_scan_disable_cmd, + wmi_tag_offload_prb_rsp_tx_status_event, + wmi_tag_pdev_set_led_config_cmd, + wmi_tag_host_auto_shutdown_cfg_cmd, + wmi_tag_host_auto_shutdown_event, + wmi_tag_update_whal_mib_stats_event, + wmi_tag_chan_avoid_update_cmd_param, + wmi_tag_wow_ioac_pkt_pattern_t, + wmi_tag_wow_ioac_tmr_pattern_t, + wmi_tag_wow_ioac_add_keepalive_cmd, + wmi_tag_wow_ioac_del_keepalive_cmd, + wmi_tag_wow_ioac_keepalive_t, + wmi_tag_wow_ioac_add_pattern_cmd, + wmi_tag_wow_ioac_del_pattern_cmd, + wmi_tag_start_link_stats_cmd, + wmi_tag_clear_link_stats_cmd, + wmi_tag_request_link_stats_cmd, + wmi_tag_iface_link_stats_event, + wmi_tag_radio_link_stats_event, + wmi_tag_peer_stats_event, + wmi_tag_channel_stats, + wmi_tag_radio_link_stats, + wmi_tag_rate_stats, + wmi_tag_peer_link_stats, + wmi_tag_wmm_ac_stats, + wmi_tag_iface_link_stats, + wmi_tag_lpi_mgmt_snooping_config_cmd, + wmi_tag_lpi_start_scan_cmd, + wmi_tag_lpi_stop_scan_cmd, + wmi_tag_lpi_result_event, + wmi_tag_peer_state_event, + wmi_tag_extscan_bucket_cmd, + wmi_tag_extscan_bucket_channel_event, + wmi_tag_extscan_start_cmd, + wmi_tag_extscan_stop_cmd, + wmi_tag_extscan_configure_wlan_change_monitor_cmd, + wmi_tag_extscan_wlan_change_bssid_param_cmd, + wmi_tag_extscan_configure_hotlist_monitor_cmd, + wmi_tag_extscan_get_cached_results_cmd, + wmi_tag_extscan_get_wlan_change_results_cmd, + wmi_tag_extscan_set_capabilities_cmd, + wmi_tag_extscan_get_capabilities_cmd, + wmi_tag_extscan_operation_event, + wmi_tag_extscan_start_stop_event, + wmi_tag_extscan_table_usage_event, + wmi_tag_extscan_wlan_descriptor_event, + wmi_tag_extscan_rssi_info_event, + wmi_tag_extscan_cached_results_event, + wmi_tag_extscan_wlan_change_results_event, + wmi_tag_extscan_wlan_change_result_bssid_event, + wmi_tag_extscan_hotlist_match_event, + wmi_tag_extscan_capabilities_event, + 
wmi_tag_extscan_cache_capabilities_event, + wmi_tag_extscan_wlan_change_monitor_capabilities_event, + wmi_tag_extscan_hotlist_monitor_capabilities_event, + wmi_tag_d0_wow_enable_disable_cmd, + wmi_tag_d0_wow_disable_ack_event, + wmi_tag_unit_test_cmd, + wmi_tag_roam_offload_tlv_param, + wmi_tag_roam_11i_offload_tlv_param, + wmi_tag_roam_11r_offload_tlv_param, + wmi_tag_roam_ese_offload_tlv_param, + wmi_tag_roam_synch_event, + wmi_tag_roam_synch_complete, + wmi_tag_extwow_enable_cmd, + wmi_tag_extwow_set_app_type1_params_cmd, + wmi_tag_extwow_set_app_type2_params_cmd, + wmi_tag_lpi_status_event, + wmi_tag_lpi_handoff_event, + wmi_tag_vdev_rate_stats_event, + wmi_tag_vdev_rate_ht_info, + wmi_tag_ric_request, + wmi_tag_pdev_get_temperature_cmd, + wmi_tag_pdev_temperature_event, + wmi_tag_set_dhcp_server_offload_cmd, + wmi_tag_tpc_chainmask_config_cmd, + wmi_tag_ric_tspec, + wmi_tag_tpc_chainmask_config, + wmi_tag_ipa_offload_enable_disable_cmd, + wmi_tag_scan_prob_req_oui_cmd, + wmi_tag_key_material, + wmi_tag_tdls_set_offchan_mode_cmd, + wmi_tag_set_led_flashing_cmd, + wmi_tag_mdns_offload_cmd, + wmi_tag_mdns_set_fqdn_cmd, + wmi_tag_mdns_set_resp_cmd, + wmi_tag_mdns_get_stats_cmd, + wmi_tag_mdns_stats_event, + wmi_tag_roam_invoke_cmd, + wmi_tag_pdev_resume_event, + wmi_tag_pdev_set_antenna_diversity_cmd, + wmi_tag_sap_ofl_enable_cmd, + wmi_tag_sap_ofl_add_sta_event, + wmi_tag_sap_ofl_del_sta_event, + wmi_tag_apfind_cmd_param, + wmi_tag_apfind_event_hdr, + wmi_tag_ocb_set_sched_cmd, + wmi_tag_ocb_set_sched_event, + wmi_tag_ocb_set_config_cmd, + wmi_tag_ocb_set_config_resp_event, + wmi_tag_ocb_set_utc_time_cmd, + wmi_tag_ocb_start_timing_advert_cmd, + wmi_tag_ocb_stop_timing_advert_cmd, + wmi_tag_ocb_get_tsf_timer_cmd, + wmi_tag_ocb_get_tsf_timer_resp_event, + wmi_tag_dcc_get_stats_cmd, + wmi_tag_dcc_channel_stats_request, + wmi_tag_dcc_get_stats_resp_event, + wmi_tag_dcc_clear_stats_cmd, + wmi_tag_dcc_update_ndl_cmd, + wmi_tag_dcc_update_ndl_resp_event, + 
wmi_tag_dcc_stats_event, + wmi_tag_ocb_channel, + wmi_tag_ocb_schedule_element, + wmi_tag_dcc_ndl_stats_per_channel, + wmi_tag_dcc_ndl_chan, + wmi_tag_qos_parameter, + wmi_tag_dcc_ndl_active_state_config, + wmi_tag_roam_scan_extended_threshold_param, + wmi_tag_roam_filter, + wmi_tag_passpoint_config_cmd, + wmi_tag_passpoint_event_hdr, + wmi_tag_extscan_configure_hotlist_ssid_monitor_cmd, + wmi_tag_extscan_hotlist_ssid_match_event, + wmi_tag_vdev_tsf_tstamp_action_cmd, + wmi_tag_vdev_tsf_report_event, + wmi_tag_get_fw_mem_dump, + wmi_tag_update_fw_mem_dump, + wmi_tag_fw_mem_dump_params, + wmi_tag_debug_mesg_flush, + wmi_tag_debug_mesg_flush_complete, + wmi_tag_peer_set_rate_report_condition, + wmi_tag_roam_subnet_change_config, + wmi_tag_vdev_set_ie_cmd, + wmi_tag_rssi_breach_monitor_config, + wmi_tag_rssi_breach_event, + wmi_tag_wow_event_initial_wakeup, + wmi_tag_soc_set_pcl_cmd, + wmi_tag_soc_set_hw_mode_cmd, + wmi_tag_soc_set_hw_mode_response_event, + wmi_tag_soc_hw_mode_transition_event, + wmi_tag_vdev_txrx_streams, + wmi_tag_soc_set_hw_mode_response_vdev_mac_entry, + wmi_tag_soc_set_dual_mac_config_cmd, + wmi_tag_soc_set_dual_mac_config_response_event, + wmi_tag_wow_ioac_sock_pattern_t, + wmi_tag_wow_enable_icmpv6_na_flt_cmd, + wmi_tag_diag_event_log_config, + wmi_tag_diag_event_log_supported_event_fixed_params, + wmi_tag_packet_filter_config, + wmi_tag_packet_filter_enable, + wmi_tag_sap_set_blacklist_param_cmd, + wmi_tag_mgmt_tx_send_cmd, + wmi_tag_mgmt_tx_compl_event, + wmi_tag_soc_set_antenna_mode_cmd, + wmi_tag_wow_udp_svc_ofld_cmd, + wmi_tag_lro_info_cmd, + wmi_tag_roam_earlystop_rssi_thres_param, + wmi_tag_service_ready_ext_event, + wmi_tag_mawc_sensor_report_ind_cmd, + wmi_tag_mawc_enable_sensor_event, + wmi_tag_roam_configure_mawc_cmd, + wmi_tag_nlo_configure_mawc_cmd, + wmi_tag_extscan_configure_mawc_cmd, + wmi_tag_peer_assoc_conf_event, + wmi_tag_wow_hostwakeup_gpio_pin_pattern_config_cmd, + wmi_tag_ap_ps_egap_param_cmd, + 
wmi_tag_ap_ps_egap_info_event, + wmi_tag_pmf_offload_set_sa_query_cmd, + wmi_tag_transfer_data_to_flash_cmd, + wmi_tag_transfer_data_to_flash_complete_event, + wmi_tag_scpc_event, + wmi_tag_ap_ps_egap_info_chainmask_list, + wmi_tag_sta_smps_force_mode_complete_event, + wmi_tag_bpf_get_capability_cmd, + wmi_tag_bpf_capability_info_evt, + wmi_tag_bpf_get_vdev_stats_cmd, + wmi_tag_bpf_vdev_stats_info_evt, + wmi_tag_bpf_set_vdev_instructions_cmd, + wmi_tag_bpf_del_vdev_instructions_cmd, + wmi_tag_vdev_delete_resp_event, + wmi_tag_peer_delete_resp_event, + wmi_tag_roam_dense_thres_param, + wmi_tag_enlo_candidate_score_param, + wmi_tag_peer_update_wds_entry_cmd, + wmi_tag_vdev_config_ratemask, + wmi_tag_pdev_fips_cmd, + wmi_tag_pdev_smart_ant_enable_cmd, + wmi_tag_pdev_smart_ant_set_rx_antenna_cmd, + wmi_tag_peer_smart_ant_set_tx_antenna_cmd, + wmi_tag_peer_smart_ant_set_train_antenna_cmd, + wmi_tag_peer_smart_ant_set_node_config_ops_cmd, + wmi_tag_pdev_set_ant_switch_tbl_cmd, + wmi_tag_pdev_set_ctl_table_cmd, + wmi_tag_pdev_set_mimogain_table_cmd, + wmi_tag_fwtest_set_param_cmd, + wmi_tag_peer_atf_request, + wmi_tag_vdev_atf_request, + wmi_tag_pdev_get_ani_cck_config_cmd, + wmi_tag_pdev_get_ani_ofdm_config_cmd, + wmi_tag_inst_rssi_stats_resp, + wmi_tag_med_util_report_event, + wmi_tag_peer_sta_ps_statechange_event, + wmi_tag_wds_addr_event, + wmi_tag_peer_ratecode_list_event, + wmi_tag_pdev_nfcal_power_all_channels_event, + wmi_tag_pdev_tpc_event, + wmi_tag_ani_ofdm_event, + wmi_tag_ani_cck_event, + wmi_tag_pdev_channel_hopping_event, + wmi_tag_pdev_fips_event, + wmi_tag_atf_peer_info, + wmi_tag_pdev_get_tpc_cmd, + wmi_tag_vdev_filter_nrp_config_cmd, + wmi_tag_qboost_cfg_cmd, + wmi_tag_pdev_smart_ant_gpio_handle, + wmi_tag_peer_smart_ant_set_tx_antenna_series, + wmi_tag_peer_smart_ant_set_train_antenna_param, + wmi_tag_pdev_set_ant_ctrl_chain, + wmi_tag_peer_cck_ofdm_rate_info, + wmi_tag_peer_mcs_rate_info, + wmi_tag_pdev_nfcal_power_all_channels_nfdbr, + 
wmi_tag_pdev_nfcal_power_all_channels_nfdbm, + wmi_tag_pdev_nfcal_power_all_channels_freqnum, + wmi_tag_mu_report_total_mu, + wmi_tag_vdev_set_dscp_tid_map_cmd, + wmi_tag_roam_set_mbo, + wmi_tag_mib_stats_enable_cmd, + wmi_tag_nan_disc_iface_created_event, + wmi_tag_nan_disc_iface_deleted_event, + wmi_tag_nan_started_cluster_event, + wmi_tag_nan_joined_cluster_event, + wmi_tag_ndi_get_cap_req, + wmi_tag_ndp_initiator_req, + wmi_tag_ndp_responder_req, + wmi_tag_ndp_end_req, + wmi_tag_ndi_cap_rsp_event, + wmi_tag_ndp_initiator_rsp_event, + wmi_tag_ndp_responder_rsp_event, + wmi_tag_ndp_end_rsp_event, + wmi_tag_ndp_indication_event, + wmi_tag_ndp_confirm_event, + wmi_tag_ndp_end_indication_event, + wmi_tag_vdev_set_quiet_cmd, + wmi_tag_pdev_set_pcl_cmd, + wmi_tag_pdev_set_hw_mode_cmd, + wmi_tag_pdev_set_mac_config_cmd, + wmi_tag_pdev_set_antenna_mode_cmd, + wmi_tag_pdev_set_hw_mode_response_event, + wmi_tag_pdev_hw_mode_transition_event, + wmi_tag_pdev_set_hw_mode_response_vdev_mac_entry, + wmi_tag_pdev_set_mac_config_response_event, + wmi_tag_coex_config_cmd, + wmi_tag_config_enhanced_mcast_filter, + wmi_tag_chan_avoid_rpt_allow_cmd, + wmi_tag_set_periodic_channel_stats_config, + wmi_tag_vdev_set_custom_aggr_size_cmd, + wmi_tag_pdev_wal_power_debug_cmd, + wmi_tag_mac_phy_capabilities, + wmi_tag_hw_mode_capabilities, + wmi_tag_soc_mac_phy_hw_mode_caps, + wmi_tag_hal_reg_capabilities_ext, + wmi_tag_soc_hal_reg_capabilities, + wmi_tag_vdev_wisa_cmd, + wmi_tag_tx_power_level_stats_evt, + wmi_tag_scan_adaptive_dwell_parameters_tlv, + wmi_tag_scan_adaptive_dwell_config, + wmi_tag_wow_set_action_wake_up_cmd, + wmi_tag_ndp_end_rsp_per_ndi, + wmi_tag_peer_bwf_request, + wmi_tag_bwf_peer_info, + wmi_tag_dbglog_time_stamp_sync_cmd, + wmi_tag_rmc_set_leader_cmd, + wmi_tag_rmc_manual_leader_event, + wmi_tag_per_chain_rssi_stats, + wmi_tag_rssi_stats, + wmi_tag_p2p_lo_start_cmd, + wmi_tag_p2p_lo_stop_cmd, + wmi_tag_p2p_lo_stopped_event, + wmi_tag_reorder_queue_setup_cmd, + 
wmi_tag_reorder_queue_remove_cmd, + wmi_tag_set_multiple_mcast_filter_cmd, + wmi_tag_mgmt_tx_compl_bundle_event, + wmi_tag_read_data_from_flash_cmd, + wmi_tag_read_data_from_flash_event, + wmi_tag_pdev_set_reorder_timeout_val_cmd, + wmi_tag_peer_set_rx_blocksize_cmd, + wmi_tag_pdev_set_wakeup_config_cmdid, + wmi_tag_tlv_buf_len_param, + wmi_tag_service_available_event, + wmi_tag_peer_antdiv_info_req_cmd, + wmi_tag_peer_antdiv_info_event, + wmi_tag_peer_antdiv_info, + wmi_tag_pdev_get_antdiv_status_cmd, + wmi_tag_pdev_antdiv_status_event, + wmi_tag_mnt_filter_cmd, + wmi_tag_get_chip_power_stats_cmd, + wmi_tag_pdev_chip_power_stats_event, + wmi_tag_coex_get_antenna_isolation_cmd, + wmi_tag_coex_report_isolation_event, + wmi_tag_chan_cca_stats, + wmi_tag_peer_signal_stats, + wmi_tag_tx_stats, + wmi_tag_peer_ac_tx_stats, + wmi_tag_rx_stats, + wmi_tag_peer_ac_rx_stats, + wmi_tag_report_stats_event, + wmi_tag_chan_cca_stats_thresh, + wmi_tag_peer_signal_stats_thresh, + wmi_tag_tx_stats_thresh, + wmi_tag_rx_stats_thresh, + wmi_tag_pdev_set_stats_threshold_cmd, + wmi_tag_request_wlan_stats_cmd, + wmi_tag_rx_aggr_failure_event, + wmi_tag_rx_aggr_failure_info, + wmi_tag_vdev_encrypt_decrypt_data_req_cmd, + wmi_tag_vdev_encrypt_decrypt_data_resp_event, + wmi_tag_pdev_band_to_mac, + wmi_tag_tbtt_offset_info, + wmi_tag_tbtt_offset_ext_event, + wmi_tag_sar_limits_cmd, + wmi_tag_sar_limit_cmd_row, + wmi_tag_pdev_dfs_phyerr_offload_enable_cmd, + wmi_tag_pdev_dfs_phyerr_offload_disable_cmd, + wmi_tag_vdev_adfs_ch_cfg_cmd, + wmi_tag_vdev_adfs_ocac_abort_cmd, + wmi_tag_pdev_dfs_radar_detection_event, + wmi_tag_vdev_adfs_ocac_complete_event, + wmi_tag_vdev_dfs_cac_complete_event, + wmi_tag_vendor_oui, + wmi_tag_request_rcpi_cmd, + wmi_tag_update_rcpi_event, + wmi_tag_request_peer_stats_info_cmd, + wmi_tag_peer_stats_info, + wmi_tag_peer_stats_info_event, + wmi_tag_pkgid_event, + wmi_tag_connected_nlo_rssi_params, + wmi_tag_set_current_country_cmd, + wmi_tag_regulatory_rule_struct, + 
wmi_tag_reg_chan_list_cc_event, + wmi_tag_11d_scan_start_cmd, + wmi_tag_11d_scan_stop_cmd, + wmi_tag_11d_new_country_event, + wmi_tag_request_radio_chan_stats_cmd, + wmi_tag_radio_chan_stats, + wmi_tag_radio_chan_stats_event, + wmi_tag_roam_per_config, + wmi_tag_vdev_add_mac_addr_to_rx_filter_cmd, + wmi_tag_vdev_add_mac_addr_to_rx_filter_status_event, + wmi_tag_bpf_set_vdev_active_mode_cmd, + wmi_tag_hw_data_filter_cmd, + wmi_tag_connected_nlo_bss_band_rssi_pref, + wmi_tag_peer_oper_mode_change_event, + wmi_tag_chip_power_save_failure_detected, + wmi_tag_pdev_multiple_vdev_restart_request_cmd, + wmi_tag_pdev_csa_switch_count_status_event, + wmi_tag_pdev_update_pkt_routing_cmd, + wmi_tag_pdev_check_cal_version_cmd, + wmi_tag_pdev_check_cal_version_event, + wmi_tag_pdev_set_diversity_gain_cmd, + wmi_tag_mac_phy_chainmask_combo, + wmi_tag_mac_phy_chainmask_capability, + wmi_tag_vdev_set_arp_stats_cmd, + wmi_tag_vdev_get_arp_stats_cmd, + wmi_tag_vdev_get_arp_stats_event, + wmi_tag_iface_offload_stats, + wmi_tag_request_stats_cmd_sub_struct_param, + wmi_tag_rssi_ctl_ext, + wmi_tag_single_phyerr_ext_rx_hdr, + wmi_tag_coex_bt_activity_event, + wmi_tag_vdev_get_tx_power_cmd, + wmi_tag_vdev_tx_power_event, + wmi_tag_offchan_data_tx_compl_event, + wmi_tag_offchan_data_tx_send_cmd, + wmi_tag_tx_send_params, + wmi_tag_he_rate_set, + wmi_tag_congestion_stats, + wmi_tag_set_init_country_cmd, + wmi_tag_scan_dbs_duty_cycle, + wmi_tag_scan_dbs_duty_cycle_param_tlv, + wmi_tag_pdev_div_get_rssi_antid, + wmi_tag_therm_throt_config_request, + wmi_tag_therm_throt_level_config_info, + wmi_tag_therm_throt_stats_event, + wmi_tag_therm_throt_level_stats_info, + wmi_tag_pdev_div_rssi_antid_event, + wmi_tag_oem_dma_ring_capabilities, + wmi_tag_oem_dma_ring_cfg_req, + wmi_tag_oem_dma_ring_cfg_rsp, + wmi_tag_oem_indirect_data, + wmi_tag_oem_dma_buf_release, + wmi_tag_oem_dma_buf_release_entry, + wmi_tag_pdev_bss_chan_info_request, + wmi_tag_pdev_bss_chan_info_event, + 
wmi_tag_roam_lca_disallow_config, + wmi_tag_vdev_limit_offchan_cmd, + wmi_tag_roam_rssi_rejection_oce_config, + wmi_tag_unit_test_event, + wmi_tag_roam_fils_offload, + wmi_tag_pdev_update_pmk_cache_cmd, + wmi_tag_pmk_cache, + wmi_tag_pdev_update_fils_hlp_pkt_cmd, + wmi_tag_roam_fils_synch, + wmi_tag_gtk_offload_extended, + wmi_tag_roam_bg_scan_roaming, + wmi_tag_oic_ping_offload_params_cmd, + wmi_tag_oic_ping_offload_set_enable_cmd, + wmi_tag_oic_ping_handoff_event, + wmi_tag_dhcp_lease_renew_offload_cmd, + wmi_tag_dhcp_lease_renew_event, + wmi_tag_btm_config, + wmi_tag_debug_mesg_fw_data_stall, + wmi_tag_wlm_config_cmd, + wmi_tag_pdev_update_ctltable_request, + wmi_tag_pdev_update_ctltable_event, + wmi_tag_roam_cnd_scoring_param, + wmi_tag_pdev_config_vendor_oui_action, + wmi_tag_vendor_oui_ext, + wmi_tag_roam_synch_frame_event, + wmi_tag_fd_send_from_host_cmd, + wmi_tag_enable_fils_cmd, + wmi_tag_host_swfda_event, + wmi_tag_bcn_offload_ctrl_cmd, + wmi_tag_pdev_set_ac_tx_queue_optimized_cmd, + wmi_tag_stats_period, + wmi_tag_ndl_schedule_update, + wmi_tag_peer_tid_msduq_qdepth_thresh_update_cmd, + wmi_tag_msduq_qdepth_thresh_update, + wmi_tag_pdev_set_rx_filter_promiscuous_cmd, + wmi_tag_sar2_result_event, + wmi_tag_sar_capabilities, + wmi_tag_sap_obss_detection_cfg_cmd, + wmi_tag_sap_obss_detection_info_evt, + wmi_tag_dma_ring_capabilities, + wmi_tag_dma_ring_cfg_req, + wmi_tag_dma_ring_cfg_rsp, + wmi_tag_dma_buf_release, + wmi_tag_dma_buf_release_entry, + wmi_tag_sar_get_limits_cmd, + wmi_tag_sar_get_limits_event, + wmi_tag_sar_get_limits_event_row, + wmi_tag_offload_11k_report, + wmi_tag_invoke_neighbor_report, + wmi_tag_neighbor_report_offload, + wmi_tag_vdev_set_connectivity_check_stats, + wmi_tag_vdev_get_connectivity_check_stats, + wmi_tag_bpf_set_vdev_enable_cmd, + wmi_tag_bpf_set_vdev_work_memory_cmd, + wmi_tag_bpf_get_vdev_work_memory_cmd, + wmi_tag_bpf_get_vdev_work_memory_resp_evt, + wmi_tag_pdev_get_nfcal_power, + wmi_tag_bss_color_change_enable, + 
wmi_tag_obss_color_collision_det_config, + wmi_tag_obss_color_collision_evt, + wmi_tag_runtime_dpd_recal_cmd, + wmi_tag_twt_enable_cmd, + wmi_tag_twt_disable_cmd, + wmi_tag_twt_add_dialog_cmd, + wmi_tag_twt_del_dialog_cmd, + wmi_tag_twt_pause_dialog_cmd, + wmi_tag_twt_resume_dialog_cmd, + wmi_tag_twt_enable_complete_event, + wmi_tag_twt_disable_complete_event, + wmi_tag_twt_add_dialog_complete_event, + wmi_tag_twt_del_dialog_complete_event, + wmi_tag_twt_pause_dialog_complete_event, + wmi_tag_twt_resume_dialog_complete_event, + wmi_tag_request_roam_scan_stats_cmd, + wmi_tag_roam_scan_stats_event, + wmi_tag_peer_tid_configurations_cmd, + wmi_tag_vdev_set_custom_sw_retry_th_cmd, + wmi_tag_get_tpc_power_cmd, + wmi_tag_get_tpc_power_event, + wmi_tag_dma_buf_release_spectral_meta_data, + wmi_tag_motion_det_config_params_cmd, + wmi_tag_motion_det_base_line_config_params_cmd, + wmi_tag_motion_det_start_stop_cmd, + wmi_tag_motion_det_base_line_start_stop_cmd, + wmi_tag_motion_det_event, + wmi_tag_motion_det_base_line_event, + wmi_tag_ndp_transport_ip, + wmi_tag_obss_spatial_reuse_set_cmd, + wmi_tag_esp_estimate_event, + wmi_tag_nan_host_config, + wmi_tag_spectral_bin_scaling_params, + wmi_tag_peer_cfr_capture_cmd, + wmi_tag_peer_chan_width_switch_cmd, + wmi_tag_chan_width_peer_list, + wmi_tag_obss_spatial_reuse_set_def_obss_thresh_cmd, + wmi_tag_pdev_he_tb_action_frm_cmd, + wmi_tag_peer_extd2_stats, + wmi_tag_hpcs_pulse_start_cmd, + wmi_tag_pdev_ctl_failsafe_check_event, + wmi_tag_vdev_chainmask_config_cmd, + wmi_tag_vdev_bcn_offload_quiet_config_cmd, + wmi_tag_nan_event_info, + wmi_tag_ndp_channel_info, + wmi_tag_ndp_cmd, + wmi_tag_ndp_event, + /* todo add all the missing cmds */ + wmi_tag_pdev_peer_pktlog_filter_cmd = 0x301, + wmi_tag_pdev_peer_pktlog_filter_info, + wmi_tag_max +}; + +enum wmi_tlv_service { + wmi_tlv_service_beacon_offload = 0, + wmi_tlv_service_scan_offload = 1, + wmi_tlv_service_roam_scan_offload = 2, + wmi_tlv_service_bcn_miss_offload = 3, + 
wmi_tlv_service_sta_pwrsave = 4, + wmi_tlv_service_sta_advanced_pwrsave = 5, + wmi_tlv_service_ap_uapsd = 6, + wmi_tlv_service_ap_dfs = 7, + wmi_tlv_service_11ac = 8, + wmi_tlv_service_blockack = 9, + wmi_tlv_service_phyerr = 10, + wmi_tlv_service_bcn_filter = 11, + wmi_tlv_service_rtt = 12, + wmi_tlv_service_wow = 13, + wmi_tlv_service_ratectrl_cache = 14, + wmi_tlv_service_iram_tids = 15, + wmi_tlv_service_arpns_offload = 16, + wmi_tlv_service_nlo = 17, + wmi_tlv_service_gtk_offload = 18, + wmi_tlv_service_scan_sch = 19, + wmi_tlv_service_csa_offload = 20, + wmi_tlv_service_chatter = 21, + wmi_tlv_service_coex_freqavoid = 22, + wmi_tlv_service_packet_power_save = 23, + wmi_tlv_service_force_fw_hang = 24, + wmi_tlv_service_gpio = 25, + wmi_tlv_service_sta_dtim_ps_modulated_dtim = 26, + wmi_sta_uapsd_basic_auto_trig = 27, + wmi_sta_uapsd_var_auto_trig = 28, + wmi_tlv_service_sta_keep_alive = 29, + wmi_tlv_service_tx_encap = 30, + wmi_tlv_service_ap_ps_detect_out_of_sync = 31, + wmi_tlv_service_early_rx = 32, + wmi_tlv_service_sta_smps = 33, + wmi_tlv_service_fwtest = 34, + wmi_tlv_service_sta_wmmac = 35, + wmi_tlv_service_tdls = 36, + wmi_tlv_service_burst = 37, + wmi_tlv_service_mcc_bcn_interval_change = 38, + wmi_tlv_service_adaptive_ocs = 39, + wmi_tlv_service_ba_ssn_support = 40, + wmi_tlv_service_filter_ipsec_natkeepalive = 41, + wmi_tlv_service_wlan_hb = 42, + wmi_tlv_service_lte_ant_share_support = 43, + wmi_tlv_service_batch_scan = 44, + wmi_tlv_service_qpower = 45, + wmi_tlv_service_plmreq = 46, + wmi_tlv_service_thermal_mgmt = 47, + wmi_tlv_service_rmc = 48, + wmi_tlv_service_mhf_offload = 49, + wmi_tlv_service_coex_sar = 50, + wmi_tlv_service_bcn_txrate_override = 51, + wmi_tlv_service_nan = 52, + wmi_tlv_service_l1ss_stat = 53, + wmi_tlv_service_estimate_linkspeed = 54, + wmi_tlv_service_obss_scan = 55, + wmi_tlv_service_tdls_offchan = 56, + wmi_tlv_service_tdls_uapsd_buffer_sta = 57, + wmi_tlv_service_tdls_uapsd_sleep_sta = 58, + 
wmi_tlv_service_ibss_pwrsave = 59, + wmi_tlv_service_lpass = 60, + wmi_tlv_service_extscan = 61, + wmi_tlv_service_d0wow = 62, + wmi_tlv_service_hsoffload = 63, + wmi_tlv_service_roam_ho_offload = 64, + wmi_tlv_service_rx_full_reorder = 65, + wmi_tlv_service_dhcp_offload = 66, + wmi_tlv_service_sta_rx_ipa_offload_support = 67, + wmi_tlv_service_mdns_offload = 68, + wmi_tlv_service_sap_auth_offload = 69, + wmi_tlv_service_dual_band_simultaneous_support = 70, + wmi_tlv_service_ocb = 71, + wmi_tlv_service_ap_arpns_offload = 72, + wmi_tlv_service_per_band_chainmask_support = 73, + wmi_tlv_service_packet_filter_offload = 74, + wmi_tlv_service_mgmt_tx_htt = 75, + wmi_tlv_service_mgmt_tx_wmi = 76, + wmi_tlv_service_ext_msg = 77, + wmi_tlv_service_mawc = 78, + wmi_tlv_service_peer_assoc_conf = 79, + wmi_tlv_service_egap = 80, + wmi_tlv_service_sta_pmf_offload = 81, + wmi_tlv_service_unified_wow_capability = 82, + wmi_tlv_service_enhanced_proxy_sta = 83, + wmi_tlv_service_atf = 84, + wmi_tlv_service_coex_gpio = 85, + wmi_tlv_service_aux_spectral_intf = 86, + wmi_tlv_service_aux_chan_load_intf = 87, + wmi_tlv_service_bss_channel_info_64 = 88, + wmi_tlv_service_enterprise_mesh = 89, + wmi_tlv_service_restrt_chnl_support = 90, + wmi_tlv_service_bpf_offload = 91, + wmi_tlv_service_sync_delete_cmds = 92, + wmi_tlv_service_smart_antenna_sw_support = 93, + wmi_tlv_service_smart_antenna_hw_support = 94, + wmi_tlv_service_ratectrl_limit_max_min_rates = 95, + wmi_tlv_service_nan_data = 96, + wmi_tlv_service_nan_rtt = 97, + wmi_tlv_service_11ax = 98, + wmi_tlv_service_deprecated_replace = 99, + wmi_tlv_service_tdls_conn_tracker_in_host_mode = 100, + wmi_tlv_service_enhanced_mcast_filter = 101, + wmi_tlv_service_periodic_chan_stat_support = 102, + wmi_tlv_service_mesh_11s = 103, + wmi_tlv_service_half_rate_quarter_rate_support = 104, + wmi_tlv_service_vdev_rx_filter = 105, + wmi_tlv_service_p2p_listen_offload_support = 106, + wmi_tlv_service_mark_first_wakeup_packet = 107, + 
wmi_tlv_service_multiple_mcast_filter_set = 108, + wmi_tlv_service_host_managed_rx_reorder = 109, + wmi_tlv_service_flash_rdwr_support = 110, + wmi_tlv_service_wlan_stats_report = 111, + wmi_tlv_service_tx_msdu_id_new_partition_support = 112, + wmi_tlv_service_dfs_phyerr_offload = 113, + wmi_tlv_service_rcpi_support = 114, + wmi_tlv_service_fw_mem_dump_support = 115, + wmi_tlv_service_peer_stats_info = 116, + wmi_tlv_service_regulatory_db = 117, + wmi_tlv_service_11d_offload = 118, + wmi_tlv_service_hw_data_filtering = 119, + wmi_tlv_service_multiple_vdev_restart = 120, + wmi_tlv_service_pkt_routing = 121, + wmi_tlv_service_check_cal_version = 122, + wmi_tlv_service_offchan_tx_wmi = 123, + wmi_tlv_service_8ss_tx_bfee = 124, + wmi_tlv_service_extended_nss_support = 125, + wmi_tlv_service_ack_timeout = 126, + wmi_tlv_service_pdev_bss_channel_info_64 = 127, + + wmi_max_service = 128, + + wmi_tlv_service_chan_load_info = 128, + wmi_tlv_service_tx_ppdu_info_stats_support = 129, + wmi_tlv_service_vdev_limit_offchan_support = 130, + wmi_tlv_service_fils_support = 131, + wmi_tlv_service_wlan_oic_ping_offload = 132, + wmi_tlv_service_wlan_dhcp_renew = 133, + wmi_tlv_service_mawc_support = 134, + wmi_tlv_service_vdev_latency_config = 135, + wmi_tlv_service_pdev_update_ctltable_support = 136, + wmi_tlv_service_pktlog_support_over_htt = 137, + wmi_tlv_service_vdev_multi_group_key_support = 138, + wmi_tlv_service_scan_phymode_support = 139, + wmi_tlv_service_therm_throt = 140, + wmi_tlv_service_bcn_offload_start_stop_support = 141, + wmi_tlv_service_wow_wakeup_by_timer_pattern = 142, + wmi_tlv_service_peer_map_unmap_v2_support = 143, + wmi_tlv_service_offchan_data_tid_support = 144, + wmi_tlv_service_rx_promisc_enable_support = 145, + wmi_tlv_service_support_direct_dma = 146, + wmi_tlv_service_ap_obss_detection_offload = 147, + wmi_tlv_service_11k_neighbour_report_support = 148, + wmi_tlv_service_listen_interval_offload_support = 149, + wmi_tlv_service_bss_color_offload = 150, 
+ wmi_tlv_service_runtime_dpd_recal = 151, + wmi_tlv_service_sta_twt = 152, + wmi_tlv_service_ap_twt = 153, + wmi_tlv_service_gmac_offload_support = 154, + wmi_tlv_service_spoof_mac_support = 155, + wmi_tlv_service_peer_tid_configs_support = 156, + wmi_tlv_service_vdev_swretry_per_ac_config_support = 157, + wmi_tlv_service_dual_beacon_on_single_mac_scc_support = 158, + wmi_tlv_service_dual_beacon_on_single_mac_mcc_support = 159, + wmi_tlv_service_motion_det = 160, + wmi_tlv_service_infra_mbssid = 161, + wmi_tlv_service_obss_spatial_reuse = 162, + wmi_tlv_service_vdev_different_beacon_interval_support = 163, + wmi_tlv_service_nan_dbs_support = 164, + wmi_tlv_service_ndi_dbs_support = 165, + wmi_tlv_service_nan_sap_support = 166, + wmi_tlv_service_ndi_sap_support = 167, + wmi_tlv_service_cfr_capture_support = 168, + wmi_tlv_service_cfr_capture_ind_msg_type_1 = 169, + wmi_tlv_service_esp_support = 170, + wmi_tlv_service_peer_chwidth_change = 171, + wmi_tlv_service_wlan_hpcs_pulse = 172, + wmi_tlv_service_per_vdev_chainmask_config_support = 173, + wmi_tlv_service_tx_data_mgmt_ack_rssi = 174, + wmi_tlv_service_nan_disable_support = 175, + wmi_tlv_service_htt_h2t_no_htc_hdr_len_in_msg_len = 176, + + wmi_max_ext_service + +}; + +enum { + wmi_smps_forced_mode_none = 0, + wmi_smps_forced_mode_disabled, + wmi_smps_forced_mode_static, + wmi_smps_forced_mode_dynamic +}; + +#define wmi_tpc_chainmask_config_band_2g 0 +#define wmi_tpc_chainmask_config_band_5g 1 +#define wmi_num_supported_band_max 2 + +#define wmi_peer_mimo_ps_state 0x1 +#define wmi_peer_ampdu 0x2 +#define wmi_peer_authorize 0x3 +#define wmi_peer_chwidth 0x4 +#define wmi_peer_nss 0x5 +#define wmi_peer_use_4addr 0x6 +#define wmi_peer_membership 0x7 +#define wmi_peer_userpos 0x8 +#define wmi_peer_crit_proto_hint_enabled 0x9 +#define wmi_peer_tx_fail_cnt_thr 0xa +#define wmi_peer_set_hw_retry_cts2s 0xb +#define wmi_peer_ibss_atim_window_length 0xc +#define wmi_peer_phymode 0xd +#define wmi_peer_use_fixed_pwr 0xe 
+#define wmi_peer_param_fixed_rate 0xf +#define wmi_peer_set_mu_whitelist 0x10 +#define wmi_peer_set_max_tx_rate 0x11 +#define wmi_peer_set_min_tx_rate 0x12 +#define wmi_peer_set_default_routing 0x13 + +/* slot time long */ +#define wmi_vdev_slot_time_long 0x1 +/* slot time short */ +#define wmi_vdev_slot_time_short 0x2 +/* preablbe long */ +#define wmi_vdev_preamble_long 0x1 +/* preablbe short */ +#define wmi_vdev_preamble_short 0x2 + +enum wmi_peer_smps_state { + wmi_peer_smps_ps_none = 0x0, + wmi_peer_smps_static = 0x1, + wmi_peer_smps_dynamic = 0x2 +}; + +enum wmi_peer_chwidth { + wmi_peer_chwidth_20mhz = 0, + wmi_peer_chwidth_40mhz = 1, + wmi_peer_chwidth_80mhz = 2, + wmi_peer_chwidth_160mhz = 3, +}; + +enum wmi_beacon_gen_mode { + wmi_beacon_staggered_mode = 0, + wmi_beacon_burst_mode = 1 +}; + +struct wmi_host_pdev_band_to_mac { + u32 pdev_id; + u32 start_freq; + u32 end_freq; +}; + +struct ath11k_ppe_threshold { + u32 numss_m1; + u32 ru_bit_mask; + u32 ppet16_ppet8_ru3_ru0[psoc_host_max_num_ss]; +}; + +struct ath11k_service_ext_param { + u32 default_conc_scan_config_bits; + u32 default_fw_config_bits; + struct ath11k_ppe_threshold ppet; + u32 he_cap_info; + u32 mpdu_density; + u32 max_bssid_rx_filters; + u32 num_hw_modes; + u32 num_phy; +}; + +struct ath11k_hw_mode_caps { + u32 hw_mode_id; + u32 phy_id_map; + u32 hw_mode_config_type; +}; + +#define psoc_host_max_phy_size (3) +#define ath11k_11b_support bit(0) +#define ath11k_11g_support bit(1) +#define ath11k_11a_support bit(2) +#define ath11k_11n_support bit(3) +#define ath11k_11ac_support bit(4) +#define ath11k_11ax_support bit(5) + +struct ath11k_hal_reg_capabilities_ext { + u32 phy_id; + u32 eeprom_reg_domain; + u32 eeprom_reg_domain_ext; + u32 regcap1; + u32 regcap2; + u32 wireless_modes; + u32 low_2ghz_chan; + u32 high_2ghz_chan; + u32 low_5ghz_chan; + u32 high_5ghz_chan; +}; + +#define wmi_host_max_pdev 3 + +struct wlan_host_mem_chunk { + u32 tlv_header; + u32 req_id; + u32 ptr; + u32 size; +} 
__packed; + +struct wmi_host_mem_chunk { + void *vaddr; + dma_addr_t paddr; + u32 len; + u32 req_id; +}; + +struct wmi_init_cmd_param { + u32 tlv_header; + struct target_resource_config *res_cfg; + u8 num_mem_chunks; + struct wmi_host_mem_chunk *mem_chunks; + u32 hw_mode_id; + u32 num_band_to_mac; + struct wmi_host_pdev_band_to_mac band_to_mac[wmi_host_max_pdev]; +}; + +struct wmi_pdev_band_to_mac { + u32 tlv_header; + u32 pdev_id; + u32 start_freq; + u32 end_freq; +} __packed; + +struct wmi_pdev_set_hw_mode_cmd_param { + u32 tlv_header; + u32 pdev_id; + u32 hw_mode_index; + u32 num_band_to_mac; +} __packed; + +struct wmi_ppe_threshold { + u32 numss_m1; /** nss - 1*/ + union { + u32 ru_count; + u32 ru_mask; + } __packed; + u32 ppet16_ppet8_ru3_ru0[wmi_max_num_ss]; +} __packed; + +#define hw_bd_info_size 5 + +struct wmi_abi_version { + u32 abi_version_0; + u32 abi_version_1; + u32 abi_version_ns_0; + u32 abi_version_ns_1; + u32 abi_version_ns_2; + u32 abi_version_ns_3; +} __packed; + +struct wmi_init_cmd { + u32 tlv_header; + struct wmi_abi_version host_abi_vers; + u32 num_host_mem_chunks; +} __packed; + +struct wmi_resource_config { + u32 tlv_header; + u32 num_vdevs; + u32 num_peers; + u32 num_offload_peers; + u32 num_offload_reorder_buffs; + u32 num_peer_keys; + u32 num_tids; + u32 ast_skid_limit; + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 rx_timeout_pri[4]; + u32 rx_decap_mode; + u32 scan_max_pending_req; + u32 bmiss_offload_max_vdev; + u32 roam_offload_max_vdev; + u32 roam_offload_max_ap_profiles; + u32 num_mcast_groups; + u32 num_mcast_table_elems; + u32 mcast2ucast_mode; + u32 tx_dbg_log_size; + u32 num_wds_entries; + u32 dma_burst_size; + u32 mac_aggr_delim; + u32 rx_skip_defrag_timeout_dup_detection_check; + u32 vow_config; + u32 gtk_offload_max_vdev; + u32 num_msdu_desc; + u32 max_frag_entries; + u32 num_tdls_vdevs; + u32 num_tdls_conn_table_entries; + u32 beacon_tx_offload_max_vdev; + u32 num_multicast_filter_entries; + u32 num_wow_filters; + u32 
num_keep_alive_pattern; + u32 keep_alive_pattern_size; + u32 max_tdls_concurrent_sleep_sta; + u32 max_tdls_concurrent_buffer_sta; + u32 wmi_send_separate; + u32 num_ocb_vdevs; + u32 num_ocb_channels; + u32 num_ocb_schedules; + u32 flag1; + u32 smart_ant_cap; + u32 bk_minfree; + u32 be_minfree; + u32 vi_minfree; + u32 vo_minfree; + u32 alloc_frag_desc_for_data_pkt; + u32 num_ns_ext_tuples_cfg; + u32 bpf_instruction_size; + u32 max_bssid_rx_filters; + u32 use_pdev_id; + u32 max_num_dbs_scan_duty_cycle; + u32 max_num_group_keys; + u32 peer_map_unmap_v2_support; +} __packed; + +struct wmi_service_ready_event { + u32 fw_build_vers; + struct wmi_abi_version fw_abi_vers; + u32 phy_capability; + u32 max_frag_entry; + u32 num_rf_chains; + u32 ht_cap_info; + u32 vht_cap_info; + u32 vht_supp_mcs; + u32 hw_min_tx_power; + u32 hw_max_tx_power; + u32 sys_cap_info; + u32 min_pkt_size_enable; + u32 max_bcn_ie_size; + u32 num_mem_reqs; + u32 max_num_scan_channels; + u32 hw_bd_id; + u32 hw_bd_info[hw_bd_info_size]; + u32 max_supported_macs; + u32 wmi_fw_sub_feat_caps; + u32 num_dbs_hw_modes; + /* txrx_chainmask + * [7:0] - 2g band tx chain mask + * [15:8] - 2g band rx chain mask + * [23:16] - 5g band tx chain mask + * [31:24] - 5g band rx chain mask + */ + u32 txrx_chainmask; + u32 default_dbs_hw_mode_index; + u32 num_msdu_desc; +} __packed; + +#define wmi_service_bm_size ((wmi_max_service + sizeof(u32) - 1) / sizeof(u32)) + +#define wmi_service_segment_bm_size32 4 /* 4x u32 = 128 bits */ +#define wmi_service_ext_bm_size (wmi_service_segment_bm_size32 * sizeof(u32)) +#define wmi_avail_service_bits_in_size32 32 +#define wmi_service_bits_in_size32 4 + +struct wmi_service_ready_ext_event { + u32 default_conc_scan_config_bits; + u32 default_fw_config_bits; + struct wmi_ppe_threshold ppet; + u32 he_cap_info; + u32 mpdu_density; + u32 max_bssid_rx_filters; + u32 fw_build_vers_ext; + u32 max_nlo_ssids; + u32 max_bssid_indicator; + u32 he_cap_info_ext; +} __packed; + +struct 
wmi_soc_mac_phy_hw_mode_caps { + u32 num_hw_modes; + u32 num_chainmask_tables; +} __packed; + +struct wmi_hw_mode_capabilities { + u32 tlv_header; + u32 hw_mode_id; + u32 phy_id_map; + u32 hw_mode_config_type; +} __packed; + +#define wmi_max_hecap_phy_size (3) + +struct wmi_mac_phy_capabilities { + u32 tlv_header; + u32 hw_mode_id; + u32 pdev_id; + u32 phy_id; + u32 supported_flags; + u32 supported_bands; + u32 ampdu_density; + u32 max_bw_supported_2g; + u32 ht_cap_info_2g; + u32 vht_cap_info_2g; + u32 vht_supp_mcs_2g; + u32 he_cap_info_2g; + u32 he_supp_mcs_2g; + u32 tx_chain_mask_2g; + u32 rx_chain_mask_2g; + u32 max_bw_supported_5g; + u32 ht_cap_info_5g; + u32 vht_cap_info_5g; + u32 vht_supp_mcs_5g; + u32 he_cap_info_5g; + u32 he_supp_mcs_5g; + u32 tx_chain_mask_5g; + u32 rx_chain_mask_5g; + u32 he_cap_phy_info_2g[wmi_max_hecap_phy_size]; + u32 he_cap_phy_info_5g[wmi_max_hecap_phy_size]; + struct wmi_ppe_threshold he_ppet2g; + struct wmi_ppe_threshold he_ppet5g; + u32 chainmask_table_id; + u32 lmac_id; + u32 he_cap_info_2g_ext; + u32 he_cap_info_5g_ext; + u32 he_cap_info_internal; +} __packed; + +struct wmi_hal_reg_capabilities_ext { + u32 tlv_header; + u32 phy_id; + u32 eeprom_reg_domain; + u32 eeprom_reg_domain_ext; + u32 regcap1; + u32 regcap2; + u32 wireless_modes; + u32 low_2ghz_chan; + u32 high_2ghz_chan; + u32 low_5ghz_chan; + u32 high_5ghz_chan; +} __packed; + +struct wmi_soc_hal_reg_capabilities { + u32 num_phy; +} __packed; + +/* 2 word representation of mac addr */ +struct wmi_mac_addr { + union { + u8 addr[6]; + struct { + u32 word0; + u32 word1; + } __packed; + } __packed; +} __packed; + +struct wmi_ready_event { + struct wmi_abi_version fw_abi_vers; + struct wmi_mac_addr mac_addr; + u32 status; + u32 num_dscp_table; + u32 num_extra_mac_addr; + u32 num_total_peers; + u32 num_extra_peers; +} __packed; + +struct wmi_service_available_event { + u32 wmi_service_segment_offset; + u32 wmi_service_segment_bitmap[wmi_service_segment_bm_size32]; +} __packed; 
+ +struct ath11k_pdev_wmi { + struct ath11k_wmi_base *wmi_sc; + enum ath11k_htc_ep_id eid; + const struct wmi_peer_flags_map *peer_flags; + u32 rx_decap_mode; +}; + +struct vdev_create_params { + u8 if_id; + u32 type; + u32 subtype; + struct { + u8 tx; + u8 rx; + } chains[num_nl80211_bands]; + u32 pdev_id; +}; + +struct wmi_vdev_create_cmd { + u32 tlv_header; + u32 vdev_id; + u32 vdev_type; + u32 vdev_subtype; + struct wmi_mac_addr vdev_macaddr; + u32 num_cfg_txrx_streams; + u32 pdev_id; +} __packed; + +struct wmi_vdev_txrx_streams { + u32 tlv_header; + u32 band; + u32 supported_tx_streams; + u32 supported_rx_streams; +} __packed; + +struct wmi_vdev_delete_cmd { + u32 tlv_header; + u32 vdev_id; +} __packed; + +struct wmi_vdev_up_cmd { + u32 tlv_header; + u32 vdev_id; + u32 vdev_assoc_id; + struct wmi_mac_addr vdev_bssid; + struct wmi_mac_addr trans_bssid; + u32 profile_idx; + u32 profile_num; +} __packed; + +struct wmi_vdev_stop_cmd { + u32 tlv_header; + u32 vdev_id; +} __packed; + +struct wmi_vdev_down_cmd { + u32 tlv_header; + u32 vdev_id; +} __packed; + +#define wmi_vdev_start_hidden_ssid bit(0) +#define wmi_vdev_start_pmf_enabled bit(1) +#define wmi_vdev_start_ldpc_rx_enabled bit(3) + +struct wmi_ssid { + u32 ssid_len; + u32 ssid[8]; +} __packed; + +#define ath11k_vdev_setup_timeout_hz (1 * hz) + +struct wmi_vdev_start_request_cmd { + u32 tlv_header; + u32 vdev_id; + u32 requestor_id; + u32 beacon_interval; + u32 dtim_period; + u32 flags; + struct wmi_ssid ssid; + u32 bcn_tx_rate; + u32 bcn_txpower; + u32 num_noa_descriptors; + u32 disable_hw_ack; + u32 preferred_tx_streams; + u32 preferred_rx_streams; + u32 he_ops; + u32 cac_duration_ms; + u32 regdomain; +} __packed; + +#define mgmt_tx_dl_frm_len 64 +#define wmi_mac_max_ssid_length 32 +struct mac_ssid { + u8 length; + u8 mac_ssid[wmi_mac_max_ssid_length]; +} __packed; + +struct wmi_p2p_noa_descriptor { + u32 type_count; + u32 duration; + u32 interval; + u32 start_time; +}; + +struct channel_param { + u8 
chan_id; + u8 pwr; + u32 mhz; + u32 half_rate:1, + quarter_rate:1, + dfs_set:1, + dfs_set_cfreq2:1, + is_chan_passive:1, + allow_ht:1, + allow_vht:1, + set_agile:1; + u32 phy_mode; + u32 cfreq1; + u32 cfreq2; + char maxpower; + char minpower; + char maxregpower; + u8 antennamax; + u8 reg_class_id; +} __packed; + +enum wmi_phy_mode { + mode_11a = 0, + mode_11g = 1, /* 11b/g mode */ + mode_11b = 2, /* 11b mode */ + mode_11gonly = 3, /* 11g only mode */ + mode_11na_ht20 = 4, + mode_11ng_ht20 = 5, + mode_11na_ht40 = 6, + mode_11ng_ht40 = 7, + mode_11ac_vht20 = 8, + mode_11ac_vht40 = 9, + mode_11ac_vht80 = 10, + mode_11ac_vht20_2g = 11, + mode_11ac_vht40_2g = 12, + mode_11ac_vht80_2g = 13, + mode_11ac_vht80_80 = 14, + mode_11ac_vht160 = 15, + mode_11ax_he20 = 16, + mode_11ax_he40 = 17, + mode_11ax_he80 = 18, + mode_11ax_he80_80 = 19, + mode_11ax_he160 = 20, + mode_11ax_he20_2g = 21, + mode_11ax_he40_2g = 22, + mode_11ax_he80_2g = 23, + mode_unknown = 24, + mode_max = 24 +}; + +static inline const char *ath11k_wmi_phymode_str(enum wmi_phy_mode mode) +{ + switch (mode) { + case mode_11a: + return "11a"; + case mode_11g: + return "11g"; + case mode_11b: + return "11b"; + case mode_11gonly: + return "11gonly"; + case mode_11na_ht20: + return "11na-ht20"; + case mode_11ng_ht20: + return "11ng-ht20"; + case mode_11na_ht40: + return "11na-ht40"; + case mode_11ng_ht40: + return "11ng-ht40"; + case mode_11ac_vht20: + return "11ac-vht20"; + case mode_11ac_vht40: + return "11ac-vht40"; + case mode_11ac_vht80: + return "11ac-vht80"; + case mode_11ac_vht160: + return "11ac-vht160"; + case mode_11ac_vht80_80: + return "11ac-vht80+80"; + case mode_11ac_vht20_2g: + return "11ac-vht20-2g"; + case mode_11ac_vht40_2g: + return "11ac-vht40-2g"; + case mode_11ac_vht80_2g: + return "11ac-vht80-2g"; + case mode_11ax_he20: + return "11ax-he20"; + case mode_11ax_he40: + return "11ax-he40"; + case mode_11ax_he80: + return "11ax-he80"; + case mode_11ax_he80_80: + return "11ax-he80+80"; + case 
mode_11ax_he160: + return "11ax-he160"; + case mode_11ax_he20_2g: + return "11ax-he20-2g"; + case mode_11ax_he40_2g: + return "11ax-he40-2g"; + case mode_11ax_he80_2g: + return "11ax-he80-2g"; + case mode_unknown: + /* skip */ + break; + + /* no default handler to allow compiler to check that the + * enum is fully handled + */ + }; + + return "<unknown>"; +} + +struct wmi_channel_arg { + u32 freq; + u32 band_center_freq1; + u32 band_center_freq2; + bool passive; + bool allow_ibss; + bool allow_ht; + bool allow_vht; + bool ht40plus; + bool chan_radar; + bool freq2_radar; + bool allow_he; + u32 min_power; + u32 max_power; + u32 max_reg_power; + u32 max_antenna_gain; + enum wmi_phy_mode mode; +}; + +struct wmi_vdev_start_req_arg { + u32 vdev_id; + struct wmi_channel_arg channel; + u32 bcn_intval; + u32 dtim_period; + u8 *ssid; + u32 ssid_len; + u32 bcn_tx_rate; + u32 bcn_tx_power; + bool disable_hw_ack; + bool hidden_ssid; + bool pmf_enabled; + u32 he_ops; + u32 cac_duration_ms; + u32 regdomain; + u32 pref_rx_streams; + u32 pref_tx_streams; + u32 num_noa_descriptors; +}; + +struct peer_create_params { + const u8 *peer_addr; + u32 peer_type; + u32 vdev_id; +}; + +struct peer_delete_params { + u8 vdev_id; +}; + +struct peer_flush_params { + u32 peer_tid_bitmap; + u8 vdev_id; +}; + +struct pdev_set_regdomain_params { + u16 current_rd_in_use; + u16 current_rd_2g; + u16 current_rd_5g; + u32 ctl_2g; + u32 ctl_5g; + u8 dfs_domain; + u32 pdev_id; +}; + +struct rx_reorder_queue_remove_params { + u8 *peer_macaddr; + u16 vdev_id; + u32 peer_tid_bitmap; +}; + +#define wmi_host_pdev_id_soc 0xff +#define wmi_host_pdev_id_0 0 +#define wmi_host_pdev_id_1 1 +#define wmi_host_pdev_id_2 2 + +#define wmi_pdev_id_soc 0 +#define wmi_pdev_id_1st 1 +#define wmi_pdev_id_2nd 2 +#define wmi_pdev_id_3rd 3 + +/* freq units in mhz */ +#define reg_rule_start_freq 0x0000ffff +#define reg_rule_end_freq 0xffff0000 +#define reg_rule_flags 0x0000ffff +#define reg_rule_max_bw 0x0000ffff +#define 
reg_rule_reg_pwr 0x00ff0000 +#define reg_rule_ant_gain 0xff000000 + +#define wmi_vdev_param_txbf_su_tx_bfee bit(0) +#define wmi_vdev_param_txbf_mu_tx_bfee bit(1) +#define wmi_vdev_param_txbf_su_tx_bfer bit(2) +#define wmi_vdev_param_txbf_mu_tx_bfer bit(3) + +#define hecap_phydword_0 0 +#define hecap_phydword_1 1 +#define hecap_phydword_2 2 + +#define hecap_phy_su_bfer bit(31) +#define hecap_phy_su_bfee bit(0) +#define hecap_phy_mu_bfer bit(1) +#define hecap_phy_ul_mumimo bit(22) +#define hecap_phy_ul_muofdma bit(23) + +#define hecap_phy_subfmr_get(hecap_phy) \ + field_get(hecap_phy_su_bfer, hecap_phy[hecap_phydword_0]) + +#define hecap_phy_subfme_get(hecap_phy) \ + field_get(hecap_phy_su_bfee, hecap_phy[hecap_phydword_1]) + +#define hecap_phy_mubfmr_get(hecap_phy) \ + field_get(hecap_phy_mu_bfer, hecap_phy[hecap_phydword_1]) + +#define hecap_phy_ulmumimo_get(hecap_phy) \ + field_get(hecap_phy_ul_mumimo, hecap_phy[hecap_phydword_0]) + +#define hecap_phy_ulofdma_get(hecap_phy) \ + field_get(hecap_phy_ul_muofdma, hecap_phy[hecap_phydword_0]) + +#define he_mode_su_tx_bfee bit(0) +#define he_mode_su_tx_bfer bit(1) +#define he_mode_mu_tx_bfee bit(2) +#define he_mode_mu_tx_bfer bit(3) +#define he_mode_dl_ofdma bit(4) +#define he_mode_ul_ofdma bit(5) +#define he_mode_ul_mumimo bit(6) + +#define he_dl_muofdma_enable 1 +#define he_ul_muofdma_enable 1 +#define he_dl_mumimo_enable 1 +#define he_mu_bfee_enable 1 +#define he_su_bfee_enable 1 + +#define he_vht_sounding_mode_enable 1 +#define he_su_mu_sounding_mode_enable 1 +#define he_trig_nontrig_sounding_mode_enable 1 + +/* he or vht sounding */ +#define he_vht_sounding_mode bit(0) +/* su or mu sounding */ +#define he_su_mu_sounding_mode bit(2) +/* trig or non-trig sounding */ +#define he_trig_nontrig_sounding_mode bit(3) + +#define wmi_txbf_sts_cap_offset_lsb 4 +#define wmi_txbf_sts_cap_offset_mask 0x70 +#define wmi_bf_sound_dim_offset_lsb 8 +#define wmi_bf_sound_dim_offset_mask 0x700 + +struct pdev_params { + u32 param_id; + 
u32 param_value; +}; + +enum wmi_peer_type { + wmi_peer_type_default = 0, + wmi_peer_type_bss = 1, + wmi_peer_type_tdls = 2, +}; + +struct wmi_peer_create_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 peer_type; +} __packed; + +struct wmi_peer_delete_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; +} __packed; + +struct wmi_peer_reorder_queue_setup_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 tid; + u32 queue_ptr_lo; + u32 queue_ptr_hi; + u32 queue_no; + u32 ba_window_size_valid; + u32 ba_window_size; +} __packed; + +struct wmi_peer_reorder_queue_remove_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 tid_mask; +} __packed; + +struct gpio_config_params { + u32 gpio_num; + u32 input; + u32 pull_type; + u32 intr_mode; +}; + +enum wmi_gpio_type { + wmi_gpio_pull_none, + wmi_gpio_pull_up, + wmi_gpio_pull_down +}; + +enum wmi_gpio_intr_type { + wmi_gpio_inttype_disable, + wmi_gpio_inttype_rising_edge, + wmi_gpio_inttype_falling_edge, + wmi_gpio_inttype_both_edge, + wmi_gpio_inttype_level_low, + wmi_gpio_inttype_level_high +}; + +enum wmi_bss_chan_info_req_type { + wmi_bss_survey_req_type_read = 1, + wmi_bss_survey_req_type_read_clear, +}; + +struct wmi_gpio_config_cmd_param { + u32 tlv_header; + u32 gpio_num; + u32 input; + u32 pull_type; + u32 intr_mode; +}; + +struct gpio_output_params { + u32 gpio_num; + u32 set; +}; + +struct wmi_gpio_output_cmd_param { + u32 tlv_header; + u32 gpio_num; + u32 set; +}; + +struct set_fwtest_params { + u32 arg; + u32 value; +}; + +struct wmi_fwtest_set_param_cmd_param { + u32 tlv_header; + u32 param_id; + u32 param_value; +}; + +struct wmi_pdev_set_param_cmd { + u32 tlv_header; + u32 pdev_id; + u32 param_id; + u32 param_value; +} __packed; + +struct wmi_pdev_suspend_cmd { + u32 tlv_header; + u32 pdev_id; + u32 suspend_opt; +} __packed; + +struct wmi_pdev_resume_cmd { + u32 tlv_header; + u32 pdev_id; +} 
__packed; + +struct wmi_pdev_bss_chan_info_req_cmd { + u32 tlv_header; + /* ref wmi_bss_chan_info_req_type */ + u32 req_type; +} __packed; + +struct wmi_ap_ps_peer_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 param; + u32 value; +} __packed; + +struct wmi_sta_powersave_param_cmd { + u32 tlv_header; + u32 vdev_id; + u32 param; + u32 value; +} __packed; + +struct wmi_pdev_set_regdomain_cmd { + u32 tlv_header; + u32 pdev_id; + u32 reg_domain; + u32 reg_domain_2g; + u32 reg_domain_5g; + u32 conformance_test_limit_2g; + u32 conformance_test_limit_5g; + u32 dfs_domain; +} __packed; + +struct wmi_peer_set_param_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 param_id; + u32 param_value; +} __packed; + +struct wmi_peer_flush_tids_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 peer_tid_bitmap; +} __packed; + +struct wmi_dfs_phyerr_offload_cmd { + u32 tlv_header; + u32 pdev_id; +} __packed; + +struct wmi_bcn_offload_ctrl_cmd { + u32 tlv_header; + u32 vdev_id; + u32 bcn_ctrl_op; +} __packed; + +enum scan_priority { + scan_priority_very_low, + scan_priority_low, + scan_priority_medium, + scan_priority_high, + scan_priority_very_high, + scan_priority_count, +}; + +enum scan_dwelltime_adaptive_mode { + scan_dwell_mode_default = 0, + scan_dwell_mode_conservative = 1, + scan_dwell_mode_moderate = 2, + scan_dwell_mode_aggressive = 3, + scan_dwell_mode_static = 4 +}; + +#define wlan_scan_max_num_ssid 10 +#define wlan_scan_max_num_bssid 10 +#define wlan_scan_max_num_channels 40 + +#define wlan_ssid_max_len 32 + +struct element_info { + u32 len; + u8 *ptr; +}; + +struct wlan_ssid { + u8 length; + u8 ssid[wlan_ssid_max_len]; +}; + +#define wmi_ie_bitmap_size 8 + +#define wmi_scan_max_num_ssid 0x0a +/* prefix used by scan requestor ids on the host */ +#define wmi_host_scan_requestor_id_prefix 0xa000 + +/* prefix used by scan request ids generated on the host */ +/* host cycles through 
the lower 12 bits to generate ids */ +#define wmi_host_scan_req_id_prefix 0xa000 + +#define wlan_scan_params_max_ssid 16 +#define wlan_scan_params_max_bssid 4 +#define wlan_scan_params_max_ie_len 256 + +/* values lower than this may be refused by some firmware revisions with a scan + * completion with a timedout reason. + */ +#define wmi_scan_chan_min_time_msec 40 + +/* scan priority numbers must be sequential, starting with 0 */ +enum wmi_scan_priority { + wmi_scan_priority_very_low = 0, + wmi_scan_priority_low, + wmi_scan_priority_medium, + wmi_scan_priority_high, + wmi_scan_priority_very_high, + wmi_scan_priority_count /* number of priorities supported */ +}; + +enum wmi_scan_event_type { + wmi_scan_event_started = bit(0), + wmi_scan_event_completed = bit(1), + wmi_scan_event_bss_channel = bit(2), + wmi_scan_event_foreign_chan = bit(3), + wmi_scan_event_dequeued = bit(4), + /* possibly by high-prio scan */ + wmi_scan_event_preempted = bit(5), + wmi_scan_event_start_failed = bit(6), + wmi_scan_event_restarted = bit(7), + wmi_scan_event_foreign_chan_exit = bit(8), + wmi_scan_event_suspended = bit(9), + wmi_scan_event_resumed = bit(10), + wmi_scan_event_max = bit(15), +}; + +enum wmi_scan_completion_reason { + wmi_scan_reason_completed, + wmi_scan_reason_cancelled, + wmi_scan_reason_preempted, + wmi_scan_reason_timedout, + wmi_scan_reason_internal_failure, + wmi_scan_reason_max, +}; + +struct wmi_start_scan_cmd { + u32 tlv_header; + u32 scan_id; + u32 scan_req_id; + u32 vdev_id; + u32 scan_priority; + u32 notify_scan_events; + u32 dwell_time_active; + u32 dwell_time_passive; + u32 min_rest_time; + u32 max_rest_time; + u32 repeat_probe_time; + u32 probe_spacing_time; + u32 idle_time; + u32 max_scan_time; + u32 probe_delay; + u32 scan_ctrl_flags; + u32 burst_duration; + u32 num_chan; + u32 num_bssid; + u32 num_ssids; + u32 ie_len; + u32 n_probes; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; + u32 ie_bitmap[wmi_ie_bitmap_size]; + u32 num_vendor_oui; 
+ u32 scan_ctrl_flags_ext; + u32 dwell_time_active_2g; +} __packed; + +#define wmi_scan_flag_passive 0x1 +#define wmi_scan_add_bcast_probe_req 0x2 +#define wmi_scan_add_cck_rates 0x4 +#define wmi_scan_add_ofdm_rates 0x8 +#define wmi_scan_chan_stat_event 0x10 +#define wmi_scan_filter_probe_req 0x20 +#define wmi_scan_bypass_dfs_chn 0x40 +#define wmi_scan_continue_on_error 0x80 +#define wmi_scan_filter_promiscuos 0x100 +#define wmi_scan_flag_force_active_on_dfs 0x200 +#define wmi_scan_add_tpc_ie_in_probe_req 0x400 +#define wmi_scan_add_ds_ie_in_probe_req 0x800 +#define wmi_scan_add_spoof_mac_in_probe_req 0x1000 +#define wmi_scan_offchan_mgmt_tx 0x2000 +#define wmi_scan_offchan_data_tx 0x4000 +#define wmi_scan_capture_phy_error 0x8000 +#define wmi_scan_flag_strict_passive_on_pchn 0x10000 +#define wmi_scan_flag_half_rate_support 0x20000 +#define wmi_scan_flag_quarter_rate_support 0x40000 +#define wmi_scan_random_seq_no_in_probe_req 0x80000 +#define wmi_scan_enable_ie_whtelist_in_probe_req 0x100000 + +#define wmi_scan_dwell_mode_mask 0x00e00000 +#define wmi_scan_dwell_mode_shift 21 + +enum { + wmi_scan_dwell_mode_default = 0, + wmi_scan_dwell_mode_conservative = 1, + wmi_scan_dwell_mode_moderate = 2, + wmi_scan_dwell_mode_aggressive = 3, + wmi_scan_dwell_mode_static = 4, +}; + +#define wmi_scan_set_dwell_mode(flag, mode) \ + ((flag) |= (((mode) << wmi_scan_dwell_mode_shift) & \ + wmi_scan_dwell_mode_mask)) + +struct scan_req_params { + u32 scan_id; + u32 scan_req_id; + u32 vdev_id; + u32 pdev_id; + enum scan_priority scan_priority; + union { + struct { + u32 scan_ev_started:1, + scan_ev_completed:1, + scan_ev_bss_chan:1, + scan_ev_foreign_chan:1, + scan_ev_dequeued:1, + scan_ev_preempted:1, + scan_ev_start_failed:1, + scan_ev_restarted:1, + scan_ev_foreign_chn_exit:1, + scan_ev_invalid:1, + scan_ev_gpio_timeout:1, + scan_ev_suspended:1, + scan_ev_resumed:1; + }; + u32 scan_events; + }; + u32 dwell_time_active; + u32 dwell_time_active_2g; + u32 dwell_time_passive; + u32 
min_rest_time; + u32 max_rest_time; + u32 repeat_probe_time; + u32 probe_spacing_time; + u32 idle_time; + u32 max_scan_time; + u32 probe_delay; + union { + struct { + u32 scan_f_passive:1, + scan_f_bcast_probe:1, + scan_f_cck_rates:1, + scan_f_ofdm_rates:1, + scan_f_chan_stat_evnt:1, + scan_f_filter_prb_req:1, + scan_f_bypass_dfs_chn:1, + scan_f_continue_on_err:1, + scan_f_offchan_mgmt_tx:1, + scan_f_offchan_data_tx:1, + scan_f_promisc_mode:1, + scan_f_capture_phy_err:1, + scan_f_strict_passive_pch:1, + scan_f_half_rate:1, + scan_f_quarter_rate:1, + scan_f_force_active_dfs_chn:1, + scan_f_add_tpc_ie_in_probe:1, + scan_f_add_ds_ie_in_probe:1, + scan_f_add_spoofed_mac_in_probe:1, + scan_f_add_rand_seq_in_probe:1, + scan_f_en_ie_whitelist_in_probe:1, + scan_f_forced:1, + scan_f_2ghz:1, + scan_f_5ghz:1, + scan_f_80mhz:1; + }; + u32 scan_flags; + }; + enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode; + u32 burst_duration; + u32 num_chan; + u32 num_bssid; + u32 num_ssids; + u32 n_probes; + u32 chan_list[wlan_scan_max_num_channels]; + u32 notify_scan_events; + struct wlan_ssid ssid[wlan_scan_max_num_ssid]; + struct wmi_mac_addr bssid_list[wlan_scan_max_num_bssid]; + struct element_info extraie; + struct element_info htcap; + struct element_info vhtcap; +}; + +struct wmi_ssid_arg { + int len; + const u8 *ssid; +}; + +struct wmi_bssid_arg { + const u8 *bssid; +}; + +struct wmi_start_scan_arg { + u32 scan_id; + u32 scan_req_id; + u32 vdev_id; + u32 scan_priority; + u32 notify_scan_events; + u32 dwell_time_active; + u32 dwell_time_passive; + u32 min_rest_time; + u32 max_rest_time; + u32 repeat_probe_time; + u32 probe_spacing_time; + u32 idle_time; + u32 max_scan_time; + u32 probe_delay; + u32 scan_ctrl_flags; + + u32 ie_len; + u32 n_channels; + u32 n_ssids; + u32 n_bssids; + + u8 ie[wlan_scan_params_max_ie_len]; + u32 channels[64]; + struct wmi_ssid_arg ssids[wlan_scan_params_max_ssid]; + struct wmi_bssid_arg bssids[wlan_scan_params_max_bssid]; +}; + +#define 
wmi_scan_stop_one 0x00000000 +#define wmi_scn_stop_vap_all 0x01000000 +#define wmi_scan_stop_all 0x04000000 + +/* prefix 0xa000 indicates that the scan request + * is trigger by host + */ +#define ath11k_scan_id 0xa000 + +enum scan_cancel_req_type { + wlan_scan_cancel_single = 1, + wlan_scan_cancel_vdev_all, + wlan_scan_cancel_pdev_all, +}; + +struct scan_cancel_param { + u32 requester; + u32 scan_id; + enum scan_cancel_req_type req_type; + u32 vdev_id; + u32 pdev_id; +}; + +struct wmi_bcn_send_from_host_cmd { + u32 tlv_header; + u32 vdev_id; + u32 data_len; + union { + u32 frag_ptr; + u32 frag_ptr_lo; + }; + u32 frame_ctrl; + u32 dtim_flag; + u32 bcn_antenna; + u32 frag_ptr_hi; +}; + +#define wmi_chan_info_mode genmask(5, 0) +#define wmi_chan_info_ht40_plus bit(6) +#define wmi_chan_info_passive bit(7) +#define wmi_chan_info_adhoc_allowed bit(8) +#define wmi_chan_info_ap_disabled bit(9) +#define wmi_chan_info_dfs bit(10) +#define wmi_chan_info_allow_ht bit(11) +#define wmi_chan_info_allow_vht bit(12) +#define wmi_chan_info_chan_change_cause_csa bit(13) +#define wmi_chan_info_half_rate bit(14) +#define wmi_chan_info_quarter_rate bit(15) +#define wmi_chan_info_dfs_freq2 bit(16) +#define wmi_chan_info_allow_he bit(17) + +#define wmi_chan_reg_info1_min_pwr genmask(7, 0) +#define wmi_chan_reg_info1_max_pwr genmask(15, 8) +#define wmi_chan_reg_info1_max_reg_pwr genmask(23, 16) +#define wmi_chan_reg_info1_reg_cls genmask(31, 24) + +#define wmi_chan_reg_info2_ant_max genmask(7, 0) +#define wmi_chan_reg_info2_max_tx_pwr genmask(15, 8) + +struct wmi_channel { + u32 tlv_header; + u32 mhz; + u32 band_center_freq1; + u32 band_center_freq2; + u32 info; + u32 reg_info_1; + u32 reg_info_2; +} __packed; + +struct wmi_mgmt_params { + void *tx_frame; + u16 frm_len; + u8 vdev_id; + u16 chanfreq; + void *pdata; + u16 desc_id; + u8 *macaddr; + void *qdf_ctx; +}; + +enum wmi_sta_ps_mode { + wmi_sta_ps_mode_disabled = 0, + wmi_sta_ps_mode_enabled = 1, +}; + +#define 
wmi_smps_mask_lower_16bits 0xff +#define wmi_smps_mask_upper_3bits 0x7 +#define wmi_smps_param_value_shift 29 + +#define ath11k_wmi_fw_hang_assert_type 1 +#define ath11k_wmi_fw_hang_delay 0 + +/* type, 0:unused 1: assert 2: not respond detect command + * delay_time_ms, the simulate will delay time + */ + +struct wmi_force_fw_hang_cmd { + u32 tlv_header; + u32 type; + u32 delay_time_ms; +}; + +struct wmi_vdev_set_param_cmd { + u32 tlv_header; + u32 vdev_id; + u32 param_id; + u32 param_value; +} __packed; + +enum wmi_stats_id { + wmi_request_peer_stat = bit(0), + wmi_request_ap_stat = bit(1), + wmi_request_pdev_stat = bit(2), + wmi_request_vdev_stat = bit(3), + wmi_request_bcnflt_stat = bit(4), + wmi_request_vdev_rate_stat = bit(5), + wmi_request_inst_stat = bit(6), + wmi_request_mib_stat = bit(7), + wmi_request_rssi_per_chain_stat = bit(8), + wmi_request_congestion_stat = bit(9), + wmi_request_peer_extd_stat = bit(10), + wmi_request_bcn_stat = bit(11), + wmi_request_bcn_stat_reset = bit(12), + wmi_request_peer_extd2_stat = bit(13), +}; + +struct wmi_request_stats_cmd { + u32 tlv_header; + enum wmi_stats_id stats_id; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 pdev_id; +} __packed; + +#define wmi_beacon_tx_buffer_size 512 + +struct wmi_bcn_tmpl_cmd { + u32 tlv_header; + u32 vdev_id; + u32 tim_ie_offset; + u32 buf_len; + u32 csa_switch_count_offset; + u32 ext_csa_switch_count_offset; + u32 csa_event_bitmap; + u32 mbssid_ie_offset; + u32 esp_ie_offset; +} __packed; + +struct wmi_key_seq_counter { + u32 key_seq_counter_l; + u32 key_seq_counter_h; +} __packed; + +struct wmi_vdev_install_key_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 key_idx; + u32 key_flags; + u32 key_cipher; + struct wmi_key_seq_counter key_rsc_counter; + struct wmi_key_seq_counter key_global_rsc_counter; + struct wmi_key_seq_counter key_tsc_counter; + u8 wpi_key_rsc_counter[16]; + u8 wpi_key_tsc_counter[16]; + u32 key_len; + u32 key_txmic_len; + u32 
key_rxmic_len; + u32 is_group_key_id_valid; + u32 group_key_id; + + /* followed by key_data containing key followed by + * tx mic and then rx mic + */ +} __packed; + +struct wmi_vdev_install_key_arg { + u32 vdev_id; + const u8 *macaddr; + u32 key_idx; + u32 key_flags; + u32 key_cipher; + u32 key_len; + u32 key_txmic_len; + u32 key_rxmic_len; + u64 key_rsc_counter; + const void *key_data; +}; + +#define wmi_max_supported_rates 128 +#define wmi_host_max_hecap_phy_size 3 +#define wmi_host_max_he_rate_set 1 + +struct wmi_rate_set_arg { + u32 num_rates; + u8 rates[wmi_max_supported_rates]; +}; + +struct peer_assoc_params { + struct wmi_mac_addr peer_macaddr; + u32 vdev_id; + u32 peer_new_assoc; + u32 peer_associd; + u32 peer_flags; + u32 peer_caps; + u32 peer_listen_intval; + u32 peer_ht_caps; + u32 peer_max_mpdu; + u32 peer_mpdu_density; + u32 peer_rate_caps; + u32 peer_nss; + u32 peer_vht_caps; + u32 peer_phymode; + u32 peer_ht_info[2]; + struct wmi_rate_set_arg peer_legacy_rates; + struct wmi_rate_set_arg peer_ht_rates; + u32 rx_max_rate; + u32 rx_mcs_set; + u32 tx_max_rate; + u32 tx_mcs_set; + u8 vht_capable; + u32 tx_max_mcs_nss; + u32 peer_bw_rxnss_override; + bool is_pmf_enabled; + bool is_wme_set; + bool qos_flag; + bool apsd_flag; + bool ht_flag; + bool bw_40; + bool bw_80; + bool bw_160; + bool stbc_flag; + bool ldpc_flag; + bool static_mimops_flag; + bool dynamic_mimops_flag; + bool spatial_mux_flag; + bool vht_flag; + bool vht_ng_flag; + bool need_ptk_4_way; + bool need_gtk_2_way; + bool auth_flag; + bool safe_mode_enabled; + bool amsdu_disable; + /* use common structure */ + u8 peer_mac[eth_alen]; + + bool he_flag; + u32 peer_he_cap_macinfo[2]; + u32 peer_he_cap_macinfo_internal; + u32 peer_he_ops; + u32 peer_he_cap_phyinfo[wmi_host_max_hecap_phy_size]; + u32 peer_he_mcs_count; + u32 peer_he_rx_mcs_set[wmi_host_max_he_rate_set]; + u32 peer_he_tx_mcs_set[wmi_host_max_he_rate_set]; + struct ath11k_ppe_threshold peer_ppet; +}; + +struct 
wmi_peer_assoc_complete_cmd { + u32 tlv_header; + struct wmi_mac_addr peer_macaddr; + u32 vdev_id; + u32 peer_new_assoc; + u32 peer_associd; + u32 peer_flags; + u32 peer_caps; + u32 peer_listen_intval; + u32 peer_ht_caps; + u32 peer_max_mpdu; + u32 peer_mpdu_density; + u32 peer_rate_caps; + u32 peer_nss; + u32 peer_vht_caps; + u32 peer_phymode; + u32 peer_ht_info[2]; + u32 num_peer_legacy_rates; + u32 num_peer_ht_rates; + u32 peer_bw_rxnss_override; + struct wmi_ppe_threshold peer_ppet; + u32 peer_he_cap_info; + u32 peer_he_ops; + u32 peer_he_cap_phy[wmi_max_hecap_phy_size]; + u32 peer_he_mcs; + u32 peer_he_cap_info_ext; + u32 peer_he_cap_info_internal; +} __packed; + +struct wmi_stop_scan_cmd { + u32 tlv_header; + u32 requestor; + u32 scan_id; + u32 req_type; + u32 vdev_id; + u32 pdev_id; +}; + +struct scan_chan_list_params { + u32 pdev_id; + u16 nallchans; + struct channel_param ch_param[1]; +}; + +struct wmi_scan_chan_list_cmd { + u32 tlv_header; + u32 num_scan_chans; + u32 flags; + u32 pdev_id; +} __packed; + +#define wmi_mgmt_send_downld_len 64 + +#define wmi_tx_params_dword0_power genmask(7, 0) +#define wmi_tx_params_dword0_mcs_mask genmask(19, 8) +#define wmi_tx_params_dword0_nss_mask genmask(27, 20) +#define wmi_tx_params_dword0_retry_limit genmask(31, 28) + +#define wmi_tx_params_dword1_chain_mask genmask(7, 0) +#define wmi_tx_params_dword1_bw_mask genmask(14, 8) +#define wmi_tx_params_dword1_preamble_type genmask(19, 15) +#define wmi_tx_params_dword1_frame_type bit(20) +#define wmi_tx_params_dword1_rsvd genmask(31, 21) + +struct wmi_mgmt_send_params { + u32 tlv_header; + u32 tx_params_dword0; + u32 tx_params_dword1; +}; + +struct wmi_mgmt_send_cmd { + u32 tlv_header; + u32 vdev_id; + u32 desc_id; + u32 chanfreq; + u32 paddr_lo; + u32 paddr_hi; + u32 frame_len; + u32 buf_len; + u32 tx_params_valid; + + /* this tlv is followed by struct wmi_mgmt_frame */ + + /* followed by struct wmi_mgmt_send_params */ +} __packed; + +struct wmi_sta_powersave_mode_cmd { + 
u32 tlv_header; + u32 vdev_id; + u32 sta_ps_mode; +}; + +struct wmi_sta_smps_force_mode_cmd { + u32 tlv_header; + u32 vdev_id; + u32 forced_mode; +}; + +struct wmi_sta_smps_param_cmd { + u32 tlv_header; + u32 vdev_id; + u32 param; + u32 value; +}; + +struct wmi_bcn_prb_info { + u32 tlv_header; + u32 caps; + u32 erp; +} __packed; + +enum { + wmi_pdev_suspend, + wmi_pdev_suspend_and_disable_intr, +}; + +struct green_ap_ps_params { + u32 value; +}; + +struct wmi_pdev_green_ap_ps_enable_cmd_param { + u32 tlv_header; + u32 pdev_id; + u32 enable; +}; + +struct ap_ps_params { + u32 vdev_id; + u32 param; + u32 value; +}; + +struct vdev_set_params { + u32 if_id; + u32 param_id; + u32 param_value; +}; + +struct stats_request_params { + u32 stats_id; + u32 vdev_id; + u32 pdev_id; +}; + +enum set_init_cc_type { + wmi_country_info_type_alpha, + wmi_country_info_type_country_code, + wmi_country_info_type_regdomain, +}; + +enum set_init_cc_flags { + invalid_cc, + cc_is_set, + regdmn_is_set, + alpha_is_set, +}; + +struct wmi_init_country_params { + union { + u16 country_code; + u16 regdom_id; + u8 alpha2[3]; + } cc_info; + enum set_init_cc_flags flags; +}; + +struct wmi_init_country_cmd { + u32 tlv_header; + u32 pdev_id; + u32 init_cc_type; + union { + u32 country_code; + u32 regdom_id; + u32 alpha2; + } cc_info; +} __packed; + +struct wmi_pdev_pktlog_filter_info { + u32 tlv_header; + struct wmi_mac_addr peer_macaddr; +} __packed; + +struct wmi_pdev_pktlog_filter_cmd { + u32 tlv_header; + u32 pdev_id; + u32 enable; + u32 filter_type; + u32 num_mac; +} __packed; + +enum ath11k_wmi_pktlog_enable { + ath11k_wmi_pktlog_enable_auto = 0, + ath11k_wmi_pktlog_enable_force = 1, +}; + +struct wmi_pktlog_enable_cmd { + u32 tlv_header; + u32 pdev_id; + u32 evlist; /* wmi_pktlog_event */ + u32 enable; +} __packed; + +struct wmi_pktlog_disable_cmd { + u32 tlv_header; + u32 pdev_id; +} __packed; + +#define dfs_phyerr_unit_test_cmd 0 +#define dfs_unit_test_module 0x2b +#define dfs_unit_test_token 
0xaa + +enum dfs_test_args_idx { + dfs_test_cmdid = 0, + dfs_test_pdev_id, + dfs_test_radar_param, + dfs_max_test_args, +}; + +struct wmi_dfs_unit_test_arg { + u32 cmd_id; + u32 pdev_id; + u32 radar_param; +}; + +struct wmi_unit_test_cmd { + u32 tlv_header; + u32 vdev_id; + u32 module_id; + u32 num_args; + u32 diag_token; + /* followed by test args*/ +} __packed; + +#define max_supported_rates 128 + +#define wmi_peer_auth 0x00000001 +#define wmi_peer_qos 0x00000002 +#define wmi_peer_need_ptk_4_way 0x00000004 +#define wmi_peer_need_gtk_2_way 0x00000010 +#define wmi_peer_he 0x00000400 +#define wmi_peer_apsd 0x00000800 +#define wmi_peer_ht 0x00001000 +#define wmi_peer_40mhz 0x00002000 +#define wmi_peer_stbc 0x00008000 +#define wmi_peer_ldpc 0x00010000 +#define wmi_peer_dyn_mimops 0x00020000 +#define wmi_peer_static_mimops 0x00040000 +#define wmi_peer_spatial_mux 0x00200000 +#define wmi_peer_vht 0x02000000 +#define wmi_peer_80mhz 0x04000000 +#define wmi_peer_pmf 0x08000000 +/* todo: place holder for wlan_peer_f_ps_presend_required = 0x10000000. 
+ * need to be cleaned up + */ +#define wmi_peer_is_p2p_capable 0x20000000 +#define wmi_peer_160mhz 0x40000000 +#define wmi_peer_safemode_en 0x80000000 + +struct beacon_tmpl_params { + u8 vdev_id; + u32 tim_ie_offset; + u32 tmpl_len; + u32 tmpl_len_aligned; + u32 csa_switch_count_offset; + u32 ext_csa_switch_count_offset; + u8 *frm; +}; + +struct wmi_rate_set { + u32 num_rates; + u32 rates[(max_supported_rates / 4) + 1]; +}; + +struct wmi_vht_rate_set { + u32 tlv_header; + u32 rx_max_rate; + u32 rx_mcs_set; + u32 tx_max_rate; + u32 tx_mcs_set; + u32 tx_max_mcs_nss; +} __packed; + +struct wmi_he_rate_set { + u32 tlv_header; + u32 rx_mcs_set; + u32 tx_mcs_set; +} __packed; + +#define max_reg_rules 10 +#define reg_alpha2_len 2 + +enum wmi_start_event_param { + wmi_vdev_start_resp_event = 0, + wmi_vdev_restart_resp_event, +}; + +struct wmi_vdev_start_resp_event { + u32 vdev_id; + u32 requestor_id; + enum wmi_start_event_param resp_type; + u32 status; + u32 chain_mask; + u32 smps_mode; + union { + u32 mac_id; + u32 pdev_id; + }; + u32 cfgd_tx_streams; + u32 cfgd_rx_streams; +} __packed; + +/* vdev start response status codes */ +enum wmi_vdev_start_resp_status_code { + wmi_vdev_start_response_status_success = 0, + wmi_vdev_start_response_invalid_vdevid = 1, + wmi_vdev_start_response_not_supported = 2, + wmi_vdev_start_response_dfs_violation = 3, + wmi_vdev_start_response_invalid_regdomain = 4, +}; + +; +enum cc_setting_code { + reg_set_cc_status_pass = 0, + reg_current_alpha2_not_found = 1, + reg_init_alpha2_not_found = 2, + reg_set_cc_change_not_allowed = 3, + reg_set_cc_status_no_memory = 4, + reg_set_cc_status_fail = 5, +}; + +/* regaulatory rule flags passed by fw */ +#define regulatory_chan_disabled bit(0) +#define regulatory_chan_no_ir bit(1) +#define regulatory_chan_radar bit(3) +#define regulatory_chan_no_ofdm bit(6) +#define regulatory_chan_indoor_only bit(9) + +#define regulatory_chan_no_ht40 bit(4) +#define regulatory_chan_no_80mhz bit(7) +#define 
regulatory_chan_no_160mhz bit(8) +#define regulatory_chan_no_20mhz bit(11) +#define regulatory_chan_no_10mhz bit(12) + +enum { + wmi_reg_set_cc_status_pass = 0, + wmi_reg_current_alpha2_not_found = 1, + wmi_reg_init_alpha2_not_found = 2, + wmi_reg_set_cc_change_not_allowed = 3, + wmi_reg_set_cc_status_no_memory = 4, + wmi_reg_set_cc_status_fail = 5, +}; + +struct cur_reg_rule { + u16 start_freq; + u16 end_freq; + u16 max_bw; + u8 reg_power; + u8 ant_gain; + u16 flags; +}; + +struct cur_regulatory_info { + enum cc_setting_code status_code; + u8 num_phy; + u8 phy_id; + u16 reg_dmn_pair; + u16 ctry_code; + u8 alpha2[reg_alpha2_len + 1]; + u32 dfs_region; + u32 phybitmap; + u32 min_bw_2g; + u32 max_bw_2g; + u32 min_bw_5g; + u32 max_bw_5g; + u32 num_2g_reg_rules; + u32 num_5g_reg_rules; + struct cur_reg_rule *reg_rules_2g_ptr; + struct cur_reg_rule *reg_rules_5g_ptr; +}; + +struct wmi_reg_chan_list_cc_event { + u32 status_code; + u32 phy_id; + u32 alpha2; + u32 num_phy; + u32 country_id; + u32 domain_code; + u32 dfs_region; + u32 phybitmap; + u32 min_bw_2g; + u32 max_bw_2g; + u32 min_bw_5g; + u32 max_bw_5g; + u32 num_2g_reg_rules; + u32 num_5g_reg_rules; +} __packed; + +struct wmi_regulatory_rule_struct { + u32 tlv_header; + u32 freq_info; + u32 bw_pwr_info; + u32 flag_info; +}; + +struct wmi_peer_delete_resp_event { + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; +} __packed; + +struct wmi_bcn_tx_status_event { + u32 vdev_id; + u32 tx_status; +} __packed; + +struct wmi_vdev_stopped_event { + u32 vdev_id; +} __packed; + +struct wmi_pdev_bss_chan_info_event { + u32 pdev_id; + u32 freq; /* units in mhz */ + u32 noise_floor; /* units are dbm */ + /* rx clear - how often the channel was unused */ + u32 rx_clear_count_low; + u32 rx_clear_count_high; + /* cycle count - elapsed time during measured period, in clock ticks */ + u32 cycle_count_low; + u32 cycle_count_high; + /* tx cycle count - elapsed time spent in tx, in clock ticks */ + u32 tx_cycle_count_low; + u32 
tx_cycle_count_high; + /* rx cycle count - elapsed time spent in rx, in clock ticks */ + u32 rx_cycle_count_low; + u32 rx_cycle_count_high; + /*rx_cycle cnt for my bss in 64bits format */ + u32 rx_bss_cycle_count_low; + u32 rx_bss_cycle_count_high; +} __packed; + +#define wmi_vdev_install_key_compl_status_success 0 + +struct wmi_vdev_install_key_compl_event { + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 key_idx; + u32 key_flags; + u32 status; +} __packed; + +struct wmi_vdev_install_key_complete_arg { + u32 vdev_id; + const u8 *macaddr; + u32 key_idx; + u32 key_flags; + u32 status; +}; + +struct wmi_peer_assoc_conf_event { + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; +} __packed; + +struct wmi_peer_assoc_conf_arg { + u32 vdev_id; + const u8 *macaddr; +}; + +/* + * pdev statistics + */ +struct wmi_pdev_stats_base { + s32 chan_nf; + u32 tx_frame_count; /* cycles spent transmitting frames */ + u32 rx_frame_count; /* cycles spent receiving frames */ + u32 rx_clear_count; /* total channel busy time, evidently */ + u32 cycle_count; /* total on-channel time */ + u32 phy_err_count; + u32 chan_tx_pwr; +} __packed; + +struct wmi_pdev_stats_extra { + u32 ack_rx_bad; + u32 rts_bad; + u32 rts_good; + u32 fcs_bad; + u32 no_beacons; + u32 mib_int_count; +} __packed; + +struct wmi_pdev_stats_tx { + /* num htt cookies queued to dispatch list */ + s32 comp_queued; + + /* num htt cookies dispatched */ + s32 comp_delivered; + + /* num msdu queued to wal */ + s32 msdu_enqued; + + /* num mpdu queue to wal */ + s32 mpdu_enqued; + + /* num msdus dropped by wmm limit */ + s32 wmm_drop; + + /* num local frames queued */ + s32 local_enqued; + + /* num local frames done */ + s32 local_freed; + + /* num queued to hw */ + s32 hw_queued; + + /* num ppdu reaped from hw */ + s32 hw_reaped; + + /* num underruns */ + s32 underrun; + + /* num ppdus cleaned up in tx abort */ + s32 tx_abort; + + /* num mpdus requed by sw */ + s32 mpdus_requed; + + /* excessive retries */ + u32 tx_ko; + 
+ /* data hw rate code */ + u32 data_rc; + + /* scheduler self triggers */ + u32 self_triggers; + + /* frames dropped due to excessive sw retries */ + u32 sw_retry_failure; + + /* illegal rate phy errors */ + u32 illgl_rate_phy_err; + + /* wal pdev continuous xretry */ + u32 pdev_cont_xretry; + + /* wal pdev tx timeouts */ + u32 pdev_tx_timeout; + + /* wal pdev resets */ + u32 pdev_resets; + + /* frames dropped due to non-availability of stateless tids */ + u32 stateless_tid_alloc_failure; + + /* phy/bb underrun */ + u32 phy_underrun; + + /* mpdu is more than txop limit */ + u32 txop_ovf; +} __packed; + +struct wmi_pdev_stats_rx { + /* cnts any change in ring routing mid-ppdu */ + s32 mid_ppdu_route_change; + + /* total number of statuses processed */ + s32 status_rcvd; + + /* extra frags on rings 0-3 */ + s32 r0_frags; + s32 r1_frags; + s32 r2_frags; + s32 r3_frags; + + /* msdus / mpdus delivered to htt */ + s32 htt_msdus; + s32 htt_mpdus; + + /* msdus / mpdus delivered to local stack */ + s32 loc_msdus; + s32 loc_mpdus; + + /* amsdus that have more msdus than the status ring size */ + s32 oversize_amsdu; + + /* number of phy errors */ + s32 phy_errs; + + /* number of phy errors drops */ + s32 phy_err_drop; + + /* number of mpdu errors - fcs, mic, enc etc. 
*/ + s32 mpdu_errs; +} __packed; + +struct wmi_pdev_stats { + struct wmi_pdev_stats_base base; + struct wmi_pdev_stats_tx tx; + struct wmi_pdev_stats_rx rx; +} __packed; + +#define wlan_max_ac 4 +#define max_tx_rate_values 10 +#define max_tx_rate_values 10 + +struct wmi_vdev_stats { + u32 vdev_id; + u32 beacon_snr; + u32 data_snr; + u32 num_tx_frames[wlan_max_ac]; + u32 num_rx_frames; + u32 num_tx_frames_retries[wlan_max_ac]; + u32 num_tx_frames_failures[wlan_max_ac]; + u32 num_rts_fail; + u32 num_rts_success; + u32 num_rx_err; + u32 num_rx_discard; + u32 num_tx_not_acked; + u32 tx_rate_history[max_tx_rate_values]; + u32 beacon_rssi_history[max_tx_rate_values]; +} __packed; + +struct wmi_bcn_stats { + u32 vdev_id; + u32 tx_bcn_succ_cnt; + u32 tx_bcn_outage_cnt; +} __packed; + +struct wmi_stats_event { + u32 stats_id; + u32 num_pdev_stats; + u32 num_vdev_stats; + u32 num_peer_stats; + u32 num_bcnflt_stats; + u32 num_chan_stats; + u32 num_mib_stats; + u32 pdev_id; + u32 num_bcn_stats; + u32 num_peer_extd_stats; + u32 num_peer_extd2_stats; +} __packed; + +struct wmi_pdev_ctl_failsafe_chk_event { + u32 pdev_id; + u32 ctl_failsafe_status; +} __packed; + +struct wmi_pdev_csa_switch_ev { + u32 pdev_id; + u32 current_switch_count; + u32 num_vdevs; +} __packed; + +struct wmi_pdev_radar_ev { + u32 pdev_id; + u32 detection_mode; + u32 chan_freq; + u32 chan_width; + u32 detector_id; + u32 segment_id; + u32 timestamp; + u32 is_chirp; + s32 freq_offset; + s32 sidx; +} __packed; + +#define wmi_rx_status_ok 0x00 +#define wmi_rx_status_err_crc 0x01 +#define wmi_rx_status_err_decrypt 0x08 +#define wmi_rx_status_err_mic 0x10 +#define wmi_rx_status_err_key_cache_miss 0x20 + +#define wlan_mgmt_txrx_host_max_antenna 4 + +struct mgmt_rx_event_params { + u32 channel; + u32 snr; + u8 rssi_ctl[wlan_mgmt_txrx_host_max_antenna]; + u32 rate; + enum wmi_phy_mode phy_mode; + u32 buf_len; + int status; + u32 flags; + int rssi; + u32 tsf_delta; + u8 pdev_id; +}; + +#define ath_max_antenna 4 + 
+struct wmi_mgmt_rx_hdr { + u32 channel; + u32 snr; + u32 rate; + u32 phy_mode; + u32 buf_len; + u32 status; + u32 rssi_ctl[ath_max_antenna]; + u32 flags; + int rssi; + u32 tsf_delta; + u32 rx_tsf_l32; + u32 rx_tsf_u32; + u32 pdev_id; +} __packed; + +#define max_antenna_eight 8 + +struct wmi_rssi_ctl_ext { + u32 tlv_header; + u32 rssi_ctl_ext[max_antenna_eight - ath_max_antenna]; +}; + +struct wmi_mgmt_tx_compl_event { + u32 desc_id; + u32 status; + u32 pdev_id; +} __packed; + +struct wmi_scan_event { + u32 event_type; /* %wmi_scan_event_ */ + u32 reason; /* %wmi_scan_reason_ */ + u32 channel_freq; /* only valid for wmi_scan_event_foreign_channel */ + u32 scan_req_id; + u32 scan_id; + u32 vdev_id; + /* tsf timestamp when the scan event (%wmi_scan_event_) is completed + * in case of ap it is tsf of the ap vdev + * in case of sta connected state, this is the tsf of the ap + * in case of sta not connected, it will be the free running hw timer + */ + u32 tsf_timestamp; +} __packed; + +struct wmi_peer_sta_kickout_arg { + const u8 *mac_addr; +}; + +struct wmi_peer_sta_kickout_event { + struct wmi_mac_addr peer_macaddr; +} __packed; + +enum wmi_roam_reason { + wmi_roam_reason_better_ap = 1, + wmi_roam_reason_beacon_miss = 2, + wmi_roam_reason_low_rssi = 3, + wmi_roam_reason_suitable_ap_found = 4, + wmi_roam_reason_ho_failed = 5, + + /* keep last */ + wmi_roam_reason_max, +}; + +struct wmi_roam_event { + u32 vdev_id; + u32 reason; + u32 rssi; +} __packed; + +#define wmi_chan_info_start_resp 0 +#define wmi_chan_info_end_resp 1 + +struct wmi_chan_info_event { + u32 err_code; + u32 freq; + u32 cmd_flags; + u32 noise_floor; + u32 rx_clear_count; + u32 cycle_count; + u32 chan_tx_pwr_range; + u32 chan_tx_pwr_tp; + u32 rx_frame_count; + u32 my_bss_rx_cycle_count; + u32 rx_11b_mode_data_duration; + u32 tx_frame_cnt; + u32 mac_clk_mhz; + u32 vdev_id; +} __packed; + +struct ath11k_targ_cap { + u32 phy_capability; + u32 max_frag_entry; + u32 num_rf_chains; + u32 ht_cap_info; + u32 
vht_cap_info; + u32 vht_supp_mcs; + u32 hw_min_tx_power; + u32 hw_max_tx_power; + u32 sys_cap_info; + u32 min_pkt_size_enable; + u32 max_bcn_ie_size; + u32 max_num_scan_channels; + u32 max_supported_macs; + u32 wmi_fw_sub_feat_caps; + u32 txrx_chainmask; + u32 default_dbs_hw_mode_index; + u32 num_msdu_desc; +}; + +enum wmi_vdev_type { + wmi_vdev_type_ap = 1, + wmi_vdev_type_sta = 2, + wmi_vdev_type_ibss = 3, + wmi_vdev_type_monitor = 4, +}; + +enum wmi_vdev_subtype { + wmi_vdev_subtype_none, + wmi_vdev_subtype_p2p_device, + wmi_vdev_subtype_p2p_client, + wmi_vdev_subtype_p2p_go, + wmi_vdev_subtype_proxy_sta, + wmi_vdev_subtype_mesh_non_11s, + wmi_vdev_subtype_mesh_11s, +}; + +enum wmi_sta_powersave_param { + wmi_sta_ps_param_rx_wake_policy = 0, + wmi_sta_ps_param_tx_wake_threshold = 1, + wmi_sta_ps_param_pspoll_count = 2, + wmi_sta_ps_param_inactivity_time = 3, + wmi_sta_ps_param_uapsd = 4, +}; + +#define wmi_uapsd_ac_type_deli 0 +#define wmi_uapsd_ac_type_trig 1 + +#define wmi_uapsd_ac_bit_mask(ac, type) \ + ((type == wmi_uapsd_ac_type_deli) ? 
\ + (1 << (ac << 1)) : (1 << ((ac << 1) + 1))) + +enum wmi_sta_ps_param_uapsd { + wmi_sta_ps_uapsd_ac0_delivery_en = (1 << 0), + wmi_sta_ps_uapsd_ac0_trigger_en = (1 << 1), + wmi_sta_ps_uapsd_ac1_delivery_en = (1 << 2), + wmi_sta_ps_uapsd_ac1_trigger_en = (1 << 3), + wmi_sta_ps_uapsd_ac2_delivery_en = (1 << 4), + wmi_sta_ps_uapsd_ac2_trigger_en = (1 << 5), + wmi_sta_ps_uapsd_ac3_delivery_en = (1 << 6), + wmi_sta_ps_uapsd_ac3_trigger_en = (1 << 7), +}; + +#define wmi_sta_uapsd_max_interval_msec uint_max + +struct wmi_sta_uapsd_auto_trig_param { + u32 wmm_ac; + u32 user_priority; + u32 service_interval; + u32 suspend_interval; + u32 delay_interval; +}; + +struct wmi_sta_uapsd_auto_trig_cmd_fixed_param { + u32 vdev_id; + struct wmi_mac_addr peer_macaddr; + u32 num_ac; +}; + +struct wmi_sta_uapsd_auto_trig_arg { + u32 wmm_ac; + u32 user_priority; + u32 service_interval; + u32 suspend_interval; + u32 delay_interval; +}; + +enum wmi_sta_ps_param_tx_wake_threshold { + wmi_sta_ps_tx_wake_threshold_never = 0, + wmi_sta_ps_tx_wake_threshold_always = 1, + + /* values greater than one indicate that many tx attempts per beacon + * interval before the sta will wake up + */ +}; + +/* the maximum number of ps-poll frames the fw will send in response to + * traffic advertised in tim before waking up (by sending a null frame with ps + * = 0). value 0 has a special meaning: there is no maximum count and the fw + * will send as many ps-poll as are necessary to retrieve buffered bu. this + * parameter is used when the rx wake policy is + * wmi_sta_ps_rx_wake_policy_poll_uapsd and ignored when the rx wake + * policy is wmi_sta_ps_rx_wake_policy_wake. + */ +enum wmi_sta_ps_param_pspoll_count { + wmi_sta_ps_pspoll_count_no_max = 0, + /* values greater than 0 indicate the maximum numer of ps-poll frames + * fw will send before waking up. 
+ */ +}; + +/* u-apsd configuration of peer station from (re)assoc request and tspecs */ +enum wmi_ap_ps_param_uapsd { + wmi_ap_ps_uapsd_ac0_delivery_en = (1 << 0), + wmi_ap_ps_uapsd_ac0_trigger_en = (1 << 1), + wmi_ap_ps_uapsd_ac1_delivery_en = (1 << 2), + wmi_ap_ps_uapsd_ac1_trigger_en = (1 << 3), + wmi_ap_ps_uapsd_ac2_delivery_en = (1 << 4), + wmi_ap_ps_uapsd_ac2_trigger_en = (1 << 5), + wmi_ap_ps_uapsd_ac3_delivery_en = (1 << 6), + wmi_ap_ps_uapsd_ac3_trigger_en = (1 << 7), +}; + +/* u-apsd maximum service period of peer station */ +enum wmi_ap_ps_peer_param_max_sp { + wmi_ap_ps_peer_param_max_sp_unlimited = 0, + wmi_ap_ps_peer_param_max_sp_2 = 1, + wmi_ap_ps_peer_param_max_sp_4 = 2, + wmi_ap_ps_peer_param_max_sp_6 = 3, + max_wmi_ap_ps_peer_param_max_sp, +}; + +enum wmi_ap_ps_peer_param { + /** set uapsd configuration for a given peer. + * + * this include the delivery and trigger enabled state for each ac. + * the host mlme needs to set this based on ap capability and stations + * request set in the association request received from the station. + * + * lower 8 bits of the value specify the uapsd configuration. + * + * (see enum wmi_ap_ps_param_uapsd) + * the default value is 0. + */ + wmi_ap_ps_peer_param_uapsd = 0, + + /** + * set the service period for a uapsd capable station + * + * the service period from wme ie in the (re)assoc request frame. + * + * (see enum wmi_ap_ps_peer_param_max_sp) + */ + wmi_ap_ps_peer_param_max_sp = 1, + + /** time in seconds for aging out buffered frames + * for sta in power save + */ + wmi_ap_ps_peer_param_ageout_time = 2, + + /** specify frame types that are considered sifs + * resp trigger frame + */ + wmi_ap_ps_peer_param_sifs_resp_frmtype = 3, + + /** specifies the trigger state of tid. 
+ * valid only for uapsd frame type + */ + wmi_ap_ps_peer_param_sifs_resp_uapsd = 4, + + /* specifies the wnm sleep state of a sta */ + wmi_ap_ps_peer_param_wnm_sleep = 5, +}; + +#define disable_sifs_response_trigger 0 + +#define wmi_max_key_index 3 +#define wmi_max_key_len 32 + +#define wmi_key_pairwise 0x00 +#define wmi_key_group 0x01 + +#define wmi_cipher_none 0x0 /* clear key */ +#define wmi_cipher_wep 0x1 +#define wmi_cipher_tkip 0x2 +#define wmi_cipher_aes_ocb 0x3 +#define wmi_cipher_aes_ccm 0x4 +#define wmi_cipher_wapi 0x5 +#define wmi_cipher_ckip 0x6 +#define wmi_cipher_aes_cmac 0x7 +#define wmi_cipher_any 0x8 +#define wmi_cipher_aes_gcm 0x9 +#define wmi_cipher_aes_gmac 0xa + +/* value to disable fixed rate setting */ +#define wmi_fixed_rate_none (0xffff) + +#define ath11k_rc_version_offset 28 +#define ath11k_rc_preamble_offset 8 +#define ath11k_rc_nss_offset 5 + +#define ath11k_hw_rate_code(rate, nss, preamble) \ + ((1 << ath11k_rc_version_offset) | \ + ((nss) << ath11k_rc_nss_offset) | \ + ((preamble) << ath11k_rc_preamble_offset) | \ + (rate)) + +/* preamble types to be used with vdev fixed rate configuration */ +enum wmi_rate_preamble { + wmi_rate_preamble_ofdm, + wmi_rate_preamble_cck, + wmi_rate_preamble_ht, + wmi_rate_preamble_vht, + wmi_rate_preamble_he, +}; + +/** + * enum wmi_rtscts_prot_mode - enable/disable rts/cts and cts2self protection. + * @wmi_rts_cts_disabled : rts/cts protection is disabled. + * @wmi_use_rts_cts : rts/cts enabled. + * @wmi_use_cts2self : cts to self protection enabled. + */ +enum wmi_rtscts_prot_mode { + wmi_rts_cts_disabled = 0, + wmi_use_rts_cts = 1, + wmi_use_cts2self = 2, +}; + +/** + * enum wmi_rtscts_profile - selection of rts cts profile along with enabling + * protection mode. 
+ * @wmi_rtscts_for_no_rateseries - neither of rate-series should use rts-cts + * @wmi_rtscts_for_second_rateseries - only second rate-series will use rts-cts + * @wmi_rtscts_across_sw_retries - only the second rate-series will use rts-cts, + * but if there's a sw retry, both the rate + * series will use rts-cts. + * @wmi_rtscts_erp - rts/cts used for erp protection for every ppdu. + * @wmi_rtscts_for_all_rateseries - enable rts-cts for all rate series. + */ +enum wmi_rtscts_profile { + wmi_rtscts_for_no_rateseries = 0, + wmi_rtscts_for_second_rateseries = 1, + wmi_rtscts_across_sw_retries = 2, + wmi_rtscts_erp = 3, + wmi_rtscts_for_all_rateseries = 4, +}; + +struct ath11k_hal_reg_cap { + u32 eeprom_rd; + u32 eeprom_rd_ext; + u32 regcap1; + u32 regcap2; + u32 wireless_modes; + u32 low_2ghz_chan; + u32 high_2ghz_chan; + u32 low_5ghz_chan; + u32 high_5ghz_chan; +}; + +struct ath11k_mem_chunk { + void *vaddr; + dma_addr_t paddr; + u32 len; + u32 req_id; +}; + +#define wmi_skb_headroom sizeof(struct wmi_cmd_hdr) + +enum wmi_sta_ps_param_rx_wake_policy { + wmi_sta_ps_rx_wake_policy_wake = 0, + wmi_sta_ps_rx_wake_policy_poll_uapsd = 1, +}; + +enum ath11k_hw_txrx_mode { + ath11k_hw_txrx_raw = 0, + ath11k_hw_txrx_native_wifi = 1, + ath11k_hw_txrx_ethernet = 2, +}; + +struct wmi_wmm_params { + u32 tlv_header; + u32 cwmin; + u32 cwmax; + u32 aifs; + u32 txoplimit; + u32 acm; + u32 no_ack; +} __packed; + +struct wmi_wmm_params_arg { + u8 acm; + u8 aifs; + u8 cwmin; + u8 cwmax; + u16 txop; + u8 no_ack; +}; + +struct wmi_vdev_set_wmm_params_cmd { + u32 tlv_header; + u32 vdev_id; + struct wmi_wmm_params wmm_params[4]; + u32 wmm_param_type; +} __packed; + +struct wmi_wmm_params_all_arg { + struct wmi_wmm_params_arg ac_be; + struct wmi_wmm_params_arg ac_bk; + struct wmi_wmm_params_arg ac_vi; + struct wmi_wmm_params_arg ac_vo; +}; + +struct target_resource_config { + u32 num_vdevs; + u32 num_peers; + u32 num_active_peers; + u32 num_offload_peers; + u32 num_offload_reorder_buffs; + 
u32 num_peer_keys; + u32 num_tids; + u32 ast_skid_limit; + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 rx_timeout_pri[4]; + u32 rx_decap_mode; + u32 scan_max_pending_req; + u32 bmiss_offload_max_vdev; + u32 roam_offload_max_vdev; + u32 roam_offload_max_ap_profiles; + u32 num_mcast_groups; + u32 num_mcast_table_elems; + u32 mcast2ucast_mode; + u32 tx_dbg_log_size; + u32 num_wds_entries; + u32 dma_burst_size; + u32 mac_aggr_delim; + u32 rx_skip_defrag_timeout_dup_detection_check; + u32 vow_config; + u32 gtk_offload_max_vdev; + u32 num_msdu_desc; + u32 max_frag_entries; + u32 max_peer_ext_stats; + u32 smart_ant_cap; + u32 bk_minfree; + u32 be_minfree; + u32 vi_minfree; + u32 vo_minfree; + u32 rx_batchmode; + u32 tt_support; + u32 atf_config; + u32 iphdr_pad_config; + u32 qwrap_config:16, + alloc_frag_desc_for_data_pkt:16; + u32 num_tdls_vdevs; + u32 num_tdls_conn_table_entries; + u32 beacon_tx_offload_max_vdev; + u32 num_multicast_filter_entries; + u32 num_wow_filters; + u32 num_keep_alive_pattern; + u32 keep_alive_pattern_size; + u32 max_tdls_concurrent_sleep_sta; + u32 max_tdls_concurrent_buffer_sta; + u32 wmi_send_separate; + u32 num_ocb_vdevs; + u32 num_ocb_channels; + u32 num_ocb_schedules; + u32 num_ns_ext_tuples_cfg; + u32 bpf_instruction_size; + u32 max_bssid_rx_filters; + u32 use_pdev_id; + u32 peer_map_unmap_v2_support; +}; + +#define wmi_max_mem_reqs 32 + +#define max_radios 3 + +#define wmi_service_ready_timeout_hz (5 * hz) +#define wmi_send_timeout_hz (3 * hz) + +struct ath11k_wmi_base { + struct ath11k_base *ab; + struct ath11k_pdev_wmi wmi[max_radios]; + enum ath11k_htc_ep_id wmi_endpoint_id[max_radios]; + u32 max_msg_len[max_radios]; + + struct completion service_ready; + struct completion unified_ready; + declare_bitmap(svc_map, wmi_max_ext_service); + wait_queue_head_t tx_credits_wq; + const struct wmi_peer_flags_map *peer_flags; + u32 num_mem_chunks; + u32 rx_decap_mode; + struct wmi_host_mem_chunk mem_chunks[wmi_max_mem_reqs]; + + enum 
wmi_host_hw_mode_config_type preferred_hw_mode; + struct target_resource_config wlan_resource_config; + + struct ath11k_targ_cap *targ_cap; +}; + +int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, + u32 cmd_id); +struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); +int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id, + struct sk_buff *frame); +int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id, + struct ieee80211_mutable_offsets *offs, + struct sk_buff *bcn); +int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id); +int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, + const u8 *bssid); +int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id); +int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg, + bool restart); +int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr, + u32 vdev_id, u32 param_id, u32 param_val); +int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id, + u32 param_value, u8 pdev_id); +int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab); +int ath11k_wmi_cmd_init(struct ath11k_base *ab); +int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab); +int ath11k_wmi_connect(struct ath11k_base *ab); +int ath11k_wmi_pdev_attach(struct ath11k_base *ab, + u8 pdev_id); +int ath11k_wmi_attach(struct ath11k_base *ab); +void ath11k_wmi_detach(struct ath11k_base *ab); +int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr, + struct vdev_create_params *param); +int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id, + const u8 *addr, dma_addr_t paddr, + u8 tid, u8 ba_window_size_valid, + u32 ba_window_size); +int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar, + struct peer_create_params *param); +int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id, + u32 param_id, u32 param_value); + +int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id, + u32 param, u32 param_value); 
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms); +int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar, + const u8 *peer_addr, u8 vdev_id); +int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id); +void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg); +int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, + struct scan_req_params *params); +int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, + struct scan_cancel_param *param); +int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, + struct wmi_wmm_params_all_arg *param); +int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, + u32 pdev_id); +int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id); + +int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, + struct peer_assoc_params *param); +int ath11k_wmi_vdev_install_key(struct ath11k *ar, + struct wmi_vdev_install_key_arg *arg); +int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar, + enum wmi_bss_chan_info_req_type type); +int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar, + struct stats_request_params *param); +int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar, + u8 peer_addr[eth_alen], + struct peer_flush_params *param); +int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr, + struct ap_ps_params *param); +int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, + struct scan_chan_list_params *chan_list); +int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar, + u32 pdev_id); +int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar, + u32 vdev_id, u32 bcn_ctrl_op); +int +ath11k_wmi_send_init_country_cmd(struct ath11k *ar, + struct wmi_init_country_params init_cc_param); +int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter); +int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar); +int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable); +int 
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar, + struct rx_reorder_queue_remove_params *param); +int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar, + struct pdev_set_regdomain_params *param); +int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb, + struct ath11k_fw_stats *stats); +size_t ath11k_wmi_fw_stats_num_peers(struct list_head *head); +size_t ath11k_wmi_fw_stats_num_peers_extd(struct list_head *head); +size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head); +void ath11k_wmi_fw_stats_fill(struct ath11k *ar, + struct ath11k_fw_stats *fw_stats, u32 stats_id, + char *buf); +int ath11k_wmi_simulate_radar(struct ath11k *ar); +#endif
|
Networking
|
d5c65159f2895379e11ca13f62feabe93278985d
|
kalle valo
|
drivers
|
net
|
ath, ath11k, wireless
|
ath11k: add he support
|
add basic he support to the driver. the sband_iftype data is generated from the capabilities read from the fw.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['h', 'c']
| 5
| 270
| 10
|
--- diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h + struct ieee80211_sband_iftype_data + iftype[num_nl80211_bands][num_nl80211_iftypes]; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c - /* todo: implementation */ + const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; + u16 v; + + if (!he_cap->has_he) + return; + + arg->he_flag = true; + + memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info, + sizeof(arg->peer_he_cap_macinfo)); + memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info, + sizeof(arg->peer_he_cap_phyinfo)); + memcpy(&arg->peer_he_ops, &vif->bss_conf.he_operation, + sizeof(arg->peer_he_ops)); + + /* the top most byte is used to indicate bss color info */ + arg->peer_he_ops &= 0xffffff; + + if (he_cap->he_cap_elem.phy_cap_info[6] & + ieee80211_he_phy_cap6_ppe_threshold_present) { + int bit = 7; + int nss, ru; + + arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] & + ieee80211_ppe_thres_nss_mask; + arg->peer_ppet.ru_bit_mask = + (he_cap->ppe_thres[0] & + ieee80211_ppe_thres_ru_index_bitmask_mask) >> + ieee80211_ppe_thres_ru_index_bitmask_pos; + + for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) { + for (ru = 0; ru < 4; ru++) { + u32 val = 0; + int i; + + if ((arg->peer_ppet.ru_bit_mask & bit(ru)) == 0) + continue; + for (i = 0; i < 6; i++) { + val >>= 1; + val |= ((he_cap->ppe_thres[bit / 8] >> + (bit % 8)) & 0x1) << 5; + bit++; + } + arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |= + val << (ru * 6); + } + } + } + + switch (sta->bandwidth) { + case ieee80211_sta_rx_bw_160: + if (he_cap->he_cap_elem.phy_cap_info[0] & + ieee80211_he_phy_cap0_channel_width_set_80plus80_mhz_in_5g) { + v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80); + 
arg->peer_he_rx_mcs_set[wmi_hecap_txrx_mcs_nss_idx_80_80] = v; + + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80); + arg->peer_he_tx_mcs_set[wmi_hecap_txrx_mcs_nss_idx_80_80] = v; + + arg->peer_he_mcs_count++; + } + v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160); + arg->peer_he_rx_mcs_set[wmi_hecap_txrx_mcs_nss_idx_160] = v; + + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160); + arg->peer_he_tx_mcs_set[wmi_hecap_txrx_mcs_nss_idx_160] = v; + + arg->peer_he_mcs_count++; + /* fall through */ + + default: + v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80); + arg->peer_he_rx_mcs_set[wmi_hecap_txrx_mcs_nss_idx_80] = v; + + v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80); + arg->peer_he_tx_mcs_set[wmi_hecap_txrx_mcs_nss_idx_80] = v; + + arg->peer_he_mcs_count++; + break; + } +static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar, + struct ieee80211_sta *sta) +{ + if (sta->bandwidth == ieee80211_sta_rx_bw_160) { + if (sta->he_cap.he_cap_elem.phy_cap_info[0] & + ieee80211_he_phy_cap0_channel_width_set_160mhz_in_5g) + return mode_11ax_he160; + else if (sta->he_cap.he_cap_elem.phy_cap_info[0] & + ieee80211_he_phy_cap0_channel_width_set_80plus80_mhz_in_5g) + return mode_11ax_he80_80; + /* not sure if this is a valid case? 
*/ + return mode_11ax_he160; + } + + if (sta->bandwidth == ieee80211_sta_rx_bw_80) + return mode_11ax_he80; + + if (sta->bandwidth == ieee80211_sta_rx_bw_40) + return mode_11ax_he40; + + if (sta->bandwidth == ieee80211_sta_rx_bw_20) + return mode_11ax_he20; + + return mode_unknown; +} + - if (sta->vht_cap.vht_supported && + if (sta->he_cap.has_he) { + if (sta->bandwidth == ieee80211_sta_rx_bw_80) + phymode = mode_11ax_he80_2g; + else if (sta->bandwidth == ieee80211_sta_rx_bw_40) + phymode = mode_11ax_he40_2g; + else + phymode = mode_11ax_he20_2g; + } else if (sta->vht_cap.vht_supported && - /* todo: he */ - - /* check vht first */ - if (sta->vht_cap.vht_supported && + /* check he first */ + if (sta->he_cap.has_he) { + phymode = ath11k_mac_get_phymode_he(ar, sta); + } else if (sta->vht_cap.vht_supported && - /* todo: he phymode */ +static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet, + u8 *he_ppet) +{ + int nss, ru; + u8 bit = 7; + + he_ppet[0] = fw_ppet->numss_m1 & ieee80211_ppe_thres_nss_mask; + he_ppet[0] |= (fw_ppet->ru_bit_mask << + ieee80211_ppe_thres_ru_index_bitmask_pos) & + ieee80211_ppe_thres_ru_index_bitmask_mask; + for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { + for (ru = 0; ru < 4; ru++) { + u8 val; + int i; + + if ((fw_ppet->ru_bit_mask & bit(ru)) == 0) + continue; + val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & + 0x3f; + val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); + for (i = 5; i >= 0; i--) { + he_ppet[bit / 8] |= + ((val >> i) & 0x1) << ((bit % 8)); + bit++; + } + } + } +} + +static int ath11k_mac_copy_he_cap(struct ath11k *ar, + struct ath11k_pdev_cap *cap, + struct ieee80211_sband_iftype_data *data, + int band) +{ + int i, idx = 0; + + for (i = 0; i < num_nl80211_iftypes; i++) { + struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; + struct ath11k_band_cap *band_cap = &cap->band[band]; + struct ieee80211_he_cap_elem *he_cap_elem = + &he_cap->he_cap_elem; + + switch (i) { + case nl80211_iftype_station: + case 
nl80211_iftype_ap: + break; + + default: + continue; + } + + data[idx].types_mask = bit(i); + he_cap->has_he = true; + memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, + sizeof(he_cap_elem->mac_cap_info)); + memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, + sizeof(he_cap_elem->phy_cap_info)); + + he_cap_elem->mac_cap_info[1] |= + ieee80211_he_mac_cap1_tf_mac_pad_dur_mask; + he_cap_elem->phy_cap_info[4] &= + ~ieee80211_he_phy_cap4_beamformee_max_sts_under_80mhz_mask; + he_cap_elem->phy_cap_info[4] &= + ~ieee80211_he_phy_cap4_beamformee_max_sts_above_80mhz_mask; + he_cap_elem->phy_cap_info[4] |= (ar->num_tx_chains - 1) << 2; + + he_cap_elem->phy_cap_info[5] &= + ~ieee80211_he_phy_cap5_beamformee_num_snd_dim_under_80mhz_mask; + he_cap_elem->phy_cap_info[5] &= + ~ieee80211_he_phy_cap5_beamformee_num_snd_dim_above_80mhz_mask; + he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1; + + switch (i) { + case nl80211_iftype_ap: + he_cap_elem->phy_cap_info[9] |= + ieee80211_he_phy_cap9_rx_1024_qam_less_than_242_tone_ru; + break; + case nl80211_iftype_station: + he_cap_elem->mac_cap_info[0] &= + ~ieee80211_he_mac_cap0_twt_res; + he_cap_elem->mac_cap_info[0] |= + ieee80211_he_mac_cap0_twt_req; + he_cap_elem->phy_cap_info[9] |= + ieee80211_he_phy_cap9_tx_1024_qam_less_than_242_tone_ru; + break; + } + + he_cap->he_mcs_nss_supp.rx_mcs_80 = + cpu_to_le16(band_cap->he_mcs & 0xffff); + he_cap->he_mcs_nss_supp.tx_mcs_80 = + cpu_to_le16(band_cap->he_mcs & 0xffff); + he_cap->he_mcs_nss_supp.rx_mcs_160 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + he_cap->he_mcs_nss_supp.tx_mcs_160 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + he_cap->he_mcs_nss_supp.rx_mcs_80p80 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + he_cap->he_mcs_nss_supp.tx_mcs_80p80 = + cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); + + memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); + if (he_cap_elem->phy_cap_info[6] & + ieee80211_he_phy_cap6_ppe_threshold_present) + 
ath11k_gen_ppe_thresh(&band_cap->he_ppet, + he_cap->ppe_thres); + idx++; + } + + return idx; +} + +static void ath11k_mac_setup_he_cap(struct ath11k *ar, + struct ath11k_pdev_cap *cap) +{ + struct ieee80211_supported_band *band = null; + int count = 0; + + if (cap->supported_bands & wmi_host_wlan_2g_cap) { + count = ath11k_mac_copy_he_cap(ar, cap, + ar->mac.iftype[nl80211_band_2ghz], + nl80211_band_2ghz); + band = &ar->mac.sbands[nl80211_band_2ghz]; + band->iftype_data = ar->mac.iftype[nl80211_band_2ghz]; + } + + if (cap->supported_bands & wmi_host_wlan_5g_cap) { + count = ath11k_mac_copy_he_cap(ar, cap, + ar->mac.iftype[nl80211_band_5ghz], + nl80211_band_5ghz); + band = &ar->mac.sbands[nl80211_band_5ghz]; + band->iftype_data = ar->mac.iftype[nl80211_band_5ghz]; + } + + band->n_iftype_data = count; +} + - /* reload ht/vht capability */ + /* reload ht/vht/he capability */ + ath11k_mac_setup_he_cap(ar, &ar->pdev->cap); + ath11k_mac_setup_he_cap(ar, cap); diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c + ch->allow_he = true; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c + if (arg->channel.allow_he) + chan->info |= wmi_chan_info_allow_he; - if (tchan_info->allow_vht) + if (tchan_info->allow_he) + chan_info->info |= wmi_chan_info_allow_he; + else if (tchan_info->allow_vht) diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h + allow_he:1, -#define wmi_host_max_he_rate_set 1 +#define wmi_host_max_he_rate_set 3 +#define wmi_hecap_txrx_mcs_nss_idx_80 0 +#define wmi_hecap_txrx_mcs_nss_idx_160 1 +#define wmi_hecap_txrx_mcs_nss_idx_80_80 2
|
Networking
|
9f056ed8ee01ad6898db49707cdc70ce923be3d0
|
john crispin
|
drivers
|
net
|
ath, ath11k, wireless
|
ath11k: add rx stats support for radiotap
|
mac80211 expects the definition of what he rate info is available inside a struct prepended to the skb.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['c']
| 1
| 10
| 0
|
--- diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c + static const struct ieee80211_radiotap_he known = { + .data1 = cpu_to_le16(ieee80211_radiotap_he_data1_data_mcs_known), + .data2 = cpu_to_le16(ieee80211_radiotap_he_data2_gi_known), + }; + struct ieee80211_radiotap_he *he = null; + if (status->encoding == rx_enc_he) { + he = skb_push(msdu, sizeof(known)); + memcpy(he, &known, sizeof(known)); + status->flag |= rx_flag_radiotap_he; + }
|
Networking
|
e4eb7b5c335ccfdb5c2a9a2004aca7cb81e0d577
|
john crispin
|
drivers
|
net
|
ath, ath11k, wireless
|
ath11k: add twt support
|
add target wait time wmi calls to the driver. en/disable the support from when the bss_config changes. we ignore the cmd completion events.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['c', 'h']
| 3
| 173
| 0
|
--- diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c + if (he_cap->he_cap_elem.mac_cap_info[0] & ieee80211_he_mac_cap0_twt_res) + arg->twt_responder = true; + if (he_cap->he_cap_elem.mac_cap_info[0] & ieee80211_he_mac_cap0_twt_req) + arg->twt_requester = true; + + if (changed & bss_changed_twt) { + if (info->twt_requester || info->twt_responder) + ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev_idx); + else + ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev_idx); + } + diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c + if (param->twt_requester) + cmd->peer_flags |= wmi_peer_twt_req; + if (param->twt_responder) + cmd->peer_flags |= wmi_peer_twt_resp; +int +ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_sc->ab; + struct wmi_twt_enable_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (void *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_twt_enable_cmd) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + cmd->pdev_id = pdev_id; + cmd->sta_cong_timer_ms = ath11k_twt_def_sta_cong_timer_ms; + cmd->default_slot_size = ath11k_twt_def_default_slot_size; + cmd->congestion_thresh_setup = ath11k_twt_def_congestion_thresh_setup; + cmd->congestion_thresh_teardown = + ath11k_twt_def_congestion_thresh_teardown; + cmd->congestion_thresh_critical = + ath11k_twt_def_congestion_thresh_critical; + cmd->interference_thresh_teardown = + ath11k_twt_def_interference_thresh_teardown; + cmd->interference_thresh_setup = + ath11k_twt_def_interference_thresh_setup; + cmd->min_no_sta_setup = 
ath11k_twt_def_min_no_sta_setup; + cmd->min_no_sta_teardown = ath11k_twt_def_min_no_sta_teardown; + cmd->no_of_bcast_mcast_slots = ath11k_twt_def_no_of_bcast_mcast_slots; + cmd->min_no_twt_slots = ath11k_twt_def_min_no_twt_slots; + cmd->max_no_sta_twt = ath11k_twt_def_max_no_sta_twt; + cmd->mode_check_interval = ath11k_twt_def_mode_check_interval; + cmd->add_sta_slot_interval = ath11k_twt_def_add_sta_slot_interval; + cmd->remove_sta_slot_interval = + ath11k_twt_def_remove_sta_slot_interval; + /* todo add mbssid support */ + cmd->mbss_support = 0; + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_twt_enable_cmdid); + if (ret) { + ath11k_warn(ab, "failed to send wmi_twt_enable_cmdid"); + dev_kfree_skb(skb); + } + return ret; +} + +int +ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_sc->ab; + struct wmi_twt_disable_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (void *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, wmi_tag_twt_disable_cmd) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + cmd->pdev_id = pdev_id; + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_twt_disable_cmdid); + if (ret) { + ath11k_warn(ab, "failed to send wmi_twt_dieable_cmdid"); + dev_kfree_skb(skb); + } + return ret; +} + + wmi_cfg->sched_params = tg_cfg->sched_params; + wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count; + wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count; + config.twt_ap_pdev_count = 2; + config.twt_ap_sta_count = 1000; + case wmi_twt_enable_eventid: + case wmi_twt_disable_eventid: diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h + wmi_grp_wlm = 0x3c, + wmi_grp_11k_offload = 0x3d, + wmi_grp_twt = 0x3e, + wmi_twt_enable_cmdid 
= wmi_tlv_cmd(wmi_grp_twt), + wmi_twt_disable_cmdid, + wmi_twt_add_dialog_cmdid, + wmi_twt_del_dialog_cmdid, + wmi_twt_pause_dialog_cmdid, + wmi_twt_resume_dialog_cmdid, + + wmi_twt_enable_eventid = wmi_tlv_cmd(wmi_grp_twt), + wmi_twt_disable_eventid, + wmi_twt_add_dialog_eventid, + wmi_twt_del_dialog_eventid, + wmi_twt_pause_dialog_eventid, + wmi_twt_resume_dialog_eventid, + u32 sched_params; + u32 twt_ap_pdev_count; + u32 twt_ap_sta_count; + bool twt_responder; + bool twt_requester; +#define wmi_peer_twt_req 0x00400000 +#define wmi_peer_twt_resp 0x00800000 +#define ath11k_twt_def_sta_cong_timer_ms 5000 +#define ath11k_twt_def_default_slot_size 10 +#define ath11k_twt_def_congestion_thresh_setup 50 +#define ath11k_twt_def_congestion_thresh_teardown 20 +#define ath11k_twt_def_congestion_thresh_critical 100 +#define ath11k_twt_def_interference_thresh_teardown 80 +#define ath11k_twt_def_interference_thresh_setup 50 +#define ath11k_twt_def_min_no_sta_setup 10 +#define ath11k_twt_def_min_no_sta_teardown 2 +#define ath11k_twt_def_no_of_bcast_mcast_slots 2 +#define ath11k_twt_def_min_no_twt_slots 2 +#define ath11k_twt_def_max_no_sta_twt 500 +#define ath11k_twt_def_mode_check_interval 10000 +#define ath11k_twt_def_add_sta_slot_interval 1000 +#define ath11k_twt_def_remove_sta_slot_interval 5000 + +struct wmi_twt_enable_params_cmd { + u32 tlv_header; + u32 pdev_id; + u32 sta_cong_timer_ms; + u32 mbss_support; + u32 default_slot_size; + u32 congestion_thresh_setup; + u32 congestion_thresh_teardown; + u32 congestion_thresh_critical; + u32 interference_thresh_teardown; + u32 interference_thresh_setup; + u32 min_no_sta_setup; + u32 min_no_sta_teardown; + u32 no_of_bcast_mcast_slots; + u32 min_no_twt_slots; + u32 max_no_sta_twt; + u32 mode_check_interval; + u32 add_sta_slot_interval; + u32 remove_sta_slot_interval; +}; + +struct wmi_twt_disable_params_cmd { + u32 tlv_header; + u32 pdev_id; +}; + + u32 sched_params; + u32 twt_ap_pdev_count; + u32 twt_ap_sta_count; +int 
ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id); +int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
|
Networking
|
6d293d447670da6325cc9c8fb809878d1930c234
|
john crispin
|
drivers
|
net
|
ath, ath11k, wireless
|
ath11k: add necessary peer assoc params in wmi dbg
|
add necessary peer assoc params in wmi debug message while sending the peer assoc command to firmware to aid in debugging.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['c']
| 1
| 11
| 2
|
--- diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c - "wmi peer assoc vdev id %d assoc id %d peer mac %pm ", - param->vdev_id, param->peer_associd, param->peer_mac); + "wmi peer assoc vdev id %d assoc id %d peer mac %pm peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x ", + cmd->vdev_id, cmd->peer_associd, param->peer_mac, + cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps, + cmd->peer_listen_intval, cmd->peer_ht_caps, + cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode, + cmd->peer_mpdu_density, + cmd->peer_vht_caps, cmd->peer_he_cap_info, + cmd->peer_he_ops, cmd->peer_he_cap_info_ext, + cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1], + cmd->peer_he_cap_phy[2], + cmd->peer_bw_rxnss_override);
|
Networking
|
1cb616a3b497d6f8b8160a3fee97e822cdb38f39
|
sriram r
|
drivers
|
net
|
ath, ath11k, wireless
|
ath11k: add spatial reuse support
|
trigger the wmi call en/disabling obss pd when the bss config changes or we assoc to an ap that broadcasts the ie.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['c', 'h']
| 3
| 61
| 0
|
--- diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c + + ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, + &bss_conf->he_obss_pd); + if (ret) + ath11k_warn(ar->ab, "failed to set vdev %i obss pd parameters: %d ", + arvif->vdev_id, ret); + if (changed & bss_changed_he_obss_pd) + ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, + &info->he_obss_pd); + diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c +int +ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id, + struct ieee80211_he_obss_pd *he_obss_pd) +{ + struct ath11k_pdev_wmi *wmi = ar->wmi; + struct ath11k_base *ab = wmi->wmi_sc->ab; + struct wmi_obss_spatial_reuse_params_cmd *cmd; + struct sk_buff *skb; + int ret, len; + + len = sizeof(*cmd); + + skb = ath11k_wmi_alloc_skb(wmi->wmi_sc, len); + if (!skb) + return -enomem; + + cmd = (void *)skb->data; + cmd->tlv_header = field_prep(wmi_tlv_tag, + wmi_tag_obss_spatial_reuse_set_cmd) | + field_prep(wmi_tlv_len, len - tlv_hdr_size); + cmd->vdev_id = vdev_id; + cmd->enable = he_obss_pd->enable; + cmd->obss_min = he_obss_pd->min_offset; + cmd->obss_max = he_obss_pd->max_offset; + + ret = ath11k_wmi_cmd_send(wmi, skb, + wmi_pdev_obss_pd_spatial_reuse_cmdid); + if (ret) { + ath11k_warn(ab, + "failed to send wmi_pdev_obss_pd_spatial_reuse_cmdid"); + dev_kfree_skb(skb); + } + return ret; +} + diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h + wmi_grp_motion_det = 0x3f, + wmi_grp_spatial_reuse = 0x40, + wmi_pdev_obss_pd_spatial_reuse_cmdid = + wmi_tlv_cmd(wmi_grp_spatial_reuse), + wmi_pdev_obss_pd_spatial_reuse_set_def_obss_thresh_cmdid, +struct wmi_obss_spatial_reuse_params_cmd 
{ + u32 tlv_header; + u32 pdev_id; + u32 enable; + s32 obss_min; + s32 obss_max; + u32 vdev_id; +}; + +int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id, + struct ieee80211_he_obss_pd *he_obss_pd);
|
Networking
|
3f8be640077a33ef3bd717ade8b50bd0ef815ba9
|
john crispin
|
drivers
|
net
|
ath, ath11k, wireless
|
ath11k: add support for controlling tx power to a station
|
this patch will add the support to control the transmit power for traffic to a station associated with the ap.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
driver for qualcomm ieee 802.11ax devices
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['ath11k ']
|
['h', 'c']
| 2
| 40
| 0
|
--- diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h --- a/drivers/net/wireless/ath/ath11k/debug.h +++ b/drivers/net/wireless/ath/ath11k/debug.h +#define ath11k_tx_power_max_val 70 +#define ath11k_tx_power_min_val 0 + diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c +static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct ath11k *ar = hw->priv; + struct ath11k_vif *arvif = (void *)vif->drv_priv; + int ret = 0; + s16 txpwr; + + if (sta->txpwr.type == nl80211_tx_power_automatic) { + txpwr = 0; + } else { + txpwr = sta->txpwr.power; + if (!txpwr) + return -einval; + } + + if (txpwr > ath11k_tx_power_max_val || txpwr < ath11k_tx_power_min_val) + return -einval; + + mutex_lock(&ar->conf_mutex); + + ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, + wmi_peer_use_fixed_pwr, txpwr); + if (ret) { + ath11k_warn(ar->ab, "failed to set tx power for station ret: %d ", + ret); + goto out; + } + +out: + mutex_unlock(&ar->conf_mutex); + return ret; +} + + .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr, + wiphy_ext_feature_set(ar->hw->wiphy, nl80211_ext_feature_sta_tx_pwr);
|
Networking
|
64f1d7e94daaaf53208e9505448406b5c8fbbd8a
|
maharaja kennadyrajan
|
drivers
|
net
|
ath, ath11k, wireless
|
net: bcmgenet: add software counters to track reallocations
|
when inserting the tsb, keep track of how many times we had to do it and if there was a failure in doing so, this helps profile the driver for possibly incorrect headroom settings.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add software counters to track reallocations
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bcmgenet ']
|
['c', 'h']
| 2
| 8
| 0
|
--- diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c + stat_genet_soft_mib("tx_realloc_tsb", mib.tx_realloc_tsb), + stat_genet_soft_mib("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), + struct bcmgenet_priv *priv = netdev_priv(dev); + priv->mib.tx_realloc_tsb_failed++; + priv->mib.tx_realloc_tsb++; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed;
|
Networking
|
f1af17c0b7a51357699bba7c8cba1276c9486907
|
doug berger florian fainelli f fainelli gmail com
|
drivers
|
net
|
broadcom, ethernet, genet
|
net: bcmgenet: turn on offloads by default
|
we can turn on the rx/tx checksum offloads and the scatter/gather features by default and make sure that those are properly reflected back to e.g: stacked devices such as vlan.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
turn on offloads by default
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bcmgenet ']
|
['c']
| 1
| 5
| 3
|
--- diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c - /* set hardware features */ - dev->hw_features |= netif_f_sg | netif_f_highdma | netif_f_hw_csum | - netif_f_rxcsum; + /* set default features */ + dev->features |= netif_f_sg | netif_f_highdma | netif_f_hw_csum | + netif_f_rxcsum; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features;
|
Networking
|
ae895c49905cd99aca24d23361b72ed559b30f4f
|
doug berger florian fainelli f fainelli gmail com
|
drivers
|
net
|
broadcom, ethernet, genet
|
net: bcmgenet: enable netif_f_hw_csum feature
|
the genet hardware should be capable of generating ip checksums using the netif_f_hw_csum feature, so switch to using that feature instead of the depricated netif_f_ip_csum and netif_f_ipv6_csum.
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
enable
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bcmgenet ']
|
['c']
| 1
| 12
| 17
|
--- diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c - desc_64b_en = !!(wanted & (netif_f_ip_csum | netif_f_ipv6_csum)); + desc_64b_en = !!(wanted & netif_f_hw_csum); - if (changed & (netif_f_ip_csum | netif_f_ipv6_csum)) + if (changed & netif_f_hw_csum) - return skb; + /* don't use udp flag */ + ip_proto = 0; + break; - (offset + skb->csum_offset); + (offset + skb->csum_offset) | + status_tx_csum_lv; - /* set the length valid bit for tcp and udp and just set - * the special udp flag for ipv4, else just set to 0. - */ - if (ip_proto == ipproto_tcp || ip_proto == ipproto_udp) { - tx_csum_info |= status_tx_csum_lv; - if (ip_proto == ipproto_udp && - ip_ver == htons(eth_p_ip)) - tx_csum_info |= status_tx_csum_proto_udp; - } else { - tx_csum_info = 0; - } + /* set the special udp flag for udp */ + if (ip_proto == ipproto_udp) + tx_csum_info |= status_tx_csum_proto_udp; - dev->hw_features |= netif_f_sg | netif_f_ip_csum | - netif_f_ipv6_csum | netif_f_highdma | netif_f_rxcsum; + dev->hw_features |= netif_f_sg | netif_f_highdma | netif_f_hw_csum | + netif_f_rxcsum;
|
Networking
|
dd8e911b7f2af2315184a8e551ca77ff226a4ec5
|
doug berger florian fainelli f fainelli gmail com
|
drivers
|
net
|
broadcom, ethernet, genet
|
bnxt_en: add support for devlink info command
|
display the following information via devlink info command: - driver name - board id - broad revision - board serial number - board fw version - fw parameter set version - fw app version - fw management version - fw roce version
|
this release adds wireguard, an fast and secure vpn design that aims to replace other vpns; initial support for usb 4; support for time namespaces; asynchronous ssd trimming in btrfs; initial merge of the multipath tcp support; support for virtualbox guest shared folders; a simple file system to expose the zones of zoned storage devices as files; boot-time tracing, which lets to trace the boot-time process with all the features of ftrace; and bootconfig, created to configure boot-time tracing, which lets to extend the command line in a file attached to initrds. as always, there are many other new drivers and improvements.
|
add support for devlink info command
|
['core (various)', 'file systems', 'memory management', 'block layer', 'tracing, perf and bpf', 'virtualization', 'power management', 'cryptography', 'security', 'networking', 'architectures x86 s390 riscv mips powerpc csky microblaze sparc uml arc']
|
['graphics', 'power management', 'storage', 'drivers in the staging area', 'networking', 'audio', 'tablets, touch screens, keyboards, mouses', 'tv tuners, webcams, video capturers', 'universal serial bus', 'serial peripheral interface (spi)', 'watchdog', 'serial', 'device voltage and frequency scaling', 'voltage, current regulators, power capping, power supply', 'real time clock (rtc)', 'pin controllers (pinctrl)', 'multi media card (mmc)', 'memory technology devices (mtd)', 'industrial i/o (iio)', 'multi function devices (mfd)', 'pulse-width modulation (pwm)', 'inter-integrated circuit (i2c + i3c)', 'hardware monitoring (hwmon)', 'general purpose i/o (gpio)', 'leds', 'dma engines', 'hardware random number generator (hwrng)', 'cryptography hardware acceleration', 'pci', 'thunderbolt', 'clock', 'phy ("physical layer" framework)', 'various']
|
['bnxt_en ']
|
['c', 'h']
| 2
| 138
| 0
|
- driver name - board id - broad revision - board serial number - board fw version - fw parameter set version - fw app version - fw management version - fw roce version --- diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack); + + .info_get = bnxt_dl_info_get, +static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, + union devlink_param_value *nvm_cfg_ver) +{ + struct hwrm_nvm_get_variable_input req = {0}; + union bnxt_nvm_data *data; + dma_addr_t data_dma_addr; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, hwrm_nvm_get_variable, -1, -1); + data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), + &data_dma_addr, gfp_kernel); + if (!data) + return -enomem; + + req.dest_data_addr = cpu_to_le64(data_dma_addr); + req.data_len = cpu_to_le16(bnxt_nvm_cfg_ver_bits); + req.option_num = cpu_to_le16(nvm_off_nvm_cfg_ver); + + rc = hwrm_send_message_silent(bp, &req, sizeof(req), hwrm_cmd_timeout); + if (!rc) + bnxt_copy_from_nvm_data(nvm_cfg_ver, data, + bnxt_nvm_cfg_ver_bits, + bnxt_nvm_cfg_ver_bytes); + + dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + return rc; +} + +static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + union devlink_param_value nvm_cfg_ver; + struct hwrm_ver_get_output *ver_resp; + char mgmt_ver[fw_ver_str_len]; + char roce_ver[fw_ver_str_len]; + char fw_ver[fw_ver_str_len]; + char buf[32]; + int rc; + + rc = devlink_info_driver_name_put(req, drv_module_name); + if (rc) + return rc; + + sprintf(buf, "%x", bp->chip_num); + rc = devlink_info_version_fixed_put(req, + devlink_info_version_generic_asic_id, buf); + if (rc) + return rc; + + 
ver_resp = &bp->ver_resp; + sprintf(buf, "%x", ver_resp->chip_rev); + rc = devlink_info_version_fixed_put(req, + devlink_info_version_generic_asic_rev, buf); + if (rc) + return rc; + + if (bnxt_pf(bp)) { + sprintf(buf, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4], + bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]); + rc = devlink_info_serial_number_put(req, buf); + if (rc) + return rc; + } + + if (strlen(ver_resp->active_pkg_name)) { + rc = + devlink_info_version_running_put(req, + devlink_info_version_generic_fw, + ver_resp->active_pkg_name); + if (rc) + return rc; + } + + if (bnxt_pf(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) { + u32 ver = nvm_cfg_ver.vu32; + + sprintf(buf, "%x.%x.%x", (ver >> 16) & 0xf, (ver >> 8) & 0xf, + ver & 0xf); + rc = devlink_info_version_running_put(req, + devlink_info_version_generic_fw_psid, buf); + if (rc) + return rc; + } + + if (ver_resp->flags & ver_get_resp_flags_ext_ver_avail) { + snprintf(fw_ver, fw_ver_str_len, "%d.%d.%d.%d", + ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor, + ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch); + + snprintf(mgmt_ver, fw_ver_str_len, "%d.%d.%d.%d", + ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor, + ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch); + + snprintf(roce_ver, fw_ver_str_len, "%d.%d.%d.%d", + ver_resp->roce_fw_major, ver_resp->roce_fw_minor, + ver_resp->roce_fw_build, ver_resp->roce_fw_patch); + } else { + snprintf(fw_ver, fw_ver_str_len, "%d.%d.%d.%d", + ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b, + ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b); + + snprintf(mgmt_ver, fw_ver_str_len, "%d.%d.%d.%d", + ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b, + ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b); + + snprintf(roce_ver, fw_ver_str_len, "%d.%d.%d.%d", + ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b, + ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b); + } + rc = 
devlink_info_version_running_put(req, + devlink_info_version_generic_fw_app, fw_ver); + if (rc) + return rc; + + if (!(bp->flags & bnxt_flag_chip_p5)) { + rc = devlink_info_version_running_put(req, + devlink_info_version_generic_fw_mgmt, mgmt_ver); + if (rc) + return rc; + + rc = devlink_info_version_running_put(req, + devlink_info_version_generic_fw_roce, roce_ver); + if (rc) + return rc; + } + return 0; +} + diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +#define nvm_off_nvm_cfg_ver 602 + +#define bnxt_nvm_cfg_ver_bits 24 +#define bnxt_nvm_cfg_ver_bytes 4
|
Networking
|
9599e036b161243d7c62399a1b6c250573e08a43
|
vasundhara volam
|
drivers
|
net
|
bnxt, broadcom, ethernet
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.